diff --git a/Cargo.lock b/Cargo.lock index d4b54311623..77de82967f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2072,6 +2072,7 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-core", + "sp-inherents", "sp-keyring", "sp-maybe-compressed-blob", "sp-offchain", @@ -5088,6 +5089,99 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "nimbus-consensus" +version = "0.1.0" +dependencies = [ + "async-trait", + "cumulus-client-consensus-common", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "futures 0.3.16", + "log", + "nimbus-primitives", + "parity-scale-codec", + "parking_lot 0.9.0", + "polkadot-service", + "sc-client-api", + "sc-consensus", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "substrate-prometheus-endpoint", + "tracing", +] + +[[package]] +name = "nimbus-primitives" +version = "0.1.0" +dependencies = [ + "async-trait", + "frame-support", + "frame-system", + "parity-scale-codec", + "sp-api", + "sp-application-crypto", + "sp-inherents", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "nimbus-runtime" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-core", + "cumulus-primitives-timestamp", + "cumulus-primitives-utility", + "frame-executive", + "frame-support", + "frame-system", + "hex", + "hex-literal 0.3.1", + "log", + "nimbus-primitives", + "pallet-assets", + "pallet-author-inherent", + "pallet-author-slot-filter", + "pallet-balances", + "pallet-randomness-collective-flip", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-xcm", + "parachain-info", + "parity-scale-codec", + "polkadot-parachain", + "serde", + "sp-api", + "sp-block-builder", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + 
"sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", + "xcm", + "xcm-builder", + "xcm-executor", +] + [[package]] name = "nix" version = "0.17.0" @@ -5298,6 +5392,56 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-aura-style-filter" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "nimbus-primitives", + "parity-scale-codec", + "serde", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-author-inherent" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "log", + "nimbus-primitives", + "parity-scale-codec", + "sp-api", + "sp-application-crypto", + "sp-authorship", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-author-slot-filter" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-parachain-system", + "frame-support", + "frame-system", + "log", + "nimbus-primitives", + "pallet-author-inherent", + "parity-scale-codec", + "serde", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" @@ -6680,7 +6824,11 @@ dependencies = [ "hex-literal 0.2.1", "jsonrpc-core", "log", + "nimbus-consensus", + "nimbus-primitives", + "nimbus-runtime", "nix", + "pallet-author-inherent", "parity-scale-codec", "parking_lot 0.10.2", "polkadot-cli", diff --git a/Cargo.toml b/Cargo.toml index ae84d34ea5c..61498d40fa3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,9 +4,13 @@ members = [ "client/consensus/aura", "client/consensus/common", "client/consensus/relay-chain", + "client/consensus/nimbus", "client/network", "client/pov-recovery", "client/service", + "pallets/author-inherent", + "pallets/author-slot-filter", + "pallets/aura-style-filter", "pallets/aura-ext", "pallets/collator-selection", "pallets/dmp-queue", @@ -16,12 +20,14 @@ members = [ "pallets/xcm", "pallets/xcmp-queue", "primitives/core", + "primitives/nimbus", 
"primitives/parachain-inherent", "primitives/timestamp", "primitives/utility", "polkadot-parachains/", "polkadot-parachains/pallets/parachain-info", "polkadot-parachains/pallets/ping", + "polkadot-parachains/nimbus-runtime",#TODO change this to match chevdor's scheme "polkadot-parachains/rococo", "polkadot-parachains/shell", "polkadot-parachains/statemint-common", diff --git a/client/collator/src/lib.rs b/client/collator/src/lib.rs index 610d41ad52d..e1eadd5bcb8 100644 --- a/client/collator/src/lib.rs +++ b/client/collator/src/lib.rs @@ -259,7 +259,7 @@ where tracing::info!( target: LOG_TARGET, ?block_hash, - "Produced proof-of-validity candidate.", + "๐Ÿˆด Produced proof-of-validity candidate.", ); Some(CollationResult { diff --git a/client/consensus/nimbus/Cargo.toml b/client/consensus/nimbus/Cargo.toml new file mode 100644 index 00000000000..88f50383999 --- /dev/null +++ b/client/consensus/nimbus/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "nimbus-consensus" +description = "Client-side worker for the Nimbus family of slot-based consensus algorithms" +version = "0.1.0" +edition = "2018" + +[dependencies] +# Substrate deps +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } + +# Polkadot dependencies +polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" } + +# Cumulus dependencies +nimbus-primitives = { path = "../../../primitives/nimbus" } +cumulus-client-consensus-common = { path = "../common" } +cumulus-primitives-core = { path = "../../../primitives/core" } +cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" } + +# Other deps +futures = { version = "0.3.8", features = ["compat"] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] } +tracing = "0.1.22" +async-trait = "0.1.42" +parking_lot = "0.9" +log = "0.4" diff --git a/client/consensus/nimbus/README.md b/client/consensus/nimbus/README.md new file mode 100644 index 00000000000..ca05c484a18 --- /dev/null +++ b/client/consensus/nimbus/README.md @@ -0,0 +1,194 @@ +# Cumulo -- Nimbus โ›ˆ๏ธ + +Nimbus is a framework for building parachain consensus systems on [cumulus](https://github.com/paritytech/cumulus)-based parachains. + +Given the regular six-second pulse-like nature of the relay chain, it is natural to think about slot- +based consensus algorithms for parachains. The parachain network is responsible for liveness and +decentralization and the relay chain is responsible for finality. There is a rich design space for such +algorithms, yet some tasks are common to all (or most) of them. 
These common tasks include: + +* Signing and signature checking blocks +* Injecting authorship information into the parachain +* Block authorship and import accounting +* Filtering a large (potentially unbounded) set of potential authors to a smaller (but still potentially unbounded) set. +* Detecting when it is your turn to author and skipping other slots + +Nimbus aims to provide standard implementations for the logistical parts of such consensus engines, +along with helpful traits for implementing the parts that researchers and developers want to customize. + +## Try the Demo + +While Nimbus is primarily a development framework meant to be included in other projects, it is useful +to see a basic network in action. An example network is included in the `polkadot-parachains` example collator. You +can build it with `cargo build --release` and launch it like any other cumulus parachain. +Make sure to specify `--chain nimbus`. + +Rather than reiterate how to start a relay-para network here, I'll simply recommend you use the +excellent [Polkadot Launch](https://github.com/paritytech/polkadot-launch) tool. This repo was tested with version 1.4.1. +A [launch config file](./nimbus-launch-config.json) is provided. + +```bash +# Install polkadot launch (I used v1.4.1) +npm i -g polkadot-launch + +# Build polkadot (I used 82aa404c; check Cargo.lock to be sure) +cd polkadot +cargo build --release +cd .. + +# Build Polkadot-parachains example collator +cd cumulus +git checkout nimbus +cargo build --release + +# Launch the multi-chain +polkadot-launch ./nimbus-launch-config.json +``` + +To learn more about launching relay-para networks, check out the [cumulus workshop](https://substrate.dev/cumulus-workshop). + +## Design Overview + +If you want to start using Nimbus in your project, it is worth reading this. 
+ +At its core nimbus is a consensus engine that considers blocks valid if and only if they inject the author's public identity into the runtime, _and_ seal the block with a signature +by the author's private key. + +Compared to most consensus engines, this is _very_ permissive -- anyone who can create a signature can author valid blocks. In order to build more useful and familiar consensus engine on this foundation, nimbus provides a framework for creating filters to further restrict the set of eligible authors. These filters live inside the runtime. + +Being general in the consensus layer and deferring most checks to the runtime is the key +to nimbus's re-usability as a framework. And is the reason that *writing a consensus engine is as easy as writing a pallet* when you use nimbus. + +### Author Inherent + +The Author inherent pallet allows block authors to insert their identity into +the runtime. This feature alone is useful in many blockchains and can be used for things like block rewards. + +The author inherent provides a validation hook called `CanAuthor`. This check will be called during the inherent execution and is the main entry point to nimbus's author filters. +If you don't want to restrict authorship at all, you can just use `()`. + +As a concrete example, in a simple Proof of Stake system this check will determine +whether the author is staked. In a more realistic PoS system the `CanAuthor` check might +first make sure the author is staked, and then make sure they are eligible in _this slot_ according to round robin rules. + +Finally, the pallet copies the authorship information into a consensus digest that will stick around +in the block header. This digest can be used by UIs to display the author, and also by the consensus +engine to verify the block authorship. 
+ +**PreRuntimeDigest** +I believe the design should be changed slightly to use a preruntime digest rather than an inherent for a few reasons: + +* The data wouldn't be duplicated between an inherent and a digest. +* Nimbus client-side worker would support non-frame runtimes. +* That's how sc-consensus-aura does it. + +### Author Filters + +A primary job of a consensus engine is deciding who can author each block. Some may have a static set, others +may rotate the set each era, others may elect an always-changing subset of all potential authors. There +is much space for creativity, research, and design, and Nimbus strives to provide a flexible interface +for this creative work. You can express all the interesting parts of your +consensus engine simply by creating filters that implement the `CanAuthor` trait. The rest of Nimbus will #JustWork for you. + +This repository comes with a few example filters already, and additional examples are welcome. The examples are: +* PseudoRandom FixedSized Subset - This filter takes a finite set (eg a staked set) and filters it down to a pseudo-random +subset at each height. The eligible ratio is configurable in the pallet. This is a good learning example. +* Aura - The authority round consensus engine is popular in the Substrate ecosystem because it was one +of the first (and simplest!) engines implemented in Substrate. Aura can be expressed in the Nimbus +filter framework and is included as an example filter. If you are considering using aura, that crate +has good documentation on how it differs from `sc-consensus-aura`. +* (Planned) FixedSizedSubset - The author submits a VRF output that has to be below a threshold to be able to author. +* (Planned) Filter Combinator - A filter that wraps two other filters. It uses one in even slots and the other in odd slots. + +### Author Filter Runtime API + +Nimbus makes the design choice to include the author checking logic in the runtime. 
This is in contrast to the existing implementations of Aura and Babe where the authorship checks are offchain. + +While moving the check in-runtime provides a lot of flexibility and simplifies interfacing with relay-chain validators, it makes it impossible +for authoring nodes to predict whether they will be eligible without calling into the runtime. +To achieve this, we provide a runtime API that makes the minimal calculation necessary to determine +whether a specified author will be eligible at the specified slot. + +### Nimbus Consensus Worker + +Nimbus consensus is the primary client-side consensus worker. It implements the `ParachainConsensus` +trait introduced to cumulus in https://github.com/paritytech/cumulus/pull/329. It is not likely that +you will need to change this code directly to implement your engine as it is entirely abstracted over +the filters you use. The consensus engine performs these tasks: + +* Slot prediction - it calls the runtime API mentioned previously to determine whether it is eligible. If not, it returns early. +* Authorship - It calls into a standard Substrate proposer to construct a block (probably including the author inherent). +* Self import - it imports the block that the proposer created (called the pre-block) into the node's local database. +* Sealing - It adds a seal digest to the block - This is what is used by other nodes to verify the authorship information. + +### Verifier and Import Queue + +For a parachain node to import a sealed block authored by one of its peers, it needs to first check that the signature is valid by the author that was injected into the runtime. This is the job of the verifier. It +will remove the nimbus seal and check it against the nimbus consensus digest from the runtime. If that process fails, +the block is immediately thrown away before the expensive execution even begins. 
If it succeeds, then +the pre-block (the part that's left after the seal is stripped) is passed into the +[import pipeline](https://substrate.dev/docs/en/knowledgebase/advanced/block-import) for processing +and execution. Finally, the locally produced result is compared to the result received across the network. + +### Custom Block Executor + +We've already discussed how parachain nodes (both the one that authors a block, and also its peers) +import blocks. In a standalone blockchain, that's the end of the story. But for a parachain, we also +need our relay chain validators to re-execute and validate the parachain block. Validators do this in +a unique way, and entirely in wasm. Providing the `validate_block` function that the validators use +is the job of the `register_validate_block!` macro from Cumulus. + +Typically a cumulus runtime invokes that macro like this: +```rust +cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive); +``` + +You can see that the validators use the exact same executive that the parachain nodes do. Now that +we have sealed blocks, that must change. The validators need to strip and verify the seal, and re-execute +the pre-block just like the parachain nodes did. And without access to an offchain verifier, they must +do this all in the runtime. For that purpose, we provide an alternate executive which wraps the normal +FRAME executive. The wrapper strips and checks the seal, just like the verifier did, and then passes the pre-block to the inner FRAME executive for re-execution. + +## Write Your Own Consensus Logic + +If you have an idea for a new slot-based parachain consensus algorithm, Nimbus is a quick way to get +it working! The fastest way to start hacking is to fork this repo and customize the template node. 
+ +If you'd rather dive in than read one more sentence, then **start hacking in the `author-slot-filter` +pallet.** + +In most cases, you can use all the off-the-shelf components and simply write your filters. It is also +possible to compose existing filters to build more complex logic from smaller pieces. + +## Authoring and Import Diagrams + +One node authors the block, then it is processed in three different ways. + +| | Author | Parachain Peer | Relay Validator | +| ------------------- | ------ | -------------- | --------- | +| Predict Eligibility | โœ… | โŒ | โŒ | +| Author Block | โœ… | โŒ | โŒ | +| Runs Verifier | โŒ | โœ… | โŒ | +| Import Pipeline | โœ… | โœ… | โŒ | +| Custom Pre exec | โŒ | โŒ | โœ… | +| Normal FRAME exec | โœ… | โœ… | โœ… | + +## Roadmap + +The Nimbus framework is intended to be loosely coupled with Cumulus. It remains to be +seen whether it should live with Cumulus or in its own repository. + +### Next tasks +* Proper trait for interacting with digests +* More example filters +* Share code between verifier and wrapper executive +* Client-side worker for standalone (non para) blockchain +* Aurand as an example of composing filters +* Second filter trait for exhaustive sets (As opposed to current propositional approach) + +## Contributions Welcome + +Try it out, open issues, submit PRs, review code. Whether you like to tinker with a running node, or +analyze security from an academic perspective, your contributions are welcome. + +I am happy to support users who want to use nimbus, or want feedback on their consensus engines. diff --git a/client/consensus/nimbus/src/import_queue.rs b/client/consensus/nimbus/src/import_queue.rs new file mode 100644 index 00000000000..bec5c4ec0a0 --- /dev/null +++ b/client/consensus/nimbus/src/import_queue.rs @@ -0,0 +1,195 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use std::{marker::PhantomData, sync::Arc}; + +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::Result as ClientResult; +use sp_consensus::{ + error::Error as ConsensusError, CacheKeyId, +}; +use sc_consensus::{ + BlockImport, BlockImportParams, + import_queue::{BasicQueue, Verifier as VerifierT}, +}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, +}; +use nimbus_primitives::{NimbusId, NimbusSignature, NimbusPair}; +use sp_application_crypto::{TryFrom, Pair as _, Public as _}; +use log::debug; + +/// The Nimbus verifier strips the seal digest, and checks that it is a valid signature by +/// the same key that was injected into the runtime and noted in the Seal digest. +/// From Nimbu's perspective any block that faithfully reports its authorship to the runtime +/// is valid. The intention is that the runtime itself may then put further restrictions on +/// the identity of the author. 
+struct Verifier { + client: Arc, + create_inherent_data_providers: CIDP, + _marker: PhantomData, +} + +#[async_trait::async_trait] +impl VerifierT for Verifier +where + Block: BlockT, + Client: ProvideRuntimeApi + Send + Sync, + >::Api: BlockBuilderApi, + CIDP: CreateInherentDataProviders , +{ + async fn verify( + &mut self, + mut block_params: BlockImportParams, + ) -> Result< + ( + BlockImportParams, + Option)>>, + ), + String, + > { + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Header hash before popping digest {:?}", block_params.header.hash()); + // Grab the digest from the seal + //TODO use CompatibleDigest trait here once I write it. For now assume the seal is last. + let seal = block_params.header.digest_mut().pop().expect("Block should have at least one digest on it"); + + let sig = match seal { + DigestItem::Seal(id, ref sig) if id == *b"nmbs" => sig.clone(), + _ => return Err("HeaderUnsealed".into()), + }; + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Header hash after popping digest {:?}", block_params.header.hash()); + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Signature according to verifier is {:?}", sig); + + // Grab the digest from the runtime + //TODO use the trait. Maybe this code should move to the trait. 
+ let consensus_digest = block_params.header + .digest() + .logs + .iter() + .find(|digest| { + match *digest { + DigestItem::Consensus(id, _) if id == b"nmbs" => true, + _ => false, + } + }) + .expect("A single consensus digest should be added by the runtime when executing the author inherent."); + + let claimed_author = match *consensus_digest { + DigestItem::Consensus(id, ref author_id) if id == *b"nmbs" => author_id.clone(), + _ => panic!("Expected consensus digest to contains author id bytes"), + }; + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Claimed Author according to verifier is {:?}", claimed_author); + + // Verify the signature + let valid_signature = NimbusPair::verify( + &NimbusSignature::try_from(sig).expect("Bytes should convert to signature correctly"), + block_params.header.hash(), + &NimbusId::from_slice(&claimed_author), + ); + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Valid signature? {:?}", valid_signature); + + if !valid_signature{ + return Err("Block signature invalid".into()); + } + + // This part copied from RelayChainConsensus. I guess this is the inherent checking. 
+ if let Some(inner_body) = block_params.body.take() { + let inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(*block_params.header.parent_hash(), ()) + .await + .map_err(|e| e.to_string())?; + + let inherent_data = inherent_data_providers + .create_inherent_data() + .map_err(|e| format!("{:?}", e))?; + + let block = Block::new(block_params.header.clone(), inner_body); + + let inherent_res = self + .client + .runtime_api() + .check_inherents( + &BlockId::Hash(*block_params.header.parent_hash()), + block.clone(), + inherent_data, + ) + .map_err(|e| format!("{:?}", e))?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match inherent_data_providers.try_handle_error(&i, &e).await { + Some(r) => r.map_err(|e| format!("{:?}", e))?, + None => Err(format!( + "Unhandled inherent error from `{}`.", + String::from_utf8_lossy(&i) + ))?, + } + } + } + + let (_, inner_body) = block.deconstruct(); + block_params.body = Some(inner_body); + } + + block_params.post_digests.push(seal); + + debug!(target: crate::LOG_TARGET, "๐Ÿชฒ Just finished verifier. posthash from params is {:?}", &block_params.post_hash()); + + Ok((block_params, None)) + } +} + +/// Start an import queue for a Cumulus collator that does not uses any special authoring logic. 
+pub fn import_queue( + client: Arc, + block_import: I, + create_inherent_data_providers: CIDP, + spawner: &impl sp_core::traits::SpawnEssentialNamed, + registry: Option<&substrate_prometheus_endpoint::Registry>, +) -> ClientResult> +where + I: BlockImport + Send + Sync + 'static, + I::Transaction: Send, + Client: ProvideRuntimeApi + Send + Sync + 'static, + >::Api: BlockBuilderApi, + CIDP: CreateInherentDataProviders + 'static, +{ + let verifier = Verifier { + client, + create_inherent_data_providers, + _marker: PhantomData, + }; + + Ok(BasicQueue::new( + verifier, + Box::new(cumulus_client_consensus_common::ParachainBlockImport::new( + block_import, + )), + None, + spawner, + registry, + )) +} diff --git a/client/consensus/nimbus/src/lib.rs b/client/consensus/nimbus/src/lib.rs new file mode 100644 index 00000000000..4a436900ff8 --- /dev/null +++ b/client/consensus/nimbus/src/lib.rs @@ -0,0 +1,522 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The nimbus consensus client-side worker +//! +//! It queries the in-runtime filter to determine whether any keys +//! stored in its keystore are eligible to author at this slot. If it has an eligible +//! key it authors. 
+ +use cumulus_client_consensus_common::{ + ParachainBlockImport, ParachainCandidate, ParachainConsensus, +}; +use cumulus_primitives_core::{ + relay_chain::v1::{Block as PBlock, Hash as PHash, ParachainHost}, + ParaId, PersistedValidationData, +}; +pub use import_queue::import_queue; +use log::{info, warn, debug}; +use parking_lot::Mutex; +use polkadot_service::ClientHandle; +use sc_client_api::Backend; +use sp_api::{ProvideRuntimeApi, BlockId, ApiExt}; +use sp_consensus::{ + BlockOrigin, EnableProofRecording, Environment, + ProofRecording, Proposal, Proposer, +}; +use sc_consensus::{BlockImport, BlockImportParams}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; +use tracing::error; +use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sp_core::crypto::Public; +use nimbus_primitives::{AuthorFilterAPI, NIMBUS_ENGINE_ID, NIMBUS_KEY_ID, NimbusId}; +mod import_queue; + +const LOG_TARGET: &str = "filtering-consensus"; + +/// The implementation of the relay-chain provided consensus for parachains. 
+pub struct NimbusConsensus { + para_id: ParaId, + _phantom: PhantomData, + proposer_factory: Arc>, + create_inherent_data_providers: Arc, + block_import: Arc>>, + relay_chain_client: Arc, + relay_chain_backend: Arc, + parachain_client: Arc, + keystore: SyncCryptoStorePtr, + skip_prediction: bool, +} + +impl Clone for NimbusConsensus { + fn clone(&self) -> Self { + Self { + para_id: self.para_id, + _phantom: PhantomData, + proposer_factory: self.proposer_factory.clone(), + create_inherent_data_providers: self.create_inherent_data_providers.clone(), + block_import: self.block_import.clone(), + relay_chain_backend: self.relay_chain_backend.clone(), + relay_chain_client: self.relay_chain_client.clone(), + parachain_client: self.parachain_client.clone(), + keystore: self.keystore.clone(), + skip_prediction: self.skip_prediction, + } + } +} + +impl NimbusConsensus +where + B: BlockT, + RClient: ProvideRuntimeApi, + RClient::Api: ParachainHost, + RBackend: Backend, + ParaClient: ProvideRuntimeApi, + CIDP: CreateInherentDataProviders, +{ + /// Create a new instance of nimbus consensus. + pub fn new( + para_id: ParaId, + proposer_factory: PF, + create_inherent_data_providers: CIDP, + block_import: BI, + polkadot_client: Arc, + polkadot_backend: Arc, + parachain_client: Arc, + keystore: SyncCryptoStorePtr, + skip_prediction: bool, + ) -> Self { + Self { + para_id, + proposer_factory: Arc::new(Mutex::new(proposer_factory)), + create_inherent_data_providers: Arc::new(create_inherent_data_providers), + block_import: Arc::new(futures::lock::Mutex::new(ParachainBlockImport::new( + block_import, + ))), + relay_chain_backend: polkadot_backend, + relay_chain_client: polkadot_client, + parachain_client, + keystore, + skip_prediction, + _phantom: PhantomData, + } + } + + //TODO Could this be a provided implementation now that we have this async inherent stuff? + /// Create the data. 
+ async fn inherent_data( + &self, + parent: B::Hash, + validation_data: &PersistedValidationData, + relay_parent: PHash, + author_id: NimbusId, + ) -> Option { + let inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent, (relay_parent, validation_data.clone(), author_id)) + .await + .map_err(|e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to create inherent data providers.", + ) + }) + .ok()?; + + inherent_data_providers + .create_inherent_data() + .map_err(|e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to create inherent data.", + ) + }) + .ok() + } +} + +#[async_trait::async_trait] +impl ParachainConsensus + for NimbusConsensus +where + B: BlockT, + RClient: ProvideRuntimeApi + Send + Sync, + RClient::Api: ParachainHost, + RBackend: Backend, + BI: BlockImport + Send + Sync, + PF: Environment + Send + Sync, + PF::Proposer: Proposer< + B, + Transaction = BI::Transaction, + ProofRecording = EnableProofRecording, + Proof = ::Proof, + >, + ParaClient: ProvideRuntimeApi + Send + Sync, + ParaClient::Api: AuthorFilterAPI, + CIDP: CreateInherentDataProviders, +{ + async fn produce_candidate( + &mut self, + parent: &B::Header, + relay_parent: PHash, + validation_data: &PersistedValidationData, + ) -> Option> { + // Design decision: We will check the keystore for any available keys. Then we will iterate + // those keys until we find one that is eligible. If none are eligible, we skip this slot. + // If multiple are eligible, we only author with the first one. + + // Get allthe available keys + let available_keys = + SyncCryptoStore::keys(&*self.keystore, NIMBUS_KEY_ID) + .expect("keystore should return the keys it has"); + + // Print a more helpful message than "not eligible" when there are no keys at all. + if available_keys.is_empty() { + warn!(target: LOG_TARGET, "๐Ÿ” No Nimbus keys available. 
We will not be able to author."); + return None; + } + + let at = BlockId::Hash(parent.hash()); + // Get `AuthorFilterAPI` version. + let api_version = self.parachain_client.runtime_api() + .api_version::>(&at) + .expect("Runtime api access to not error."); + + if api_version.is_none() { + tracing::error!( + target: LOG_TARGET, "Could not find `AuthorFilterAPI` version.", + ); + return None; + } + let api_version = api_version.unwrap(); + + // Iterate keys until we find an eligible one, or run out of candidates. + // If we are skipping prediction, then we author withthe first key we find. + // prediction skipping only really amkes sense when there is a single key in the keystore. + let maybe_key = available_keys.into_iter().find(|type_public_pair| { + + // If we are not predicting, just return the first one we find. + self.skip_prediction || + + // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue + // That I should be passing Vec across the wasm boundary? + if api_version >= 2 { + self.parachain_client.runtime_api().can_author( + &at, + NimbusId::from_slice(&type_public_pair.1), + validation_data.relay_parent_number, + parent, + ) + .expect("Author API should not return error") + } else { + #[allow(deprecated)] + self.parachain_client.runtime_api().can_author_before_version_2( + &at, + NimbusId::from_slice(&type_public_pair.1), + validation_data.relay_parent_number, + ) + .expect("Author API version 2 should not return error") + } + }); + + // If there are no eligible keys, print the log, and exit early. 
+ let type_public_pair = match maybe_key { + Some(p) => p, + None => { + info!( + target: LOG_TARGET, + "๐Ÿ”ฎ Skipping candidate production because we are not eligible" + ); + return None; + } + }; + + let proposer_future = self.proposer_factory.lock().init(&parent); + + let proposer = proposer_future + .await + .map_err( + |e| error!(target: LOG_TARGET, error = ?e, "Could not create proposer."), + ) + .ok()?; + + let inherent_data = self.inherent_data(parent.hash(),&validation_data, relay_parent, NimbusId::from_slice(&type_public_pair.1)).await?; + + let Proposal { + block, + storage_changes, + proof, + } = proposer + .propose( + inherent_data, + Default::default(), + //TODO: Fix this. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes that encapsulates the proof size, + // we should be able to use the maximum pov size. + Some((validation_data.max_pov_size / 2) as usize), + ) + .await + .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Proposing failed.")) + .ok()?; + + let (header, extrinsics) = block.clone().deconstruct(); + + let pre_hash = header.hash(); + + let sig = SyncCryptoStore::sign_with( + &*self.keystore, + NIMBUS_KEY_ID, + &type_public_pair, + pre_hash.as_ref(), + ) + .expect("Keystore should be able to sign") + .expect("We already checked that the key was present"); + + debug!( + target: LOG_TARGET, + "The signature is \n{:?}", sig + ); + + // TODO Make a proper CompatibleDigest trait https://github.com/paritytech/substrate/blob/master/primitives/consensus/aura/src/digests.rs#L45 + let sig_digest = sp_runtime::generic::DigestItem::Seal(NIMBUS_ENGINE_ID, sig); + + let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header.clone()); + block_import_params.post_digests.push(sig_digest.clone()); + block_import_params.body = Some(extrinsics.clone()); + block_import_params.state_action = sc_consensus::StateAction::ApplyChanges( + 
sc_consensus::StorageChanges::Changes(storage_changes) + ); + + // Print the same log line as slots (aura and babe) + info!( + "๐Ÿ”– Sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + *header.number(), + block_import_params.post_hash(), + pre_hash, + ); + + if let Err(err) = self + .block_import + .lock() + .await + .import_block(block_import_params, Default::default()) + .await + { + error!( + target: LOG_TARGET, + at = ?parent.hash(), + error = ?err, + "Error importing built block.", + ); + + return None; + } + + // Compute info about the block after the digest is added + let mut post_header = header.clone(); + post_header.digest_mut().logs.push(sig_digest.clone()); + let post_block = B::new(post_header, extrinsics); + + // Returning the block WITH the seal for distribution around the network. + Some(ParachainCandidate { block: post_block, proof }) + } +} + +/// Paramaters of [`build_relay_chain_consensus`]. +/// +/// I briefly tried the async keystore approach, but decided to go sync so I can copy +/// code from Aura. Maybe after it is working, Jeremy can help me go async. +pub struct BuildNimbusConsensusParams { + pub para_id: ParaId, + pub proposer_factory: PF, + pub create_inherent_data_providers: CIDP, + pub block_import: BI, + pub relay_chain_client: polkadot_service::Client, + pub relay_chain_backend: Arc, + pub parachain_client: Arc, + pub keystore: SyncCryptoStorePtr, + pub skip_prediction: bool, + +} + +/// Build the [`NimbusConsensus`]. +/// +/// Returns a boxed [`ParachainConsensus`]. 
+pub fn build_nimbus_consensus( + BuildNimbusConsensusParams { + para_id, + proposer_factory, + create_inherent_data_providers, + block_import, + relay_chain_client, + relay_chain_backend, + parachain_client, + keystore, + skip_prediction, + }: BuildNimbusConsensusParams, +) -> Box> +where + Block: BlockT, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer< + Block, + Transaction = BI::Transaction, + ProofRecording = EnableProofRecording, + Proof = ::Proof, + >, + BI: BlockImport + Send + Sync + 'static, + RBackend: Backend + 'static, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sc_client_api::StateBackendFor: sc_client_api::StateBackend>, + ParaClient: ProvideRuntimeApi + Send + Sync + 'static, + ParaClient::Api: AuthorFilterAPI, + CIDP: CreateInherentDataProviders + 'static, +{ + NimbusConsensusBuilder::new( + para_id, + proposer_factory, + block_import, + create_inherent_data_providers, + relay_chain_client, + relay_chain_backend, + parachain_client, + keystore, + skip_prediction, + ) + .build() +} + +/// Nimbus consensus builder. +/// +/// Builds a [`NimbusConsensus`] for a parachain. As this requires +/// a concrete relay chain client instance, the builder takes a [`polkadot_service::Client`] +/// that wraps this concrete instanace. By using [`polkadot_service::ExecuteWithClient`] +/// the builder gets access to this concrete instance. 
+struct NimbusConsensusBuilder { + para_id: ParaId, + _phantom: PhantomData, + proposer_factory: PF, + create_inherent_data_providers: CIDP, + block_import: BI, + relay_chain_backend: Arc, + relay_chain_client: polkadot_service::Client, + parachain_client: Arc, + keystore: SyncCryptoStorePtr, + skip_prediction: bool, +} + +impl NimbusConsensusBuilder +where + Block: BlockT, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sc_client_api::StateBackendFor: sc_client_api::StateBackend>, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer< + Block, + Transaction = BI::Transaction, + ProofRecording = EnableProofRecording, + Proof = ::Proof, + >, + BI: BlockImport + Send + Sync + 'static, + RBackend: Backend + 'static, + ParaClient: ProvideRuntimeApi + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + 'static, +{ + /// Create a new instance of the builder. + fn new( + para_id: ParaId, + proposer_factory: PF, + block_import: BI, + create_inherent_data_providers: CIDP, + relay_chain_client: polkadot_service::Client, + relay_chain_backend: Arc, + parachain_client: Arc, + keystore: SyncCryptoStorePtr, + skip_prediction: bool, + ) -> Self { + Self { + para_id, + _phantom: PhantomData, + proposer_factory, + block_import, + create_inherent_data_providers, + relay_chain_backend, + relay_chain_client, + parachain_client, + keystore, + skip_prediction, + } + } + + /// Build the nimbus consensus. 
+ fn build(self) -> Box> + where + ParaClient::Api: AuthorFilterAPI, + { + self.relay_chain_client.clone().execute_with(self) + } +} + +impl polkadot_service::ExecuteWithClient + for NimbusConsensusBuilder +where + Block: BlockT, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sc_client_api::StateBackendFor: sc_client_api::StateBackend>, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer< + Block, + Transaction = BI::Transaction, + ProofRecording = EnableProofRecording, + Proof = ::Proof, + >, + BI: BlockImport + Send + Sync + 'static, + RBackend: Backend + 'static, + ParaClient: ProvideRuntimeApi + Send + Sync + 'static, + ParaClient::Api: AuthorFilterAPI, + CIDP: CreateInherentDataProviders + 'static, +{ + type Output = Box>; + + fn execute_with_client(self, client: Arc) -> Self::Output + where + >::StateBackend: sp_api::StateBackend>, + PBackend: Backend, + PBackend::State: sp_api::StateBackend, + Api: polkadot_service::RuntimeApiCollection, + PClient: polkadot_service::AbstractClient + 'static, + ParaClient::Api: AuthorFilterAPI, + { + Box::new(NimbusConsensus::new( + self.para_id, + self.proposer_factory, + self.create_inherent_data_providers, + self.block_import, + client.clone(), + self.relay_chain_backend, + self.parachain_client, + self.keystore, + self.skip_prediction, + )) + } +} diff --git a/nimbus-launch-config.json b/nimbus-launch-config.json new file mode 100644 index 00000000000..8002eb3e0a4 --- /dev/null +++ b/nimbus-launch-config.json @@ -0,0 +1,60 @@ +{ + "relaychain": { + "bin": "../polkadot/target/release/polkadot", + "chain": "rococo-local", + "nodes": [ + { + "name": "alice", + "wsPort": 9944, + "port": 30444 + }, + { + "name": "bob", + "wsPort": 9955, + "port": 30555 + }, + { + "name": "charlie", + "wsPort": 9966, + "port": 30666 + }, + { + "name": "dave", + "wsPort": 9977, + "port": 30777 + } + ], + "runtime_genesis_config": { + "parachainsConfiguration": { + "config": { + 
"validation_upgrade_frequency": 1, + "validation_upgrade_delay": 1 + } + } + } + }, + "parachains": [ + { + "bin": "./target/release/polkadot-collator", + "id": "200", + "chain": "nimbus", + "balance": "1000000000000000000000", + "nodes": [ + { + "wsPort": 9988, + "port": 30888, + "flags": ["--alice", "--", "--execution=wasm"] + }, + { + "wsPort": 9999, + "port": 30999, + "flags": ["--bob", "--", "--execution=wasm"] + } + ] + } + ], + "simpleParachains": [], + "hrmpChannels": [], + "types": {}, + "finalization": false +} diff --git a/pallets/aura-style-filter/Cargo.toml b/pallets/aura-style-filter/Cargo.toml new file mode 100644 index 00000000000..795c3414248 --- /dev/null +++ b/pallets/aura-style-filter/Cargo.toml @@ -0,0 +1,28 @@ +[package] +authors = ["PureStake"] +edition = "2018" +name = "pallet-aura-style-filter" +version = "0.1.0" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.101", optional = true, features = ["derive"] } +frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +nimbus-primitives = { path = "../../primitives/nimbus", default-features = false } + +[features] +default = ["std"] +std = [ + "serde", + "parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "nimbus-primitives/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/pallets/aura-style-filter/src/lib.rs 
b/pallets/aura-style-filter/src/lib.rs new file mode 100644 index 00000000000..7cfe52397c3 --- /dev/null +++ b/pallets/aura-style-filter/src/lib.rs @@ -0,0 +1,78 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A Nimbus filter for the AuRa consensus algorithm. This filter does not use any entropy, it +//! simply rotates authors in order. A single author is eligible at each slot. +//! +//! In the Substrate ecosystem, this algorithm is typically known as AuRa (authority round). +//! There is a well known implementation in the main Substrate repository and published at +//! https://crates.io/crates/sc-consensus-aura. There are two primary differences between +//! the approaches: +//! +//! 1. This filter leverages all the heavy lifting of the Nimbus framework and consequently is +//! capable of expressing Aura in < 100 lines of code. +//! +//! Whereas sc-consensus-aura includes the entire consensus stack including block signing, digest +//! formats, and slot prediction. This is a lot of overhead for a sipmle round robin +//! consensus that basically boils down to this function +//! https://github.com/paritytech/substrate/blob/0f849efc/client/consensus/aura/src/lib.rs#L91-L106 +//! +//! 2. The Nimbus framework places the author checking logic in the runtime which makes it relatively +//! 
easy for relay chain validators to confirm the author is valid. +//! +//! Whereas sc-consensus-aura places the author checking offchain. The offchain approach is fine +//! for standalone layer 1 blockchains, but net well suited for verification on the relay chain +//! where validators only run a wasm blob. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet; +pub use pallet::*; + +#[pallet] +pub mod pallet { + + use frame_support::pallet_prelude::*; + use sp_std::vec::Vec; + + //TODO Now that the CanAuthor trait takes a slot number, I don't think this even needs to be a pallet. + // I think it could eb jsut a simple type. + /// The Author Filter pallet + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// Configuration trait of this pallet. + #[pallet::config] + pub trait Config: frame_system::Config { + /// A source for the complete set of potential authors. + /// The starting point of the filtering. + type PotentialAuthors: Get>; + } + + // This code will be called by the author-inherent pallet to check whether the reported author + // of this block is eligible at this slot. We calculate that result on demand and do not + // record it instorage. + impl nimbus_primitives::CanAuthor for Pallet { + fn can_author(account: &T::AccountId, slot: &u32) -> bool { + let active: Vec = T::PotentialAuthors::get(); + + // This is the core Aura logic right here. 
+ let active_author = &active[*slot as usize % active.len()]; + + account == active_author + } + } +} diff --git a/pallets/author-inherent/Cargo.toml b/pallets/author-inherent/Cargo.toml new file mode 100644 index 00000000000..3f382bc5e2f --- /dev/null +++ b/pallets/author-inherent/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "pallet-author-inherent" +version = "0.1.0" +description = "Inject the block author via an inherent, and persist it via a Consensus digest" +authors = ["PureStake"] +edition = "2018" +license = 'GPL-3.0-only' + +[dependencies] +log = { version = "0.4", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +nimbus-primitives = { path = "../../primitives/nimbus", default-features = false } + +[dev-dependencies] +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "log/std", + "frame-support/std", + "frame-system/std", + "parity-scale-codec/std", + "sp-application-crypto/std", + "sp-authorship/std", + "sp-inherents/std", + "sp-runtime/std", + "sp-std/std", + "sp-api/std", + "nimbus-primitives/std", +] diff --git a/pallets/author-inherent/src/exec.rs b/pallets/author-inherent/src/exec.rs new file mode 100644 index 00000000000..5f8c2f8a8a3 --- /dev/null +++ b/pallets/author-inherent/src/exec.rs @@ -0,0 +1,108 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Block executive to be used by relay chain validators when validating parachain blocks built +//! with the nimubs consensus family. + +use frame_support::traits::ExecuteBlock; +use sp_api::{BlockT, HeaderT}; +// For some reason I can't get these logs to actually print +use log::debug; +use sp_runtime::{RuntimeAppPublic, generic::DigestItem}; +use nimbus_primitives::{NIMBUS_ENGINE_ID, NimbusId, NimbusSignature}; +use sp_application_crypto::{TryFrom, Public as _}; + +/// Block executive to be used by relay chain validators when validating parachain blocks built +/// with the nimubs consensus family. 
+/// +/// This will strip the seal digest, and confirm that it contains a valid signature +/// By the block author reported in the author inherent. +/// +/// Essentially this contains the logic of the verifier plus the inner executive. +/// TODO Degisn improvement: +/// Can we share code with the verifier? +/// Can this struct take a verifier as an associated type? +/// Or maybe this will just get simpler in general when https://github.com/paritytech/polkadot/issues/2888 lands +pub struct BlockExecutor(sp_std::marker::PhantomData<(T, I)>); + +impl ExecuteBlock for BlockExecutor +where + Block: BlockT, + I: ExecuteBlock, +{ + fn execute_block(block: Block) { + let (mut header, extrinsics) = block.deconstruct(); + + debug!(target: "executive", "In hacked Executive. Initial digests are {:?}", header.digest()); + + // Set the seal aside for checking. + let seal = header + .digest_mut() + .logs + .pop() + .expect("Seal digest is present and is last item"); + + debug!(target: "executive", "In hacked Executive. digests after stripping {:?}", header.digest()); + debug!(target: "executive", "The seal we got {:?}", seal); + + let sig = match seal { + DigestItem::Seal(id, ref sig) if id == NIMBUS_ENGINE_ID => sig.clone(), + _ => panic!("HeaderUnsealed"), + }; + + debug!(target: "executive", "๐Ÿชฒ Header hash after popping digest {:?}", header.hash()); + + debug!(target: "executive", "๐Ÿชฒ Signature according to executive is {:?}", sig); + + // Grab the digest from the runtime + //TODO use the CompatibleDigest trait. Maybe this code should move to the trait. 
+ let consensus_digest = header + .digest() + .logs + .iter() + .find(|digest| { + match *digest { + DigestItem::Consensus(id, _) if id == &NIMBUS_ENGINE_ID => true, + _ => false, + } + }) + .expect("A single consensus digest should be added by the runtime when executing the author inherent."); + + let claimed_author = match *consensus_digest { + DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => author_id.clone(), + _ => panic!("Expected consensus digest to contains author id bytes"), + }; + + debug!(target: "executive", "๐Ÿชฒ Claimed Author according to executive is {:?}", claimed_author); + + // Verify the signature + let valid_signature = NimbusId::from_slice(&claimed_author).verify( + &header.hash(), + &NimbusSignature::try_from(sig).expect("Bytes should convert to signature correctly"), + ); + + debug!(target: "executive", "๐Ÿชฒ Valid signature? {:?}", valid_signature); + + if !valid_signature{ + panic!("Block signature invalid"); + } + + + // Now that we've verified the signature, hand execution off to the inner executor + // which is probably the normal frame executive. + I::execute_block(Block::new(header, extrinsics)); + } +} diff --git a/pallets/author-inherent/src/lib.rs b/pallets/author-inherent/src/lib.rs new file mode 100644 index 00000000000..81d530951d0 --- /dev/null +++ b/pallets/author-inherent/src/lib.rs @@ -0,0 +1,367 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Pallet that allows block authors to include their identity in a block via an inherent. +//! Currently the author does not _prove_ their identity, just states it. So it should not be used, +//! for things like equivocation slashing that require authenticated authorship information. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{ + traits::FindAuthor, +}; +use parity_scale_codec::{Decode, Encode}; +use sp_inherents::{InherentIdentifier, IsFatalError}; +use sp_runtime::{ + ConsensusEngineId, DigestItem, RuntimeString, RuntimeAppPublic, +}; +use log::debug; +use nimbus_primitives::{AccountLookup, CanAuthor, NIMBUS_ENGINE_ID, SlotBeacon, EventHandler, INHERENT_IDENTIFIER}; + +mod exec; +pub use exec::BlockExecutor; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + /// The Author Inherent pallet. The core of the nimbus consensus framework's runtime presence. + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::config] + pub trait Config: frame_system::Config { + // This is copied from Aura. I wonder if I really need all those trait bounds. For now I'll leave them. + // TODO could I remove this type entirely and just always use NimbusId? Why didn't Aura do that? + /// The identifier type for an authority. + type AuthorId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize; + + /// A type to convert between AuthorId and AccountId. This is useful when you want to associate + /// Block authoring behavior with an AccoutId for rewards or slashing. If you do not need to + /// hold an AccountID responsible for authoring use `()` which acts as an identity mapping. 
+ type AccountLookup: AccountLookup; + + /// Other pallets that want to be informed about block authorship + type EventHandler: EventHandler; + + /// The final word on whether the reported author can author at this height. + /// This will be used when executing the inherent. This check is often stricter than the + /// Preliminary check, because it can use more data. + /// If the pallet that implements this trait depends on an inherent, that inherent **must** + /// be included before this one. + type CanAuthor: CanAuthor; + + /// Some way of determining the current slot for purposes of verifying the author's eligibility + type SlotBeacon: SlotBeacon; + } + + // If the AccountId type supports it, then this pallet can be BoundToRuntimeAppPublic + impl sp_runtime::BoundToRuntimeAppPublic for Pallet + where + T: Config, + T::AuthorId: RuntimeAppPublic, + { + type Public = T::AuthorId; + } + #[pallet::error] + pub enum Error { + /// Author already set in block. + AuthorAlreadySet, + /// No AccountId was found to be associated with this author + NoAccountId, + /// The author in the inherent is not an eligible author. + CannotBeAuthor, + } + + + /// Author of current block. 
+ #[pallet::storage] + pub type Author = StorageValue<_, T::AccountId, OptionQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_: T::BlockNumber) -> Weight { + >::kill(); + 0 + } + } + + #[pallet::call] + impl Pallet { + /// Inherent to set the author of a block + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn set_author(origin: OriginFor, author: T::AuthorId) -> DispatchResult { + + ensure_none(origin)?; + + ensure!(>::get().is_none(), Error::::AuthorAlreadySet); + debug!(target: "author-inherent", "Author was not already set"); + + let slot = T::SlotBeacon::slot(); + debug!(target: "author-inherent", "Slot is {:?}", slot); + + let account = T::AccountLookup::lookup_account(&author).ok_or( + Error::::NoAccountId + )?; + + ensure!(T::CanAuthor::can_author(&account, &slot), Error::::CannotBeAuthor); + + // Update storage + Author::::put(&account); + + // Add a consensus digest so the client-side worker can verify the block is signed by the right person. + frame_system::Pallet::::deposit_log(DigestItem::::Consensus( + NIMBUS_ENGINE_ID, + author.encode(), + )); + + // Notify any other pallets that are listening (eg rewards) about the author + T::EventHandler::note_author(account); + + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { + // Return Ok(Some(_)) unconditionally because this inherent is required in every block + // If it is not found, throw an AuthorInherentRequired error. + Ok(Some(InherentError::Other( + sp_runtime::RuntimeString::Borrowed("AuthorInherentRequired"), + ))) + } + + fn create_inherent(data: &InherentData) -> Option { + let author_raw = data + .get_data::(&INHERENT_IDENTIFIER); + + debug!("In create_inherent (runtime side). 
data is"); + debug!("{:?}", author_raw); + + let author = author_raw + .expect("Gets and decodes authorship inherent data")?; + + Some(Call::set_author(author)) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set_author(_)) + } + } + + impl FindAuthor for Pallet { + fn find_author<'a, I>(_digests: I) -> Option + where + I: 'a + IntoIterator, + { + // We don't use the digests at all. + // This will only return the correct author _after_ the authorship inherent is processed. + >::get() + } + } + + /// To learn whether a given AuthorId can author, you call the author-inherent directly. + /// It will do the mapping lookup. + impl CanAuthor for Pallet { + fn can_author(author: &T::AuthorId, slot: &u32) -> bool { + let account = match T::AccountLookup::lookup_account(&author) { + Some(account) => account, + // Authors whose account lookups fail will not be eligible + None => { + return false; + }, + }; + + T::CanAuthor::can_author(&account, slot) + } + } +} + +#[derive(Encode)] +#[cfg_attr(feature = "std", derive(Debug, Decode))] +pub enum InherentError { + Other(RuntimeString), +} + +impl IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + match *self { + InherentError::Other(_) => true, + } + } +} + +impl InherentError { + /// Try to create an instance ouf of the given identifier and data. 
+ #[cfg(feature = "std")] + pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option { + if id == &INHERENT_IDENTIFIER { + ::decode(&mut &data[..]).ok() + } else { + None + } + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use crate as author_inherent; + + use frame_support::{ + assert_noop, assert_ok, parameter_types, + traits::{OnFinalize, OnInitialize}, + }; + use sp_core::H256; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + }; + use nimbus_primitives::NimbusId; + use sp_core::Public; + const TEST_AUTHOR_ID: [u8; 32] = [0u8; 32]; + const BOGUS_AUTHOR_ID: [u8; 32] = [1u8; 32]; + + pub fn new_test_ext() -> TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + TestExternalities::new(t) + } + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + // Configure a mock runtime to test the pallet. + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + AuthorInherent: author_inherent::{Pallet, Call, Storage, Inherent}, + } + ); + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + } + impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + } + impl Config for Test { + type AuthorId = NimbusId; + type EventHandler = (); + type CanAuthor = (); + type AccountLookup = DummyAccountLookup; + type SlotBeacon = (); + } + + pub struct DummyAccountLookup; + impl AccountLookup for DummyAccountLookup { + fn lookup_account(author: &NimbusId) -> Option { + if author.as_slice() == &TEST_AUTHOR_ID { + Some(0) + } else { + None + } + } + } + + pub fn roll_to(n: u64) { + while System::block_number() < n { + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + AuthorInherent::on_initialize(System::block_number()); + } + } + + #[test] + fn set_author_works() { + new_test_ext().execute_with(|| { + assert_ok!(AuthorInherent::set_author(Origin::none(), NimbusId::from_slice(&TEST_AUTHOR_ID))); + roll_to(1); + assert_ok!(AuthorInherent::set_author(Origin::none(), NimbusId::from_slice(&TEST_AUTHOR_ID))); + roll_to(2); + }); + } + + #[test] + fn must_be_inherent() { + new_test_ext().execute_with(|| { + assert_noop!( + AuthorInherent::set_author(Origin::signed(1), NimbusId::from_slice(&TEST_AUTHOR_ID)), + sp_runtime::DispatchError::BadOrigin + ); + }); + } + + #[test] + fn double_author_fails() { + new_test_ext().execute_with(|| { + 
assert_ok!(AuthorInherent::set_author(Origin::none(), NimbusId::from_slice(&TEST_AUTHOR_ID))); + assert_noop!( + AuthorInherent::set_author(Origin::none(), NimbusId::from_slice(&TEST_AUTHOR_ID)), + Error::::AuthorAlreadySet + ); + }); + } + + #[test] + fn fails_when_account_lookup_fails() { + new_test_ext().execute_with(|| { + assert_noop!( + AuthorInherent::set_author(Origin::none(), NimbusId::from_slice(&BOGUS_AUTHOR_ID)), + Error::::NoAccountId + ); + }); + } +} diff --git a/pallets/author-slot-filter/Cargo.toml b/pallets/author-slot-filter/Cargo.toml new file mode 100644 index 00000000000..ea6de6a1e67 --- /dev/null +++ b/pallets/author-slot-filter/Cargo.toml @@ -0,0 +1,34 @@ +[package] +authors = ["PureStake"] +edition = "2018" +name = "pallet-author-slot-filter" +version = "0.1.0" + +[dependencies] +log = { version = "0.4", default-features = false } +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.101", optional = true, features = ["derive"] } +frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-author-inherent = { path = "../author-inherent", default-features = false } +cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false} +nimbus-primitives = { path = "../../primitives/nimbus", default-features = false } + +[features] +default = ["std"] +std = [ + "log/std", + "serde", + 
"parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "pallet-author-inherent/std", + "sp-core/std", + "sp-runtime/std", + "cumulus-pallet-parachain-system/std", + "sp-std/std", + "nimbus-primitives/std", +] diff --git a/pallets/author-slot-filter/src/lib.rs b/pallets/author-slot-filter/src/lib.rs new file mode 100644 index 00000000000..f2aabdabbe9 --- /dev/null +++ b/pallets/author-slot-filter/src/lib.rs @@ -0,0 +1,168 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Small pallet responsible for determining which accounts are eligible to author at the current +//! slot. +//! +//! Using a randomness beacon supplied by the `Randomness` trait, this pallet takes the set of +//! currently active accounts from an upstream source, and filters them down to a pseudorandom subset. +//! The current technique gives no preference to any particular author. In the future, we could +//! disfavor authors who are authoring a disproportionate amount of the time in an attempt to +//! "even the playing field".
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet; + +pub use pallet::*; + +#[pallet] +pub mod pallet { + + use log::debug; + use frame_support::pallet_prelude::*; + use frame_support::traits::Randomness; + use sp_std::vec::Vec; + use frame_system::pallet_prelude::*; + use sp_core::H256; + use sp_runtime::Percent; + use nimbus_primitives::CanAuthor; + + /// The Author Filter pallet + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// Configuration trait of this pallet. + #[pallet::config] + pub trait Config: frame_system::Config + cumulus_pallet_parachain_system::Config { + /// The overarching event type + type Event: From + IsType<::Event>; + /// Deterministic on-chain pseudo-randomness used to do the filtering + type RandomnessSource: Randomness; + //TODO introduce a new trait for exhaustive sets and use it here. + // Oh actually, we can use the same trait. First we call the inner one + // to determine whether this particular author is eligible there. then we + // use the author as part of the subject when querying eligibility. I like this better. + /// A source for the complete set of potential authors. + /// The starting point of the filtering. + type PotentialAuthors: Get>; + } + + // This code will be called by the author-inherent pallet to check whether the reported author + // of this block is eligible in this slot. We calculate that result on demand and do not + // record it in storage (although we do emit a debugging event for now). + impl CanAuthor for Pallet { + fn can_author(author: &T::AccountId, slot: &u32) -> bool { + let mut active: Vec = T::PotentialAuthors::get(); + + let num_eligible = EligibleRatio::::get().mul_ceil(active.len()); + let mut eligible = Vec::with_capacity(num_eligible); + + for i in 0..num_eligible { + // A context identifier for grabbing the randomness. 
Consists of three parts + // - The constant string *b"filter" - to identify this pallet + // - The index `i` when we're selecting the ith eligible author + // - The relay parent block number so that the eligible authors at the next height + // change. Avoids liveness attacks from colluding minorities of active authors. + // Third one may not be necessary once we leverage the relay chain's randomness. + let subject: [u8; 8] = [ + b'f', + b'i', + b'l', + b't', + b'e', + b'r', + i as u8, + *slot as u8, + ]; + let (randomness, _) = T::RandomnessSource::random(&subject); + debug!(target: "author-filter", "🎲Randomness sample {}: {:?}", i, &randomness); + + // Cast to u32 first so we get consistent results on 32- and 64-bit platforms. + let index = (randomness.to_fixed_bytes()[0] as u32) as usize; + + // Move the selected author from the original vector into the eligible vector + // TODO we could short-circuit this check by returning early when the claimed + // author is selected. For now I'll leave it like this because: + // 1. it is easier to understand what our core filtering logic is + // 2. we currently show the entire filtered set in the debug event + eligible.push(active.remove(index % active.len())); + } + + // Print some logs for debugging purposes. + debug!(target: "author-filter", "Eligible Authors: {:?}", eligible); + debug!(target: "author-filter", "Ineligible Authors: {:?}", &active); + debug!(target: "author-filter", + "Current author, {:?}, is eligible: {}", + author, + eligible.contains(author) + ); + + eligible.contains(author) + } + } + + #[pallet::call] + impl Pallet { + /// Update the eligible ratio. Intended to be called by governance.
+ #[pallet::weight(0)] + pub fn set_eligible(origin: OriginFor, new: Percent) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + EligibleRatio::::put(&new); + >::deposit_event(Event::EligibleUpdated(new)); + + Ok(Default::default()) + } + } + + /// The percentage of active authors that will be eligible at each height. + #[pallet::storage] + pub type EligibleRatio = StorageValue<_, Percent, ValueQuery, Half>; + + // Default value for the `EligibleRatio` is one half. + #[pallet::type_value] + pub fn Half() -> Percent { + Percent::from_percent(50) + } + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub eligible_ratio: Percent, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + eligible_ratio: Percent::from_percent(50), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + EligibleRatio::::put(self.eligible_ratio); + } + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event { + /// The amount of eligible authors for the filter to select has been changed. + EligibleUpdated(Percent), + } +} diff --git a/pallets/parachain-system/src/validate_block/implementation.rs b/pallets/parachain-system/src/validate_block/implementation.rs index 439e1450db2..41e8b319393 100644 --- a/pallets/parachain-system/src/validate_block/implementation.rs +++ b/pallets/parachain-system/src/validate_block/implementation.rs @@ -38,6 +38,8 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } +type ParachainSystem = crate::Module; + /// Validate a given parachain block on a validator. 
#[doc(hidden)] pub fn validate_block< diff --git a/polkadot-parachains/Cargo.toml b/polkadot-parachains/Cargo.toml index fc142b46a89..53a315625a6 100644 --- a/polkadot-parachains/Cargo.toml +++ b/polkadot-parachains/Cargo.toml @@ -29,6 +29,7 @@ statemint-runtime = { path = "statemint" } statemine-runtime = { path = "statemine" } westmint-runtime = { path = "westmint" } statemint-common = { path = "statemint-common" } +nimbus-runtime = { path = "nimbus-runtime" } # Substrate dependencies frame-benchmarking = { git = 'https://github.com/paritytech/substrate', branch = "master" } @@ -75,6 +76,9 @@ cumulus-client-service = { path = "../client/service" } cumulus-client-network = { path = "../client/network" } cumulus-primitives-core = { path = "../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" } +nimbus-primitives = { path = "../primitives/nimbus"} +nimbus-consensus = { path = "../client/consensus/nimbus"} +pallet-author-inherent = { path = "../pallets/author-inherent"} # Polkadot dependencies polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } @@ -99,4 +103,5 @@ runtime-benchmarks = [ 'statemint-runtime/runtime-benchmarks', 'statemine-runtime/runtime-benchmarks', 'westmint-runtime/runtime-benchmarks', + #TODO Benchmark nimbus runtime ] diff --git a/polkadot-parachains/nimbus-runtime/Cargo.toml b/polkadot-parachains/nimbus-runtime/Cargo.toml new file mode 100644 index 00000000000..0877c540223 --- /dev/null +++ b/polkadot-parachains/nimbus-runtime/Cargo.toml @@ -0,0 +1,106 @@ +[package] +name = 'nimbus-runtime' +version = '0.1.0' +authors = ["Parity Technologies "] +edition = '2018' +description = "Simple runtime to demonstrate nimbus consensus" + +[dependencies] +serde = { version = "1.0.101", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +log 
= { version = "0.4.14", default-features = false } +parachain-info = { path = "../pallets/parachain-info", default-features = false } + +# Substrate dependencies +sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-version = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-session = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-offchain = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } + +frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-executive = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-assets = { git = 
"https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-balances = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-sudo = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } + +# Cumulus dependencies +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } +cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../primitives/timestamp", default-features = false } +pallet-author-inherent = { path = "../../pallets/author-inherent", default-features = false } +nimbus-primitives = { path = "../../primitives/nimbus", default-features = false } +pallet-author-filter = {package = "pallet-author-slot-filter", path = "../../pallets/author-slot-filter", default-features = false } +cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../pallets/dmp-queue", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } +cumulus-ping = { path = "../pallets/ping", default-features = false } + +# Polkadot dependencies +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } 
+xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } +xcm-builder = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } +xcm-executor = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } + +[dev-dependencies] +hex-literal = "0.3.1" +hex = "0.4.3" + +[build-dependencies] +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "serde", + "log/std", + "sp-api/std", + "sp-std/std", + "sp-io/std", + "sp-core/std", + "sp-runtime/std", + "sp-version/std", + "sp-offchain/std", + "sp-session/std", + "sp-block-builder/std", + "sp-transaction-pool/std", + "sp-inherents/std", + "frame-support/std", + "frame-executive/std", + "frame-system/std", + "pallet-assets/std", + "pallet-balances/std", + "pallet-randomness-collective-flip/std", + "pallet-timestamp/std", + "pallet-sudo/std", + "pallet-transaction-payment/std", + "parachain-info/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", + "cumulus-primitives-utility/std", + "cumulus-ping/std", + "xcm/std", + "xcm-builder/std", + "xcm-executor/std", + "pallet-author-inherent/std", + "pallet-author-filter/std", + "nimbus-primitives/std", + "log/std", +] diff --git a/polkadot-parachains/nimbus-runtime/build.rs b/polkadot-parachains/nimbus-runtime/build.rs new file mode 100644 index 00000000000..fe1a2ea911d --- /dev/null +++ b/polkadot-parachains/nimbus-runtime/build.rs @@ -0,0 +1,25 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Cumulus. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/polkadot-parachains/nimbus-runtime/src/lib.rs b/polkadot-parachains/nimbus-runtime/src/lib.rs new file mode 100644 index 00000000000..91923e37948 --- /dev/null +++ b/polkadot-parachains/nimbus-runtime/src/lib.rs @@ -0,0 +1,654 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. 
+#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_api::impl_runtime_apis; +use sp_core::OpaqueMetadata; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{BlakeTwo256, Block as BlockT, AccountIdLookup}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; +use cumulus_pallet_parachain_system::RelaychainBlockNumberProvider; + +// A few exports that help ease life for downstream crates. +pub use frame_support::{ + construct_runtime, parameter_types, match_type, + traits::{Randomness, IsInVec, All, OnInitialize}, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + DispatchClass, IdentityFee, Weight, + }, + StorageValue, +}; +use frame_system::limits::{BlockLength, BlockWeights}; +use frame_system::InitKind; +// pub use pallet_balances::Call as BalancesCall; +// pub use pallet_timestamp::Call as TimestampCall; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +use nimbus_primitives::{CanAuthor, NimbusId}; + +// XCM imports +use polkadot_parachain::primitives::Sibling; +use xcm::v0::{MultiAsset, MultiLocation, MultiLocation::*, Junction::*, BodyId, NetworkId}; +use xcm_builder::{ + AccountId32Aliases, CurrencyAdapter, LocationInverter, ParentIsDefault, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SovereignSignedViaLocation, EnsureXcmOrigin, AllowUnpaidExecutionFrom, ParentAsSuperuser, + AllowTopLevelPaidExecutionFrom, TakeWeightCredit, FixedWeightBounds, IsConcrete, NativeAsset, + UsingComponents, SignedToAccountId32, +}; +use xcm_executor::{Config, XcmExecutor}; +use pallet_xcm::{XcmPassthrough, EnsureXcm, IsMajorityOfBody}; +use xcm::v0::Xcm; + +mod pallet_account_set; + 
+pub type SessionHandlers = (); + +impl_opaque_keys! { + pub struct SessionKeys { + pub author_inherent: AuthorInherent, + } +} + +/// This runtime version. +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("test-parachain"), + impl_name: create_runtime_str!("test-parachain"), + authoring_version: 1, + spec_version: 14, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, +}; + +pub const MILLISECS_PER_BLOCK: u64 = 12000; + +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; + +// These time units are defined in number of blocks. +pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); +pub const HOURS: BlockNumber = MINUTES * 60; +pub const DAYS: BlockNumber = HOURS * 24; + +pub const ROC: Balance = 1_000_000_000_000; +pub const MILLIROC: Balance = 1_000_000_000; +pub const MICROROC: Balance = 1_000_000; + +// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. +pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } +} + +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for .5 seconds of compute with a 12 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND / 2; + +parameter_types!
{ + pub const BlockHashCount: BlockNumber = 250; + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +pub struct BaseFilter; +impl frame_support::traits::Filter for BaseFilter { + fn filter(_c: &Call) -> bool { true } +} + +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = AccountIdLookup; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The header type. + type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. 
+ type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// Converts a module to an index of this module in the runtime. + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = BaseFilter; + type SystemWeightInfo = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; +} + +parameter_types! { + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 1 * MILLIROC; + pub const TransferFee: u128 = 1 * MILLIROC; + pub const CreationFee: u128 = 1 * MILLIROC; + pub const TransactionByteFee: u128 = 1 * MICROROC; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. 
+ type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +impl pallet_randomness_collective_flip::Config for Runtime {} + +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type Call = Call; + type Event = Event; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type Event = Event; + type OnValidationData = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = XcmpQueue; + type DmpMessageHandler = DmpQueue; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub const RocLocation: MultiLocation = X1(Parent); + pub const RococoNetwork: NetworkId = NetworkId::Polkadot; + pub RelayChainOrigin: Origin = cumulus_pallet_xcm::Origin::Relay.into(); + pub Ancestry: MultiLocation = X1(Parachain(ParachainInfo::parachain_id().into())); +} + +/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used +/// when determining ownership of accounts for asset transacting and when attempting to use XCM +/// `Transact` in order to determine the dispatch Origin. +pub type LocationToAccountId = ( + // The parent (Relay-chain) origin converts to the default `AccountId`. + ParentIsDefault, + // Sibling parachain origins convert to AccountId via the `ParaId::into`. 
+ SiblingParachainConvertsVia, + // Straight up local `AccountId32` origins just alias directly to `AccountId`. + AccountId32Aliases, +); + +/// Means for transacting assets on this chain. +pub type LocalAssetTransactor = CurrencyAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports. + (), +>; + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will converts to a `Relay` origin when + // recognised. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognised. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `Origin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +parameter_types! 
{ + // One XCM operation is 1_000_000 weight - almost certainly a conservative estimate. + pub UnitWeightCost: Weight = 1_000_000; + // One ROC buys 1 second of weight. + pub const WeightPrice: (MultiLocation, u128) = (X1(Parent), ROC); +} + +match_type! { + pub type ParentOrParentsUnitPlurality: impl Contains = { + X1(Parent) | X2(Parent, Plurality { id: BodyId::Unit, .. }) + }; +} + +pub type Barrier = ( + TakeWeightCredit, + AllowTopLevelPaidExecutionFrom>, + AllowUnpaidExecutionFrom, + // ^^^ Parent & its unit plurality gets free execution +); + +pub struct XcmConfig; +impl Config for XcmConfig { + type Call = Call; + type XcmSender = XcmRouter; + // How to withdraw and deposit an asset. + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + type IsReserve = NativeAsset; + type IsTeleporter = NativeAsset; // <- should be enough to allow teleportation of ROC + type LocationInverter = LocationInverter; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = UsingComponents, RocLocation, AccountId, Balances, ()>; + type ResponseHandler = (); // Don't handle responses for now. +} + +/// No local origins on this chain are allowed to dispatch XCM sends/executions. +pub type LocalOriginToLocation = ( + SignedToAccountId32, +); + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = ( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. 
+ XcmpQueue, +); + +impl pallet_xcm::Config for Runtime { + type Event = Event; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = All<(MultiLocation, Xcm)>; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = All<(MultiLocation, Vec)>; + type XcmReserveTransferFilter = (); + type Weigher = FixedWeightBounds; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; +} + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; + type ChannelInfo = ParachainSystem; +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; + type ExecuteOverweightOrigin = frame_system::EnsureRoot; +} + +impl cumulus_ping::Config for Runtime { + type Event = Event; + type Origin = Origin; + type Call = Call; + type XcmSender = XcmRouter; +} + +impl pallet_author_inherent::Config for Runtime { + type AuthorId = NimbusId; + // We start a new slot each time we see a new relay block. + type SlotBeacon = RelaychainBlockNumberProvider; + type AccountLookup = PotentialAuthorSet; + type EventHandler = (); + type CanAuthor = AuthorFilter; +} + +impl pallet_author_filter::Config for Runtime { + type Event = Event; + type RandomnessSource = RandomnessCollectiveFlip; + type PotentialAuthors = PotentialAuthorSet; +} + +impl pallet_account_set::Config for Runtime { + type AuthorId = NimbusId; +} + +parameter_types! { + pub const AssetDeposit: Balance = 1 * ROC; + pub const ApprovalDeposit: Balance = 100 * MILLIROC; + pub const StringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 1 * ROC; + pub const MetadataDepositPerByte: Balance = 10 * MILLIROC; + pub const UnitBody: BodyId = BodyId::Unit; +} + +/// A majority of the Unit body from Rococo over XCM is our required administration origin. 
+pub type AdminOrigin = EnsureXcm>; + +impl pallet_assets::Config for Runtime { + type Event = Event; + type Balance = u64; + type AssetId = u32; + type Currency = Balances; + type ForceOrigin = AdminOrigin; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = StringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = pallet_assets::weights::SubstrateWeight; +} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = generic::Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + + ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Config, Storage, Inherent, Event} = 20, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 21, + + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 30, + Assets: pallet_assets::{Pallet, Call, Storage, Event} = 31, + + // XCM helpers. 
+ XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 50, + PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin} = 51, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event, Origin} = 52, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 53, + + // Nimbus stuff + AuthorInherent: pallet_author_inherent::{Pallet, Call, Storage, Inherent}, + AuthorFilter: pallet_author_filter::{Pallet, Storage, Event, Config}, + PotentialAuthorSet: pallet_account_set::{Pallet, Storage, Config}, + + Spambot: cumulus_ping::{Pallet, Call, Storage, Event} = 99, + } +} + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = sp_runtime::MultiSignature; +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as sp_runtime::traits::IdentifyAccount>::AccountId; +/// Balance of an account. +pub type Balance = u128; +/// Index of a transaction in the chain. +pub type Index = u32; +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; +/// An index to a block. +pub type BlockNumber = u32; +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. 
+pub type SignedExtra = ( + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPallets, +>; + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic( + extrinsic: ::Extrinsic, + ) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for 
Runtime { + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + } + + + impl nimbus_primitives::AuthorFilterAPI for Runtime { + fn can_author(author: nimbus_primitives::NimbusId, slot: u32, parent_header: &::Header) -> bool { + // This runtime uses an entropy source that is updated during block initialization + // Therefore we need to initialize it to match the state it will be in when the + // next block is being executed. + System::initialize(&(parent_header.number + 1), &parent_header.hash(), &parent_header.digest, InitKind::Inspection); + ::RandomnessSource::on_initialize(System::block_number()); + + // And now the actual prediction call + AuthorInherent::can_author(&author, &slot) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info() -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info() + } + } +} + +// Check the timestamp and parachain inherents +struct CheckInherents; + +impl cumulus_pallet_parachain_system::CheckInherents for CheckInherents { + fn check_inherents( + block: &Block, + relay_state_proof: &cumulus_pallet_parachain_system::RelayChainStateProof, + ) -> sp_inherents::CheckInherentsResult { + let relay_chain_slot = relay_state_proof + .read_slot() + .expect("Could not read the relay chain slot from the proof"); + + let inherent_data = + cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration( + relay_chain_slot, + sp_std::time::Duration::from_secs(6), + ) + .create_inherent_data() + .expect("Could not create the timestamp inherent data"); + + inherent_data.check_extrinsics(&block) + } +} + +cumulus_pallet_parachain_system::register_validate_block! 
{ + Runtime = Runtime, + BlockExecutor = pallet_author_inherent::BlockExecutor::, + CheckInherents = CheckInherents, +} diff --git a/polkadot-parachains/nimbus-runtime/src/pallet_account_set.rs b/polkadot-parachains/nimbus-runtime/src/pallet_account_set.rs new file mode 100644 index 00000000000..8ead6685ee1 --- /dev/null +++ b/polkadot-parachains/nimbus-runtime/src/pallet_account_set.rs @@ -0,0 +1,111 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Small pallet responsible for storing a set of accounts, and their associated session keys. +//! This is a minimal solution where staking would be used in practice. +//! The accounts are set at genesis and never change. +//! +//! The Substrate ecosystem has a wide variety of real-world solutions and examples of what this +//! pallet could be replaced with. +//! Gautam's validator set pallet - https://github.com/gautamdhameja/substrate-validator-set +//! Parity's pallet staking - https://github.com/paritytech/substrate/tree/master/frame/staking/ +//! Moonbeam's Parachain Staking - https://github.com/PureStake/moonbeam/tree/master/pallets/parachain-staking +//!
Recipe for AccountSet, VecSet, and MapSet + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::pallet; + +pub use pallet::*; + +#[pallet] +pub mod pallet { + + #[cfg(feature = "std")] + use log::warn; + use frame_support::pallet_prelude::*; + use sp_std::vec::Vec; + use nimbus_primitives::{AccountLookup, CanAuthor}; + + /// The Account Set pallet + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// Configuration trait of this pallet. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The identifier type for an author. + type AuthorId: Member + Parameter + MaybeSerializeDeserialize; + } + + /// The set of accounts that is stored in this pallet. + #[pallet::storage] + pub type StoredAccounts = StorageValue<_, Vec, ValueQuery>; + + impl Get> for Pallet { + fn get() -> Vec { + StoredAccounts::::get() + } + } + + #[pallet::storage] + #[pallet::getter(fn account_id_of)] + /// A mapping from the AuthorIds used in the consensus layer + /// to the AccountIds runtime. + type Mapping = StorageMap<_, Twox64Concat, T::AuthorId, T::AccountId, OptionQuery>; + + #[pallet::genesis_config] + /// Genesis config for author mapping pallet + pub struct GenesisConfig { + /// The associations that should exist at chain genesis + pub mapping: Vec<(T::AuthorId, T::AccountId)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { mapping: vec![] } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + if self.mapping.is_empty() { + warn!(target: "account-set", "No mappings at genesis. Your chain will have no valid authors."); + } + for (author_id, account_id) in &self.mapping { + Mapping::::insert(author_id, account_id); + StoredAccounts::::append(account_id); + } + } + } + + /// This pallet is compatible with nimbus's author filtering system. Any account stored in this pallet + /// is a valid author. 
Notice that this implementation does not have an inner filter, so it + /// can only be the beginning of the nimbus filter pipeline. + impl CanAuthor for Pallet { + fn can_author(author: &T::AccountId, _slot: &u32) -> bool { + StoredAccounts::::get().contains(author) + } + } + + impl AccountLookup for Pallet { + fn lookup_account(author: &T::AuthorId) -> Option { + Mapping::::get(&author) + } + } +} diff --git a/polkadot-parachains/src/chain_spec.rs b/polkadot-parachains/src/chain_spec.rs index 780d56a6426..76b36e35745 100644 --- a/polkadot-parachains/src/chain_spec.rs +++ b/polkadot-parachains/src/chain_spec.rs @@ -16,12 +16,15 @@ use cumulus_primitives_core::ParaId; use hex_literal::hex; +// Notice we are bringing in these types from the rococo_parachain_runtime. They are the same types for the nimbus runtime, so they still work. use rococo_parachain_runtime::{AccountId, AuraId, Signature}; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; +use sp_runtime::Percent; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; +use nimbus_primitives::NimbusId; /// Specialized `ChainSpec` for the normal parachain runtime. pub type ChainSpec = sc_service::GenericChainSpec; @@ -29,6 +32,9 @@ pub type ChainSpec = sc_service::GenericChainSpec; +/// Specialized ChainSpec` for the nimbus parachain runtime. 
+pub type NimbusChainSpec = sc_service::GenericChainSpec; + /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) @@ -102,6 +108,47 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec { ) } +pub fn get_nimbus_chain_spec(id: ParaId) -> NimbusChainSpec { + NimbusChainSpec::from_genesis( + "Nimbus Local Testnet", + "nimbus_local_testnet", + ChainType::Local, + move || { + nimbus_testnet_genesis( + get_account_id_from_seed::("Alice"), + //These are hardcoded in the nimbus_testnet_genesis function below. They could be moved here too. + // vec![ + // get_from_seed::("Alice"), + // get_from_seed::("Bob"), + // ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + id, + ) + }, + vec![], + None, + None, + None, + Extensions { + relay_chain: "westend-dev".into(), + para_id: id.into(), + }, + ) +} + pub fn get_shell_chain_spec(id: ParaId) -> ShellChainSpec { ShellChainSpec::from_genesis( "Shell Local Testnet", @@ -153,9 +200,9 @@ pub fn staging_test_net(id: ParaId) -> ChainSpec { } fn testnet_genesis( - root_key: AccountId, + root_key: rococo_parachain_runtime::AccountId, initial_authorities: Vec, - endowed_accounts: Vec, + endowed_accounts: Vec, id: ParaId, ) -> rococo_parachain_runtime::GenesisConfig { rococo_parachain_runtime::GenesisConfig { @@ -182,6 +229,41 @@ fn testnet_genesis( } } +fn nimbus_testnet_genesis( + root_key: AccountId, + // initial_authorities: Vec, + endowed_accounts: Vec, + parachain_id: 
ParaId, +) -> nimbus_runtime::GenesisConfig { + nimbus_runtime::GenesisConfig { + system: rococo_parachain_runtime::SystemConfig { + code: nimbus_runtime::WASM_BINARY + .expect("WASM binary was not build, please build it!") + .to_vec(), + changes_trie_config: Default::default(), + }, + balances: nimbus_runtime::BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1 << 60)) + .collect(), + }, + sudo: nimbus_runtime::SudoConfig { key: root_key }, + parachain_info: nimbus_runtime::ParachainInfoConfig { parachain_id }, + author_filter: nimbus_runtime::AuthorFilterConfig { + eligible_ratio: Percent::from_percent(50), + }, + potential_author_set: nimbus_runtime::PotentialAuthorSetConfig { + mapping: vec![ + (get_from_seed::("Alice"), get_account_id_from_seed::("Alice")), + (get_from_seed::("Bob"), get_account_id_from_seed::("Bob")), + ] + }, + parachain_system: Default::default(), + } +} + fn shell_testnet_genesis(parachain_id: ParaId) -> shell_runtime::GenesisConfig { shell_runtime::GenesisConfig { system: shell_runtime::SystemConfig { diff --git a/polkadot-parachains/src/command.rs b/polkadot-parachains/src/command.rs index 5432a4c33c6..d7d651ace77 100644 --- a/polkadot-parachains/src/command.rs +++ b/polkadot-parachains/src/command.rs @@ -19,7 +19,7 @@ use crate::{ cli::{Cli, RelayChainCli, Subcommand}, service::{ StatemineRuntimeExecutor, StatemintRuntimeExecutor, WestmintRuntimeExecutor, new_partial, - RococoParachainRuntimeExecutor, ShellRuntimeExecutor, Block, + RococoParachainRuntimeExecutor, ShellRuntimeExecutor, NimbusRuntimeExecutor, Block, }, }; use codec::Encode; @@ -41,6 +41,7 @@ const DEFAULT_PARA_ID: u32 = 1000; trait IdentifyChain { fn is_shell(&self) -> bool; + fn is_nimbus(&self) -> bool; fn is_statemint(&self) -> bool; fn is_statemine(&self) -> bool; fn is_westmint(&self) -> bool; @@ -50,6 +51,9 @@ impl IdentifyChain for dyn sc_service::ChainSpec { fn is_shell(&self) -> bool { self.id().starts_with("shell") } + fn 
is_nimbus(&self) -> bool { + self.id().starts_with("nimbus") + } fn is_statemint(&self) -> bool { self.id().starts_with("statemint") } @@ -65,6 +69,9 @@ impl IdentifyChain for T { fn is_shell(&self) -> bool { ::is_shell(self) } + fn is_nimbus(&self) -> bool { + ::is_nimbus(self) + } fn is_statemint(&self) -> bool { ::is_statemint(self) } @@ -92,6 +99,7 @@ fn load_spec( &include_bytes!("../res/track.json")[..], )?), "shell" => Box::new(chain_spec::get_shell_chain_spec(para_id)), + "nimbus" => Box::new(chain_spec::get_nimbus_chain_spec(para_id)), "statemint-dev" => Box::new(chain_spec::statemint_development_config(para_id)), "statemint-local" => Box::new(chain_spec::statemint_local_config(para_id)), "statemine-dev" => Box::new(chain_spec::statemine_development_config(para_id)), @@ -121,6 +129,8 @@ fn load_spec( Box::new(chain_spec::WestmintChainSpec::from_json_file(path.into())?) } else if chain_spec.is_shell() { Box::new(chain_spec::ShellChainSpec::from_json_file(path.into())?) + } else if chain_spec.is_nimbus() { + Box::new(chain_spec::NimbusChainSpec::from_json_file(path.into())?) } else { Box::new(chain_spec) } @@ -172,6 +182,8 @@ impl SubstrateCli for Cli { &westmint_runtime::VERSION } else if chain_spec.is_shell() { &shell_runtime::VERSION + } else if chain_spec.is_nimbus(){ + &nimbus_runtime::VERSION } else { &rococo_parachain_runtime::VERSION } @@ -267,7 +279,22 @@ macro_rules! 
construct_async_run { let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) }) - } else { + } + else if runner.config().chain_spec.is_nimbus() { + runner.async_run(|$config| { + let $components = new_partial::< + nimbus_runtime::RuntimeApi, + NimbusRuntimeExecutor, + _ + >( + &$config, + crate::service::nimbus_build_import_queue, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + } + else { runner.async_run(|$config| { let $components = new_partial::< rococo_parachain_runtime::RuntimeApi, @@ -472,7 +499,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into) - } else { + } + else if config.chain_spec.is_nimbus() { + crate::service::start_nimbus_node(config, polkadot_config, id) + .await + .map(|r| r.0) + .map_err(Into::into) + } + else { crate::service::start_rococo_parachain_node(config, polkadot_config, id) .await .map(|r| r.0) diff --git a/polkadot-parachains/src/service.rs b/polkadot-parachains/src/service.rs index 9268b64a7fb..4d1050ca9e3 100644 --- a/polkadot-parachains/src/service.rs +++ b/polkadot-parachains/src/service.rs @@ -17,6 +17,10 @@ use cumulus_client_consensus_aura::{ build_aura_consensus, BuildAuraConsensusParams, SlotProportion, }; +use nimbus_consensus::{ + build_nimbus_consensus, BuildNimbusConsensusParams, +}; +use nimbus_primitives::NimbusId; use cumulus_client_consensus_common::{ ParachainBlockImport, ParachainCandidate, ParachainConsensus, }; @@ -72,6 +76,13 @@ native_executor_instance!( shell_runtime::native_version, ); +native_executor_instance!( + pub NimbusRuntimeExecutor, + nimbus_runtime::api::dispatch, + nimbus_runtime::native_version, + //TODO benchmark nimbus +); + // Native Statemint executor instance. native_executor_instance!( pub StatemintRuntimeExecutor, @@ -594,6 +605,111 @@ pub async fn start_shell_node( .await } +/// Build the import queue for the nimbus runtime. 
+pub fn nimbus_build_import_queue( + client: Arc>, + config: &Configuration, + _: Option, + task_manager: &TaskManager, +) -> Result< + sc_consensus::DefaultImportQueue< + Block, + TFullClient, + >, + sc_service::Error, +> { + nimbus_consensus::import_queue( + client.clone(), + client, + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry().clone(), + ) + .map_err(Into::into) +} + +/// Start a nimbus node. +pub async fn start_nimbus_node( + parachain_config: Configuration, + polkadot_config: Configuration, + id: ParaId, +) -> sc_service::error::Result< + (TaskManager, Arc>) +> { + let skip_prediction = parachain_config.force_authoring; + start_node_impl::( + parachain_config, + polkadot_config, + id, + |_| Ok(Default::default()), + nimbus_build_import_queue, + |client, + prometheus_registry, + telemetry, + task_manager, + relay_chain_node, + transaction_pool, + _, + keystore, + _| { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry.clone(), + telemetry.clone(), + ); + + let relay_chain_backend = relay_chain_node.backend.clone(); + let relay_chain_client = relay_chain_node.client.clone(); + + Ok( + build_nimbus_consensus( + BuildNimbusConsensusParams { + para_id: id, + proposer_factory, + block_import: client.clone(), + relay_chain_client: relay_chain_node.client.clone(), + relay_chain_backend: relay_chain_node.backend.clone(), + parachain_client: client.clone(), + keystore, + skip_prediction, + create_inherent_data_providers: + move |_, (relay_parent, validation_data, author_id)| { + let parachain_inherent = + cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client( + relay_parent, + &relay_chain_client, + &*relay_chain_backend, + &validation_data, + id, + ); + async move { + let 
time = sp_timestamp::InherentDataProvider::from_system_time(); + + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + + let author = nimbus_primitives::InherentDataProvider::(author_id); + + Ok((time, parachain_inherent, author)) + } + }, + }, + ), + ) + }, + ) + .await +} + enum BuildOnAccess { Uninitialized(Option R + Send + Sync>>), Initialized(R), diff --git a/primitives/nimbus/Cargo.toml b/primitives/nimbus/Cargo.toml new file mode 100644 index 00000000000..65abded165e --- /dev/null +++ b/primitives/nimbus/Cargo.toml @@ -0,0 +1,32 @@ +[package] +authors = ["PureStake"] +edition = "2018" +name = "nimbus-primitives" +version = "0.1.0" +description = "Primitive types and traites used in the nimbus consensus framework" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +async-trait = { version = "0.1", optional = true } + +frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "frame-support/std", + "frame-system/std", + "sp-api/std", + 
"sp-std/std", + "sp-application-crypto/std", + "sp-runtime/std", + "sp-inherents/std", + "async-trait", +] diff --git a/primitives/nimbus/src/digests.rs b/primitives/nimbus/src/digests.rs new file mode 100644 index 00000000000..272e6b29977 --- /dev/null +++ b/primitives/nimbus/src/digests.rs @@ -0,0 +1,48 @@ +//! A convenient interface over the digests used in nimbus. +//! +//! Currently Nimbus has two digests; +//! 1. A consensus digest that contains the block author identity +//! This information is copied from the author inherent. +//! This may be replaced with a pre-runtime digest in the future. +//! 2. A seal digest that contains a signature over the rest of the +//! block including the first digest. + +use crate::{NIMBUS_ENGINE_ID, NimbusSignature, NimbusId}; +use sp_runtime::generic::DigestItem; +use parity_scale_codec::{Encode, Codec}; +use sp_std::fmt::Debug; + +/// A digest item which is usable with nimbus consensus. +pub trait CompatibleDigestItem: Sized { + /// Construct a seal digest item from the given signature + fn nimbus_seal(signature: NimbusSignature) -> Self; + + /// If this item is a nimbus seal, return the signature.
+ fn as_nimbus_seal(&self) -> Option; + + /// Construct a consensus digest from the given AuthorId + fn nimbus_consensus_digest(author: NimbusId) -> Self; + + /// If this item is a nimbus consensus digest, return the author + fn as_nimbus_consensus_digest(&self) -> Option; +} + +impl CompatibleDigestItem for DigestItem where + Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static +{ + fn nimbus_seal(signature: NimbusSignature) -> Self { + DigestItem::Seal(NIMBUS_ENGINE_ID, signature.encode()) + } + + fn as_nimbus_seal(&self) -> Option { + self.seal_try_to(&NIMBUS_ENGINE_ID) + } + + fn nimbus_consensus_digest(author: NimbusId) -> Self { + DigestItem::Consensus(NIMBUS_ENGINE_ID, author.encode()) + } + + fn as_nimbus_consensus_digest(&self) -> Option { + // Must read back the DigestItem::Consensus variant written above; + // pre_runtime_try_to would only match DigestItem::PreRuntime and always return None. + self.consensus_try_to(&NIMBUS_ENGINE_ID) + } +} \ No newline at end of file diff --git a/primitives/nimbus/src/inherents.rs b/primitives/nimbus/src/inherents.rs new file mode 100644 index 00000000000..16a6bb1d960 --- /dev/null +++ b/primitives/nimbus/src/inherents.rs @@ -0,0 +1,35 @@ +use sp_inherents::{InherentData, InherentIdentifier}; +use parity_scale_codec::Encode; + +/// The InherentIdentifier for nimbus's author inherent +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"author__"; + +/// A thing that an outer node could use to inject the inherent data. +/// This should be used in simple uses of the author inherent (eg permissionless authoring) +/// When using the full nimbus system, we are manually inserting the inherent.
+pub struct InherentDataProvider(pub AuthorId); + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> Result<(), sp_inherents::Error> { + inherent_data.put_data(INHERENT_IDENTIFIER, &self.0) + } + + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + _error: &[u8], + ) -> Option> { + // Don't process errors from other inherents + if *identifier != INHERENT_IDENTIFIER { + return None + } + + // All errors with the author inherent are fatal + Some(Err(sp_inherents::Error::Application(Box::from(String::from("Error processing author inherent"))))) + } +} \ No newline at end of file diff --git a/primitives/nimbus/src/lib.rs b/primitives/nimbus/src/lib.rs new file mode 100644 index 00000000000..63609cff5c8 --- /dev/null +++ b/primitives/nimbus/src/lib.rs @@ -0,0 +1,147 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Nimbus Consensus Primitives +//! +//! Primitive types and traits for working with the Nimbus consensus framework. +//!
This code can be built to no_std for use in the runtime + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::vec::Vec; +use parity_scale_codec::Codec; +use sp_application_crypto::KeyTypeId; +use sp_runtime::ConsensusEngineId; +use sp_runtime::traits::BlockNumberProvider; + +mod digests; +mod inherents; + +pub use inherents::{INHERENT_IDENTIFIER, InherentDataProvider}; + +/// The given account ID is the author of the current block. +pub trait EventHandler { + //TODO should we be taking ownership here? + fn note_author(author: Author); +} + +impl EventHandler for () { + fn note_author(_author: T) {} +} + +/// A mechanism for determining the current slot. +/// For now we use u32 as the slot type everywhere. Let's see how long we can get away with that. +pub trait SlotBeacon { + fn slot() -> u32; +} + +/// Anything that can provide a block height can be used as a slot beacon. This could be +/// used in at least two realistic ways. +/// 1. Use your own chain's height as the slot number +/// 2. If you're a parachain, use the relay chain's height as the slot number. +impl> SlotBeacon for T { + fn slot() -> u32 { + Self::current_block_number() + } +} + +/// PLANNED: A SlotBeacon that starts a new slot based on the timestamp. Behaviorally, this is +/// similar to what aura, babe and company do. Implementation-wise it is different because it +/// depends on the timestamp pallet for its notion of time. +pub struct IntervalBeacon; + +impl SlotBeacon for IntervalBeacon { + fn slot() -> u32 { + todo!() + } +} + +/// Trait to determine whether this author is eligible to author in this slot. +/// This is the primary trait your nimbus filter needs to implement. +/// +/// This is the proposition-logic variant. +/// That is to say the caller specifies an author and the implementation +/// replies whether that author is eligible. This is useful in many cases and is +/// particularly useful when the active set is unbounded.
+/// There may be another variant where the caller only supplies a slot and the +/// implementation replies with a complete set of eligible authors. +pub trait CanAuthor { + fn can_author(author: &AuthorId, slot: &u32) -> bool; +} +/// Default implementation where anyone can author. +/// +/// This is identical to Cumulus's RelayChainConsensus +impl CanAuthor for () { + fn can_author(_: &T, _: &u32) -> bool { + true + } +} + +/// A Trait to lookup runtime AccountIds from AuthorIds (probably NimbusIds) +/// The trait is generic over the AccountId, because different runtimes use +/// different notions of AccountId. It is also generic over the AuthorId to +/// support the use case where the author inherent is used for beneficiary info +/// and contains an AccountId directly. +pub trait AccountLookup { + fn lookup_account(author: &AuthorId) -> Option; +} + +// A dummy impl used in simple tests +impl AccountLookup for () { + fn lookup_account(_: &AuthorId) -> Option { + None + } +} + +/// The ConsensusEngineId for nimbus consensus +/// this same identifier will be used regardless of the filters installed +pub const NIMBUS_ENGINE_ID: ConsensusEngineId = *b"nmbs"; + +/// The KeyTypeId used in the Nimbus consensus framework regardless of what filters are in place. +/// If this gets well adopted, we could move this definition to sp_core to avoid conflicts. +pub const NIMBUS_KEY_ID: KeyTypeId = KeyTypeId(*b"nmbs"); + +// The strongly-typed crypto wrappers to be used by Nimbus in the keystore +mod nimbus_crypto { + use sp_application_crypto::{ + app_crypto, + sr25519, + }; + app_crypto!(sr25519, crate::NIMBUS_KEY_ID); +} + +/// A nimbus author identifier (A public key). +pub type NimbusId = nimbus_crypto::Public; + +/// A nimbus signature. +pub type NimbusSignature = nimbus_crypto::Signature; + +sp_application_crypto::with_pair! { + /// A nimbus keypair + pub type NimbusPair = nimbus_crypto::Pair; +} + + +sp_api::decl_runtime_apis!
{ + /// The runtime api used to predict whether an author will be eligible in the given slot + #[api_version(2)] + pub trait AuthorFilterAPI { + #[changed_in(2)] + fn can_author(author: AuthorId, relay_parent: u32) -> bool; + + fn can_author(author: AuthorId, relay_parent: u32, parent_header: &Block::Header) -> bool; + } +} diff --git a/test/service/Cargo.toml b/test/service/Cargo.toml index 164fbf16be5..92e83d9e65f 100644 --- a/test/service/Cargo.toml +++ b/test/service/Cargo.toml @@ -30,6 +30,7 @@ sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "mas sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }