diff --git a/Cargo.lock b/Cargo.lock
index d4b54311623..77de82967f3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2072,6 +2072,7 @@ dependencies = [
"sp-block-builder",
"sp-blockchain",
"sp-core",
+ "sp-inherents",
"sp-keyring",
"sp-maybe-compressed-blob",
"sp-offchain",
@@ -5088,6 +5089,99 @@ dependencies = [
"winapi 0.3.9",
]
+[[package]]
+name = "nimbus-consensus"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "cumulus-client-consensus-common",
+ "cumulus-primitives-core",
+ "cumulus-primitives-parachain-inherent",
+ "futures 0.3.16",
+ "log",
+ "nimbus-primitives",
+ "parity-scale-codec",
+ "parking_lot 0.9.0",
+ "polkadot-service",
+ "sc-client-api",
+ "sc-consensus",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-core",
+ "sp-inherents",
+ "sp-keystore",
+ "sp-runtime",
+ "substrate-prometheus-endpoint",
+ "tracing",
+]
+
+[[package]]
+name = "nimbus-primitives"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-inherents",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "nimbus-runtime"
+version = "0.1.0"
+dependencies = [
+ "cumulus-pallet-dmp-queue",
+ "cumulus-pallet-parachain-system",
+ "cumulus-pallet-xcm",
+ "cumulus-pallet-xcmp-queue",
+ "cumulus-ping",
+ "cumulus-primitives-core",
+ "cumulus-primitives-timestamp",
+ "cumulus-primitives-utility",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "hex",
+ "hex-literal 0.3.1",
+ "log",
+ "nimbus-primitives",
+ "pallet-assets",
+ "pallet-author-inherent",
+ "pallet-author-slot-filter",
+ "pallet-balances",
+ "pallet-randomness-collective-flip",
+ "pallet-sudo",
+ "pallet-timestamp",
+ "pallet-transaction-payment",
+ "pallet-xcm",
+ "parachain-info",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "serde",
+ "sp-api",
+ "sp-block-builder",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-std",
+ "sp-transaction-pool",
+ "sp-version",
+ "substrate-wasm-builder",
+ "xcm",
+ "xcm-builder",
+ "xcm-executor",
+]
+
[[package]]
name = "nix"
version = "0.17.0"
@@ -5298,6 +5392,56 @@ dependencies = [
"sp-std",
]
+[[package]]
+name = "pallet-aura-style-filter"
+version = "0.1.0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "nimbus-primitives",
+ "parity-scale-codec",
+ "serde",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-author-inherent"
+version = "0.1.0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "log",
+ "nimbus-primitives",
+ "parity-scale-codec",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-authorship",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-author-slot-filter"
+version = "0.1.0"
+dependencies = [
+ "cumulus-pallet-parachain-system",
+ "frame-support",
+ "frame-system",
+ "log",
+ "nimbus-primitives",
+ "pallet-author-inherent",
+ "parity-scale-codec",
+ "serde",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
[[package]]
name = "pallet-authority-discovery"
version = "4.0.0-dev"
@@ -6680,7 +6824,11 @@ dependencies = [
"hex-literal 0.2.1",
"jsonrpc-core",
"log",
+ "nimbus-consensus",
+ "nimbus-primitives",
+ "nimbus-runtime",
"nix",
+ "pallet-author-inherent",
"parity-scale-codec",
"parking_lot 0.10.2",
"polkadot-cli",
diff --git a/Cargo.toml b/Cargo.toml
index ae84d34ea5c..61498d40fa3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,9 +4,13 @@ members = [
"client/consensus/aura",
"client/consensus/common",
"client/consensus/relay-chain",
+ "client/consensus/nimbus",
"client/network",
"client/pov-recovery",
"client/service",
+ "pallets/author-inherent",
+ "pallets/author-slot-filter",
+ "pallets/aura-style-filter",
"pallets/aura-ext",
"pallets/collator-selection",
"pallets/dmp-queue",
@@ -16,12 +20,14 @@ members = [
"pallets/xcm",
"pallets/xcmp-queue",
"primitives/core",
+ "primitives/nimbus",
"primitives/parachain-inherent",
"primitives/timestamp",
"primitives/utility",
"polkadot-parachains/",
"polkadot-parachains/pallets/parachain-info",
"polkadot-parachains/pallets/ping",
+ "polkadot-parachains/nimbus-runtime",#TODO change this to match chevdor's scheme
"polkadot-parachains/rococo",
"polkadot-parachains/shell",
"polkadot-parachains/statemint-common",
diff --git a/client/collator/src/lib.rs b/client/collator/src/lib.rs
index 610d41ad52d..e1eadd5bcb8 100644
--- a/client/collator/src/lib.rs
+++ b/client/collator/src/lib.rs
@@ -259,7 +259,7 @@ where
tracing::info!(
target: LOG_TARGET,
?block_hash,
- "Produced proof-of-validity candidate.",
+ "๐ด Produced proof-of-validity candidate.",
);
Some(CollationResult {
diff --git a/client/consensus/nimbus/Cargo.toml b/client/consensus/nimbus/Cargo.toml
new file mode 100644
index 00000000000..88f50383999
--- /dev/null
+++ b/client/consensus/nimbus/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "nimbus-consensus"
+description = "Client-side worker for the Nimbus family of slot-based consensus algorithms"
+version = "0.1.0"
+edition = "2018"
+
+[dependencies]
+# Substrate deps
+sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+# Polkadot dependencies
+polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+
+# Cumulus dependencies
+nimbus-primitives = { path = "../../../primitives/nimbus" }
+cumulus-client-consensus-common = { path = "../common" }
+cumulus-primitives-core = { path = "../../../primitives/core" }
+cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" }
+
+# Other deps
+futures = { version = "0.3.8", features = ["compat"] }
+codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
+tracing = "0.1.22"
+async-trait = "0.1.42"
+parking_lot = "0.9"
+log = "0.4"
diff --git a/client/consensus/nimbus/README.md b/client/consensus/nimbus/README.md
new file mode 100644
index 00000000000..ca05c484a18
--- /dev/null
+++ b/client/consensus/nimbus/README.md
@@ -0,0 +1,194 @@
+# Cumulo -- Nimbus ⛈️
+
+Nimbus is a framework for building parachain consensus systems on [cumulus](https://github.com/paritytech/cumulus)-based parachains.
+
+Given the regular six-second pulse-like nature of the relay chain, it is natural to think about slot-
+based consensus algorithms for parachains. The parachain network is responsible for liveness and
+decentralization and the relay chain is responsible for finality. There is a rich design space for such
+algorithms, yet some tasks are common to all (or most) of them. These common tasks include:
+
+* Signing and signature checking blocks
+* Injecting authorship information into the parachain
+* Block authorship and import accounting
+* Filtering a large (potentially unbounded) set of potential authors to a smaller (but still potentially unbounded) set.
+* Detecting when it is your turn to author and skipping other slots
+
+Nimbus aims to provide standard implementations for the logistical parts of such consensus engines,
+along with helpful traits for implementing the parts that researchers and developers want to customize.
+
+## Try the Demo
+
+While Nimbus is primarily a development framework meant to be included in other projects, it is useful
+to see a basic network in action. An example network is included in the `polkadot-parachains` example collator. You
+can build it with `cargo build --release` and launch it like any other cumulus parachain.
+Make sure to specify `--chain nimbus`.
+
+Rather than reiterate how to start a relay-para network here, I'll simply recommend you use the
+excellent [Polkadot Launch](https://github.com/paritytech/polkadot-launch) tool. This repo was tested with version 1.4.1.
+A [launch config file](./nimbus-launch-config.json) is provided.
+
+```bash
+# Install polkadot launch (I used v1.4.1)
+npm i -g polkadot-launch
+
+# Build polkadot (I used 82aa404c; check Cargo.lock to be sure)
+cd polkadot
+cargo build --release
+cd ..
+
+# Build Polkadot-parachains example collator
+cd cumulus
+git checkout nimbus
+cargo build --release
+
+# Launch the multi-chain
+polkadot-launch ./nimbus-launch-config.json
+```
+
+To learn more about launching relay-para networks, check out the [cumulus workshop](https://substrate.dev/cumulus-workshop).
+
+## Design Overview
+
+If you want to start using Nimbus in your project, it is worth reading this.
+
+At its core nimbus is a consensus engine that considers blocks valid if and only if they inject the author's public identity into the runtime, _and_ seal the block with a signature
+by the author's private key.
+
+Compared to most consensus engines, this is _very_ permissive -- anyone who can create a signature can author valid blocks. In order to build more useful and familiar consensus engine on this foundation, nimbus provides a framework for creating filters to further restrict the set of eligible authors. These filters live inside the runtime.
+
+Being general in the consensus layer and deferring most checks to the runtime is the key
+to nimbus's re-usability as a framework. And is the reason that *writing a consensus engine is as easy as writing a pallet* when you use nimbus.
+
+### Author Inherent
+
+The Author inherent pallet allows block authors to insert their identity into
+the runtime. This feature alone is useful in many blockchains and can be used for things like block rewards.
+
+The author inherent provides a validation hook called `CanAuthor`. This check will be called during the inherent execution and is the main entry point to nimbus's author filters.
+If you don't want to restrict authorship at all, you can just use `()`.
+
+As a concrete example, in a simple Proof of Stake system this check will determine
+whether the author is staked. In a more realistic PoS system the `CanAuthor` check might
+first make sure the author is staked, and then make sure they are eligible in _this slot_ according to round robin rules.
+
+Finally, the pallet copies the authorship information into a consensus digest that will stick around
+in the block header. This digest can be used by UIs to display the author, and also by the consensus
+engine to verify the block authorship.
+
+**PreRuntimeDigest**
+I believe the design should be changed slightly to use a preruntime digest rather than an inherent for a few reasons:
+
+* The data wouldn't be duplicated between an inherent and a digest.
+* Nimbus client-side worker would support non-frame runtimes.
+* That's how sc-consensus-aura does it.
+
+### Author Filters
+
+A primary job of a consensus engine is deciding who can author each block. Some may have a static set, others
+may rotate the set each era, others may elect an always-changing subset of all potential authors. There
+is much space for creativity, research, and design, and Nimbus strives to provide a flexible interface
+for this creative work. You can express all the interesting parts of your
+consensus engine simply by creating filters that implement the `CanAuthor` trait. The rest of Nimbus will #JustWork for you.
+
+This repository comes with a few example filters already, and additional examples are welcome. The examples are:
+* PseudoRandom FixedSized Subset - This filter takes a finite set (eg a staked set) and filters it down to a pseudo-random
+subset at each height. The eligible ratio is configurable in the pallet. This is a good learning example.
+* Aura - The authority round consensus engine is popular in the Substrate ecosystem because it was one
+of the first (and simplest!) engines implemented in Substrate. Aura can be expressed in the Nimbus
+filter framework and is included as an example filter. If you are considering using aura, that crate
+has good documentation on how it differs from `sc-consensus-aura`.
+* (Planned) FixedSizedSubset - The author submits a VRF output that has to be below a threshold to be able to author.
+* (Planned) Filter Combinator - A filter that wraps two other filters. It uses one in even slots and the other in odd slots.
+
+### Author Filter Runtime API
+
+Nimbus makes the design choice to include the author checking logic in the runtime. This is in contrast to the existing implementations of Aura and Babe where the authorship checks are offchain.
+
+While moving the check in-runtime provides a lot of flexibility and simplifies interfacing with relay-chain validators, it makes it impossible
+for authoring nodes to predict whether they will be eligible without calling into the runtime.
+To achieve this, we provide a runtime API that makes the minimal calculation necessary to determine
+whether a specified author will be eligible at the specified slot.
+
+### Nimbus Consensus Worker
+
+Nimbus consensus is the primary client-side consensus worker. It implements the `ParachainConsensus`
+trait introduced to cumulus in https://github.com/paritytech/cumulus/pull/329. It is not likely that
+you will need to change this code directly to implement your engine as it is entirely abstracted over
+the filters you use. The consensus engine performs these tasks:
+
+* Slot prediction - it calls the runtime API mentioned previously to determine whether it is eligible. If not, it returns early.
+* Authorship - It calls into a standard Substrate proposer to construct a block (probably including the author inherent).
+* Self import - it imports the block that the proposer created (called the pre-block) into the node's local database.
+* Sealing - It adds a seal digest to the block - This is what is used by other nodes to verify the authorship information.
+
+### Verifier and Import Queue
+
+For a parachain node to import a sealed block authored by one of its peers, it needs to first check that the signature is valid by the author that was injected into the runtime. This is the job of the verifier. It
+will remove the nimbus seal and check it against the nimbus consensus digest from the runtime. If that process fails,
+the block is immediately thrown away before the expensive execution even begins. If it succeeds, then
+the pre-block (the part that's left after the seal is stripped) is passed into the
+[import pipeline](https://substrate.dev/docs/en/knowledgebase/advanced/block-import) for processing
+and execution. Finally, the locally produced result is compared to the result received across the network.
+
+### Custom Block Executor
+
+We've already discussed how parachain nodes (both the one that authors a block, and also its peers)
+import blocks. In a standalone blockchain, that's the end of the story. But for a parachain, we also
+need our relay chain validators to re-execute and validate the parachain block. Validators do this in
+a unique way, and entirely in wasm. Providing the `validate_block` function that the validators use
+is the job of the `register_validate_block!` macro from Cumulus.
+
+Typically a cumulus runtime invokes that macro like this:
+```rust
+cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive);
+```
+
+You can see that the validators use the exact same executive that the parachain nodes do. Now that
+we have sealed blocks, that must change. The validators need to strip and verify the seal, and re-execute
+the pre-block just like the parachain nodes did. And without access to an offchain verifier, they must
+do this all in the runtime. For that purpose, we provide an alternate executive which wraps the normal
+FRAME executive. The wrapper strips and checks the seal, just like the verifier did, and then passes the pre-block to the inner FRAME executive for re-execution.
+
+## Write Your Own Consensus Logic
+
+If you have an idea for a new slot-based parachain consensus algorithm, Nimbus is a quick way to get
+it working! The fastest way to start hacking is to fork this repo and customize the template node.
+
+If you'd rather dive in than read one more sentence, then **start hacking in the `author-slot-filter`
+pallet.**
+
+In most cases, you can use all the off-the-shelf components and simply write your filters. It is also
+possible to compose existing filters to build more complex logic from smaller pieces.
+
+## Authoring and Import Diagrams
+
+One node authors the block, then it is processed in three different ways.
+
+| | Author | Parachain Peer | Relay Validator |
+| ------------------- | ------ | -------------- | --------- |
+| Predict Eligibility | ✅ | ❌ | ❌ |
+| Author Block | ✅ | ❌ | ❌ |
+| Runs Verifier | ❌ | ✅ | ❌ |
+| Import Pipeline | ✅ | ✅ | ❌ |
+| Custom Pre exec | ❌ | ❌ | ✅ |
+| Normal FRAME exec | ✅ | ✅ | ✅ |
+
+## Roadmap
+
+The Nimbus framework is intended to be loosely coupled with Cumulus. It remains to be
+seen whether it should live with Cumulus or in its own repository.
+
+### Next tasks
+* Proper trait for interacting with digests
+* More example filters
+* Share code between verifier and wrapper executive
+* Client-side worker for standalone (non para) blockchain
+* Aurand as an example of composing filters
+* Second filter trait for exhaustive sets (As opposed to current propositional approach)
+
+## Contributions Welcome
+
+Try it out, open issues, submit PRs, review code. Whether you like to tinker with a running node, or
+analyze security from an academic perspective, your contributions are welcome.
+
+I am happy to support users who want to use nimbus, or want feedback on their consensus engines.
diff --git a/client/consensus/nimbus/src/import_queue.rs b/client/consensus/nimbus/src/import_queue.rs
new file mode 100644
index 00000000000..bec5c4ec0a0
--- /dev/null
+++ b/client/consensus/nimbus/src/import_queue.rs
@@ -0,0 +1,195 @@
+// Copyright 2019 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+use std::{marker::PhantomData, sync::Arc};
+
+use sp_api::ProvideRuntimeApi;
+use sp_block_builder::BlockBuilder as BlockBuilderApi;
+use sp_blockchain::Result as ClientResult;
+use sp_consensus::{
+ error::Error as ConsensusError, CacheKeyId,
+};
+use sc_consensus::{
+ BlockImport, BlockImportParams,
+ import_queue::{BasicQueue, Verifier as VerifierT},
+};
+use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
+use sp_runtime::{
+ generic::BlockId,
+ traits::{Block as BlockT, Header as HeaderT},
+ DigestItem,
+};
+use nimbus_primitives::{NimbusId, NimbusSignature, NimbusPair};
+use sp_application_crypto::{TryFrom, Pair as _, Public as _};
+use log::debug;
+
+/// The Nimbus verifier strips the seal digest, and checks that it is a valid signature by
+/// the same key that was injected into the runtime and noted in the Seal digest.
+/// From Nimbus's perspective any block that faithfully reports its authorship to the runtime
+/// is valid. The intention is that the runtime itself may then put further restrictions on
+/// the identity of the author.
+struct Verifier {
+ client: Arc,
+ create_inherent_data_providers: CIDP,
+ _marker: PhantomData,
+}
+
+#[async_trait::async_trait]
+impl VerifierT for Verifier
+where
+ Block: BlockT,
+ Client: ProvideRuntimeApi + Send + Sync,
+ >::Api: BlockBuilderApi,
+ CIDP: CreateInherentDataProviders ,
+{
+ async fn verify(
+ &mut self,
+ mut block_params: BlockImportParams,
+ ) -> Result<
+ (
+ BlockImportParams,
+ Option)>>,
+ ),
+ String,
+ > {
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Header hash before popping digest {:?}", block_params.header.hash());
+ // Grab the digest from the seal
+ //TODO use CompatibleDigest trait here once I write it. For now assume the seal is last.
+ let seal = block_params.header.digest_mut().pop().expect("Block should have at least one digest on it");
+
+ let sig = match seal {
+ DigestItem::Seal(id, ref sig) if id == *b"nmbs" => sig.clone(),
+ _ => return Err("HeaderUnsealed".into()),
+ };
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Header hash after popping digest {:?}", block_params.header.hash());
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Signature according to verifier is {:?}", sig);
+
+ // Grab the digest from the runtime
+ //TODO use the trait. Maybe this code should move to the trait.
+ let consensus_digest = block_params.header
+ .digest()
+ .logs
+ .iter()
+ .find(|digest| {
+ match *digest {
+ DigestItem::Consensus(id, _) if id == b"nmbs" => true,
+ _ => false,
+ }
+ })
+ .expect("A single consensus digest should be added by the runtime when executing the author inherent.");
+
+ let claimed_author = match *consensus_digest {
+ DigestItem::Consensus(id, ref author_id) if id == *b"nmbs" => author_id.clone(),
+ _ => panic!("Expected consensus digest to contains author id bytes"),
+ };
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Claimed Author according to verifier is {:?}", claimed_author);
+
+ // Verify the signature
+ let valid_signature = NimbusPair::verify(
+ &NimbusSignature::try_from(sig).expect("Bytes should convert to signature correctly"),
+ block_params.header.hash(),
+ &NimbusId::from_slice(&claimed_author),
+ );
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Valid signature? {:?}", valid_signature);
+
+ if !valid_signature{
+ return Err("Block signature invalid".into());
+ }
+
+ // This part copied from RelayChainConsensus. I guess this is the inherent checking.
+ if let Some(inner_body) = block_params.body.take() {
+ let inherent_data_providers = self
+ .create_inherent_data_providers
+ .create_inherent_data_providers(*block_params.header.parent_hash(), ())
+ .await
+ .map_err(|e| e.to_string())?;
+
+ let inherent_data = inherent_data_providers
+ .create_inherent_data()
+ .map_err(|e| format!("{:?}", e))?;
+
+ let block = Block::new(block_params.header.clone(), inner_body);
+
+ let inherent_res = self
+ .client
+ .runtime_api()
+ .check_inherents(
+ &BlockId::Hash(*block_params.header.parent_hash()),
+ block.clone(),
+ inherent_data,
+ )
+ .map_err(|e| format!("{:?}", e))?;
+
+ if !inherent_res.ok() {
+ for (i, e) in inherent_res.into_errors() {
+ match inherent_data_providers.try_handle_error(&i, &e).await {
+ Some(r) => r.map_err(|e| format!("{:?}", e))?,
+ None => Err(format!(
+ "Unhandled inherent error from `{}`.",
+ String::from_utf8_lossy(&i)
+ ))?,
+ }
+ }
+ }
+
+ let (_, inner_body) = block.deconstruct();
+ block_params.body = Some(inner_body);
+ }
+
+ block_params.post_digests.push(seal);
+
+ debug!(target: crate::LOG_TARGET, "๐ชฒ Just finished verifier. posthash from params is {:?}", &block_params.post_hash());
+
+ Ok((block_params, None))
+ }
+}
+
+/// Start an import queue for a Cumulus collator that does not use any special authoring logic.
+pub fn import_queue(
+ client: Arc,
+ block_import: I,
+ create_inherent_data_providers: CIDP,
+ spawner: &impl sp_core::traits::SpawnEssentialNamed,
+ registry: Option<&substrate_prometheus_endpoint::Registry>,
+) -> ClientResult>
+where
+ I: BlockImport + Send + Sync + 'static,
+ I::Transaction: Send,
+ Client: ProvideRuntimeApi + Send + Sync + 'static,
+ >::Api: BlockBuilderApi,
+ CIDP: CreateInherentDataProviders + 'static,
+{
+ let verifier = Verifier {
+ client,
+ create_inherent_data_providers,
+ _marker: PhantomData,
+ };
+
+ Ok(BasicQueue::new(
+ verifier,
+ Box::new(cumulus_client_consensus_common::ParachainBlockImport::new(
+ block_import,
+ )),
+ None,
+ spawner,
+ registry,
+ ))
+}
diff --git a/client/consensus/nimbus/src/lib.rs b/client/consensus/nimbus/src/lib.rs
new file mode 100644
index 00000000000..4a436900ff8
--- /dev/null
+++ b/client/consensus/nimbus/src/lib.rs
@@ -0,0 +1,522 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! The nimbus consensus client-side worker
+//!
+//! It queries the in-runtime filter to determine whether any keys
+//! stored in its keystore are eligible to author at this slot. If it has an eligible
+//! key it authors.
+
+use cumulus_client_consensus_common::{
+ ParachainBlockImport, ParachainCandidate, ParachainConsensus,
+};
+use cumulus_primitives_core::{
+ relay_chain::v1::{Block as PBlock, Hash as PHash, ParachainHost},
+ ParaId, PersistedValidationData,
+};
+pub use import_queue::import_queue;
+use log::{info, warn, debug};
+use parking_lot::Mutex;
+use polkadot_service::ClientHandle;
+use sc_client_api::Backend;
+use sp_api::{ProvideRuntimeApi, BlockId, ApiExt};
+use sp_consensus::{
+ BlockOrigin, EnableProofRecording, Environment,
+ ProofRecording, Proposal, Proposer,
+};
+use sc_consensus::{BlockImport, BlockImportParams};
+use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
+use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT};
+use std::{marker::PhantomData, sync::Arc, time::Duration};
+use tracing::error;
+use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
+use sp_core::crypto::Public;
+use nimbus_primitives::{AuthorFilterAPI, NIMBUS_ENGINE_ID, NIMBUS_KEY_ID, NimbusId};
+mod import_queue;
+
+const LOG_TARGET: &str = "filtering-consensus";
+
+/// The implementation of the relay-chain provided consensus for parachains.
+pub struct NimbusConsensus {
+ para_id: ParaId,
+ _phantom: PhantomData,
+ proposer_factory: Arc>,
+ create_inherent_data_providers: Arc,
+ block_import: Arc>>,
+ relay_chain_client: Arc,
+ relay_chain_backend: Arc,
+ parachain_client: Arc,
+ keystore: SyncCryptoStorePtr,
+ skip_prediction: bool,
+}
+
+impl Clone for NimbusConsensus {
+ fn clone(&self) -> Self {
+ Self {
+ para_id: self.para_id,
+ _phantom: PhantomData,
+ proposer_factory: self.proposer_factory.clone(),
+ create_inherent_data_providers: self.create_inherent_data_providers.clone(),
+ block_import: self.block_import.clone(),
+ relay_chain_backend: self.relay_chain_backend.clone(),
+ relay_chain_client: self.relay_chain_client.clone(),
+ parachain_client: self.parachain_client.clone(),
+ keystore: self.keystore.clone(),
+ skip_prediction: self.skip_prediction,
+ }
+ }
+}
+
+impl NimbusConsensus
+where
+ B: BlockT,
+ RClient: ProvideRuntimeApi,
+ RClient::Api: ParachainHost,
+ RBackend: Backend,
+ ParaClient: ProvideRuntimeApi,
+ CIDP: CreateInherentDataProviders,
+{
+ /// Create a new instance of nimbus consensus.
+ pub fn new(
+ para_id: ParaId,
+ proposer_factory: PF,
+ create_inherent_data_providers: CIDP,
+ block_import: BI,
+ polkadot_client: Arc,
+ polkadot_backend: Arc,
+ parachain_client: Arc,
+ keystore: SyncCryptoStorePtr,
+ skip_prediction: bool,
+ ) -> Self {
+ Self {
+ para_id,
+ proposer_factory: Arc::new(Mutex::new(proposer_factory)),
+ create_inherent_data_providers: Arc::new(create_inherent_data_providers),
+ block_import: Arc::new(futures::lock::Mutex::new(ParachainBlockImport::new(
+ block_import,
+ ))),
+ relay_chain_backend: polkadot_backend,
+ relay_chain_client: polkadot_client,
+ parachain_client,
+ keystore,
+ skip_prediction,
+ _phantom: PhantomData,
+ }
+ }
+
+ //TODO Could this be a provided implementation now that we have this async inherent stuff?
+ /// Create the inherent data for the block being authored.
+ async fn inherent_data(
+ &self,
+ parent: B::Hash,
+ validation_data: &PersistedValidationData,
+ relay_parent: PHash,
+ author_id: NimbusId,
+ ) -> Option {
+ let inherent_data_providers = self
+ .create_inherent_data_providers
+ .create_inherent_data_providers(parent, (relay_parent, validation_data.clone(), author_id))
+ .await
+ .map_err(|e| {
+ tracing::error!(
+ target: LOG_TARGET,
+ error = ?e,
+ "Failed to create inherent data providers.",
+ )
+ })
+ .ok()?;
+
+ inherent_data_providers
+ .create_inherent_data()
+ .map_err(|e| {
+ tracing::error!(
+ target: LOG_TARGET,
+ error = ?e,
+ "Failed to create inherent data.",
+ )
+ })
+ .ok()
+ }
+}
+
+#[async_trait::async_trait]
+impl ParachainConsensus
+ for NimbusConsensus
+where
+ B: BlockT,
+ RClient: ProvideRuntimeApi + Send + Sync,
+ RClient::Api: ParachainHost,
+ RBackend: Backend,
+ BI: BlockImport + Send + Sync,
+ PF: Environment + Send + Sync,
+ PF::Proposer: Proposer<
+ B,
+ Transaction = BI::Transaction,
+ ProofRecording = EnableProofRecording,
+ Proof = ::Proof,
+ >,
+ ParaClient: ProvideRuntimeApi + Send + Sync,
+ ParaClient::Api: AuthorFilterAPI,
+ CIDP: CreateInherentDataProviders,
+{
+ async fn produce_candidate(
+ &mut self,
+ parent: &B::Header,
+ relay_parent: PHash,
+ validation_data: &PersistedValidationData,
+ ) -> Option> {
+ // Design decision: We will check the keystore for any available keys. Then we will iterate
+ // those keys until we find one that is eligible. If none are eligible, we skip this slot.
+ // If multiple are eligible, we only author with the first one.
+
+ // Get all the available keys
+ let available_keys =
+ SyncCryptoStore::keys(&*self.keystore, NIMBUS_KEY_ID)
+ .expect("keystore should return the keys it has");
+
+ // Print a more helpful message than "not eligible" when there are no keys at all.
+ if available_keys.is_empty() {
+ warn!(target: LOG_TARGET, "๐ No Nimbus keys available. We will not be able to author.");
+ return None;
+ }
+
+ let at = BlockId::Hash(parent.hash());
+ // Get `AuthorFilterAPI` version.
+ let api_version = self.parachain_client.runtime_api()
+ .api_version::>(&at)
+ .expect("Runtime api access to not error.");
+
+ if api_version.is_none() {
+ tracing::error!(
+ target: LOG_TARGET, "Could not find `AuthorFilterAPI` version.",
+ );
+ return None;
+ }
+ let api_version = api_version.unwrap();
+
+ // Iterate keys until we find an eligible one, or run out of candidates.
+ // If we are skipping prediction, then we author with the first key we find.
+ // Prediction skipping only really makes sense when there is a single key in the keystore.
+ let maybe_key = available_keys.into_iter().find(|type_public_pair| {
+
+ // If we are not predicting, just return the first one we find.
+ self.skip_prediction ||
+
+ // Have to convert to a typed NimbusId to pass to the runtime API. Maybe this is a clue
+ // That I should be passing Vec across the wasm boundary?
+ if api_version >= 2 {
+ self.parachain_client.runtime_api().can_author(
+ &at,
+ NimbusId::from_slice(&type_public_pair.1),
+ validation_data.relay_parent_number,
+ parent,
+ )
+ .expect("Author API should not return error")
+ } else {
+ #[allow(deprecated)]
+ self.parachain_client.runtime_api().can_author_before_version_2(
+ &at,
+ NimbusId::from_slice(&type_public_pair.1),
+ validation_data.relay_parent_number,
+ )
+ .expect("Author API version 2 should not return error")
+ }
+ });
+
+ // If there are no eligible keys, print the log, and exit early.
+ let type_public_pair = match maybe_key {
+ Some(p) => p,
+ None => {
+ info!(
+ target: LOG_TARGET,
+ "๐ฎ Skipping candidate production because we are not eligible"
+ );
+ return None;
+ }
+ };
+
+ let proposer_future = self.proposer_factory.lock().init(&parent);
+
+ let proposer = proposer_future
+ .await
+ .map_err(
+ |e| error!(target: LOG_TARGET, error = ?e, "Could not create proposer."),
+ )
+ .ok()?;
+
+ let inherent_data = self.inherent_data(parent.hash(),&validation_data, relay_parent, NimbusId::from_slice(&type_public_pair.1)).await?;
+
+ let Proposal {
+ block,
+ storage_changes,
+ proof,
+ } = proposer
+ .propose(
+ inherent_data,
+ Default::default(),
+ //TODO: Fix this.
+ Duration::from_millis(500),
+ // Set the block limit to 50% of the maximum PoV size.
+ //
+ // TODO: If we had benchmarking that encapsulates the proof size,
+ // we should be able to use the maximum pov size.
+ Some((validation_data.max_pov_size / 2) as usize),
+ )
+ .await
+ .map_err(|e| error!(target: LOG_TARGET, error = ?e, "Proposing failed."))
+ .ok()?;
+
+ let (header, extrinsics) = block.clone().deconstruct();
+
+ let pre_hash = header.hash();
+
+ let sig = SyncCryptoStore::sign_with(
+ &*self.keystore,
+ NIMBUS_KEY_ID,
+ &type_public_pair,
+ pre_hash.as_ref(),
+ )
+ .expect("Keystore should be able to sign")
+ .expect("We already checked that the key was present");
+
+ debug!(
+ target: LOG_TARGET,
+ "The signature is \n{:?}", sig
+ );
+
+ // TODO Make a proper CompatibleDigest trait https://github.com/paritytech/substrate/blob/master/primitives/consensus/aura/src/digests.rs#L45
+ let sig_digest = sp_runtime::generic::DigestItem::Seal(NIMBUS_ENGINE_ID, sig);
+
+ let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header.clone());
+ block_import_params.post_digests.push(sig_digest.clone());
+ block_import_params.body = Some(extrinsics.clone());
+ block_import_params.state_action = sc_consensus::StateAction::ApplyChanges(
+ sc_consensus::StorageChanges::Changes(storage_changes)
+ );
+
+ // Print the same log line as slots (aura and babe)
+ info!(
+ "๐ Sealed block for proposal at {}. Hash now {:?}, previously {:?}.",
+ *header.number(),
+ block_import_params.post_hash(),
+ pre_hash,
+ );
+
+ if let Err(err) = self
+ .block_import
+ .lock()
+ .await
+ .import_block(block_import_params, Default::default())
+ .await
+ {
+ error!(
+ target: LOG_TARGET,
+ at = ?parent.hash(),
+ error = ?err,
+ "Error importing built block.",
+ );
+
+ return None;
+ }
+
+ // Compute info about the block after the digest is added
+ let mut post_header = header.clone();
+ post_header.digest_mut().logs.push(sig_digest.clone());
+ let post_block = B::new(post_header, extrinsics);
+
+ // Returning the block WITH the seal for distribution around the network.
+ Some(ParachainCandidate { block: post_block, proof })
+ }
+}
+
+/// Parameters of [`build_nimbus_consensus`].
+///
+/// I briefly tried the async keystore approach, but decided to go sync so I can copy
+/// code from Aura. Maybe after it is working, Jeremy can help me go async.
+pub struct BuildNimbusConsensusParams {
+ pub para_id: ParaId,
+ pub proposer_factory: PF,
+ pub create_inherent_data_providers: CIDP,
+ pub block_import: BI,
+ pub relay_chain_client: polkadot_service::Client,
+ pub relay_chain_backend: Arc,
+ pub parachain_client: Arc,
+ pub keystore: SyncCryptoStorePtr,
+ pub skip_prediction: bool,
+
+}
+
+/// Build the [`NimbusConsensus`].
+///
+/// Returns a boxed [`ParachainConsensus`].
+pub fn build_nimbus_consensus(
+ BuildNimbusConsensusParams {
+ para_id,
+ proposer_factory,
+ create_inherent_data_providers,
+ block_import,
+ relay_chain_client,
+ relay_chain_backend,
+ parachain_client,
+ keystore,
+ skip_prediction,
+ }: BuildNimbusConsensusParams,
+) -> Box>
+where
+ Block: BlockT,
+ PF: Environment + Send + Sync + 'static,
+ PF::Proposer: Proposer<
+ Block,
+ Transaction = BI::Transaction,
+ ProofRecording = EnableProofRecording,
+ Proof = ::Proof,
+ >,
+ BI: BlockImport + Send + Sync + 'static,
+ RBackend: Backend + 'static,
+ // Rust bug: https://github.com/rust-lang/rust/issues/24159
+ sc_client_api::StateBackendFor: sc_client_api::StateBackend>,
+ ParaClient: ProvideRuntimeApi + Send + Sync + 'static,
+ ParaClient::Api: AuthorFilterAPI,
+ CIDP: CreateInherentDataProviders + 'static,
+{
+ NimbusConsensusBuilder::new(
+ para_id,
+ proposer_factory,
+ block_import,
+ create_inherent_data_providers,
+ relay_chain_client,
+ relay_chain_backend,
+ parachain_client,
+ keystore,
+ skip_prediction,
+ )
+ .build()
+}
+
+/// Nimbus consensus builder.
+///
+/// Builds a [`NimbusConsensus`] for a parachain. As this requires
+/// a concrete relay chain client instance, the builder takes a [`polkadot_service::Client`]
+/// that wraps this concrete instance. By using [`polkadot_service::ExecuteWithClient`]
+/// the builder gets access to this concrete instance.
+struct NimbusConsensusBuilder {
+ para_id: ParaId,
+ _phantom: PhantomData,
+ proposer_factory: PF,
+ create_inherent_data_providers: CIDP,
+ block_import: BI,
+ relay_chain_backend: Arc,
+ relay_chain_client: polkadot_service::Client,
+ parachain_client: Arc,
+ keystore: SyncCryptoStorePtr,
+ skip_prediction: bool,
+}
+
+impl NimbusConsensusBuilder
+where
+ Block: BlockT,
+ // Rust bug: https://github.com/rust-lang/rust/issues/24159
+ sc_client_api::StateBackendFor: sc_client_api::StateBackend>,
+ PF: Environment + Send + Sync + 'static,
+ PF::Proposer: Proposer<
+ Block,
+ Transaction = BI::Transaction,
+ ProofRecording = EnableProofRecording,
+ Proof = ::Proof,
+ >,
+ BI: BlockImport + Send + Sync + 'static,
+ RBackend: Backend + 'static,
+ ParaClient: ProvideRuntimeApi + Send + Sync + 'static,
+ CIDP: CreateInherentDataProviders + 'static,
+{
+ /// Create a new instance of the builder.
+ fn new(
+ para_id: ParaId,
+ proposer_factory: PF,
+ block_import: BI,
+ create_inherent_data_providers: CIDP,
+ relay_chain_client: polkadot_service::Client,
+ relay_chain_backend: Arc,
+ parachain_client: Arc,
+ keystore: SyncCryptoStorePtr,
+ skip_prediction: bool,
+ ) -> Self {
+ Self {
+ para_id,
+ _phantom: PhantomData,
+ proposer_factory,
+ block_import,
+ create_inherent_data_providers,
+ relay_chain_backend,
+ relay_chain_client,
+ parachain_client,
+ keystore,
+ skip_prediction,
+ }
+ }
+
+ /// Build the nimbus consensus.
+ fn build(self) -> Box>
+ where
+ ParaClient::Api: AuthorFilterAPI,
+ {
+ self.relay_chain_client.clone().execute_with(self)
+ }
+}
+
+impl polkadot_service::ExecuteWithClient
+ for NimbusConsensusBuilder
+where
+ Block: BlockT,
+ // Rust bug: https://github.com/rust-lang/rust/issues/24159
+ sc_client_api::StateBackendFor: sc_client_api::StateBackend>,
+ PF: Environment + Send + Sync + 'static,
+ PF::Proposer: Proposer<
+ Block,
+ Transaction = BI::Transaction,
+ ProofRecording = EnableProofRecording,
+ Proof = ::Proof,
+ >,
+ BI: BlockImport + Send + Sync + 'static,
+ RBackend: Backend + 'static,
+ ParaClient: ProvideRuntimeApi + Send + Sync + 'static,
+ ParaClient::Api: AuthorFilterAPI,
+ CIDP: CreateInherentDataProviders + 'static,
+{
+ type Output = Box>;
+
+ fn execute_with_client(self, client: Arc) -> Self::Output
+ where
+ >::StateBackend: sp_api::StateBackend>,
+ PBackend: Backend,
+ PBackend::State: sp_api::StateBackend,
+ Api: polkadot_service::RuntimeApiCollection,
+ PClient: polkadot_service::AbstractClient + 'static,
+ ParaClient::Api: AuthorFilterAPI,
+ {
+ Box::new(NimbusConsensus::new(
+ self.para_id,
+ self.proposer_factory,
+ self.create_inherent_data_providers,
+ self.block_import,
+ client.clone(),
+ self.relay_chain_backend,
+ self.parachain_client,
+ self.keystore,
+ self.skip_prediction,
+ ))
+ }
+}
diff --git a/nimbus-launch-config.json b/nimbus-launch-config.json
new file mode 100644
index 00000000000..8002eb3e0a4
--- /dev/null
+++ b/nimbus-launch-config.json
@@ -0,0 +1,60 @@
+{
+ "relaychain": {
+ "bin": "../polkadot/target/release/polkadot",
+ "chain": "rococo-local",
+ "nodes": [
+ {
+ "name": "alice",
+ "wsPort": 9944,
+ "port": 30444
+ },
+ {
+ "name": "bob",
+ "wsPort": 9955,
+ "port": 30555
+ },
+ {
+ "name": "charlie",
+ "wsPort": 9966,
+ "port": 30666
+ },
+ {
+ "name": "dave",
+ "wsPort": 9977,
+ "port": 30777
+ }
+ ],
+ "runtime_genesis_config": {
+ "parachainsConfiguration": {
+ "config": {
+ "validation_upgrade_frequency": 1,
+ "validation_upgrade_delay": 1
+ }
+ }
+ }
+ },
+ "parachains": [
+ {
+ "bin": "./target/release/polkadot-collator",
+ "id": "200",
+ "chain": "nimbus",
+ "balance": "1000000000000000000000",
+ "nodes": [
+ {
+ "wsPort": 9988,
+ "port": 30888,
+ "flags": ["--alice", "--", "--execution=wasm"]
+ },
+ {
+ "wsPort": 9999,
+ "port": 30999,
+ "flags": ["--bob", "--", "--execution=wasm"]
+ }
+ ]
+ }
+ ],
+ "simpleParachains": [],
+ "hrmpChannels": [],
+ "types": {},
+ "finalization": false
+}
diff --git a/pallets/aura-style-filter/Cargo.toml b/pallets/aura-style-filter/Cargo.toml
new file mode 100644
index 00000000000..795c3414248
--- /dev/null
+++ b/pallets/aura-style-filter/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+authors = ["PureStake"]
+edition = "2018"
+name = "pallet-aura-style-filter"
+version = "0.1.0"
+
+[dependencies]
+parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0.101", optional = true, features = ["derive"] }
+frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+nimbus-primitives = { path = "../../primitives/nimbus", default-features = false }
+
+[features]
+default = ["std"]
+std = [
+ "serde",
+ "parity-scale-codec/std",
+ "frame-support/std",
+ "frame-system/std",
+ "nimbus-primitives/std",
+ "sp-core/std",
+ "sp-runtime/std",
+ "sp-std/std",
+]
diff --git a/pallets/aura-style-filter/src/lib.rs b/pallets/aura-style-filter/src/lib.rs
new file mode 100644
index 00000000000..7cfe52397c3
--- /dev/null
+++ b/pallets/aura-style-filter/src/lib.rs
@@ -0,0 +1,78 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see .
+
+//! A Nimbus filter for the AuRa consensus algorithm. This filter does not use any entropy, it
+//! simply rotates authors in order. A single author is eligible at each slot.
+//!
+//! In the Substrate ecosystem, this algorithm is typically known as AuRa (authority round).
+//! There is a well known implementation in the main Substrate repository and published at
+//! https://crates.io/crates/sc-consensus-aura. There are two primary differences between
+//! the approaches:
+//!
+//! 1. This filter leverages all the heavy lifting of the Nimbus framework and consequently is
+//! capable of expressing Aura in < 100 lines of code.
+//!
+//! Whereas sc-consensus-aura includes the entire consensus stack including block signing, digest
+//! formats, and slot prediction. This is a lot of overhead for a simple round robin
+//! consensus that basically boils down to this function
+//! https://github.com/paritytech/substrate/blob/0f849efc/client/consensus/aura/src/lib.rs#L91-L106
+//!
+//! 2. The Nimbus framework places the author checking logic in the runtime which makes it relatively
+//! easy for relay chain validators to confirm the author is valid.
+//!
+//! Whereas sc-consensus-aura places the author checking offchain. The offchain approach is fine
+//! for standalone layer 1 blockchains, but not well suited for verification on the relay chain
+//! where validators only run a wasm blob.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use frame_support::pallet;
+pub use pallet::*;
+
+#[pallet]
+pub mod pallet {
+
+ use frame_support::pallet_prelude::*;
+ use sp_std::vec::Vec;
+
+ //TODO Now that the CanAuthor trait takes a slot number, I don't think this even needs to be a pallet.
+ // I think it could be just a simple type.
+ /// The Author Filter pallet
+ #[pallet::pallet]
+ pub struct Pallet(PhantomData);
+
+ /// Configuration trait of this pallet.
+ #[pallet::config]
+ pub trait Config: frame_system::Config {
+ /// A source for the complete set of potential authors.
+ /// The starting point of the filtering.
+ type PotentialAuthors: Get>;
+ }
+
+ // This code will be called by the author-inherent pallet to check whether the reported author
+ // of this block is eligible at this slot. We calculate that result on demand and do not
+ // record it in storage.
+ impl nimbus_primitives::CanAuthor for Pallet {
+ fn can_author(account: &T::AccountId, slot: &u32) -> bool {
+ let active: Vec = T::PotentialAuthors::get();
+
+ // This is the core Aura logic right here. NOTE(review): the indexing below panics if the author set is empty — confirm PotentialAuthors is always non-empty.
+ let active_author = &active[*slot as usize % active.len()];
+
+ account == active_author
+ }
+ }
+}
diff --git a/pallets/author-inherent/Cargo.toml b/pallets/author-inherent/Cargo.toml
new file mode 100644
index 00000000000..3f382bc5e2f
--- /dev/null
+++ b/pallets/author-inherent/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "pallet-author-inherent"
+version = "0.1.0"
+description = "Inject the block author via an inherent, and persist it via a Consensus digest"
+authors = ["PureStake"]
+edition = "2018"
+license = 'GPL-3.0-only'
+
+[dependencies]
+log = { version = "0.4", default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+nimbus-primitives = { path = "../../primitives/nimbus", default-features = false }
+
+[dev-dependencies]
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+[features]
+default = ["std"]
+std = [
+ "log/std",
+ "frame-support/std",
+ "frame-system/std",
+ "parity-scale-codec/std",
+ "sp-application-crypto/std",
+ "sp-authorship/std",
+ "sp-inherents/std",
+ "sp-runtime/std",
+ "sp-std/std",
+ "sp-api/std",
+ "nimbus-primitives/std",
+]
diff --git a/pallets/author-inherent/src/exec.rs b/pallets/author-inherent/src/exec.rs
new file mode 100644
index 00000000000..5f8c2f8a8a3
--- /dev/null
+++ b/pallets/author-inherent/src/exec.rs
@@ -0,0 +1,108 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see .
+
+//! Block executive to be used by relay chain validators when validating parachain blocks built
+//! with the nimbus consensus family.
+
+use frame_support::traits::ExecuteBlock;
+use sp_api::{BlockT, HeaderT};
+// For some reason I can't get these logs to actually print
+use log::debug;
+use sp_runtime::{RuntimeAppPublic, generic::DigestItem};
+use nimbus_primitives::{NIMBUS_ENGINE_ID, NimbusId, NimbusSignature};
+use sp_application_crypto::{TryFrom, Public as _};
+
+/// Block executive to be used by relay chain validators when validating parachain blocks built
+/// with the nimbus consensus family.
+///
+/// This will strip the seal digest, and confirm that it contains a valid signature
+/// By the block author reported in the author inherent.
+///
+/// Essentially this contains the logic of the verifier plus the inner executive.
+/// TODO Design improvement:
+/// Can we share code with the verifier?
+/// Can this struct take a verifier as an associated type?
+/// Or maybe this will just get simpler in general when https://github.com/paritytech/polkadot/issues/2888 lands
+pub struct BlockExecutor(sp_std::marker::PhantomData<(T, I)>);
+
+impl ExecuteBlock for BlockExecutor
+where
+ Block: BlockT,
+ I: ExecuteBlock,
+{
+ fn execute_block(block: Block) {
+ let (mut header, extrinsics) = block.deconstruct();
+
+ debug!(target: "executive", "In hacked Executive. Initial digests are {:?}", header.digest());
+
+ // Set the seal aside for checking.
+ let seal = header
+ .digest_mut()
+ .logs
+ .pop()
+ .expect("Seal digest is present and is last item");
+
+ debug!(target: "executive", "In hacked Executive. digests after stripping {:?}", header.digest());
+ debug!(target: "executive", "The seal we got {:?}", seal);
+
+ let sig = match seal {
+ DigestItem::Seal(id, ref sig) if id == NIMBUS_ENGINE_ID => sig.clone(),
+ _ => panic!("HeaderUnsealed"),
+ };
+
+ debug!(target: "executive", "๐ชฒ Header hash after popping digest {:?}", header.hash());
+
+ debug!(target: "executive", "๐ชฒ Signature according to executive is {:?}", sig);
+
+ // Grab the digest from the runtime
+ //TODO use the CompatibleDigest trait. Maybe this code should move to the trait.
+ let consensus_digest = header
+ .digest()
+ .logs
+ .iter()
+ .find(|digest| {
+ match *digest {
+ DigestItem::Consensus(id, _) if id == &NIMBUS_ENGINE_ID => true,
+ _ => false,
+ }
+ })
+ .expect("A single consensus digest should be added by the runtime when executing the author inherent.");
+
+ let claimed_author = match *consensus_digest {
+ DigestItem::Consensus(id, ref author_id) if id == NIMBUS_ENGINE_ID => author_id.clone(),
+ _ => panic!("Expected consensus digest to contains author id bytes"),
+ };
+
+ debug!(target: "executive", "๐ชฒ Claimed Author according to executive is {:?}", claimed_author);
+
+ // Verify the signature
+ let valid_signature = NimbusId::from_slice(&claimed_author).verify(
+ &header.hash(),
+ &NimbusSignature::try_from(sig).expect("Bytes should convert to signature correctly"),
+ );
+
+ debug!(target: "executive", "๐ชฒ Valid signature? {:?}", valid_signature);
+
+ if !valid_signature{
+ panic!("Block signature invalid");
+ }
+
+
+ // Now that we've verified the signature, hand execution off to the inner executor
+ // which is probably the normal frame executive.
+ I::execute_block(Block::new(header, extrinsics));
+ }
+}
diff --git a/pallets/author-inherent/src/lib.rs b/pallets/author-inherent/src/lib.rs
new file mode 100644
index 00000000000..81d530951d0
--- /dev/null
+++ b/pallets/author-inherent/src/lib.rs
@@ -0,0 +1,367 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see .
+
+//! Pallet that allows block authors to include their identity in a block via an inherent.
+//! Currently the author does not _prove_ their identity, just states it. So it should not be used,
+//! for things like equivocation slashing that require authenticated authorship information.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use frame_support::{
+ traits::FindAuthor,
+};
+use parity_scale_codec::{Decode, Encode};
+use sp_inherents::{InherentIdentifier, IsFatalError};
+use sp_runtime::{
+ ConsensusEngineId, DigestItem, RuntimeString, RuntimeAppPublic,
+};
+use log::debug;
+use nimbus_primitives::{AccountLookup, CanAuthor, NIMBUS_ENGINE_ID, SlotBeacon, EventHandler, INHERENT_IDENTIFIER};
+
+mod exec;
+pub use exec::BlockExecutor;
+
+pub use pallet::*;
+
+#[frame_support::pallet]
+pub mod pallet {
+ use frame_support::pallet_prelude::*;
+ use frame_system::pallet_prelude::*;
+ use super::*;
+
+ /// The Author Inherent pallet. The core of the nimbus consensus framework's runtime presence.
+ #[pallet::pallet]
+ pub struct Pallet(PhantomData);
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {
+ // This is copied from Aura. I wonder if I really need all those trait bounds. For now I'll leave them.
+ // TODO could I remove this type entirely and just always use NimbusId? Why didn't Aura do that?
+ /// The identifier type for an authority.
+ type AuthorId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize;
+
+ /// A type to convert between AuthorId and AccountId. This is useful when you want to associate
+ /// Block authoring behavior with an AccoutId for rewards or slashing. If you do not need to
+ /// hold an AccountID responsible for authoring use `()` which acts as an identity mapping.
+ type AccountLookup: AccountLookup;
+
+ /// Other pallets that want to be informed about block authorship
+ type EventHandler: EventHandler;
+
+ /// The final word on whether the reported author can author at this height.
+ /// This will be used when executing the inherent. This check is often stricter than the
+ /// Preliminary check, because it can use more data.
+ /// If the pallet that implements this trait depends on an inherent, that inherent **must**
+ /// be included before this one.
+ type CanAuthor: CanAuthor;
+
+ /// Some way of determining the current slot for purposes of verifying the author's eligibility
+ type SlotBeacon: SlotBeacon;
+ }
+
+ // If the AccountId type supports it, then this pallet can be BoundToRuntimeAppPublic
+ impl sp_runtime::BoundToRuntimeAppPublic for Pallet
+ where
+ T: Config,
+ T::AuthorId: RuntimeAppPublic,
+ {
+ type Public = T::AuthorId;
+ }
+ #[pallet::error]
+ pub enum Error {
+ /// Author already set in block.
+ AuthorAlreadySet,
+ /// No AccountId was found to be associated with this author
+ NoAccountId,
+ /// The author in the inherent is not an eligible author.
+ CannotBeAuthor,
+ }
+
+
+ /// Author of current block.
+ #[pallet::storage]
+ pub type Author = StorageValue<_, T::AccountId, OptionQuery>;
+
+ #[pallet::hooks]
+ impl Hooks> for Pallet {
+ fn on_initialize(_: T::BlockNumber) -> Weight {
+ >::kill();
+ 0
+ }
+ }
+
+ #[pallet::call]
+ impl Pallet {
+ /// Inherent to set the author of a block
+ #[pallet::weight((0, DispatchClass::Mandatory))]
+ pub fn set_author(origin: OriginFor, author: T::AuthorId) -> DispatchResult {
+
+ ensure_none(origin)?;
+
+ ensure!(>::get().is_none(), Error::::AuthorAlreadySet);
+ debug!(target: "author-inherent", "Author was not already set");
+
+ let slot = T::SlotBeacon::slot();
+ debug!(target: "author-inherent", "Slot is {:?}", slot);
+
+ let account = T::AccountLookup::lookup_account(&author).ok_or(
+ Error::::NoAccountId
+ )?;
+
+ ensure!(T::CanAuthor::can_author(&account, &slot), Error::::CannotBeAuthor);
+
+ // Update storage
+ Author::::put(&account);
+
+ // Add a consensus digest so the client-side worker can verify the block is signed by the right person.
+ frame_system::Pallet::::deposit_log(DigestItem::::Consensus(
+ NIMBUS_ENGINE_ID,
+ author.encode(),
+ ));
+
+ // Notify any other pallets that are listening (eg rewards) about the author
+ T::EventHandler::note_author(account);
+
+ Ok(())
+ }
+ }
+
+ #[pallet::inherent]
+ impl ProvideInherent for Pallet {
+ type Call = Call;
+ type Error = InherentError;
+ const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+ fn is_inherent_required(_: &InherentData) -> Result