Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Merge remote-tracking branch 'origin/main' into update-substrate
# Conflicts:
#	crates/sp-domains/src/lib.rs
#	crates/subspace-networking/src/create.rs
#	domains/client/domain-executor/src/system_bundle_producer.rs
#	domains/client/relayer/Cargo.toml
#	domains/client/relayer/src/lib.rs
#	domains/client/relayer/src/worker.rs
#	domains/pallets/domain-tracker/src/tests.rs
#	domains/primitives/domain-tracker/Cargo.toml
  • Loading branch information
nazar-pc committed Nov 15, 2022
commit f19f91034b6e56182fea04da68f49cc38395eae3
3 changes: 1 addition & 2 deletions crates/sp-domains/src/bundle_election.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ use sp_core::H256;
#[cfg(feature = "std")]
use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue};
use sp_runtime::traits::BlakeTwo256;
use sp_std::vec;
use sp_std::vec::Vec;
use sp_trie::{read_trie_value, LayoutV1, StorageProof};
use subspace_core_primitives::crypto::blake2b_256_hash_list;
Expand Down Expand Up @@ -191,7 +190,7 @@ pub fn read_bundle_election_params(
let db = storage_proof.into_memory_db::<BlakeTwo256>();

let read_value = |storage_key| {
read_trie_value::<LayoutV1<BlakeTwo256>, _>(&db, state_root, storage_key)
read_trie_value::<LayoutV1<BlakeTwo256>, _>(&db, state_root, storage_key, None, None)
.map_err(|_| ReadBundleElectionParamsError::TrieError)
};

Expand Down
195 changes: 0 additions & 195 deletions crates/sp-domains/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -199,201 +199,6 @@ impl<Hash: Encode> BundleHeader<Hash> {
}
}

/// Derives the signer-local randomness from a VRF output.
///
/// The raw VRF output is re-attached to the transcript built from
/// `global_challenge` under the signer's public key, and the randomness bytes
/// are then extracted under the `LOCAL_RANDOMNESS_CONTEXT` label.
/// Fails if the public key bytes or the VRF output are malformed.
fn derive_local_randomness(
    vrf_output: [u8; VRF_OUTPUT_LENGTH],
    public_key: &ExecutorPublicKey,
    global_challenge: &Blake2b256Hash,
) -> SignatureResult<LocalRandomness> {
    let signer = schnorrkel::PublicKey::from_bytes(public_key.as_ref())?;
    let transcript = make_local_randomness_transcript(global_challenge);
    let vrf_in_out = VRFOutput(vrf_output).attach_input_hash(&signer, transcript)?;

    Ok(vrf_in_out.make_bytes(LOCAL_RANDOMNESS_CONTEXT))
}

/// Returns the domain-specific solution for the challenge of producing a bundle.
///
/// The solution is the first `size_of::<u128>()` bytes, read little-endian, of
/// the Blake2b-256 hash over the little-endian domain id concatenated with the
/// VRF-derived local randomness.
pub fn derive_bundle_election_solution(
    domain_id: DomainId,
    vrf_output: [u8; VRF_OUTPUT_LENGTH],
    public_key: &ExecutorPublicKey,
    global_challenge: &Blake2b256Hash,
) -> SignatureResult<u128> {
    let local_randomness = derive_local_randomness(vrf_output, public_key, global_challenge)?;
    let local_domain_randomness =
        blake2b_256_hash_list(&[&domain_id.to_le_bytes(), &local_randomness]);

    // A 32-byte hash always holds at least 16 bytes, hence the `expect`.
    let (solution_bytes, _) = local_domain_randomness.split_at(core::mem::size_of::<u128>());

    Ok(u128::from_le_bytes(
        solution_bytes
            .try_into()
            .expect("Local domain randomness must fit into u128; qed"),
    ))
}

/// Returns the election threshold based on the stake weight proportion and slot probability.
pub fn calculate_bundle_election_threshold(
    stake_weight: StakeWeight,
    total_stake_weight: StakeWeight,
    slot_probability: (u64, u64),
) -> u128 {
    // The calculation is written this way to avoid overflowing intermediate
    // products, which makes it harder to read; in a readable form the formula
    // is as follows:
    //
    //             slot_probability.0     stake_weight
    // threshold = ------------------ * --------------------- * u128::MAX
    //             slot_probability.1    total_stake_weight
    //
    // NOTE(review): each integer division rounds down before the next multiply,
    // so the result slightly underestimates the exact fraction. Panics if
    // `slot_probability.1` or `total_stake_weight` is zero — presumably callers
    // guarantee both are non-zero; confirm at the call sites.
    //
    // TODO: better to have more audits on this calculation.
    u128::MAX / u128::from(slot_probability.1) * u128::from(slot_probability.0) / total_stake_weight
        * stake_weight
}

/// Returns `true` when the election solution does not exceed the threshold,
/// i.e. when the solution wins the election.
pub fn is_election_solution_within_threshold(election_solution: u128, threshold: u128) -> bool {
    threshold >= election_solution
}

/// Builds the VRF transcript used for deriving the local randomness.
///
/// The transcript is labelled with `VRF_TRANSCRIPT_LABEL` and carries the
/// global challenge under the `b"global challenge"` message label.
pub fn make_local_randomness_transcript(global_challenge: &Blake2b256Hash) -> Transcript {
    let mut t = Transcript::new(VRF_TRANSCRIPT_LABEL);
    t.append_message(b"global challenge", global_challenge);
    t
}

/// Builds the keystore-facing VRF transcript data.
///
/// Mirrors [`make_local_randomness_transcript`]: the same label and the same
/// single `"global challenge"` item, but in the owned-bytes form the keystore
/// VRF API expects.
#[cfg(feature = "std")]
pub fn make_local_randomness_transcript_data(
    global_challenge: &Blake2b256Hash,
) -> VRFTranscriptData {
    let items = vec![(
        "global challenge",
        VRFTranscriptValue::Bytes(global_challenge.to_vec()),
    )];

    VRFTranscriptData {
        label: VRF_TRANSCRIPT_LABEL,
        items,
    }
}

/// Well-known storage keys read by the bundle election verification.
///
/// Each constant is a precomputed 32-byte storage key (pallet-prefix hash
/// concatenated with the storage-item hash) of an item in
/// `pallet_executor_registry`. The byte values are fixed by the hashing scheme
/// and must not be edited by hand.
pub mod well_known_keys {
    use sp_std::vec;
    use sp_std::vec::Vec;

    /// Storage key of `pallet_executor_registry::Authorities`.
    ///
    /// Authorities::<T>::hashed_key().
    pub(crate) const AUTHORITIES: [u8; 32] = [
        185, 61, 20, 0, 90, 16, 106, 134, 14, 150, 35, 100, 152, 229, 203, 187, 94, 6, 33, 196,
        134, 154, 166, 12, 2, 190, 154, 220, 201, 138, 13, 29,
    ];

    /// Storage key of `pallet_executor_registry::TotalStakeWeight`.
    ///
    /// TotalStakeWeight::<T>::hashed_key().
    pub(crate) const TOTAL_STAKE_WEIGHT: [u8; 32] = [
        185, 61, 20, 0, 90, 16, 106, 134, 14, 150, 35, 100, 152, 229, 203, 187, 173, 245, 4, 89,
        128, 92, 85, 189, 74, 160, 138, 209, 188, 18, 62, 94,
    ];

    /// Storage key of `pallet_executor_registry::SlotProbability`.
    ///
    /// SlotProbability::<T>::hashed_key().
    pub(crate) const SLOT_PROBABILITY: [u8; 32] = [
        185, 61, 20, 0, 90, 16, 106, 134, 14, 150, 35, 100, 152, 229, 203, 187, 60, 16, 174, 72,
        214, 175, 220, 254, 34, 167, 168, 222, 147, 18, 4, 168,
    ];

    /// Returns every storage key needed to read the bundle election parameters.
    ///
    /// The order matches the reads in `read_bundle_election_params`.
    pub fn bundle_election_storage_keys() -> Vec<[u8; 32]> {
        vec![AUTHORITIES, TOTAL_STAKE_WEIGHT, SLOT_PROBABILITY]
    }
}

/// Parameters for the bundle election.
#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
pub struct BundleElectionParams {
    /// Executor authorities paired with their individual stake weight.
    pub authorities: Vec<(ExecutorPublicKey, StakeWeight)>,
    /// Total stake weight — presumably the sum over `authorities`; confirm at
    /// the storage write site.
    pub total_stake_weight: StakeWeight,
    /// Slot probability as a `(numerator, denominator)` fraction.
    pub slot_probability: (u64, u64),
}

/// Error type returned by [`verify_vrf_proof`].
#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
pub enum VrfProofError {
    /// Can not construct the vrf public_key/output/proof from the raw bytes.
    VrfSignatureConstructionError,
    /// Invalid vrf proof: the inputs were well-formed but verification failed.
    BadProof,
}

/// Verify the vrf proof generated in the bundle election.
///
/// Any malformed raw input (public key, VRF output, or VRF proof) maps onto
/// [`VrfProofError::VrfSignatureConstructionError`]; only a well-formed but
/// incorrect proof yields [`VrfProofError::BadProof`].
pub fn verify_vrf_proof(
    public_key: &ExecutorPublicKey,
    vrf_output: &[u8],
    vrf_proof: &[u8],
    global_challenge: &Blake2b256Hash,
) -> Result<(), VrfProofError> {
    // Parse all raw inputs up front so the failure mode is uniform.
    let public_key = schnorrkel::PublicKey::from_bytes(public_key.as_ref())
        .map_err(|_| VrfProofError::VrfSignatureConstructionError)?;
    let output = VRFOutput::from_bytes(vrf_output)
        .map_err(|_| VrfProofError::VrfSignatureConstructionError)?;
    let proof = VRFProof::from_bytes(vrf_proof)
        .map_err(|_| VrfProofError::VrfSignatureConstructionError)?;

    public_key
        .vrf_verify(
            make_local_randomness_transcript(global_challenge),
            &output,
            &proof,
        )
        .map(|_| ())
        .map_err(|_| VrfProofError::BadProof)
}

/// Error type returned by [`read_bundle_election_params`].
#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
pub enum ReadBundleElectionParamsError {
    /// Trie error while reading a value from the storage proof.
    TrieError,
    /// The value does not exist in the trie.
    MissingValue,
    /// Failed to decode the value read from the trie.
    DecodeError,
}

/// Returns the bundle election parameters read from the given storage proof.
pub fn read_bundle_election_params(
storage_proof: StorageProof,
state_root: &H256,
) -> Result<BundleElectionParams, ReadBundleElectionParamsError> {
let db = storage_proof.into_memory_db::<BlakeTwo256>();

let read_value = |storage_key| {
read_trie_value::<LayoutV1<BlakeTwo256>, _>(&db, state_root, storage_key, None, None)
.map_err(|_| ReadBundleElectionParamsError::TrieError)
};

let authorities =
read_value(&AUTHORITIES)?.ok_or(ReadBundleElectionParamsError::MissingValue)?;
let authorities: Vec<(ExecutorPublicKey, StakeWeight)> =
Decode::decode(&mut authorities.as_slice())
.map_err(|_| ReadBundleElectionParamsError::DecodeError)?;

let total_stake_weight_value =
read_value(&TOTAL_STAKE_WEIGHT)?.ok_or(ReadBundleElectionParamsError::MissingValue)?;
let total_stake_weight: StakeWeight = Decode::decode(&mut total_stake_weight_value.as_slice())
.map_err(|_| ReadBundleElectionParamsError::DecodeError)?;

let slot_probability_value =
read_value(&SLOT_PROBABILITY)?.ok_or(ReadBundleElectionParamsError::MissingValue)?;
let slot_probability: (u64, u64) = Decode::decode(&mut slot_probability_value.as_slice())
.map_err(|_| ReadBundleElectionParamsError::DecodeError)?;

Ok(BundleElectionParams {
authorities,
total_stake_weight,
slot_probability,
})
}

#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
pub struct ProofOfElection<SecondaryHash> {
/// Domain id.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
use libp2p::kad::kbucket::{Distance, Sha256Hash};
use libp2p::kad::kbucket::Distance;
pub use libp2p::kad::record::Key;
pub use libp2p::PeerId;
use std::cmp::Ordering;
use std::collections::BinaryHeap;

type KademliaBucketKey<T> = libp2p::kad::kbucket::Key<T, Sha256Hash>;
type KademliaBucketKey<T> = libp2p::kad::kbucket::Key<T>;

// Helper structure. It wraps Kademlia distance to a given peer for heap-metrics.
#[derive(Debug, Clone)]
Expand Down Expand Up @@ -57,7 +57,7 @@ impl RecordBinaryHeap {
/// Constructs a heap with given PeerId and size limit.
pub fn new(peer_id: PeerId, limit: usize) -> Self {
Self {
peer_key: KademliaBucketKey::new(peer_id),
peer_key: KademliaBucketKey::from(peer_id),
max_heap: BinaryHeap::new(),
limit,
}
Expand Down
7 changes: 3 additions & 4 deletions crates/subspace-networking/src/behavior/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ use crate::behavior::custom_record_store::{
};
use crate::behavior::record_binary_heap::RecordBinaryHeap;
use chrono::Duration;
use libp2p::kad::kbucket::Sha256Hash;
use libp2p::kad::record::Key;
use libp2p::kad::store::RecordStore;
use libp2p::kad::ProviderRecord;
Expand Down Expand Up @@ -188,7 +187,7 @@ fn binary_heap_limit_works() {

#[test]
fn binary_heap_eviction_works() {
type KademliaBucketKey<T> = libp2p::kad::kbucket::Key<T, Sha256Hash>;
type KademliaBucketKey<T> = libp2p::kad::kbucket::Key<T>;

let peer_id =
PeerId::from_multihash(Multihash::wrap(Code::Identity.into(), [0u8].as_slice()).unwrap())
Expand All @@ -206,8 +205,8 @@ fn binary_heap_eviction_works() {
let bucket_key2: KademliaBucketKey<Key> = KademliaBucketKey::new(key2.clone());

let evicted = evicted.unwrap();
if bucket_key1.distance::<KademliaBucketKey<_>>(&KademliaBucketKey::new(peer_id))
> bucket_key2.distance::<KademliaBucketKey<_>>(&KademliaBucketKey::new(peer_id))
if bucket_key1.distance::<KademliaBucketKey<_>>(&KademliaBucketKey::from(peer_id))
> bucket_key2.distance::<KademliaBucketKey<_>>(&KademliaBucketKey::from(peer_id))
{
assert_eq!(evicted, key1);
} else {
Expand Down
1 change: 1 addition & 0 deletions crates/subspace-networking/src/create.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ use libp2p::gossipsub::{
GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode,
};
use libp2p::identify::Config as IdentifyConfig;
use libp2p::identity::Keypair;
use libp2p::kad::{KademliaBucketInserts, KademliaCaching, KademliaConfig, KademliaStoreInserts};
use libp2p::mplex::MplexConfig;
use libp2p::multiaddr::Protocol;
Expand Down
2 changes: 1 addition & 1 deletion crates/subspace-runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -559,7 +559,7 @@ fn extract_core_bundles(
extrinsics
.into_iter()
.filter_map(|uxt| match uxt.function {
Call::Domains(pallet_domains::Call::submit_bundle {
RuntimeCall::Domains(pallet_domains::Call::submit_bundle {
signed_opaque_bundle,
}) if signed_opaque_bundle.domain_id() == domain_id => {
Some(signed_opaque_bundle.bundle)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,10 +109,9 @@ where
let storage_keys = well_known_keys::bundle_election_storage_keys(domain_id);
// TODO: bench how large the storage proof we can afford and try proving a single
// electioned executor storage instead of the whole authority set.
let storage_proof = self.client.read_proof(
&best_block_id,
&mut storage_keys.iter().map(|s| s.as_slice()),
)?;
let storage_proof = self
.client
.read_proof(best_hash, &mut storage_keys.iter().map(|s| s.as_slice()))?;

let state_root = *self
.client
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -461,7 +461,7 @@ where
) -> Result<(), sp_blockchain::Error> {
let extrinsics = self
.primary_chain_client
.block_body(&BlockId::Hash(primary_hash))?
.block_body(primary_hash)?
.ok_or_else(|| {
sp_blockchain::Error::Backend(format!(
"Primary block body for {:?} not found",
Expand Down
Loading
You are viewing a condensed version of this merge commit. You can view the full changes here.