Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion client/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ fn main() {
.global(true)
.takes_value(true)
.value_name("STRING")
.default_value("127.0.0.1")
.default_value("ws://127.0.0.1")
.help("worker url"),
)
.arg(
Expand Down
73 changes: 47 additions & 26 deletions enclave/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ use chain_relay::{
use sp_runtime::OpaqueExtrinsic;
use sp_runtime::{generic::SignedBlock, traits::Header as HeaderT};
use substrate_api_client::extrinsic::xt_primitives::UncheckedExtrinsicV4;
use substratee_stf::sgx::OpaqueCall;
use substratee_stf::sgx::{OpaqueCall, shards_key_hash, storage_hashes_to_update_per_shard};

mod aes;
mod attestation;
Expand Down Expand Up @@ -218,9 +218,16 @@ pub unsafe extern "C" fn get_state(
return sgx_status_t::SGX_ERROR_UNEXPECTED;
}

if !state::exists(&shard) {
info!("Initialized new shard that was queried chain: {:?}", shard);
if let Err(e) = state::init_shard(&shard) {
return e;
}
}

let mut state = match state::load(&shard) {
Ok(s) => s,
Err(status) => return status,
Err(status) => return status
};

let validator = match io::light_validation::unseal() {
Expand Down Expand Up @@ -348,14 +355,15 @@ pub unsafe extern "C" fn sync_chain_relay(
return sgx_status_t::SGX_ERROR_UNEXPECTED;
}

if update_states(signed_block.block.header.clone()).is_err() {
error!("Error performing state updates upon block import")
}

match scan_block_for_relevant_xt(&signed_block.block) {
Ok(c) => calls.extend(c.into_iter()),
Err(_) => error!("Error executing relevant extrinsics"),
};

if update_states(signed_block.block.header).is_err() {
error!("Error performing state updates upon block import")
}
}

if let Err(_e) = stf_post_actions(validator, calls, xt_slice, *nonce) {
Expand All @@ -376,16 +384,37 @@ pub fn update_states(header: Header) -> SgxResult<()> {
return Ok(());
}

// global requests; they are the same for every shard
let responses: Vec<WorkerResponse<Vec<u8>>> = worker_request(requests)?;
let update_map = verify_worker_responses(responses, header)?;

let shards = state::list_shards()?;
debug!("found shards: {:?}", shards);
for s in shards {
let mut state = state::load(&s)?;
Stf::update_storage(&mut state, &update_map);
state::write(state, &s)?;
}
let update_map = verify_worker_responses(responses, header.clone())?;
// look for new shards and initialize them
if let Some(maybe_shards) = update_map.get(&shards_key_hash()) {
match maybe_shards {
Some(shards) => {
let shards: Vec<ShardIdentifier> = Decode::decode(&mut shards.as_slice()).sgx_error_with_log("error decoding shards")?;
for s in shards {
if !state::exists(&s) {
info!("Initialized new shard that was found on chain: {:?}", s);
state::init_shard(&s)?;
}
// per shard (cid) requests
let per_shard_request = storage_hashes_to_update_per_shard(&s)
.into_iter()
.map(|key| WorkerRequest::ChainStorage(key, Some(header.hash())))
.collect();

let responses: Vec<WorkerResponse<Vec<u8>>> = worker_request(per_shard_request)?;
let per_shard_update_map = verify_worker_responses(responses, header.clone())?;

let mut state = state::load(&s)?;
Stf::update_storage(&mut state, &per_shard_update_map);
Stf::update_storage(&mut state, &update_map);
state::write(state, &s)?;
}
}
None => info!("No shards are on the chain yet")
};
};
Ok(())
}

Expand Down Expand Up @@ -500,12 +529,7 @@ fn handle_call_worker_xt(
return Ok(());
}

let mut state = if state::exists(&shard) {
state::load(&shard)?
} else {
state::init_shard(&shard)?;
Stf::init_state()
};
let mut state = state::load(&shard)?;

debug!("Update STF storage!");
let requests = Stf::get_storage_hashes_to_update(&stf_call_signed)
Expand Down Expand Up @@ -541,7 +565,7 @@ fn handle_call_worker_xt(
fn verify_worker_responses(
responses: Vec<WorkerResponse<Vec<u8>>>,
header: Header,
) -> SgxResult<HashMap<Vec<u8>, Vec<u8>>> {
) -> SgxResult<HashMap<Vec<u8>, Option<Vec<u8>>>> {
let mut update_map = HashMap::new();
for response in responses.iter() {
match response {
Expand All @@ -562,10 +586,7 @@ fn verify_worker_responses(
error!("Wrong storage value supplied");
return Err(sgx_status_t::SGX_ERROR_UNEXPECTED);
}

if let Some(val) = value {
update_map.insert(key.clone(), val.clone());
}
update_map.insert(key.clone(), value.clone());
}
}
}
Expand Down Expand Up @@ -658,7 +679,7 @@ fn worker_request<V: Encode + Decode>(
req: Vec<WorkerRequest>,
) -> SgxResult<Vec<WorkerResponse<V>>> {
let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED;
let mut resp: Vec<u8> = vec![0; 4196];
let mut resp: Vec<u8> = vec![0; 4196 * 4];

let res = unsafe {
ocall_worker_request(
Expand Down
26 changes: 7 additions & 19 deletions enclave/src/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

use std::fs;
use std::vec::Vec;
use std::io::Write;

use log::*;
use sgx_tcrypto::rsgx_sha256_slice;
Expand All @@ -27,8 +28,8 @@ use crate::constants::{ENCRYPTED_STATE_FILE, SHARDS_PATH};
use crate::hex;
use crate::io;
use crate::utils::UnwrapOrSgxErrorUnexpected;
use base58::{FromBase58, ToBase58};
use codec::{Decode, Encode};
use base58::ToBase58;
use codec::Encode;
use sgx_externalities::SgxExternalitiesTrait;
use sp_core::H256;
use std::path::Path;
Expand Down Expand Up @@ -93,7 +94,10 @@ pub fn exists(shard: &ShardIdentifier) -> bool {
}

/// Creates the on-disk directory and an empty encrypted state file for a new shard.
///
/// The shard directory is `<SHARDS_PATH>/<base58(scale-encoded shard)>`; the state
/// file is created empty so that a subsequent `state::load` for this shard finds a
/// file to read instead of failing.
pub fn init_shard(shard: &ShardIdentifier) -> SgxResult<()> {
    let path = format!("{}/{}", SHARDS_PATH, shard.encode().to_base58());
    // `create_dir_all` takes `AsRef<Path>`, so borrowing avoids a needless clone.
    fs::create_dir_all(&path).sgx_error()?;
    let mut file = fs::File::create(format!("{}/{}", path, ENCRYPTED_STATE_FILE)).sgx_error()?;
    // File::create already truncates to zero length; the explicit empty write keeps
    // any I/O error surfaced through the SgxResult chain.
    file.write_all(b"").sgx_error()
}

fn read(path: &str) -> SgxResult<Vec<u8>> {
Expand Down Expand Up @@ -125,22 +129,6 @@ fn encrypt(mut state: Vec<u8>) -> SgxResult<Vec<u8>> {
Ok(state)
}

/// Enumerates all shards present on disk by decoding the base58-encoded
/// directory names found under `SHARDS_PATH`.
pub fn list_shards() -> SgxResult<Vec<ShardIdentifier>> {
    let entries = fs::read_dir(SHARDS_PATH).sgx_error()?;
    let mut shards = Vec::new();
    for entry in entries {
        // Directory name -> UTF-8 string -> base58 bytes -> SCALE-decoded id.
        let name = entry.sgx_error()?.file_name().into_string().sgx_error()?;
        let raw = name.from_base58().sgx_error()?;
        let shard = ShardIdentifier::decode(&mut raw.as_slice()).sgx_error()?;
        shards.push(shard);
    }
    Ok(shards)
}

pub fn test_encrypted_state_io_works() {
let path = "test_state_file.bin";
let plaintext = b"The quick brown fox jumps over the lazy dog.";
Expand Down
2 changes: 1 addition & 1 deletion stf/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ pub enum TrustedOperationSigned {
get(TrustedGetterSigned),
}

#[derive(Encode, Decode, Clone)]
#[derive(Encode, Decode, Clone, Debug)]
#[allow(non_camel_case_types)]
pub enum TrustedCall {
balance_set_balance(AccountId, AccountId, Balance, Balance),
Expand Down
34 changes: 24 additions & 10 deletions stf/src/sgx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use sp_runtime::traits::Dispatchable;

use crate::{
AccountId, State, Stf, TrustedCall, TrustedCallSigned, TrustedGetter, TrustedGetterSigned,
SUBSRATEE_REGISTRY_MODULE, UNSHIELD,
ShardIdentifier, SUBSRATEE_REGISTRY_MODULE, UNSHIELD,
};
use sp_core::blake2_256;

Expand Down Expand Up @@ -69,11 +69,16 @@ impl Stf {
ext
}

pub fn update_storage(ext: &mut State, map_update: &HashMap<Vec<u8>, Vec<u8>>) {
pub fn update_storage(ext: &mut State, map_update: &HashMap<Vec<u8>, Option<Vec<u8>>>) {
ext.execute_with(|| {
map_update
.iter()
.for_each(|(k, v)| sp_io::storage::set(k, v))
.for_each(|(k, v)| {
match v {
Some(value) => sp_io::storage::set(k, value),
None => sp_io::storage::clear(k)
};
});
});
}

Expand Down Expand Up @@ -205,19 +210,28 @@ impl Stf {
}

pub fn get_storage_hashes_to_update_for_getter(getter: &TrustedGetterSigned) -> Vec<Vec<u8>> {
let key_hashes = Vec::new();
info!("No storage updates needed for getter: {:?}", getter.getter); // dummy. Is currently not needed
key_hashes
info!("No specific storage updates needed for getter. Returning those for on block: {:?}", getter.getter);
Self::storage_hashes_to_update_on_block()
}

pub fn storage_hashes_to_update_on_block() -> Vec<Vec<u8>> {
// let key_hashes = Vec::new();
// key_hashes.push(storage_value_key("dummy", "dummy"));
// key_hashes
Vec::new()
let mut key_hashes = Vec::new();

// get all shards that are currently registered
key_hashes.push(shards_key_hash());

key_hashes
}
}

// Per-shard (shard-specific) storage keys to fetch from the chain on block
// import. Currently a stub returning no keys; the parameter is unused.
pub fn storage_hashes_to_update_per_shard(_shard: &ShardIdentifier) -> Vec<Vec<u8>> {
Vec::new()
}

// Storage key of EncointerCurrencies::CurrencyIdentifiers — the on-chain list of
// registered shards. Looked up in the worker-response update map on block import
// to detect and initialize shards that newly appeared on chain.
pub fn shards_key_hash() -> Vec<u8> {
storage_value_key("EncointerCurrencies", "CurrencyIdentifiers")
}

// get the AccountInfo key where the nonce is stored
pub fn nonce_key_hash(account: &AccountId) -> Vec<u8> {
storage_map_key(
Expand Down
50 changes: 34 additions & 16 deletions worker/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,11 @@ fn worker(w_ip: &str, w_port: &str, mu_ra_port: &str, shard: &ShardIdentifier) {
}
}

println!("*** [+] finished remote attestation\n");

println!("*** Syncing chain relay\n\n");
let mut latest_head = init_chain_relay(eid, &api);
println!("*** [+] Finished syncing chain relay\n");

// ------------------------------------------------------------------------
// subscribe to events and react on firing
Expand Down Expand Up @@ -470,33 +474,47 @@ pub fn sync_chain_relay(

// Todo: Check, is this dangerous such that it could be an eternal or too big loop?
let mut head = curr_head.clone();

let no_blocks_to_sync = head.block.header.number - last_synced_head.number;
if no_blocks_to_sync > 1 {
println!("Chain Relay is synced until block: {:?}", last_synced_head.number);
println!("Last finalized block number: {:?}\n", head.block.header.number);
}

while head.block.header.parent_hash != last_synced_head.hash() {
head = api
.get_signed_block(Some(head.block.header.parent_hash))
.unwrap();
blocks_to_sync.push(head.clone());
debug!("Syncing Block: {:?}", head.block)

if head.block.header.number % 100 == 0 {
println!("Remaining blocks to fetch until last synced header: {:?}", head.block.header.number - last_synced_head.number)
}
}
blocks_to_sync.reverse();
debug!(
"Got {} headers to sync in chain relay.",
blocks_to_sync.len()
);

let tee_accountid = enclave_account(eid);
let tee_nonce = get_nonce(&api, &tee_accountid);

let xts = enclave_sync_chain_relay(eid, blocks_to_sync, tee_nonce).unwrap();

let extrinsics: Vec<Vec<u8>> = Decode::decode(&mut xts.as_slice()).unwrap();
info!(
"Sync chain relay: Enclave wants to send {} extrinsics",
extrinsics.len()
);
// only feed 100 blocks at a time into the enclave to save enclave state regularly
let mut i = blocks_to_sync[0].block.header.number as usize;
for chunk in blocks_to_sync.chunks(100) {
let tee_nonce = get_nonce(&api, &tee_accountid);
let xts = enclave_sync_chain_relay(eid, chunk.to_vec(), tee_nonce).unwrap();
let extrinsics: Vec<Vec<u8>> = Decode::decode(&mut xts.as_slice()).unwrap();

if !extrinsics.is_empty() {
println!(
"Sync chain relay: Enclave wants to send {} extrinsics",
extrinsics.len()
);
}
for xt in extrinsics.into_iter() {
api.send_extrinsic(hex_encode(xt), XtStatus::InBlock)
.unwrap();
}

for xt in extrinsics.into_iter() {
api.send_extrinsic(hex_encode(xt), XtStatus::InBlock)
.unwrap();
i += chunk.len();
println!("Synced {} blocks out of {} finalized blocks", i , blocks_to_sync[0].block.header.number as usize + blocks_to_sync.len())
}

curr_head.block.header
Expand Down
2 changes: 1 addition & 1 deletion worker/worker-api/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ pub struct Api {
impl Api {
pub fn new(url: String) -> Api {
Api {
url: format!("ws://{}", url),
url,
}
}

Expand Down