diff --git a/client/src/main.rs b/client/src/main.rs
index a7a80b4a91..888d08600b 100644
--- a/client/src/main.rs
+++ b/client/src/main.rs
@@ -99,7 +99,7 @@ fn main() {
                 .global(true)
                 .takes_value(true)
                 .value_name("STRING")
-                .default_value("127.0.0.1")
+                .default_value("ws://127.0.0.1")
                 .help("worker url"),
         )
         .arg(
diff --git a/enclave/src/lib.rs b/enclave/src/lib.rs
index 7c96ff00a9..9de341cd8d 100644
--- a/enclave/src/lib.rs
+++ b/enclave/src/lib.rs
@@ -62,7 +62,7 @@ use chain_relay::{
 use sp_runtime::OpaqueExtrinsic;
 use sp_runtime::{generic::SignedBlock, traits::Header as HeaderT};
 use substrate_api_client::extrinsic::xt_primitives::UncheckedExtrinsicV4;
-use substratee_stf::sgx::OpaqueCall;
+use substratee_stf::sgx::{OpaqueCall, shards_key_hash, storage_hashes_to_update_per_shard};
 
 mod aes;
 mod attestation;
@@ -218,9 +218,16 @@ pub unsafe extern "C" fn get_state(
         return sgx_status_t::SGX_ERROR_UNEXPECTED;
     }
 
+    if !state::exists(&shard) {
+        info!("Initialized new shard that was queried: {:?}", shard);
+        if let Err(e) = state::init_shard(&shard) {
+            return e;
+        }
+    }
+
     let mut state = match state::load(&shard) {
         Ok(s) => s,
-        Err(status) => return status,
+        Err(status) => return status
     };
 
     let validator = match io::light_validation::unseal() {
@@ -348,14 +355,15 @@ pub unsafe extern "C" fn sync_chain_relay(
             return sgx_status_t::SGX_ERROR_UNEXPECTED;
         }
 
+        if update_states(signed_block.block.header.clone()).is_err() {
+            error!("Error performing state updates upon block import")
+        }
+
         match scan_block_for_relevant_xt(&signed_block.block) {
             Ok(c) => calls.extend(c.into_iter()),
             Err(_) => error!("Error executing relevant extrinsics"),
         };
-        if update_states(signed_block.block.header).is_err() {
-            error!("Error performing state updates upon block import")
-        }
-
     }
 
     if let Err(_e) = stf_post_actions(validator, calls, xt_slice, *nonce) {
@@ -376,16 +384,37 @@ pub fn update_states(header: Header) -> SgxResult<()> {
         return Ok(());
     }
 
+    // global requests; they are the same for every shard
     let responses: Vec<WorkerResponse<Vec<u8>>> = worker_request(requests)?;
-    let update_map = verify_worker_responses(responses, header)?;
-
-    let shards = state::list_shards()?;
-    debug!("found shards: {:?}", shards);
-    for s in shards {
-        let mut state = state::load(&s)?;
-        Stf::update_storage(&mut state, &update_map);
-        state::write(state, &s)?;
-    }
+    let update_map = verify_worker_responses(responses, header.clone())?;
+    // look for new shards and initialize them
+    if let Some(maybe_shards) = update_map.get(&shards_key_hash()) {
+        match maybe_shards {
+            Some(shards) => {
+                let shards: Vec<ShardIdentifier> = Decode::decode(&mut shards.as_slice()).sgx_error_with_log("error decoding shards")?;
+                for s in shards {
+                    if !state::exists(&s) {
+                        info!("Initialized new shard that was found on chain: {:?}", s);
+                        state::init_shard(&s)?;
+                    }
+                    // per shard (cid) requests
+                    let per_shard_request = storage_hashes_to_update_per_shard(&s)
+                        .into_iter()
+                        .map(|key| WorkerRequest::ChainStorage(key, Some(header.hash())))
+                        .collect();
+
+                    let responses: Vec<WorkerResponse<Vec<u8>>> = worker_request(per_shard_request)?;
+                    let per_shard_update_map = verify_worker_responses(responses, header.clone())?;
+
+                    let mut state = state::load(&s)?;
+                    Stf::update_storage(&mut state, &per_shard_update_map);
+                    Stf::update_storage(&mut state, &update_map);
+                    state::write(state, &s)?;
+                }
+            }
+            None => info!("No shards are on the chain yet")
+        };
+    };
 
     Ok(())
 }
@@ -500,12 +529,7 @@ fn handle_call_worker_xt(
         return Ok(());
     }
 
-    let mut state = if state::exists(&shard) {
-        state::load(&shard)?
-    } else {
-        state::init_shard(&shard)?;
-        Stf::init_state()
-    };
+    let mut state = state::load(&shard)?;
 
     debug!("Update STF storage!");
     let requests = Stf::get_storage_hashes_to_update(&stf_call_signed)
@@ -541,7 +565,7 @@ fn handle_call_worker_xt(
 fn verify_worker_responses(
     responses: Vec<WorkerResponse<Vec<u8>>>,
     header: Header,
-) -> SgxResult<HashMap<Vec<u8>, Vec<u8>>> {
+) -> SgxResult<HashMap<Vec<u8>, Option<Vec<u8>>>> {
     let mut update_map = HashMap::new();
     for response in responses.iter() {
         match response {
@@ -562,10 +586,7 @@ fn verify_worker_responses(
                     error!("Wrong storage value supplied");
                     return Err(sgx_status_t::SGX_ERROR_UNEXPECTED);
                 }
-
-                if let Some(val) = value {
-                    update_map.insert(key.clone(), val.clone());
-                }
+                update_map.insert(key.clone(), value.clone());
             }
         }
     }
@@ -658,7 +679,7 @@ fn worker_request<V: Encode + Decode>(
     req: Vec<WorkerRequest>,
 ) -> SgxResult<Vec<WorkerResponse<V>>> {
     let mut rt: sgx_status_t = sgx_status_t::SGX_ERROR_UNEXPECTED;
-    let mut resp: Vec<u8> = vec![0; 4196];
+    let mut resp: Vec<u8> = vec![0; 4196 * 4];
 
     let res = unsafe {
         ocall_worker_request(
diff --git a/enclave/src/state.rs b/enclave/src/state.rs
index e7b0bbbe5c..1321da3826 100644
--- a/enclave/src/state.rs
+++ b/enclave/src/state.rs
@@ -17,6 +17,7 @@
 
 use std::fs;
 use std::vec::Vec;
+use std::io::Write;
 
 use log::*;
 use sgx_tcrypto::rsgx_sha256_slice;
@@ -27,8 +28,8 @@ use crate::constants::{ENCRYPTED_STATE_FILE, SHARDS_PATH};
 use crate::hex;
 use crate::io;
 use crate::utils::UnwrapOrSgxErrorUnexpected;
-use base58::{FromBase58, ToBase58};
-use codec::{Decode, Encode};
+use base58::ToBase58;
+use codec::Encode;
 use sgx_externalities::SgxExternalitiesTrait;
 use sp_core::H256;
 use std::path::Path;
@@ -93,7 +94,10 @@ pub fn exists(shard: &ShardIdentifier) -> bool {
 }
 
 pub fn init_shard(shard: &ShardIdentifier) -> SgxResult<()> {
-    fs::create_dir_all(format!("{}/{}", SHARDS_PATH, shard.encode().to_base58())).sgx_error()
+    let path = format!("{}/{}", SHARDS_PATH, shard.encode().to_base58());
+    fs::create_dir_all(path.clone()).sgx_error()?;
+    let mut file = fs::File::create(format!("{}/{}", path, ENCRYPTED_STATE_FILE)).sgx_error()?;
+    file.write_all(b"").sgx_error()
 }
 
 fn read(path: &str) -> SgxResult<Vec<u8>> {
@@ -125,22 +129,6 @@ fn encrypt(mut state: Vec<u8>) -> SgxResult<Vec<u8>> {
     Ok(state)
 }
 
-pub fn list_shards() -> SgxResult<Vec<ShardIdentifier>> {
-    let files = fs::read_dir(SHARDS_PATH).sgx_error()?;
-    let mut shards = Vec::new();
-    for file in files {
-        let s = file
-            .sgx_error()?
-            .file_name()
-            .into_string()
-            .sgx_error()?
-            .from_base58()
-            .sgx_error()?;
-        shards.push(ShardIdentifier::decode(&mut s.as_slice()).sgx_error()?);
-    }
-    Ok(shards)
-}
-
 pub fn test_encrypted_state_io_works() {
     let path = "test_state_file.bin";
     let plaintext = b"The quick brown fox jumps over the lazy dog.";
diff --git a/stf/src/lib.rs b/stf/src/lib.rs
index e20116e459..219e6c943e 100644
--- a/stf/src/lib.rs
+++ b/stf/src/lib.rs
@@ -63,7 +63,7 @@ pub enum TrustedOperationSigned {
     get(TrustedGetterSigned),
 }
 
-#[derive(Encode, Decode, Clone)]
+#[derive(Encode, Decode, Clone, Debug)]
 #[allow(non_camel_case_types)]
 pub enum TrustedCall {
     balance_set_balance(AccountId, AccountId, Balance, Balance),
diff --git a/stf/src/sgx.rs b/stf/src/sgx.rs
index 7c68a4519c..5d8e30f7ac 100644
--- a/stf/src/sgx.rs
+++ b/stf/src/sgx.rs
@@ -13,7 +13,7 @@ use sp_runtime::traits::Dispatchable;
 
 use crate::{
     AccountId, State, Stf, TrustedCall, TrustedCallSigned, TrustedGetter, TrustedGetterSigned,
-    SUBSRATEE_REGISTRY_MODULE, UNSHIELD,
+    ShardIdentifier, SUBSRATEE_REGISTRY_MODULE, UNSHIELD,
 };
 
 use sp_core::blake2_256;
@@ -69,11 +69,16 @@ impl Stf {
         ext
     }
 
-    pub fn update_storage(ext: &mut State, map_update: &HashMap<Vec<u8>, Vec<u8>>) {
+    pub fn update_storage(ext: &mut State, map_update: &HashMap<Vec<u8>, Option<Vec<u8>>>) {
         ext.execute_with(|| {
             map_update
                 .iter()
-                .for_each(|(k, v)| sp_io::storage::set(k, v))
+                .for_each(|(k, v)| {
+                    match v {
+                        Some(value) => sp_io::storage::set(k, value),
+                        None => sp_io::storage::clear(k)
+                    };
+                });
         });
     }
 
@@ -205,19 +210,28 @@ impl Stf {
     }
 
     pub fn get_storage_hashes_to_update_for_getter(getter: &TrustedGetterSigned) -> Vec<Vec<u8>> {
-        let key_hashes = Vec::new();
-        info!("No storage updates needed for getter: {:?}", getter.getter); // dummy. Is currently not needed
-        key_hashes
+        info!("No specific storage updates needed for getter. Returning those for on-block update: {:?}", getter.getter);
+        Self::storage_hashes_to_update_on_block()
     }
 
     pub fn storage_hashes_to_update_on_block() -> Vec<Vec<u8>> {
-        // let key_hashes = Vec::new();
-        // key_hashes.push(storage_value_key("dummy", "dummy"));
-        // key_hashes
-        Vec::new()
+        let mut key_hashes = Vec::new();
+
+        // get all shards that are currently registered
+        key_hashes.push(shards_key_hash());
+
+        key_hashes
     }
 }
 
+pub fn storage_hashes_to_update_per_shard(_shard: &ShardIdentifier) -> Vec<Vec<u8>> {
+    Vec::new()
+}
+
+pub fn shards_key_hash() -> Vec<u8> {
+    storage_value_key("EncointerCurrencies", "CurrencyIdentifiers")
+}
+
 // get the AccountInfo key where the nonce is stored
 pub fn nonce_key_hash(account: &AccountId) -> Vec<u8> {
     storage_map_key(
diff --git a/worker/src/main.rs b/worker/src/main.rs
index 057e5b1037..fd1249e074 100644
--- a/worker/src/main.rs
+++ b/worker/src/main.rs
@@ -289,7 +289,11 @@ fn worker(w_ip: &str, w_port: &str, mu_ra_port: &str, shard: &ShardIdentifier) {
         }
     }
 
+    println!("*** [+] finished remote attestation\n");
+
+    println!("*** Syncing chain relay\n\n");
     let mut latest_head = init_chain_relay(eid, &api);
+    println!("*** [+] Finished syncing chain relay\n");
 
     // ------------------------------------------------------------------------
     // subscribe to events and react on firing
@@ -470,33 +474,47 @@ pub fn sync_chain_relay(
 
     // Todo: Check, is this dangerous such that it could be an eternal or too big loop?
     let mut head = curr_head.clone();
+
+    let no_blocks_to_sync = head.block.header.number - last_synced_head.number;
+    if no_blocks_to_sync > 1 {
+        println!("Chain Relay is synced until block: {:?}", last_synced_head.number);
+        println!("Last finalized block number: {:?}\n", head.block.header.number);
+    }
+
     while head.block.header.parent_hash != last_synced_head.hash() {
         head = api
             .get_signed_block(Some(head.block.header.parent_hash))
             .unwrap();
         blocks_to_sync.push(head.clone());
-        debug!("Syncing Block: {:?}", head.block)
+
+        if head.block.header.number % 100 == 0 {
+            println!("Remaining blocks to fetch until last synced header: {:?}", head.block.header.number - last_synced_head.number)
+        }
     }
     blocks_to_sync.reverse();
-    debug!(
-        "Got {} headers to sync in chain relay.",
-        blocks_to_sync.len()
-    );
 
     let tee_accountid = enclave_account(eid);
-    let tee_nonce = get_nonce(&api, &tee_accountid);
-    let xts = enclave_sync_chain_relay(eid, blocks_to_sync, tee_nonce).unwrap();
-
-    let extrinsics: Vec<Vec<u8>> = Decode::decode(&mut xts.as_slice()).unwrap();
-    info!(
-        "Sync chain relay: Enclave wants to send {} extrinsics",
-        extrinsics.len()
-    );
+    // only feed 100 blocks at a time into the enclave to save enclave state regularly
+    let mut i = blocks_to_sync[0].block.header.number as usize;
+    for chunk in blocks_to_sync.chunks(100) {
+        let tee_nonce = get_nonce(&api, &tee_accountid);
+        let xts = enclave_sync_chain_relay(eid, chunk.to_vec(), tee_nonce).unwrap();
+        let extrinsics: Vec<Vec<u8>> = Decode::decode(&mut xts.as_slice()).unwrap();
+
+        if !extrinsics.is_empty() {
+            println!(
+                "Sync chain relay: Enclave wants to send {} extrinsics",
+                extrinsics.len()
+            );
+        }
+        for xt in extrinsics.into_iter() {
+            api.send_extrinsic(hex_encode(xt), XtStatus::InBlock)
+                .unwrap();
+        }
 
-    for xt in extrinsics.into_iter() {
-        api.send_extrinsic(hex_encode(xt), XtStatus::InBlock)
-            .unwrap();
+        i += chunk.len();
+        println!("Synced {} blocks out of {} finalized blocks", i, blocks_to_sync[0].block.header.number as usize + blocks_to_sync.len())
     }
 
     curr_head.block.header
diff --git a/worker/worker-api/src/lib.rs b/worker/worker-api/src/lib.rs
index b636f533c0..b622438f6a 100644
--- a/worker/worker-api/src/lib.rs
+++ b/worker/worker-api/src/lib.rs
@@ -39,7 +39,7 @@ pub struct Api {
 impl Api {
     pub fn new(url: String) -> Api {
         Api {
-            url: format!("ws://{}", url),
+            url,
         }
     }