diff --git a/Cargo.lock b/Cargo.lock
index 8678339ce2de9..6045fda557d8a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3412,6 +3412,7 @@ dependencies = [
"assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/core/executor/Cargo.toml b/core/executor/Cargo.toml
index f7e261f4e84fc..07e627e40294d 100644
--- a/core/executor/Cargo.toml
+++ b/core/executor/Cargo.toml
@@ -19,6 +19,7 @@ byteorder = "1.1"
lazy_static = "1.0"
parking_lot = "0.7.1"
log = "0.4"
+fnv = "1.0.6"
[dev-dependencies]
assert_matches = "1.1"
diff --git a/core/executor/src/heap.rs b/core/executor/src/heap.rs
new file mode 100644
index 0000000000000..3dedfa121e933
--- /dev/null
+++ b/core/executor/src/heap.rs
@@ -0,0 +1,380 @@
+// Copyright 2017-2019 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! This module implements a buddy allocation heap.
+//! It uses a binary tree and follows the concepts outlined in
+//! https://en.wikipedia.org/wiki/Buddy_memory_allocation.
+
+extern crate fnv;
+
+use std::vec;
+use self::fnv::FnvHashMap;
+
+// The pointers need to be aligned to 8 bytes.
+const ALIGNMENT: u32 = 8;
+
+// The block size needs to be a multiple of the memory alignment
+// requirement. This is so that the pointer returned by `allocate()`
+// always fulfills the alignment. In buddy allocation a pointer always
+// points to the start of a block, which with a fitting block size
+// will then be a multiple of the alignment requirement.
+const BLOCK_SIZE: u32 = 8192; // 2^13 bytes
+
+#[allow(path_statements)]
+fn _assert_block_size_aligned() {
+ // mem::transmute checks that type sizes are equal.
+ // this enables us to assert that pointers are aligned -- at compile time.
+ ::std::mem::transmute::<[u8; BLOCK_SIZE as usize % ALIGNMENT as usize], [u8; 0]>;
+}
+
+#[derive(PartialEq, Copy, Clone)]
+enum Node {
+ Free,
+ Full,
+ Split,
+}
+
+/// A buddy allocation heap, which tracks allocations and deallocations
+/// using a binary tree.
+pub struct Heap {
+	allocated_bytes: FnvHashMap<u32, u32>,
+ levels: u32,
+ ptr_offset: u32,
+	tree: vec::Vec<Node>,
+ total_size: u32,
+}
+
+impl Heap {
+
+ /// Creates a new buddy allocation heap.
+ ///
+ /// # Arguments
+ ///
+ /// * `ptr_offset` - The pointers returned by `allocate()`
+ /// start from this offset on. The pointer offset needs
+ /// to be aligned to a multiple of 8, hence a padding might
+ /// be added to align `ptr_offset` properly.
+ ///
+ /// * `heap_size` - The size available to this heap instance
+ /// (in bytes) for allocating memory.
+ ///
+ pub fn new(mut ptr_offset: u32, heap_size: u32) -> Self {
+ let padding = ptr_offset % ALIGNMENT;
+ if padding != 0 {
+ ptr_offset += ALIGNMENT - padding;
+ }
+
+ let leaves = heap_size / BLOCK_SIZE;
+ let levels = Heap::get_tree_levels(leaves);
+ let node_count: usize = (1 << levels + 1) - 1;
+
+ Heap {
+ allocated_bytes: FnvHashMap::default(),
+ levels,
+ ptr_offset,
+ tree: vec![Node::Free; node_count],
+ total_size: 0,
+ }
+ }
+
+ /// Gets requested number of bytes to allocate and returns a pointer.
+ pub fn allocate(&mut self, size: u32) -> u32 {
+ // Get the requested level from number of blocks requested
+ let blocks_needed = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ let block_offset = match self.allocate_block_in_tree(blocks_needed) {
+ Some(v) => v,
+ None => return 0,
+ };
+
+ let ptr = BLOCK_SIZE * block_offset as u32;
+ self.allocated_bytes.insert(ptr, size as u32);
+
+ self.total_size += size;
+ trace!(target: "wasm-heap", "Heap size over {} bytes after allocation", self.total_size);
+
+ self.ptr_offset + ptr
+ }
+
+	fn allocate_block_in_tree(&mut self, blocks_needed: u32) -> Option<usize> {
+ let levels_needed = Heap::get_tree_levels(blocks_needed);
+ if levels_needed > self.levels {
+ trace!(target: "wasm-heap", "Heap is too small: {:?} > {:?}", levels_needed, self.levels);
+ return None;
+ }
+
+ // Start at tree root and traverse down
+ let mut index = 0;
+ let mut current_level = self.levels;
+ 'down: loop {
+ let buddy_exists = index & 1 == 1;
+
+ if current_level == levels_needed {
+ if self.tree[index] == Node::Free {
+ self.tree[index] = Node::Full;
+
+ if index > 0 {
+ let parent = self.get_parent_node_index(index);
+ self.update_parent_nodes(parent);
+ }
+
+ break 'down;
+ }
+ } else {
+ match self.tree[index] {
+ Node::Full => {
+ if buddy_exists {
+ // Check if buddy is free
+ index += 1;
+ } else {
+ break 'down;
+ }
+ continue 'down;
+ },
+
+ Node::Free => {
+ // If node is free we split it and descend further down
+ self.tree[index] = Node::Split;
+ index = index * 2 + 1;
+ current_level -= 1;
+ continue 'down;
+ },
+
+ Node::Split => {
+ // Descend further
+ index = index * 2 + 1;
+ current_level -= 1;
+ continue 'down;
+ },
+ }
+ }
+
+ if buddy_exists {
+ // If a buddy exists it needs to be checked as well
+ index += 1;
+ continue 'down;
+ }
+
+ // Backtrack once we're at the bottom and haven't matched a free block yet
+ 'up: loop {
+ if index == 0 {
+ trace!(target: "wasm-heap", "Heap is too small: tree root reached.");
+ return None;
+ }
+
+ index = self.get_parent_node_index(index);
+ current_level += 1;
+ let has_buddy = index & 1 == 1;
+ if has_buddy {
+ index += 1;
+ break 'up;
+ }
+ }
+ }
+
+ let current_level_offset = (1 << self.levels - current_level) - 1;
+ let level_offset = index - current_level_offset;
+
+ let block_offset = level_offset * (1 << current_level);
+ Some(block_offset as usize)
+ }
+
+ /// Deallocates all blocks which were allocated for a pointer.
+ pub fn deallocate(&mut self, mut ptr: u32) {
+ ptr -= self.ptr_offset;
+
+ let allocated_size = match self.allocated_bytes.get(&ptr) {
+ Some(v) => *v,
+
+ // If nothing has been allocated for the pointer nothing happens
+ None => return (),
+ };
+
+ let count_blocks = (allocated_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ let block_offset = ptr / BLOCK_SIZE;
+ self.free(block_offset, count_blocks);
+ self.allocated_bytes.remove(&ptr).unwrap_or_default();
+
+ self.total_size = self.total_size.checked_sub(allocated_size).unwrap_or(0);
+ trace!(target: "wasm-heap", "Heap size over {} bytes after deallocation", self.total_size);
+ }
+
+ fn free(&mut self, block_offset: u32, count_blocks: u32) {
+ let requested_level = Heap::get_tree_levels(count_blocks);
+ let current_level_offset = (1 << self.levels - requested_level) - 1;
+ let level_offset = block_offset / (1 << requested_level);
+ let index_offset = current_level_offset + level_offset;
+
+ if index_offset > self.tree.len() as u32 - 1 {
+ trace!(target: "wasm-heap", "Index offset {} is > length of tree {}", index_offset, self.tree.len());
+ }
+
+ self.free_and_merge(index_offset as usize);
+
+ let parent = self.get_parent_node_index(index_offset as usize);
+ self.update_parent_nodes(parent);
+ }
+
+ fn get_parent_node_index(&mut self, index: usize) -> usize {
+ (index + 1) / 2 - 1
+ }
+
+ fn free_and_merge(&mut self, index: usize) {
+ self.tree[index] = Node::Free;
+
+ if index == 0 {
+ return;
+ }
+
+ let has_right_buddy = (index & 1) == 1;
+ let other_node = if has_right_buddy {
+ index + 1
+ } else {
+ index - 1
+ };
+
+ if self.tree[other_node] == Node::Free {
+ let parent = self.get_parent_node_index(index);
+ self.free_and_merge(parent);
+ }
+ }
+
+ fn update_parent_nodes(&mut self, index: usize) {
+ let left_child = index * 2 + 1;
+ let right_child = index * 2 + 2;
+
+ let children_free = self.tree[left_child] == Node::Free && self.tree[right_child] == Node::Free;
+ let children_full = self.tree[left_child] == Node::Full && self.tree[right_child] == Node::Full;
+ if children_free {
+ self.tree[index] = Node::Free;
+ } else if children_full {
+ self.tree[index] = Node::Full;
+ } else {
+ self.tree[index] = Node::Split;
+ }
+
+ if index == 0 {
+ // Tree root
+ return;
+ }
+
+ let parent = self.get_parent_node_index(index);
+ self.update_parent_nodes(parent);
+ }
+
+ fn get_tree_levels(mut count_blocks: u32) -> u32 {
+ if count_blocks == 0 {
+ 0
+ } else {
+ let mut counter = 0;
+ while {count_blocks >>= 1; count_blocks > 0} {
+ counter += 1;
+ }
+ counter
+ }
+ }
+
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn should_always_align_pointers_to_multiples_of_8() {
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(13, heap_size);
+
+ let ptr = heap.allocate(1);
+ assert_eq!(ptr, 16); // 16 is the next multiple of 8 from 13
+ }
+
+ #[test]
+ fn should_start_first_pointer_at_offset() {
+ let start_offset = 40;
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(start_offset, heap_size);
+
+ let ptr = heap.allocate(BLOCK_SIZE - 1);
+ assert_eq!(ptr, start_offset);
+ }
+
+ #[test]
+ fn should_start_second_pointer_at_second_block() {
+ let start_offset = 40;
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(start_offset, heap_size);
+
+ let _ptr1 = heap.allocate(BLOCK_SIZE - 1);
+ let ptr2 = heap.allocate(BLOCK_SIZE - 1);
+ assert_eq!(ptr2, start_offset + BLOCK_SIZE);
+ }
+
+ #[test]
+ fn should_not_panic_on_deallocation_of_nonexistent_pointer() {
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(1, heap_size);
+ let ret = heap.deallocate(heap_size + 1);
+ assert_eq!(ret, ());
+ }
+
+ #[test]
+ fn should_calculate_tree_size_from_heap_size() {
+ let heap_size = BLOCK_SIZE * 4;
+ let heap = super::Heap::new(1, heap_size);
+
+ assert_eq!(heap.levels, 2);
+ }
+
+ #[test]
+ fn should_round_tree_size_to_nearest_possible() {
+ let heap_size = BLOCK_SIZE * 4 + 1;
+ let heap = super::Heap::new(1, heap_size);
+
+ assert_eq!(heap.levels, 2);
+ }
+
+ #[test]
+ fn heap_size_should_stay_zero_in_total() {
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(1, heap_size);
+ assert_eq!(heap.total_size, 0);
+
+ let ptr = heap.allocate(42);
+ assert_eq!(heap.total_size, 42);
+
+ heap.deallocate(ptr);
+ assert_eq!(heap.total_size, 0);
+ }
+
+ #[test]
+ fn heap_size_should_stay_constant() {
+ let heap_size = BLOCK_SIZE * 4;
+ let mut heap = super::Heap::new(9, heap_size);
+ for _ in 1..10 {
+ assert_eq!(heap.total_size, 0);
+
+ let ptr = heap.allocate(42);
+ assert_eq!(ptr, 16);
+ assert_eq!(heap.total_size, 42);
+
+ heap.deallocate(ptr);
+ assert_eq!(heap.total_size, 0);
+ }
+
+ assert_eq!(heap.total_size, 0);
+ }
+
+}
diff --git a/core/executor/src/lib.rs b/core/executor/src/lib.rs
index ae2b383ae7ecd..8ce1c775fb9f9 100644
--- a/core/executor/src/lib.rs
+++ b/core/executor/src/lib.rs
@@ -63,6 +63,7 @@ mod wasm_executor;
#[macro_use]
mod native_executor;
mod sandbox;
+mod heap;
pub mod error;
pub use wasm_executor::WasmExecutor;
diff --git a/core/executor/src/wasm_executor.rs b/core/executor/src/wasm_executor.rs
index 60e2c84b1eb5a..87704805c1e4c 100644
--- a/core/executor/src/wasm_executor.rs
+++ b/core/executor/src/wasm_executor.rs
@@ -22,7 +22,7 @@ use wasmi::{
Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef,
};
use wasmi::RuntimeValue::{I32, I64};
-use wasmi::memory_units::Pages;
+use wasmi::memory_units::{Bytes, Pages};
use state_machine::Externalities;
use error::{Error, ErrorKind, Result};
use wasm_utils::UserError;
@@ -32,35 +32,7 @@ use primitives::sandbox as sandbox_primitives;
use primitives::{H256, Blake2Hasher};
use trie::ordered_trie_root;
use sandbox;
-
-
-struct Heap {
- end: u32,
-}
-
-impl Heap {
- /// Construct new `Heap` struct.
- ///
- /// Returns `Err` if the heap couldn't allocate required
- /// number of pages.
- ///
- /// This could mean that wasm binary specifies memory
- /// limit and we are trying to allocate beyond that limit.
- fn new(memory: &MemoryRef) -> Self {
- Heap {
- end: memory.used_size().0 as u32,
- }
- }
-
- fn allocate(&mut self, size: u32) -> u32 {
- let r = self.end;
- self.end += size;
- r
- }
-
- fn deallocate(&mut self, _offset: u32) {
- }
-}
+use heap;
#[cfg(feature="wasm-extern-trace")]
macro_rules! debug_trace {
@@ -73,7 +45,7 @@ macro_rules! debug_trace {
struct FunctionExecutor<'e, E: Externalities + 'e> {
sandbox_store: sandbox::Store,
- heap: Heap,
+ heap: heap::Heap,
memory: MemoryRef,
 	table: Option<TableRef>,
ext: &'e mut E,
@@ -82,9 +54,14 @@ struct FunctionExecutor<'e, E: Externalities + 'e> {
impl<'e, E: Externalities> FunctionExecutor<'e, E> {
 	fn new(m: MemoryRef, t: Option<TableRef>, e: &'e mut E) -> Result<Self> {
+ let current_size: Bytes = m.current_size().into();
+ let current_size = current_size.0 as u32;
+ let used_size = m.used_size().0 as u32;
+ let heap_size = current_size - used_size;
+
Ok(FunctionExecutor {
sandbox_store: sandbox::Store::new(),
- heap: Heap::new(&m),
+ heap: heap::Heap::new(used_size, heap_size),
memory: m,
table: t,
ext: e,
diff --git a/core/sr-io/without_std.rs b/core/sr-io/without_std.rs
index 6cc5a15d7f280..cc20aff96a0c9 100644
--- a/core/sr-io/without_std.rs
+++ b/core/sr-io/without_std.rs
@@ -132,9 +132,10 @@ pub fn storage(key: &[u8]) -> Option<Vec<u8>> {
if length == u32::max_value() {
None
} else {
- let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
- ext_free(ptr);
- Some(ret)
+ // Invariants required by Vec::from_raw_parts are not formally fulfilled.
+ // We don't allocate via String/Vec, but use a custom allocator instead.
+ // See #300 for more details.
+			Some(<Vec<u8>>::from_raw_parts(ptr, length as usize, length as usize))
}
}
}
@@ -147,9 +148,10 @@ pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
if length == u32::max_value() {
None
} else {
- let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
- ext_free(ptr);
- Some(ret)
+ // Invariants required by Vec::from_raw_parts are not formally fulfilled.
+ // We don't allocate via String/Vec, but use a custom allocator instead.
+ // See #300 for more details.
+			Some(<Vec<u8>>::from_raw_parts(ptr, length as usize, length as usize))
}
}
}