cranelift: Add heap support to the interpreter (#3302)
* cranelift: Add heaps to interpreter
* cranelift: Add RunTest Environment mechanism to test interpret
* cranelift: Remove unused `MemoryError`
* cranelift: Add docs for `State::resolve_global_value`
* cranelift: Rename heap tests
* cranelift: Refactor heap address resolution
* Fix typos and clarify docs (thanks @cfallin)
@@ -1,10 +1,11 @@
+test interpret
 test run
 target x86_64
 target s390x
 target aarch64
 
 
-function %static_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
+function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
@@ -16,13 +17,13 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 1) == 1
-; run: %static_heap_i64_load_store(0, -1) == -1
-; run: %static_heap_i64_load_store(16, 1) == 1
-; run: %static_heap_i64_load_store(16, -1) == -1
+; run: %static_heap_i64(0, 1) == 1
+; run: %static_heap_i64(0, -1) == -1
+; run: %static_heap_i64(16, 1) == 1
+; run: %static_heap_i64(16, -1) == -1
 
 
-function %static_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
+function %static_heap_i32(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32
@@ -34,13 +35,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i32_load_store(0, 1) == 1
-; run: %static_heap_i32_load_store(0, -1) == -1
-; run: %static_heap_i32_load_store(16, 1) == 1
-; run: %static_heap_i32_load_store(16, -1) == -1
+; run: %static_heap_i32(0, 1) == 1
+; run: %static_heap_i32(0, -1) == -1
+; run: %static_heap_i32(16, 1) == 1
+; run: %static_heap_i32(16, -1) == -1
 
 
-function %static_heap_i32_load_store_no_min(i64 vmctx, i32, i32) -> i32 {
+function %heap_no_min(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32
@@ -52,13 +53,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i32_load_store_no_min(0, 1) == 1
-; run: %static_heap_i32_load_store_no_min(0, -1) == -1
-; run: %static_heap_i32_load_store_no_min(16, 1) == 1
-; run: %static_heap_i32_load_store_no_min(16, -1) == -1
+; run: %heap_no_min(0, 1) == 1
+; run: %heap_no_min(0, -1) == -1
+; run: %heap_no_min(16, 1) == 1
+; run: %heap_no_min(16, -1) == -1
 
 
-function %dynamic_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
+function %dynamic_i64(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+8
@@ -71,13 +72,13 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_heap_i64_load_store(0, 1) == 1
-; run: %dynamic_heap_i64_load_store(0, -1) == -1
-; run: %dynamic_heap_i64_load_store(16, 1) == 1
-; run: %dynamic_heap_i64_load_store(16, -1) == -1
+; run: %dynamic_i64(0, 1) == 1
+; run: %dynamic_i64(0, -1) == -1
+; run: %dynamic_i64(16, 1) == 1
+; run: %dynamic_i64(16, -1) == -1
 
 
-function %dynamic_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
+function %dynamic_i32(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+8
@@ -90,13 +91,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_heap_i32_load_store(0, 1) == 1
-; run: %dynamic_heap_i32_load_store(0, -1) == -1
-; run: %dynamic_heap_i32_load_store(16, 1) == 1
-; run: %dynamic_heap_i32_load_store(16, -1) == -1
+; run: %dynamic_i32(0, 1) == 1
+; run: %dynamic_i32(0, -1) == -1
+; run: %dynamic_i32(16, 1) == 1
+; run: %dynamic_i32(16, -1) == -1
 
 
-function %multi_heap_load_store(i64 vmctx, i32, i32) -> i32 {
+function %multi_load_store(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+16
@@ -125,12 +126,47 @@ block0(v0: i64, v1: i32, v2: i32):
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
 ; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
-; run: %multi_heap_load_store(1, 2) == 3
-; run: %multi_heap_load_store(4, 5) == 9
+; run: %multi_load_store(1, 2) == 3
+; run: %multi_load_store(4, 5) == 9
 
 
-function %static_heap_i64_load_store_unaligned(i64 vmctx, i64, i32) -> i32 {
+; Uses multiple heaps, but heap0 refers to the second heap, and heap1 refers to the first heap
+; This is a regression test for the interpreter
+function %out_of_order(i64 vmctx, i32, i32) -> i32 {
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0+0
+    gv2 = load.i64 notrap aligned gv0+16
+    gv3 = load.i64 notrap aligned gv0+24
+    heap0 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
+    heap1 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
+
+block0(v0: i64, v1: i32, v2: i32):
+    v3 = iconst.i32 0
+    v4 = iconst.i64 0
+
+    ; Store lhs in heap0
+    v5 = heap_addr.i64 heap0, v3, 4
+    store.i32 v1, v5
+
+    ; Store rhs in heap1
+    v6 = heap_addr.i64 heap1, v4, 4
+    store.i32 v2, v6
+
+
+    v7 = load.i32 v5
+    v8 = load.i32 v6
+
+    v9 = iadd.i32 v7, v8
+    return v9
+}
+; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
+; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
+; run: %out_of_order(1, 2) == 3
+; run: %out_of_order(4, 5) == 9
+
+
+function %unaligned_access(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
@@ -142,18 +178,18 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store_unaligned(0, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(0, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(1, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(1, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(2, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(2, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(3, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(3, -1) == -1
+; run: %unaligned_access(0, 1) == 1
+; run: %unaligned_access(0, -1) == -1
+; run: %unaligned_access(1, 1) == 1
+; run: %unaligned_access(1, -1) == -1
+; run: %unaligned_access(2, 1) == 1
+; run: %unaligned_access(2, -1) == -1
+; run: %unaligned_access(3, 1) == 1
+; run: %unaligned_access(3, -1) == -1
 
 
 ; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
-function %static_heap_i64_iadd_imm(i64 vmctx, i32) -> i32 {
+function %iadd_imm(i64 vmctx, i32) -> i32 {
     gv0 = vmctx
     gv1 = iadd_imm.i64 gv0, 0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
@@ -166,5 +202,5 @@ block0(v0: i64, v1: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_iadd_imm(1) == 1
-; run: %static_heap_i64_iadd_imm(-1) == -1
+; run: %iadd_imm(1) == 1
+; run: %iadd_imm(-1) == -1
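The `; heap:` annotations above ask the test harness to allocate each heap and pass a vmctx pointer whose pointee is a flat array of (base, bound) pairs, one 64-bit slot each; that is why the tests load the heap base from `gv0+0` and the bound from `gv0+8`. A minimal sketch of that layout in Rust (names here are illustrative, not the harness's actual code):

// Sketch: build the vmctx word array the `; heap:` annotations describe.
fn build_vmctx(heaps: &[Vec<u8>]) -> Vec<u64> {
    let mut vmctx = Vec::with_capacity(heaps.len() * 2);
    for heap in heaps {
        let base = heap.as_ptr() as usize as u64;
        // The bound is one past the last addressable byte: base + len.
        vmctx.push(base);
        vmctx.push(base + heap.len() as u64);
    }
    vmctx
}

fn main() {
    let heap0 = vec![0u8; 0x1000];
    let vmctx = build_vmctx(&[heap0]);
    assert_eq!(vmctx.len(), 2); // base at vmctx+0, bound at vmctx+8
}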
@@ -1,6 +1,5 @@
 use anyhow::anyhow;
-use cranelift_codegen::data_value::DataValue;
-use cranelift_codegen::ir::Type;
+use cranelift_codegen::ir::{ArgumentPurpose, Function};
 use cranelift_reader::parse_heap_command;
 use cranelift_reader::{Comment, HeapCommand};
 
@@ -45,68 +44,37 @@ impl RuntestEnvironment {
         !self.heaps.is_empty()
     }
 
-    /// Allocates a struct to be injected into the test.
-    pub fn runtime_struct(&self) -> RuntestContext {
-        RuntestContext::new(&self)
-    }
-}
-
-type HeapMemory = Vec<u8>;
-
-/// A struct that provides info about the environment to the test
-#[derive(Debug, Clone)]
-pub struct RuntestContext {
-    /// Store the heap memory alongside the context info so that we don't accidentally deallocate
-    /// it too early.
-    #[allow(dead_code)]
-    heaps: Vec<HeapMemory>,
-
-    /// This is the actual struct that gets passed into the `vmctx` argument of the tests.
-    /// It has a specific memory layout that all tests agree with.
-    ///
-    /// Currently we only have to store heap info, so we store the heap start and end addresses in
-    /// a 64 bit slot for each heap.
-    ///
-    /// ┌────────────┐
-    /// │heap0: start│
-    /// ├────────────┤
-    /// │heap0: end  │
-    /// ├────────────┤
-    /// │heap1: start│
-    /// ├────────────┤
-    /// │heap1: end  │
-    /// ├────────────┤
-    /// │etc...      │
-    /// └────────────┘
-    context_struct: Vec<u64>,
-}
-
-impl RuntestContext {
-    pub fn new(env: &RuntestEnvironment) -> Self {
-        let heaps: Vec<HeapMemory> = env
-            .heaps
+    /// Allocates memory for heaps
+    pub fn allocate_memory(&self) -> Vec<HeapMemory> {
+        self.heaps
             .iter()
             .map(|cmd| {
                 let size: u64 = cmd.size.into();
                 vec![0u8; size as usize]
             })
-            .collect();
-
-        let context_struct = heaps
-            .iter()
-            .flat_map(|heap| [heap.as_ptr(), heap.as_ptr().wrapping_add(heap.len())])
-            .map(|p| p as usize as u64)
-            .collect();
-
-        Self {
-            heaps,
-            context_struct,
-        }
+            .collect()
     }
 
-    /// Creates a [DataValue] with a target isa pointer type to the context struct.
-    pub fn pointer(&self, ty: Type) -> DataValue {
-        let ptr = self.context_struct.as_ptr() as usize as i128;
-        DataValue::from_integer(ptr, ty).expect("Failed to cast pointer to native target size")
-    }
+    /// Validates the signature of a [Function] ensuring that if this environment is active, the
+    /// function has a `vmctx` argument
+    pub fn validate_signature(&self, func: &Function) -> Result<(), String> {
+        let first_arg_is_vmctx = func
+            .signature
+            .params
+            .first()
+            .map(|p| p.purpose == ArgumentPurpose::VMContext)
+            .unwrap_or(false);
+
+        if !first_arg_is_vmctx && self.is_active() {
+            return Err(concat!(
+                "This test requests a heap, but the first argument is not `i64 vmctx`.\n",
+                "See docs/testing.md for more info on using heap annotations."
+            )
+            .to_string());
+        }
+
+        Ok(())
+    }
 }
+
+pub(crate) type HeapMemory = Vec<u8>;
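The new `validate_signature` helper encodes one rule: a test that declares heaps must take `i64 vmctx` as its first parameter. A standalone sketch of the same check, using a simplified parameter type in place of cranelift's `AbiParam` (illustrative only):

#[derive(PartialEq)]
enum Purpose {
    Normal,
    VMContext,
}

// Errors when a heap-using test lacks a leading vmctx parameter.
fn validate(params: &[Purpose], heaps_requested: bool) -> Result<(), String> {
    let first_is_vmctx = params.first() == Some(&Purpose::VMContext);
    if heaps_requested && !first_is_vmctx {
        return Err("heap test without a leading `i64 vmctx` argument".to_string());
    }
    Ok(())
}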
@@ -3,10 +3,13 @@
 //! The `interpret` test command interprets each function on the host machine
 //! using [RunCommand](cranelift_reader::RunCommand)s.
 
+use crate::runtest_environment::RuntestEnvironment;
 use crate::subtest::{Context, SubTest};
+use cranelift_codegen::data_value::DataValue;
+use cranelift_codegen::ir::types::I64;
 use cranelift_codegen::{self, ir};
 use cranelift_interpreter::environment::FunctionStore;
-use cranelift_interpreter::interpreter::{Interpreter, InterpreterState};
+use cranelift_interpreter::interpreter::{HeapInit, Interpreter, InterpreterState};
 use cranelift_interpreter::step::ControlFlow;
 use cranelift_reader::{parse_run_command, TestCommand};
 use log::trace;
@@ -36,6 +39,7 @@ impl SubTest for TestInterpret {
     }
 
     fn run(&self, func: Cow<ir::Function>, context: &Context) -> anyhow::Result<()> {
+        let test_env = RuntestEnvironment::parse(&context.details.comments[..])?;
         for comment in context.details.comments.iter() {
             if let Some(command) = parse_run_command(comment.text, &func.signature)? {
                 trace!("Parsed run command: {}", command);
@@ -44,11 +48,21 @@ impl SubTest for TestInterpret {
                 env.add(func.name.to_string(), &func);
 
                 command
-                    .run(|func_name, args| {
+                    .run(|func_name, run_args| {
+                        test_env.validate_signature(&func)?;
+
+                        let mut state = InterpreterState::default().with_function_store(env);
+
+                        let mut args = Vec::with_capacity(run_args.len());
+                        if test_env.is_active() {
+                            let vmctx_addr = register_heaps(&mut state, &test_env);
+                            args.push(vmctx_addr);
+                        }
+                        args.extend_from_slice(run_args);
+
                         // Because we have stored function names with a leading %, we need to re-add it.
                         let func_name = &format!("%{}", func_name);
-                        let state = InterpreterState::default().with_function_store(env);
-                        match Interpreter::new(state).call_by_name(func_name, args) {
+                        match Interpreter::new(state).call_by_name(func_name, &args) {
                             Ok(ControlFlow::Return(results)) => Ok(results.to_vec()),
                             Ok(_) => {
                                 panic!("Unexpected returned control flow--this is likely a bug.")
@@ -62,3 +76,34 @@ impl SubTest for TestInterpret {
         Ok(())
     }
 }
+
+/// Build a VMContext struct with the layout described in docs/testing.md.
+pub fn register_heaps<'a>(
+    state: &mut InterpreterState<'a>,
+    test_env: &RuntestEnvironment,
+) -> DataValue {
+    let mem = test_env.allocate_memory();
+    let vmctx_struct = mem
+        .into_iter()
+        // This memory layout (a contiguous list of base + bound ptrs)
+        // is enforced by the RuntestEnvironment when parsing the heap
+        // directives. So we are safe to replicate that here.
+        .flat_map(|mem| {
+            let heap_len = mem.len() as u64;
+            let heap = state.register_heap(HeapInit::FromBacking(mem));
+            [
+                state.get_heap_address(I64, heap, 0).unwrap(),
+                state.get_heap_address(I64, heap, heap_len).unwrap(),
+            ]
+        })
+        .map(|addr| {
+            let mut mem = [0u8; 8];
+            addr.write_to_slice(&mut mem[..]);
+            mem
+        })
+        .flatten()
+        .collect();
+
+    let vmctx_heap = state.register_heap(HeapInit::FromBacking(vmctx_struct));
+    state.get_heap_address(I64, vmctx_heap, 0).unwrap()
+}
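`register_heaps` serializes the (base, bound) address pairs into a byte buffer and then registers that buffer itself as one more interpreter heap, so the `load.i64 notrap aligned gv0+0` in the tests can be serviced like any other heap load. A sketch of the byte image it builds, with plain u64 addresses standing in for interpreter `DataValue`s and little-endian byte order chosen here purely for concreteness:

// Sketch: one 8-byte slot for each heap base, one for each bound.
fn serialize_pairs(pairs: &[(u64, u64)]) -> Vec<u8> {
    let mut vmctx = Vec::with_capacity(pairs.len() * 16);
    for &(base, bound) in pairs {
        vmctx.extend_from_slice(&base.to_le_bytes());
        vmctx.extend_from_slice(&bound.to_le_bytes());
    }
    vmctx
}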
@@ -3,10 +3,11 @@
 //! The `run` test command compiles each function on the host machine and executes it
 
 use crate::function_runner::SingleFunctionCompiler;
-use crate::runtest_environment::RuntestEnvironment;
+use crate::runtest_environment::{HeapMemory, RuntestEnvironment};
 use crate::subtest::{Context, SubTest};
+use cranelift_codegen::data_value::DataValue;
 use cranelift_codegen::ir;
-use cranelift_codegen::ir::ArgumentPurpose;
+use cranelift_codegen::ir::Type;
 use cranelift_reader::parse_run_command;
 use cranelift_reader::TestCommand;
 use log::trace;
@@ -64,25 +65,13 @@ impl SubTest for TestRun {
         let compiled_fn = compiler.compile(func.clone().into_owned())?;
         command
             .run(|_, run_args| {
-                let runtime_struct = test_env.runtime_struct();
-
-                let first_arg_is_vmctx = func
-                    .signature
-                    .params
-                    .first()
-                    .map(|p| p.purpose == ArgumentPurpose::VMContext)
-                    .unwrap_or(false);
-
-                if !first_arg_is_vmctx && test_env.is_active() {
-                    return Err(concat!(
-                        "This test requests a heap, but the first argument is not `i64 vmctx`.\n",
-                        "See docs/testing.md for more info on using heap annotations."
-                    ).to_string());
-                }
+                test_env.validate_signature(&func)?;
+                let (_heaps, _ctx_struct, vmctx_ptr) =
+                    build_vmctx_struct(&test_env, context.isa.unwrap().pointer_type());
 
                 let mut args = Vec::with_capacity(run_args.len());
                 if test_env.is_active() {
-                    args.push(runtime_struct.pointer(context.isa.unwrap().pointer_type()));
+                    args.push(vmctx_ptr);
                 }
                 args.extend_from_slice(run_args);
 
@@ -94,3 +83,24 @@ impl SubTest for TestRun {
         Ok(())
     }
 }
+
+/// Build a VMContext struct with the layout described in docs/testing.md.
+pub fn build_vmctx_struct(
+    test_env: &RuntestEnvironment,
+    ptr_ty: Type,
+) -> (Vec<HeapMemory>, Vec<u64>, DataValue) {
+    let heaps = test_env.allocate_memory();
+
+    let context_struct: Vec<u64> = heaps
+        .iter()
+        .flat_map(|heap| [heap.as_ptr(), heap.as_ptr().wrapping_add(heap.len())])
+        .map(|p| p as usize as u64)
+        .collect();
+
+    let ptr = context_struct.as_ptr() as usize as i128;
+    let ptr_dv =
+        DataValue::from_integer(ptr, ptr_ty).expect("Failed to cast pointer to native target size");
+
+    // Return all these to make sure we don't deallocate the heaps too early
+    (heaps, context_struct, ptr_dv)
+}
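`build_vmctx_struct` returns the heaps and the context struct alongside the pointer because the `DataValue` holds only a raw address: if the backing `Vec`s were dropped, the compiled test would read freed memory through vmctx. A minimal illustration of the hazard the tuple return avoids (illustrative, not code from this commit):

// Anti-pattern: the Vec is dropped when the function returns, so the
// integer handed back is the address of freed memory.
fn dangling_heap_base() -> u64 {
    let heap = vec![0u8; 0x1000];
    heap.as_ptr() as usize as u64
}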
@@ -8,12 +8,16 @@ use crate::frame::Frame;
 use crate::instruction::DfgInstructionContext;
 use crate::state::{MemoryError, State};
 use crate::step::{step, ControlFlow, StepError};
-use crate::value::ValueError;
+use crate::value::{Value, ValueError};
 use cranelift_codegen::data_value::DataValue;
 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
-use cranelift_codegen::ir::{Block, FuncRef, Function, StackSlot, Type, Value as ValueRef};
+use cranelift_codegen::ir::{
+    ArgumentPurpose, Block, FuncRef, Function, GlobalValue, GlobalValueData, Heap, StackSlot, Type,
+    Value as ValueRef,
+};
 use log::trace;
 use std::collections::HashSet;
+use std::convert::{TryFrom, TryInto};
 use std::fmt::Debug;
 use std::iter;
 use thiserror::Error;
@@ -172,6 +176,21 @@ pub enum InterpreterError {
     FuelExhausted,
 }
 
+pub type HeapBacking = Vec<u8>;
+
+/// Represents a registered heap with an interpreter.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct HeapId(u32);
+
+/// Options for initializing a heap memory region
+#[derive(Debug)]
+pub enum HeapInit {
+    /// A zero initialized heap with `size` bytes
+    Zeroed(usize),
+    /// Initializes the heap with the backing memory unchanged.
+    FromBacking(HeapBacking),
+}
+
 /// Maintains the [Interpreter]'s state, implementing the [State] trait.
 pub struct InterpreterState<'a> {
     pub functions: FunctionStore<'a>,
@@ -179,7 +198,7 @@ pub struct InterpreterState<'a> {
     /// Number of bytes from the bottom of the stack where the current frame's stack space is
     pub frame_offset: usize,
     pub stack: Vec<u8>,
-    pub heap: Vec<u8>,
+    pub heaps: Vec<HeapBacking>,
     pub iflags: HashSet<IntCC>,
     pub fflags: HashSet<FloatCC>,
 }
@@ -191,7 +210,7 @@ impl Default for InterpreterState<'_> {
             frame_stack: vec![],
             frame_offset: 0,
             stack: Vec::with_capacity(1024),
-            heap: vec![0; 1024],
+            heaps: Vec::new(),
             iflags: HashSet::new(),
             fflags: HashSet::new(),
         }
@@ -203,6 +222,57 @@ impl<'a> InterpreterState<'a> {
         Self { functions, ..self }
     }
 
+    /// Registers a static heap and returns a reference to it
+    ///
+    /// This heap reference can be used to generate a heap pointer, which
+    /// can be used inside the interpreter to load / store values into the heap.
+    ///
+    /// ```rust
+    /// # use cranelift_codegen::ir::types::I64;
+    /// # use cranelift_interpreter::interpreter::{InterpreterState, HeapInit};
+    /// let mut state = InterpreterState::default();
+    /// let heap0 = state.register_heap(HeapInit::Zeroed(1024));
+    ///
+    /// let backing = Vec::from([10u8; 24]);
+    /// let heap1 = state.register_heap(HeapInit::FromBacking(backing));
+    /// ```
+    pub fn register_heap(&mut self, init: HeapInit) -> HeapId {
+        let heap_id = HeapId(self.heaps.len() as u32);
+
+        self.heaps.push(match init {
+            HeapInit::Zeroed(size) => iter::repeat(0).take(size).collect(),
+            HeapInit::FromBacking(backing) => backing,
+        });
+
+        heap_id
+    }
+
+    /// Returns a heap address that can be used inside the interpreter
+    ///
+    /// ```rust
+    /// # use cranelift_codegen::ir::types::I64;
+    /// # use cranelift_interpreter::interpreter::{InterpreterState, HeapInit};
+    /// let mut state = InterpreterState::default();
+    /// let heap_id = state.register_heap(HeapInit::Zeroed(1024));
+    /// let heap_base = state.get_heap_address(I64, heap_id, 0);
+    /// let heap_bound = state.get_heap_address(I64, heap_id, 1024);
+    /// ```
+    pub fn get_heap_address(
+        &self,
+        ty: Type,
+        heap_id: HeapId,
+        offset: u64,
+    ) -> Result<DataValue, MemoryError> {
+        let size = AddressSize::try_from(ty)?;
+        let heap_id = heap_id.0 as u64;
+        let addr = Address::from_parts(size, AddressRegion::Heap, heap_id, offset)?;
+
+        self.validate_address(&addr)?;
+        let dv = addr.try_into()?;
+
+        Ok(dv)
+    }
+
     fn current_frame_mut(&mut self) -> &mut Frame<'a> {
         let num_frames = self.frame_stack.len();
         match num_frames {
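Note how `get_heap_address` encodes which heap an address belongs to: `Address::from_parts` stores the heap's index in the address's entry field, alongside the `AddressRegion::Heap` tag and the byte offset, so later loads and stores can find the right backing buffer. A simplified model of that tagged-address scheme (stand-in types, not the interpreter's actual `Address`):

#[derive(Debug, Clone, Copy, PartialEq)]
enum Region {
    Stack,
    Heap,
}

#[derive(Debug, Clone, Copy)]
struct Addr {
    region: Region,
    entry: u64,  // for Region::Heap: index into the interpreter's heap table
    offset: u64, // byte offset within that region
}

fn heap_addr(heap_index: u64, offset: u64) -> Addr {
    Addr { region: Region::Heap, entry: heap_index, offset }
}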
@@ -310,23 +380,54 @@ impl<'a> State<'a, DataValue> for InterpreterState<'a> {
         Address::from_parts(size, AddressRegion::Stack, 0, final_offset)
     }
 
-    fn heap_address(&self, _size: AddressSize, _offset: u64) -> Result<Address, MemoryError> {
-        unimplemented!()
+    /// Builds an [Address] for the [Heap] referenced in the currently executing function.
+    ///
+    /// A CLIF Heap is essentially a GlobalValue and some metadata about that memory
+    /// region, such as bounds. Since heaps are based on Global Values it means that
+    /// once that GV is resolved we can essentially end up anywhere in memory.
+    ///
+    /// To build an [Address] we perform GV resolution, and try to ensure that we end up
+    /// in a valid region of memory.
+    fn heap_address(
+        &self,
+        size: AddressSize,
+        heap: Heap,
+        offset: u64,
+    ) -> Result<Address, MemoryError> {
+        let heap_data = &self.get_current_function().heaps[heap];
+        let heap_base = self.resolve_global_value(heap_data.base)?;
+        let mut addr = Address::try_from(heap_base)?;
+        addr.size = size;
+        addr.offset += offset;
+
+        // After resolving the address can point anywhere, we need to check if it's
+        // still valid.
+        self.validate_address(&addr)?;
+
+        Ok(addr)
     }
 
     fn checked_load(&self, addr: Address, ty: Type) -> Result<DataValue, MemoryError> {
         let load_size = ty.bytes() as usize;
+        let addr_start = addr.offset as usize;
+        let addr_end = addr_start + load_size;
 
         let src = match addr.region {
             AddressRegion::Stack => {
-                let addr_start = addr.offset as usize;
-                let addr_end = addr_start + load_size;
                 if addr_end > self.stack.len() {
                     return Err(MemoryError::OutOfBoundsLoad { addr, load_size });
                 }
 
                 &self.stack[addr_start..addr_end]
             }
+            AddressRegion::Heap => {
+                let heap_mem = match self.heaps.get(addr.entry as usize) {
+                    Some(mem) if addr_end <= mem.len() => mem,
+                    _ => return Err(MemoryError::OutOfBoundsLoad { addr, load_size }),
+                };
+
+                &heap_mem[addr_start..addr_end]
+            }
             _ => unimplemented!(),
         };
 
@@ -335,28 +436,172 @@ impl<'a> State<'a, DataValue> for InterpreterState<'a> {
 
     fn checked_store(&mut self, addr: Address, v: DataValue) -> Result<(), MemoryError> {
         let store_size = v.ty().bytes() as usize;
+        let addr_start = addr.offset as usize;
+        let addr_end = addr_start + store_size;
 
         let dst = match addr.region {
             AddressRegion::Stack => {
-                let addr_start = addr.offset as usize;
-                let addr_end = addr_start + store_size;
                 if addr_end > self.stack.len() {
                     return Err(MemoryError::OutOfBoundsStore { addr, store_size });
                 }
 
                 &mut self.stack[addr_start..addr_end]
             }
+            AddressRegion::Heap => {
+                let heap_mem = match self.heaps.get_mut(addr.entry as usize) {
+                    Some(mem) if addr_end <= mem.len() => mem,
+                    _ => return Err(MemoryError::OutOfBoundsStore { addr, store_size }),
+                };
+
+                &mut heap_mem[addr_start..addr_end]
+            }
             _ => unimplemented!(),
         };
 
         Ok(v.write_to_slice(dst))
     }
+
+    /// Non-recursively resolves a global value until its address is found
+    fn resolve_global_value(&self, gv: GlobalValue) -> Result<DataValue, MemoryError> {
+        // Resolving a Global Value is a "pointer" chasing operation that lends itself to
+        // using a recursive solution. However, resolving this in a recursive manner
+        // is a bad idea because it's very easy to add a bunch of global values and
+        // blow up the call stack.
+        //
+        // Adding to the challenges of this, is that the operations possible with GlobalValues
+        // mean that we cannot use a simple loop to resolve each global value, we must keep
+        // a pending list of operations.
+
+        // These are the possible actions that we can perform
+        #[derive(Debug)]
+        enum ResolveAction {
+            Resolve(GlobalValue),
+            /// Perform an add on the current address
+            Add(DataValue),
+            /// Load from the current address and replace it with the loaded value
+            Load {
+                /// Offset added to the base pointer before doing the load.
+                offset: i32,
+
+                /// Type of the loaded value.
+                global_type: Type,
+            },
+        }
+
+        let func = self.get_current_function();
+
+        // We start with a sentinel value that will fail if we try to load / add to it
+        // without resolving the base GV first.
+        let mut current_val = DataValue::B(false);
+        let mut action_stack = vec![ResolveAction::Resolve(gv)];
+
+        loop {
+            match action_stack.pop() {
+                Some(ResolveAction::Resolve(gv)) => match func.global_values[gv] {
+                    GlobalValueData::VMContext => {
+                        // Fetch the VMContext value from the values of the first block in the function
+                        let index = func
+                            .signature
+                            .params
+                            .iter()
+                            .enumerate()
+                            .find(|(_, p)| p.purpose == ArgumentPurpose::VMContext)
+                            .map(|(i, _)| i)
+                            // This should be validated by the verifier
+                            .expect("No VMCtx argument was found, but one is referenced");
+
+                        let first_block =
+                            func.layout.blocks().next().expect("to have a first block");
+                        let vmctx_value = func.dfg.block_params(first_block)[index];
+                        current_val = self.current_frame().get(vmctx_value).clone();
+                    }
+                    GlobalValueData::Load {
+                        base,
+                        offset,
+                        global_type,
+                        ..
+                    } => {
+                        action_stack.push(ResolveAction::Load {
+                            offset: offset.into(),
+                            global_type,
+                        });
+                        action_stack.push(ResolveAction::Resolve(base));
+                    }
+                    GlobalValueData::IAddImm {
+                        base,
+                        offset,
+                        global_type,
+                    } => {
+                        let offset: i64 = offset.into();
+                        let dv = DataValue::int(offset as i128, global_type)
+                            .map_err(|_| MemoryError::InvalidAddressType(global_type))?;
+                        action_stack.push(ResolveAction::Add(dv));
+                        action_stack.push(ResolveAction::Resolve(base));
+                    }
+                    GlobalValueData::Symbol { .. } => unimplemented!(),
+                },
+                Some(ResolveAction::Add(dv)) => {
+                    current_val = current_val
+                        .add(dv.clone())
+                        .map_err(|_| MemoryError::InvalidAddress(dv))?;
+                }
+                Some(ResolveAction::Load {
+                    offset,
+                    global_type,
+                }) => {
+                    let mut addr = Address::try_from(current_val)?;
+                    // We can forego bounds checking here since it's performed in `checked_load`
+                    addr.offset += offset as u64;
+                    current_val = self.checked_load(addr, global_type)?;
+                }
+
+                // We are done resolving this, return the current value
+                None => return Ok(current_val),
+            }
+        }
+    }
+
+    fn validate_address(&self, addr: &Address) -> Result<(), MemoryError> {
+        match addr.region {
+            AddressRegion::Stack => {
+                let stack_len = self.stack.len() as u64;
+
+                if addr.offset > stack_len {
+                    return Err(MemoryError::InvalidEntry {
+                        entry: addr.entry,
+                        max: self.heaps.len() as u64,
+                    });
+                }
+            }
+            AddressRegion::Heap => {
+                let heap_len = self
+                    .heaps
+                    .get(addr.entry as usize)
+                    .ok_or_else(|| MemoryError::InvalidEntry {
+                        entry: addr.entry,
+                        max: self.heaps.len() as u64,
+                    })
+                    .map(|heap| heap.len() as u64)?;
+
+                if addr.offset > heap_len {
+                    return Err(MemoryError::InvalidOffset {
+                        offset: addr.offset,
+                        max: heap_len,
+                    });
+                }
+            }
+            _ => unimplemented!(),
+        }
+
+        Ok(())
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::step::CraneliftTrap;
+    use cranelift_codegen::ir::types::I64;
     use cranelift_codegen::ir::TrapCode;
     use cranelift_reader::parse_functions;
 
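The comment in `resolve_global_value` explains why resolution uses an explicit work list instead of recursion. A condensed model of that action-stack walk over a toy global-value graph (simplified stand-ins, not cranelift's `GlobalValueData`):

#[derive(Clone)]
enum Gv {
    VmCtx,
    IAddImm { base: usize, imm: i64 },
    Load { base: usize },
}

enum Action {
    Resolve(usize),
    Add(i64),
    Load,
}

fn resolve(gvs: &[Gv], root: usize, vmctx: i64, mem: impl Fn(i64) -> i64) -> i64 {
    let mut value = 0;
    let mut actions = vec![Action::Resolve(root)];
    while let Some(action) = actions.pop() {
        match action {
            Action::Resolve(i) => match gvs[i].clone() {
                Gv::VmCtx => value = vmctx,
                Gv::IAddImm { base, imm } => {
                    // Push the follow-up first so it pops *after* the base resolves.
                    actions.push(Action::Add(imm));
                    actions.push(Action::Resolve(base));
                }
                Gv::Load { base } => {
                    actions.push(Action::Load);
                    actions.push(Action::Resolve(base));
                }
            },
            Action::Add(imm) => value += imm,
            Action::Load => value = mem(value),
        }
    }
    value
}

For a chain like `gv1 = load.i64 gv0` with `gv0 = vmctx`, resolving gv1 pushes [Load, Resolve(gv0)], resolves the vmctx argument first, then performs the load, exactly mirroring the order the interpreter needs.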
@@ -720,4 +965,51 @@ mod tests {
 
         assert_eq!(trap, CraneliftTrap::User(TrapCode::HeapOutOfBounds));
     }
+
+    /// Most heap tests are in .clif files using the filetest machinery. However, this is a sanity
+    /// check that the heap mechanism works without the rest of the filetest infrastructure
+    #[test]
+    fn heap_sanity_test() {
+        let code = "
+        function %heap_load_store(i64 vmctx) -> b1 {
+            gv0 = vmctx
+            gv1 = load.i64 notrap aligned gv0+0
+            ; gv2/3 do nothing, but makes sure we understand the iadd_imm mechanism
+            gv2 = iadd_imm.i64 gv1, 1
+            gv3 = iadd_imm.i64 gv2, -1
+            heap0 = static gv3, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
+
+        block0(v0: i64):
+            v1 = iconst.i64 0
+            v2 = iconst.i64 123
+            v3 = heap_addr.i64 heap0, v1, 8
+            store.i64 v2, v3
+            v4 = load.i64 v3
+            v5 = icmp eq v2, v4
+            return v5
+        }";
+
+        let func = parse_functions(code).unwrap().into_iter().next().unwrap();
+        let mut env = FunctionStore::default();
+        env.add(func.name.to_string(), &func);
+        let mut state = InterpreterState::default().with_function_store(env);
+
+        let heap0 = state.register_heap(HeapInit::Zeroed(0x1000));
+        let base_addr = state.get_heap_address(I64, heap0, 0).unwrap();
+
+        // Build a vmctx struct by writing the base pointer at index 0
+        let mut vmctx_struct = vec![0u8; 8];
+        base_addr.write_to_slice(&mut vmctx_struct[..]);
+
+        // This is our vmctx "heap"
+        let vmctx = state.register_heap(HeapInit::FromBacking(vmctx_struct));
+        let vmctx_addr = state.get_heap_address(I64, vmctx, 0).unwrap();
+
+        let result = Interpreter::new(state)
+            .call_by_name("%heap_load_store", &[vmctx_addr])
+            .unwrap()
+            .unwrap_return();
+
+        assert_eq!(result, vec![DataValue::B(true)])
+    }
 }
@@ -3,7 +3,7 @@
 use crate::address::{Address, AddressSize};
 use cranelift_codegen::data_value::DataValue;
 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
-use cranelift_codegen::ir::{FuncRef, Function, StackSlot, Type, Value};
+use cranelift_codegen::ir::{FuncRef, Function, GlobalValue, Heap, StackSlot, Type, Value};
 use cranelift_entity::PrimaryMap;
 use smallvec::SmallVec;
 use thiserror::Error;
@@ -67,13 +67,25 @@ pub trait State<'a, V> {
         offset: u64,
     ) -> Result<Address, MemoryError>;
     /// Computes a heap address
-    fn heap_address(&self, size: AddressSize, offset: u64) -> Result<Address, MemoryError>;
+    fn heap_address(
+        &self,
+        size: AddressSize,
+        heap: Heap,
+        offset: u64,
+    ) -> Result<Address, MemoryError>;
     /// Retrieve a value `V` from memory at the given `address`, checking if it belongs either to the
     /// stack or to one of the heaps; the number of bytes loaded corresponds to the specified [Type].
     fn checked_load(&self, address: Address, ty: Type) -> Result<V, MemoryError>;
     /// Store a value `V` into memory at the given `address`, checking if it belongs either to the
     /// stack or to one of the heaps; the number of bytes stored corresponds to the specified [Type].
     fn checked_store(&mut self, address: Address, v: V) -> Result<(), MemoryError>;
+
+    /// Given a global value, compute the final value for that global value, applying all operations
+    /// in intermediate global values.
+    fn resolve_global_value(&self, gv: GlobalValue) -> Result<V, MemoryError>;
+
+    /// Checks if an address is valid and within a known region of memory
+    fn validate_address(&self, address: &Address) -> Result<(), MemoryError>;
 }
 
 #[derive(Error, Debug)]
@@ -151,7 +163,12 @@ where
         unimplemented!()
     }
 
-    fn heap_address(&self, _size: AddressSize, _offset: u64) -> Result<Address, MemoryError> {
+    fn heap_address(
+        &self,
+        _size: AddressSize,
+        _heap: Heap,
+        _offset: u64,
+    ) -> Result<Address, MemoryError> {
         unimplemented!()
     }
 
@@ -162,4 +179,12 @@ where
     fn checked_store(&mut self, _addr: Address, _v: V) -> Result<(), MemoryError> {
         unimplemented!()
     }
+
+    fn resolve_global_value(&self, _gv: GlobalValue) -> Result<V, MemoryError> {
+        unimplemented!()
+    }
+
+    fn validate_address(&self, _addr: &Address) -> Result<(), MemoryError> {
+        unimplemented!()
+    }
 }
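The `checked_load`/`checked_store` contract documented in this trait reduces to one bounds rule per region: an access of `size` bytes at `offset` is in bounds iff `offset + size <= len` of the backing stack or heap. A sketch of that predicate, with an explicit overflow check added here for robustness (the interpreter's implementation computes `addr_start + load_size` directly):

fn access_in_bounds(offset: usize, size: usize, len: usize) -> bool {
    offset
        .checked_add(size)
        .map(|end| end <= len)
        .unwrap_or(false)
}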
@@ -7,11 +7,12 @@ use crate::value::{Value, ValueConversionKind, ValueError, ValueResult};
 use cranelift_codegen::data_value::DataValue;
 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
 use cranelift_codegen::ir::{
-    types, Block, FuncRef, Function, InstructionData, Opcode, TrapCode, Value as ValueRef,
+    types, Block, FuncRef, Function, InstructionData, Opcode, TrapCode, Type, Value as ValueRef,
 };
 use log::trace;
 use smallvec::{smallvec, SmallVec};
 use std::convert::{TryFrom, TryInto};
+use std::fmt::Debug;
 use std::ops::RangeFrom;
 use thiserror::Error;
 
@@ -135,11 +136,11 @@ where
         Err(e) => ControlFlow::Trap(CraneliftTrap::User(memerror_to_trap(e))),
     };
 
-    let calculate_addr = |imm: V, args: SmallVec<[V; 1]>| -> ValueResult<u64> {
-        let imm = imm.convert(ValueConversionKind::ZeroExtend(ctrl_ty))?;
+    let calculate_addr = |addr_ty: Type, imm: V, args: SmallVec<[V; 1]>| -> ValueResult<u64> {
+        let imm = imm.convert(ValueConversionKind::ZeroExtend(addr_ty))?;
         let args = args
             .into_iter()
-            .map(|v| v.convert(ValueConversionKind::ZeroExtend(ctrl_ty)))
+            .map(|v| v.convert(ValueConversionKind::ZeroExtend(addr_ty)))
             .collect::<ValueResult<SmallVec<[V; 1]>>>()?;
 
         Ok(sum(imm, args)? as u64)
@@ -315,7 +316,7 @@ where
         _ => unreachable!(),
     };
 
-    let addr_value = calculate_addr(imm(), args()?)?;
+    let addr_value = calculate_addr(types::I64, imm(), args()?)?;
     let loaded = assign_or_memtrap(
         Address::try_from(addr_value).and_then(|addr| state.checked_load(addr, load_ty)),
     );
@@ -338,7 +339,7 @@ where
         _ => unreachable!(),
    };
 
-    let addr_value = calculate_addr(imm(), args_range(1..)?)?;
+    let addr_value = calculate_addr(types::I64, imm(), args_range(1..)?)?;
     let reduced = if let Some(c) = kind {
         arg(0)?.convert(c)?
     } else {
@@ -383,7 +384,21 @@ where
         Opcode::GlobalValue => unimplemented!("GlobalValue"),
         Opcode::SymbolValue => unimplemented!("SymbolValue"),
         Opcode::TlsValue => unimplemented!("TlsValue"),
-        Opcode::HeapAddr => unimplemented!("HeapAddr"),
+        Opcode::HeapAddr => {
+            if let InstructionData::HeapAddr { heap, .. } = inst {
+                let load_ty = inst_context.controlling_type().unwrap();
+                let offset = calculate_addr(ctrl_ty, imm(), args()?)? as u64;
+                assign_or_memtrap({
+                    AddressSize::try_from(load_ty).and_then(|addr_size| {
+                        let addr = state.heap_address(addr_size, heap, offset)?;
+                        let dv = DataValue::try_from(addr)?;
+                        Ok(dv.into())
+                    })
+                })
+            } else {
+                unreachable!()
+            }
+        }
         Opcode::GetPinnedReg => unimplemented!("GetPinnedReg"),
         Opcode::SetPinnedReg => unimplemented!("SetPinnedReg"),
         Opcode::TableAddr => unimplemented!("TableAddr"),
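The `calculate_addr` change threads an explicit address type through so that heap indices widen by zero-extension at the right width: for a 32-bit index, sign-extending to 64 bits would turn large indices into negative offsets. A quick demonstration of why the extension kind matters:

fn main() {
    let index: i32 = -1; // bit pattern 0xFFFF_FFFF, i.e. u32::MAX as an unsigned index
    assert_eq!(index as i64, -1); // sign-extension: not a usable offset
    assert_eq!(index as u32 as u64, 0xFFFF_FFFF); // zero-extension: what heap_addr needs
}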