cranelift: Add heap support to filetest infrastructure (#3154)
* cranelift: Add heap support to filetest infrastructure
* cranelift: Explicit heap pointer placement in filetest annotations
* cranelift: Add documentation about the Heap directive
* cranelift: Clarify that heap filetest pointers must be laid out sequentially
* cranelift: Use wrapping add when computing bound pointer
* cranelift: Better error messages when invalid signatures are found for heap file tests
This commit is contained in:
170
cranelift/filetests/filetests/runtests/heap.clif
Normal file
170
cranelift/filetests/filetests/runtests/heap.clif
Normal file
@@ -0,0 +1,170 @@
|
||||
test run
target x86_64 machinst
target s390x
target aarch64
||||
; Round-trips an i32 through a static heap addressed with an i64 index.
function %static_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 1
; run: %static_heap_i64_load_store(0, -1) == -1
; run: %static_heap_i64_load_store(16, 1) == 1
; run: %static_heap_i64_load_store(16, -1) == -1
|
||||
|
||||
|
||||
; Same as the i64 variant, but the heap is indexed with an i32 offset.
function %static_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32_load_store(0, 1) == 1
; run: %static_heap_i32_load_store(0, -1) == -1
; run: %static_heap_i32_load_store(16, 1) == 1
; run: %static_heap_i32_load_store(16, -1) == -1
|
||||
|
||||
|
||||
; Static heap declared without a `min` size; the runtime environment still
; provides a 0x1000-byte backing allocation via the heap annotation below.
function %static_heap_i32_load_store_no_min(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32_load_store_no_min(0, 1) == 1
; run: %static_heap_i32_load_store_no_min(0, -1) == -1
; run: %static_heap_i32_load_store_no_min(16, 1) == 1
; run: %static_heap_i32_load_store_no_min(16, -1) == -1
|
||||
|
||||
|
||||
; Dynamic heap: the bound is loaded from vmctx+8 (gv2) rather than being a
; compile-time constant. Indexed with an i64 offset.
function %dynamic_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_heap_i64_load_store(0, 1) == 1
; run: %dynamic_heap_i64_load_store(0, -1) == -1
; run: %dynamic_heap_i64_load_store(16, 1) == 1
; run: %dynamic_heap_i64_load_store(16, -1) == -1
|
||||
|
||||
|
||||
; Dynamic heap indexed with an i32 offset; bound comes from vmctx+8 (gv2).
function %dynamic_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_heap_i32_load_store(0, 1) == 1
; run: %dynamic_heap_i32_load_store(0, -1) == -1
; run: %dynamic_heap_i32_load_store(16, 1) == 1
; run: %dynamic_heap_i32_load_store(16, -1) == -1
|
||||
|
||||
|
||||
; Exercises two heaps in one function: a static heap (slots vmctx+0/+8) and a
; dynamic heap (slots vmctx+16/+24), matching the sequential 16-byte-per-heap
; vmctx layout required by the filetest heap annotations.
function %multi_heap_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+16
    gv3 = load.i64 notrap aligned gv0+24
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
    heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = iconst.i64 0
    v4 = iconst.i32 0

    ; Store lhs in heap0
    v5 = heap_addr.i64 heap0, v3, 4
    store.i32 v1, v5

    ; Store rhs in heap1
    v6 = heap_addr.i64 heap1, v4, 4
    store.i32 v2, v6

    v7 = load.i32 v5
    v8 = load.i32 v6

    v9 = iadd.i32 v7, v8
    return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %multi_heap_load_store(1, 2) == 3
; run: %multi_heap_load_store(4, 5) == 9
|
||||
|
||||
|
||||
|
||||
; Performs 4-byte stores/loads at deliberately unaligned heap offsets (1..3).
function %static_heap_i64_load_store_unaligned(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store_unaligned(0, 1) == 1
; run: %static_heap_i64_load_store_unaligned(0, -1) == -1
; run: %static_heap_i64_load_store_unaligned(1, 1) == 1
; run: %static_heap_i64_load_store_unaligned(1, -1) == -1
; run: %static_heap_i64_load_store_unaligned(2, 1) == 1
; run: %static_heap_i64_load_store_unaligned(2, -1) == -1
; run: %static_heap_i64_load_store_unaligned(3, 1) == 1
; run: %static_heap_i64_load_store_unaligned(3, -1) == -1
|
||||
|
||||
|
||||
; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
; The heap base (gv1) is vmctx+0 itself, computed via iadd_imm, so offset 0
; aliases the heap-pointer slot of the runtime context struct.
function %static_heap_i64_iadd_imm(i64 vmctx, i32) -> i32 {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64

block0(v0: i64, v1: i32):
    v2 = iconst.i64 0
    v3 = heap_addr.i64 heap0, v2, 4
    store.i32 v1, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_iadd_imm(1) == 1
; run: %static_heap_i64_iadd_imm(-1) == -1
|
||||
@@ -34,6 +34,7 @@ pub mod function_runner;
|
||||
mod match_directive;
|
||||
mod runner;
|
||||
mod runone;
|
||||
mod runtest_environment;
|
||||
mod subtest;
|
||||
|
||||
mod test_binemit;
|
||||
|
||||
111
cranelift/filetests/src/runtest_environment.rs
Normal file
111
cranelift/filetests/src/runtest_environment.rs
Normal file
@@ -0,0 +1,111 @@
|
||||
use anyhow::anyhow;
|
||||
use cranelift_codegen::data_value::DataValue;
|
||||
use cranelift_codegen::ir::Type;
|
||||
use cranelift_reader::parse_heap_command;
|
||||
use cranelift_reader::{Comment, HeapCommand};
|
||||
|
||||
/// Stores info about the expected environment for a test function.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RuntestEnvironment {
|
||||
pub heaps: Vec<HeapCommand>,
|
||||
}
|
||||
|
||||
impl RuntestEnvironment {
|
||||
/// Parse the environment from a set of comments
|
||||
pub fn parse(comments: &[Comment]) -> anyhow::Result<Self> {
|
||||
let mut env = RuntestEnvironment { heaps: Vec::new() };
|
||||
|
||||
for comment in comments.iter() {
|
||||
if let Some(heap_command) = parse_heap_command(comment.text)? {
|
||||
let heap_index = env.heaps.len() as u64;
|
||||
let expected_ptr = heap_index * 16;
|
||||
if Some(expected_ptr) != heap_command.ptr_offset.map(|p| p.into()) {
|
||||
return Err(anyhow!(
|
||||
"Invalid ptr offset, expected vmctx+{}",
|
||||
expected_ptr
|
||||
));
|
||||
}
|
||||
|
||||
let expected_bound = (heap_index * 16) + 8;
|
||||
if Some(expected_bound) != heap_command.bound_offset.map(|p| p.into()) {
|
||||
return Err(anyhow!(
|
||||
"Invalid bound offset, expected vmctx+{}",
|
||||
expected_bound
|
||||
));
|
||||
}
|
||||
|
||||
env.heaps.push(heap_command);
|
||||
};
|
||||
}
|
||||
|
||||
Ok(env)
|
||||
}
|
||||
|
||||
pub fn is_active(&self) -> bool {
|
||||
!self.heaps.is_empty()
|
||||
}
|
||||
|
||||
/// Allocates a struct to be injected into the test.
|
||||
pub fn runtime_struct(&self) -> RuntestContext {
|
||||
RuntestContext::new(&self)
|
||||
}
|
||||
}
|
||||
|
||||
type HeapMemory = Vec<u8>;
|
||||
|
||||
/// A struct that provides info about the environment to the test
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RuntestContext {
|
||||
/// Store the heap memory alongside the context info so that we don't accidentally deallocate
|
||||
/// it too early.
|
||||
heaps: Vec<HeapMemory>,
|
||||
|
||||
/// This is the actual struct that gets passed into the `vmctx` argument of the tests.
|
||||
/// It has a specific memory layout that all tests agree with.
|
||||
///
|
||||
/// Currently we only have to store heap info, so we store the heap start and end addresses in
|
||||
/// a 64 bit slot for each heap.
|
||||
///
|
||||
/// ┌────────────┐
|
||||
/// │heap0: start│
|
||||
/// ├────────────┤
|
||||
/// │heap0: end │
|
||||
/// ├────────────┤
|
||||
/// │heap1: start│
|
||||
/// ├────────────┤
|
||||
/// │heap1: end │
|
||||
/// ├────────────┤
|
||||
/// │etc... │
|
||||
/// └────────────┘
|
||||
context_struct: Vec<u64>,
|
||||
}
|
||||
|
||||
impl RuntestContext {
|
||||
pub fn new(env: &RuntestEnvironment) -> Self {
|
||||
let heaps: Vec<HeapMemory> = env
|
||||
.heaps
|
||||
.iter()
|
||||
.map(|cmd| {
|
||||
let size: u64 = cmd.size.into();
|
||||
vec![0u8; size as usize]
|
||||
})
|
||||
.collect();
|
||||
|
||||
let context_struct = heaps
|
||||
.iter()
|
||||
.flat_map(|heap| [heap.as_ptr(), heap.as_ptr().wrapping_add(heap.len())])
|
||||
.map(|p| p as usize as u64)
|
||||
.collect();
|
||||
|
||||
Self {
|
||||
heaps,
|
||||
context_struct,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a [DataValue] with a target isa pointer type to the context struct.
|
||||
pub fn pointer(&self, ty: Type) -> DataValue {
|
||||
let ptr = self.context_struct.as_ptr() as usize as i128;
|
||||
DataValue::from_integer(ptr, ty).expect("Failed to cast pointer to native target size")
|
||||
}
|
||||
}
|
||||
@@ -3,8 +3,10 @@
|
||||
//! The `run` test command compiles each function on the host machine and executes it
|
||||
|
||||
use crate::function_runner::SingleFunctionCompiler;
|
||||
use crate::runtest_environment::RuntestEnvironment;
|
||||
use crate::subtest::{Context, SubTest};
|
||||
use cranelift_codegen::ir;
|
||||
use cranelift_codegen::ir::ArgumentPurpose;
|
||||
use cranelift_reader::parse_run_command;
|
||||
use cranelift_reader::TestCommand;
|
||||
use log::trace;
|
||||
@@ -48,6 +50,8 @@ impl SubTest for TestRun {
|
||||
}
|
||||
let variant = context.isa.unwrap().variant();
|
||||
|
||||
let test_env = RuntestEnvironment::parse(&context.details.comments[..])?;
|
||||
|
||||
let mut compiler = SingleFunctionCompiler::with_host_isa(context.flags.clone(), variant);
|
||||
for comment in context.details.comments.iter() {
|
||||
if let Some(command) = parse_run_command(comment.text, &func.signature)? {
|
||||
@@ -60,7 +64,31 @@ impl SubTest for TestRun {
|
||||
// running x86_64 code on aarch64 platforms.
|
||||
let compiled_fn = compiler.compile(func.clone().into_owned())?;
|
||||
command
|
||||
.run(|_, args| Ok(compiled_fn.call(args)))
|
||||
.run(|_, run_args| {
|
||||
let runtime_struct = test_env.runtime_struct();
|
||||
|
||||
let first_arg_is_vmctx = func
|
||||
.signature
|
||||
.params
|
||||
.first()
|
||||
.map(|p| p.purpose == ArgumentPurpose::VMContext)
|
||||
.unwrap_or(false);
|
||||
|
||||
if !first_arg_is_vmctx && test_env.is_active() {
|
||||
return Err(concat!(
|
||||
"This test requests a heap, but the first argument is not `i64 vmctx`.\n",
|
||||
"See docs/testing.md for more info on using heap annotations."
|
||||
).to_string());
|
||||
}
|
||||
|
||||
let mut args = Vec::with_capacity(run_args.len());
|
||||
if test_env.is_active() {
|
||||
args.push(runtime_struct.pointer(context.isa.unwrap().pointer_type()));
|
||||
}
|
||||
args.extend_from_slice(run_args);
|
||||
|
||||
Ok(compiled_fn.call(&args))
|
||||
})
|
||||
.map_err(|s| anyhow::anyhow!("{}", s))?;
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user