cranelift: Add heap support to filetest infrastructure (#3154)

* cranelift: Add heap support to filetest infrastructure

* cranelift: Explicit heap pointer placement in filetest annotations

* cranelift: Add documentation about the Heap directive

* cranelift: Clarify that heap filetests pointers must be laid out sequentially

* cranelift: Use wrapping add when computing bound pointer

* cranelift: Better error messages when invalid signatures are found for heap file tests.
This commit is contained in:
Afonso Bordado
2021-08-24 17:28:41 +01:00
committed by GitHub
parent 3f6b889067
commit f4ff7c350a
8 changed files with 631 additions and 5 deletions

View File

@@ -367,3 +367,93 @@ Example:
}
; run
```
#### Environment directives
Some tests need additional resources to be provided by the filetest infrastructure.
When any of the following directives is present, the first argument of the function is *required* to be an `i64 vmctx`.
The filetest infrastructure will then pass a pointer to the environment struct via this argument.
The environment struct is essentially a list of pointers with info about the resources requested by the directives. These
pointer slots are always 8 bytes and laid out sequentially in memory, even on 32-bit machines, where only the first
4 bytes of each slot are filled.
Currently, we only support requesting heaps, however this is a generic mechanism that should
be able to introduce any sort of environment support that we may need later. (e.g. tables, global values, external functions)
##### `heap` directive
The `heap` directive allows a test to request a heap to be allocated and passed to the test via the environment struct.
A sample heap annotation is the following:
```
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
```
This indicates the following:
* `static`: We have requested a non-resizable and non-movable static heap.
* `size=0x1000`: It has to have a size of 4096 bytes.
* `ptr=vmctx+0`: The pointer to the address to the start of this heap is placed at offset 0 in the `vmctx` struct
* `bound=vmctx+8`: The pointer to the address to the end of this heap is placed at offset 8 in the `vmctx` struct
The `ptr` and `bound` arguments make explicit the placement of the pointers to the start and end of the heap memory in
the environment struct. `ptr=vmctx+0` means that at offset 0 of the environment struct there will be the pointer to the
start of the heap; similarly, `bound=vmctx+8` places the pointer to the end of the heap at offset 8.
You can combine multiple heap annotations, in which case, their pointers are laid out sequentially in memory in
the order that the annotations appear in the source file.
```
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
```
An invalid or unexpected offset will raise an error when the test is run.
See the diagram below for how the `vmctx` struct ends up when multiple heaps are requested:
```
┌─────────────────────┐ vmctx+0
│heap0: start address │
├─────────────────────┤ vmctx+8
│heap0: end address │
├─────────────────────┤ vmctx+16
│heap1: start address │
├─────────────────────┤ vmctx+24
│heap1: end address │
├─────────────────────┤ vmctx+32
│etc... │
└─────────────────────┘
```
With this setup, you can now use the global values to load heaps, and load / store to them.
Example:
```
function %heap_load_store(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_load_store(0, 1) == 1
```
### `test interpret`
Test the CLIF interpreter
This test supports the same commands as `test run`, but runs the code in the cranelift
interpreter instead of the host machine.

View File

@@ -0,0 +1,170 @@
test run
target x86_64 machinst
target s390x
target aarch64
; All functions below request heaps via `; heap:` annotations, so their first
; argument must be `i64 vmctx`: the filetest runner passes a pointer to an
; environment struct whose 8-byte slots hold each heap's start and end addresses
; at the offsets declared by `ptr=` and `bound=`.
; Load/store round-trip through a static heap with an i64 index type.
function %static_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 1
; run: %static_heap_i64_load_store(0, -1) == -1
; run: %static_heap_i64_load_store(16, 1) == 1
; run: %static_heap_i64_load_store(16, -1) == -1
; Same round-trip, but the heap is indexed with i32.
function %static_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32_load_store(0, 1) == 1
; run: %static_heap_i32_load_store(0, -1) == -1
; run: %static_heap_i32_load_store(16, 1) == 1
; run: %static_heap_i32_load_store(16, -1) == -1
; Static heaps may omit `min`; the annotation below still allocates 0x1000 bytes.
function %static_heap_i32_load_store_no_min(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32_load_store_no_min(0, 1) == 1
; run: %static_heap_i32_load_store_no_min(0, -1) == -1
; run: %static_heap_i32_load_store_no_min(16, 1) == 1
; run: %static_heap_i32_load_store_no_min(16, -1) == -1
; Load/store through a dynamic heap whose bound is loaded from vmctx+8.
function %dynamic_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_heap_i64_load_store(0, 1) == 1
; run: %dynamic_heap_i64_load_store(0, -1) == -1
; run: %dynamic_heap_i64_load_store(16, 1) == 1
; run: %dynamic_heap_i64_load_store(16, -1) == -1
; Same as above with an i32 index type.
function %dynamic_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_heap_i32_load_store(0, 1) == 1
; run: %dynamic_heap_i32_load_store(0, -1) == -1
; run: %dynamic_heap_i32_load_store(16, 1) == 1
; run: %dynamic_heap_i32_load_store(16, -1) == -1
; Two heaps in one test: their pointers are laid out sequentially in the vmctx
; struct (heap0 at offsets 0/8, heap1 at offsets 16/24), in annotation order.
function %multi_heap_load_store(i64 vmctx, i32, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    gv2 = load.i64 notrap aligned gv0+16
    gv3 = load.i64 notrap aligned gv0+24
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
    heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32

block0(v0: i64, v1: i32, v2: i32):
    v3 = iconst.i64 0
    v4 = iconst.i32 0
    ; Store lhs in heap0
    v5 = heap_addr.i64 heap0, v3, 4
    store.i32 v1, v5
    ; Store rhs in heap1
    v6 = heap_addr.i64 heap1, v4, 4
    store.i32 v2, v6
    v7 = load.i32 v5
    v8 = load.i32 v6
    v9 = iadd.i32 v7, v8
    return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %multi_heap_load_store(1, 2) == 3
; run: %multi_heap_load_store(4, 5) == 9
; Checks that accesses at unaligned heap offsets (1, 2, 3) still work.
function %static_heap_i64_load_store_unaligned(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    v3 = heap_addr.i64 heap0, v1, 4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store_unaligned(0, 1) == 1
; run: %static_heap_i64_load_store_unaligned(0, -1) == -1
; run: %static_heap_i64_load_store_unaligned(1, 1) == 1
; run: %static_heap_i64_load_store_unaligned(1, -1) == -1
; run: %static_heap_i64_load_store_unaligned(2, 1) == 1
; run: %static_heap_i64_load_store_unaligned(2, -1) == -1
; run: %static_heap_i64_load_store_unaligned(3, 1) == 1
; run: %static_heap_i64_load_store_unaligned(3, -1) == -1
; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
function %static_heap_i64_iadd_imm(i64 vmctx, i32) -> i32 {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 0
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64

block0(v0: i64, v1: i32):
    v2 = iconst.i64 0
    v3 = heap_addr.i64 heap0, v2, 4
    store.i32 v1, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_iadd_imm(1) == 1
; run: %static_heap_i64_iadd_imm(-1) == -1

View File

@@ -34,6 +34,7 @@ pub mod function_runner;
mod match_directive;
mod runner;
mod runone;
mod runtest_environment;
mod subtest;
mod test_binemit;

View File

@@ -0,0 +1,111 @@
use anyhow::anyhow;
use cranelift_codegen::data_value::DataValue;
use cranelift_codegen::ir::Type;
use cranelift_reader::parse_heap_command;
use cranelift_reader::{Comment, HeapCommand};
/// Stores info about the expected environment for a test function.
#[derive(Debug, Clone)]
pub struct RuntestEnvironment {
    /// Heaps requested via `; heap:` annotations, in the order they appear in the source file.
    pub heaps: Vec<HeapCommand>,
}
impl RuntestEnvironment {
    /// Parse the environment from a set of comments.
    ///
    /// Each `; heap:` comment adds one heap. The `ptr`/`bound` offsets declared in the
    /// annotation are validated against the fixed layout of the vmctx struct: heap `i`
    /// must place its start pointer at `vmctx+(i*16)` and its bound at `vmctx+(i*16)+8`.
    pub fn parse(comments: &[Comment]) -> anyhow::Result<Self> {
        let mut env = RuntestEnvironment { heaps: Vec::new() };

        for comment in comments.iter() {
            if let Some(heap_command) = parse_heap_command(comment.text)? {
                let heap_index = env.heaps.len() as u64;

                // Each heap occupies two sequential 8-byte slots (start, bound).
                let expected_ptr = heap_index * 16;
                if Some(expected_ptr) != heap_command.ptr_offset.map(|p| p.into()) {
                    return Err(anyhow!(
                        "Invalid ptr offset, expected vmctx+{}",
                        expected_ptr
                    ));
                }

                let expected_bound = expected_ptr + 8;
                if Some(expected_bound) != heap_command.bound_offset.map(|p| p.into()) {
                    return Err(anyhow!(
                        "Invalid bound offset, expected vmctx+{}",
                        expected_bound
                    ));
                }

                env.heaps.push(heap_command);
            }
        }

        Ok(env)
    }

    /// Returns `true` if this test requested any environment resources (i.e. at least one heap).
    pub fn is_active(&self) -> bool {
        !self.heaps.is_empty()
    }

    /// Allocates a struct to be injected into the test.
    pub fn runtime_struct(&self) -> RuntestContext {
        // `self` is already a reference; the previous `&self` was a needless double borrow.
        RuntestContext::new(self)
    }
}
/// Backing storage for a single requested heap: a plain, zero-initialized byte buffer
/// owned by the test harness.
type HeapMemory = Vec<u8>;

/// A struct that provides info about the environment to the test
#[derive(Debug, Clone)]
pub struct RuntestContext {
    /// Store the heap memory alongside the context info so that we don't accidentally deallocate
    /// it too early.
    heaps: Vec<HeapMemory>,

    /// This is the actual struct that gets passed into the `vmctx` argument of the tests.
    /// It has a specific memory layout that all tests agree with.
    ///
    /// Currently we only have to store heap info, so we store the heap start and end addresses in
    /// a 64 bit slot for each heap.
    ///
    /// ┌────────────┐
    /// │heap0: start│
    /// ├────────────┤
    /// │heap0: end  │
    /// ├────────────┤
    /// │heap1: start│
    /// ├────────────┤
    /// │heap1: end  │
    /// ├────────────┤
    /// │etc...      │
    /// └────────────┘
    context_struct: Vec<u64>,
}
impl RuntestContext {
    /// Allocates the heaps requested by `env` and builds the context struct whose
    /// slots point at the start and end of each heap, in annotation order.
    pub fn new(env: &RuntestEnvironment) -> Self {
        // Allocate one zero-filled buffer per requested heap.
        let mut heaps: Vec<HeapMemory> = Vec::with_capacity(env.heaps.len());
        for cmd in env.heaps.iter() {
            let size: u64 = cmd.size.into();
            heaps.push(vec![0u8; size as usize]);
        }

        // Two 64-bit slots per heap: start address, then one-past-the-end address.
        let mut context_struct: Vec<u64> = Vec::with_capacity(heaps.len() * 2);
        for heap in heaps.iter() {
            let start = heap.as_ptr();
            // `wrapping_add` avoids UB concerns when forming the one-past-the-end pointer.
            let end = start.wrapping_add(heap.len());
            context_struct.push(start as usize as u64);
            context_struct.push(end as usize as u64);
        }

        Self {
            heaps,
            context_struct,
        }
    }

    /// Creates a [DataValue] with a target isa pointer type to the context struct.
    pub fn pointer(&self, ty: Type) -> DataValue {
        let addr = self.context_struct.as_ptr() as usize as i128;
        DataValue::from_integer(addr, ty).expect("Failed to cast pointer to native target size")
    }
}

View File

@@ -3,8 +3,10 @@
//! The `run` test command compiles each function on the host machine and executes it
use crate::function_runner::SingleFunctionCompiler;
use crate::runtest_environment::RuntestEnvironment;
use crate::subtest::{Context, SubTest};
use cranelift_codegen::ir;
use cranelift_codegen::ir::ArgumentPurpose;
use cranelift_reader::parse_run_command;
use cranelift_reader::TestCommand;
use log::trace;
@@ -48,6 +50,8 @@ impl SubTest for TestRun {
}
let variant = context.isa.unwrap().variant();
let test_env = RuntestEnvironment::parse(&context.details.comments[..])?;
let mut compiler = SingleFunctionCompiler::with_host_isa(context.flags.clone(), variant);
for comment in context.details.comments.iter() {
if let Some(command) = parse_run_command(comment.text, &func.signature)? {
@@ -60,7 +64,31 @@ impl SubTest for TestRun {
// running x86_64 code on aarch64 platforms.
let compiled_fn = compiler.compile(func.clone().into_owned())?;
command
.run(|_, args| Ok(compiled_fn.call(args)))
.run(|_, run_args| {
let runtime_struct = test_env.runtime_struct();
let first_arg_is_vmctx = func
.signature
.params
.first()
.map(|p| p.purpose == ArgumentPurpose::VMContext)
.unwrap_or(false);
if !first_arg_is_vmctx && test_env.is_active() {
return Err(concat!(
"This test requests a heap, but the first argument is not `i64 vmctx`.\n",
"See docs/testing.md for more info on using heap annotations."
).to_string());
}
let mut args = Vec::with_capacity(run_args.len());
if test_env.is_active() {
args.push(runtime_struct.pointer(context.isa.unwrap().pointer_type()));
}
args.extend_from_slice(run_args);
Ok(compiled_fn.call(&args))
})
.map_err(|s| anyhow::anyhow!("{}", s))?;
}
}

View File

@@ -0,0 +1,71 @@
//! Heap commands.
//!
//! Functions in a `.clif` file can have *heap commands* appended that control the heaps allocated
//! by the `test run` and `test interpret` infrastructure.
//!
//! The general syntax is:
//! - `; heap: <heap_type>, size=n[, ptr=vmctx+off, bound=vmctx+off]`
//!
//! `heap_type` can have two values:
//! - `static`: This is a non resizable heap type with a fixed size
//! - `dynamic`: This is a resizable heap, which can grow
//!
//! `size=n` indicates the size of the heap. For dynamic heaps, it indicates the starting size of
//! the heap. The optional `ptr` and `bound` attributes declare at which byte offsets of the
//! `vmctx` struct the heap's start and end pointers are expected to be placed; these offsets
//! are validated by the test infrastructure.
use cranelift_codegen::ir::immediates::Uimm64;
use std::fmt::{self, Display, Formatter};
/// A heap command appearing in a test file.
///
/// For parsing, see `Parser::parse_heap_command`
#[derive(PartialEq, Debug, Clone)]
pub struct HeapCommand {
    /// Indicates the requested heap type
    pub heap_type: HeapType,

    /// Size of the heap, in bytes.
    ///
    /// For dynamic heaps this is the starting size. For static heaps, this is the total size.
    pub size: Uimm64,

    /// Byte offset of the heap pointer from the vmctx base (the `ptr=vmctx+n` attribute)
    ///
    /// This is done for verification purposes only
    pub ptr_offset: Option<Uimm64>,

    /// Byte offset of the bound pointer from the vmctx base (the `bound=vmctx+n` attribute)
    ///
    /// This is done for verification purposes only
    pub bound_offset: Option<Uimm64>,
}
impl Display for HeapCommand {
    /// Formats the command exactly as it appears in a test file, e.g.
    /// `heap: static, size=10, ptr=vmctx+0, bound=vmctx+8`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "heap: {}, size={}", self.heap_type, self.size)?;

        // The vmctx offsets are optional and only printed when present.
        for (name, offset) in [("ptr", &self.ptr_offset), ("bound", &self.bound_offset)] {
            if let Some(offset) = offset {
                write!(f, ", {}=vmctx+{}", name, offset)?
            }
        }

        Ok(())
    }
}
/// CLIF Representation of a heap type. e.g.: `static`
#[allow(missing_docs)]
#[derive(Debug, PartialEq, Clone)]
pub enum HeapType {
    /// A fixed-size heap that cannot be resized or moved.
    Static,
    /// A heap that may grow at runtime.
    Dynamic,
}
impl Display for HeapType {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Keep these strings in sync with `Parser::parse_heap_type`.
        let name = match self {
            HeapType::Static => "static",
            HeapType::Dynamic => "dynamic",
        };
        f.write_str(name)
    }
}

View File

@@ -26,14 +26,18 @@
)]
pub use crate::error::{Location, ParseError, ParseResult};
pub use crate::heap_command::{HeapCommand, HeapType};
pub use crate::isaspec::{parse_options, IsaSpec, ParseOptionError};
pub use crate::parser::{parse_functions, parse_run_command, parse_test, ParseOptions};
pub use crate::parser::{
parse_functions, parse_heap_command, parse_run_command, parse_test, ParseOptions,
};
pub use crate::run_command::{Comparison, Invocation, RunCommand};
pub use crate::sourcemap::SourceMap;
pub use crate::testcommand::{TestCommand, TestOption};
pub use crate::testfile::{Comment, Details, Feature, TestFile};
mod error;
mod heap_command;
mod isaspec;
mod lexer;
mod parser;

View File

@@ -1,6 +1,7 @@
//! Parser for .clif files.
use crate::error::{Location, ParseError, ParseResult};
use crate::heap_command::{HeapCommand, HeapType};
use crate::isaspec;
use crate::lexer::{LexError, Lexer, LocatedError, LocatedToken, Token};
use crate::run_command::{Comparison, Invocation, RunCommand};
@@ -140,6 +141,24 @@ pub fn parse_run_command<'a>(text: &str, signature: &Signature) -> ParseResult<O
}
}
/// Parse a CLIF comment `text` as a heap command.
///
/// Return:
/// - `Ok(None)` if the comment is not intended to be a `HeapCommand` (i.e. does not start with `heap`)
/// - `Ok(Some(heap))` if the comment is intended as a `HeapCommand` and can be parsed to one
/// - `Err` otherwise.
pub fn parse_heap_command<'a>(text: &str) -> ParseResult<Option<HeapCommand>> {
    let _tt = timing::parse_text();
    // We remove leading spaces and semi-colons for convenience here instead of at the call sites
    // since this function will be attempting to parse a HeapCommand from a CLIF comment.
    let trimmed_text = text.trim_start_matches(|c| c == ' ' || c == ';');
    let mut parser = Parser::new(trimmed_text);
    match parser.token() {
        // Only comments whose first token is `heap` are treated as heap commands;
        // any other comment yields `Ok(None)` rather than an error.
        Some(Token::Identifier("heap")) => parser.parse_heap_command().map(Some),
        Some(_) | None => Ok(None),
    }
}
pub struct Parser<'a> {
lex: Lexer<'a>,
@@ -2559,6 +2578,86 @@ impl<'a> Parser<'a> {
Ok(args)
}
    /// Parse a vmctx offset annotation
    ///
    /// vmctx-offset ::= "vmctx" "+" UImm64(offset)
    fn parse_vmctx_offset(&mut self) -> ParseResult<Uimm64> {
        self.match_token(Token::Identifier("vmctx"), "expected a 'vmctx' token")?;

        // The '+' token here gets parsed as part of the integer text, so we can't just match_token it
        // and `match_uimm64` doesn't support leading '+' tokens, so we can't use that either.
        match self.token() {
            // The lexer produced a single Integer token such as "+8"; strip the leading
            // sign and parse the remainder as a `Uimm64`.
            Some(Token::Integer(text)) if text.starts_with('+') => {
                self.consume();

                text[1..]
                    .parse()
                    .map_err(|_| self.error("expected u64 decimal immediate"))
            }
            token => err!(
                self.loc,
                format!("Unexpected token {:?} after vmctx", token)
            ),
        }
    }
/// Parse a CLIF heap command.
///
/// heap-command ::= "heap" ":" heap-type { "," heap-attr }
/// heap-attr ::= "size" "=" UImm64(bytes)
fn parse_heap_command(&mut self) -> ParseResult<HeapCommand> {
self.match_token(Token::Identifier("heap"), "expected a 'heap:' command")?;
self.match_token(Token::Colon, "expected a ':' after heap command")?;
let mut heap_command = HeapCommand {
heap_type: self.parse_heap_type()?,
size: Uimm64::new(0),
ptr_offset: None,
bound_offset: None,
};
while self.optional(Token::Comma) {
let identifier = self.match_any_identifier("expected heap attribute name")?;
self.match_token(Token::Equal, "expected '=' after heap attribute name")?;
match identifier {
"size" => {
heap_command.size = self.match_uimm64("expected integer size")?;
}
"ptr" => {
heap_command.ptr_offset = Some(self.parse_vmctx_offset()?);
}
"bound" => {
heap_command.bound_offset = Some(self.parse_vmctx_offset()?);
}
t => return err!(self.loc, "unknown heap attribute '{}'", t),
}
}
if heap_command.size == Uimm64::new(0) {
return err!(self.loc, self.error("Expected a heap size to be specified"));
}
Ok(heap_command)
}
/// Parse a heap type.
///
/// heap-type ::= "static" | "dynamic"
fn parse_heap_type(&mut self) -> ParseResult<HeapType> {
match self.token() {
Some(Token::Identifier("static")) => {
self.consume();
Ok(HeapType::Static)
}
Some(Token::Identifier("dynamic")) => {
self.consume();
Ok(HeapType::Dynamic)
}
_ => Err(self.error("expected a heap type, e.g. static or dynamic")),
}
}
/// Parse a CLIF run command.
///
/// run-command ::= "run" [":" invocation comparison expected]
@@ -2618,9 +2717,22 @@ impl<'a> Parser<'a> {
"expected invocation parentheses, e.g. %fn(...)",
)?;
let args = self.parse_data_value_list(
&sig.params.iter().map(|a| a.value_type).collect::<Vec<_>>(),
)?;
let arg_types = sig
.params
.iter()
.enumerate()
.filter_map(|(i, p)| {
// The first argument being VMCtx indicates that this is a argument that is going
// to be passed in with info about the test environment, and should not be passed
// in the run params.
if p.purpose == ir::ArgumentPurpose::VMContext && i == 0 {
None
} else {
Some(p.value_type)
}
})
.collect::<Vec<_>>();
let args = self.parse_data_value_list(&arg_types)?;
self.match_token(
Token::RPar,
@@ -3965,6 +4077,45 @@ mod tests {
assert!(parse("run: ", &sig(&[], &[])).is_err());
}
#[test]
fn parse_heap_commands() {
fn parse(text: &str) -> ParseResult<HeapCommand> {
Parser::new(text).parse_heap_command()
}
// Check that we can parse and display the same set of heap commands.
fn assert_roundtrip(text: &str) {
assert_eq!(parse(text).unwrap().to_string(), text);
}
assert_roundtrip("heap: static, size=10");
assert_roundtrip("heap: dynamic, size=10");
assert_roundtrip("heap: static, size=10, ptr=vmctx+10");
assert_roundtrip("heap: static, size=10, bound=vmctx+11");
assert_roundtrip("heap: static, size=10, ptr=vmctx+10, bound=vmctx+10");
assert_roundtrip("heap: dynamic, size=10, ptr=vmctx+10");
assert_roundtrip("heap: dynamic, size=10, bound=vmctx+11");
assert_roundtrip("heap: dynamic, size=10, ptr=vmctx+10, bound=vmctx+10");
let static_heap = parse("heap: static, size=10, ptr=vmctx+8, bound=vmctx+2").unwrap();
assert_eq!(static_heap.size, Uimm64::new(10));
assert_eq!(static_heap.heap_type, HeapType::Static);
assert_eq!(static_heap.ptr_offset, Some(Uimm64::new(8)));
assert_eq!(static_heap.bound_offset, Some(Uimm64::new(2)));
let dynamic_heap = parse("heap: dynamic, size=0x10").unwrap();
assert_eq!(dynamic_heap.size, Uimm64::new(16));
assert_eq!(dynamic_heap.heap_type, HeapType::Dynamic);
assert_eq!(dynamic_heap.ptr_offset, None);
assert_eq!(dynamic_heap.bound_offset, None);
assert!(parse("heap: static").is_err());
assert!(parse("heap: dynamic").is_err());
assert!(parse("heap: static size=0").is_err());
assert!(parse("heap: dynamic size=0").is_err());
assert!(parse("heap: static, size=10, ptr=10").is_err());
assert!(parse("heap: static, size=10, bound=vmctx-10").is_err());
}
#[test]
fn parse_data_values() {
fn parse(text: &str, ty: Type) -> DataValue {