Remove heaps from core Cranelift, push them into cranelift-wasm (#5386)

* cranelift-wasm: translate Wasm loads into lower-level CLIF operations

Rather than using `heap_{load,store,addr}`.

* cranelift: Remove the `heap_{addr,load,store}` instructions

These are now legalized in the `cranelift-wasm` frontend.
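
To make the new lowering concrete, here is a minimal sketch of the kind of CLIF that `cranelift-wasm` now emits directly for a bounds-checked Wasm load from a dynamic memory with Spectre mitigation enabled. It mirrors the expectations of the legalizer tests removed later in this diff; the value numbers and the vmctx offsets (+80/+88) are illustrative, not a fixed layout.

```
function %wasm_load(i64 vmctx, i32) -> i32 wasmtime_system_v {
block0(v0: i64, v1: i32):
    ;; extend the 32-bit index and add the access size, trapping on overflow
    v4 = uextend.i64 v1
    v5 = iconst.i64 4
    v6 = uadd_overflow_trap v4, v5, heap_oob
    ;; load the current bound and base out of the vmctx
    v7 = load.i64 notrap aligned v0+88
    v8 = load.i64 notrap aligned v0+80
    ;; compute the address and mask it to NULL when out of bounds
    v9 = iadd v8, v4
    v10 = iconst.i64 0
    v11 = icmp ugt v6, v7
    v12 = select_spectre_guard v11, v10, v9
    v3 = load.i32 little heap v12
    return v3
}
```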

* cranelift: Remove the `ir::Heap` entity from CLIF

* Port basic memory operation tests to .wat filetests

* Remove test for verifying CLIF heaps

* Remove `heap_addr` from replace_branching_instructions_and_cfg_predecessors.clif test

* Remove `heap_addr` from readonly.clif test

* Remove `heap_addr` from `table_addr.clif` test

* Remove `heap_addr` from the simd-fvpromote_low.clif test

* Remove `heap_addr` from simd-fvdemote.clif test

* Remove `heap_addr` from the load-op-store.clif test

* Remove the CLIF heap runtest

* Remove `heap_addr` from the global_value.clif test

* Remove `heap_addr` from fpromote.clif runtests

* Remove `heap_addr` from fdemote.clif runtests

* Remove `heap_addr` from memory.clif parser test

* Remove `heap_addr` from reject_load_readonly.clif test

* Remove `heap_addr` from reject_load_notrap.clif test

* Remove `heap_addr` from load_readonly_notrap.clif test

* Remove `static-heap-without-guard-pages.clif` test

Will be subsumed when we port `make-heap-load-store-tests.sh` to generating
`.wat` tests.

* Remove `static-heap-with-guard-pages.clif` test

Will be subsumed when we port `make-heap-load-store-tests.sh` over to `.wat`
tests.

* Remove more heap tests

These will be subsumed by porting `make-heap-load-store-tests.sh` over to `.wat`
tests.

* Remove `heap_addr` from `simple-alias.clif` test

* Remove `heap_addr` from partial-redundancy.clif test

* Remove `heap_addr` from multiple-blocks.clif test

* Remove `heap_addr` from fence.clif test

* Remove `heap_addr` from extends.clif test

* Remove runtests that rely on heaps

Heaps are not a thing in CLIF or the interpreter anymore.

* Add generated load/store `.wat` tests

* Enable memory-related wasm features in `.wat` tests

* Remove CLIF heap from fcmp-mem-bug.clif test

* Add a mode for compiling `.wat` all the way to assembly in filetests

* Also generate WAT to assembly tests in `make-load-store-tests.sh`

* cargo fmt

* Reinstate `f{de,pro}mote.clif` tests without the heap bits

* Remove undefined doc link

* Remove outdated SVG and dot file from docs

* Add docs about `None` returns for base address computation helpers

* Factor out `env.heap_access_spectre_mitigation()` to a local

* Expand docs for `FuncEnvironment::heaps` trait method

* Restore f{de,pro}mote+load clif runtests with stack memory
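
As a rough idea of the restored shape (the final tests may differ in details), these runtests drop the `heap0`/`heap_addr` plumbing and round-trip the value through an explicit stack slot instead, for example:

```
function %fpromote_load(i64, f32) -> f64 {
    ss0 = explicit_slot 16
block0(v1: i64, v2: f32):
    ;; store to and reload from stack memory, then promote
    v3 = stack_addr.i64 ss0
    store.f32 v2, v3
    v4 = load.f32 v3
    v5 = fpromote.f64 v4
    return v5
}
```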
Nick Fitzgerald, 2022-12-14 16:26:45 -08:00 (committed by GitHub)
parent e03d65cca7
commit c0b587ac5f
198 changed files with 2494 additions and 4232 deletions

Cargo.lock (generated)
View File

@@ -563,6 +563,7 @@ dependencies = [
"regalloc2",
"serde",
"sha2 0.10.2",
"similar",
"smallvec",
"souper-ir",
"target-lexicon",

View File

@@ -35,6 +35,7 @@ sha2 = { version = "0.10.2", optional = true }
[dev-dependencies]
criterion = "0.3"
similar = "2.1.0"
[build-dependencies]
cranelift-codegen-meta = { path = "meta", version = "0.92.0" }

View File

@@ -116,15 +116,6 @@ impl InstructionFormatBuilder {
self
}
pub fn imm_with_name(mut self, name: &'static str, operand_kind: &OperandKind) -> Self {
let field = FormatField {
kind: operand_kind.clone(),
member: name,
};
self.0.imm_fields.push(field);
self
}
pub fn typevar_operand(mut self, operand_index: usize) -> Self {
assert!(self.0.typevar_operand.is_none());
assert!(operand_index < self.0.num_value_operands);

View File

@@ -35,9 +35,6 @@ pub(crate) struct EntityRefs {
/// A reference to a jump table declared in the function preamble.
pub(crate) jump_table: OperandKind,
/// A reference to a heap declared in the function preamble.
pub(crate) heap: OperandKind,
/// A reference to a table declared in the function preamble.
pub(crate) table: OperandKind,
@@ -69,8 +66,6 @@ impl EntityRefs {
jump_table: new("table", "ir::JumpTable", "A jump table."),
heap: new("heap", "ir::Heap", "A heap."),
table: new("table", "ir::Table", "A table."),
varargs: OperandKind::new(

View File

@@ -15,9 +15,6 @@ pub(crate) struct Formats {
pub(crate) cond_trap: Rc<InstructionFormat>,
pub(crate) float_compare: Rc<InstructionFormat>,
pub(crate) func_addr: Rc<InstructionFormat>,
pub(crate) heap_addr: Rc<InstructionFormat>,
pub(crate) heap_load: Rc<InstructionFormat>,
pub(crate) heap_store: Rc<InstructionFormat>,
pub(crate) int_compare: Rc<InstructionFormat>,
pub(crate) int_compare_imm: Rc<InstructionFormat>,
pub(crate) int_add_trap: Rc<InstructionFormat>,
@@ -200,25 +197,6 @@ impl Formats {
.imm(&entities.dynamic_stack_slot)
.build(),
// Accessing a WebAssembly heap.
heap_addr: Builder::new("HeapAddr")
.imm(&entities.heap)
.value()
.imm_with_name("offset", &imm.uimm32)
.imm_with_name("size", &imm.uimm8)
.build(),
heap_load: Builder::new("HeapLoad").imm(&imm.heap_imm).value().build(),
heap_store: Builder::new("HeapStore")
// We have more fields for this instruction than
// `InstructionData` can hold without growing in size, so we
// push the immediates out into a side table.
.imm(&imm.heap_imm)
.value()
.value()
.build(),
// Accessing a WebAssembly table.
table_addr: Builder::new("TableAddr")
.imm(&entities.table)

View File

@@ -14,9 +14,6 @@ pub(crate) struct Immediates {
/// counts on shift instructions.
pub uimm8: OperandKind,
/// An unsigned 32-bit immediate integer operand.
pub uimm32: OperandKind,
/// An unsigned 128-bit immediate integer operand.
///
/// This operand is used to pass entire 128-bit vectors as immediates to instructions like
@@ -59,9 +56,6 @@ pub(crate) struct Immediates {
/// Flags for memory operations like `load` and `store`.
pub memflags: OperandKind,
/// A reference to out-of-line immediates for heap accesses.
pub heap_imm: OperandKind,
/// A trap code indicating the reason for trapping.
///
/// The Rust enum type also has a `User(u16)` variant for user-provided trap codes.
@@ -110,11 +104,6 @@ impl Immediates {
"ir::immediates::Uimm8",
"An 8-bit immediate unsigned integer.",
),
uimm32: new_imm(
"imm",
"ir::immediates::Uimm32",
"A 32-bit immediate unsigned integer.",
),
uimm128: new_imm(
"imm",
"ir::Immediate",
@@ -186,12 +175,6 @@ impl Immediates {
memflags: new_imm("flags", "ir::MemFlags", "Memory operation flags"),
heap_imm: new_imm(
"heap_imm",
"ir::HeapImm",
"Reference to out-of-line heap access immediates",
),
trapcode: {
let mut trapcode_values = HashMap::new();
trapcode_values.insert("stk_ovf", "StackOverflow");

View File

@@ -1118,89 +1118,6 @@ pub(crate) fn define(
.operands_out(vec![a]),
);
let HeapOffset = &TypeVar::new(
"HeapOffset",
"An unsigned heap offset",
TypeSetBuilder::new().ints(32..64).build(),
);
let H = &Operand::new("H", &entities.heap);
let index = &Operand::new("index", HeapOffset);
let Offset = &Operand::new("Offset", &imm.uimm32).with_doc("Static offset immediate in bytes");
let Size = &Operand::new("Size", &imm.uimm8).with_doc("Static size immediate in bytes");
ig.push(
Inst::new(
"heap_addr",
r#"
Bounds check and compute absolute address of ``index + Offset`` in heap memory.
Verify that the range ``index .. index + Offset + Size`` is in bounds for the
heap ``H``, and generate an absolute address that is safe to dereference.
1. If ``index + Offset + Size`` is less than or equal to the heap bound, return an
absolute address corresponding to a byte offset of ``index + Offset`` from the
heap's base address.
2. If ``index + Offset + Size`` is greater than the heap bound, return the
``NULL`` pointer or any other address that is guaranteed to generate a trap
when accessed.
"#,
&formats.heap_addr,
)
.operands_in(vec![H, index, Offset, Size])
.operands_out(vec![addr]),
);
let heap_imm = &Operand::new("heap_imm", &imm.heap_imm);
let index =
&Operand::new("index", HeapOffset).with_doc("Dynamic index (in bytes) into the heap");
let a = &Operand::new("a", Mem).with_doc("The value loaded from the heap");
ig.push(
Inst::new(
"heap_load",
r#"
Load a value from the given heap at address ``index + offset``,
trapping on out-of-bounds accesses.
Checks that ``index + offset .. index + offset + sizeof(a)`` is
within the heap's bounds, trapping if it is not. Otherwise, when
that range is in bounds, loads the value from the heap.
Traps on ``index + offset + sizeof(a)`` overflow.
"#,
&formats.heap_load,
)
.operands_in(vec![heap_imm, index])
.operands_out(vec![a])
.can_load(true)
.can_trap(true),
);
let a = &Operand::new("a", Mem).with_doc("The value stored into the heap");
ig.push(
Inst::new(
"heap_store",
r#"
Store ``a`` into the given heap at address ``index + offset``,
trapping on out-of-bounds accesses.
Checks that ``index + offset .. index + offset + sizeof(a)`` is
within the heap's bounds, trapping if it is not. Otherwise, when
that range is in bounds, stores the value into the heap.
Traps on ``index + offset + sizeof(a)`` overflow.
"#,
&formats.heap_store,
)
.operands_in(vec![heap_imm, index, a])
.operands_out(vec![])
.can_store(true)
.can_trap(true),
);
// Note this instruction is marked as having other side-effects, so GVN won't try to hoist it,
// which would result in it being subject to spilling. While not hoisting would generally hurt
// performance, since a computed value used many times may need to be regenerated before each

View File

@@ -139,21 +139,6 @@ pub(crate) fn define() -> SettingGroup {
false,
);
settings.add_bool(
"use_pinned_reg_as_heap_base",
"Use the pinned register as the heap base.",
r#"
Enabling this requires the enable_pinned_reg setting to be set to true. It enables a custom
legalization of the `heap_addr` instruction so it will use the pinned register as the heap
base, instead of fetching it from a global value.
Warning! Enabling this means that the pinned register *must* be maintained to contain the
heap base address at all times, during the lifetime of a function. Using the pinned
register for other purposes when this is set is very likely to cause crashes.
"#,
false,
);
settings.add_bool(
"enable_simd",
"Enable the use of SIMD instructions.",

View File

@@ -4,12 +4,11 @@ use crate::entity::{self, PrimaryMap, SecondaryMap};
use crate::ir;
use crate::ir::builder::ReplaceBuilder;
use crate::ir::dynamic_type::{DynamicTypeData, DynamicTypes};
use crate::ir::immediates::HeapImmData;
use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
use crate::ir::{types, ConstantData, ConstantPool, Immediate};
use crate::ir::{
Block, DynamicType, FuncRef, HeapImm, Inst, SigRef, Signature, Type, Value,
ValueLabelAssignments, ValueList, ValueListPool,
Block, DynamicType, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments,
ValueList, ValueListPool,
};
use crate::ir::{ExtFuncData, RelSourceLoc};
use crate::packed_option::ReservedValue;
@@ -84,9 +83,6 @@ pub struct DataFlowGraph {
/// Stores large immediates that otherwise will not fit on InstructionData
pub immediates: PrimaryMap<Immediate, ConstantData>,
/// Out-of-line heap access immediates that don't fit in `InstructionData`.
pub heap_imms: PrimaryMap<HeapImm, HeapImmData>,
}
impl DataFlowGraph {
@@ -105,7 +101,6 @@ impl DataFlowGraph {
values_labels: None,
constants: ConstantPool::new(),
immediates: PrimaryMap::new(),
heap_imms: PrimaryMap::new(),
}
}

View File

@@ -368,60 +368,6 @@ impl SigRef {
}
}
/// An opaque reference to a [heap](https://en.wikipedia.org/wiki/Memory_management#DYNAMIC).
///
/// Heaps are used to access dynamically allocated memory through
/// [`heap_addr`](super::InstBuilder::heap_addr).
///
/// To create a heap, use [`FunctionBuilder::create_heap`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_heap).
///
/// While the order is stable, it is arbitrary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct Heap(u32);
entity_impl!(Heap, "heap");
impl Heap {
/// Create a new heap reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Self> {
if n < u32::MAX {
Some(Self(n))
} else {
None
}
}
}
/// An opaque reference to some out-of-line immediates for `heap_{load,store}`
/// instructions.
///
/// These immediates are too large to store in
/// [`InstructionData`](super::instructions::InstructionData) and therefore must
/// be tracked separately in
/// [`DataFlowGraph::heap_imms`](super::dfg::DataFlowGraph). `HeapImm` provides
/// a way to reference values stored there.
///
/// While the order is stable, it is arbitrary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct HeapImm(u32);
entity_impl!(HeapImm, "heap_imm");
impl HeapImm {
/// Create a new `HeapImm` reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Self> {
if n < u32::MAX {
Some(Self(n))
} else {
None
}
}
}
/// An opaque reference to a [WebAssembly
/// table](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format#WebAssembly_tables).
///
@@ -477,8 +423,6 @@ pub enum AnyEntity {
FuncRef(FuncRef),
/// A function call signature.
SigRef(SigRef),
/// A heap.
Heap(Heap),
/// A table.
Table(Table),
/// A function's stack limit
@@ -500,7 +444,6 @@ impl fmt::Display for AnyEntity {
Self::Constant(r) => r.fmt(f),
Self::FuncRef(r) => r.fmt(f),
Self::SigRef(r) => r.fmt(f),
Self::Heap(r) => r.fmt(f),
Self::Table(r) => r.fmt(f),
Self::StackLimit => write!(f, "stack_limit"),
}
@@ -579,12 +522,6 @@ impl From<SigRef> for AnyEntity {
}
}
impl From<Heap> for AnyEntity {
fn from(r: Heap) -> Self {
Self::Heap(r)
}
}
impl From<Table> for AnyEntity {
fn from(r: Table) -> Self {
Self::Table(r)

View File

@@ -8,8 +8,8 @@ use crate::ir;
use crate::ir::JumpTables;
use crate::ir::{
instructions::BranchInfo, Block, DynamicStackSlot, DynamicStackSlotData, DynamicType,
ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, HeapData, Inst, InstructionData,
JumpTable, JumpTableData, Opcode, SigRef, StackSlot, StackSlotData, Table, TableData, Type,
ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Inst, InstructionData, JumpTable,
JumpTableData, Opcode, SigRef, StackSlot, StackSlotData, Table, TableData, Type,
};
use crate::ir::{DataFlowGraph, Layout, Signature};
use crate::ir::{DynamicStackSlots, SourceLocs, StackSlots};
@@ -170,9 +170,6 @@ pub struct FunctionStencil {
/// Global values referenced.
pub global_values: PrimaryMap<ir::GlobalValue, ir::GlobalValueData>,
/// Heaps referenced.
pub heaps: PrimaryMap<ir::Heap, ir::HeapData>,
/// Tables referenced.
pub tables: PrimaryMap<ir::Table, ir::TableData>,
@@ -205,7 +202,6 @@ impl FunctionStencil {
self.sized_stack_slots.clear();
self.dynamic_stack_slots.clear();
self.global_values.clear();
self.heaps.clear();
self.tables.clear();
self.jump_tables.clear();
self.dfg.clear();
@@ -261,11 +257,6 @@ impl FunctionStencil {
.concrete()
}
/// Declares a heap accessible to the function.
pub fn create_heap(&mut self, data: HeapData) -> Heap {
self.heaps.push(data)
}
/// Declares a table accessible to the function.
pub fn create_table(&mut self, data: TableData) -> Table {
self.tables.push(data)
@@ -447,7 +438,6 @@ impl Function {
sized_stack_slots: StackSlots::new(),
dynamic_stack_slots: DynamicStackSlots::new(),
global_values: PrimaryMap::new(),
heaps: PrimaryMap::new(),
tables: PrimaryMap::new(),
jump_tables: PrimaryMap::new(),
dfg: DataFlowGraph::new(),

View File

@@ -1,67 +0,0 @@
//! Heaps.
use crate::ir::immediates::Uimm64;
use crate::ir::{GlobalValue, Type};
use core::fmt;
#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};
/// Information about a heap declaration.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct HeapData {
/// The address of the start of the heap's storage.
pub base: GlobalValue,
/// Guaranteed minimum heap size in bytes. Heap accesses before `min_size` don't need bounds
/// checking.
pub min_size: Uimm64,
/// Size in bytes of the offset-guard pages following the heap.
pub offset_guard_size: Uimm64,
/// Heap style, with additional style-specific info.
pub style: HeapStyle,
/// The index type for the heap.
pub index_type: Type,
}
/// Style of heap including style-specific information.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub enum HeapStyle {
/// A dynamic heap can be relocated to a different base address when it is grown.
Dynamic {
/// Global value providing the current bound of the heap in bytes.
bound_gv: GlobalValue,
},
/// A static heap has a fixed base address and a number of not-yet-allocated pages before the
/// offset-guard pages.
Static {
/// Heap bound in bytes. The offset-guard pages are allocated after the bound.
bound: Uimm64,
},
}
impl fmt::Display for HeapData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self.style {
HeapStyle::Dynamic { .. } => "dynamic",
HeapStyle::Static { .. } => "static",
})?;
write!(f, " {}, min {}", self.base, self.min_size)?;
match self.style {
HeapStyle::Dynamic { bound_gv } => write!(f, ", bound {}", bound_gv)?,
HeapStyle::Static { bound } => write!(f, ", bound {}", bound)?,
}
write!(
f,
", offset_guard {}, index_type {}",
self.offset_guard_size, self.index_type
)
}
}

View File

@@ -4,7 +4,6 @@
//! Each type here should have a corresponding definition in the
//! `cranelift-codegen/meta/src/shared/immediates` crate in the meta language.
use crate::ir;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::convert::TryFrom;
@@ -1178,18 +1177,6 @@ impl Not for Ieee64 {
}
}
/// Out-of-line heap access immediates.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct HeapImmData {
/// The memory flags for the heap access.
pub flags: ir::MemFlags,
/// The heap being accessed.
pub heap: ir::Heap,
/// The static offset added to the heap access's index.
pub offset: Uimm32,
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -11,7 +11,6 @@ mod extfunc;
mod extname;
pub mod function;
mod globalvalue;
mod heap;
pub mod immediates;
pub mod instructions;
pub mod jumptable;
@@ -37,8 +36,8 @@ pub use crate::ir::constant::{ConstantData, ConstantPool};
pub use crate::ir::dfg::{DataFlowGraph, ValueDef};
pub use crate::ir::dynamic_type::{dynamic_to_fixed, DynamicTypeData, DynamicTypes};
pub use crate::ir::entities::{
Block, Constant, DynamicStackSlot, DynamicType, FuncRef, GlobalValue, Heap, HeapImm, Immediate,
Inst, JumpTable, SigRef, StackSlot, Table, UserExternalNameRef, Value,
Block, Constant, DynamicStackSlot, DynamicType, FuncRef, GlobalValue, Immediate, Inst,
JumpTable, SigRef, StackSlot, Table, UserExternalNameRef, Value,
};
pub use crate::ir::extfunc::{
AbiParam, ArgumentExtension, ArgumentPurpose, ExtFuncData, Signature,
@@ -46,7 +45,6 @@ pub use crate::ir::extfunc::{
pub use crate::ir::extname::{ExternalName, UserExternalName, UserFuncName};
pub use crate::ir::function::{DisplayFunctionAnnotations, Function};
pub use crate::ir::globalvalue::GlobalValueData;
pub use crate::ir::heap::{HeapData, HeapStyle};
pub use crate::ir::instructions::{
InstructionData, Opcode, ValueList, ValueListPool, VariableArgs,
};

View File

@@ -22,11 +22,9 @@ use crate::isa::TargetIsa;
use crate::trace;
mod globalvalue;
mod heap;
mod table;
use self::globalvalue::expand_global_value;
use self::heap::{expand_heap_addr, expand_heap_load, expand_heap_store};
use self::table::expand_table_addr;
fn imm_const(pos: &mut FuncCursor, arg: Value, imm: Imm64, is_signed: bool) -> Value {
@@ -71,23 +69,6 @@ pub fn simple_legalize(func: &mut ir::Function, cfg: &mut ControlFlowGraph, isa:
opcode: ir::Opcode::GlobalValue,
global_value,
} => expand_global_value(inst, &mut pos.func, isa, global_value),
InstructionData::HeapAddr {
opcode: ir::Opcode::HeapAddr,
heap,
arg,
offset,
size,
} => expand_heap_addr(inst, &mut pos.func, cfg, isa, heap, arg, offset, size),
InstructionData::HeapLoad {
opcode: ir::Opcode::HeapLoad,
heap_imm,
arg,
} => expand_heap_load(inst, &mut pos.func, cfg, isa, heap_imm, arg),
InstructionData::HeapStore {
opcode: ir::Opcode::HeapStore,
heap_imm,
args,
} => expand_heap_store(inst, &mut pos.func, cfg, isa, heap_imm, args[0], args[1]),
InstructionData::StackLoad {
opcode: ir::Opcode::StackLoad,
stack_slot,

View File

@@ -8,8 +8,8 @@ pub use crate::ir::immediates::{Ieee32, Ieee64, Imm64, Offset32, Uimm32, Uimm64,
pub use crate::ir::types::*;
pub use crate::ir::{
dynamic_to_fixed, AtomicRmwOp, Block, Constant, DataFlowGraph, DynamicStackSlot, FuncRef,
GlobalValue, Heap, HeapImm, Immediate, InstructionData, JumpTable, MemFlags, Opcode, StackSlot,
Table, TrapCode, Type, Value,
GlobalValue, Immediate, InstructionData, JumpTable, MemFlags, Opcode, StackSlot, Table,
TrapCode, Type, Value,
};
use crate::isle_common_prelude_methods;
use crate::machinst::isle::*;

View File

@@ -518,9 +518,8 @@ mod tests {
fn display_default() {
let b = builder();
let f = Flags::new(b);
assert_eq!(
f.to_string(),
r#"[shared]
let actual = f.to_string();
let expected = r#"[shared]
opt_level = "none"
tls_model = "none"
libcall_call_conv = "isa_default"
@@ -537,7 +536,6 @@ avoid_div_traps = false
enable_float = true
enable_nan_canonicalization = false
enable_pinned_reg = false
use_pinned_reg_as_heap_base = false
enable_simd = false
enable_atomics = true
enable_safepoints = false
@@ -551,8 +549,15 @@ enable_jump_tables = true
enable_heap_access_spectre_mitigation = true
enable_table_access_spectre_mitigation = true
enable_incremental_compilation_cache_checks = false
"#
);
"#;
if actual != expected {
panic!(
"Default settings do not match expectations:\n\n{}",
similar::TextDiff::from_lines(expected, &actual)
.unified_diff()
.header("expected", "actual")
);
}
assert_eq!(f.opt_level(), super::OptLevel::None);
assert_eq!(f.enable_simd(), false);
}

View File

@@ -63,7 +63,6 @@ use crate::entity::SparseSet;
use crate::flowgraph::{BlockPredecessor, ControlFlowGraph};
use crate::ir;
use crate::ir::entities::AnyEntity;
use crate::ir::immediates::HeapImmData;
use crate::ir::instructions::{BranchInfo, CallInfo, InstructionFormat, ResolvedConstraint};
use crate::ir::{
types, ArgumentPurpose, Block, Constant, DynamicStackSlot, FuncRef, Function, GlobalValue,
@@ -404,49 +403,6 @@ impl<'a> Verifier<'a> {
Ok(())
}
fn verify_heaps(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(isa) = self.isa {
for (heap, heap_data) in &self.func.heaps {
let base = heap_data.base;
if !self.func.global_values.is_valid(base) {
return errors.nonfatal((heap, format!("invalid base global value {}", base)));
}
let pointer_type = isa.pointer_type();
let base_type = self.func.global_values[base].global_type(isa);
if base_type != pointer_type {
errors.report((
heap,
format!(
"heap base has type {}, which is not the pointer type {}",
base_type, pointer_type
),
));
}
if let ir::HeapStyle::Dynamic { bound_gv, .. } = heap_data.style {
if !self.func.global_values.is_valid(bound_gv) {
return errors
.nonfatal((heap, format!("invalid bound global value {}", bound_gv)));
}
let bound_type = self.func.global_values[bound_gv].global_type(isa);
if pointer_type != bound_type {
errors.report((
heap,
format!(
"heap pointer type {} differs from the type of its bound, {}",
pointer_type, bound_type
),
));
}
}
}
}
Ok(())
}
fn verify_tables(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(isa) = self.isa {
for (table, table_data) in &self.func.tables {
@@ -676,13 +632,6 @@ impl<'a> Verifier<'a> {
UnaryGlobalValue { global_value, .. } => {
self.verify_global_value(inst, global_value, errors)?;
}
HeapLoad { heap_imm, .. } | HeapStore { heap_imm, .. } => {
let HeapImmData { heap, .. } = self.func.dfg.heap_imms[heap_imm];
self.verify_heap(inst, heap, errors)?;
}
HeapAddr { heap, .. } => {
self.verify_heap(inst, heap, errors)?;
}
TableAddr { table, .. } => {
self.verify_table(inst, table, errors)?;
}
@@ -878,19 +827,6 @@ impl<'a> Verifier<'a> {
}
}
fn verify_heap(
&self,
inst: Inst,
heap: ir::Heap,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.heaps.is_valid(heap) {
errors.nonfatal((inst, self.context(inst), format!("invalid heap {}", heap)))
} else {
Ok(())
}
}
fn verify_table(
&self,
inst: Inst,
@@ -1557,20 +1493,6 @@ impl<'a> Verifier<'a> {
_ => {}
}
}
ir::InstructionData::HeapAddr { heap, arg, .. } => {
let index_type = self.func.dfg.value_type(arg);
let heap_index_type = self.func.heaps[heap].index_type;
if index_type != heap_index_type {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"index type {} differs from heap index type {}",
index_type, heap_index_type,
),
));
}
}
ir::InstructionData::TableAddr { table, arg, .. } => {
let index_type = self.func.dfg.value_type(arg);
let table_index_type = self.func.tables[table].index_type;
@@ -1775,7 +1697,6 @@ impl<'a> Verifier<'a> {
pub fn run(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.verify_global_values(errors)?;
self.verify_heaps(errors)?;
self.verify_tables(errors)?;
self.verify_jump_tables(errors)?;
self.typecheck_entry_block_params(errors)?;

View File

@@ -5,7 +5,6 @@
use crate::entity::SecondaryMap;
use crate::ir::entities::AnyEntity;
use crate::ir::immediates::{HeapImmData, Uimm32};
use crate::ir::{Block, DataFlowGraph, Function, Inst, SigRef, Type, Value, ValueDef};
use crate::packed_option::ReservedValue;
use alloc::string::{String, ToString};
@@ -57,13 +56,6 @@ pub trait FuncWriter {
self.write_entity_definition(w, func, gv.into(), gv_data)?;
}
for (heap, heap_data) in &func.heaps {
if !heap_data.index_type.is_invalid() {
any = true;
self.write_entity_definition(w, func, heap.into(), heap_data)?;
}
}
for (table, table_data) in &func.tables {
if !table_data.index_type.is_invalid() {
any = true;
@@ -478,54 +470,6 @@ pub fn write_operands(w: &mut dyn Write, dfg: &DataFlowGraph, inst: Inst) -> fmt
dynamic_stack_slot,
..
} => write!(w, " {}, {}", arg, dynamic_stack_slot),
HeapLoad {
opcode: _,
heap_imm,
arg,
} => {
let HeapImmData {
flags,
heap,
offset,
} = dfg.heap_imms[heap_imm];
write!(
w,
" {heap} {flags} {arg}{optional_offset}",
optional_offset = if offset == Uimm32::from(0) {
"".to_string()
} else {
format!("+{offset}")
}
)
}
HeapStore {
opcode: _,
heap_imm,
args,
} => {
let HeapImmData {
flags,
heap,
offset,
} = dfg.heap_imms[heap_imm];
let [index, value] = args;
write!(
w,
" {heap} {flags} {index}{optional_offset}, {value}",
optional_offset = if offset == Uimm32::from(0) {
"".to_string()
} else {
format!("+{offset}")
}
)
}
HeapAddr {
heap,
arg,
offset,
size,
..
} => write!(w, " {}, {}, {}, {}", heap, arg, offset, size),
TableAddr { table, arg, .. } => write!(w, " {}, {}", table, arg),
Load {
flags, arg, offset, ..

View File

@@ -1,8 +0,0 @@
digraph {
node [
shape=record,
fontsize=10,
fontname="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans"
]
"static" [label="mapped\npages|unmapped\npages|offset_guard\npages"]
}

View File

@@ -1,26 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.42.3 (0)
-->
<!-- Title: %3 Pages: 1 -->
<svg width="209pt" height="45pt"
viewBox="0.00 0.00 209.00 45.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 41)">
<title>%3</title>
<polygon fill="white" stroke="transparent" points="-4,4 -4,-41 205,-41 205,4 -4,4"/>
<!-- static -->
<g id="node1" class="node">
<title>static</title>
<polygon fill="none" stroke="black" points="0,-0.5 0,-36.5 201,-36.5 201,-0.5 0,-0.5"/>
<text text-anchor="middle" x="28" y="-21.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">mapped</text>
<text text-anchor="middle" x="28" y="-10.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">pages</text>
<polyline fill="none" stroke="black" points="56,-0.5 56,-36.5 "/>
<text text-anchor="middle" x="90" y="-21.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">unmapped</text>
<text text-anchor="middle" x="90" y="-10.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">pages</text>
<polyline fill="none" stroke="black" points="124,-0.5 124,-36.5 "/>
<text text-anchor="middle" x="162.5" y="-21.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">offset_guard</text>
<text text-anchor="middle" x="162.5" y="-10.5" font-family="Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans" font-size="10.00">pages</text>
</g>
</g>
</svg>


View File

@@ -559,148 +559,6 @@ GV = [colocated] symbol Name
:arg Name: External name.
:result GV: Global value.
### Heaps
Code compiled from WebAssembly or asm.js runs in a sandbox where it can't access
all process memory. Instead, it is given a small set of memory areas to work
in, and all accesses are bounds checked. Cranelift models this through the
concept of *heaps*.
A heap is declared in the function preamble and can be accessed with the
`heap_addr` instruction that [traps] on out-of-bounds accesses or
returns a pointer that is guaranteed to trap. Heap addresses can be smaller than
the native pointer size, for example unsigned `i32` offsets on a 64-bit
architecture.
![Heap address space layout](./heap.svg)
A heap appears as three consecutive ranges of address space:
1. The *mapped pages* are the [accessible] memory range in the heap. A
heap may have a minimum guaranteed size which means that some mapped pages
are always present.
2. The *unmapped pages* is a possibly empty range of address space that may be
mapped in the future when the heap is grown. They are [addressable] but
not [accessible].
3. The *offset-guard pages* is a range of address space that is guaranteed to
always cause a trap when accessed. It is used to optimize bounds checking for
heap accesses with a shared base pointer. They are [addressable] but
not [accessible].
The *heap bound* is the total size of the mapped and unmapped pages. This is
the bound that `heap_addr` checks against. Memory accesses inside the
heap bounds can trap if they hit an unmapped page (which is not
[accessible]).
Two styles of heaps are supported, *static* and *dynamic*. They behave
differently when resized.
#### Static heaps
A *static heap* starts out with all the address space it will ever need, so it
never moves to a different address. At the base address is a number of mapped
pages corresponding to the heap's current size. Then follows a number of
unmapped pages where the heap can grow up to its maximum size. After the
unmapped pages follow the offset-guard pages which are also guaranteed to
generate a trap when accessed.
H = static Base, min MinBytes, bound BoundBytes, offset_guard OffsetGuardBytes
Declare a static heap in the preamble.
:arg Base: Global value holding the heap's base address.
:arg MinBytes: Guaranteed minimum heap size in bytes. Accesses below this
size will never trap.
:arg BoundBytes: Fixed heap bound in bytes. This defines the amount of
address space reserved for the heap, not including the offset-guard
pages.
:arg OffsetGuardBytes: Size of the offset-guard pages in bytes.
#### Dynamic heaps
A *dynamic heap* can be relocated to a different base address when it is
resized, and its bound can move dynamically. The offset-guard pages move when
the heap is resized. The bound of a dynamic heap is stored in a global value.
H = dynamic Base, min MinBytes, bound BoundGV, offset_guard OffsetGuardBytes
Declare a dynamic heap in the preamble.
:arg Base: Global value holding the heap's base address.
:arg MinBytes: Guaranteed minimum heap size in bytes. Accesses below this
size will never trap.
:arg BoundGV: Global value containing the current heap bound in bytes.
:arg OffsetGuardBytes: Size of the offset-guard pages in bytes.
#### Heap examples
Some Wasm VMs prefer to use fixed heaps with a 4 GB bound and 2 GB of
offset-guard pages when running WebAssembly code on 64-bit CPUs. The combination
of a 4 GB fixed bound and 1-byte bounds checks means that no code needs to be
generated for bounds checks at all:
```
test verifier
function %add_members(i32, i64 vmctx) -> f32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v5: i64):
v1 = heap_addr.i64 heap0, v0, 1
v2 = load.f32 v1+16
v3 = load.f32 v1+20
v4 = fadd v2, v3
return v4
}
```
A static heap can also be used for 32-bit code when the WebAssembly module
declares a small upper bound on its memory. A 1 MB static bound with a single 4
KB offset-guard page still has opportunities for sharing bounds checking code:
```
test verifier
function %add_members(i32, i32 vmctx) -> f32 {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x10_0000, offset_guard 0x1000
block0(v0: i32, v5: i32):
v1 = heap_addr.i32 heap0, v0, 1
v2 = load.f32 v1+16
v3 = load.f32 v1+20
v4 = fadd v2, v3
return v4
}
```
If the upper bound on the heap size is too large, a dynamic heap is required
instead.
Finally, a runtime environment that simply allocates a heap with
`malloc()` may not have any offset-guard pages at all. In that case,
full bounds checking is required for each access:
```
test verifier
function %add_members(i32, i64 vmctx) -> f32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+64
gv2 = load.i32 notrap aligned gv0+72
heap0 = dynamic gv1, min 0x1000, bound gv2, offset_guard 0
block0(v0: i32, v6: i64):
v1 = heap_addr.i64 heap0, v0, 20
v2 = load.f32 v1+16
v3 = heap_addr.i64 heap0, v0, 24
v4 = load.f32 v3+20
v5 = fadd v2, v4
return v5
}
```
### Tables
Code compiled from WebAssembly often needs access to objects outside of its
@@ -728,7 +586,7 @@ T = dynamic Base, min MinElements, bound BoundGV, element_size ElementSize
:arg Base: Global value holding the table's base address.
:arg MinElements: Guaranteed minimum table size in elements.
:arg BoundGV: Global value containing the current heap bound in elements.
:arg BoundGV: Global value containing the current table bound in elements.
:arg ElementSize: Size of each element.
### Constant materialization

View File

@@ -374,79 +374,3 @@ pointers are always 8 bytes, and laid out sequentially in memory. Even for 32 bi
Currently, we only support requesting heaps, however this is a generic mechanism that should
be able to introduce any sort of environment support that we may need later. (e.g. tables, global values, external functions)
##### `heap` directive
The `heap` directive allows a test to request a heap to be allocated and passed to the test via the environment struct.
A sample heap annotation is the following:
```
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
```
This indicates the following:
* `static`: We have requested a non-resizable and non-movable static heap.
* `size=0x1000`: It has to have a size of 4096 bytes.
* `ptr=vmctx+0`: The pointer to the start of this heap is placed at offset 0 in the `vmctx` struct
* `bound=vmctx+8`: The pointer to the end of this heap is placed at offset 8 in the `vmctx` struct
The `ptr` and `bound` arguments make explicit where the pointers to the start and end of the heap memory are placed in
the environment struct: `vmctx+0` means that the pointer to the start lives at offset 0 of the environment struct and,
similarly, `vmctx+8` means that the pointer to the end lives at offset 8.
You can combine multiple heap annotations, in which case, their pointers are laid out sequentially in memory in
the order that the annotations appear in the source file.
```
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
```
An invalid or unexpected offset will raise an error when the test is run.
See the diagram below for how the `vmctx` struct ends up with multiple heaps:
```
┌─────────────────────┐ vmctx+0
│heap0: start address │
├─────────────────────┤ vmctx+8
│heap0: end address │
├─────────────────────┤ vmctx+16
│heap1: start address │
├─────────────────────┤ vmctx+24
│heap1: end address │
├─────────────────────┤ vmctx+32
│etc... │
└─────────────────────┘
```
With this setup, you can now use the global values to declare heaps, and load / store to them.
Example:
```
function %heap_load_store(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_load_store(0, 1) == 1
```
### `test interpret`
Test the CLIF interpreter
This test supports the same commands as `test run`, but runs the code in the cranelift
interpreter instead of the host machine.

View File

@@ -8,10 +8,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i64, i64, i64 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
;; Initial load. This will not be reused by anything below, even
;; though it does access the same address.

View File

@@ -8,10 +8,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
v4 = load.i32 vmctx v0+16

View File

@@ -7,11 +7,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
brz v2, block1
jump block2

View File

@@ -8,7 +8,6 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
@@ -16,17 +15,17 @@ block0(v0: i64, v1: i32):
jump block2
block1:
v2 = heap_addr.i64 heap0, v1, 68, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+64
jump block3(v3)
block2:
v4 = heap_addr.i64 heap0, v1, 132, 0
v4 = global_value.i64 gv1
v5 = load.i32 v4+128
jump block3(v5)
block3(v6: i32):
v7 = heap_addr.i64 heap0, v1, 68, 0
v7 = global_value.i64 gv1
v8 = load.i32 v7+64
;; load should survive:
; check: v8 = load.i32 v7+64

View File

@@ -9,14 +9,13 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
;; This should reuse the load above.
v4 = heap_addr.i64 heap0, v1, 12, 0
v4 = global_value.i64 gv1
v5 = load.i32 v4+8
; check: v5 -> v3
@@ -38,15 +37,14 @@ block0(v0: i64, v1: i32):
function %f1(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
store.i32 v1, v2+8
;; This load should pick up the store above.
v3 = heap_addr.i64 heap0, v1, 12, 0
v3 = global_value.i64 gv1
v4 = load.i32 v3+8
; check: v4 -> v1

View File

@@ -1,88 +0,0 @@
test compile precise-output
set unwind_info=false
set enable_heap_access_spectre_mitigation=true
target aarch64
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; mov w8, w1
; ldr x9, [x0]
; mov x9, x9
; add x10, x0, x1, UXTW
; movz x7, #0
; subs xzr, x8, x9
; csel x0, x7, x10, hi
; csdb
; ret
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; mov w6, w1
; add x7, x0, x1, UXTW
; movz x5, #0
; subs xzr, x6, #65536
; csel x0, x5, x7, hi
; csdb
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; mov w10, w1
; movz x9, #24
; adds x11, x10, x9
; b.lo 8 ; udf
; ldr x12, [x0]
; add x13, x0, x1, UXTW
; add x13, x13, #16
; movz x10, #0
; subs xzr, x11, x12
; csel x0, x10, x13, hi
; csdb
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; mov w8, w1
; add x9, x0, x1, UXTW
; add x9, x9, #16
; movz x6, #65512
; movz x10, #0
; subs xzr, x8, x6
; csel x0, x10, x9, hi
; csdb
; ret

View File

@@ -1,86 +0,0 @@
test compile precise-output
set unwind_info=false
target riscv64
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; uext.w a6,a1
; ld a7,0(a0)
; addi t3,a7,0
; add a7,a0,a6
; ugt a5,a6,t3##ty=i64
; li t3,0
; selectif_spectre_guard a0,t3,a7##test=a5
; ret
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; uext.w a6,a1
; add a5,a0,a6
; lui a3,16
; ugt a6,a6,a3##ty=i64
; li a7,0
; selectif_spectre_guard a0,a7,a5##test=a6
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; uext.w t4,a1
; li a7,24
; add t0,t4,a7
; ult t1,t0,t4##ty=i64
; trap_if t1,heap_oob
; ld t1,0(a0)
; add t2,a0,t4
; addi t2,t2,16
; ugt t4,t0,t1##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t2##test=t4
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; uext.w a7,a1
; add t3,a0,a7
; addi t3,t3,16
; lui a5,16
; addi a5,a5,4072
; ugt t4,a7,a5##ty=i64
; li t0,0
; selectif_spectre_guard a0,t0,t3##test=t4
; ret

View File

@@ -1,81 +0,0 @@
test compile precise-output
target s390x
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; llgfr %r4, %r3
; lghi %r3, 0
; ag %r3, 0(%r2)
; agr %r2, %r4
; lghi %r5, 0
; clgr %r4, %r3
; locgrh %r2, %r5
; br %r14
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; llgfr %r4, %r3
; agr %r2, %r4
; lghi %r3, 0
; clgfi %r4, 65536
; locgrh %r2, %r3
; br %r14
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; llgfr %r5, %r3
; lghi %r4, 24
; algfr %r4, %r3
; jle 6 ; trap
; lg %r3, 0(%r2)
; agrk %r5, %r2, %r5
; aghik %r2, %r5, 16
; lghi %r5, 0
; clgr %r4, %r3
; locgrh %r2, %r5
; br %r14
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; llgfr %r5, %r3
; agr %r2, %r5
; aghi %r2, 16
; lghi %r4, 0
; clgfi %r5, 65512
; locgrh %r2, %r4
; br %r14

View File

@@ -7,7 +7,6 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast {
gv2 = load.i64 notrap aligned gv1
gv3 = vmctx
gv4 = load.i64 notrap aligned readonly gv3+504
heap0 = static gv4, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
sig0 = (i64 vmctx, i64, i32, i32, i32) -> i32 fast
sig1 = (i64 vmctx, i64, i32, i32, i32) -> i32 fast
sig2 = (i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast

View File

@@ -1,86 +0,0 @@
test compile precise-output
set enable_heap_access_spectre_mitigation=false
target x86_64
;; Calculate a heap address on a dynamically-allocated memory with Spectre
;; mitigations disabled. This is a 7-instruction sequence with loads, ignoring
;; intermediate `mov`s.
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0x8000, 0
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %edi, %eax
; movq %rax, %r10
; addq %r10, $32768, %r10
; jnb ; ud2 heap_oob ;
; movq 8(%rsi), %r11
; cmpq %r11, %r10
; jbe label1; j label2
; block1:
; addq %rax, 0(%rsi), %rax
; addq %rax, $32768, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
; block2:
; ud2 heap_oob
;; For a static memory with no Spectre mitigations, we observe a smaller amount
;; of bounds checking: the offset check (`cmp + jbe + j`) and the offset
;; calculation (`add`)--4 instructions.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; cmpq $4096, %rax
; jbe label1; j label2
; block1:
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
; block2:
; ud2 heap_oob
;; For a static memory with no Spectre mitigations and the "right" size (4GB
;; memory, 2GB guard regions), Cranelift emits no bounds checking, simply
;; `add`--a single instruction.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -1,147 +0,0 @@
test compile precise-output
target x86_64
;; Calculate a heap address on a dynamically-allocated memory. Because the
;; Spectre mitigations are on by default (i.e.,
;; `set enable_heap_access_spectre_mitigation=true`), this code not only does
;; the dynamic bounds check (`add + jnb + cmp + jbe + j`) but also re-compares
;; the address to the upper bound (`add + xor + cmp + cmov`)--Cranelift's
;; Spectre mitigation. With loads and ignoring intermediate `mov`s, this amounts
;; to a 10-instruction sequence.
;;
;; And it uses quite a few registers; see this breakdown of what each register
;; generally contains:
;; - %rax holds the passed-in heap offset (argument #1) and ends up holding the
;; final address
;; - %rcx also holds the passed-in heap offset; checked for overflow when added
;; to the `0x8000` immediate
;; - %rsi holds the VM context pointer (argument #2)
;; - %rdi holds the heap limit (computed from argument #2)
;; - %rdx holds the null pointer
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0x8000, 0
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %edi, %eax
; movq %rax, %rdi
; addq %rdi, $32768, %rdi
; jnb ; ud2 heap_oob ;
; movq 8(%rsi), %rcx
; addq %rax, 0(%rsi), %rax
; addq %rax, $32768, %rax
; xorq %rsi, %rsi, %rsi
; cmpq %rcx, %rdi
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;; The heap address calculation for this statically-allocated memory checks that
;; the passed offset (%r11) is within bounds (`cmp + jbe + j`) and then includes
;; the same Spectre mitigation as above. This results in a 7-instruction
;; sequence (ignoring `mov`s).
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r9d
; movq %r9, %rax
; addq %rax, 0(%rdi), %rax
; xorq %r8, %r8, %r8
; cmpq $4096, %r9
; cmovnbeq %r8, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;; When a static memory is the "right" size (4GB memory, 2GB guard regions), the
;; Spectre mitigation is not present. Cranelift relies on the memory permissions
;; and emits no bounds checking, simply `add`--a single instruction.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %esi
; movq %rsi, %r11
; addq %r11, $24, %r11
; jnb ; ud2 heap_oob ;
; movq %rdi, %rax
; addq %rax, %rsi, %rax
; addq %rax, $16, %rax
; xorq %rsi, %rsi, %rsi
; cmpq 0(%rdi), %r11
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r10d
; movq %rdi, %rax
; addq %rax, %r10, %rax
; addq %rax, $16, %rax
; xorq %r9, %r9, %r9
; cmpq $65512, %r10
; cmovnbeq %r9, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -1,33 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target aarch64
target x86_64
;; Test that when both (1) dynamic memories and (2) heap access spectre
;; mitigations are enabled, we deduplicate the bounds check between the two.
function %wasm_load(i64 vmctx, i32) -> i32 wasmtime_system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+88
gv2 = load.i64 notrap aligned gv0+80
heap0 = dynamic gv2, min 0, bound gv1, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
v3 = load.i32 little heap v2
return v3
}
; check: block0(v0: i64, v1: i32):
; nextln: v4 = uextend.i64 v1
; nextln: v5 = iconst.i64 4
; nextln: v6 = uadd_overflow_trap v4, v5, heap_oob ; v5 = 4
; nextln: v7 = load.i64 notrap aligned v0+88
; nextln: v8 = load.i64 notrap aligned v0+80
; nextln: v9 = iadd v8, v4
; nextln: v10 = iconst.i64 0
; nextln: v11 = icmp ugt v6, v7
; nextln: v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
; nextln: v2 -> v12
; nextln: v3 = load.i32 little heap v2
; nextln: return v3

View File

@@ -1,22 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target x86_64
;; The offset guard is large enough that we don't need explicit bounds checks.
function %test(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1000, offset_guard 0xffff_ffff, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
return v2
}
; check: block0(v0: i64, v1: i32):
; nextln: v3 = uextend.i64 v1
; nextln: v4 = load.i64 notrap aligned v0
; nextln: v5 = iadd v4, v3
; nextln: v2 -> v5
; nextln: return v2

View File

@@ -1,28 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target x86_64
;; The offset guard is not large enough to avoid explicit bounds checks.
;; Additionally, the explicit bounds check gets deduped with the Spectre
;; mitigation.
function %test(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1000, offset_guard 0xffff_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
return v2
}
; check: block0(v0: i64, v1: i32):
; nextln: v3 = uextend.i64 v1
; nextln: v4 = iconst.i64 4092
; nextln: v5 = load.i64 notrap aligned v0
; nextln: v6 = iadd v5, v3
; nextln: v7 = iconst.i64 0
; nextln: v8 = icmp ugt v3, v4 ; v4 = 4092
; nextln: v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
; nextln: v2 -> v9
; nextln: return v2

View File

@@ -9,14 +9,13 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
jump block1(v0, v1)
block1(v2: i32, v3: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
v6 = load.i32 notrap aligned readonly v5
v7 = iadd v2, v6
brz v2, block3(v2)
@@ -33,11 +32,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4
; nextln: v5 = global_value.i64 gv1
; nextln: v6 = load.i32 notrap aligned readonly v5
; nextln: jump block1(v0, v1)
; nextln:

View File

@@ -10,11 +10,10 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
jump block1(v0, v1)
block1(v2: i32, v3: i64):
@@ -34,11 +33,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4 ; v4 = 1
; nextln: v5 = global_value.i64 gv1
; nextln: jump block1(v0, v1)
; nextln:
; nextln: block1(v2: i32, v3: i64):

View File

@@ -10,14 +10,13 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
jump block1(v0, v1)
block1(v2: i32, v3: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
v6 = load.i32 aligned readonly v5
v7 = iadd v2, v6
brz v2, block3(v2)
@@ -34,11 +33,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4
; nextln: v5 = global_value.i64 gv1
; nextln: jump block1(v0, v1)
; nextln:
; nextln: block1(v2: i32, v3: i64):


@@ -49,34 +49,3 @@ block0:
v2 = bxor v0, v1
return v2
}
; Declare static heaps.
function %sheap(i32, i64 vmctx) -> i64 {
heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000
heap2 = static gv5, offset_guard 0x1000, bound 0x1_0000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
; check: heap2 = static gv5, min 0, bound 0x0001_0000, offset_guard 4096
block0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap1, v1, 0, 0
; check: v3 = heap_addr.i64 heap1, v1, 0, 0
return v3
}
; Declare dynamic heaps.
function %dheap(i32, i64 vmctx) -> i64 {
heap1 = dynamic gv5, min 0x1_0000, bound gv6, offset_guard 0x8000_0000
heap2 = dynamic gv5, bound gv6, offset_guard 0x1000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
gv6 = iadd_imm.i64 gv4, 72
; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, offset_guard 0x8000_0000
; check: heap2 = dynamic gv5, min 0, bound gv6, offset_guard 4096
block0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap2, v1, 0, 0
; check: v3 = heap_addr.i64 heap2, v1, 0, 0
return v3
}


@@ -71,19 +71,16 @@ block0(v0: f64):
;; Tests a fdemote+load combo which some backends may optimize
function %fdemote_load(i64 vmctx, i64, f64) -> f32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
function %fdemote_load(i64, f64) -> f32 {
ss0 = explicit_slot 16
block0(v0: i64, v1: i64, v2: f64):
v3 = heap_addr.i64 heap0, v1, 0, 8
block0(v1: i64, v2: f64):
v3 = stack_addr.i64 ss0
store.f64 v2, v3
v4 = load.f64 v3
v5 = fdemote.f32 v4
return v5
}
; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
; run: %fdemote_load(0, 0x0.0) == 0x0.0
; run: %fdemote_load(1, 0x0.1) == 0x0.1
; run: %fdemote_load(2, 0x0.2) == 0x0.2


@@ -79,20 +79,16 @@ block0(v0: f32):
;; Tests a fpromote+load combo which some backends may optimize
function %fpromote_load(i64 vmctx, i64, f32) -> f64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
function %fpromote_load(i64, f32) -> f64 {
ss0 = explicit_slot 16
block0(v0: i64, v1: i64, v2: f32):
v3 = heap_addr.i64 heap0, v1, 0, 4
block0(v1: i64, v2: f32):
v3 = stack_addr.i64 ss0
store.f32 v2, v3
v4 = load.f32 v3
v5 = fpromote.f64 v4
return v5
}
; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
; run: %fpromote_load(0, 0x0.0) == 0x0.0
; run: %fpromote_load(1, 0x0.1) == 0x0.1
; run: %fpromote_load(2, 0x0.2) == 0x0.2


@@ -1,24 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
; Store a value in the heap using `heap_addr` and load it using `global_value`
function %store_load(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 0
store.i32 v2, v3
v4 = global_value.i64 gv1
v5 = load.i32 v4
return v5
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %store_load(0, 1) == 1
; run: %store_load(0, -1) == -1


@@ -1,223 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64(0, 1) == 1
; run: %static_heap_i64(0, -1) == -1
; run: %static_heap_i64(16, 1) == 1
; run: %static_heap_i64(16, -1) == -1
function %static_heap_i32(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32(0, 1) == 1
; run: %static_heap_i32(0, -1) == -1
; run: %static_heap_i32(16, 1) == 1
; run: %static_heap_i32(16, -1) == -1
function %heap_no_min(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_no_min(0, 1) == 1
; run: %heap_no_min(0, -1) == -1
; run: %heap_no_min(16, 1) == 1
; run: %heap_no_min(16, -1) == -1
function %dynamic_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_i64(0, 1) == 1
; run: %dynamic_i64(0, -1) == -1
; run: %dynamic_i64(16, 1) == 1
; run: %dynamic_i64(16, -1) == -1
function %dynamic_i32(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_i32(0, 1) == 1
; run: %dynamic_i32(0, -1) == -1
; run: %dynamic_i32(16, 1) == 1
; run: %dynamic_i32(16, -1) == -1
function %multi_load_store(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+16
gv3 = load.i64 notrap aligned gv0+24
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i64 0
v4 = iconst.i32 0
; Store lhs in heap0
v5 = heap_addr.i64 heap0, v3, 0, 4
store.i32 v1, v5
; Store rhs in heap1
v6 = heap_addr.i64 heap1, v4, 0, 4
store.i32 v2, v6
v7 = load.i32 v5
v8 = load.i32 v6
v9 = iadd.i32 v7, v8
return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %multi_load_store(1, 2) == 3
; run: %multi_load_store(4, 5) == 9
; Uses multiple heaps, but heap0 refers to the second heap, and heap1 refers to the first heap
; This is a regression test for the interpreter
function %out_of_order(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+16
gv3 = load.i64 notrap aligned gv0+24
heap0 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
heap1 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i32 0
v4 = iconst.i64 0
; Store lhs in heap0
v5 = heap_addr.i64 heap0, v3, 0, 4
store.i32 v1, v5
; Store rhs in heap1
v6 = heap_addr.i64 heap1, v4, 0, 4
store.i32 v2, v6
v7 = load.i32 v5
v8 = load.i32 v6
v9 = iadd.i32 v7, v8
return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %out_of_order(1, 2) == 3
; run: %out_of_order(4, 5) == 9
function %unaligned_access(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %unaligned_access(0, 1) == 1
; run: %unaligned_access(0, -1) == -1
; run: %unaligned_access(1, 1) == 1
; run: %unaligned_access(1, -1) == -1
; run: %unaligned_access(2, 1) == 1
; run: %unaligned_access(2, -1) == -1
; run: %unaligned_access(3, 1) == 1
; run: %unaligned_access(3, -1) == -1
; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
function %iadd_imm(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
block0(v0: i64, v1: i32):
v2 = iconst.i64 0
v3 = heap_addr.i64 heap0, v2, 0, 4
store.i32 v1, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %iadd_imm(1) == 1
; run: %iadd_imm(-1) == -1
function %heap_limit_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0, bound 0x8, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x8, ptr=vmctx+0, bound=vmctx+8
; run: %heap_limit_i64(0, 1) == 1
; run: %heap_limit_i64(0, -1) == -1
; run: %heap_limit_i64(4, 1) == 1
; run: %heap_limit_i64(4, -1) == -1


@@ -1,98 +0,0 @@
test run
target x86_64
target s390x
target aarch64
target riscv64
function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 8
v4 = iconst.i64 42
store.i64 v4, v3
v5 = load.i64 v3
v6 = iadd.i64 v5, v2
store.i64 v6, v3
v7 = load.i64 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
v4 = iconst.i32 42
store.i32 v4, v3
v5 = load.i32 v3
v6 = iadd.i32 v5, v2
store.i32 v6, v3
v7 = load.i32 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_i8(i64 vmctx, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i8):
v3 = heap_addr.i64 heap0, v1, 0, 4
v4 = iconst.i8 42
store.i8 v4, v3
v5 = load.i8 v3
v6 = iadd.i8 v5, v2
store.i8 v6, v3
v7 = load.i8 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_isub_iand_ior_ixor_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 8
store.i64 v2, v3
v4 = load.i64 v3
v5 = iconst.i64 1
v6 = iadd.i64 v5, v4
store.i64 v6, v3
v7 = load.i64 v3
v8 = iconst.i64 2
v9 = load.i64 v3
v10 = isub.i64 v9, v8
store.i64 v10, v3
v11 = load.i64 v3
v12 = iconst.i64 0xf
v13 = band.i64 v12, v11
store.i64 v13, v3
v14 = iconst.i64 0x10
v15 = load.i64 v3
v16 = bor.i64 v15, v14
store.i64 v16, v3
v17 = load.i64 v3
v18 = iconst.i64 0xff
v19 = bxor.i64 v17, v18
store.i64 v19, v3
v20 = load.i64 v3
return v20
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 0x1234) == 236


@@ -1,25 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
function %fvdemote_test(i64 vmctx, i64, f64x2) -> f32x4 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: f64x2):
v3 = heap_addr.i64 heap0, v1, 0, 16
store.f64x2 v2, v3
v4 = load.f64x2 v3
v5 = fvdemote v4
return v5
}
; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
; run: %fvdemote_test(0, [0x0.0 0x0.0]) == [0x0.0 0x0.0 0x0.0 0x0.0]
; run: %fvdemote_test(1, [0x0.1 0x0.2]) == [0x0.1 0x0.2 0x0.0 0x0.0]
; run: %fvdemote_test(2, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
; run: %fvdemote_test(8, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
; run: %fvdemote_test(16, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]


@@ -1,26 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
function %fvpromote_low_test(i64 vmctx, i64, f32x4) -> f64x2 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: f32x4):
v3 = heap_addr.i64 heap0, v1, 0, 16
store.f32x4 v2, v3
v4 = load.f32x4 v3
v5 = fvpromote_low v4
return v5
}
; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
; run: %fvpromote_low_test(0, [0x0.0 0x0.0 0x0.0 0x0.0]) == [0x0.0 0x0.0]
; run: %fvpromote_low_test(1, [0x0.1 0x0.2 0x0.0 0x0.0]) == [0x0.1 0x0.2]
; run: %fvpromote_low_test(2, [0x2.1 0x1.2 0x0.0 0x0.0]) == [0x2.1 0x1.2]
; run: %fvpromote_low_test(5, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]
; run: %fvpromote_low_test(16, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]


@@ -1,144 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
function %set_get_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = table_addr.i64 table0, v1, +0
store.i64 v2, v3
v4 = load.i64 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i64(0, 1) == 1
; run: %set_get_i64(0, 10) == 10
; run: %set_get_i64(1, 1) == 1
; run: %set_get_i64(1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %set_get_i64(10, 1) == 1
; run: %set_get_i64(10, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %set_get_i32(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i32):
;; Note here the offset +4
v3 = table_addr.i64 table0, v1, +4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i32(0, 1) == 1
; run: %set_get_i32(0, 10) == 10
; run: %set_get_i32(1, 1) == 1
; run: %set_get_i32(1, 0xC0FFEEEE) == 0xC0FFEEEE
; run: %set_get_i32(10, 1) == 1
; run: %set_get_i32(10, 0xC0FFEEEE) == 0xC0FFEEEE
function %set_get_i8(i64 vmctx, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i8):
v3 = table_addr.i64 table0, v1, +0
store.i8 v2, v3
v4 = load.i8 v3
return v4
}
; heap: static, size=2, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i8(0, 1) == 1
; run: %set_get_i8(0, 0xC0) == 0xC0
; run: %set_get_i8(1, 1) == 1
; run: %set_get_i8(1, 0xFF) == 0xFF
function %large_elm_size(i64 vmctx, i64, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 10240, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64, v3: i8):
v4 = table_addr.i64 table0, v1, +0
v5 = iadd.i64 v4, v2
store.i8 v3, v5
v6 = load.i8 v5
return v6
}
; heap: static, size=0xC800, ptr=vmctx+0, bound=vmctx+8
; run: %large_elm_size(0, 0, 1) == 1
; run: %large_elm_size(1, 0, 0xC0) == 0xC0
; run: %large_elm_size(0, 1, 1) == 1
; run: %large_elm_size(1, 1, 0xFF) == 0xFF
; run: %large_elm_size(0, 127, 1) == 1
; run: %large_elm_size(1, 127, 0xFF) == 0xFF
; run: %large_elm_size(0, 10239, 1) == 1
; run: %large_elm_size(1, 10239, 0xBB) == 0xBB
; Tests writing an i64 which covers 8 table entries at once
; Loads the first byte and the last to confirm that the slots were written
function %multi_elm_write(i64 vmctx, i64, i64) -> i8, i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = table_addr.i64 table0, v1, +0
v4 = table_addr.i64 table0, v1, +7
store.i64 v2, v3
v5 = load.i8 v3
v6 = load.i8 v4
return v5, v6
}
; heap: static, size=16, ptr=vmctx+0, bound=vmctx+8
;; When writing these test cases keep in mind that s390x is big endian!
;; We just make sure that the first and last byte are the same to deal with that.
; run: %multi_elm_write(0, 0xC0FFEEEE_FFEEEEC0) == [0xC0, 0xC0]
; run: %multi_elm_write(1, 0xAABBCCDD_EEFF00AA) == [0xAA, 0xAA]
function %heap_table(i64 vmctx, i64, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
table0 = dynamic gv1, element_size 9, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64, v3: i64):
; v1 - heap offset (bytes)
; v2 - table offset (elements)
; v3 - store/load value
v4 = heap_addr.i64 heap0, v1, 0, 0
v5 = table_addr.i64 table0, v2, +2
; Store via heap, load via table
store.i64 v3, v4
v6 = load.i64 v5
return v6
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_table(2, 0, 0xAABBCCDD_EEFF0011) == 0xAABBCCDD_EEFF0011
; run: %heap_table(11, 1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %heap_table(20, 2, 1) == 1
; run: %heap_table(29, 3, -10) == -10


@@ -6,11 +6,10 @@ target x86_64
function %eliminate_redundant_global_loads(i32, i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = heap_addr.i64 heap0, v0, 0, 1
v2 = global_value.i64 gv1
v3 = global_value.i64 gv1
v4 = iconst.i32 0
store.i32 notrap aligned v4, v2
@@ -18,7 +17,7 @@ block0(v0: i32, v1: i64):
return
}
; check: v2 = heap_addr.i64 heap0, v0, 0, 1
; check: v2 = global_value.i64 gv1
; check: v3 -> v2
; check: v4 = iconst.i32 0
; check: store notrap aligned v4, v2


@@ -3,12 +3,8 @@ target aarch64
target x86_64
function u0:2(i64 , i64) {
gv1 = load.i64 notrap aligned gv0
heap0 = static gv1
block0(v0: i64, v1: i64):
v16 = iconst.i32 6
v17 = heap_addr.i64 heap0, v16, 0, 1
v18 = load.i32 v17
v18 = load.i32 v0
v19 = iconst.i32 4
v20 = icmp ne v18, v19
v21 = uextend.i32 v20


@@ -1,45 +0,0 @@
test verifier
target x86_64
function %heap_base_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
heap0 = static gv1, offset_guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
block0(v0: i64):
return
}
function %invalid_base(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv1, bound gv0, offset_guard 0x1000, index_type i64 ; error: invalid base global value gv1
block0(v0: i64):
return
}
function %invalid_bound(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i64 ; error: invalid bound global value gv1
block0(v0: i64):
return
}
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16
block0(v0: i64):
return
}
function %heap_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx
heap0 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
block0(v0: i64, v1: i64):
v2 = heap_addr.i64 heap0, v1, 0, 0; error: index type i64 differs from heap index type i32
return
}


@@ -27,16 +27,19 @@
;; function u0:0(i32, i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;; heap0 = static gv1, min 0, bound 4096, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0021 v4 = heap_addr.i64 heap0, v0, 0, 4
;; @0021 v5 = load.i32 little heap v4
;; @0026 v6 = heap_addr.i64 heap0, v1, 0, 4
;; @0026 v7 = load.i32 little heap v6
;; @0029 v8 = iadd v5, v7
;; @002a jump block1(v8)
;; @0021 v4 = uextend.i64 v0
;; @0021 v5 = global_value.i64 gv1
;; @0021 v6 = iadd v5, v4
;; @0021 v7 = load.i32 little heap v6
;; @0026 v8 = uextend.i64 v1
;; @0026 v9 = global_value.i64 gv1
;; @0026 v10 = iadd v9, v8
;; @0026 v11 = load.i32 little heap v10
;; @0029 v12 = iadd v7, v11
;; @002a jump block1(v12)
;;
;; block1(v3: i32):
;; @002a return v3
;; }
;; }
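The check disappears entirely in this expected output because the 0xffff_ffff-byte guard region makes every address a 32-bit index can produce safe to compute speculatively or not. A rough Rust sketch of that memory-safety condition, stated as an assumption rather than the exact cranelift-wasm heuristic:

    /// True when a `size`-byte access at constant `offset` can skip the bounds
    /// check: even the largest 32-bit index must land inside the accessible
    /// region plus its guard pages.
    fn check_elidable(static_bound: u64, guard_size: u64, offset: u64, size: u64) -> bool {
        let max_index = u64::from(u32::MAX);
        max_index + offset + size <= static_bound + guard_size
    }

    fn main() {
        // This hunk: bound 4096, 0xffff_ffff guard, 4-byte access at offset 0
        // -> no explicit check, just `uextend` + base `iadd`.
        assert!(check_elidable(4096, 0xffff_ffff, 0, 4));
        // The earlier Spectre test: bound 0x1000, guard 0xffff_0000 -> a check
        // is still required.
        assert!(!check_elidable(0x1000, 0xffff_0000, 0, 4));
    }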


@@ -0,0 +1,22 @@
;;! target = "x86_64"
(module
(memory 1)
(func (export "f32.load") (param i32) (result f32)
local.get 0
f32.load))
;; function u0:0(i32, i64 vmctx) -> f32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.f32 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: f32):
;; @0031 return v2
;; }


@@ -1,27 +0,0 @@
; Test basic code generation for f32 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %f32_load(i32, i64 vmctx) -> f32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.f32 v2
return v3
}
function %f32_store(f32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: f32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for f32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f32.store") (param i32 f32)
local.get 0
local.get 1
f32.store))
;; function u0:0(i32, f32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: f32, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for f64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f64.load") (param i32) (result f64)
local.get 0
f64.load))
;; function u0:0(i32, i64 vmctx) -> f64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.f64 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: f64):
;; @0031 return v2
;; }


@@ -1,27 +0,0 @@
; Test basic code generation for f64 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %f64_load(i32, i64 vmctx) -> f64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.f64 v2
return v3
}
function %f64_store(f64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: f64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for f64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f64.store") (param i32 f64)
local.get 0
local.get 1
f64.store))
;; function u0:0(i32, f64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: f64, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load") (param i32) (result i32)
local.get 0
i32.load))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.i32 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: i32):
;; @0031 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load16_s") (param i32) (result i32)
local.get 0
i32.load16_s))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = sload16.i32 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i32):
;; @0035 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load16_u") (param i32) (result i32)
local.get 0
i32.load16_u))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = uload16.i32 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i32):
;; @0035 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load8_s") (param i32) (result i32)
local.get 0
i32.load8_s))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = sload8.i32 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i32):
;; @0034 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load8_u") (param i32) (result i32)
local.get 0
i32.load8_u))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = uload8.i32 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i32):
;; @0034 return v2
;; }


@@ -1,87 +0,0 @@
; Test basic code generation for i32 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %i32_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.i32 v2
return v3
}
function %i32_store(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}
function %i32_load8_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload8.i32 v2
return v3
}
function %i32_load8_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload8.i32 v2
return v3
}
function %i32_store8(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore8 v0, v3
return
}
function %i32_load16_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload16.i32 v2
return v3
}
function %i32_load16_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload16.i32 v2
return v3
}
function %i32_store16(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore16 v0, v3
return
}


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store") (param i32 i32)
local.get 0
local.get 1
i32.store))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store16") (param i32 i32)
local.get 0
local.get 1
i32.store16))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore16 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store8") (param i32 i32)
local.get 0
local.get 1
i32.store8))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 istore8 little heap v1, v5
;; @0035 jump block1
;;
;; block1:
;; @0035 return
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load") (param i32) (result i64)
local.get 0
i64.load))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.i64 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: i64):
;; @0031 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load16_s") (param i32) (result i64)
local.get 0
i64.load16_s))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = sload16.i64 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i64):
;; @0035 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load16_u") (param i32) (result i64)
local.get 0
i64.load16_u))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = uload16.i64 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i64):
;; @0035 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load8_s") (param i32) (result i64)
local.get 0
i64.load8_s))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = sload8.i64 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i64):
;; @0034 return v2
;; }


@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load8_u") (param i32) (result i64)
local.get 0
i64.load8_u))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = uload8.i64 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i64):
;; @0034 return v2
;; }


@@ -1,117 +0,0 @@
; Test basic code generation for i64 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %i64_load(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.i64 v2
return v3
}
function %i64_store(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}
function %i64_load8_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload8.i64 v2
return v3
}
function %i64_load8_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload8.i64 v2
return v3
}
function %i64_store8(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore8 v0, v3
return
}
function %i64_load16_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload16.i64 v2
return v3
}
function %i64_load16_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload16.i64 v2
return v3
}
function %i64_store16(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore16 v0, v3
return
}
function %i64_load32_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload32.i64 v2
return v3
}
function %i64_load32_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload32.i64 v2
return v3
}
function %i64_store32(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore32 v0, v3
return
}


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store") (param i32 i64)
local.get 0
local.get 1
i64.store))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store16") (param i32 i64)
local.get 0
local.get 1
i64.store16))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore16 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store32") (param i32 i64)
local.get 0
local.get 1
i64.store32))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore32 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store8") (param i32 i64)
local.get 0
local.get 1
i64.store8))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 istore8 little heap v1, v5
;; @0035 jump block1
;;
;; block1:
;; @0035 return
;; }


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 store little heap v1, v8
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = icmp ugt v3, v5
;; @0048 trapnz v6, heap_oob
;; @0048 v7 = global_value.i64 gv2
;; @0048 v8 = iadd v7, v3
;; @0048 v9 = load.i32 little heap v8
;; @004b jump block1(v9)
;;
;; block1(v2: i32):
;; @004b return v2
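For a dynamically-bounded memory with no guard pages and without the Spectre guard, the lowering keeps an explicit trap. A hedged Rust sketch of the comparison in the hunk above, with illustrative names and the trap modelled as an error return:

    /// Address of a `size`-byte access at `index` against a dynamic bound,
    /// mirroring `uextend` + `iadd_imm` + `icmp ugt` + `trapnz heap_oob`.
    /// Assumes `bound >= size`, as the CLIF does when folding `-size` into the limit.
    fn dyn_heap_addr(base: u64, bound: u64, index: u32, size: u64) -> Result<u64, &'static str> {
        let index = u64::from(index);         // uextend.i64
        let limit = bound.wrapping_sub(size); // iadd_imm bound, -size
        if index > limit {                    // icmp ugt + trapnz heap_oob
            return Err("heap_oob");
        }
        Ok(base.wrapping_add(index))          // global_value + iadd
    }

    fn main() {
        // 64 KiB bound, 4-byte access: index 0xfffc is the last in-bounds index.
        assert!(dyn_heap_addr(0x2000_0000, 0x1_0000, 0xfffc, 4).is_ok());
        assert!(dyn_heap_addr(0x2000_0000, 0x1_0000, 0xfffd, 4).is_err());
    }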


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 store little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = load.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 store little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = load.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2
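With a constant offset as large as 0xffff_0000 the end of the access can exceed the 32-bit index range, so the lowering computes index plus offset-plus-size with a 64-bit overflow-trapping add before comparing against the bound. A sketch under the same assumptions as above, again with traps modelled as error returns:

    /// Mirrors `uextend` + `uadd_overflow_trap` + `icmp ugt`/`trapnz`, then
    /// `iadd` + `iadd_imm` to form the address. Names are illustrative.
    fn dyn_heap_addr_offset(
        base: u64,
        bound: u64,
        index: u32,
        offset: u64, // e.g. 0xffff_0000 in the hunk above
        size: u64,   // e.g. 4
    ) -> Result<u64, &'static str> {
        let index = u64::from(index);   // uextend.i64
        let end = index
            .checked_add(offset + size) // uadd_overflow_trap
            .ok_or("heap_oob")?;
        if end > bound {                // icmp ugt + trapnz heap_oob
            return Err("heap_oob");
        }
        // Base + index first, then the constant offset, matching iadd + iadd_imm.
        Ok(base.wrapping_add(index).wrapping_add(offset))
    }

    fn main() {
        // A 64 KiB bound can never satisfy a 0xffff_0000 offset, so every
        // access through this path traps.
        assert!(dyn_heap_addr_offset(0x3000_0000, 0x1_0000, 0, 0xffff_0000, 4).is_err());
    }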


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = icmp uge v3, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 istore8 little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = icmp uge v3, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = uload8.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 istore8 little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = uload8.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 istore8 little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = uload8.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v3, v5
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = iconst.i64 0
;; @0048 v9 = icmp ugt v3, v5
;; @0048 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0048 v11 = load.i32 little heap v10
;; @004b jump block1(v11)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = load.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 store little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = load.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v3
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp uge v3, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 istore8 little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v3
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp uge v3, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = uload8.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 istore8 little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = uload8.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 istore8 little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = uload8.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 store little heap v1, v8
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = icmp ugt v3, v5
;; @0048 trapnz v6, heap_oob
;; @0048 v7 = global_value.i64 gv2
;; @0048 v8 = iadd v7, v3
;; @0048 v9 = load.i32 little heap v8
;; @004b jump block1(v9)
;;
;; block1(v2: i32):
;; @004b return v2
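
This file shows the same dynamic bounds check with Spectre mitigation of heap accesses turned off: the comparison feeds a trapnz, so execution traps with heap_oob before the address is formed instead of clamping the address to null. A standalone sketch of that branchy variant (plain Rust; Err models the trap, names are illustrative):

// Models the trapnz-based check above, for offset 0 and a 4-byte access.
// `bound` stands for gv1, `base` for gv2.
fn trapping_addr(index: u32, base: u64, bound: u64) -> Result<u64, &'static str> {
    const ACCESS_SIZE: u64 = 4;
    // The 0x0001_0000-byte minimum heap size keeps `bound` >= ACCESS_SIZE.
    debug_assert!(bound >= ACCESS_SIZE);
    let index = u64::from(index);      // uextend.i64 v0
    if index > bound - ACCESS_SIZE {   // iadd_imm gv1, -4; icmp ugt; trapnz heap_oob
        return Err("heap_oob");
    }
    Ok(base.wrapping_add(index))       // iadd gv2, v3
}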

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 store little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = load.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2

@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 store little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = load.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2

@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = icmp uge v3, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 istore8 little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = icmp uge v3, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = uload8.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2
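
For a zero offset and a 1-byte access the end of the access is index + 1, so the condition index + 1 > bound collapses to index >= bound; that is why this file's check is a single icmp uge with no bound adjustment at all. A small property check that spells out the equivalence (plain Rust, only to make the arithmetic explicit):

// index + 1 > bound  <=>  index >= bound, for unsigned values where the
// left-hand sum does not wrap (guaranteed here by widening the i32 index to i64).
fn main() {
    for index in 0u64..=300 {
        for bound in 0u64..=300 {
            assert_eq!(index + 1 > bound, index >= bound);
        }
    }
    println!("equivalence holds on the sampled range");
}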

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 istore8 little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = uload8.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2

@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 istore8 little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = uload8.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v3, v5
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = iconst.i64 0
;; @0048 v9 = icmp ugt v3, v5
;; @0048 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0048 v11 = load.i32 little heap v10
;; @004b jump block1(v11)
;;
;; block1(v2: i32):
;; @004b return v2

@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = load.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2

@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 store little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = load.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2

@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v3
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp uge v3, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 istore8 little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v3
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp uge v3, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = uload8.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2

Some files were not shown because too many files have changed in this diff.