Add host calls

Jef
2019-03-18 19:41:36 +01:00
parent dc42a58277
commit 4e48dad9b6
5 changed files with 902 additions and 180 deletions

View File

@@ -165,32 +165,32 @@ Now obviously I'm not advocating for replacing FireFox's optimising compiler wit
## Specification compliance
It's hard to judge, since each test in the spec testsuite covers a wide range of features (to check their interactions), but currently 50 out of 76 of the spec suite tests pass when run in Wasmtime with Lightbeam as a backend. Here's the full test output:
It's hard to judge, since each test in the spec testsuite covers a wide range of features (to check their interactions), but currently 61 out of 76 of the spec suite tests pass when run in Wasmtime with Lightbeam as a backend. Here's the full test output:
```
running 76 tests
test misc_testsuite::stack_overflow ... ok
test spec_testsuite::binary ... ok
test misc_testsuite::misc_traps ... ok
test spec_testsuite::align ... FAILED
test spec_testsuite::br_if ... FAILED
test spec_testsuite::address ... FAILED
test spec_testsuite::break_drop ... ok
test spec_testsuite::binary ... ok
test spec_testsuite::align ... ok
test spec_testsuite::block ... ok
test spec_testsuite::br ... ok
test spec_testsuite::break_drop ... ok
test spec_testsuite::br_if ... ok
test spec_testsuite::address ... ok
test spec_testsuite::comments ... ok
test spec_testsuite::const_ ... ok
test spec_testsuite::call ... ok
test spec_testsuite::conversions ... FAILED
test spec_testsuite::custom ... ok
test spec_testsuite::custom_section ... ok
test spec_testsuite::data ... ok
test spec_testsuite::call ... ok
test spec_testsuite::endianness ... FAILED
test spec_testsuite::br_table ... FAILED
test spec_testsuite::elem ... FAILED
test spec_testsuite::data ... ok
test spec_testsuite::exports ... ok
test spec_testsuite::f32_bitwise ... ok
test spec_testsuite::elem ... ok
test spec_testsuite::endianness ... ok
test spec_testsuite::call_indirect ... ok
test spec_testsuite::f32_bitwise ... ok
test spec_testsuite::f64_bitwise ... ok
test spec_testsuite::f32_cmp ... ok
test spec_testsuite::f32 ... ok
@@ -199,38 +199,38 @@ test spec_testsuite::f64 ... ok
test spec_testsuite::float_memory ... ok
test spec_testsuite::f64_cmp ... ok
test spec_testsuite::forward ... ok
test spec_testsuite::float_literals ... ok
test spec_testsuite::func_ptrs ... FAILED
test spec_testsuite::get_local ... FAILED
test spec_testsuite::float_misc ... ok
test spec_testsuite::func_ptrs ... FAILED
test spec_testsuite::float_literals ... ok
test spec_testsuite::get_local ... FAILED
test spec_testsuite::float_exprs ... FAILED
test spec_testsuite::globals ... ok
test spec_testsuite::func ... ok
test spec_testsuite::globals ... ok
test spec_testsuite::imports ... FAILED
test spec_testsuite::inline_module ... ok
test spec_testsuite::if_ ... FAILED
test spec_testsuite::i32 ... ok
test spec_testsuite::i64 ... ok
test spec_testsuite::if_ ... ok
test spec_testsuite::labels ... ok
test spec_testsuite::linking ... FAILED
test spec_testsuite::int_literals ... ok
test spec_testsuite::loop_ ... FAILED
test spec_testsuite::memory_grow ... FAILED
test spec_testsuite::memory_redundancy ... ok
test spec_testsuite::memory_trap ... FAILED
test spec_testsuite::linking ... FAILED
test spec_testsuite::int_exprs ... ok
test spec_testsuite::nop ... FAILED
test spec_testsuite::resizing ... FAILED
test spec_testsuite::left_to_right ... ok
test spec_testsuite::loop_ ... ok
test spec_testsuite::memory_redundancy ... ok
test spec_testsuite::memory_grow ... FAILED
test spec_testsuite::memory_trap ... FAILED
test spec_testsuite::memory ... ok
test spec_testsuite::resizing ... ok
test spec_testsuite::left_to_right ... ok
test spec_testsuite::return_minimal ... ok
test spec_testsuite::set_local ... FAILED
test spec_testsuite::skip_stack_guard_page ... FAILED
test spec_testsuite::select ... FAILED
test spec_testsuite::nop ... ok
test spec_testsuite::return_ ... ok
test spec_testsuite::select ... ok
test spec_testsuite::stack ... ok
test spec_testsuite::start ... ok
test spec_testsuite::set_local ... FAILED
test spec_testsuite::store_retval ... ok
test spec_testsuite::start ... FAILED
test spec_testsuite::tee_local ... FAILED
test spec_testsuite::skip_stack_guard_page ... FAILED
test spec_testsuite::token ... ok
test spec_testsuite::switch ... ok
test spec_testsuite::type_ ... ok
@@ -239,14 +239,14 @@ test spec_testsuite::traps ... FAILED
test spec_testsuite::unreached_invalid ... ok
test spec_testsuite::unwind ... FAILED
test spec_testsuite::utf8_custom_section_id ... ok
test spec_testsuite::return_ ... ok
test spec_testsuite::utf8_import_field ... ok
test spec_testsuite::utf8_import_module ... ok
test spec_testsuite::utf8_invalid_encoding ... ok
test spec_testsuite::tee_local ... FAILED
test spec_testsuite::unreachable ... ok
test spec_testsuite::names ... FAILED
test result: FAILED. 50 passed; 26 failed; 0 ignored; 0 measured; 0 filtered out
test result: FAILED. 61 passed; 15 failed; 0 ignored; 0 measured; 0 filtered out
```
## Getting involved

View File

@@ -268,12 +268,12 @@ macro_rules! asm_println {
; call rax
; jmp >pop_rest
; with_adjusted_stack_ptr:
; push 1
; call rax
; pop r11
; pop_rest:
; pop r11
; pop r10
; pop r9
@@ -689,8 +689,11 @@ struct Labels {
neg_const_f64: Option<Pending<Label>>,
abs_const_f32: Option<Pending<Label>>,
abs_const_f64: Option<Pending<Label>>,
truncate_f32_const_u64: Option<Pending<Label>>,
truncate_f64_const_u64: Option<Pending<Label>>,
copysign_consts_f32: Option<Pending<(Label, Label)>>,
copysign_consts_f64: Option<Pending<(Label, Label)>>,
from_u64_consts_f64: Option<Pending<(Label, Label)>>,
}
pub struct Context<'a, M> {
@@ -936,7 +939,7 @@ macro_rules! conversion {
let offset = self.adjusted_offset(offset);
let temp = self.block_state.regs.take(Type::for_::<$out_typ>());
dynasm!(self.asm
; $instr $out_reg_ty(temp.$out_reg_fn().unwrap()), DWORD [rsp + offset]
; $instr $out_reg_ty(temp.$out_reg_fn().unwrap()), [rsp + offset]
);
ValueLocation::Reg(temp)
}
@@ -1302,13 +1305,13 @@ macro_rules! minmax_float {
; je >equal
; $instr Rx(left.rx().unwrap()), Rx(right.rx().unwrap())
; jmp >ret
; equal:
; jnp >equal_but_not_parity
; $addinstr Rx(left.rx().unwrap()), Rx(right.rx().unwrap())
; jmp >ret
; equal_but_not_parity:
; $combineinstr Rx(left.rx().unwrap()), Rx(right.rx().unwrap())
; ret:
);
self.push(ValueLocation::Reg(left));
@@ -1640,9 +1643,40 @@ macro_rules! load {
dst: GPR,
(offset, runtime_offset): (i32, Result<i32, GPR>)
) {
let vmctx_mem_ptr_offset = ctx.module_context
let memory_def_index = ctx.module_context
.defined_memory_index(0).unwrap();
let vmctx_mem_ptr_offset = ctx.module_context
.vmctx_vmmemory_definition_base(memory_def_index) as i32;
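// Bounds check: materialise the effective address (static memarg offset plus
// the runtime address operand) and trap unless it is below the memory's
// current length recorded in the vmctx.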
if ctx.module_context.emit_memory_bounds_check() {
let vmctx_mem_len_offset = ctx.module_context
.vmctx_vmmemory_definition_current_length(memory_def_index) as i32;
let trap_label = ctx.trap_label();
let addr_reg = ctx.block_state.regs.take(I64);
match runtime_offset {
Ok(imm) => {
dynasm!(ctx.asm
; mov Rq(addr_reg.rq().unwrap()), QWORD imm as i64 + offset as i64
);
}
Err(gpr) => {
let offset_reg = ctx.block_state.regs.take(I64);
dynasm!(ctx.asm
; mov Rd(offset_reg.rq().unwrap()), offset
; mov Rq(addr_reg.rq().unwrap()), Rq(gpr.rq().unwrap())
; add Rq(addr_reg.rq().unwrap()), Rq(offset_reg.rq().unwrap())
);
ctx.block_state.regs.release(offset_reg);
}
}
dynasm!(ctx.asm
; cmp [Rq(VMCTX) + vmctx_mem_len_offset], Rq(addr_reg.rq().unwrap())
; jna =>trap_label.0
);
ctx.block_state.regs.release(addr_reg);
}
.vmctx_vmmemory_definition_base(0) as i32;
let mem_ptr_reg = ctx.block_state.regs.take(I64);
dynasm!(ctx.asm
; mov Rq(mem_ptr_reg.rq().unwrap()), [Rq(VMCTX) + vmctx_mem_ptr_offset]
@@ -1731,9 +1765,40 @@ macro_rules! store {
src: GPR,
(offset, runtime_offset): (i32, Result<i32, GPR>)
) {
let vmctx_mem_ptr_offset = ctx.module_context
let memory_def_index = ctx.module_context
.defined_memory_index(0).unwrap();
let vmctx_mem_ptr_offset = ctx.module_context
.vmctx_vmmemory_definition_base(memory_def_index) as i32;
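// Same bounds check as in the load! macro above: trap unless the effective
// address is below the memory's current length.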
if ctx.module_context.emit_memory_bounds_check() {
let vmctx_mem_len_offset = ctx.module_context
.vmctx_vmmemory_definition_current_length(memory_def_index) as i32;
let trap_label = ctx.trap_label();
let addr_reg = ctx.block_state.regs.take(I64);
match runtime_offset {
Ok(imm) => {
dynasm!(ctx.asm
; mov Rq(addr_reg.rq().unwrap()), QWORD imm as i64 + offset as i64
);
}
Err(gpr) => {
let offset_reg = ctx.block_state.regs.take(I64);
dynasm!(ctx.asm
; mov Rd(offset_reg.rq().unwrap()), offset
; mov Rq(addr_reg.rq().unwrap()), Rq(gpr.rq().unwrap())
; add Rq(addr_reg.rq().unwrap()), Rq(offset_reg.rq().unwrap())
);
ctx.block_state.regs.release(offset_reg);
}
}
dynasm!(ctx.asm
; cmp [Rq(VMCTX) + vmctx_mem_len_offset], Rq(addr_reg.rq().unwrap())
; jna =>trap_label.0
);
ctx.block_state.regs.release(addr_reg);
}
.vmctx_vmmemory_definition_base(0) as i32;
let mem_ptr_reg = ctx.block_state.regs.take(GPRType::Rq);
dynasm!(ctx.asm
; mov Rq(mem_ptr_reg.rq().unwrap()), [Rq(VMCTX) + vmctx_mem_ptr_offset]
@@ -2115,7 +2180,7 @@ impl<'module, M: ModuleContext> Context<'module, M> {
]
; add Rq(selector_reg.rq().unwrap()), Rq(tmp.rq().unwrap())
; jmp Rq(selector_reg.rq().unwrap())
; start_label:
);
self.block_state.regs.release(tmp);
@@ -2523,16 +2588,24 @@ impl<'module, M: ModuleContext> Context<'module, M> {
}
/// Puts this value into a register so that it can be efficiently read
// TODO: We should allow choosing which reg type we want to allocate here (Rx/Rq)
fn into_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> GPR {
let out = self.to_reg(ty, val);
self.free_value(val);
out
}
/// Clones this value into a register so that it can be efficiently read
fn to_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> GPR {
let ty = ty.into();
match val {
ValueLocation::Reg(r) if ty.map(|t| t == r.type_()).unwrap_or(true) => r,
ValueLocation::Reg(r) if ty.map(|t| t == r.type_()).unwrap_or(true) => {
self.block_state.regs.mark_used(r);
r
}
val => {
let scratch = self.block_state.regs.take(ty.unwrap_or(GPRType::Rq));
self.copy_value(&val, &mut ValueLocation::Reg(scratch));
self.free_value(val);
scratch
}
@@ -2542,6 +2615,14 @@ impl<'module, M: ModuleContext> Context<'module, M> {
/// Puts this value into a temporary register so that operations
/// on that register don't write to a local.
fn into_temp_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> GPR {
let out = self.to_temp_reg(ty, val);
self.free_value(val);
out
}
/// Clones this value into a temporary register so that operations
/// on that register don't write to a local.
fn to_temp_reg(&mut self, ty: impl Into<Option<GPRType>>, val: ValueLocation) -> GPR {
// If we have `None` as the type then it always matches (`.unwrap_or(true)`)
match val {
ValueLocation::Reg(r) => {
@@ -2549,17 +2630,17 @@ impl<'module, M: ModuleContext> Context<'module, M> {
let type_matches = ty.map(|t| t == r.type_()).unwrap_or(true);
if self.block_state.regs.num_usages(r) <= 1 && type_matches {
self.block_state.regs.mark_used(r);
r
} else {
let scratch = self.block_state.regs.take(ty.unwrap_or(GPRType::Rq));
self.copy_value(&val, &mut ValueLocation::Reg(scratch));
self.free_value(val);
scratch
}
}
val => self.into_reg(ty, val),
val => self.to_reg(ty, val),
}
}
@@ -2825,7 +2906,31 @@ impl<'module, M: ModuleContext> Context<'module, M> {
unop!(i32_popcnt, popcnt, Rd, u32, u32::count_ones);
conversion!(
i32_truncate_f32,
f64_from_f32,
cvtss2sd,
Rx,
rx,
Rx,
rx,
f32,
f64,
as_f32,
|a: wasmparser::Ieee32| wasmparser::Ieee64((f32::from_bits(a.bits()) as f64).to_bits())
);
conversion!(
f32_from_f64,
cvtsd2ss,
Rx,
rx,
Rx,
rx,
f64,
f32,
as_f64,
|a: wasmparser::Ieee64| wasmparser::Ieee32((f64::from_bits(a.bits()) as f32).to_bits())
);
conversion!(
i32_truncate_f32_s,
cvttss2si,
Rx,
rx,
@@ -2834,7 +2939,43 @@ impl<'module, M: ModuleContext> Context<'module, M> {
f32,
i32,
as_f32,
|a: wasmparser::Ieee32| a.bits()
|a: wasmparser::Ieee32| f32::from_bits(a.bits()) as i32
);
conversion!(
i32_truncate_f32_u,
cvttss2si,
Rx,
rx,
Rq,
rq,
f32,
i32,
as_f32,
|a: wasmparser::Ieee32| f32::from_bits(a.bits()) as i32
);
conversion!(
i32_truncate_f64_s,
cvttsd2si,
Rx,
rx,
Rd,
rq,
f64,
i32,
as_f64,
|a: wasmparser::Ieee64| f64::from_bits(a.bits()) as i32
);
conversion!(
i32_truncate_f64_u,
cvttsd2si,
Rx,
rx,
Rq,
rq,
f64,
i32,
as_f64,
|a: wasmparser::Ieee64| f64::from_bits(a.bits()) as i32
);
conversion!(
f32_convert_from_i32_s,
@@ -2848,6 +2989,137 @@ impl<'module, M: ModuleContext> Context<'module, M> {
as_i32,
|a| wasmparser::Ieee32((a as f32).to_bits())
);
conversion!(
f64_convert_from_i32_s,
cvtsi2sd,
Rd,
rq,
Rx,
rx,
i32,
f64,
as_i32,
|a| wasmparser::Ieee64((a as f64).to_bits())
);
conversion!(
f32_convert_from_i64_s,
cvtsi2ss,
Rq,
rq,
Rx,
rx,
i64,
f32,
as_i32,
|a| wasmparser::Ieee32((a as f32).to_bits())
);
conversion!(
f64_convert_from_i64_s,
cvtsi2sd,
Rq,
rq,
Rx,
rx,
i64,
f64,
as_i32,
|a| wasmparser::Ieee64((a as f64).to_bits())
);
conversion!(
i64_truncate_f32_s,
cvttss2si,
Rx,
rx,
Rq,
rq,
f32,
i64,
as_f32,
|a: wasmparser::Ieee32| f32::from_bits(a.bits()) as i64
);
conversion!(
i64_truncate_f64_s,
cvttsd2si,
Rx,
rx,
Rq,
rq,
f64,
i64,
as_f64,
|a: wasmparser::Ieee64| f64::from_bits(a.bits()) as i64
);
pub fn i64_truncate_f32_u(&mut self) {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
(f32::from_bits(imm.as_f32().unwrap().bits()) as u64).into(),
),
_ => {
let reg = self.into_reg(F32, val);
val = ValueLocation::Reg(reg);
let temp = self.block_state.regs.take(I64);
let u64_trunc_f32_const = self.truncate_f32_const_u64_label();
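// Unsigned truncation via a signed conversion: values below 2^63 (the
// constant behind `u64_trunc_f32_const`) can use `cvttss2si` directly;
// larger values have 2^63 subtracted first and bit 63 of the integer
// result toggled back with `btc`.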
dynasm!(self.asm
; comiss Rx(reg.rx().unwrap()), [=>u64_trunc_f32_const.0]
; jnb >large
; cvttss2si Rq(temp.rq().unwrap()), Rx(reg.rx().unwrap())
; jmp >cont
; large:
; subss Rx(reg.rx().unwrap()), [=>u64_trunc_f32_const.0]
; cvttss2si Rq(temp.rq().unwrap()), Rx(reg.rx().unwrap())
; btc Rq(temp.rq().unwrap()), 0b00111111
; cont:
);
ValueLocation::Reg(temp)
}
};
self.free_value(val);
self.push(out_val);
}
pub fn i64_truncate_f64_u(&mut self) {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
(f64::from_bits(imm.as_f64().unwrap().bits()) as u64).into(),
),
_ => {
let reg = self.into_reg(F64, val);
val = ValueLocation::Reg(reg);
let temp = self.block_state.regs.take(I64);
let u64_trunc_f64_const = self.truncate_f64_const_u64_label();
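// Same 2^63 split as `i64_truncate_f32_u` above, using the f64 constant.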
dynasm!(self.asm
; comisd Rx(reg.rx().unwrap()), [=>u64_trunc_f64_const.0]
; jnb >large
; cvttsd2si Rq(temp.rq().unwrap()), Rx(reg.rx().unwrap())
; jmp >cont
; large:
; subsd Rx(reg.rx().unwrap()), [=>u64_trunc_f64_const.0]
; cvttsd2si Rq(temp.rq().unwrap()), Rx(reg.rx().unwrap())
; btc Rq(temp.rq().unwrap()), 0b00111111
; cont:
);
ValueLocation::Reg(temp)
}
};
self.free_value(val);
self.push(out_val);
}
pub fn f32_convert_from_i32_u(&mut self) {
let mut val = self.pop();
@@ -2863,7 +3135,7 @@ impl<'module, M: ModuleContext> Context<'module, M> {
let temp = self.block_state.regs.take(F32);
dynasm!(self.asm
; mov Rq(reg.rq().unwrap()), Rq(reg.rq().unwrap())
; mov Rd(reg.rq().unwrap()), Rd(reg.rq().unwrap())
; cvtsi2ss Rx(temp.rx().unwrap()), Rq(reg.rq().unwrap())
);
@@ -2876,6 +3148,106 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.push(out_val);
}
pub fn f64_convert_from_i32_u(&mut self) {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
wasmparser::Ieee64((imm.as_i32().unwrap() as u32 as f64).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I32, val);
val = ValueLocation::Reg(reg);
let temp = self.block_state.regs.take(F64);
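// `mov Rd, Rd` zero-extends the 32-bit value into the full 64-bit register,
// so the unsigned i32 can be converted with the signed 64-bit `cvtsi2sd`.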
dynasm!(self.asm
; mov Rd(reg.rq().unwrap()), Rd(reg.rq().unwrap())
; cvtsi2sd Rx(temp.rx().unwrap()), Rq(reg.rq().unwrap())
);
ValueLocation::Reg(temp)
}
};
self.free_value(val);
self.push(out_val);
}
pub fn f32_convert_from_i64_u(&mut self) {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
wasmparser::Ieee32((imm.as_i32().unwrap() as u64 as f32).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I64, val);
val = ValueLocation::Reg(reg);
let out = self.block_state.regs.take(F32);
let temp = self.block_state.regs.take(I64);
dynasm!(self.asm
; test Rq(reg.rq().unwrap()), Rq(reg.rq().unwrap())
; js >negative
; cvtsi2ss Rx(out.rx().unwrap()), Rq(reg.rq().unwrap())
; negative:
; mov Rq(temp.rq().unwrap()), Rq(reg.rq().unwrap())
; shr Rq(temp.rq().unwrap()), 1
; and Rq(reg.rq().unwrap()), 1
; or Rq(reg.rq().unwrap()), Rq(temp.rq().unwrap())
; cvtsi2ss Rx(out.rx().unwrap()), Rq(reg.rq().unwrap())
; addss Rx(out.rx().unwrap()), Rx(out.rx().unwrap())
);
self.free_value(ValueLocation::Reg(temp));
ValueLocation::Reg(out)
}
};
self.free_value(val);
self.push(out_val);
}
pub fn f64_convert_from_i64_u(&mut self) {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
wasmparser::Ieee64((imm.as_i64().unwrap() as u64 as f64).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I64, val);
val = ValueLocation::Reg(reg);
let out = self.block_state.regs.take(F64);
let temp = self.block_state.regs.take(F64);
let (conv_const_0, conv_const_1) = self.from_u64_consts_f64_labels();
dynasm!(self.asm
; movq Rx(temp.rx().unwrap()), rdi
; punpckldq Rx(temp.rx().unwrap()), [=>conv_const_0.0]
; subpd Rx(temp.rx().unwrap()), [=>conv_const_1.0]
; pshufd Rx(out.rx().unwrap()), Rx(temp.rx().unwrap()), 78
; addpd Rx(out.rx().unwrap()), Rx(temp.rx().unwrap())
);
self.free_value(ValueLocation::Reg(temp));
ValueLocation::Reg(out)
}
};
self.free_value(val);
self.push(out_val);
}
unop!(i64_popcnt, popcnt, Rq, u64, |a: u64| a.count_ones() as u64);
// TODO: Use `lea` when the LHS operand isn't a temporary but both of the operands
@@ -3764,8 +4136,6 @@ impl<'module, M: ModuleContext> Context<'module, M> {
let callee = self.pop();
let callee = self.into_temp_reg(I32, callee);
let temp0 = self.block_state.regs.take(I64);
let temp1 = self.block_state.regs.take(I64);
for &loc in &locs {
if let CCLoc::Reg(r) = loc {
@@ -3773,15 +4143,40 @@ impl<'module, M: ModuleContext> Context<'module, M> {
}
}
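// The callee resolved through the table may belong to another instance (or
// be a host function), so save the caller's VMCTX here; it is popped back
// after the call returns.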
self.block_state.depth.reserve(1);
dynasm!(self.asm
; push Rq(VMCTX)
);
let depth = self.block_state.depth.clone();
self.pass_outgoing_args(&locs);
let fail = self.trap_label().0;
let table_index = 0;
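// Locate the table: a table defined in this module lives at a fixed offset
// inside the vmctx, while an imported table is reached through the pointer
// stored at `vmctx_vmtable_import_from`.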
let reg_offset = self
.module_context
.defined_table_index(table_index)
.map(|index| (None, self.module_context.vmctx_vmtable_definition(index) as i32));
let vmctx = GPR::Rq(VMCTX);
let (reg, offset) = reg_offset.unwrap_or_else(|| {
let reg = self.block_state.regs.take(I64);
dynasm!(self.asm
; mov Rq(reg.rq().unwrap()), [
Rq(VMCTX) + self.module_context.vmctx_vmtable_import_from(table_index) as i32
]
);
(Some(reg), 0)
});
let temp0 = self.block_state.regs.take(I64);
dynasm!(self.asm
; cmp Rd(callee.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
.vmctx_vmtable_definition_current_elements(0) as i32
Rq(reg.unwrap_or(vmctx).rq().unwrap()) +
offset +
self.module_context.vmtable_definition_current_elements() as i32
]
; jae =>fail
; imul
@@ -3789,10 +4184,19 @@ impl<'module, M: ModuleContext> Context<'module, M> {
Rd(callee.rq().unwrap()),
self.module_context.size_of_vmcaller_checked_anyfunc() as i32
; mov Rq(temp0.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
.vmctx_vmtable_definition_base(0) as i32
Rq(reg.unwrap_or(vmctx).rq().unwrap()) +
offset +
self.module_context.vmtable_definition_base() as i32
]
);
if let Some(reg) = reg {
self.block_state.regs.release(reg);
}
let temp1 = self.block_state.regs.take(I64);
dynasm!(self.asm
; mov Rd(temp1.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
@@ -3804,6 +4208,11 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.module_context.vmcaller_checked_anyfunc_type_index() as i32
], Rd(temp1.rq().unwrap())
; jne =>fail
; mov Rq(VMCTX), [
Rq(temp0.rq().unwrap()) +
Rq(callee.rq().unwrap()) +
self.module_context.vmcaller_checked_anyfunc_vmctx() as i32
]
; call QWORD [
Rq(temp0.rq().unwrap()) +
Rq(callee.rq().unwrap()) +
@@ -3820,6 +4229,12 @@ impl<'module, M: ModuleContext> Context<'module, M> {
}
self.push_function_returns(return_types);
self.set_stack_depth(depth);
dynasm!(self.asm
; pop Rq(VMCTX)
);
self.block_state.depth.free(1);
}
pub fn swap(&mut self, depth: u32) {
@@ -3835,6 +4250,7 @@ impl<'module, M: ModuleContext> Context<'module, M> {
return_types: impl IntoIterator<Item = SignlessType>,
) {
let locs = arg_locs(arg_types);
self.pass_outgoing_args(&locs);
let label = &self.func_starts[index as usize].1;
@@ -3849,6 +4265,48 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.push_function_returns(return_types);
}
/// Call a function with the given index
pub fn call_direct_imported(
&mut self,
index: u32,
arg_types: impl IntoIterator<Item = SignlessType>,
return_types: impl IntoIterator<Item = SignlessType>,
) {
let locs = arg_locs(arg_types);
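// Imported functions are called through their import record in the vmctx:
// load the callee's body pointer and vmctx, switch VMCTX to the callee's for
// the duration of the call, and restore our own afterwards (see the matching
// `pop` below).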
self.block_state.depth.reserve(1);
dynasm!(self.asm
; push Rq(VMCTX)
);
let depth = self.block_state.depth.clone();
self.pass_outgoing_args(&locs);
let callee = self.block_state.regs.take(I64);
dynasm!(self.asm
; mov Rq(callee.rq().unwrap()), [
Rq(VMCTX) + self.module_context.vmctx_vmfunction_import_body(index) as i32
]
; mov Rq(VMCTX), [
Rq(VMCTX) + self.module_context.vmctx_vmfunction_import_vmctx(index) as i32
]
; call Rq(callee.rq().unwrap())
);
for i in locs {
self.free_value(i.into());
}
self.push_function_returns(return_types);
self.set_stack_depth(depth);
dynasm!(self.asm
; pop Rq(VMCTX)
);
self.block_state.depth.free(1);
}
// TODO: Reserve space to store RBX, RBP, and R12..R15 so we can use them
// as scratch registers
/// Writes the function prologue and stores the arguments as locals
@@ -3945,6 +4403,35 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.labels.abs_const_f64 = Some(Pending::defined(l));
}
if let Some(l) = self
.labels
.truncate_f32_const_u64
.as_ref()
.and_then(Pending::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
; .dword 0x5F000000
);
self.labels.truncate_f32_const_u64 = Some(Pending::defined(l));
}
if let Some(l) = self
.labels
.truncate_f64_const_u64
.as_ref()
.and_then(Pending::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
; .dword 0
; .dword 0x43E00000
);
self.labels.truncate_f64_const_u64 = Some(Pending::defined(l));
}
if let Some((sign_mask, rest_mask)) = self
.labels
.copysign_consts_f32
@@ -3982,6 +4469,27 @@ impl<'module, M: ModuleContext> Context<'module, M> {
);
self.labels.copysign_consts_f64 = Some(Pending::defined((sign_mask, rest_mask)));
}
if let Some((conv_const_0, conv_const_1)) = self
.labels
.from_u64_consts_f64
.as_ref()
.and_then(Pending::as_undefined)
{
self.align(16);
self.define_label(conv_const_0);
dynasm!(self.asm
; .dword 0x43300000
; .dword 0x43300000
);
self.align(16);
self.define_label(conv_const_1);
dynasm!(self.asm
; .qword 4841369599423283200
; .qword 4985484787499139072
);
self.labels.from_u64_consts_f64 = Some(Pending::defined((conv_const_0, conv_const_1)));
}
}
pub fn trap(&mut self) {
@@ -4063,6 +4571,28 @@ impl<'module, M: ModuleContext> Context<'module, M> {
label
}
#[must_use]
fn truncate_f32_const_u64_label(&mut self) -> Label {
if let Some(l) = &self.labels.truncate_f32_const_u64 {
return l.label;
}
let label = self.create_label();
self.labels.truncate_f32_const_u64 = Some(label.into());
label
}
#[must_use]
fn truncate_f64_const_u64_label(&mut self) -> Label {
if let Some(l) = &self.labels.truncate_f64_const_u64 {
return l.label;
}
let label = self.create_label();
self.labels.truncate_f64_const_u64 = Some(label.into());
label
}
#[must_use]
fn copysign_consts_f32_labels(&mut self) -> (Label, Label) {
if let Some(l) = &self.labels.copysign_consts_f32 {
@@ -4088,5 +4618,18 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.labels.copysign_consts_f64 = Some(labels.into());
labels
}
#[must_use]
fn from_u64_consts_f64_labels(&mut self) -> (Label, Label) {
if let Some(l) = &self.labels.from_u64_consts_f64 {
return l.label;
}
let sign_mask = self.create_label();
let rest_mask = self.create_label();
let labels = (sign_mask, rest_mask);
self.labels.from_u64_consts_f64 = Some(labels.into());
labels
}
}

View File

@@ -42,8 +42,6 @@ where
let ty = session.module_context.func_type(func_idx);
if DISASSEMBLE {
let mut microwasm = vec![];
let microwasm_conv = MicrowasmConv::new(
session.module_context,
ty.params().iter().map(SigType::to_microwasm_type),
@@ -51,11 +49,11 @@ where
body,
);
for ops in microwasm_conv {
microwasm.extend(ops?);
}
println!("{}", crate::microwasm::dis(func_idx, &microwasm));
crate::microwasm::dis(
std::io::stdout(),
func_idx,
microwasm_conv.flat_map(|ops| ops.unwrap()),
);
}
let microwasm_conv = MicrowasmConv::new(
@@ -86,10 +84,21 @@ where
Operator<L>: std::fmt::Display,
{
fn drop_elements<T>(stack: &mut Vec<T>, depths: std::ops::RangeInclusive<u32>) {
let real_range =
stack.len() - 1 - *depths.end() as usize..=stack.len() - 1 - *depths.start() as usize;
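// Translate the top-of-stack-relative depth range into vector indices;
// `checked_sub` turns an out-of-range drop into a no-op instead of a panic
// on underflow.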
let _ = (|| {
let start = stack
.len()
.checked_sub(1)?
.checked_sub(*depths.end() as usize)?;
let end = stack
.len()
.checked_sub(1)?
.checked_sub(*depths.start() as usize)?;
let real_range = start..=end;
stack.drain(real_range);
Some(())
})();
}
let func_type = session.module_context.defined_func_type(func_idx);
@@ -131,7 +140,12 @@ where
if let Some(Operator::Label(label)) = body.peek() {
let block = blocks
.get_mut(&BrTarget::Label(label.clone()))
.expect("Block definition should be before label definition");
.unwrap_or_else(|| {
panic!(
"Block definition should be before label definition: {}",
Operator::Label(label.clone())
)
});
block.is_next = true;
}
@@ -142,12 +156,14 @@ where
Operator::Label(label) => {
use std::collections::hash_map::Entry;
if let Entry::Occupied(mut entry) = blocks.entry(BrTarget::Label(label)) {
if let Entry::Occupied(mut entry) = blocks.entry(BrTarget::Label(label.clone())) {
let has_backwards_callers = {
let block = entry.get_mut();
// TODO: Is it possible with arbitrary CFGs that a block will have _only_ backwards callers?
// Certainly for Microwasm generated from Wasm that is currently impossible.
// TODO: Maybe we want to restrict Microwasm so that at least one of its callers
// must be before the label. In an ideal world the restriction would be that
// blocks without callers are illegal, but that's not reasonably possible for
// Microwasm generated from Wasm.
if block.actual_num_callers == 0 {
loop {
let done = match body.peek() {
@@ -159,7 +175,30 @@ where
break;
}
body.next();
let skipped = body.next();
// We still want to honour block definitions even in unreachable code
if let Some(Operator::Block {
label,
has_backwards_callers,
params,
num_callers,
}) = skipped
{
let asm_label = ctx.create_label();
blocks.insert(
BrTarget::Label(label),
Block {
label: BrTarget::Label(asm_label),
params: params.len() as _,
calling_convention: None,
is_next: false,
has_backwards_callers,
actual_num_callers: 0,
num_callers,
},
);
}
}
continue;
@@ -188,7 +227,10 @@ where
entry.remove_entry();
}
} else {
panic!("Label defined before being declared");
panic!(
"Label defined before being declared: {}",
Operator::Label(label)
);
}
}
Operator::Block {
@@ -503,9 +545,51 @@ where
Operator::F64ReinterpretFromI64 => {}
Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: SignfulInt(_, Size::_32),
output_ty: sint::I32,
} => {
ctx.i32_truncate_f32();
ctx.i32_truncate_f32_s();
}
Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::U32,
} => {
ctx.i32_truncate_f32_u();
}
Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I32,
} => {
ctx.i32_truncate_f64_s();
}
Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U32,
} => {
ctx.i32_truncate_f64_u();
}
Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::I64,
} => {
ctx.i64_truncate_f32_s();
}
Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::U64,
} => {
ctx.i64_truncate_f32_u();
}
Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I64,
} => {
ctx.i64_truncate_f64_s();
}
Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U64,
} => {
ctx.i64_truncate_f64_u();
}
Operator::Extend {
sign: Signedness::Unsigned,
@@ -517,10 +601,36 @@ where
input_ty: sint::I32,
output_ty: Size::_32,
} => ctx.f32_convert_from_i32_s(),
Operator::FConvertFromI {
input_ty: sint::I32,
output_ty: Size::_64,
} => ctx.f64_convert_from_i32_s(),
Operator::FConvertFromI {
input_ty: sint::I64,
output_ty: Size::_32,
} => ctx.f32_convert_from_i64_s(),
Operator::FConvertFromI {
input_ty: sint::I64,
output_ty: Size::_64,
} => ctx.f64_convert_from_i64_s(),
Operator::FConvertFromI {
input_ty: sint::U32,
output_ty: Size::_32,
} => ctx.f32_convert_from_i32_u(),
Operator::FConvertFromI {
input_ty: sint::U32,
output_ty: Size::_64,
} => ctx.f64_convert_from_i32_u(),
Operator::FConvertFromI {
input_ty: sint::U64,
output_ty: Size::_32,
} => ctx.f32_convert_from_i64_u(),
Operator::FConvertFromI {
input_ty: sint::U64,
output_ty: Size::_64,
} => ctx.f64_convert_from_i64_u(),
Operator::F64PromoteFromF32 => ctx.f64_from_f32(),
Operator::F32DemoteFromF64 => ctx.f32_from_f64(),
Operator::Load8 {
ty: sint::U32,
memarg,
@@ -585,17 +695,22 @@ where
ctx.memory_grow();
}
Operator::Call { function_index } => {
let function_index = module_context
.defined_func_index(function_index)
.expect("We don't support host calls yet");
let callee_ty = module_context.func_type(function_index);
// TODO: this implementation assumes that this function is locally defined.
ctx.call_direct(
function_index,
callee_ty.params().iter().map(|t| t.to_microwasm_type()),
callee_ty.returns().iter().map(|t| t.to_microwasm_type()),
);
if let Some(defined_func_index) = module_context.defined_func_index(function_index)
{
ctx.call_direct(
defined_func_index,
callee_ty.params().iter().map(|t| t.to_microwasm_type()),
callee_ty.returns().iter().map(|t| t.to_microwasm_type()),
);
} else {
ctx.call_direct_imported(
function_index,
callee_ty.params().iter().map(|t| t.to_microwasm_type()),
callee_ty.returns().iter().map(|t| t.to_microwasm_type()),
);
}
}
Operator::CallIndirect {
type_index,

View File

@@ -1,7 +1,7 @@
use crate::module::{ModuleContext, SigType, Signature};
use smallvec::SmallVec;
use std::{
convert::TryFrom,
convert::{TryFrom, TryInto},
fmt,
iter::{self, FromIterator},
ops::RangeInclusive,
@@ -11,7 +11,11 @@ use wasmparser::{
FunctionBody, Ieee32, Ieee64, MemoryImmediate, Operator as WasmOperator, OperatorsReader,
};
pub fn dis<L>(function_name: impl fmt::Display, microwasm: &[Operator<L>]) -> String
pub fn dis<L>(
mut out: impl std::io::Write,
function_name: impl fmt::Display,
microwasm: impl IntoIterator<Item = Operator<L>>,
) -> std::io::Result<()>
where
BrTarget<L>: fmt::Display,
L: Clone,
@@ -20,29 +24,20 @@ where
const DISASSEMBLE_BLOCK_DEFS: bool = true;
let mut asm = format!(".fn_{}:\n", function_name);
let mut out = String::new();
writeln!(out, ".fn_{}:", function_name)?;
let p = " ";
for op in microwasm {
if op.is_label() {
writeln!(asm, "{}", op).unwrap();
writeln!(out, "{}", op)?;
} else if op.is_block() {
writeln!(out, "{}", op).unwrap();
writeln!(out, "{}", op)?;
} else {
writeln!(asm, "{}{}", p, op).unwrap();
writeln!(out, "{}{}", p, op)?;
}
}
let out = if DISASSEMBLE_BLOCK_DEFS {
writeln!(out).unwrap();
writeln!(out, "{}", asm).unwrap();
out
} else {
asm
};
out
Ok(())
}
/// A constant value embedded in the instructions
@@ -772,16 +767,16 @@ where
Operator::Shr(ty) => write!(f, "{}.shr", ty),
Operator::Rotl(ty) => write!(f, "{}.rotl", SignfulInt(Signedness::Unsigned, *ty)),
Operator::Rotr(ty) => write!(f, "{}.rotr", SignfulInt(Signedness::Unsigned, *ty)),
Operator::Abs(ty) => write!(f, "{}.abs", Type::<Size>::Float(*ty)),
Operator::Neg(ty) => write!(f, "{}.neg", Type::<Size>::Float(*ty)),
Operator::Ceil(ty) => write!(f, "{}.ceil", Type::<Size>::Float(*ty)),
Operator::Floor(ty) => write!(f, "{}.floor", Type::<Size>::Float(*ty)),
Operator::Trunc(ty) => write!(f, "{}.trunc", Type::<Size>::Float(*ty)),
Operator::Nearest(ty) => write!(f, "{}.nearest", Type::<Size>::Float(*ty)),
Operator::Sqrt(ty) => write!(f, "{}.sqrt", Type::<Size>::Float(*ty)),
Operator::Min(ty) => write!(f, "{}.min", Type::<Size>::Float(*ty)),
Operator::Max(ty) => write!(f, "{}.max", Type::<Size>::Float(*ty)),
Operator::Copysign(ty) => write!(f, "{}.copysign", Type::<Size>::Float(*ty)),
Operator::Abs(ty) => write!(f, "{}.abs", Type::<Int>::Float(*ty)),
Operator::Neg(ty) => write!(f, "{}.neg", Type::<Int>::Float(*ty)),
Operator::Ceil(ty) => write!(f, "{}.ceil", Type::<Int>::Float(*ty)),
Operator::Floor(ty) => write!(f, "{}.floor", Type::<Int>::Float(*ty)),
Operator::Trunc(ty) => write!(f, "{}.trunc", Type::<Int>::Float(*ty)),
Operator::Nearest(ty) => write!(f, "{}.nearest", Type::<Int>::Float(*ty)),
Operator::Sqrt(ty) => write!(f, "{}.sqrt", Type::<Int>::Float(*ty)),
Operator::Min(ty) => write!(f, "{}.min", Type::<Int>::Float(*ty)),
Operator::Max(ty) => write!(f, "{}.max", Type::<Int>::Float(*ty)),
Operator::Copysign(ty) => write!(f, "{}.copysign", Type::<Int>::Float(*ty)),
Operator::I32WrapFromI64 => write!(f, "i32.wrap_from.i64"),
Operator::F32DemoteFromF64 => write!(f, "f32.demote_from.f64"),
Operator::F64PromoteFromF32 => write!(f, "f64.promote_from.f32"),
@@ -798,12 +793,39 @@ where
} => write!(
f,
"{}.convert_from.{}",
Type::Float::<Int>(*output_ty),
input_ty,
Type::Float::<Int>(*output_ty)
),
Operator::GetGlobal(index) => write!(f, "global.get {}", index),
Operator::SetGlobal(index) => write!(f, "global.set {}", index),
_ => unimplemented!(),
Operator::ITruncFromF {
input_ty,
output_ty,
} => write!(
f,
"{}.truncate_from.{}",
output_ty,
Type::<Int>::Float(*input_ty)
),
Operator::ISatTruncFromF {
input_ty,
output_ty,
} => write!(
f,
"{}.saturating_truncate_from.{}",
output_ty,
Type::<Int>::Float(*input_ty)
),
Operator::Extend { sign } => write!(
f,
"{}.extend_from.{}",
SignfulInt(*sign, Size::_64),
SignfulInt(*sign, Size::_32)
),
Operator::MemoryInit { .. } => unimplemented!(),
Operator::TableInit { .. } => unimplemented!(),
Operator::DataDrop { .. } => unimplemented!(),
Operator::ElemDrop { .. } => unimplemented!(),
}
}
}
@@ -916,7 +938,6 @@ enum ControlFrameKind {
Loop,
/// True-subblock of if expression.
If {
params: Vec<SignlessType>,
has_else: bool,
},
}
@@ -925,7 +946,7 @@ enum ControlFrameKind {
struct ControlFrame {
id: u32,
arguments: u32,
returns: u32,
returns: Vec<SignlessType>,
kind: ControlFrameKind,
}
@@ -953,13 +974,6 @@ impl ControlFrame {
}
}
}
fn params(&self) -> Option<&[SignlessType]> {
match &self.kind {
ControlFrameKind::If { params, .. } => Some(params),
_ => None,
}
}
}
pub struct MicrowasmConv<'a, 'b, M> {
@@ -1076,7 +1090,7 @@ where
out.control_frames.push(ControlFrame {
id,
arguments: num_locals,
returns: returns.into_iter().count() as _,
returns: returns.into_iter().collect(),
kind: ControlFrameKind::Function,
});
@@ -1371,8 +1385,8 @@ where
self.control_frames.first().unwrap()
}
fn local_depth(&self, idx: u32) -> u32 {
self.stack.len() as u32 - 1 - idx
fn local_depth(&self, idx: u32) -> i32 {
self.stack.len() as i32 - 1 - idx as i32
}
fn apply_op(&mut self, sig: OpSig) {
@@ -1417,13 +1431,6 @@ where
out.extend(Type::from_wasm(ty));
out
}
fn drop(&mut self, range: RangeInclusive<u32>) {
let internal_range = self.stack.len() - 1 - *range.end() as usize
..=self.stack.len() - 1 - *range.start() as usize;
for _ in self.stack.drain(internal_range) {}
}
}
impl<'a, 'b, M: ModuleContext> Iterator for MicrowasmConv<'a, 'b, M>
@@ -1436,7 +1443,7 @@ where
macro_rules! to_drop {
($block:expr) => {{
let block = &$block;
let first_non_local_depth = block.returns;
let first_non_local_depth = block.returns.len() as u32;
(|| {
let last_non_local_depth = (self.stack.len() as u32)
@@ -1484,7 +1491,7 @@ where
if depth == 0 {
let block = self.control_frames.last_mut().expect("Failed");
self.stack = block.params().unwrap().to_vec();
self.stack.truncate(block.arguments as _);
if let ControlFrameKind::If { has_else, .. } = &mut block.kind {
*has_else = true;
@@ -1497,23 +1504,20 @@ where
if depth == 0 {
let block = self.control_frames.pop().expect("Failed");
if let Some(to_drop) = to_drop!(block) {
self.drop(to_drop.clone());
}
if self.control_frames.is_empty() {
self.is_done = true;
return None;
}
self.stack.truncate(block.arguments as _);
self.stack.extend(block.returns);
let end_label = (block.id, NameTag::End);
if let ControlFrameKind::If {
has_else: false, ..
} = block.kind
{
self.stack = block.params().unwrap().to_vec();
break smallvec![
Operator::Label((block.id, NameTag::Else)),
Operator::Br {
@@ -1553,11 +1557,7 @@ where
self.control_frames.push(ControlFrame {
id,
arguments: self.stack.len() as u32,
returns: if ty == wasmparser::Type::EmptyBlockType {
0
} else {
1
},
returns: Vec::from_iter(Type::from_wasm(ty)),
kind: ControlFrameKind::Block {
needs_end_label: false,
},
@@ -1572,11 +1572,7 @@ where
self.control_frames.push(ControlFrame {
id,
arguments: self.stack.len() as u32,
returns: if ty == wasmparser::Type::EmptyBlockType {
0
} else {
1
},
returns: Vec::from_iter(Type::from_wasm(ty)),
kind: ControlFrameKind::Loop,
});
let label = (id, NameTag::Header);
@@ -1591,17 +1587,11 @@ where
}
WasmOperator::If { ty } => {
let id = self.next_id();
let params = self.block_params();
self.control_frames.push(ControlFrame {
id,
arguments: self.stack.len() as u32,
returns: if ty == wasmparser::Type::EmptyBlockType {
0
} else {
1
},
returns: Vec::from_iter(Type::from_wasm(ty)),
kind: ControlFrameKind::If {
params,
has_else: false,
},
});
@@ -1630,7 +1620,7 @@ where
*has_else = true;
}
self.stack = block.params().unwrap().to_vec();
self.stack.truncate(block.arguments as _);
let label = (block.id, NameTag::Else);
@@ -1649,9 +1639,8 @@ where
let to_drop = to_drop!(block);
if let Some(to_drop) = &to_drop {
self.drop(to_drop.clone());
}
self.stack.truncate(block.arguments as _);
self.stack.extend(block.returns.iter().cloned());
if let ControlFrameKind::If {
has_else: false, ..
@@ -1660,14 +1649,12 @@ where
let else_ = (block.id, NameTag::Else);
let end = (block.id, NameTag::End);
self.stack = block.params().unwrap().to_vec();
to_drop
.map(Operator::Drop)
.into_iter()
.chain::<SmallVec<[_; 4]>>(smallvec![
Operator::Br {
target: BrTarget::Label(else_),
target: BrTarget::Label(end),
},
Operator::Label(else_),
Operator::Br {
@@ -1792,21 +1779,27 @@ where
WasmOperator::Select => smallvec![Operator::Select],
WasmOperator::GetLocal { local_index } => {
// TODO: `- 1` because we apply the stack difference _before_ this point
// `- 1` because we apply the stack difference _before_ this point
let depth = self.local_depth(local_index) - 1;
smallvec![Operator::Pick(depth)]
smallvec![Operator::Pick(
depth.try_into().expect("Local out of range")
)]
}
WasmOperator::SetLocal { local_index } => {
// TODO: `+ 1` because we apply the stack difference _before_ this point
// `+ 1` because we apply the stack difference _before_ this point
let depth = self.local_depth(local_index) + 1;
smallvec![Operator::Swap(depth), Operator::Drop(0..=0)]
smallvec![
Operator::Swap(depth.try_into().expect("Local out of range")),
Operator::Drop(0..=0)
]
}
WasmOperator::TeeLocal { local_index } => {
let depth = self.local_depth(local_index);
// `+ 1` because we `pick` before `swap`
let depth = self.local_depth(local_index) + 1;
smallvec![
Operator::Swap(depth),
Operator::Pick(0),
Operator::Swap(depth.try_into().expect("Local out of range")),
Operator::Drop(0..=0),
Operator::Pick(depth - 1),
]
}
WasmOperator::GetGlobal { global_index } => {
@@ -2075,18 +2068,36 @@ where
input_ty: Size::_32,
output_ty: sint::U32
}],
WasmOperator::I32TruncSF64 => unimplemented!("{:?}", op),
WasmOperator::I32TruncUF64 => unimplemented!("{:?}", op),
WasmOperator::I32TruncSF64 => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I32
}],
WasmOperator::I32TruncUF64 => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U32
}],
WasmOperator::I64ExtendSI32 => smallvec![Operator::Extend {
sign: Signedness::Signed
}],
WasmOperator::I64ExtendUI32 => smallvec![Operator::Extend {
sign: Signedness::Unsigned
}],
WasmOperator::I64TruncSF32 => unimplemented!("{:?}", op),
WasmOperator::I64TruncUF32 => unimplemented!("{:?}", op),
WasmOperator::I64TruncSF64 => unimplemented!("{:?}", op),
WasmOperator::I64TruncUF64 => unimplemented!("{:?}", op),
WasmOperator::I64TruncSF32 => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::I64,
}],
WasmOperator::I64TruncUF32 => smallvec![Operator::ITruncFromF {
input_ty: Size::_32,
output_ty: sint::U64,
}],
WasmOperator::I64TruncSF64 => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::I64,
}],
WasmOperator::I64TruncUF64 => smallvec![Operator::ITruncFromF {
input_ty: Size::_64,
output_ty: sint::U64,
}],
WasmOperator::F32ConvertSI32 => smallvec![Operator::FConvertFromI {
input_ty: sint::I32,
output_ty: Size::_32
@@ -2119,8 +2130,8 @@ where
input_ty: sint::U64,
output_ty: Size::_64
}],
WasmOperator::F32DemoteF64 => unimplemented!("{:?}", op),
WasmOperator::F64PromoteF32 => unimplemented!("{:?}", op),
WasmOperator::F32DemoteF64 => smallvec![Operator::F32DemoteFromF64],
WasmOperator::F64PromoteF32 => smallvec![Operator::F64PromoteFromF32],
WasmOperator::I32ReinterpretF32 => smallvec![Operator::I32ReinterpretFromF32],
WasmOperator::I64ReinterpretF64 => smallvec![Operator::I64ReinterpretFromF64],
WasmOperator::F32ReinterpretI32 => smallvec![Operator::F32ReinterpretFromI32],

View File

@@ -164,7 +164,11 @@ impl TranslatedModule {
// This will panic if `hashes.len() > 64`
out[..hashes.len()].copy_from_slice(&hashes[..]);
Some(Box::new(GVmCtx { table, mem, hashes: out }) as Box<VmCtx>)
Some(Box::new(GVmCtx {
table,
mem,
hashes: out,
}) as Box<VmCtx>)
} else {
None
};
@@ -418,13 +422,23 @@ pub trait ModuleContext {
fn vmctx_vmglobal_definition(&self, index: u32) -> u32;
fn vmctx_vmmemory_definition_base(&self, defined_memory_index: u32) -> u32;
fn vmctx_vmmemory_definition_current_length(&self, defined_memory_index: u32) -> u32;
fn vmctx_vmtable_import_from(&self, table_index: u32) -> u32;
fn vmctx_vmtable_definition(&self, defined_table_index: u32) -> u32;
fn vmctx_vmtable_definition_base(&self, defined_table_index: u32) -> u32;
fn vmctx_vmtable_definition_current_elements(&self, defined_table_index: u32) -> u32;
fn vmctx_vmfunction_import_body(&self, func_index: u32) -> u32;
fn vmctx_vmfunction_import_vmctx(&self, func_index: u32) -> u32;
fn vmtable_definition_base(&self) -> u8;
fn vmtable_definition_current_elements(&self) -> u8;
fn vmctx_vmshared_signature_id(&self, signature_idx: u32) -> u32;
fn vmcaller_checked_anyfunc_type_index(&self) -> u8;
fn vmcaller_checked_anyfunc_func_ptr(&self) -> u8;
fn vmcaller_checked_anyfunc_vmctx(&self) -> u8;
fn size_of_vmcaller_checked_anyfunc(&self) -> u8;
fn defined_table_index(&self, table_index: u32) -> Option<u32>;
fn defined_memory_index(&self, index: u32) -> Option<u32>;
fn defined_global_index(&self, global_index: u32) -> Option<u32>;
fn global_type(&self, global_index: u32) -> &Self::GlobalType;
@@ -443,6 +457,10 @@ pub trait ModuleContext {
// TODO: This assumes that there are no imported functions.
self.signature(self.func_type_index(func_idx))
}
fn emit_memory_bounds_check(&self) -> bool {
true
}
}
impl ModuleContext for SimpleContext {
@@ -478,6 +496,25 @@ impl ModuleContext for SimpleContext {
unimplemented!()
}
fn defined_memory_index(&self, index: u32) -> Option<u32> {
unimplemented!()
}
fn defined_table_index(&self, index: u32) -> Option<u32> {
Some(index)
}
fn vmctx_vmfunction_import_body(&self, func_index: u32) -> u32 {
unimplemented!()
}
fn vmctx_vmfunction_import_vmctx(&self, func_index: u32) -> u32 {
unimplemented!()
}
fn vmctx_vmtable_import_from(&self, table_index: u32) -> u32 {
unimplemented!()
}
fn vmctx_vmmemory_definition_base(&self, defined_memory_index: u32) -> u32 {
VmCtx::offset_of_memory_ptr()
}
@@ -486,6 +523,10 @@ impl ModuleContext for SimpleContext {
VmCtx::offset_of_memory_len()
}
fn vmctx_vmtable_definition(&self, defined_table_index: u32) -> u32 {
VmCtx::offset_of_funcs_ptr() as _
}
fn vmctx_vmtable_definition_base(&self, defined_table_index: u32) -> u32 {
VmCtx::offset_of_funcs_ptr() as _
}
@@ -494,6 +535,18 @@ impl ModuleContext for SimpleContext {
VmCtx::offset_of_funcs_len() as _
}
fn vmtable_definition_base(&self) -> u8 {
unimplemented!()
}
fn vmtable_definition_current_elements(&self) -> u8 {
unimplemented!()
}
fn vmcaller_checked_anyfunc_vmctx(&self) -> u8 {
unimplemented!()
}
fn vmcaller_checked_anyfunc_type_index(&self) -> u8 {
RuntimeFunc::offset_of_sig_hash() as _
}