Implement memory.size/memory.grow, globals, call_indirect

This commit is contained in:
Jef
2019-03-15 11:51:11 +01:00
parent a9ab1a1467
commit 1eb419cdda
7 changed files with 651 additions and 177 deletions

View File

@@ -6,6 +6,7 @@ use self::registers::*;
use crate::error::Error;
use crate::microwasm::Value;
use crate::module::{ModuleContext, RuntimeFunc};
use cranelift_codegen::binemit;
use dynasmrt::x64::Assembler;
use dynasmrt::{AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer};
use std::{
@@ -14,6 +15,35 @@ use std::{
ops::RangeInclusive,
};
// TODO: Get rid of this! It's a total hack.
mod magic {
    use cranelift_codegen::ir;

    /// User-defined namespace under which the runtime registers its libcalls.
    const LIBCALL_NAMESPACE: u32 = 1;
    /// Indices of the individual libcalls within `LIBCALL_NAMESPACE`.
    /// These must stay in sync with the runtime's relocation resolver.
    const MEMORY32_GROW: u32 = 0;
    const IMPORTED_MEMORY32_GROW: u32 = 1;
    const MEMORY32_SIZE: u32 = 2;
    const IMPORTED_MEMORY32_SIZE: u32 = 3;

    /// Compute an `ir::ExternalName` for the `memory.grow` libcall for
    /// 32-bit locally-defined memories.
    pub fn get_memory32_grow_name() -> ir::ExternalName {
        ir::ExternalName::user(LIBCALL_NAMESPACE, MEMORY32_GROW)
    }

    /// Compute an `ir::ExternalName` for the `memory.grow` libcall for
    /// 32-bit imported memories.
    pub fn get_imported_memory32_grow_name() -> ir::ExternalName {
        ir::ExternalName::user(LIBCALL_NAMESPACE, IMPORTED_MEMORY32_GROW)
    }

    /// Compute an `ir::ExternalName` for the `memory.size` libcall for
    /// 32-bit locally-defined memories.
    pub fn get_memory32_size_name() -> ir::ExternalName {
        ir::ExternalName::user(LIBCALL_NAMESPACE, MEMORY32_SIZE)
    }

    /// Compute an `ir::ExternalName` for the `memory.size` libcall for
    /// 32-bit imported memories.
    pub fn get_imported_memory32_size_name() -> ir::ExternalName {
        ir::ExternalName::user(LIBCALL_NAMESPACE, IMPORTED_MEMORY32_SIZE)
    }
}
/// Size of a pointer on the target in bytes.
const WORD_SIZE: u32 = 8;
@@ -467,6 +497,7 @@ pub struct FunctionEnd {
pub struct CodeGenSession<'a, M> {
assembler: Assembler,
pub module_context: &'a M,
labels: Labels,
func_starts: Vec<(Option<AssemblyOffset>, DynamicLabel)>,
}
@@ -479,12 +510,17 @@ impl<'a, M> CodeGenSession<'a, M> {
CodeGenSession {
assembler,
labels: Default::default(),
func_starts,
module_context,
}
}
pub fn new_context(&mut self, func_idx: u32) -> Context<'_, M> {
pub fn new_context<'this>(
&'this mut self,
func_idx: u32,
reloc_sink: &'this mut dyn binemit::RelocSink,
) -> Context<'this, M> {
{
let func_start = &mut self.func_starts[func_idx as usize];
@@ -496,8 +532,10 @@ impl<'a, M> CodeGenSession<'a, M> {
Context {
asm: &mut self.assembler,
current_function: func_idx,
reloc_sink: reloc_sink,
func_starts: &self.func_starts,
labels: Default::default(),
labels: &mut self.labels,
block_state: Default::default(),
module_context: self.module_context,
}
@@ -627,22 +665,64 @@ pub enum MemoryAccessMode {
Unchecked,
}
/// A `Label` paired with a flag recording whether the code/data it points at
/// has already been emitted. `Context::epilogue` emits bodies for any labels
/// that are still pending and then marks them defined.
struct PendingLabel {
    label: Label,
    is_defined: bool,
}

impl PendingLabel {
    /// A label that exists but whose target has not been emitted yet.
    fn undefined(label: Label) -> Self {
        // Delegate to the `From<Label>` impl, which has identical semantics,
        // instead of duplicating the construction.
        label.into()
    }

    /// A label whose target has already been emitted.
    fn defined(label: Label) -> Self {
        PendingLabel {
            label,
            is_defined: true,
        }
    }

    /// Returns the label only if its target still needs to be emitted.
    fn as_undefined(&self) -> Option<Label> {
        if !self.is_defined {
            Some(self.label)
        } else {
            None
        }
    }
}

impl From<Label> for PendingLabel {
    /// A bare `Label` converts to an undefined (not-yet-emitted) entry.
    fn from(label: Label) -> Self {
        PendingLabel {
            label,
            is_defined: false,
        }
    }
}
// TODO: We can share one trap/constant for all functions by reusing this struct
#[derive(Default)]
struct Labels {
trap: Option<Label>,
ret: Option<Label>,
neg_const_f32: Option<Label>,
neg_const_f64: Option<Label>,
trap: Option<PendingLabel>,
ret: Option<PendingLabel>,
neg_const_f32: Option<PendingLabel>,
neg_const_f64: Option<PendingLabel>,
abs_const_f32: Option<PendingLabel>,
abs_const_f64: Option<PendingLabel>,
}
pub struct Context<'a, M> {
asm: &'a mut Assembler,
reloc_sink: &'a mut dyn binemit::RelocSink,
module_context: &'a M,
current_function: u32,
func_starts: &'a Vec<(Option<AssemblyOffset>, DynamicLabel)>,
/// Each push and pop on the value stack increments or decrements this value by 1 respectively.
pub block_state: BlockState,
labels: Labels,
labels: &'a mut Labels,
}
/// Label in code.
@@ -1487,7 +1567,9 @@ macro_rules! load {
dst: GPR,
(offset, runtime_offset): (i32, Result<i32, GPR>)
) {
let vmctx_mem_ptr_offset = ctx.module_context.offset_of_memory_ptr() as i32;
let vmctx_mem_ptr_offset = ctx.module_context
.vmctx_vmmemory_definition_base(0) as i32;
let mem_ptr_reg = ctx.block_state.regs.take(I64);
dynasm!(ctx.asm
; mov Rq(mem_ptr_reg.rq().unwrap()), [Rq(VMCTX) + vmctx_mem_ptr_offset]
@@ -1576,7 +1658,9 @@ macro_rules! store {
src: GPR,
(offset, runtime_offset): (i32, Result<i32, GPR>)
) {
let vmctx_mem_ptr_offset = ctx.module_context.offset_of_memory_ptr() as i32;
let vmctx_mem_ptr_offset = ctx.module_context
.vmctx_vmmemory_definition_base(0) as i32;
let mem_ptr_reg = ctx.block_state.regs.take(GPRType::Rq);
dynasm!(ctx.asm
; mov Rq(mem_ptr_reg.rq().unwrap()), [Rq(VMCTX) + vmctx_mem_ptr_offset]
@@ -2048,6 +2132,41 @@ impl<'module, M: ModuleContext> Context<'module, M> {
}
}
/// Pushes the value of the global at `global_idx` onto the value stack.
///
/// The value is loaded directly from the global's definition slot inside the
/// vmctx. Only locally-defined globals are supported; imported globals panic.
pub fn get_global(&mut self, global_idx: u32) {
// Byte offset of the global's definition within the vmctx.
let offset = self.module_context.vmctx_vmglobal_definition(
self.module_context
.defined_global_index(global_idx)
.expect("TODO: Support imported globals"),
);
let out = self.block_state.regs.take(GPRType::Rq);
// We always use `Rq` (even for floats) since the globals are not necessarily aligned to 128 bits
dynasm!(self.asm
; mov Rq(out.rq().unwrap()), [Rq(VMCTX) + offset as i32]
);
self.push(ValueLocation::Reg(out));
}
/// Pops the top of the value stack and stores it into the global at
/// `global_idx`.
///
/// Stores directly into the global's definition slot inside the vmctx.
/// Only locally-defined globals are supported; imported globals panic.
pub fn set_global(&mut self, global_idx: u32) {
let val = self.pop();
// Byte offset of the global's definition within the vmctx.
let offset = self.module_context.vmctx_vmglobal_definition(
self.module_context
.defined_global_index(global_idx)
.expect("TODO: Support imported globals"),
);
let val = self.into_reg(GPRType::Rq, val);
// We always use `Rq` (even for floats) since the globals are not necessarily aligned to 128 bits
dynasm!(self.asm
; mov [Rq(VMCTX) + offset as i32], Rq(val.rq().unwrap())
);
self.block_state.regs.release(val);
}
fn immediate_to_reg(&mut self, reg: GPR, val: Value) {
if val.as_bytes() == 0 {
self.zero_reg(reg);
@@ -2397,6 +2516,48 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.push(out);
}
/// `f32.abs`: replaces the top of the value stack with its absolute value.
///
/// Immediates are folded at compile time; otherwise the sign bit is cleared
/// by ANDing with the 0x7FFFFFFF mask constant emitted at
/// `abs_const_f32_label` (see `epilogue`).
pub fn f32_abs(&mut self) {
let val = self.pop();
let out = if let Some(i) = val.imm_f32() {
// Constant-fold: compute abs on the bit pattern at compile time.
ValueLocation::Immediate(
wasmparser::Ieee32(f32::from_bits(i.bits()).abs().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, val);
let const_label = self.abs_const_f32_label();
dynasm!(self.asm
; andps Rx(reg.rx().unwrap()), [=>const_label.0]
);
ValueLocation::Reg(reg)
};
self.push(out);
}
/// `f64.abs`: replaces the top of the value stack with its absolute value.
///
/// Immediates are folded at compile time; otherwise the sign bit is cleared
/// by ANDing with the 0x7FFFFFFFFFFFFFFF mask constant emitted at
/// `abs_const_f64_label` (see `epilogue`).
pub fn f64_abs(&mut self) {
let val = self.pop();
let out = if let Some(i) = val.imm_f64() {
// Constant-fold: compute abs on the bit pattern at compile time.
ValueLocation::Immediate(
wasmparser::Ieee64(f64::from_bits(i.bits()).abs().to_bits()).into(),
)
} else {
let reg = self.into_temp_reg(GPRType::Rx, val);
let const_label = self.abs_const_f64_label();
dynasm!(self.asm
// NOTE(review): `andps` on f64 data is bitwise-identical to `andpd`;
// `andpd` would avoid a possible domain-crossing penalty — confirm intent.
; andps Rx(reg.rx().unwrap()), [=>const_label.0]
);
ValueLocation::Reg(reg)
};
self.push(out);
}
unop!(i32_clz, lzcnt, Rd, u32, u32::leading_zeros);
unop!(i64_clz, lzcnt, Rq, u64, |a: u64| a.leading_zeros() as u64);
unop!(i32_ctz, tzcnt, Rd, u32, u32::trailing_zeros);
@@ -2507,10 +2668,9 @@ impl<'module, M: ModuleContext> Context<'module, M> {
let mut val = self.pop();
let out_val = match val {
ValueLocation::Immediate(imm) =>
ValueLocation::Immediate(
wasmparser::Ieee32((imm.as_i32().unwrap() as u32 as f32).to_bits()).into()
),
ValueLocation::Immediate(imm) => ValueLocation::Immediate(
wasmparser::Ieee32((imm.as_i32().unwrap() as u32 as f32).to_bits()).into(),
),
_ => {
let reg = self.into_reg(I32, val);
val = ValueLocation::Reg(reg);
@@ -3040,18 +3200,63 @@ impl<'module, M: ModuleContext> Context<'module, M> {
self.push(ValueLocation::Immediate(imm));
}
pub fn memory_size(&mut self) {
let tmp = self.block_state.regs.take(I32);
// 16 is log2(64KiB as bytes)
dynasm!(self.asm
; mov Rd(tmp.rq().unwrap()), [
rdi + self.module_context.offset_of_memory_len() as i32
]
; shr Rd(tmp.rq().unwrap()), 16
/// Emits a call to an external, runtime-provided function identified by
/// `name`, recording an `Abs8` relocation so the runtime can patch the real
/// address in later.
///
/// Arguments are passed via `pass_outgoing_args`; the callee address is
/// materialized by a 64-bit `mov` whose immediate starts out as a
/// `0xdeadbeef…` placeholder covered by the relocation. Returns are pushed
/// onto the value stack per `ret_locs`.
fn relocated_function_call(
&mut self,
name: &cranelift_codegen::ir::ExternalName,
args: impl IntoIterator<Item = SignlessType>,
rets: impl IntoIterator<Item = SignlessType>,
) {
self.pass_outgoing_args(&arg_locs(args));
// 2 bytes for the 64-bit `mov` opcode + register ident, the rest is the immediate
self.reloc_sink.reloc_external(
// Relocation offset is relative to the start of the current function.
(self.asm.offset().0
- self.func_starts[self.current_function as usize]
.0
.unwrap()
.0) as u32
+ 2,
binemit::Reloc::Abs8,
name,
0,
);
let temp = self.block_state.regs.take(I64);
dynasm!(self.asm
// Placeholder; overwritten by the relocation recorded above.
; mov Rq(temp.rq().unwrap()), QWORD 0xdeadbeefdeadbeefu64 as i64
; call Rq(temp.rq().unwrap())
);
self.block_state.regs.release(temp);
self.push_function_returns(rets);
}
self.push(ValueLocation::Reg(tmp));
// TODO: Other memory indices
/// `memory.size`: pushes the current size of memory 0 (in Wasm pages) by
/// calling out to the runtime's `memory32_size` libcall.
pub fn memory_size(&mut self) {
// The immediate 0 is presumably the memory-index argument to the
// libcall — TODO confirm against the runtime's libcall signature.
self.push(ValueLocation::Immediate(0u32.into()));
self.relocated_function_call(
    &magic::get_memory32_size_name(),
    iter::once(I32),
    iter::once(I32),
);
// The previous inline implementation (load `current_length` from the
// vmctx and `shr` by 16) was removed in favor of the libcall above.
}
// TODO: Other memory indices
/// `memory.grow`: grows memory 0 by the page count currently on top of the
/// value stack, via the runtime's `memory32_grow` libcall, and pushes the
/// libcall's i32 result.
pub fn memory_grow(&mut self) {
// The immediate 0 is presumably the memory-index argument to the
// libcall — TODO confirm argument ordering against the libcall signature.
self.push(ValueLocation::Immediate(0u32.into()));
self.relocated_function_call(
&magic::get_memory32_grow_name(),
iter::once(I32).chain(iter::once(I32)),
iter::once(I32),
);
}
// TODO: Use `ArrayVec`?
@@ -3172,27 +3377,23 @@ impl<'module, M: ModuleContext> Context<'module, M> {
}
// TODO: Multiple returns
fn push_function_return(&mut self, arity: u32) {
if arity == 0 {
return;
fn push_function_returns(&mut self, returns: impl IntoIterator<Item = SignlessType>) {
for loc in ret_locs(returns) {
if let CCLoc::Reg(reg) = loc {
self.block_state.regs.mark_used(reg);
}
self.push(loc.into());
}
debug_assert_eq!(arity, 1);
self.block_state.regs.mark_used(RAX);
self.push(ValueLocation::Reg(RAX));
}
// TODO: Do return types properly
pub fn call_indirect(
&mut self,
signature_hash: u32,
type_id: u32,
arg_types: impl IntoIterator<Item = SignlessType>,
return_arity: u32,
return_types: impl IntoIterator<Item = SignlessType>,
) {
debug_assert!(
return_arity == 0 || return_arity == 1,
"We don't support multiple return yet"
);
let locs = arg_locs(arg_types);
for &loc in &locs {
@@ -3204,6 +3405,7 @@ impl<'module, M: ModuleContext> Context<'module, M> {
let callee = self.pop();
let callee = self.into_temp_reg(I32, callee);
let temp0 = self.block_state.regs.take(I64);
let temp1 = self.block_state.regs.take(I64);
for &loc in &locs {
if let CCLoc::Reg(r) = loc {
@@ -3217,30 +3419,44 @@ impl<'module, M: ModuleContext> Context<'module, M> {
// TODO: Consider generating a single trap function and jumping to that instead.
dynasm!(self.asm
; cmp Rd(callee.rq().unwrap()), [Rq(VMCTX) + self.module_context.offset_of_funcs_len() as i32]
; cmp Rd(callee.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
.vmctx_vmtable_definition_current_elements(0) as i32
]
; jae =>fail
; imul Rd(callee.rq().unwrap()), Rd(callee.rq().unwrap()), mem::size_of::<RuntimeFunc>() as i32
; mov Rq(temp0.rq().unwrap()), [Rq(VMCTX) + self.module_context.offset_of_funcs_ptr() as i32]
; imul
Rd(callee.rq().unwrap()),
Rd(callee.rq().unwrap()),
self.module_context.size_of_vmcaller_checked_anyfunc() as i32
; mov Rq(temp0.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
.vmctx_vmtable_definition_base(0) as i32
]
; mov Rd(temp1.rq().unwrap()), [
Rq(VMCTX) +
self.module_context
.vmctx_vmshared_signature_id(type_id) as i32
]
; cmp DWORD [
Rq(temp0.rq().unwrap()) +
Rq(callee.rq().unwrap()) +
RuntimeFunc::offset_of_sig_hash() as i32
], signature_hash as i32
self.module_context.vmcaller_checked_anyfunc_type_index() as i32
], Rd(temp1.rq().unwrap())
; jne =>fail
);
dynasm!(self.asm
; call QWORD [
Rq(temp0.rq().unwrap()) +
Rq(callee.rq().unwrap()) +
RuntimeFunc::offset_of_func_start() as i32
self.module_context.vmcaller_checked_anyfunc_func_ptr() as i32
]
);
self.block_state.regs.release(temp0);
self.block_state.regs.release(temp1);
self.block_state.regs.release(callee);
self.push_function_return(return_arity);
self.push_function_returns(return_types);
}
pub fn swap(&mut self, depth: u32) {
@@ -3253,13 +3469,8 @@ impl<'module, M: ModuleContext> Context<'module, M> {
&mut self,
index: u32,
arg_types: impl IntoIterator<Item = SignlessType>,
return_arity: u32,
return_types: impl IntoIterator<Item = SignlessType>,
) {
debug_assert!(
return_arity == 0 || return_arity == 1,
"We don't support multiple return yet"
);
self.pass_outgoing_args(&arg_locs(arg_types));
let label = &self.func_starts[index as usize].1;
@@ -3267,7 +3478,7 @@ impl<'module, M: ModuleContext> Context<'module, M> {
; call =>*label
);
self.push_function_return(return_arity);
self.push_function_returns(return_types);
}
// TODO: Reserve space to store RBX, RBP, and R12..R15 so we can use them
@@ -3295,21 +3506,38 @@ impl<'module, M: ModuleContext> Context<'module, M> {
/// conditional traps in `call_indirect` use)
pub fn epilogue(&mut self) {
// TODO: We don't want to redefine this label if we're sharing it between functions
if let Some(l) = self.labels.trap {
if let Some(l) = self
.labels
.trap
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.define_label(l);
dynasm!(self.asm
; ud2
);
self.labels.trap = Some(PendingLabel::defined(l));
}
if let Some(l) = self.labels.ret {
if let Some(l) = self
.labels
.ret
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.define_label(l);
dynasm!(self.asm
; ret
);
self.labels.ret = Some(PendingLabel::defined(l));
}
if let Some(l) = self.labels.neg_const_f32 {
if let Some(l) = self
.labels
.neg_const_f32
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
@@ -3318,9 +3546,15 @@ impl<'module, M: ModuleContext> Context<'module, M> {
; .dword 0
; .dword 0
);
self.labels.neg_const_f32 = Some(PendingLabel::defined(l));
}
if let Some(l) = self.labels.neg_const_f64 {
if let Some(l) = self
.labels
.neg_const_f64
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
@@ -3329,6 +3563,39 @@ impl<'module, M: ModuleContext> Context<'module, M> {
; .dword 0
; .dword 0
);
self.labels.neg_const_f64 = Some(PendingLabel::defined(l));
}
if let Some(l) = self
.labels
.abs_const_f32
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
; .dword 2147483647
; .dword 2147483647
; .dword 2147483647
; .dword 2147483647
);
self.labels.abs_const_f32 = Some(PendingLabel::defined(l));
}
if let Some(l) = self
.labels
.abs_const_f64
.as_ref()
.and_then(PendingLabel::as_undefined)
{
self.align(16);
self.define_label(l);
dynasm!(self.asm
; .qword 9223372036854775807
; .qword 9223372036854775807
);
self.labels.abs_const_f64 = Some(PendingLabel::defined(l));
}
}
@@ -3347,45 +3614,67 @@ impl<'module, M: ModuleContext> Context<'module, M> {
#[must_use]
fn trap_label(&mut self) -> Label {
if let Some(l) = self.labels.trap {
return l;
if let Some(l) = &self.labels.trap {
return l.label;
}
let label = self.create_label();
self.labels.trap = Some(label);
self.labels.trap = Some(label.into());
label
}
#[must_use]
fn ret_label(&mut self) -> Label {
if let Some(l) = self.labels.ret {
return l;
if let Some(l) = &self.labels.ret {
return l.label;
}
let label = self.create_label();
self.labels.ret = Some(label);
self.labels.ret = Some(label.into());
label
}
#[must_use]
fn neg_const_f32_label(&mut self) -> Label {
if let Some(l) = self.labels.neg_const_f32 {
return l;
if let Some(l) = &self.labels.neg_const_f32 {
return l.label;
}
let label = self.create_label();
self.labels.neg_const_f32 = Some(label);
self.labels.neg_const_f32 = Some(label.into());
label
}
#[must_use]
fn neg_const_f64_label(&mut self) -> Label {
if let Some(l) = self.labels.neg_const_f64 {
return l;
if let Some(l) = &self.labels.neg_const_f64 {
return l.label;
}
let label = self.create_label();
self.labels.neg_const_f64 = Some(label);
self.labels.neg_const_f64 = Some(label.into());
label
}
/// Returns (creating on first use) the label for the 128-bit f32 abs mask
/// constant — four `0x7FFFFFFF` dwords, emitted by `epilogue`.
#[must_use]
fn abs_const_f32_label(&mut self) -> Label {
if let Some(l) = &self.labels.abs_const_f32 {
return l.label;
}
// Not yet requested this session: create it and record it as pending
// (`.into()` yields an undefined `PendingLabel`).
let label = self.create_label();
self.labels.abs_const_f32 = Some(label.into());
label
}

/// Returns (creating on first use) the label for the 128-bit f64 abs mask
/// constant — two `0x7FFFFFFFFFFFFFFF` qwords, emitted by `epilogue`.
#[must_use]
fn abs_const_f64_label(&mut self) -> Label {
if let Some(l) = &self.labels.abs_const_f64 {
return l.label;
}
// Not yet requested this session: create it and record it as pending.
let label = self.create_label();
self.labels.abs_const_f64 = Some(label.into());
label
}
}

View File

@@ -2,6 +2,7 @@ use crate::backend::*;
use crate::error::Error;
use crate::microwasm::*;
use crate::module::{quickhash, ModuleContext, SigType, Signature};
use cranelift_codegen::binemit;
use either::{Either, Left, Right};
use multi_mut::HashMapMultiMut;
use std::{collections::HashMap, convert::TryInto, hash::Hash};
@@ -28,12 +29,14 @@ impl Block {
const DISASSEMBLE: bool = false;
pub fn translate_wasm<M: ModuleContext>(
pub fn translate_wasm<M>(
session: &mut CodeGenSession<M>,
reloc_sink: &mut dyn binemit::RelocSink,
func_idx: u32,
body: &wasmparser::FunctionBody,
) -> Result<(), Error>
where
M: ModuleContext,
for<'any> &'any M::Signature: Into<OpSig>,
{
let ty = session.module_context.func_type(func_idx);
@@ -64,17 +67,20 @@ where
translate(
session,
reloc_sink,
func_idx,
microwasm_conv.flat_map(|i| i.expect("TODO: Make this not panic")),
)
}
pub fn translate<M: ModuleContext, I, L>(
pub fn translate<M, I, L>(
session: &mut CodeGenSession<M>,
reloc_sink: &mut dyn binemit::RelocSink,
func_idx: u32,
body: I,
) -> Result<(), Error>
where
M: ModuleContext,
I: IntoIterator<Item = Operator<L>>,
L: Hash + Clone + Eq,
Operator<L>: std::fmt::Display,
@@ -90,7 +96,7 @@ where
let mut body = body.into_iter().peekable();
let module_context = &*session.module_context;
let ctx = &mut session.new_context(func_idx);
let ctx = &mut session.new_context(func_idx, reloc_sink);
let params = func_type
.params()
@@ -367,7 +373,7 @@ where
}
let cc = cc.unwrap_or_else(||
if max_num_callers == Some(1) {
if max_num_callers.map(|callers| callers <= 1).unwrap_or(false) {
Right(ctx.virtual_calling_convention())
} else {
Left(ctx.serialize_args(params))
@@ -452,6 +458,7 @@ where
Operator::Sub(F32) => ctx.f32_sub(),
Operator::Div(SF32) => ctx.f32_div(),
Operator::Neg(Size::_32) => ctx.f32_neg(),
Operator::Abs(Size::_32) => ctx.f32_abs(),
Operator::Gt(SF32) => ctx.f32_gt(),
Operator::Ge(SF32) => ctx.f32_ge(),
Operator::Lt(SF32) => ctx.f32_lt(),
@@ -461,6 +468,7 @@ where
Operator::Sub(F64) => ctx.f64_sub(),
Operator::Div(SF64) => ctx.f64_div(),
Operator::Neg(Size::_64) => ctx.f64_neg(),
Operator::Abs(Size::_64) => ctx.f64_abs(),
Operator::Gt(SF64) => ctx.f64_gt(),
Operator::Ge(SF64) => ctx.f64_ge(),
Operator::Lt(SF64) => ctx.f64_lt(),
@@ -487,16 +495,12 @@ where
} => ctx.i32_extend_s(),
Operator::FConvertFromI {
input_ty: sint::I32,
output_ty: Size::_32
} => {
ctx.f32_convert_from_i32_s()
},
output_ty: Size::_32,
} => ctx.f32_convert_from_i32_s(),
Operator::FConvertFromI {
input_ty: sint::U32,
output_ty: Size::_32
} => {
ctx.f32_convert_from_i32_u()
},
output_ty: Size::_32,
} => ctx.f32_convert_from_i32_u(),
Operator::Load8 {
ty: sint::U32,
memarg,
@@ -543,19 +547,23 @@ where
Operator::Load { ty: F64, memarg } => ctx.f64_load(memarg.offset),
Operator::Store8 { ty: _, memarg } => ctx.store8(memarg.offset),
Operator::Store16 { ty: _, memarg } => ctx.store16(memarg.offset),
Operator::Store32 { memarg } => ctx.store32(memarg.offset),
Operator::Store { ty: I32, memarg } | Operator::Store { ty: F32, memarg } => {
ctx.store32(memarg.offset)
}
Operator::Store32 { memarg }
| Operator::Store { ty: I32, memarg }
| Operator::Store { ty: F32, memarg } => ctx.store32(memarg.offset),
Operator::Store { ty: I64, memarg } | Operator::Store { ty: F64, memarg } => {
ctx.store64(memarg.offset)
}
Operator::GetGlobal(idx) => ctx.get_global(idx),
Operator::SetGlobal(idx) => ctx.set_global(idx),
Operator::Select => {
ctx.select();
}
Operator::MemorySize { reserved: _ } => {
ctx.memory_size();
}
Operator::MemoryGrow { reserved: _ } => {
ctx.memory_grow();
}
Operator::Call { function_index } => {
let function_index = module_context
.defined_func_index(function_index)
@@ -566,7 +574,7 @@ where
ctx.call_direct(
function_index,
callee_ty.params().iter().map(|t| t.to_microwasm_type()),
callee_ty.returns().len() as u32,
callee_ty.returns().iter().map(|t| t.to_microwasm_type()),
);
}
Operator::CallIndirect {
@@ -580,9 +588,9 @@ where
// TODO: this implementation assumes that this function is locally defined.
ctx.call_indirect(
quickhash(callee_ty) as u32,
type_index,
callee_ty.params().iter().map(|t| t.to_microwasm_type()),
callee_ty.returns().len() as u32,
callee_ty.returns().iter().map(|t| t.to_microwasm_type()),
);
}
op => {

View File

@@ -467,12 +467,8 @@ pub enum Operator<Label> {
// TODO: Is it better to have `Swap`, to have `Pull` (which moves the `nth` element instead of swapping)
// or to have both?
Swap(u32),
GetGlobal {
index: u32,
},
SetGlobal {
index: u32,
},
GetGlobal(u32),
SetGlobal(u32),
Load {
ty: SignlessType,
memarg: MemoryImmediate,
@@ -799,12 +795,111 @@ where
Operator::FConvertFromI {
input_ty,
output_ty,
} => write!(f, "{}.convert_from.{}", input_ty, Type::Float::<Int>(*output_ty)),
} => write!(
f,
"{}.convert_from.{}",
input_ty,
Type::Float::<Int>(*output_ty)
),
_ => unimplemented!(),
}
}
}
// TODO: If we return a `Vec<<T as MicrowasmReceiver>::Item>` will that convert to (essentially) a no-op
// in the case that `Item` is a ZST? That is important for ensuring that we don't do unnecessary
// work when we're directly generating asm.
/// WIP: Trait to abstract over either producing a stream of Microwasm or directly producing assembly
/// from the Wasm. This should give a significant speedup since we don't need to allocate any vectors
/// or pay the cost of branches - we can just use iterators and direct function calls.
pub trait MicrowasmReceiver<Label> {
// Item emitted per operator; intended to be a ZST when generating asm
// directly (see the module comment above this trait).
type Item;

// Control flow.
fn unreachable(&mut self) -> Self::Item;
fn block(
&mut self,
label: Label,
params: impl Iterator<Item = SignlessType>,
has_backwards_callers: bool,
num_callers: Option<u32>,
) -> Self::Item;
fn label(&mut self, _: Label) -> Self::Item;
fn br(&mut self, target: BrTarget<Label>) -> Self::Item;
fn br_if(&mut self, then: BrTargetDrop<Label>, else_: BrTargetDrop<Label>) -> Self::Item;
fn br_table(&mut self, _: BrTable<Label>) -> Self::Item;
fn call(&mut self, function_index: u32) -> Self::Item;
fn call_indirect(&mut self, type_index: u32, table_index: u32) -> Self::Item;

// Value-stack manipulation.
fn drop(&mut self, _: RangeInclusive<u32>) -> Self::Item;
fn select(&mut self) -> Self::Item;
fn pick(&mut self, _: u32) -> Self::Item;
fn swap(&mut self, _: u32) -> Self::Item;

// Globals.
fn get_global(&mut self, index: u32) -> Self::Item;
fn set_global(&mut self, index: u32) -> Self::Item;

// Linear-memory loads/stores.
fn load(&mut self, ty: SignlessType, memarg: MemoryImmediate) -> Self::Item;
fn load8(&mut self, ty: SignfulInt, memarg: MemoryImmediate) -> Self::Item;
fn load16(&mut self, ty: SignfulInt, memarg: MemoryImmediate) -> Self::Item;
fn load32(&mut self, sign: Signedness, memarg: MemoryImmediate) -> Self::Item;
fn store(&mut self, ty: SignlessType, memarg: MemoryImmediate) -> Self::Item;
fn store8(&mut self, ty: Int, memarg: MemoryImmediate) -> Self::Item;
fn store16(&mut self, ty: Int, memarg: MemoryImmediate) -> Self::Item;
fn store32(&mut self, memarg: MemoryImmediate) -> Self::Item;
fn memory_size(&mut self, reserved: u32) -> Self::Item;
fn memory_grow(&mut self, reserved: u32) -> Self::Item;

// Constants and references.
fn const_(&mut self, _: Value) -> Self::Item;
fn ref_null(&mut self) -> Self::Item;
fn ref_is_null(&mut self) -> Self::Item;

// Comparisons.
fn eq(&mut self, _: SignlessType) -> Self::Item;
fn ne(&mut self, _: SignlessType) -> Self::Item;
fn eqz(&mut self, _: Int) -> Self::Item;
fn lt(&mut self, _: SignfulType) -> Self::Item;
fn gt(&mut self, _: SignfulType) -> Self::Item;
fn le(&mut self, _: SignfulType) -> Self::Item;
fn ge(&mut self, _: SignfulType) -> Self::Item;

// Integer arithmetic and bit ops.
fn add(&mut self, _: SignlessType) -> Self::Item;
fn sub(&mut self, _: SignlessType) -> Self::Item;
fn mul(&mut self, _: SignlessType) -> Self::Item;
fn clz(&mut self, _: Int) -> Self::Item;
fn ctz(&mut self, _: Int) -> Self::Item;
fn popcnt(&mut self, _: Int) -> Self::Item;
fn div(&mut self, _: SignfulType) -> Self::Item;
fn rem(&mut self, _: SignfulInt) -> Self::Item;
fn and(&mut self, _: Int) -> Self::Item;
fn or(&mut self, _: Int) -> Self::Item;
fn xor(&mut self, _: Int) -> Self::Item;
fn shl(&mut self, _: Int) -> Self::Item;
fn shr(&mut self, _: SignfulInt) -> Self::Item;
fn rotl(&mut self, _: Int) -> Self::Item;
fn rotr(&mut self, _: Int) -> Self::Item;

// Floating-point unary/binary ops.
fn abs(&mut self, _: Float) -> Self::Item;
fn neg(&mut self, _: Float) -> Self::Item;
fn ceil(&mut self, _: Float) -> Self::Item;
fn floor(&mut self, _: Float) -> Self::Item;
fn trunc(&mut self, _: Float) -> Self::Item;
fn nearest(&mut self, _: Float) -> Self::Item;
fn sqrt(&mut self, _: Float) -> Self::Item;
fn min(&mut self, _: Float) -> Self::Item;
fn max(&mut self, _: Float) -> Self::Item;
fn copysign(&mut self, _: Float) -> Self::Item;

// Conversions between numeric types.
fn i32_wrap_from_i64(&mut self) -> Self::Item;
fn i_trunc_from_f(&mut self, input_ty: Float, output_ty: SignfulInt) -> Self::Item;
fn f_convert_from_i(&mut self, input_ty: SignfulInt, output_ty: Float) -> Self::Item;
fn f32_demote_from_f64(&mut self) -> Self::Item;
fn f64_promote_from_f32(&mut self) -> Self::Item;
fn i32_reinterpret_from_f32(&mut self) -> Self::Item;
fn i64_reinterpret_from_f64(&mut self) -> Self::Item;
fn f32_reinterpret_from_i32(&mut self) -> Self::Item;
fn f64_reinterpret_from_i64(&mut self) -> Self::Item;
fn extend(&mut self, sign: Signedness) -> Self::Item;
fn i_sat_trunc_from_f(&mut self, input_ty: Float, output_ty: SignfulInt) -> Self::Item;

// Bulk-memory and table operations.
fn memory_init(&mut self, segment: u32) -> Self::Item;
fn data_drop(&mut self, segment: u32) -> Self::Item;
fn memory_copy(&mut self) -> Self::Item;
fn memory_fill(&mut self) -> Self::Item;
fn table_init(&mut self, segment: u32) -> Self::Item;
fn elem_drop(&mut self, segment: u32) -> Self::Item;
fn table_copy(&mut self) -> Self::Item;
}
/// Type of a control frame.
#[derive(Debug, Clone, PartialEq)]
enum ControlFrameKind {
@@ -1074,11 +1169,11 @@ where
sig!((ty) -> (ty))
}
WasmOperator::GetGlobal { global_index: _ } => {
unimplemented!("Haven't implemented getting type of globals yet")
WasmOperator::GetGlobal { global_index } => {
sig!(() -> (self.module.global_type(*global_index).to_microwasm_type()))
}
WasmOperator::SetGlobal { global_index: _ } => {
unimplemented!("Haven't implemented getting type of globals yet")
WasmOperator::SetGlobal { global_index } => {
sig!((self.module.global_type(*global_index).to_microwasm_type()) -> ())
}
WasmOperator::F32Load { .. } => sig!((I32) -> (F32)),
@@ -1712,6 +1807,12 @@ where
Operator::Pick(depth - 1),
]
}
WasmOperator::GetGlobal { global_index } => {
smallvec![Operator::GetGlobal(global_index)]
}
WasmOperator::SetGlobal { global_index } => {
smallvec![Operator::SetGlobal(global_index)]
}
WasmOperator::I32Load { memarg } => smallvec![Operator::Load { ty: I32, memarg }],
WasmOperator::I64Load { memarg } => smallvec![Operator::Load { ty: I64, memarg }],

View File

@@ -102,7 +102,7 @@ impl_function_args!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S);
#[derive(Default)]
pub struct TranslatedModule {
translated_code_section: Option<TranslatedCodeSection>,
types: SimpleContext,
ctx: SimpleContext,
// TODO: Should we wrap this in a `Mutex` so that calling functions from multiple
// threads doesn't cause data races?
table: Option<(TableType, Vec<u32>)>,
@@ -122,7 +122,7 @@ impl TranslatedModule {
.translated_code_section
.as_ref()
.expect("We don't currently support a table section without a code section");
let types = &self.types;
let ctx = &self.ctx;
self.table
.as_mut()
@@ -131,11 +131,11 @@ impl TranslatedModule {
.iter()
.map(|i| {
let start = code_section.func_start(*i as _);
let ty = types.func_type(*i);
let ty = ctx.func_type(*i);
RuntimeFunc {
func_start: start,
sig_hash: quickhash(ty) as u32,
sig_hash: quickhash(ty),
}
})
.collect::<Vec<_>>();
@@ -154,7 +154,17 @@ impl TranslatedModule {
.into();
let ctx = if mem.len > 0 || table.len > 0 {
Some(Box::new(VmCtx { table, mem }))
let hashes = self.ctx.types.iter().map(quickhash).collect::<Vec<_>>();
// Hardcoded maximum number of hashes supported for now - we will eventually port all our
// tests over to wasmtime which will make this obsolete so implementing this properly is
// unnecessary.
let mut out = [0; 64];
// This will panic if `hashes.len() > 64`
out[..hashes.len()].copy_from_slice(&hashes[..]);
Some(Box::new(GVmCtx { table, mem, hashes: out }) as Box<VmCtx>)
} else {
None
};
@@ -215,11 +225,11 @@ impl ExecutableModule {
) -> Result<T, ExecutionError> {
let module = &self.module;
if func_idx as usize >= module.types.func_ty_indicies.len() {
if func_idx as usize >= module.ctx.func_ty_indicies.len() {
return Err(ExecutionError::FuncIndexOutOfBounds);
}
let type_ = module.types.func_type(func_idx);
let type_ = module.ctx.func_type(func_idx);
// TODO: Handle "compatible" types (i.e. f32 and i32)
if (&type_.params[..], &type_.returns[..]) != (Args::TYPE_LIST, T::TYPE_LIST) {
@@ -237,7 +247,7 @@ impl ExecutableModule {
type FuncRef = *const u8;
pub struct RuntimeFunc {
sig_hash: u32,
sig_hash: u64,
func_start: FuncRef,
}
@@ -279,32 +289,41 @@ impl<T> Drop for BoxSlice<T> {
}
}
pub struct VmCtx {
pub type VmCtx = GVmCtx<[u64]>;
pub struct GVmCtx<T: ?Sized> {
table: BoxSlice<RuntimeFunc>,
mem: BoxSlice<u8>,
hashes: T,
}
impl VmCtx {
pub fn offset_of_memory_ptr() -> u8 {
offset_of!(Self, mem.ptr)
impl<T: ?Sized> GVmCtx<T> {
pub fn offset_of_memory_ptr() -> u32 {
offset_of!(GVmCtx<[u64; 0]>, mem.ptr)
.try_into()
.expect("Offset exceeded size of u8")
.expect("Offset exceeded size of u32")
}
pub fn offset_of_memory_len() -> u8 {
offset_of!(Self, mem.len)
pub fn offset_of_memory_len() -> u32 {
offset_of!(GVmCtx<[u64; 0]>, mem.len)
.try_into()
.expect("Offset exceeded size of u8")
.expect("Offset exceeded size of u32")
}
pub fn offset_of_funcs_ptr() -> u8 {
offset_of!(Self, table.ptr)
offset_of!(GVmCtx<[u64; 0]>, table.ptr)
.try_into()
.expect("Offset exceeded size of u8")
}
pub fn offset_of_funcs_len() -> u8 {
offset_of!(Self, table.len)
offset_of!(GVmCtx<[u64; 0]>, table.len)
.try_into()
.expect("Offset exceeded size of u8")
}
pub fn offset_of_hashes() -> u8 {
offset_of!(GVmCtx<[u64; 0]>, hashes)
.try_into()
.expect("Offset exceeded size of u8")
}
@@ -327,21 +346,20 @@ pub trait Signature {
pub trait SigType {
fn to_microwasm_type(&self) -> microwasm::SignlessType;
fn is_float(&self) -> bool;
}
impl SigType for AbiParam {
impl SigType for ir::Type {
fn to_microwasm_type(&self) -> microwasm::SignlessType {
use crate::microwasm::{Size::*, Type::*};
if self.value_type.is_int() {
match self.value_type.bits() {
if self.is_int() {
match self.bits() {
32 => Int(_32),
64 => Int(_64),
_ => unimplemented!(),
}
} else if self.value_type.is_float() {
match self.value_type.bits() {
} else if self.is_float() {
match self.bits() {
32 => Float(_32),
64 => Float(_64),
_ => unimplemented!(),
@@ -350,9 +368,11 @@ impl SigType for AbiParam {
unimplemented!()
}
}
}
fn is_float(&self) -> bool {
self.value_type.is_float()
impl SigType for AbiParam {
fn to_microwasm_type(&self) -> microwasm::SignlessType {
self.value_type.to_microwasm_type()
}
}
@@ -377,13 +397,6 @@ impl SigType for wasmparser::Type {
fn to_microwasm_type(&self) -> microwasm::SignlessType {
microwasm::Type::from_wasm(*self).unwrap()
}
fn is_float(&self) -> bool {
match self {
wasmparser::Type::F32 | wasmparser::Type::F64 => true,
_ => false,
}
}
}
impl Signature for FuncType {
@@ -399,14 +412,24 @@ impl Signature for FuncType {
}
pub trait ModuleContext {
type Signature: Signature + Hash;
type Signature: Signature;
type GlobalType: SigType;
fn vmctx_vmglobal_definition(&self, index: u32) -> u32;
fn vmctx_vmmemory_definition_base(&self, defined_memory_index: u32) -> u32;
fn vmctx_vmmemory_definition_current_length(&self, defined_memory_index: u32) -> u32;
fn vmctx_vmtable_definition_base(&self, defined_table_index: u32) -> u32;
fn vmctx_vmtable_definition_current_elements(&self, defined_table_index: u32) -> u32;
fn vmctx_vmshared_signature_id(&self, signature_idx: u32) -> u32;
fn vmcaller_checked_anyfunc_type_index(&self) -> u8;
fn vmcaller_checked_anyfunc_func_ptr(&self) -> u8;
fn size_of_vmcaller_checked_anyfunc(&self) -> u8;
fn defined_global_index(&self, global_index: u32) -> Option<u32>;
fn global_type(&self, global_index: u32) -> &Self::GlobalType;
fn func_type_index(&self, func_idx: u32) -> u32;
fn signature(&self, index: u32) -> &Self::Signature;
fn offset_of_memory_ptr(&self) -> u32;
fn offset_of_memory_len(&self) -> u32;
fn offset_of_funcs_ptr(&self) -> u8;
fn offset_of_funcs_len(&self) -> u8;
fn func_index(&self, defined_func_index: u32) -> u32;
fn defined_func_index(&self, func_index: u32) -> Option<u32>;
@@ -424,6 +447,7 @@ pub trait ModuleContext {
impl ModuleContext for SimpleContext {
type Signature = FuncType;
type GlobalType = wasmparser::Type;
// TODO: We don't support external functions yet
fn func_index(&self, func_idx: u32) -> u32 {
@@ -438,24 +462,52 @@ impl ModuleContext for SimpleContext {
self.func_ty_indicies[func_idx as usize]
}
fn defined_global_index(&self, index: u32) -> Option<u32> {
unimplemented!()
}
fn global_type(&self, global_index: u32) -> &Self::GlobalType {
unimplemented!()
}
fn signature(&self, index: u32) -> &Self::Signature {
&self.types[index as usize]
}
fn offset_of_memory_ptr(&self) -> u32 {
VmCtx::offset_of_memory_ptr() as _
fn vmctx_vmglobal_definition(&self, index: u32) -> u32 {
unimplemented!()
}
fn offset_of_memory_len(&self) -> u32 {
VmCtx::offset_of_memory_len() as _
fn vmctx_vmmemory_definition_base(&self, defined_memory_index: u32) -> u32 {
VmCtx::offset_of_memory_ptr()
}
fn offset_of_funcs_ptr(&self) -> u8 {
VmCtx::offset_of_funcs_ptr()
fn vmctx_vmmemory_definition_current_length(&self, defined_memory_index: u32) -> u32 {
VmCtx::offset_of_memory_len()
}
fn offset_of_funcs_len(&self) -> u8 {
VmCtx::offset_of_funcs_len()
fn vmctx_vmtable_definition_base(&self, defined_table_index: u32) -> u32 {
VmCtx::offset_of_funcs_ptr() as _
}
fn vmctx_vmtable_definition_current_elements(&self, defined_table_index: u32) -> u32 {
VmCtx::offset_of_funcs_len() as _
}
fn vmcaller_checked_anyfunc_type_index(&self) -> u8 {
RuntimeFunc::offset_of_sig_hash() as _
}
fn vmcaller_checked_anyfunc_func_ptr(&self) -> u8 {
RuntimeFunc::offset_of_func_start() as _
}
fn size_of_vmcaller_checked_anyfunc(&self) -> u8 {
std::mem::size_of::<RuntimeFunc>() as _
}
fn vmctx_vmshared_signature_id(&self, signature_idx: u32) -> u32 {
VmCtx::offset_of_hashes() as u32 + signature_idx * std::mem::size_of::<u64>() as u32
}
// TODO: type of a global
@@ -479,7 +531,7 @@ pub fn translate_only(data: &[u8]) -> Result<TranslatedModule, Error> {
if let SectionCode::Type = section.code {
let types_reader = section.get_type_section_reader()?;
output.types.types = translate_sections::type_(types_reader)?;
output.ctx.types = translate_sections::type_(types_reader)?;
reader.skip_custom_sections()?;
if reader.eof() {
@@ -501,7 +553,7 @@ pub fn translate_only(data: &[u8]) -> Result<TranslatedModule, Error> {
if let SectionCode::Function = section.code {
let functions = section.get_function_section_reader()?;
output.types.func_ty_indicies = translate_sections::function(functions)?;
output.ctx.func_ty_indicies = translate_sections::function(functions)?;
reader.skip_custom_sections()?;
if reader.eof() {
@@ -598,7 +650,7 @@ pub fn translate_only(data: &[u8]) -> Result<TranslatedModule, Error> {
if let SectionCode::Code = section.code {
let code = section.get_code_section_reader()?;
output.translated_code_section = Some(translate_sections::code(code, &output.types)?);
output.translated_code_section = Some(translate_sections::code(code, &output.ctx)?);
reader.skip_custom_sections()?;
if reader.eof() {

View File

@@ -404,6 +404,7 @@ mod opf32 {
binop_test!(le, |a, b| a <= b, i32);
unop_test!(neg, |a: f32| -a);
unop_test!(abs, |a: f32| a.abs());
}
mod opf64 {
@@ -520,6 +521,7 @@ mod opf64 {
binop_test!(le, |a, b| a <= b, i32);
unop_test!(neg, |a: f64| -a);
unop_test!(abs, |a: f64| a.abs());
}
quickcheck! {

View File

@@ -3,6 +3,7 @@ use crate::error::Error;
use crate::function_body;
use crate::microwasm::{MicrowasmConv, Type as MWType};
use crate::module::{ModuleContext, SimpleContext};
use cranelift_codegen::{binemit, ir};
#[allow(unused_imports)] // for now
use wasmparser::{
CodeSectionReader, Data, DataSectionReader, Element, ElementSectionReader, Export,
@@ -105,6 +106,28 @@ pub fn element(elements: ElementSectionReader) -> Result<Vec<u32>, Error> {
Ok(out)
}
/// A `binemit::RelocSink` that rejects every relocation request.
///
/// Used when translating code that is not expected to emit any
/// relocations; each callback panics via `unimplemented!()` so a
/// relocation showing up here is surfaced immediately rather than
/// silently dropped.
struct UnimplementedRelocSink;
impl binemit::RelocSink for UnimplementedRelocSink {
// Relocation against an EBB (basic-block) offset — not supported.
fn reloc_ebb(&mut self, _: binemit::CodeOffset, _: binemit::Reloc, _: binemit::CodeOffset) {
unimplemented!()
}
// Relocation against an external symbol — not supported.
fn reloc_external(
&mut self,
_: binemit::CodeOffset,
_: binemit::Reloc,
_: &ir::ExternalName,
_: binemit::Addend,
) {
unimplemented!()
}
// Relocation against a jump table — not supported.
fn reloc_jt(&mut self, _: binemit::CodeOffset, _: binemit::Reloc, _: ir::JumpTable) {
unimplemented!()
}
}
/// Parses the Code section of the wasm module.
pub fn code(
code: CodeSectionReader,
@@ -115,8 +138,9 @@ pub fn code(
for (idx, body) in code.into_iter().enumerate() {
let body = body?;
let mut relocs = UnimplementedRelocSink;
function_body::translate_wasm(&mut session, idx as u32, &body)?;
function_body::translate_wasm(&mut session, &mut relocs, idx as u32, &body)?;
}
Ok(session.into_translated_code_section()?)