diff --git a/Cargo.toml b/Cargo.toml
index b7801bd8d3..a6e3028e0a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,6 +16,8 @@ capstone = "0.5.0"
 failure = "0.1.3"
 failure_derive = "0.1.3"
 wabt = "0.7"
+lazy_static = "1.2"
+quickcheck = "0.7"
 
 [badges]
 maintenance = { status = "experimental" }
diff --git a/src/backend.rs b/src/backend.rs
index f9bc16125c..9759a6b49f 100644
--- a/src/backend.rs
+++ b/src/backend.rs
@@ -160,6 +160,7 @@ impl CodeGenSession {
     }
 }
 
+#[derive(Debug)]
 pub struct TranslatedCodeSection {
     exec_buf: ExecutableBuffer,
     func_starts: Vec<AssemblyOffset>,
@@ -170,6 +171,10 @@ impl TranslatedCodeSection {
         let offset = self.func_starts[idx];
         self.exec_buf.ptr(offset)
     }
+
+    pub fn disassemble(&self) {
+        ::disassemble::disassemble(&*self.exec_buf).unwrap();
+    }
 }
 
 pub struct Context<'a> {
@@ -238,14 +243,64 @@ fn pop_i32(ctx: &mut Context) -> GPR {
     gpr
 }
 
-pub fn add_i32(ctx: &mut Context) {
+pub fn i32_add(ctx: &mut Context) {
     let op0 = pop_i32(ctx);
     let op1 = pop_i32(ctx);
     dynasm!(ctx.asm
-        ; add Rd(op0), Rd(op1)
+        ; add Rd(op1), Rd(op0)
     );
-    push_i32(ctx, op0);
-    ctx.regs.release_scratch_gpr(op1);
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
+}
+
+pub fn i32_sub(ctx: &mut Context) {
+    let op0 = pop_i32(ctx);
+    let op1 = pop_i32(ctx);
+    dynasm!(ctx.asm
+        ; sub Rd(op1), Rd(op0)
+    );
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
+}
+
+pub fn i32_and(ctx: &mut Context) {
+    let op0 = pop_i32(ctx);
+    let op1 = pop_i32(ctx);
+    dynasm!(ctx.asm
+        ; and Rd(op1), Rd(op0)
+    );
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
+}
+
+pub fn i32_or(ctx: &mut Context) {
+    let op0 = pop_i32(ctx);
+    let op1 = pop_i32(ctx);
+    dynasm!(ctx.asm
+        ; or Rd(op1), Rd(op0)
+    );
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
+}
+
+pub fn i32_xor(ctx: &mut Context) {
+    let op0 = pop_i32(ctx);
+    let op1 = pop_i32(ctx);
+    dynasm!(ctx.asm
+        ; xor Rd(op1), Rd(op0)
+    );
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
+}
+
+pub fn i32_mul(ctx: &mut Context) {
+    let op0 = pop_i32(ctx);
+    let op1 = pop_i32(ctx);
+    dynasm!(ctx.asm
+        ; imul Rd(op1), Rd(op0)
+    );
+    push_i32(ctx, op1);
+    ctx.regs.release_scratch_gpr(op0);
 }
 
 fn sp_relative_offset(ctx: &mut Context, slot_idx: u32) -> i32 {
@@ -411,6 +466,7 @@ pub fn call_direct(ctx: &mut Context, index: u32, arg_arity: u32, return_arity:
 }
 
 pub fn prologue(ctx: &mut Context, stack_slots: u32) {
+    let stack_slots = stack_slots;
     // Align stack slots to the nearest even number. This is required
     // by x86-64 ABI.
     let aligned_stack_slots = (stack_slots + 1) & !1;
diff --git a/src/function_body.rs b/src/function_body.rs
index 925b32e502..e27c8a3bfc 100644
--- a/src/function_body.rs
+++ b/src/function_body.rs
@@ -208,18 +208,15 @@ pub fn translate(
                     }
                 }
             }
-            Operator::I32Eq => {
-                relop_eq_i32(&mut ctx);
-            }
-            Operator::I32Add => {
-                add_i32(&mut ctx);
-            }
-            Operator::GetLocal { local_index } => {
-                get_local_i32(&mut ctx, local_index);
-            }
-            Operator::I32Const { value } => {
-                literal_i32(&mut ctx, value);
-            }
+            Operator::I32Eq => relop_eq_i32(&mut ctx),
+            Operator::I32Add => i32_add(&mut ctx),
+            Operator::I32Sub => i32_sub(&mut ctx),
+            Operator::I32And => i32_and(&mut ctx),
+            Operator::I32Or => i32_or(&mut ctx),
+            Operator::I32Xor => i32_xor(&mut ctx),
+            Operator::I32Mul => i32_mul(&mut ctx),
+            Operator::GetLocal { local_index } => get_local_i32(&mut ctx, local_index),
+            Operator::I32Const { value } => literal_i32(&mut ctx, value),
             Operator::Call { function_index } => {
                 let callee_ty = translation_ctx.func_type(function_index);
 
diff --git a/src/lib.rs b/src/lib.rs
index 5b63cfb9cc..0d5f130639 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,7 +7,10 @@ extern crate wasmparser;
 #[macro_use]
 extern crate failure_derive;
 extern crate dynasmrt;
-
+#[macro_use]
+extern crate lazy_static;
+#[macro_use]
+extern crate quickcheck;
 extern crate wabt;
 
 mod backend;
diff --git a/src/module.rs b/src/module.rs
index a38aac0940..c3b7f4ddaf 100644
--- a/src/module.rs
+++ b/src/module.rs
@@ -52,6 +52,13 @@ impl TranslatedModule {
 
         args.call(start_buf)
     }
+
+    pub fn disassemble(&self) {
+        self.translated_code_section
+            .as_ref()
+            .expect("no code section")
+            .disassemble();
+    }
 }
 
 #[derive(Default)]
diff --git a/src/tests.rs b/src/tests.rs
index 14b48c15b9..5a1d49e085 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -18,20 +18,33 @@ fn empty() {
     let _ = translate_wat("(module (func))");
 }
 
-#[test]
-fn adds() {
-    const CASES: &[(u32, u32, u32)] = &[(5, 3, 8), (0, 228, 228), (u32::max_value(), 1, 0)];
+macro_rules! binop_test {
+    ($op:ident, $func:path) => {
+        quickcheck! {
+            fn $op(a: u32, b: u32) -> bool {
+                static CODE: &str = concat!(
+                    "(module (func (param i32) (param i32) (result i32) (i32.",
+                    stringify!($op),
+                    " (get_local 0) (get_local 1))))"
+                );
 
-    let code = r#"
-(module
-    (func (param i32) (param i32) (result i32) (i32.add (get_local 0) (get_local 1)))
-)
-    "#;
-    for (a, b, expected) in CASES {
-        assert_eq!(execute_wat(code, *a, *b), *expected);
-    }
+                lazy_static! {
+                    static ref TRANSLATED: TranslatedModule = translate_wat(CODE);
+                }
+
+                unsafe { TRANSLATED.execute_func::<(u32, u32), u32>(0, (a, b)) == $func(a, b) }
+            }
+        }
+    };
 }
 
+binop_test!(add, u32::wrapping_add);
+binop_test!(sub, u32::wrapping_sub);
+binop_test!(and, std::ops::BitAnd::bitand);
+binop_test!(or, std::ops::BitOr::bitor);
+binop_test!(xor, std::ops::BitXor::bitxor);
+binop_test!(mul, u32::wrapping_mul);
+
 #[test]
 fn relop_eq() {
     const CASES: &[(u32, u32, u32)] = &[