winch: Use cranelift-codegen x64 backend for emission. (#5581)

This change replaces the string-based emission mechanism with
cranelift-codegen's x64 backend.
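
Concretely, instruction emission now goes through cranelift-codegen's `MachBuffer`: each assembler method builds an x64 `Inst` and emits it into the buffer, which is finalized once code generation is done. The following is condensed from the new x64 `asm.rs` in the diff below:

```rust
// Condensed from the new x64 assembler: instructions are cranelift-codegen
// `Inst` values emitted into a `MachBuffer<Inst>`.
fn emit(&mut self, inst: Inst) {
    inst.emit(&[], &mut self.buffer, &self.emit_info, &mut self.emit_state);
}

/// Return the emitted code.
pub fn finalize(self) -> MachBufferFinalized<Final> {
    let stencil = self.buffer.finish();
    stencil.apply_base_srcloc(Default::default())
}
```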

This change _does not_:

* Introduce new functionality in terms of supported instructions.
* Change the semantics of the assembler/macroassembler or the logic used to
emit instructions.

The most notable differences between this change and the previous version are:

* Handling of shared flags and ISA-specific flags, which are left at their
default values for now.
* Simplification of instruction emission per operand size: previously the
assembler defined different methods depending on the operand size (e.g. `mov`
for 64 bits and `movl` for 32 bits). This change makes each assembler method
take the operand size as a parameter, reducing duplication and making the code
more concise and easier to integrate with the x64 backend's `Inst` enum (see
the sketch after this list).
* Introduction of a Capstone-based disassembler for testing purposes.
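
To illustrate the operand-size point above, this is roughly how call sites in the x64 macroassembler change (condensed from the diff below, not the complete code):

```rust
// Before: the operand size selected the assembler method.
match size {
    OperandSize::S32 => self.asm.movl(src, dst),
    OperandSize::S64 => self.asm.mov(src, dst),
}

// After: a single method takes the size, which is mapped onto
// cranelift-codegen's `args::OperandSize` when the `Inst` is built.
self.asm.mov(src, dst, size);
```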

As of this change, Winch generates the following code for these test
programs:

```wat
(module
  (export "main" (func $main))

  (func $main (result i32)
        (i32.const 10)
        (i32.const 20)
        i32.add
        ))
```

```asm
   0:	55                   	push	rbp
   1:	48 89 e5             	mov	rbp, rsp
   4:	b8 0a 00 00 00       	mov	eax, 0xa
   9:	83 c0 14             	add	eax, 0x14
   c:	5d                   	pop	rbp
   d:	c3                   	ret
```

```wat
(module
  (export "main" (func $main))

  (func $main (result i32)
        (local $foo i32)
    (local $bar i32)
        (i32.const 10)
    (local.set $foo)
        (i32.const 20)
    (local.set $bar)

        (local.get $foo)
        (local.get $bar)
        i32.add
        ))
```

```asm
   0:	55                   	push	rbp
   1:	48 89 e5             	mov	rbp, rsp
   4:	48 83 ec 08          	sub	rsp, 8
   8:	48 c7 04 24 00 00 00 00	mov	qword ptr [rsp], 0
  10:	b8 0a 00 00 00       	mov	eax, 0xa
  15:	89 44 24 04          	mov	dword ptr [rsp + 4], eax
  19:	b8 14 00 00 00       	mov	eax, 0x14
  1e:	89 04 24             	mov	dword ptr [rsp], eax
  21:	8b 04 24             	mov	eax, dword ptr [rsp]
  24:	8b 4c 24 04          	mov	ecx, dword ptr [rsp + 4]
  28:	01 c1                	add	ecx, eax
  2a:	48 89 c8             	mov	rax, rcx
  2d:	48 83 c4 08          	add	rsp, 8
  31:	5d                   	pop	rbp
  32:	c3                   	ret
```

```wat
(module
  (export "main" (func $main))

  (func $main (param i32) (param i32) (result i32)
        (local.get 0)
        (local.get 1)
        i32.add
        ))
```

```asm
   0:	55                   	push	rbp
   1:	48 89 e5             	mov	rbp, rsp
   4:	48 83 ec 08          	sub	rsp, 8
   8:	89 7c 24 04          	mov	dword ptr [rsp + 4], edi
   c:	89 34 24             	mov	dword ptr [rsp], esi
   f:	8b 04 24             	mov	eax, dword ptr [rsp]
  12:	8b 4c 24 04          	mov	ecx, dword ptr [rsp + 4]
  16:	01 c1                	add	ecx, eax
  18:	48 89 c8             	mov	rax, rcx
  1b:	48 83 c4 08          	add	rsp, 8
  1f:	5d                   	pop	rbp
  20:	c3                   	ret
```
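
The listings above were produced with the disassembler introduced in this change. Condensed from `winch/src/main.rs` in the diff below, the finalized machine-code buffer is printed through the Capstone-based helper:

```rust
let buffer = isa
    .compile_function(&sig, &body, validator)
    .expect("Couldn't compile function");

// Disassemble and print the emitted machine code.
disasm::print(buffer.data(), isa)?;
```
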
Saúl Cabrera, 2023-01-18 06:58:13 -05:00 (committed by GitHub)
parent 1e6c13d83e · commit 94b51cdb17
22 changed files with 533 additions and 383 deletions

Cargo.lock

@@ -3783,6 +3783,7 @@ name = "wasmtime-winch"
 version = "6.0.0"
 dependencies = [
  "anyhow",
+ "cranelift-codegen",
  "object",
  "target-lexicon",
  "wasmtime-environ",
@@ -3951,7 +3952,9 @@ name = "winch-tools"
 version = "0.0.0"
 dependencies = [
  "anyhow",
+ "capstone",
  "clap 3.2.8",
+ "cranelift-codegen",
  "target-lexicon",
  "wasmparser",
  "wasmtime-environ",


@@ -176,6 +176,7 @@ clap = { version = "3.2.0", features = ["color", "suggestions", "derive"] }
 hashbrown = "0.12"
 cap-std = "1.0.0"
 cap-rand = "1.0.0"
+capstone = "0.9.0"
 once_cell = "1.12.0"
 smallvec = { version = "1.6.1", features = ["union"] }
 io-lifetimes = { version = "1.0.0", default-features = false }


@@ -36,7 +36,7 @@ cranelift = { workspace = true }
 filecheck = "0.5.0"
 log = { workspace = true }
 termcolor = "1.1.2"
-capstone = { version = "0.9.0", optional = true }
+capstone = { workspace = true, optional = true }
 wat = { workspace = true, optional = true }
 target-lexicon = { workspace = true, features = ["std"] }
 pretty_env_logger = "0.4.0"


@@ -13,6 +13,7 @@ target-lexicon = { workspace = true }
 wasmtime-environ = { workspace = true }
 anyhow = { workspace = true }
 object = { workspace = true }
+cranelift-codegen = { workspace = true }

 [features]
 default = ["all-arch", "component-model"]


@@ -1,17 +1,30 @@
 use crate::compiler::Compiler;
 use anyhow::Result;
+use cranelift_codegen::settings;
 use std::sync::Arc;
 use target_lexicon::Triple;
 use wasmtime_environ::{CompilerBuilder, Setting};
 use winch_codegen::isa;

+/// Compiler builder.
 struct Builder {
+    /// Target triple.
     triple: Triple,
+    /// Shared flags builder.
+    shared_flags: settings::Builder,
+    /// ISA builder.
+    isa_builder: isa::Builder,
 }

 pub fn builder() -> Box<dyn CompilerBuilder> {
+    let triple = Triple::host();
     Box::new(Builder {
-        triple: Triple::host(),
+        triple: triple.clone(),
+        shared_flags: settings::builder(),
+        // TODO:
+        // Either refactor and re-use `cranelift-native::builder()` or come up with a similar
+        // mechanism to lookup the host's architecture ISA and infer native flags.
+        isa_builder: isa::lookup(triple).expect("host architecture is not supported"),
     })
 }
@@ -38,8 +51,10 @@ impl CompilerBuilder for Builder {
     }

     fn build(&self) -> Result<Box<dyn wasmtime_environ::Compiler>> {
-        let isa = isa::lookup(self.triple.clone())?;
-        Ok(Box::new(Compiler::new(isa)))
+        let flags = settings::Flags::new(self.shared_flags.clone());
+        Ok(Box::new(Compiler::new(
+            self.isa_builder.clone().build(flags)?,
+        )))
     }

     fn enable_incremental_compilation(


@@ -5,7 +5,7 @@ use wasmtime_environ::{
     CompileError, DefinedFuncIndex, FuncIndex, FunctionBodyData, FunctionLoc, ModuleTranslation,
     ModuleTypes, PrimaryMap, Tunables, WasmFunctionInfo,
 };
-use winch_codegen::isa::TargetIsa;
+use winch_codegen::TargetIsa;

 pub(crate) struct Compiler {
     isa: Box<dyn TargetIsa>,


@@ -20,6 +20,8 @@ anyhow = { workspace = true }
 wasmparser = { workspace = true }
 clap = { workspace = true }
 wat = { workspace = true }
+cranelift-codegen = { workspace = true }
+capstone = { workspace = true }

 [features]
 default = ["all-arch"]


@@ -4,7 +4,9 @@ use std::ops::{Add, BitAnd, Not, Sub};
 use wasmparser::{FuncType, ValType};

 pub(crate) mod addressing_mode;
+pub(crate) use addressing_mode::*;
 pub(crate) mod local;
+pub(crate) use local::*;

 /// Trait implemented by a specific ISA and used to provide
 /// information about alignment, parameter passing, usage of


@@ -13,7 +13,7 @@ pub(crate) struct CodeGenContext<'a, M>
 where
     M: MacroAssembler,
 {
-    pub masm: M,
+    pub masm: &'a mut M,
     pub stack: Stack,
     pub frame: &'a Frame,
 }
@@ -22,7 +22,7 @@ impl<'a, M> CodeGenContext<'a, M>
 where
     M: MacroAssembler,
 {
-    pub fn new(masm: M, stack: Stack, frame: &'a Frame) -> Self {
+    pub fn new(masm: &'a mut M, stack: Stack, frame: &'a Frame) -> Self {
         Self { masm, stack, frame }
     }
 }
@@ -63,13 +63,12 @@
         &mut self,
         body: &mut BinaryReader<'a>,
         validator: FuncValidator<ValidatorResources>,
-    ) -> Result<Vec<String>> {
+    ) -> Result<()> {
         self.emit_start()
-            .and(self.emit_body(body, validator))
-            .and(self.emit_end())?;
-        let buf = self.context.masm.finalize();
-        let code = Vec::from(buf);
-        Ok(code)
+            .and_then(|_| self.emit_body(body, validator))
+            .and_then(|_| self.emit_end())?;
+
+        Ok(())
     }

     // TODO stack checks


@@ -1,4 +1,4 @@
-use crate::abi::{align_to, local::LocalSlot, ty_size, ABIArg, ABISig, ABI};
+use crate::abi::{align_to, ty_size, ABIArg, ABISig, LocalSlot, ABI};
 use anyhow::Result;
 use smallvec::SmallVec;
 use std::ops::Range;


@@ -3,6 +3,7 @@ use crate::{
     isa::reg::Reg,
     masm::{MacroAssembler as Masm, OperandSize, RegImm},
 };
+use cranelift_codegen::{Final, MachBufferFinalized};

 #[derive(Default)]
 pub(crate) struct MacroAssembler;
@@ -34,7 +35,7 @@ impl Masm for MacroAssembler {
         0u32
     }

-    fn finalize(&mut self) -> &[String] {
+    fn finalize(self) -> MachBufferFinalized<Final> {
         todo!()
     }


@@ -1,5 +1,8 @@
-use crate::isa::TargetIsa;
+use crate::isa::{Builder, TargetIsa};
 use anyhow::Result;
+use cranelift_codegen::{
+    isa::aarch64::settings as aarch64_settings, settings::Flags, Final, MachBufferFinalized,
+};
 use target_lexicon::Triple;
 use wasmparser::{FuncType, FuncValidator, FunctionBody, ValidatorResources};
@@ -8,17 +11,38 @@ mod masm;
 mod regs;

 /// Create an ISA from the given triple.
-pub(crate) fn isa_from(triple: Triple) -> Aarch64 {
-    Aarch64::new(triple)
+pub(crate) fn isa_builder(triple: Triple) -> Builder {
+    Builder {
+        triple,
+        settings: aarch64_settings::builder(),
+        constructor: |triple, shared_flags, settings| {
+            let isa_flags = aarch64_settings::Flags::new(&shared_flags, settings);
+            let isa = Aarch64::new(triple, shared_flags, isa_flags);
+            Ok(Box::new(isa))
+        },
+    }
 }

+/// Aarch64 ISA.
+// Until Aarch64 emission is supported.
+#[allow(dead_code)]
 pub(crate) struct Aarch64 {
+    /// The target triple.
     triple: Triple,
+    /// ISA specific flags.
+    isa_flags: aarch64_settings::Flags,
+    /// Shared flags.
+    shared_flags: Flags,
 }

 impl Aarch64 {
-    pub fn new(triple: Triple) -> Self {
-        Self { triple }
+    /// Create a Aarch64 ISA.
+    pub fn new(triple: Triple, shared_flags: Flags, isa_flags: aarch64_settings::Flags) -> Self {
+        Self {
+            isa_flags,
+            shared_flags,
+            triple,
+        }
     }
 }
@@ -36,7 +60,7 @@ impl TargetIsa for Aarch64 {
         _sig: &FuncType,
         _body: &FunctionBody,
         mut _validator: FuncValidator<ValidatorResources>,
-    ) -> Result<Vec<String>> {
+    ) -> Result<MachBufferFinalized<Final>> {
         todo!()
     }
 }


@@ -1,6 +1,6 @@
 use anyhow::{anyhow, Result};
 use core::fmt::Formatter;
-use cranelift_codegen::isa::CallConv;
+use cranelift_codegen::{isa::CallConv, settings, Final, MachBufferFinalized};
 use std::{
     error,
     fmt::{self, Debug, Display},
@@ -16,11 +16,11 @@ pub(crate) mod aarch64;
 pub(crate) mod reg;

-macro_rules! isa {
+macro_rules! isa_builder {
     ($name: ident, $cfg_terms: tt, $triple: ident) => {{
         #[cfg $cfg_terms]
         {
-            Ok(Box::new($name::isa_from($triple)))
+            Ok($name::isa_builder($triple))
         }
         #[cfg(not $cfg_terms)]
         {
@@ -29,23 +29,33 @@ macro_rules! isa {
     }};
 }

-/// Look for an ISA for the given target triple.
-//
-// The ISA, as it's currently implemented in Cranelift
-// needs a builder since it adds settings
-// depending on those available in the host architecture.
-// I'm intentionally skipping the builder for now.
-// The lookup method will return the ISA directly.
-//
-// Once features like SIMD are supported, returning a builder
-// will make more sense.
-pub fn lookup(triple: Triple) -> Result<Box<dyn TargetIsa>> {
+/// The target ISA builder.
+#[derive(Clone)]
+pub struct Builder {
+    /// The target triple.
+    triple: Triple,
+    /// The ISA settings builder.
+    settings: settings::Builder,
+    /// The Target ISA constructor.
+    constructor: fn(Triple, settings::Flags, settings::Builder) -> Result<Box<dyn TargetIsa>>,
+}
+
+impl Builder {
+    /// Create a TargetIsa by combining ISA-specific settings with the provided
+    /// shared flags.
+    pub fn build(self, shared_flags: settings::Flags) -> Result<Box<dyn TargetIsa>> {
+        (self.constructor)(self.triple, shared_flags, self.settings)
+    }
+}
+
+/// Look for an ISA builder for the given target triple.
+pub fn lookup(triple: Triple) -> Result<Builder> {
     match triple.architecture {
         Architecture::X86_64 => {
-            isa!(x64, (feature = "x64"), triple)
+            isa_builder!(x64, (feature = "x64"), triple)
         }
         Architecture::Aarch64 { .. } => {
-            isa!(aarch64, (feature = "arm64"), triple)
+            isa_builder!(aarch64, (feature = "arm64"), triple)
         }
         _ => Err(anyhow!(LookupError::Unsupported)),
@@ -87,7 +97,7 @@ pub trait TargetIsa: Send + Sync {
         sig: &FuncType,
         body: &FunctionBody,
         validator: FuncValidator<ValidatorResources>,
-    ) -> Result<Vec<String>>;
+    ) -> Result<MachBufferFinalized<Final>>;

     /// Get the default calling convention of the underlying target triple.
     fn call_conv(&self) -> CallConv {


@@ -36,6 +36,17 @@ impl Reg {
     pub fn hw_enc(self) -> u8 {
         self.0.hw_enc() as u8
     }
+
+    /// Get the physical register representation.
+    pub(super) fn inner(&self) -> PReg {
+        self.0
+    }
+}
+
+impl From<Reg> for cranelift_codegen::Reg {
+    fn from(reg: Reg) -> Self {
+        reg.inner().into()
+    }
 }

 impl std::fmt::Debug for Reg {


@@ -0,0 +1,262 @@
//! Assembler library implementation for x64.
use crate::{abi::Address, isa::reg::Reg, masm::OperandSize};
use cranelift_codegen::{
isa::x64::{
args::{
self, AluRmiROpcode, Amode, ExtMode, FromWritableReg, Gpr, GprMem, GprMemImm, RegMem,
RegMemImm, SyntheticAmode, WritableGpr,
},
settings as x64_settings, EmitInfo, EmitState, Inst,
},
settings, Final, MachBuffer, MachBufferFinalized, MachInstEmit, Writable,
};
/// A x64 instruction operand.
#[derive(Debug, Copy, Clone)]
pub(crate) enum Operand {
/// Register.
Reg(Reg),
/// Memory address.
Mem(Address),
/// Immediate.
Imm(i32),
}
// Conversions between winch-codegen x64 types and cranelift-codegen x64 types.
impl From<Reg> for RegMemImm {
fn from(reg: Reg) -> Self {
RegMemImm::reg(reg.into())
}
}
impl From<OperandSize> for args::OperandSize {
fn from(size: OperandSize) -> Self {
match size {
OperandSize::S32 => Self::Size32,
OperandSize::S64 => Self::Size64,
}
}
}
/// Low level assembler implementation for x64.
pub(crate) struct Assembler {
/// The machine instruction buffer.
buffer: MachBuffer<Inst>,
/// Constant emission information.
emit_info: EmitInfo,
/// Emission state.
emit_state: EmitState,
}
impl Assembler {
/// Create a new x64 assembler.
pub fn new(shared_flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
Self {
buffer: MachBuffer::<Inst>::new(),
emit_state: Default::default(),
emit_info: EmitInfo::new(shared_flags, isa_flags),
}
}
/// Return the emitted code.
pub fn finalize(self) -> MachBufferFinalized<Final> {
let stencil = self.buffer.finish();
stencil.apply_base_srcloc(Default::default())
}
fn emit(&mut self, inst: Inst) {
inst.emit(&[], &mut self.buffer, &self.emit_info, &mut self.emit_state);
}
/// Push register.
pub fn push_r(&mut self, reg: Reg) {
let src = GprMemImm::new(reg.into()).expect("valid gpr");
self.emit(Inst::Push64 { src });
}
/// Pop to register.
pub fn pop_r(&mut self, dst: Reg) {
let writable = Writable::from_reg(dst.into());
let dst = WritableGpr::from_writable_reg(writable).expect("valid writable gpr");
self.emit(Inst::Pop64 { dst });
}
/// Return instruction.
pub fn ret(&mut self) {
self.emit(Inst::Ret { rets: vec![] });
}
/// Move instruction variants.
pub fn mov(&mut self, src: Operand, dst: Operand, size: OperandSize) {
use self::Operand::*;
match &(src, dst) {
(Reg(lhs), Reg(rhs)) => self.mov_rr(*lhs, *rhs, size),
(Reg(lhs), Mem(addr)) => match addr {
Address::Base { base, imm } => self.mov_rm(*lhs, *base, *imm, size),
},
(Imm(imm), Mem(addr)) => match addr {
Address::Base { base, imm: disp } => self.mov_im(*imm as u64, *base, *disp, size),
},
(Imm(imm), Reg(reg)) => self.mov_ir(*imm as u64, *reg, size),
(Mem(addr), Reg(reg)) => match addr {
Address::Base { base, imm } => self.mov_mr(*base, *imm, *reg, size),
},
_ => panic!(
"Invalid operand combination for mov; src={:?}, dst={:?}",
src, dst
),
}
}
/// Register-to-register move.
pub fn mov_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
let src = Gpr::new(src.into()).expect("valid gpr");
let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
self.emit(Inst::MovRR {
src,
dst,
size: size.into(),
});
}
/// Register-to-memory move.
pub fn mov_rm(&mut self, src: Reg, base: Reg, disp: u32, size: OperandSize) {
let src = Gpr::new(src.into()).expect("valid gpr");
let dst = Amode::imm_reg(disp, base.into());
self.emit(Inst::MovRM {
size: size.into(),
src,
dst: SyntheticAmode::real(dst),
});
}
/// Immediate-to-memory move.
pub fn mov_im(&mut self, src: u64, base: Reg, disp: u32, size: OperandSize) {
let dst = Amode::imm_reg(disp, base.into());
self.emit(Inst::MovImmM {
size: size.into(),
simm64: src,
dst: SyntheticAmode::real(dst),
});
}
/// Immediate-to-register move.
pub fn mov_ir(&mut self, imm: u64, dst: Reg, size: OperandSize) {
let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
self.emit(Inst::Imm {
dst_size: size.into(),
simm64: imm,
dst,
});
}
/// Memory-to-register load.
pub fn mov_mr(&mut self, base: Reg, disp: u32, dst: Reg, size: OperandSize) {
use OperandSize::S64;
let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
let amode = Amode::imm_reg(disp, base.into());
let src = SyntheticAmode::real(amode);
if size == S64 {
self.emit(Inst::Mov64MR { src, dst });
} else {
let reg_mem = RegMem::mem(src);
self.emit(Inst::MovzxRmR {
ext_mode: ExtMode::LQ,
src: GprMem::new(reg_mem).expect("valid memory address"),
dst,
});
}
}
/// Subtract immediate register.
pub fn sub_ir(&mut self, imm: u32, dst: Reg, size: OperandSize) {
let writable = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
let src = Gpr::new(dst.into()).expect("valid gpr");
let imm = RegMemImm::imm(imm);
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Sub,
src1: src,
src2: GprMemImm::new(imm).expect("valid immediate"),
dst: writable,
});
}
/// Add instruction variants.
pub fn add(&mut self, src: Operand, dst: Operand, size: OperandSize) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => self.add_ir(*imm, *dst, size),
(Operand::Reg(src), Operand::Reg(dst)) => self.add_rr(*src, *dst, size),
_ => panic!(
"Invalid operand combination for add; src = {:?} dst = {:?}",
src, dst
),
}
}
/// Add immediate and register.
pub fn add_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) {
let writable = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
let src = Gpr::new(dst.into()).expect("valid gpr");
let imm = RegMemImm::imm(imm as u32);
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Add,
src1: src,
src2: GprMemImm::new(imm).expect("valid immediate"),
dst: writable,
});
}
/// Add register and register.
pub fn add_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
let dest = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
let src1 = Gpr::new(dst.into()).expect("valid gpr");
let src2 = RegMemImm::reg(src.into());
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Add,
src1,
src2: GprMemImm::new(src2).expect("valid gpr"),
dst: dest,
});
}
/// Logical exclusive or with registers.
pub fn xor_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
let dest = WritableGpr::from_writable_reg(Writable::from_reg(dst.into()))
.expect("valid writable gpr");
let src1 = Gpr::new(dst.into()).expect("valid gpr");
let src2 = RegMemImm::reg(src.into());
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Xor,
src1,
src2: GprMemImm::new(src2).expect("valid gpr"),
dst: dest,
});
}
}


@@ -1,21 +1,51 @@
-use super::regs::{rbp, reg_name, rsp};
-use crate::abi::addressing_mode::Address;
-use crate::abi::local::LocalSlot;
+use super::{
+    asm::{Assembler, Operand},
+    regs::{rbp, rsp},
+};
+use crate::abi::{Address, LocalSlot};
 use crate::isa::reg::Reg;
 use crate::masm::{MacroAssembler as Masm, OperandSize, RegImm};
+use cranelift_codegen::{isa::x64::settings as x64_settings, settings, Final, MachBufferFinalized};

+/// x64 MacroAssembler.
 pub(crate) struct MacroAssembler {
+    /// Stack pointer offset.
     sp_offset: u32,
+    /// Low level assembler.
     asm: Assembler,
 }

+// Conversions between generic masm arguments and x64 operands.
+impl From<RegImm> for Operand {
+    fn from(rimm: RegImm) -> Self {
+        match rimm {
+            RegImm::Reg(r) => r.into(),
+            RegImm::Imm(imm) => Operand::Imm(imm),
+        }
+    }
+}
+
+impl From<Reg> for Operand {
+    fn from(reg: Reg) -> Self {
+        Operand::Reg(reg)
+    }
+}
+
+impl From<Address> for Operand {
+    fn from(addr: Address) -> Self {
+        Operand::Mem(addr)
+    }
+}
+
 impl Masm for MacroAssembler {
     fn prologue(&mut self) {
         let frame_pointer = rbp();
         let stack_pointer = rsp();
         self.asm.push_r(frame_pointer);
-        self.asm.mov_rr(stack_pointer, frame_pointer);
+        self.asm
+            .mov_rr(stack_pointer, frame_pointer, OperandSize::S64);
     }

     fn push(&mut self, reg: Reg) -> u32 {
@@ -33,7 +63,7 @@ impl Masm for MacroAssembler {
             return;
         }

-        self.asm.sub_ir(bytes, rsp());
+        self.asm.sub_ir(bytes, rsp(), OperandSize::S64);
         self.increment_sp(bytes);
     }
@@ -56,24 +86,13 @@ impl Masm for MacroAssembler {
         let src: Operand = src.into();
         let dst: Operand = dst.into();

-        match size {
-            OperandSize::S32 => {
-                self.asm.movl(src, dst);
-            }
-            OperandSize::S64 => {
-                self.asm.mov(src, dst);
-            }
-        }
+        self.asm.mov(src, dst, size);
     }

     fn load(&mut self, src: Address, dst: Reg, size: OperandSize) {
         let src = src.into();
         let dst = dst.into();
-        match size {
-            OperandSize::S32 => self.asm.movl(src, dst),
-            OperandSize::S64 => self.asm.mov(src, dst),
-        }
+        self.asm.mov(src, dst, size);
     }

     fn sp_offset(&mut self) -> u32 {
@@ -81,21 +100,14 @@ impl Masm for MacroAssembler {
     }

     fn zero(&mut self, reg: Reg) {
-        self.asm.xorl_rr(reg, reg);
+        self.asm.xor_rr(reg, reg, OperandSize::S32);
     }

     fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) {
         let src: Operand = src.into();
         let dst: Operand = dst.into();

-        match size {
-            OperandSize::S32 => {
-                self.asm.movl(src, dst);
-            }
-            OperandSize::S64 => {
-                self.asm.mov(src, dst);
-            }
-        }
+        self.asm.mov(src, dst, size);
     }

     fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
@@ -108,36 +120,29 @@ impl Masm for MacroAssembler {
             );
         };

-        match size {
-            OperandSize::S32 => {
-                self.asm.addl(src, dst);
-            }
-            OperandSize::S64 => {
-                self.asm.add(src, dst);
-            }
-        }
+        self.asm.add(src, dst, size);
     }

     fn epilogue(&mut self, locals_size: u32) {
         let rsp = rsp();
         if locals_size > 0 {
-            self.asm.add_ir(locals_size as i32, rsp);
+            self.asm.add_ir(locals_size as i32, rsp, OperandSize::S64);
         }
         self.asm.pop_r(rbp());
         self.asm.ret();
     }

-    fn finalize(&mut self) -> &[String] {
+    fn finalize(self) -> MachBufferFinalized<Final> {
         self.asm.finalize()
     }
 }

 impl MacroAssembler {
-    /// Crate a x64 MacroAssembler
-    pub fn new() -> Self {
+    /// Create an x64 MacroAssembler.
+    pub fn new(shared_flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
         Self {
             sp_offset: 0,
-            asm: Default::default(),
+            asm: Assembler::new(shared_flags, isa_flags),
         }
     }
@@ -156,285 +161,3 @@ impl MacroAssembler {
         self.sp_offset -= bytes;
     }
 }
/// A x64 instruction operand.
#[derive(Debug, Copy, Clone)]
enum Operand {
Reg(Reg),
Mem(Address),
Imm(i32),
}
/// Low level assembler implementation for x64
/// All instructions denote a 64 bit size, unless
/// otherwise specified by the corresponding function
/// name suffix.
// NOTE
// This is an interim, debug approach; the long term idea
// is to make each ISA assembler available through
// `cranelift_asm`. The literal representation of the
// instructions use intel syntax for easier manual verification.
// This shouldn't be an issue, once we plug in Cranelift's backend
// we are going to be able to properly disassemble.
#[derive(Default)]
struct Assembler {
buffer: Vec<String>,
}
impl Assembler {
pub fn push_r(&mut self, reg: Reg) {
self.buffer.push(format!("push {}", reg_name(reg, 8)));
}
pub fn pop_r(&mut self, reg: Reg) {
self.buffer.push(format!("pop {}", reg_name(reg, 8)));
}
pub fn ret(&mut self) {
self.buffer.push("ret".into());
}
pub fn mov(&mut self, src: Operand, dst: Operand) {
// r, r
// r, m (displacement)
// r, m (displace,ent, index)
// i, r
// i, m (displacement)
// i, m (displacement, index)
// load combinations
match &(src, dst) {
(Operand::Reg(lhs), Operand::Reg(rhs)) => self.mov_rr(*lhs, *rhs),
(Operand::Reg(r), Operand::Mem(addr)) => match addr {
Address::Base { base, imm } => self.mov_rm(*r, *base, *imm),
},
(Operand::Imm(op), Operand::Mem(addr)) => match addr {
Address::Base { base, imm } => self.mov_im(*op, *base, *imm),
},
(Operand::Imm(imm), Operand::Reg(reg)) => self.mov_ir(*imm, *reg),
(Operand::Mem(addr), Operand::Reg(reg)) => match addr {
Address::Base { base, imm } => self.mov_mr(*base, *imm, *reg),
},
_ => panic!(
"Invalid operand combination for mov; src = {:?}; dst = {:?}",
src, dst
),
}
}
pub fn mov_rr(&mut self, src: Reg, dst: Reg) {
let dst = reg_name(dst, 8);
let src = reg_name(src, 8);
self.buffer.push(format!("mov {}, {}", dst, src));
}
pub fn mov_rm(&mut self, src: Reg, base: Reg, disp: u32) {
let src = reg_name(src, 8);
let dst = reg_name(base, 8);
let addr = if disp == 0 {
format!("[{}]", dst)
} else {
format!("[{} + {}]", dst, disp)
};
self.buffer.push(format!("mov {}, {}", addr, src));
}
pub fn mov_im(&mut self, imm: i32, base: Reg, disp: u32) {
let reg = reg_name(base, 8);
let addr = if disp == 0 {
format!("[{}]", reg)
} else {
format!("[{} + {}]", reg, disp)
};
self.buffer.push(format!("mov qword {}, {}", addr, imm));
}
pub fn mov_ir(&mut self, imm: i32, dst: Reg) {
let reg = reg_name(dst, 8);
self.buffer.push(format!("mov {}, {}", reg, imm));
}
pub fn mov_mr(&mut self, base: Reg, disp: u32, dst: Reg) {
let base = reg_name(base, 8);
let dst = reg_name(dst, 8);
let addr = if disp == 0 {
format!("[{}]", base)
} else {
format!("[{} + {}]", base, disp)
};
self.buffer.push(format!("mov {}, {}", dst, addr));
}
pub fn movl(&mut self, src: Operand, dst: Operand) {
// r, r
// r, m (displacement)
// r, m (displace,ent, index)
// i, r
// i, m (displacement)
// i, m (displacement, index)
// load combinations
match &(src, dst) {
(Operand::Reg(lhs), Operand::Reg(rhs)) => self.movl_rr(*lhs, *rhs),
(Operand::Reg(r), Operand::Mem(addr)) => match addr {
Address::Base { base, imm } => self.movl_rm(*r, *base, *imm),
},
(Operand::Imm(op), Operand::Mem(addr)) => match addr {
Address::Base { base, imm } => self.movl_im(*op, *base, *imm),
},
(Operand::Imm(imm), Operand::Reg(reg)) => self.movl_ir(*imm, *reg),
(Operand::Mem(addr), Operand::Reg(reg)) => match addr {
Address::Base { base, imm } => self.movl_mr(*base, *imm, *reg),
},
_ => panic!(
"Invalid operand combination for movl; src = {:?}; dst = {:?}",
src, dst
),
}
}
pub fn movl_rr(&mut self, src: Reg, dst: Reg) {
let dst = reg_name(dst, 4);
let src = reg_name(src, 4);
self.buffer.push(format!("mov {}, {}", dst, src));
}
pub fn movl_rm(&mut self, src: Reg, base: Reg, disp: u32) {
let src = reg_name(src, 4);
let dst = reg_name(base, 8);
let addr = if disp == 0 {
format!("[{}]", dst)
} else {
format!("[{} + {}]", dst, disp)
};
self.buffer.push(format!("mov {}, {}", addr, src));
}
pub fn movl_im(&mut self, imm: i32, base: Reg, disp: u32) {
let reg = reg_name(base, 8);
let addr = if disp == 0 {
format!("[{}]", reg)
} else {
format!("[{} + {}]", reg, disp)
};
self.buffer.push(format!("mov dword {}, {}", addr, imm));
}
pub fn movl_ir(&mut self, imm: i32, dst: Reg) {
let reg = reg_name(dst, 4);
self.buffer.push(format!("mov {}, {}", reg, imm));
}
pub fn movl_mr(&mut self, base: Reg, disp: u32, dst: Reg) {
let base = reg_name(base, 8);
let dst = reg_name(dst, 4);
let addr = if disp == 0 {
format!("[{}]", base)
} else {
format!("[{} + {}]", base, disp)
};
self.buffer.push(format!("mov {}, {}", dst, addr));
}
pub fn sub_ir(&mut self, imm: u32, dst: Reg) {
let dst = reg_name(dst, 8);
self.buffer.push(format!("sub {}, {}", dst, imm));
}
pub fn add(&mut self, src: Operand, dst: Operand) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => self.add_ir(*imm, *dst),
(Operand::Reg(src), Operand::Reg(dst)) => self.add_rr(*src, *dst),
_ => panic!(
"Invalid operand combination for add; src = {:?} dst = {:?}",
src, dst
),
}
}
pub fn add_ir(&mut self, imm: i32, dst: Reg) {
let dst = reg_name(dst, 8);
self.buffer.push(format!("add {}, {}", dst, imm));
}
pub fn add_rr(&mut self, src: Reg, dst: Reg) {
let src = reg_name(src, 8);
let dst = reg_name(dst, 8);
self.buffer.push(format!("add {}, {}", dst, src));
}
pub fn addl(&mut self, src: Operand, dst: Operand) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => self.addl_ir(*imm, *dst),
(Operand::Reg(src), Operand::Reg(dst)) => self.addl_rr(*src, *dst),
_ => panic!(
"Invalid operand combination for add; src = {:?} dst = {:?}",
src, dst
),
}
}
pub fn addl_ir(&mut self, imm: i32, dst: Reg) {
let dst = reg_name(dst, 4);
self.buffer.push(format!("add {}, {}", dst, imm));
}
pub fn addl_rr(&mut self, src: Reg, dst: Reg) {
let src = reg_name(src, 4);
let dst = reg_name(dst, 4);
self.buffer.push(format!("add {}, {}", dst, src));
}
pub fn xorl_rr(&mut self, src: Reg, dst: Reg) {
let src = reg_name(src, 4);
let dst = reg_name(dst, 4);
self.buffer.push(format!("xor {}, {}", dst, src));
}
/// Return the emitted code
pub fn finalize(&mut self) -> &[String] {
&self.buffer
}
}
impl From<RegImm> for Operand {
fn from(rimm: RegImm) -> Self {
match rimm {
RegImm::Reg(r) => r.into(),
RegImm::Imm(imm) => Operand::Imm(imm),
}
}
}
impl From<Reg> for Operand {
fn from(reg: Reg) -> Self {
Operand::Reg(reg)
}
}
impl From<Address> for Operand {
fn from(addr: Address) -> Self {
Operand::Mem(addr)
}
}


@@ -1,17 +1,25 @@
 use crate::abi::ABI;
 use crate::codegen::{CodeGen, CodeGenContext};
 use crate::frame::Frame;
-use crate::isa::x64::masm::MacroAssembler;
+use crate::isa::x64::masm::MacroAssembler as X64Masm;
+use crate::masm::MacroAssembler;
 use crate::regalloc::RegAlloc;
 use crate::stack::Stack;
-use crate::{isa::TargetIsa, regset::RegSet};
+use crate::{
+    isa::{Builder, TargetIsa},
+    regset::RegSet,
+};
 use anyhow::Result;
+use cranelift_codegen::{
+    isa::x64::settings as x64_settings, settings::Flags, Final, MachBufferFinalized,
+};
 use target_lexicon::Triple;
 use wasmparser::{FuncType, FuncValidator, FunctionBody, ValidatorResources};

 use self::regs::ALL_GPR;

 mod abi;
+mod asm;
 mod masm;
 // Not all the fpr and gpr constructors are used at the moment;
 // in that sense, this directive is a temporary measure to avoid
@@ -19,18 +27,39 @@ mod masm;
 #[allow(dead_code)]
 mod regs;

-/// Create an ISA from the given triple.
-pub(crate) fn isa_from(triple: Triple) -> X64 {
-    X64::new(triple)
+/// Create an ISA builder.
+pub(crate) fn isa_builder(triple: Triple) -> Builder {
+    Builder {
+        triple,
+        settings: x64_settings::builder(),
+        constructor: |triple, shared_flags, settings| {
+            // TODO: Once enabling/disabling flags is allowed, and once features like SIMD are supported
+            // ensure compatibility between shared flags and ISA flags.
+            let isa_flags = x64_settings::Flags::new(&shared_flags, settings);
+            let isa = X64::new(triple, shared_flags, isa_flags);
+            Ok(Box::new(isa))
+        },
+    }
 }

+/// x64 ISA.
 pub(crate) struct X64 {
+    /// The target triple.
     triple: Triple,
+    /// ISA specific flags.
+    isa_flags: x64_settings::Flags,
+    /// Shared flags.
+    shared_flags: Flags,
 }

 impl X64 {
-    pub fn new(triple: Triple) -> Self {
-        Self { triple }
+    /// Create a x64 ISA.
+    pub fn new(triple: Triple, shared_flags: Flags, isa_flags: x64_settings::Flags) -> Self {
+        Self {
+            isa_flags,
+            shared_flags,
+            triple,
+        }
     }
 }
@@ -43,24 +72,25 @@ impl TargetIsa for X64 {
         &self.triple
     }

-    // Temporarily returns a Vec<String>
     fn compile_function(
         &self,
         sig: &FuncType,
         body: &FunctionBody,
         mut validator: FuncValidator<ValidatorResources>,
-    ) -> Result<Vec<String>> {
+    ) -> Result<MachBufferFinalized<Final>> {
         let mut body = body.get_binary_reader();
-        let masm = MacroAssembler::new();
+        let mut masm = X64Masm::new(self.shared_flags.clone(), self.isa_flags.clone());
         let stack = Stack::new();
         let abi = abi::X64ABI::default();
         let abi_sig = abi.sig(sig);
         let frame = Frame::new(&abi_sig, &mut body, &mut validator, &abi)?;

         // TODO Add in floating point bitmask
         let regalloc = RegAlloc::new(RegSet::new(ALL_GPR, 0), regs::scratch());
-        let codegen_context = CodeGenContext::new(masm, stack, &frame);
+        let codegen_context = CodeGenContext::new(&mut masm, stack, &frame);
         let mut codegen = CodeGen::new::<abi::X64ABI>(codegen_context, abi_sig, regalloc);

-        codegen.emit(&mut body, validator)
+        codegen.emit(&mut body, validator)?;
+
+        Ok(masm.finalize())
     }
 }


@@ -10,6 +10,7 @@ mod abi;
 mod codegen;
 mod frame;
 pub mod isa;
+pub use isa::*;
 mod masm;
 mod regalloc;
 mod regset;


@@ -1,11 +1,12 @@
 use crate::abi::align_to;
-use crate::abi::{addressing_mode::Address, local::LocalSlot};
+use crate::abi::{Address, LocalSlot};
 use crate::isa::reg::Reg;
 use crate::regalloc::RegAlloc;
+use cranelift_codegen::{Final, MachBufferFinalized};
 use std::ops::Range;

 /// Operand size, in bits.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Eq, PartialEq)]
 pub(crate) enum OperandSize {
     /// 32 bits.
     S32,
@@ -89,8 +90,7 @@ pub(crate) trait MacroAssembler {
     fn push(&mut self, src: Reg) -> u32;

     /// Finalize the assembly and return the result.
-    // NOTE Interim, debug approach
-    fn finalize(&mut self) -> &[String];
+    fn finalize(self) -> MachBufferFinalized<Final>;

     /// Zero a particular register.
     fn zero(&mut self, reg: Reg);


@@ -43,7 +43,7 @@ impl RegAlloc {
         let dst = self.any_gpr(context);
         let val = context.stack.pop().expect("a value at stack top");
-        Self::move_val_to_reg(val, dst, &mut context.masm, context.frame, size);
+        Self::move_val_to_reg(val, dst, context.masm, context.frame, size);
         dst
     }
@@ -63,7 +63,7 @@
         let dst = self.gpr(context, named);
         let val = context.stack.pop().expect("a value at stack top");
-        Self::move_val_to_reg(val, dst, &mut context.masm, context.frame, size);
+        Self::move_val_to_reg(val, dst, context.masm, context.frame, size);
         dst
     }

winch/src/disasm.rs (new file)

@@ -0,0 +1,61 @@
//! Disassembly utilities.
use anyhow::{bail, Result};
use capstone::prelude::*;
use std::fmt::Write;
use target_lexicon::Architecture;
use winch_codegen::TargetIsa;
/// Disassemble and print a machine code buffer.
pub fn print(bytes: &[u8], isa: &dyn TargetIsa) -> Result<()> {
let dis = disassembler_for(isa)?;
let insts = dis.disasm_all(bytes, 0x0).unwrap();
for i in insts.iter() {
let mut line = String::new();
write!(&mut line, "{:4x}:\t", i.address()).unwrap();
let mut bytes_str = String::new();
let mut len = 0;
let mut first = true;
for b in i.bytes() {
if !first {
write!(&mut bytes_str, " ").unwrap();
}
write!(&mut bytes_str, "{:02x}", b).unwrap();
len += 1;
first = false;
}
write!(&mut line, "{:21}\t", bytes_str).unwrap();
if len > 8 {
write!(&mut line, "\n\t\t\t\t").unwrap();
}
if let Some(s) = i.mnemonic() {
write!(&mut line, "{}\t", s).unwrap();
}
if let Some(s) = i.op_str() {
write!(&mut line, "{}", s).unwrap();
}
println!("{}", line);
}
Ok(())
}
fn disassembler_for(isa: &dyn TargetIsa) -> Result<Capstone> {
let disasm = match isa.triple().architecture {
Architecture::X86_64 => Capstone::new()
.x86()
.mode(arch::x86::ArchMode::Mode64)
.build()
.map_err(|e| anyhow::format_err!("{}", e))?,
_ => bail!("Unsupported ISA"),
};
Ok(disasm)
}


@@ -5,13 +5,16 @@
 use anyhow::{Context, Result};
 use clap::Parser;
+use cranelift_codegen::settings;
 use std::{fs, path::PathBuf, str::FromStr};
 use target_lexicon::Triple;
 use wasmtime_environ::{
     wasmparser::{types::Types, Parser as WasmParser, Validator},
     DefinedFuncIndex, FunctionBodyData, Module, ModuleEnvironment, Tunables,
 };
-use winch_codegen::isa::{self, TargetIsa};
+use winch_codegen::{lookup, TargetIsa};
+
+mod disasm;

 #[derive(Parser, Debug)]
 struct Options {
@@ -29,7 +32,9 @@ fn main() -> Result<()> {
         .with_context(|| format!("Failed to read input file {}", opt.input.display()))?;
     let bytes = wat::parse_bytes(&bytes)?;
     let triple = Triple::from_str(&opt.target)?;
-    let isa = isa::lookup(triple)?;
+    let shared_flags = settings::Flags::new(settings::builder());
+    let isa_builder = lookup(triple)?;
+    let isa = isa_builder.build(shared_flags)?;
     let mut validator = Validator::new();
     let parser = WasmParser::new(0);
     let mut types = Default::default();
@@ -65,9 +70,8 @@ fn compile(
     let buffer = isa
         .compile_function(&sig, &body, validator)
         .expect("Couldn't compile function");
-    for i in buffer {
-        println!("{}", i);
-    }
+
+    disasm::print(buffer.data(), isa)?;

     Ok(())
 }