wasmtime/winch/codegen/src/isa/aarch64/masm.rs
Kevin Rizzo 3a92aa3d0a winch: Initial integration with wasmtime (#6119)
* Adding in trampoline compiling method for ISA

* Adding support for indirect call to memory address

* Refactoring frame to externalize defined locals, removing Wasm dependencies in the trampoline case

* Adding initial version of trampoline for testing

* Refactoring trampoline to be re-used by other architectures

* Initial wiring for winch with wasmtime

* Add a Wasmtime CLI option to select `winch`

This is effectively an option to select a variant of the `Strategy` enumeration.
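
A minimal sketch of what this looks like from the embedder API, assuming a
Wasmtime build with the `winch` cargo feature enabled (the module text is
illustrative):

```rust
use wasmtime::{Config, Engine, Module, Strategy};

fn main() -> anyhow::Result<()> {
    // Ask the engine to compile with Winch rather than Cranelift.
    let mut config = Config::new();
    config.strategy(Strategy::Winch);
    let engine = Engine::new(&config)?;

    // Any module compiled through this engine now goes through Winch.
    let _module = Module::new(&engine, r#"(module (func (export "f")))"#)?;
    Ok(())
}
```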

* Implement `Compiler::compile_function` for Winch

Wire this into the `TargetIsa::compile_function` hook as well. Currently
this doesn't take `Tunables` into account, but that's left as a TODO for
later.

* Filling out Winch append_code method

* Adding back in changes from previous branch

Most of these changes are a WIP. Trampolines are missing for x64, though a
basic one exists for aarch64, and handling of arguments that live on the
stack is also missing.

It currently imports `cranelift_wasm::WasmFuncType` since it's what's
passed to the `Compiler` trait. It's a bit awkward to use in the
`winch_codegen` crate since it mostly operates on `wasmparser` types.
I've had to hack in a conversion to get things working. Long term, I'm not
sure it's wise to rely on this type, but it seems easier on the Cranelift
side when creating the stub IR.
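
As a purely hypothetical illustration of the kind of conversion involved
(the helper name and the exact variant sets are assumptions, not the actual
hack):

```rust
use cranelift_wasm::WasmType;
use wasmparser::ValType;

// Hypothetical helper: map a `cranelift_wasm` value type to its
// `wasmparser` counterpart; reference types are left out of this sketch.
fn wasm_type_to_val_type(ty: WasmType) -> ValType {
    match ty {
        WasmType::I32 => ValType::I32,
        WasmType::I64 => ValType::I64,
        WasmType::F32 => ValType::F32,
        WasmType::F64 => ValType::F64,
        WasmType::V128 => ValType::V128,
        other => unimplemented!("no conversion for {other:?}"),
    }
}
```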

* Small API changes to make integration easier

* Adding in new FuncEnv, only a stub for now

* Removing unneeded parts of the old PoC, and refactoring trampoline code

* Moving FuncEnv into a separate file

* More comments for trampolines

* Adding in winch integration tests for first pass

* Using new addressing method to fix stack pointer error

* Adding test for stack arguments

* Only run tests on x86 for now, since Winch support is more complete there

* Add in missing documentation after rebase

* Updating based on feedback in draft PR

* Fixing formatting on doc comment for argv register

* Running formatting

* Lock updates, and turning on winch feature flags during tests

* Updating configuration with comments to no longer gate Strategy enum

* Using the winch-environ FuncEnv, but it required changing the signature

* Proper comment formatting

* Removing wasmtime-winch from dev-dependencies; adding the winch feature makes it unnecessary

* Update doc attr to include winch check

* Adding winch feature to doc generation, which seems to fix the feature error in CI

* Add the `component-model` feature to the cargo doc invocation in CI

To match the metadata used by the docs.rs invocation when building docs.

* Add a comment clarifying the usage of `component-model` for docs.rs

* Correctly order wasmtime-winch and winch-environ in the publish script

* Ensure x86 test dependencies are included in cfg(target_arch)

* Further constrain Winch tests to x86_64 _and_ unix
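
For illustration, gating along these lines (the module name is hypothetical):

```rust
// Winch integration tests only run on x86_64 Unix hosts for now.
#[cfg(all(target_arch = "x86_64", unix))]
mod winch_tests {
    // ...
}
```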

---------

Co-authored-by: Alex Crichton <alex@alexcrichton.com>
Co-authored-by: Saúl Cabrera <saulecabrera@gmail.com>
2023-04-05 00:32:40 +00:00

use super::{
    address::Address,
    asm::{Assembler, Operand},
    regs,
};
use crate::{
    abi::local::LocalSlot,
    codegen::CodeGenContext,
    isa::reg::Reg,
    masm::{CalleeKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind},
};
use cranelift_codegen::{settings, Final, MachBufferFinalized};

/// Aarch64 MacroAssembler.
pub(crate) struct MacroAssembler {
    /// Low level assembler.
    asm: Assembler,
    /// Stack pointer offset.
    sp_offset: u32,
}

// Conversions between generic masm arguments and aarch64 operands.
impl From<RegImm> for Operand {
    fn from(rimm: RegImm) -> Self {
        match rimm {
            RegImm::Reg(r) => r.into(),
            RegImm::Imm(imm) => Operand::Imm(imm),
        }
    }
}

impl From<Reg> for Operand {
    fn from(reg: Reg) -> Self {
        Operand::Reg(reg)
    }
}

impl From<Address> for Operand {
    fn from(addr: Address) -> Self {
        Operand::Mem(addr)
    }
}

impl MacroAssembler {
    /// Create an Aarch64 MacroAssembler.
    pub fn new(shared_flags: settings::Flags) -> Self {
        Self {
            asm: Assembler::new(shared_flags),
            sp_offset: 0u32,
        }
    }
}

impl Masm for MacroAssembler {
    type Address = Address;

    fn prologue(&mut self) {
        let lr = regs::lr();
        let fp = regs::fp();
        let sp = regs::sp();
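        // stp fp, lr, [sp, #-16]!: save the caller's frame pointer and the
        // return address, pre-decrementing the stack pointer by 16 bytes.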
        let addr = Address::pre_indexed_from_sp(-16);
        self.asm.stp(fp, lr, addr);
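        // mov fp, sp: establish the new frame pointer.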
        self.asm.mov_rr(sp, fp, OperandSize::S64);
        self.move_sp_to_shadow_sp();
    }

    fn epilogue(&mut self, locals_size: u32) {
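        // All temporary stack space must have been freed by the time the
        // epilogue runs; only the locals remain to be deallocated.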
        assert!(self.sp_offset == locals_size);

        let sp = regs::sp();
        if locals_size > 0 {
            self.asm
                .add_ir(locals_size as u64, sp, sp, OperandSize::S64);
            self.move_sp_to_shadow_sp();
        }
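        // ldp fp, lr, [sp], #16: restore the caller's frame pointer and the
        // return address, post-incrementing the stack pointer by 16 bytes.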
        let lr = regs::lr();
        let fp = regs::fp();
        let addr = Address::post_indexed_from_sp(16);
        self.asm.ldp(fp, lr, addr);
        self.asm.ret();
    }

    fn reserve_stack(&mut self, bytes: u32) {
        if bytes == 0 {
            return;
        }
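        // Decrement the hardware stack pointer, mirror the change into the
        // shadow stack pointer, and record the growth in `sp_offset`.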
        let sp = regs::sp();
        self.asm.sub_ir(bytes as u64, sp, sp, OperandSize::S64);
        self.move_sp_to_shadow_sp();
        self.increment_sp(bytes);
    }

    fn free_stack(&mut self, _bytes: u32) {
        todo!()
    }

    fn local_address(&mut self, local: &LocalSlot) -> Address {
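        // Locals addressed from the stack pointer are accessed through the
        // shadow stack pointer, at an offset relative to the current stack
        // pointer offset; all other locals are addressed from the frame
        // pointer.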
        let (reg, offset) = local
            .addressed_from_sp()
            .then(|| {
                let offset = self.sp_offset.checked_sub(local.offset).unwrap_or_else(|| {
                    panic!(
                        "Invalid local offset = {}; sp offset = {}",
                        local.offset, self.sp_offset
                    )
                });
                (regs::shadow_sp(), offset)
            })
            .unwrap_or((regs::fp(), local.offset));

        Address::offset(reg, offset as i64)
    }

    fn address_from_sp(&self, _offset: u32) -> Self::Address {
        todo!()
    }

    fn address_at_sp(&self, _offset: u32) -> Self::Address {
        todo!()
    }

    fn store(&mut self, src: RegImm, dst: Address, size: OperandSize) {
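        // Stores always take a register source, so immediates are first
        // materialized into the scratch register.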
        let src = match src {
            RegImm::Imm(imm) => {
                let scratch = regs::scratch();
                self.asm.load_constant(imm as u64, scratch);
                scratch
            }
            RegImm::Reg(reg) => reg,
        };

        self.asm.str(src, dst, size);
    }

    fn call(&mut self, _callee: CalleeKind) {
        todo!()
    }

    fn load(&mut self, src: Address, dst: Reg, size: OperandSize) {
        self.asm.ldr(src, dst, size);
    }

    fn pop(&mut self, _dst: Reg) {
        todo!()
    }

    fn sp_offset(&self) -> u32 {
        self.sp_offset
    }

    fn finalize(self) -> MachBufferFinalized<Final> {
        self.asm.finalize()
    }

    fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) {
        self.asm.mov(src.into(), dst.into(), size);
    }

    fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
        self.asm.add(rhs.into(), lhs.into(), dst.into(), size);
    }

    fn sub(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
        todo!()
    }

    fn mul(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
        todo!()
    }

    fn div(&mut self, _context: &mut CodeGenContext, _kind: DivKind, _size: OperandSize) {
        todo!()
    }

    fn rem(&mut self, _context: &mut CodeGenContext, _kind: RemKind, _size: OperandSize) {
        todo!()
    }

    fn zero(&mut self, reg: Reg) {
        self.asm.load_constant(0, reg);
    }

    fn push(&mut self, reg: Reg) -> u32 {
        // Pushes are always 8 bytes (the full 64-bit width) on 64-bit
        // architectures.
        let size = 8u32;
        self.reserve_stack(size);
        let address = Address::from_shadow_sp(size as i64);
        self.asm.str(reg, address, OperandSize::S64);

        self.sp_offset
    }
    fn address_from_reg(&self, reg: Reg, offset: u32) -> Self::Address {
        Address::offset(reg, offset as i64)
    }
}

impl MacroAssembler {
    fn increment_sp(&mut self, bytes: u32) {
        self.sp_offset += bytes;
    }

    // Copies the value of the stack pointer to the shadow stack
    // pointer: mov x28, sp.
    // This function is usually called whenever the real stack pointer
    // changes, for example after allocating or deallocating stack
    // space, or after performing a push or pop.
    // For more details on the stack pointer and the shadow stack
    // pointer, see the documentation at regs::shadow_sp().
    fn move_sp_to_shadow_sp(&mut self) {
        let sp = regs::sp();
        let shadow_sp = regs::shadow_sp();
        self.asm.mov_rr(sp, shadow_sp, OperandSize::S64);
    }
}