Address review comments.
@@ -73,9 +73,9 @@
//! https://searchfox.org/mozilla-central/rev/bc3600def806859c31b2c7ac06e3d69271052a89/js/src/wasm/WasmStubs.h#134
//!
//! In brief:
//! - Returns are processed in *reverse* order.
//! - The first return in this order (so the last return) goes into the ordinary
//! return register, X0.
//! - Return values are processed in *reverse* order.
//! - The first return value in this order (so the last return) goes into the
//! ordinary return register, X0.
//! - Any further returns go in a struct-return area, allocated upwards (in
//! address order) during the reverse traversal.
//! - This struct-return area is provided by the caller, and a pointer to its
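The placement rule in the comment above is easier to see with a tiny worked example. The sketch below is illustration only, not Cranelift code: it assumes three integer return values, 8-byte slots in the caller-provided struct-return area, and names the return register with a plain string.

```rust
// Illustration of the convention: returns are walked in reverse, the last
// return value lands in x0, and the remaining ones are assigned increasing
// offsets in the caller-provided struct-return area.
#[derive(Debug, PartialEq)]
enum RetLoc {
    Reg(&'static str),  // named register
    StructRet(u32),     // byte offset into the caller-provided area
}

fn place_returns(num_rets: u32) -> Vec<(u32, RetLoc)> {
    let mut locs = Vec::new();
    let mut next_offset = 0;
    // Return values are processed in *reverse* order.
    for idx in (0..num_rets).rev() {
        if idx == num_rets - 1 {
            // The first return in this order (the last return value) goes
            // into the ordinary return register.
            locs.push((idx, RetLoc::Reg("x0")));
        } else {
            // Further returns go into the struct-return area, allocated
            // upwards (in address order) during the reverse traversal.
            locs.push((idx, RetLoc::StructRet(next_offset)));
            next_offset += 8;
        }
    }
    locs
}

fn main() {
    // Three returns (r0, r1, r2): r2 -> x0, r1 -> area+0, r0 -> area+8.
    assert_eq!(
        place_returns(3),
        vec![
            (2, RetLoc::Reg("x0")),
            (1, RetLoc::StructRet(0)),
            (0, RetLoc::StructRet(8)),
        ]
    );
}
```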
@@ -98,6 +98,7 @@ use crate::isa;
use crate::isa::aarch64::{inst::*, lower::ty_bits};
use crate::machinst::*;
use crate::settings;
use crate::{CodegenError, CodegenResult};

use alloc::boxed::Box;
use alloc::vec::Vec;
@@ -134,6 +135,11 @@ struct ABISig {
call_conv: isa::CallConv,
}

/// This is the limit for the size of argument and return-value areas on the
/// stack. We place a reasonable limit here to avoid integer overflow issues
/// with 32-bit arithmetic: for now, 128 MB.
static STACK_ARG_RET_SIZE_LIMIT: u64 = 128 * 1024 * 1024;

// Spidermonkey specific ABI convention.

/// This is SpiderMonkey's `WasmTableCallSigReg`.
@@ -208,14 +214,15 @@ enum ArgsOrRets {
/// Process a list of parameters or return values and allocate them to X-regs,
/// V-regs, and stack slots.
///
/// Returns the list of argument locations, and the stack-space used (rounded up
/// to a 16-byte-aligned boundary).
/// Returns the list of argument locations, the stack-space used (rounded up
/// to a 16-byte-aligned boundary), and if `add_ret_area_ptr` was passed, the
/// index of the extra synthetic arg that was added.
fn compute_arg_locs(
call_conv: isa::CallConv,
params: &[ir::AbiParam],
args_or_rets: ArgsOrRets,
add_ret_area_ptr: bool,
) -> (Vec<ABIArg>, i64) {
) -> CodegenResult<(Vec<ABIArg>, i64, Option<usize>)> {
let is_baldrdash = call_conv.extends_baldrdash();

// See AArch64 ABI (https://c9x.me/compile/bib/abi-arm64.pdf), sections 5.4.
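For concreteness, the stack-space figure mentioned in the new doc comment is rounded up to a 16-byte boundary and then checked against the 128 MB limit defined above. The standalone snippet below (not part of the patch) shows the same arithmetic:

```rust
fn main() {
    // Round a byte count up to the next 16-byte boundary, as compute_arg_locs
    // does before returning the stack-space figure.
    let round_up_16 = |n: u64| (n + 15) & !15;
    assert_eq!(round_up_16(0), 0);
    assert_eq!(round_up_16(8), 16);  // one 8-byte stack slot -> 16 bytes
    assert_eq!(round_up_16(24), 32); // three 8-byte slots -> 32 bytes

    // The rounded total is then checked against the 128 MB limit.
    const STACK_ARG_RET_SIZE_LIMIT: u64 = 128 * 1024 * 1024;
    assert!(round_up_16(24) <= STACK_ARG_RET_SIZE_LIMIT);
}
```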
@@ -290,7 +297,7 @@ fn compute_arg_locs(
ret.reverse();
}

if add_ret_area_ptr {
let extra_arg = if add_ret_area_ptr {
debug_assert!(args_or_rets == ArgsOrRets::Args);
if next_xreg < max_reg_vals {
ret.push(ABIArg::Reg(xreg(next_xreg).to_real_reg(), I64));
@@ -298,35 +305,39 @@ fn compute_arg_locs(
ret.push(ABIArg::Stack(next_stack as i64, I64));
next_stack += 8;
}
}
Some(ret.len() - 1)
} else {
None
};

next_stack = (next_stack + 15) & !15;

(ret, next_stack as i64)
// To avoid overflow issues, limit the arg/return size to something
// reasonable -- here, 128 MB.
if next_stack > STACK_ARG_RET_SIZE_LIMIT {
return Err(CodegenError::ImplLimitExceeded);
}

Ok((ret, next_stack as i64, extra_arg))
}

impl ABISig {
fn from_func_sig(sig: &ir::Signature) -> ABISig {
fn from_func_sig(sig: &ir::Signature) -> CodegenResult<ABISig> {
// Compute args and retvals from signature. Handle retvals first,
// because we may need to add a return-area arg to the args.
let (rets, stack_ret_space) = compute_arg_locs(
let (rets, stack_ret_space, _) = compute_arg_locs(
sig.call_conv,
&sig.returns,
ArgsOrRets::Rets,
/* extra ret-area ptr = */ false,
);
)?;
let need_stack_return_area = stack_ret_space > 0;
let (args, stack_arg_space) = compute_arg_locs(
let (args, stack_arg_space, stack_ret_arg) = compute_arg_locs(
sig.call_conv,
&sig.params,
ArgsOrRets::Args,
need_stack_return_area,
);
let stack_ret_arg = if need_stack_return_area {
Some(args.len() - 1)
} else {
None
};
)?;

trace!(
"ABISig: sig {:?} => args = {:?} rets = {:?} arg stack = {} ret stack = {} stack_ret_arg = {:?}",
@@ -338,14 +349,14 @@ impl ABISig {
stack_ret_arg
);

ABISig {
Ok(ABISig {
args,
rets,
stack_arg_space,
stack_ret_space,
stack_ret_arg,
call_conv: sig.call_conv,
}
})
}
}

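The "handle retvals first" comment above is the key ordering constraint: only after laying out the return values do we know whether a stack return area, and hence the synthetic ret-area pointer argument, is needed. A simplified, self-contained sketch of that flow, using made-up helpers rather than the real Cranelift types:

```rust
// Toy stand-ins for the real layout routines; the numbers are illustrative only.
fn layout_rets(num_rets: u64) -> (u64, u64) {
    let in_regs = num_rets.min(8); // pretend there are 8 return registers
    let stack_bytes = (num_rets - in_regs) * 8; // 8-byte slots for the rest
    (in_regs, stack_bytes)
}

fn layout_args(num_args: usize, add_ret_area_ptr: bool) -> (usize, Option<usize>) {
    let total = num_args + if add_ret_area_ptr { 1 } else { 0 };
    // The synthetic ret-area pointer, if requested, is the last argument added.
    let stack_ret_arg = if add_ret_area_ptr { Some(total - 1) } else { None };
    (total, stack_ret_arg)
}

fn main() {
    // Returns first: with 10 returns, 2 spill to the stack, so the argument
    // list gains a ret-area pointer and records its index.
    let (_ret_regs, ret_stack_bytes) = layout_rets(10);
    let need_stack_return_area = ret_stack_bytes > 0;
    let (num_args, stack_ret_arg) = layout_args(3, need_stack_return_area);
    assert_eq!((num_args, stack_ret_arg), (4, Some(3)));
}
```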
@@ -446,15 +457,7 @@ fn gen_stack_limit(f: &ir::Function, abi: &ABISig, gv: ir::GlobalValue) -> (Reg,
} => {
let base = generate_gv(f, abi, base, insts);
let into_reg = writable_spilltmp_reg();
let mem = if let Some(offset) =
UImm12Scaled::maybe_from_i64(offset.into(), ir::types::I8)
{
MemArg::UnsignedOffset(base, offset)
} else {
let offset: i64 = offset.into();
insts.extend(Inst::load_constant(into_reg, offset as u64));
MemArg::RegReg(base, into_reg.to_reg())
};
let mem = MemArg::RegOffset(base, offset.into(), I64);
insts.push(Inst::ULoad64 {
rd: into_reg,
mem,
@@ -481,10 +484,10 @@ fn get_special_purpose_param_register(

impl AArch64ABIBody {
/// Create a new body ABI instance.
pub fn new(f: &ir::Function, flags: settings::Flags) -> Self {
pub fn new(f: &ir::Function, flags: settings::Flags) -> CodegenResult<Self> {
debug!("AArch64 ABI: func signature {:?}", f.signature);

let sig = ABISig::from_func_sig(&f.signature);
let sig = ABISig::from_func_sig(&f.signature)?;

let call_conv = f.signature.call_conv;
// Only these calling conventions are supported.
@@ -517,7 +520,7 @@ impl AArch64ABIBody {
.map(|reg| (reg, Vec::new()))
.or_else(|| f.stack_limit.map(|gv| gen_stack_limit(f, &sig, gv)));

Self {
Ok(Self {
sig,
stackslots,
stackslots_size: stack_offset,
@@ -529,7 +532,7 @@ impl AArch64ABIBody {
flags,
is_leaf: f.is_leaf(),
stack_limit,
}
})
}

/// Returns the offset from FP to the argument area, i.e., jumping over the saved FP, return
@@ -635,15 +638,22 @@ impl AArch64ABIBody {

fn load_stack(mem: MemArg, into_reg: Writable<Reg>, ty: Type) -> Inst {
match ty {
types::B1
| types::B8
| types::I8
| types::B16
| types::I16
| types::B32
| types::I32
| types::B64
| types::I64 => Inst::ULoad64 {
types::B1 | types::B8 | types::I8 => Inst::ULoad8 {
rd: into_reg,
mem,
srcloc: None,
},
types::B16 | types::I16 => Inst::ULoad16 {
rd: into_reg,
mem,
srcloc: None,
},
types::B32 | types::I32 => Inst::ULoad32 {
rd: into_reg,
mem,
srcloc: None,
},
types::B64 | types::I64 => Inst::ULoad64 {
rd: into_reg,
mem,
srcloc: None,
@@ -664,15 +674,22 @@ fn load_stack(mem: MemArg, into_reg: Writable<Reg>, ty: Type) -> Inst {

fn store_stack(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
match ty {
types::B1
| types::B8
| types::I8
| types::B16
| types::I16
| types::B32
| types::I32
| types::B64
| types::I64 => Inst::Store64 {
types::B1 | types::B8 | types::I8 => Inst::Store8 {
rd: from_reg,
mem,
srcloc: None,
},
types::B16 | types::I16 => Inst::Store16 {
rd: from_reg,
mem,
srcloc: None,
},
types::B32 | types::I32 => Inst::Store32 {
rd: from_reg,
mem,
srcloc: None,
},
types::B64 | types::I64 => Inst::Store64 {
rd: from_reg,
mem,
srcloc: None,
@@ -791,17 +808,14 @@ fn get_caller_saves(call_conv: isa::CallConv) -> Vec<Writable<Reg>> {
impl ABIBody for AArch64ABIBody {
type I = Inst;

fn needed_tmps(&self) -> usize {
if self.sig.stack_ret_arg.is_some() {
1
} else {
0
}
fn temp_needed(&self) -> bool {
self.sig.stack_ret_arg.is_some()
}

fn init_with_tmps(&mut self, tmps: &[Writable<Reg>]) {
fn init(&mut self, maybe_tmp: Option<Writable<Reg>>) {
if self.sig.stack_ret_arg.is_some() {
self.ret_area_ptr = Some(tmps[0]);
assert!(maybe_tmp.is_some());
self.ret_area_ptr = maybe_tmp;
}
}

@@ -845,14 +859,14 @@ impl ABIBody for AArch64ABIBody {
match &self.sig.args[idx] {
&ABIArg::Reg(r, ty) => Inst::gen_move(into_reg, r.to_reg(), ty),
&ABIArg::Stack(off, ty) => load_stack(
MemArg::FPOffset(self.fp_to_arg_offset() + off),
MemArg::FPOffset(self.fp_to_arg_offset() + off, ty),
into_reg,
ty,
),
}
}

fn gen_retval_area_setup(&self) -> Vec<Inst> {
fn gen_retval_area_setup(&self) -> Option<Inst> {
if let Some(i) = self.sig.stack_ret_arg {
let inst = self.gen_copy_arg_to_reg(i, self.ret_area_ptr.unwrap());
trace!(
@@ -860,10 +874,10 @@ impl ABIBody for AArch64ABIBody {
inst,
self.ret_area_ptr.unwrap().to_reg()
);
vec![inst]
Some(inst)
} else {
trace!("gen_retval_area_setup: not needed");
vec![]
None
}
}

@@ -924,8 +938,7 @@ impl ABIBody for AArch64ABIBody {
}
_ => {}
};
let mem = MemArg::reg_maybe_offset(self.ret_area_ptr.unwrap().to_reg(), off, ty)
.expect("Return-value area is too large");
let mem = MemArg::RegOffset(self.ret_area_ptr.unwrap().to_reg(), off, ty);
ret.push(store_stack(mem, from_reg.to_reg(), ty))
}
}
@@ -961,7 +974,7 @@ impl ABIBody for AArch64ABIBody {
let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
let sp_off: i64 = stack_off + (offset as i64);
trace!("load_stackslot: slot {} -> sp_off {}", slot, sp_off);
load_stack(MemArg::NominalSPOffset(sp_off), into_reg, ty)
load_stack(MemArg::NominalSPOffset(sp_off, ty), into_reg, ty)
}

/// Store to a stackslot.
@@ -971,7 +984,7 @@ impl ABIBody for AArch64ABIBody {
let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
let sp_off: i64 = stack_off + (offset as i64);
trace!("store_stackslot: slot {} -> sp_off {}", slot, sp_off);
store_stack(MemArg::NominalSPOffset(sp_off), from_reg, ty)
store_stack(MemArg::NominalSPOffset(sp_off, ty), from_reg, ty)
}

/// Produce an instruction that computes a stackslot address.
@@ -982,7 +995,7 @@ impl ABIBody for AArch64ABIBody {
let sp_off: i64 = stack_off + (offset as i64);
Inst::LoadAddr {
rd: into_reg,
mem: MemArg::NominalSPOffset(sp_off),
mem: MemArg::NominalSPOffset(sp_off, I8),
}
}

@@ -993,7 +1006,7 @@ impl ABIBody for AArch64ABIBody {
let spill_off = islot * 8;
let sp_off = self.stackslots_size as i64 + spill_off;
trace!("load_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
load_stack(MemArg::NominalSPOffset(sp_off), into_reg, ty)
load_stack(MemArg::NominalSPOffset(sp_off, ty), into_reg, ty)
}

/// Store to a spillslot.
@@ -1003,7 +1016,7 @@ impl ABIBody for AArch64ABIBody {
let spill_off = islot * 8;
let sp_off = self.stackslots_size as i64 + spill_off;
trace!("store_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
store_stack(MemArg::NominalSPOffset(sp_off), from_reg, ty)
store_stack(MemArg::NominalSPOffset(sp_off, ty), from_reg, ty)
}

fn gen_prologue(&mut self) -> Vec<Inst> {
@@ -1290,17 +1303,17 @@ impl AArch64ABICall {
extname: &ir::ExternalName,
dist: RelocDistance,
loc: ir::SourceLoc,
) -> AArch64ABICall {
let sig = ABISig::from_func_sig(sig);
) -> CodegenResult<AArch64ABICall> {
let sig = ABISig::from_func_sig(sig)?;
let (uses, defs) = abisig_to_uses_and_defs(&sig);
AArch64ABICall {
Ok(AArch64ABICall {
sig,
uses,
defs,
dest: CallDest::ExtName(extname.clone(), dist),
loc,
opcode: ir::Opcode::Call,
}
})
}

/// Create a callsite ABI object for a call to a function pointer with the
@@ -1310,17 +1323,17 @@ impl AArch64ABICall {
ptr: Reg,
loc: ir::SourceLoc,
opcode: ir::Opcode,
) -> AArch64ABICall {
let sig = ABISig::from_func_sig(sig);
) -> CodegenResult<AArch64ABICall> {
let sig = ABISig::from_func_sig(sig)?;
let (uses, defs) = abisig_to_uses_and_defs(&sig);
AArch64ABICall {
Ok(AArch64ABICall {
sig,
uses,
defs,
dest: CallDest::Reg(ptr),
loc,
opcode,
}
})
}
}

@@ -1394,7 +1407,9 @@ impl ABICall for AArch64ABICall {
from_reg,
ty,
)),
&ABIArg::Stack(off, ty) => ctx.emit(store_stack(MemArg::SPOffset(off), from_reg, ty)),
&ABIArg::Stack(off, ty) => {
ctx.emit(store_stack(MemArg::SPOffset(off, ty), from_reg, ty))
}
}
}

@@ -1409,7 +1424,7 @@ impl ABICall for AArch64ABICall {
&ABIArg::Stack(off, ty) => {
let ret_area_base = self.sig.stack_arg_space;
ctx.emit(load_stack(
MemArg::SPOffset(off + ret_area_base),
MemArg::SPOffset(off + ret_area_base, ty),
into_reg,
ty,
));
@@ -1427,7 +1442,7 @@ impl ABICall for AArch64ABICall {
let ret_area_base = self.sig.stack_arg_space;
ctx.emit(Inst::LoadAddr {
rd,
mem: MemArg::SPOffset(ret_area_base),
mem: MemArg::SPOffset(ret_area_base, I8),
});
self.emit_copy_reg_to_arg(ctx, i, rd.to_reg());
}