x64 and aarch64: carry MemFlags on loads/stores; don't emit trap info unless an op can trap.
Previously, this was achieved by carrying a `SourceLoc` on every load/store, which was somewhat cumbersome and only indirectly encoded the one piece of metadata we care about (can this memory reference trap?) through the `SourceLoc`'s presence or absence. We already have a type that tells us everything we might want to know about a load or store -- `MemFlags` -- so we should plumb it through to code emission instead.

This PR attaches a `MemFlags` to each `Amode` on x64, and puts it on the load and store `Inst` variants on aarch64. These two choices seem to factor things out in the nicest way: there are relatively few load/store insts on aarch64 but many addressing modes, while the opposite is true on x64.
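To make the intent concrete, here is a minimal, self-contained Rust sketch of the x64 side of the design: the addressing mode carries the flags, constructors default to `MemFlags::trusted()`, lowering overrides them via `with_flags`, and the emitter consults `can_trap()` before recording trap metadata. The names mirror the diff below, but the types, the `emit_load` helper, and the register representation are simplified stand-ins rather than the actual Cranelift API.

/// Stand-in for Cranelift's `MemFlags`, reduced to the one bit this PR cares about.
#[derive(Clone, Copy, Debug)]
struct MemFlags {
    no_trap: bool,
}

impl MemFlags {
    /// A "trusted" access known not to trap (e.g. spill-slot or stack-frame traffic).
    fn trusted() -> Self {
        MemFlags { no_trap: true }
    }
    /// An ordinary access that may fault (e.g. a guarded heap access).
    fn new() -> Self {
        MemFlags { no_trap: false }
    }
    fn notrap(&self) -> bool {
        self.no_trap
    }
}

/// Simplified x64 addressing mode; `base` is a placeholder register number.
#[derive(Clone, Debug)]
enum Amode {
    ImmReg { simm32: u32, base: u8, flags: MemFlags },
    RipRelative,
}

impl Amode {
    /// Constructors default to `trusted()`; lowering overrides this via `with_flags`.
    fn imm_reg(simm32: u32, base: u8) -> Self {
        Amode::ImmReg { simm32, base, flags: MemFlags::trusted() }
    }

    fn with_flags(&self, flags: MemFlags) -> Self {
        match self {
            &Amode::ImmReg { simm32, base, .. } => Amode::ImmReg { simm32, base, flags },
            _ => panic!("Amode {:?} cannot take memflags", self),
        }
    }

    fn get_flags(&self) -> MemFlags {
        match self {
            Amode::ImmReg { flags, .. } => *flags,
            Amode::RipRelative => MemFlags::trusted(),
        }
    }

    fn can_trap(&self) -> bool {
        !self.get_flags().notrap()
    }
}

/// Emission side: only record a trap site when the access can actually trap.
fn emit_load(mem: &Amode, trap_records: &mut Vec<String>) {
    if mem.can_trap() {
        // The real emitter calls `sink.add_trap(srcloc, TrapCode::HeapOutOfBounds)` here.
        trap_records.push(format!("trap record for {:?}", mem));
    }
    // ... instruction bytes would be emitted here ...
}

fn main() {
    let mut trap_records = Vec::new();
    // Spill-slot style access: trusted, so no trap metadata is recorded.
    emit_load(&Amode::imm_reg(16, 5), &mut trap_records);
    // Heap-style access lowered with "can trap" flags: one trap record.
    emit_load(&Amode::imm_reg(0, 3).with_flags(MemFlags::new()), &mut trap_records);
    assert_eq!(trap_records.len(), 1);
}

The same gating appears in `emit_std_enc_mem` in the diff below, where the trap record is now skipped for trusted accesses.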
@@ -1,7 +1,7 @@
 //! Implementation of the standard x64 ABI.

 use crate::ir::types::*;
-use crate::ir::{self, types, TrapCode, Type};
+use crate::ir::{self, types, MemFlags, TrapCode, Type};
 use crate::isa;
 use crate::isa::{x64::inst::*, CallConv};
 use crate::machinst::abi_impl::*;
@@ -618,6 +618,7 @@ impl From<StackAMode> for SyntheticAmode {
                 SyntheticAmode::Real(Amode::ImmReg {
                     simm32,
                     base: regs::rbp(),
+                    flags: MemFlags::trusted(),
                 })
             }
             StackAMode::NominalSPOffset(off, _ty) => {
@@ -634,6 +635,7 @@ impl From<StackAMode> for SyntheticAmode {
                 SyntheticAmode::Real(Amode::ImmReg {
                     simm32,
                     base: regs::rsp(),
+                    flags: MemFlags::trusted(),
                 })
             }
         }
@@ -3,6 +3,7 @@
 use super::regs::{self, show_ireg_sized};
 use super::EmitState;
 use crate::ir::condcodes::{FloatCC, IntCC};
+use crate::ir::MemFlags;
 use crate::isa::x64::inst::Inst;
 use crate::machinst::*;
 use regalloc::{
@@ -14,10 +15,14 @@ use std::string::String;

 /// A possible addressing mode (amode) that can be used in instructions.
 /// These denote a 64-bit value only.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub enum Amode {
     /// Immediate sign-extended and a Register.
-    ImmReg { simm32: u32, base: Reg },
+    ImmReg {
+        simm32: u32,
+        base: Reg,
+        flags: MemFlags,
+    },

     /// sign-extend-32-to-64(Immediate) + Register1 + (Register2 << Shift)
     ImmRegRegShift {
@@ -25,6 +30,7 @@ pub enum Amode {
         base: Reg,
         index: Reg,
         shift: u8, /* 0 .. 3 only */
+        flags: MemFlags,
     },

     /// sign-extend-32-to-64(Immediate) + RIP (instruction pointer).
@@ -35,7 +41,11 @@ pub enum Amode {
 impl Amode {
     pub(crate) fn imm_reg(simm32: u32, base: Reg) -> Self {
         debug_assert!(base.get_class() == RegClass::I64);
-        Self::ImmReg { simm32, base }
+        Self::ImmReg {
+            simm32,
+            base,
+            flags: MemFlags::trusted(),
+        }
     }

     pub(crate) fn imm_reg_reg_shift(simm32: u32, base: Reg, index: Reg, shift: u8) -> Self {
@@ -47,6 +57,7 @@ impl Amode {
             base,
             index,
             shift,
+            flags: MemFlags::trusted(),
         }
     }

@@ -54,6 +65,30 @@ impl Amode {
         Self::RipRelative { target }
     }

+    pub(crate) fn with_flags(&self, flags: MemFlags) -> Self {
+        match self {
+            &Self::ImmReg { simm32, base, .. } => Self::ImmReg {
+                simm32,
+                base,
+                flags,
+            },
+            &Self::ImmRegRegShift {
+                simm32,
+                base,
+                index,
+                shift,
+                ..
+            } => Self::ImmRegRegShift {
+                simm32,
+                base,
+                index,
+                shift,
+                flags,
+            },
+            _ => panic!("Amode {:?} cannot take memflags", self),
+        }
+    }
+
     /// Add the regs mentioned by `self` to `collector`.
     pub(crate) fn get_regs_as_uses(&self, collector: &mut RegUsageCollector) {
         match self {
@@ -69,12 +104,24 @@ impl Amode {
             }
         }
     }
+
+    pub(crate) fn get_flags(&self) -> MemFlags {
+        match self {
+            Amode::ImmReg { flags, .. } => *flags,
+            Amode::ImmRegRegShift { flags, .. } => *flags,
+            Amode::RipRelative { .. } => MemFlags::trusted(),
+        }
+    }
+
+    pub(crate) fn can_trap(&self) -> bool {
+        !self.get_flags().notrap()
+    }
 }

 impl PrettyPrint for Amode {
     fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
         match self {
-            Amode::ImmReg { simm32, base } => {
+            Amode::ImmReg { simm32, base, .. } => {
                 format!("{}({})", *simm32 as i32, base.show_rru(mb_rru))
             }
             Amode::ImmRegRegShift {
@@ -82,6 +129,7 @@ impl PrettyPrint for Amode {
                 base,
                 index,
                 shift,
+                ..
             } => format!(
                 "{}({},{},{})",
                 *simm32 as i32,
@@ -194,14 +194,14 @@ fn emit_std_enc_mem(
     // expression. But `enc_g` can be derived from a register of any class.

     let srcloc = state.cur_srcloc();
-    if srcloc != SourceLoc::default() {
+    if srcloc != SourceLoc::default() && mem_e.can_trap() {
         sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
     }

     prefixes.emit(sink);

     match mem_e {
-        Amode::ImmReg { simm32, base } => {
+        Amode::ImmReg { simm32, base, .. } => {
             // First, the REX byte.
             let enc_e = int_reg_enc(*base);
             rex.emit_two_op(sink, enc_g, enc_e);
@@ -260,6 +260,7 @@ fn emit_std_enc_mem(
             base: reg_base,
             index: reg_index,
             shift,
+            ..
         } => {
             let enc_base = int_reg_enc(*reg_base);
             let enc_index = int_reg_enc(*reg_index);
@@ -61,7 +61,7 @@ impl UnwindInfoGenerator<Inst> for X64UnwindInfo {
             }
             Inst::MovRM {
                 src,
-                dst: SyntheticAmode::Real(Amode::ImmReg { simm32, base }),
+                dst: SyntheticAmode::Real(Amode::ImmReg { simm32, base, .. }),
                 ..
             } if *base == regs::rsp() => {
                 // `mov reg, imm(rsp)`
@@ -579,6 +579,10 @@ fn matches_small_constant_shift<C: LowerCtx<I = Inst>>(
 ///
 /// Note: the 32-bit offset in Cranelift has to be sign-extended, which maps x86's behavior.
 fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i32) -> Amode {
+    let flags = ctx
+        .memflags(spec.insn)
+        .expect("Instruction with amode should have memflags");
+
     // We now either have an add that we must materialize, or some other input; as well as the
     // final offset.
     if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
@@ -632,7 +636,7 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i
                     let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
                     if low32_will_sign_extend_to_64(final_offset as u64) {
                         let base = put_input_in_reg(ctx, add_inputs[1 - i]);
-                        return Amode::imm_reg(final_offset as u32, base);
+                        return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                     }
                 }
             }
@@ -642,7 +646,7 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i
                     let final_offset = (offset as i64).wrapping_add(cst as i64);
                     if low32_will_sign_extend_to_64(final_offset as u64) {
                         let base = put_input_in_reg(ctx, add_inputs[1 - i]);
-                        return Amode::imm_reg(final_offset as u32, base);
+                        return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                     }
                 }
             }
@@ -654,11 +658,11 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i
             )
         };

-        return Amode::imm_reg_reg_shift(offset as u32, base, index, shift);
+        return Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags);
     }

     let input = put_input_in_reg(ctx, spec);
-    Amode::imm_reg(offset as u32, input)
+    Amode::imm_reg(offset as u32, input).with_flags(flags)
 }

 //=============================================================================
@@ -3060,7 +3064,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     let base = put_input_in_reg(ctx, inputs[0]);
                     let index = put_input_in_reg(ctx, inputs[1]);
                     let shift = 0;
-                    Amode::imm_reg_reg_shift(offset as u32, base, index, shift)
+                    let flags = ctx.memflags(insn).expect("load should have memflags");
+                    Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
                 }

                 _ => unreachable!(),
@@ -3132,7 +3137,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     let base = put_input_in_reg(ctx, inputs[1]);
                     let index = put_input_in_reg(ctx, inputs[2]);
                     let shift = 0;
-                    Amode::imm_reg_reg_shift(offset as u32, base, index, shift)
+                    let flags = ctx.memflags(insn).expect("store should have memflags");
+                    Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
                 }

                 _ => unreachable!(),