Add reference types R32 and R64

- Add resumable_trap, safepoint, isnull, and null instructions
- Add Stackmap struct and StackmapSink trait

Co-authored-by: Mir Ahmed <mirahmed753@gmail.com>
Co-authored-by: Dan Gohman <sunfish@mozilla.com>
Author:       Carmen Kwan
Date:         2019-07-23 16:28:54 -07:00
Committed by: Dan Gohman
Parent:       b659262d2a
Commit:       19257f80c1
47 changed files with 1027 additions and 62 deletions

View File

@@ -13,9 +13,11 @@
//! that a `MemoryCodeSink` will always write binary machine code to raw memory. It forwards any
//! relocations to a `RelocSink` trait object. Relocations are less frequent than the
//! `CodeSink::put*` methods, so the performance impact of the virtual callbacks is less severe.
use super::{Addend, CodeInfo, CodeOffset, CodeSink, Reloc};
-use crate::ir::{ExternalName, JumpTable, SourceLoc, TrapCode};
+use crate::binemit::stackmap::Stackmap;
+use crate::ir::entities::Value;
+use crate::ir::{ExternalName, Function, JumpTable, SourceLoc, TrapCode};
+use crate::isa::TargetIsa;
use core::ptr::write_unaligned;
/// A `CodeSink` that writes binary machine code directly into memory.
@@ -36,6 +38,7 @@ pub struct MemoryCodeSink<'a> {
offset: isize,
relocs: &'a mut dyn RelocSink,
traps: &'a mut dyn TrapSink,
stackmaps: &'a mut dyn StackmapSink,
/// Information about the generated code and read-only data.
pub info: CodeInfo,
}
@@ -49,6 +52,7 @@ impl<'a> MemoryCodeSink<'a> {
data: *mut u8,
relocs: &'a mut dyn RelocSink,
traps: &'a mut dyn TrapSink,
stackmaps: &'a mut dyn StackmapSink,
) -> Self {
Self {
data,
@@ -61,6 +65,7 @@ impl<'a> MemoryCodeSink<'a> {
},
relocs,
traps,
stackmaps,
}
}
}
@@ -149,6 +154,12 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
self.info.rodata_size = self.offset() - (self.info.jumptables_size + self.info.code_size);
self.info.total_size = self.offset();
}
fn add_stackmap(&mut self, val_list: &[Value], func: &Function, isa: &dyn TargetIsa) {
let ofs = self.offset();
let stackmap = Stackmap::from_values(&val_list, func, isa);
self.stackmaps.add_stackmap(ofs, stackmap);
}
}
/// A `TrapSink` implementation that does nothing, which is convenient when
@@ -158,3 +169,16 @@ pub struct NullTrapSink {}
impl TrapSink for NullTrapSink {
fn trap(&mut self, _offset: CodeOffset, _srcloc: SourceLoc, _code: TrapCode) {}
}
/// A trait for emitting stackmaps.
pub trait StackmapSink {
/// Output a bitmap of the stack representing the live reference values at the given code offset.
fn add_stackmap(&mut self, _: CodeOffset, _: Stackmap);
}
/// Placeholder StackmapSink that does nothing.
pub struct NullStackmapSink {}
impl StackmapSink for NullStackmapSink {
fn add_stackmap(&mut self, _: CodeOffset, _: Stackmap) {}
}

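For illustration only (not part of this diff): a minimal `StackmapSink` implementation might simply record each emitted stackmap together with its code offset. The `CollectingStackmapSink` name and its field are hypothetical.

use cranelift_codegen::binemit::{CodeOffset, Stackmap, StackmapSink};

/// Hypothetical sink that records every stackmap and the code offset it was emitted at.
pub struct CollectingStackmapSink {
    /// (code offset, stackmap) pairs, in emission order.
    pub stackmaps: Vec<(CodeOffset, Stackmap)>,
}

impl StackmapSink for CollectingStackmapSink {
    fn add_stackmap(&mut self, offset: CodeOffset, map: Stackmap) {
        // Keep the pairs so they can be serialized or inspected after emission.
        self.stackmaps.push((offset, map));
    }
}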
View File

@@ -6,13 +6,18 @@
mod memorysink;
mod relaxation;
mod shrink;
mod stackmap;
-pub use self::memorysink::{MemoryCodeSink, NullTrapSink, RelocSink, TrapSink};
+pub use self::memorysink::{
+MemoryCodeSink, NullStackmapSink, NullTrapSink, RelocSink, StackmapSink, TrapSink,
+};
pub use self::relaxation::relax_branches;
pub use self::shrink::shrink_instructions;
-pub use crate::regalloc::RegDiversions;
+pub use self::stackmap::Stackmap;
+use crate::ir::entities::Value;
use crate::ir::{ExternalName, Function, Inst, JumpTable, SourceLoc, TrapCode};
+use crate::isa::TargetIsa;
+pub use crate::regalloc::RegDiversions;
use core::fmt;
#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};
@@ -141,6 +146,9 @@ pub trait CodeSink {
/// Read-only data output is complete, we're done.
fn end_codegen(&mut self);
/// Add a stackmap at the current code offset.
fn add_stackmap(&mut self, _: &[Value], _: &Function, _: &dyn TargetIsa);
}
/// Report a bad encoding error.
@@ -157,17 +165,17 @@ pub fn bad_encoding(func: &Function, inst: Inst) -> ! {
///
/// This function is called from the `TargetIsa::emit_function()` implementations with the
/// appropriate instruction emitter.
-pub fn emit_function<CS, EI>(func: &Function, emit_inst: EI, sink: &mut CS)
+pub fn emit_function<CS, EI>(func: &Function, emit_inst: EI, sink: &mut CS, isa: &dyn TargetIsa)
where
CS: CodeSink,
-EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS),
+EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS, &dyn TargetIsa),
{
let mut divert = RegDiversions::new();
for ebb in func.layout.ebbs() {
divert.clear();
debug_assert_eq!(func.offsets[ebb], sink.offset());
for inst in func.layout.ebb_insts(ebb) {
-emit_inst(func, inst, &mut divert, sink);
+emit_inst(func, inst, &mut divert, sink, isa);
}
}

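The per-ISA emit_inst functions (generated code, not shown in this diff) are what receive the extra isa argument. Conceptually, the recipe for a safepoint instruction uses it to turn the instruction's value arguments into a stackmap; a rough sketch of that shape follows, using the Function, Inst, CodeSink, and TargetIsa types already imported in this module. The function name is hypothetical.

// Sketch only: how a safepoint recipe could hand its live references to the sink.
fn emit_safepoint<CS: CodeSink>(func: &Function, inst: Inst, sink: &mut CS, isa: &dyn TargetIsa) {
    // A safepoint instruction carries the live reference values as its arguments.
    let live_refs = func.dfg.inst_args(inst);
    // Record a stackmap for the current code offset.
    sink.add_stackmap(live_refs, func, isa);
}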
View File

@@ -0,0 +1,122 @@
use crate::bitset::BitSet;
use crate::ir;
use crate::isa::TargetIsa;
use std::vec::Vec;
/// Wrapper type for longer bit vectors that cannot be represented by a single `BitSet`.
#[derive(Clone, Debug)]
pub struct Stackmap {
bitmap: Vec<BitSet<u32>>,
}
impl Stackmap {
/// Create a stackmap based on where references are located on a function's stack.
pub fn from_values(
args: &[ir::entities::Value],
func: &ir::Function,
isa: &dyn TargetIsa,
) -> Self {
let loc = &func.locations;
let mut live_ref_in_stack_slot = std::collections::HashSet::new();
// References can be in registers, and live register values are pushed onto the stack before calls and traps.
// TODO: Implement register maps. If a register containing a reference is spilled and reused after a safepoint,
// it could contain a stale reference value if the garbage collector relocated the value.
for val in args {
if let Some(value_loc) = loc.get(*val) {
match *value_loc {
ir::ValueLoc::Stack(stack_slot) => live_ref_in_stack_slot.insert(stack_slot),
_ => false,
};
}
}
// SpiderMonkey stackmap structure:
// <trap reg dump> + <general spill> + <frame> + <inbound args>
// Bit vector goes from lower addresses to higher addresses.
// TODO: Get the trap register layout from SpiderMonkey and prepend it to the bit vector below.
let stack = &func.stack_slots;
let frame_size = stack.frame_size.unwrap();
let word_size = ir::stackslot::StackSize::from(isa.pointer_bytes());
let num_words = (frame_size / word_size) as usize;
let mut vec = std::vec::Vec::with_capacity(num_words);
vec.resize(num_words, false);
// Frame (includes spills and inbound args).
for (ss, ssd) in stack.iter() {
if live_ref_in_stack_slot.contains(&ss) {
// Assumption: a greater magnitude of offset implies a higher address.
let index = (((ssd.offset.unwrap().abs() as u32) - ssd.size) / word_size) as usize;
vec[index] = true;
}
}
Stackmap::from_vec(&vec)
}
/// Create a `Stackmap` from a vector of booleans, packed into 32-bit `BitSet` words.
pub fn from_vec(vec: &Vec<bool>) -> Self {
let mut rem = vec.len();
let num_word = ((rem as f32) / 32.0).ceil() as usize;
let mut bitmap = Vec::with_capacity(num_word);
for i in 0..num_word {
let mut curr_word = 0;
let count = if rem > 32 { 32 } else { rem };
for j in 0..count {
if vec[i * 32 + j] {
curr_word |= 1 << j;
}
}
bitmap.push(BitSet::<u32>(curr_word));
rem -= count;
}
Self { bitmap }
}
/// Returns the bit at the given index.
pub fn get_bit(&self, bit_index: usize) -> bool {
assert!(bit_index < 32 * self.bitmap.len());
let word_index = bit_index / 32;
let word_offset = (bit_index % 32) as u8;
self.bitmap[word_index].contains(word_offset)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn stackmaps() {
let vec: Vec<bool> = Vec::new();
assert!(Stackmap::from_vec(&vec).bitmap.is_empty());
let mut vec: [bool; 32] = Default::default();
let set_true_idx = [5, 7, 24, 31];
for idx in set_true_idx.iter() {
vec[*idx] = true;
}
let mut vec = vec.to_vec();
assert_eq!(
vec![BitSet::<u32>(2164261024)],
Stackmap::from_vec(&vec).bitmap
);
vec.push(false);
vec.push(true);
let res = Stackmap::from_vec(&vec);
assert_eq!(
vec![BitSet::<u32>(2164261024), BitSet::<u32>(2)],
res.bitmap
);
assert!(res.get_bit(5));
assert!(res.get_bit(31));
assert!(res.get_bit(33));
assert!(!res.get_bit(1));
}
}

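As a sanity check on the constants used in the test above (assuming bits are packed least-significant-bit first within each 32-bit word, as from_vec does): setting bits 5, 7, 24, and 31 gives 2^5 + 2^7 + 2^24 + 2^31 = 32 + 128 + 16777216 + 2147483648 = 2164261024, which matches BitSet::<u32>(2164261024). After pushing one false and one true, the 34-bit vector spills into a second word whose only set bit is bit 1 (the 33rd bit overall), i.e. 2^1 = 2.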
View File

@@ -10,7 +10,8 @@
//! single ISA instance.
use crate::binemit::{
-relax_branches, shrink_instructions, CodeInfo, MemoryCodeSink, RelocSink, TrapSink,
+relax_branches, shrink_instructions, CodeInfo, MemoryCodeSink, RelocSink, StackmapSink,
+TrapSink,
};
use crate::dce::do_dce;
use crate::dominator_tree::DominatorTree;
@@ -100,12 +101,14 @@ impl Context {
mem: &mut Vec<u8>,
relocs: &mut dyn RelocSink,
traps: &mut dyn TrapSink,
stackmaps: &mut dyn StackmapSink,
) -> CodegenResult<CodeInfo> {
let info = self.compile(isa)?;
let old_len = mem.len();
mem.resize(old_len + info.total_size as usize, 0);
-let new_info =
-unsafe { self.emit_to_memory(isa, mem.as_mut_ptr().add(old_len), relocs, traps) };
+let new_info = unsafe {
+self.emit_to_memory(isa, mem.as_mut_ptr().add(old_len), relocs, traps, stackmaps)
+};
debug_assert!(new_info == info);
Ok(info)
}
@@ -168,9 +171,10 @@ impl Context {
mem: *mut u8,
relocs: &mut dyn RelocSink,
traps: &mut dyn TrapSink,
stackmaps: &mut dyn StackmapSink,
) -> CodeInfo {
let _tt = timing::binemit();
-let mut sink = MemoryCodeSink::new(mem, relocs, traps);
+let mut sink = MemoryCodeSink::new(mem, relocs, traps, stackmaps);
isa.emit_function_to_memory(&self.func, &mut sink);
sink.info
}

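A caller-side sketch of the updated entry point, assuming an already-populated Context (ctx) and a TargetIsa (isa); MyRelocSink is a hypothetical user-supplied RelocSink, while the null sinks come from this change.

use cranelift_codegen::binemit::{NullStackmapSink, NullTrapSink};

let mut mem = Vec::new();
let mut relocs = MyRelocSink::default(); // hypothetical RelocSink implementation
let mut traps = NullTrapSink {};
let mut stackmaps = NullStackmapSink {};
let info = ctx.compile_and_emit(isa, &mut mem, &mut relocs, &mut traps, &mut stackmaps)?;
// A recording sink (unlike NullStackmapSink) would now hold one Stackmap per safepoint emitted.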
View File

@@ -444,6 +444,8 @@ pub struct ValueTypeSet {
pub floats: BitSet8,
/// Allowed bool widths
pub bools: BitSet8,
/// Allowed ref widths
pub refs: BitSet8,
}
impl ValueTypeSet {
@@ -458,6 +460,8 @@ impl ValueTypeSet {
self.floats.contains(l2b)
} else if scalar.is_bool() {
self.bools.contains(l2b)
} else if scalar.is_ref() {
self.refs.contains(l2b)
} else {
false
}
@@ -652,6 +656,7 @@ mod tests {
ints: BitSet8::from_range(4, 7),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(3, 7),
refs: BitSet8::from_range(5, 7),
};
assert!(!vts.contains(I8));
assert!(vts.contains(I32));
@@ -661,6 +666,8 @@ mod tests {
assert!(!vts.contains(B1));
assert!(vts.contains(B8));
assert!(vts.contains(B64));
assert!(vts.contains(R32));
assert!(vts.contains(R64));
assert_eq!(vts.example().to_string(), "i32");
let vts = ValueTypeSet {
@@ -668,6 +675,7 @@ mod tests {
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(5, 7),
bools: BitSet8::from_range(3, 7),
refs: BitSet8::from_range(0, 0),
};
assert_eq!(vts.example().to_string(), "f32");
@@ -676,6 +684,7 @@ mod tests {
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(5, 7),
bools: BitSet8::from_range(3, 7),
refs: BitSet8::from_range(0, 0),
};
assert_eq!(vts.example().to_string(), "f32x2");
@@ -684,6 +693,7 @@ mod tests {
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(3, 7),
refs: BitSet8::from_range(0, 0),
};
assert!(!vts.contains(B32X2));
assert!(vts.contains(B32X4));
@@ -695,8 +705,11 @@ mod tests {
ints: BitSet8::from_range(3, 7),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(0, 0),
refs: BitSet8::from_range(0, 0),
};
assert!(vts.contains(I32));
assert!(vts.contains(I32X4));
assert!(!vts.contains(R32));
assert!(!vts.contains(R64));
}
}

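A note on the ranges used above, assuming BitSet8::from_range(lo, hi) covers the half-open range [lo, hi) of log2 lane widths, which is what the surrounding assertions suggest: refs: BitSet8::from_range(5, 7) admits widths 2^5 = 32 and 2^6 = 64 bits, i.e. exactly R32 and R64, while from_range(0, 0) admits no reference types at all.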
View File

@@ -61,8 +61,8 @@ impl Type {
B1 => 0,
B8 | I8 => 3,
B16 | I16 => 4,
-B32 | I32 | F32 => 5,
-B64 | I64 | F64 => 6,
+B32 | I32 | F32 | R32 => 5,
+B64 | I64 | F64 | R64 => 6,
_ => 0,
}
}
@@ -73,8 +73,8 @@ impl Type {
B1 => 1,
B8 | I8 => 8,
B16 | I16 => 16,
-B32 | I32 | F32 => 32,
-B64 | I64 | F64 => 64,
+B32 | I32 | F32 | R32 => 32,
+B64 | I64 | F64 | R64 => 64,
_ => 0,
}
}
@@ -99,7 +99,7 @@ impl Type {
/// Get a type with the same number of lanes as this type, but with the lanes replaced by
/// booleans of the same size.
///
-/// Scalar types are treated as vectors with one lane, so they are converted to the multi-bit
+/// Lane types are treated as vectors with one lane, so they are converted to the multi-bit
/// boolean types.
pub fn as_bool_pedantic(self) -> Self {
// Replace the low 4 bits with the boolean version, preserve the high 4 bits.
@@ -108,6 +108,7 @@ impl Type {
B16 | I16 => B16,
B32 | I32 | F32 => B32,
B64 | I64 | F64 => B64,
R32 | R64 => panic!("Reference types should not convert to bool"),
_ => B1,
})
}
@@ -210,6 +211,14 @@ impl Type {
}
}
/// Is this a ref type?
pub fn is_ref(self) -> bool {
match self {
R32 | R64 => true,
_ => false,
}
}
/// Get log_2 of the number of lanes in this SIMD vector type.
///
/// All SIMD types have a lane count that is a power of two and no larger than 256, so this
@@ -301,6 +310,8 @@ impl Display for Type {
write!(f, "f{}", self.lane_bits())
} else if self.is_vector() {
write!(f, "{}x{}", self.lane_type(), self.lane_count())
} else if self.is_ref() {
write!(f, "r{}", self.lane_bits())
} else {
f.write_str(match *self {
IFLAGS => "iflags",
@@ -322,6 +333,8 @@ impl Debug for Type {
write!(f, "types::F{}", self.lane_bits())
} else if self.is_vector() {
write!(f, "{:?}X{}", self.lane_type(), self.lane_count())
} else if self.is_ref() {
write!(f, "types::R{}", self.lane_bits())
} else {
match *self {
INVALID => write!(f, "types::INVALID"),
@@ -366,6 +379,8 @@ mod tests {
assert_eq!(B1, B1.by(8).unwrap().lane_type());
assert_eq!(I32, I32X4.lane_type());
assert_eq!(F64, F64X2.lane_type());
assert_eq!(R32, R32.lane_type());
assert_eq!(R64, R64.lane_type());
assert_eq!(INVALID.lane_bits(), 0);
assert_eq!(IFLAGS.lane_bits(), 0);
@@ -381,6 +396,8 @@ mod tests {
assert_eq!(I64.lane_bits(), 64);
assert_eq!(F32.lane_bits(), 32);
assert_eq!(F64.lane_bits(), 64);
assert_eq!(R32.lane_bits(), 32);
assert_eq!(R64.lane_bits(), 64);
}
#[test]
@@ -450,6 +467,8 @@ mod tests {
assert_eq!(I64.to_string(), "i64");
assert_eq!(F32.to_string(), "f32");
assert_eq!(F64.to_string(), "f64");
assert_eq!(R32.to_string(), "r32");
assert_eq!(R64.to_string(), "r64");
}
#[test]

View File

@@ -2,6 +2,7 @@
use crate::binemit::{bad_encoding, CodeSink};
use crate::ir::{Function, Inst};
use crate::isa::TargetIsa;
use crate::regalloc::RegDiversions;
include!(concat!(env!("OUT_DIR"), "/binemit-arm32.rs"));

View File

@@ -121,11 +121,11 @@ impl TargetIsa for Isa {
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
-binemit::emit_inst(func, inst, divert, sink)
+binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
-emit_function(func, binemit::emit_inst, sink)
+emit_function(func, binemit::emit_inst, sink, self)
}
}

View File

@@ -2,6 +2,7 @@
use crate::binemit::{bad_encoding, CodeSink};
use crate::ir::{Function, Inst};
use crate::isa::TargetIsa;
use crate::regalloc::RegDiversions;
include!(concat!(env!("OUT_DIR"), "/binemit-arm64.rs"));

View File

@@ -108,11 +108,11 @@ impl TargetIsa for Isa {
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
-binemit::emit_inst(func, inst, divert, sink)
+binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
-emit_function(func, binemit::emit_inst, sink)
+emit_function(func, binemit::emit_inst, sink, self)
}
}

View File

@@ -2,7 +2,7 @@
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::{Function, Inst, InstructionData};
-use crate::isa::{RegUnit, StackBaseMask, StackRef};
+use crate::isa::{RegUnit, StackBaseMask, StackRef, TargetIsa};
use crate::predicates::is_signed_int;
use crate::regalloc::RegDiversions;
use core::u32;

View File

@@ -115,11 +115,11 @@ impl TargetIsa for Isa {
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
-binemit::emit_inst(func, inst, divert, sink)
+binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
-emit_function(func, binemit::emit_inst, sink)
+emit_function(func, binemit::emit_inst, sink, self)
}
}

View File

@@ -5,7 +5,7 @@ use super::registers::RU;
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::condcodes::{CondCode, FloatCC, IntCC};
use crate::ir::{Ebb, Function, Inst, InstructionData, JumpTable, Opcode, TrapCode};
-use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef};
+use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef, TargetIsa};
use crate::regalloc::RegDiversions;
include!(concat!(env!("OUT_DIR"), "/binemit-x86.rs"));

View File

@@ -131,11 +131,11 @@ impl TargetIsa for Isa {
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
-binemit::emit_inst(func, inst, divert, sink)
+binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
-emit_function(func, binemit::emit_inst, sink)
+emit_function(func, binemit::emit_inst, sink, self)
}
fn prologue_epilogue(&self, func: &mut ir::Function) -> CodegenResult<()> {

View File

@@ -13,6 +13,7 @@ use crate::regalloc::coloring::Coloring;
use crate::regalloc::live_value_tracker::LiveValueTracker;
use crate::regalloc::liveness::Liveness;
use crate::regalloc::reload::Reload;
use crate::regalloc::safepoint::emit_stackmaps;
use crate::regalloc::spilling::Spilling;
use crate::regalloc::virtregs::VirtRegs;
use crate::result::CodegenResult;
@@ -192,6 +193,20 @@ impl Context {
self.coloring
.run(isa, func, domtree, &mut self.liveness, &mut self.tracker);
// This function runs after register allocation has taken
// place, meaning values have locations assigned already.
if isa.flags().enable_safepoints() {
emit_stackmaps(func, domtree, &self.liveness, &mut self.tracker, isa);
} else {
// Make sure no references are used.
for val in func.dfg.values() {
let ty = func.dfg.value_type(val);
if ty.lane_type().is_ref() {
panic!("reference types were found but safepoints were not enabled.");
}
}
}
if isa.flags().enable_verifier() {
let ok = verify_context(func, cfg, domtree, isa, &mut errors).is_ok()
&& verify_liveness(isa, func, cfg, &self.liveness, &mut errors).is_ok()

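Since the new pass only runs when enable_safepoints is set, a front end that produces R32/R64 values has to turn the flag on when building its TargetIsa. A rough caller-side sketch using the shared settings builder; the triple and error handling are illustrative only.

use cranelift_codegen::isa;
use cranelift_codegen::settings::{self, Configurable};

let mut flag_builder = settings::builder();
// Reference types require safepoints so a GC can find live references at runtime.
flag_builder.enable("enable_safepoints").expect("unknown flag");
let flags = settings::Flags::new(flag_builder);
let isa = isa::lookup(triple).expect("unsupported target").finish(flags);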
View File

@@ -15,9 +15,11 @@ mod context;
mod diversion;
mod pressure;
mod reload;
mod safepoint;
mod solver;
mod spilling;
pub use self::context::Context;
pub use self::diversion::RegDiversions;
pub use self::register_set::RegisterSet;
pub use self::safepoint::emit_stackmaps;

View File

@@ -0,0 +1,72 @@
use crate::cursor::{Cursor, FuncCursor};
use crate::dominator_tree::DominatorTree;
use crate::ir::{Function, InstBuilder, InstructionData, Opcode, TrapCode};
use crate::isa::TargetIsa;
use crate::regalloc::live_value_tracker::LiveValueTracker;
use crate::regalloc::liveness::Liveness;
use std::vec::Vec;
fn insert_and_encode_safepoint<'f>(
pos: &mut FuncCursor<'f>,
tracker: &LiveValueTracker,
isa: &dyn TargetIsa,
) {
// Iterate through all live values, collect only the references.
let live_ref_values = tracker
.live()
.iter()
.filter(|live_value| pos.func.dfg.value_type(live_value.value).is_ref())
.map(|live_val| live_val.value)
.collect::<Vec<_>>();
if !live_ref_values.is_empty() {
pos.ins().safepoint(&live_ref_values);
// Move cursor to the new safepoint instruction to encode it.
if let Some(inst) = pos.prev_inst() {
let ok = pos.func.update_encoding(inst, isa).is_ok();
debug_assert!(ok);
}
// Restore cursor position.
pos.next_inst();
}
}
// emit_stackmaps() traverses a function's EBBs in layout order, analyzing each instruction to
// track the liveness of its defs and operands, and inserts safepoint instructions before calls
// and resumable (interrupt) traps where reference values are live.
pub fn emit_stackmaps(
func: &mut Function,
domtree: &DominatorTree,
liveness: &Liveness,
tracker: &mut LiveValueTracker,
isa: &dyn TargetIsa,
) {
let mut curr = func.layout.entry_block();
while let Some(ebb) = curr {
tracker.ebb_top(ebb, &func.dfg, liveness, &func.layout, domtree);
tracker.drop_dead_params();
let mut pos = FuncCursor::new(func);
// From the top of the ebb, step through the instructions.
pos.goto_top(ebb);
while let Some(inst) = pos.next_inst() {
if let InstructionData::Trap {
code: TrapCode::Interrupt,
..
} = &pos.func.dfg[inst]
{
insert_and_encode_safepoint(&mut pos, tracker, isa);
} else if pos.func.dfg[inst].opcode().is_call() {
insert_and_encode_safepoint(&mut pos, tracker, isa);
} else if pos.func.dfg[inst].opcode() == Opcode::Safepoint {
panic!("safepoint instruction can only be used by the compiler!");
}
// Process the instruction and get rid of dead values.
tracker.process_inst(inst, &pos.func.dfg, liveness);
tracker.drop_dead(inst);
}
curr = func.layout.next_ebb(ebb);
}
}

View File

@@ -390,6 +390,7 @@ mod tests {
enable_nan_canonicalization = false\n\
enable_simd = false\n\
enable_atomics = true\n\
enable_safepoints = false\n\
allones_funcaddrs = false\n\
probestack_enabled = true\n\
probestack_func_adjusts_sp = false\n\

View File

@@ -1672,9 +1672,12 @@ impl<'a> Verifier<'a> {
// Instructions with side effects are not allowed to be ghost instructions.
let opcode = self.func.dfg[inst].opcode();
-// The `fallthrough` and `fallthrough_return` instructions are marked as terminators and
-// branches, but they are not required to have an encoding.
-if opcode == Opcode::Fallthrough || opcode == Opcode::FallthroughReturn {
+// The `fallthrough`, `fallthrough_return`, and `safepoint` instructions are not required
+// to have an encoding.
+if opcode == Opcode::Fallthrough
+|| opcode == Opcode::FallthroughReturn
+|| opcode == Opcode::Safepoint
+{
return Ok(());
}
@@ -1739,6 +1742,24 @@ impl<'a> Verifier<'a> {
}
}
fn verify_safepoint_unused(
&self,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if let Some(isa) = self.isa {
if !isa.flags().enable_safepoints() && self.func.dfg[inst].opcode() == Opcode::Safepoint
{
return fatal!(
errors,
inst,
"safepoint instruction cannot be used when it is not enabled."
);
}
}
Ok(())
}
pub fn run(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.verify_global_values(errors)?;
self.verify_heaps(errors)?;
@@ -1750,6 +1771,7 @@ impl<'a> Verifier<'a> {
for inst in self.func.layout.ebb_insts(ebb) {
self.ebb_integrity(ebb, inst, errors)?;
self.instruction_integrity(inst, errors)?;
self.verify_safepoint_unused(inst, errors)?;
self.typecheck(inst, errors)?;
self.verify_encoding(inst, errors)?;
self.immediate_constraints(inst, errors)?;