diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit.rs b/cranelift/codegen/src/isa/aarch64/inst/emit.rs index abcb822b1a..d422fdc24f 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit.rs @@ -463,10 +463,26 @@ impl EmitState { } } +/// Constant state used during function compilation. +pub struct EmitInfo(settings::Flags); + +impl EmitInfo { + pub(crate) fn new(flags: settings::Flags) -> Self { + Self(flags) + } +} + +impl MachInstEmitInfo for EmitInfo { + fn flags(&self) -> &settings::Flags { + &self.0 + } +} + impl MachInstEmit for Inst { type State = EmitState; + type Info = EmitInfo; - fn emit(&self, sink: &mut MachBuffer, flags: &settings::Flags, state: &mut EmitState) { + fn emit(&self, sink: &mut MachBuffer, emit_info: &Self::Info, state: &mut EmitState) { // N.B.: we *must* not exceed the "worst-case size" used to compute // where to insert islands, except when islands are explicitly triggered // (with an `EmitIsland`). We check this in debug builds. This is `mut` @@ -742,7 +758,7 @@ impl MachInstEmit for Inst { let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } // ldst encoding helpers take Reg, not Writable. @@ -887,7 +903,7 @@ impl MachInstEmit for Inst { let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } let (op, bits) = match self { @@ -1500,11 +1516,11 @@ impl MachInstEmit for Inst { mem: AMode::Label(MemLabel::PCRel(8)), srcloc: None, }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(8), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); sink.put4(const_data.to_bits()); } &Inst::LoadFpuConst64 { rd, const_data } => { @@ -1513,11 +1529,11 @@ impl MachInstEmit for Inst { mem: AMode::Label(MemLabel::PCRel(8)), srcloc: None, }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(12), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); sink.put8(const_data.to_bits()); } &Inst::LoadFpuConst128 { rd, const_data } => { @@ -1526,11 +1542,11 @@ impl MachInstEmit for Inst { mem: AMode::Label(MemLabel::PCRel(8)), srcloc: None, }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(20), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); for i in const_data.to_le_bytes().iter() { sink.put1(*i); @@ -1854,7 +1870,7 @@ impl MachInstEmit for Inst { if top22 != 0 { sink.put4(enc_extend(top22, rd, rn)); } else { - Inst::mov32(rd, rn).emit(sink, flags, state); + Inst::mov32(rd, rn).emit(sink, emit_info, state); } } &Inst::Extend { @@ -1877,7 +1893,7 @@ impl MachInstEmit for Inst { rn: zero_reg(), rm: rd.to_reg(), }; - sub_inst.emit(sink, flags, state); + sub_inst.emit(sink, emit_info, state); } &Inst::Extend { rd, @@ -1964,7 +1980,7 @@ impl MachInstEmit for Inst { sink.use_label_at_offset(off, label, LabelUse::Branch19); // udf let trap = Inst::Udf { trap_info }; - trap.emit(sink, flags, state); + trap.emit(sink, emit_info, state); // LABEL: sink.bind_label(label); } @@ -2022,10 +2038,10 @@ impl MachInstEmit for Inst { // Save index in a tmp (the live range of ridx only goes to start of this // 
sequence; rtmp1 or rtmp2 may overwrite it). let inst = Inst::gen_move(rtmp2, ridx, I64); - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); // Load address of jump table let inst = Inst::Adr { rd: rtmp1, off: 16 }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); // Load value out of jump table let inst = Inst::SLoad32 { rd: rtmp2, @@ -2037,7 +2053,7 @@ impl MachInstEmit for Inst { ), srcloc: None, // can't cause a user trap. }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); // Add base of jump table to jump-table-sourced block offset let inst = Inst::AluRRR { alu_op: ALUOp::Add64, @@ -2045,14 +2061,14 @@ impl MachInstEmit for Inst { rn: rtmp1.to_reg(), rm: rtmp2.to_reg(), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); // Branch to computed address. (`targets` here is only used for successor queries // and is not needed for emission.) let inst = Inst::IndirectBr { rn: rtmp1.to_reg(), targets: vec![], }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); // Emit jump table (table of 32-bit offsets). let jt_off = sink.cur_offset(); for &target in info.targets.iter() { @@ -2085,13 +2101,13 @@ impl MachInstEmit for Inst { mem: AMode::Label(MemLabel::PCRel(8)), srcloc: None, // can't cause a user trap. }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(12), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); sink.add_reloc(srcloc, Reloc::Abs8, name, offset); - if flags.emit_all_ones_funcaddrs() { + if emit_info.flags().emit_all_ones_funcaddrs() { sink.put8(u64::max_value()); } else { sink.put8(0); @@ -2100,7 +2116,7 @@ impl MachInstEmit for Inst { &Inst::LoadAddr { rd, ref mem } => { let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } let (reg, offset) = match mem { @@ -2121,7 +2137,7 @@ impl MachInstEmit for Inst { if offset == 0 { let mov = Inst::mov(rd, reg); - mov.emit(sink, flags, state); + mov.emit(sink, emit_info, state); } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { let add = Inst::AluRRImm12 { alu_op, @@ -2129,7 +2145,7 @@ impl MachInstEmit for Inst { rn: reg, imm12, }; - add.emit(sink, flags, state); + add.emit(sink, emit_info, state); } else { // Use `tmp2` here: `reg` may be `spilltmp` if the `AMode` on this instruction // was initially an `SPOffset`. Assert that `tmp2` is truly free to use. 
Note @@ -2140,7 +2156,7 @@ impl MachInstEmit for Inst { debug_assert!(reg != tmp2_reg()); let tmp = writable_tmp2_reg(); for insn in Inst::load_constant(tmp, abs_offset).into_iter() { - insn.emit(sink, flags, state); + insn.emit(sink, emit_info, state); } let add = Inst::AluRRR { alu_op, @@ -2148,7 +2164,7 @@ impl MachInstEmit for Inst { rn: reg, rm: tmp.to_reg(), }; - add.emit(sink, flags, state); + add.emit(sink, emit_info, state); } } &Inst::VirtualSPOffsetAdj { offset } => { @@ -2165,7 +2181,7 @@ impl MachInstEmit for Inst { let jmp = Inst::Jump { dest: BranchTarget::Label(jump_around_label), }; - jmp.emit(sink, flags, state); + jmp.emit(sink, emit_info, state); sink.emit_island(); sink.bind_label(jump_around_label); } diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs index 1a29ba9659..6d981c2eaa 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs @@ -9,7 +9,6 @@ use alloc::vec::Vec; #[test] fn test_aarch64_binemit() { - let flags = settings::Flags::new(settings::builder()); let mut insns = Vec::<(Inst, &str, &str)>::new(); // N.B.: the architecture is little-endian, so when transcribing the 32-bit @@ -4668,7 +4667,9 @@ fn test_aarch64_binemit() { insns.push((Inst::Fence {}, "BF3B03D5", "dmb ish")); - let rru = create_reg_universe(&settings::Flags::new(settings::builder())); + let flags = settings::Flags::new(settings::builder()); + let rru = create_reg_universe(&flags); + let emit_info = EmitInfo::new(flags); for (insn, expected_encoding, expected_printing) in insns { println!( "AArch64: {:?}, {}, {}", @@ -4681,7 +4682,7 @@ fn test_aarch64_binemit() { let mut sink = test_utils::TestCodeSink::new(); let mut buffer = MachBuffer::new(); - insn.emit(&mut buffer, &flags, &mut Default::default()); + insn.emit(&mut buffer, &emit_info, &mut Default::default()); let buffer = buffer.finish(); buffer.emit(&mut sink); let actual_encoding = &sink.stringify(); diff --git a/cranelift/codegen/src/isa/aarch64/mod.rs b/cranelift/codegen/src/isa/aarch64/mod.rs index 9cbbd633f5..9b3fb3e91b 100644 --- a/cranelift/codegen/src/isa/aarch64/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/mod.rs @@ -20,6 +20,8 @@ mod lower_inst; use inst::create_reg_universe; +use self::inst::EmitInfo; + /// An AArch64 backend. 
pub struct AArch64Backend { triple: Triple, @@ -45,8 +47,9 @@ impl AArch64Backend { func: &Function, flags: settings::Flags, ) -> CodegenResult> { + let emit_info = EmitInfo::new(flags.clone()); let abi = Box::new(abi::AArch64ABICallee::new(func, flags)?); - compile::compile::(func, self, abi) + compile::compile::(func, self, abi, emit_info) } } @@ -58,6 +61,7 @@ impl MachBackend for AArch64Backend { ) -> CodegenResult { let flags = self.flags(); let vcode = self.compile_vcode(func, flags.clone())?; + let buffer = vcode.emit(); let frame_size = vcode.frame_size(); diff --git a/cranelift/codegen/src/isa/arm32/inst/emit.rs b/cranelift/codegen/src/isa/arm32/inst/emit.rs index e2fbe6679d..c671325c72 100644 --- a/cranelift/codegen/src/isa/arm32/inst/emit.rs +++ b/cranelift/codegen/src/isa/arm32/inst/emit.rs @@ -255,10 +255,27 @@ impl EmitState { } } +pub struct EmitInfo { + flags: settings::Flags, +} + +impl EmitInfo { + pub(crate) fn new(flags: settings::Flags) -> Self { + EmitInfo { flags } + } +} + +impl MachInstEmitInfo for EmitInfo { + fn flags(&self) -> &settings::Flags { + &self.flags + } +} + impl MachInstEmit for Inst { + type Info = EmitInfo; type State = EmitState; - fn emit(&self, sink: &mut MachBuffer, flags: &settings::Flags, state: &mut EmitState) { + fn emit(&self, sink: &mut MachBuffer, emit_info: &Self::Info, state: &mut EmitState) { let start_off = sink.cur_offset(); match self { @@ -446,7 +463,7 @@ impl MachInstEmit for Inst { } => { let (mem_insts, mem) = mem_finalize(mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } if let Some(srcloc) = srcloc { // Register the offset at which the store instruction starts. @@ -484,7 +501,7 @@ impl MachInstEmit for Inst { } => { let (mem_insts, mem) = mem_finalize(mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } if let Some(srcloc) = srcloc { // Register the offset at which the load instruction starts. @@ -537,7 +554,7 @@ impl MachInstEmit for Inst { &Inst::LoadAddr { rd, ref mem } => { let (mem_insts, mem) = mem_finalize(mem, state); for inst in mem_insts.into_iter() { - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } let inst = match mem { AMode::RegReg(reg1, reg2, shift) => { @@ -574,7 +591,7 @@ impl MachInstEmit for Inst { } _ => unreachable!(), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } &Inst::Extend { rd, @@ -617,7 +634,7 @@ impl MachInstEmit for Inst { rn: rm, imm8: UImm8::maybe_from_i64(1).unwrap(), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); if signed { let inst = Inst::AluRRImm8 { @@ -626,7 +643,7 @@ impl MachInstEmit for Inst { rn: rd.to_reg(), imm8: UImm8::maybe_from_i64(1).unwrap(), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); } } &Inst::Extend { .. 
} => { @@ -638,7 +655,7 @@ impl MachInstEmit for Inst { sink.put2(enc_16_it(cond, insts)); for inst in insts.iter() { - inst.inst.emit(sink, flags, state); + inst.inst.emit(sink, emit_info, state); } } &Inst::Push { ref reg_list } => match reg_list.len() { @@ -703,7 +720,7 @@ impl MachInstEmit for Inst { // continue: // if start_off & 0x3 != 0 { - Inst::Nop2.emit(sink, flags, state); + Inst::Nop2.emit(sink, emit_info, state); } assert_eq!(sink.cur_offset() & 0x3, 0); @@ -715,12 +732,12 @@ impl MachInstEmit for Inst { bits: 32, sign_extend: false, }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(4), }; - inst.emit(sink, flags, state); + inst.emit(sink, emit_info, state); sink.add_reloc(srcloc, Reloc::Abs4, name, offset.into()); sink.put4(0); @@ -779,7 +796,7 @@ impl MachInstEmit for Inst { emit_32(enc_32_cond_branch(cond, dest), sink); let trap = Inst::Udf { trap_info }; - trap.emit(sink, flags, state); + trap.emit(sink, emit_info, state); } &Inst::VirtualSPOffsetAdj { offset } => { debug!( diff --git a/cranelift/codegen/src/isa/arm32/mod.rs b/cranelift/codegen/src/isa/arm32/mod.rs index 2a278a233d..0aed5e3c9c 100644 --- a/cranelift/codegen/src/isa/arm32/mod.rs +++ b/cranelift/codegen/src/isa/arm32/mod.rs @@ -17,7 +17,7 @@ mod inst; mod lower; mod lower_inst; -use inst::create_reg_universe; +use inst::{create_reg_universe, EmitInfo}; /// An ARM32 backend. pub struct Arm32Backend { @@ -44,8 +44,9 @@ impl Arm32Backend { ) -> CodegenResult> { // This performs lowering to VCode, register-allocates the code, computes // block layout and finalizes branches. The result is ready for binary emission. + let emit_info = EmitInfo::new(flags.clone()); let abi = Box::new(abi::Arm32ABICallee::new(func, flags)?); - compile::compile::(func, self, abi) + compile::compile::(func, self, abi, emit_info) } } diff --git a/cranelift/codegen/src/isa/x64/inst/emit.rs b/cranelift/codegen/src/isa/x64/inst/emit.rs index edddb7e6b3..2dec527723 100644 --- a/cranelift/codegen/src/isa/x64/inst/emit.rs +++ b/cranelift/codegen/src/isa/x64/inst/emit.rs @@ -390,7 +390,7 @@ fn emit_simm(sink: &mut MachBuffer, size: u8, simm32: u32) { /// A small helper to generate a signed conversion instruction. fn emit_signed_cvt( sink: &mut MachBuffer, - flags: &settings::Flags, + info: &EmitInfo, state: &mut EmitState, src: Reg, dst: Writable, @@ -404,7 +404,7 @@ fn emit_signed_cvt( SseOpcode::Cvtsi2ss }; let inst = Inst::gpr_to_xmm(op, RegMem::reg(src), OperandSize::Size64, dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } /// Emits a one way conditional jump if CC is set (true). @@ -472,7 +472,7 @@ fn one_way_jmp(sink: &mut MachBuffer, cc: CC, label: MachLabel) { pub(crate) fn emit( inst: &Inst, sink: &mut MachBuffer, - flags: &settings::Flags, + info: &EmitInfo, state: &mut EmitState, ) { match inst { @@ -767,19 +767,19 @@ pub(crate) fn emit( // idiv %divisor // // $done: - debug_assert!(flags.avoid_div_traps()); + debug_assert!(info.flags().avoid_div_traps()); // Check if the divisor is zero, first. let inst = Inst::cmp_rmi_r(*size, RegMemImm::imm(0), divisor.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::trap_if(CC::Z, TrapCode::IntegerDivisionByZero, *loc); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let (do_op, done_label) = if kind.is_signed() { // Now check if the divisor is -1. 
let inst = Inst::cmp_rmi_r(*size, RegMemImm::imm(0xffffffff), divisor.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let do_op = sink.get_label(); @@ -796,10 +796,10 @@ pub(crate) fn emit( 0, Writable::from_reg(regs::rdx()), ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done_label)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); (Some(do_op), Some(done_label)) } else { @@ -808,18 +808,18 @@ pub(crate) fn emit( let tmp = tmp.expect("temporary for i64 sdiv"); let inst = Inst::imm(OperandSize::Size64, 0x8000000000000000, tmp); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::cmp_rmi_r(8, RegMemImm::reg(tmp.to_reg()), regs::rax()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { let inst = Inst::cmp_rmi_r(*size, RegMemImm::imm(0x80000000), regs::rax()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } // If not equal, jump over the trap. let inst = Inst::trap_if(CC::Z, TrapCode::IntegerOverflow, *loc); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); (Some(do_op), None) } @@ -840,15 +840,15 @@ pub(crate) fn emit( if kind.is_signed() { // sign-extend the sign-bit of rax into rdx, for signed opcodes. let inst = Inst::sign_extend_data(*size); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { // zero for unsigned opcodes. let inst = Inst::imm(OperandSize::Size64, 0, Writable::from_reg(regs::rdx())); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } let inst = Inst::div(*size, kind.is_signed(), RegMem::reg(divisor.to_reg()), *loc); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Lowering takes care of moving the result back into the right register, see comment // there. @@ -1382,7 +1382,7 @@ pub(crate) fn emit( SseOpcode::Movss }; let inst = Inst::xmm_unary_rm_r(op, src.clone(), *dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(next); } @@ -1623,7 +1623,7 @@ pub(crate) fn emit( // Copy the index (and make sure to clear the high 32-bits lane of tmp2). let inst = Inst::movzx_rm_r(ExtMode::LQ, RegMem::reg(*idx), *tmp2, None); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Load base address of jump table. let start_of_jumptable = sink.get_label(); @@ -1631,7 +1631,7 @@ pub(crate) fn emit( Amode::rip_relative(BranchTarget::Label(start_of_jumptable)), *tmp1, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Load value out of the jump table. It's a relative offset to the target block, so it // might be negative; use a sign-extension. @@ -1641,7 +1641,7 @@ pub(crate) fn emit( *tmp2, None, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Add base of jump table to jump-table-sourced block offset. let inst = Inst::alu_rmi_r( @@ -1650,11 +1650,11 @@ pub(crate) fn emit( RegMemImm::reg(tmp2.to_reg()), *tmp1, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Branch to computed address. let inst = Inst::jmp_unknown(RegMem::reg(tmp1.to_reg())); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Emit jump table (table of 32-bit offsets). sink.bind_label(start_of_jumptable); @@ -1683,7 +1683,7 @@ pub(crate) fn emit( // Trap! 
let inst = Inst::trap(*srcloc, *trap_code); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(else_label); } @@ -1890,7 +1890,7 @@ pub(crate) fn emit( }; let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(*lhs), rhs_dst.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::NZ, do_min_max); one_way_jmp(sink, CC::P, propagate_nan); @@ -1900,23 +1900,23 @@ pub(crate) fn emit( // case, and are no-ops otherwise. let op = if *is_min { or_op } else { and_op }; let inst = Inst::xmm_rm_r(op, RegMem::reg(*lhs), *rhs_dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // x86's min/max are not symmetric; if either operand is a NaN, they return the // read-only operand: perform an addition between the two operands, which has the // desired NaN propagation effects. sink.bind_label(propagate_nan); let inst = Inst::xmm_rm_r(add_op, RegMem::reg(*lhs), *rhs_dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::P, done); sink.bind_label(do_min_max); let inst = Inst::xmm_rm_r(min_max_op, RegMem::reg(*lhs), *rhs_dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(done); } @@ -1986,13 +1986,13 @@ pub(crate) fn emit( let constant_start_label = sink.get_label(); let load_offset = Amode::rip_relative(BranchTarget::Label(constant_start_label)); let load = Inst::load(*ty, load_offset, *dst, ExtKind::None, None); - load.emit(sink, flags, state); + load.emit(sink, info, state); // Jump over the constant. let constant_end_label = sink.get_label(); let continue_at_offset = BranchTarget::Label(constant_end_label); let jump = Inst::jmp_known(continue_at_offset); - jump.emit(sink, flags, state); + jump.emit(sink, info, state); // Emit the constant. sink.bind_label(constant_start_label); @@ -2151,30 +2151,30 @@ pub(crate) fn emit( // thing. // TODO use tst src, src here. let inst = Inst::cmp_rmi_r(8, RegMemImm::imm(0), src.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::L, handle_negative); // Handle a positive int64, which is the "easy" case: a signed conversion will do the // right thing. - emit_signed_cvt(sink, flags, state, src.to_reg(), *dst, *to_f64); + emit_signed_cvt(sink, info, state, src.to_reg(), *dst, *to_f64); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(handle_negative); // Divide x by two to get it in range for the signed conversion, keep the LSB, and // scale it back up on the FP side. 
let inst = Inst::gen_move(*tmp_gpr1, src.to_reg(), types::I64); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // tmp_gpr1 := src >> 1 let inst = Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(1), *tmp_gpr1); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::gen_move(*tmp_gpr2, src.to_reg(), types::I64); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::alu_rmi_r( true, /* 64bits */ @@ -2182,7 +2182,7 @@ pub(crate) fn emit( RegMemImm::imm(1), *tmp_gpr2, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::alu_rmi_r( true, /* 64bits */ @@ -2190,9 +2190,9 @@ pub(crate) fn emit( RegMemImm::reg(tmp_gpr1.to_reg()), *tmp_gpr2, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); - emit_signed_cvt(sink, flags, state, tmp_gpr2.to_reg(), *dst, *to_f64); + emit_signed_cvt(sink, info, state, tmp_gpr2.to_reg(), *dst, *to_f64); let add_op = if *to_f64 { SseOpcode::Addsd @@ -2200,7 +2200,7 @@ pub(crate) fn emit( SseOpcode::Addss }; let inst = Inst::xmm_rm_r(add_op, RegMem::reg(dst.to_reg()), *dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(done); } @@ -2273,18 +2273,18 @@ pub(crate) fn emit( // The truncation. let inst = Inst::xmm_to_gpr(trunc_op, src, *dst, *dst_size); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Compare against 1, in case of overflow the dst operand was INT_MIN. let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(1), dst.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::NO, done); // no overflow => done // Check for NaN. let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(src), src); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::NP, not_nan); // go to not_nan if not a NaN @@ -2296,10 +2296,10 @@ pub(crate) fn emit( RegMemImm::reg(dst.to_reg()), *dst, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); sink.bind_label(not_nan); @@ -2308,10 +2308,10 @@ pub(crate) fn emit( // Zero out tmp_xmm. let inst = Inst::xmm_rm_r(SseOpcode::Xorpd, RegMem::reg(tmp_xmm.to_reg()), *tmp_xmm); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(src), tmp_xmm.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Jump if >= to done. one_way_jmp(sink, CC::NB, done); @@ -2319,16 +2319,16 @@ pub(crate) fn emit( // Otherwise, put INT_MAX. 
if *dst_size == OperandSize::Size64 { let inst = Inst::imm(OperandSize::Size64, 0x7fffffffffffffff, *dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { let inst = Inst::imm(OperandSize::Size32, 0x7fffffff, *dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } } else { let check_positive = sink.get_label(); let inst = Inst::trap(*srcloc, TrapCode::BadConversionToInteger); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // Check if INT_MIN was the correct result: determine the smallest floating point // number that would convert to INT_MIN, put it in a temporary register, and compare @@ -2344,7 +2344,7 @@ pub(crate) fn emit( OperandSize::Size32 => { let cst = Ieee32::pow2(output_bits - 1).neg().bits(); let inst = Inst::imm(OperandSize::Size32, cst as u64, *tmp_gpr); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } OperandSize::Size64 => { // An f64 can represent `i32::min_value() - 1` exactly with precision to spare, @@ -2356,22 +2356,22 @@ pub(crate) fn emit( Ieee64::pow2(output_bits - 1).neg() }; let inst = Inst::imm(OperandSize::Size64, cst.bits(), *tmp_gpr); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } } let inst = Inst::gpr_to_xmm(cast_op, RegMem::reg(tmp_gpr.to_reg()), *src_size, *tmp_xmm); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(tmp_xmm.to_reg()), src); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // jump over trap if src >= or > threshold one_way_jmp(sink, no_overflow_cc, check_positive); let inst = Inst::trap(*srcloc, TrapCode::IntegerOverflow); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); // If positive, it was a real overflow. @@ -2380,15 +2380,15 @@ pub(crate) fn emit( // Zero out the tmp_xmm register. let inst = Inst::xmm_rm_r(SseOpcode::Xorpd, RegMem::reg(tmp_xmm.to_reg()), *tmp_xmm); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(src), tmp_xmm.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::NB, done); // jump over trap if 0 >= src let inst = Inst::trap(*srcloc, TrapCode::IntegerOverflow); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } sink.bind_label(done); @@ -2464,14 +2464,14 @@ pub(crate) fn emit( }; let inst = Inst::imm(*src_size, cst, *tmp_gpr); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::gpr_to_xmm(cast_op, RegMem::reg(tmp_gpr.to_reg()), *src_size, *tmp_xmm); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::xmm_cmp_rm_r(cmp_op, RegMem::reg(tmp_xmm.to_reg()), src.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let handle_large = sink.get_label(); one_way_jmp(sink, CC::NB, handle_large); // jump to handle_large if src >= large_threshold @@ -2487,14 +2487,14 @@ pub(crate) fn emit( RegMemImm::reg(dst.to_reg()), *dst, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { // Trap. let inst = Inst::trap(*srcloc, TrapCode::BadConversionToInteger); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } sink.bind_label(not_nan); @@ -2503,10 +2503,10 @@ pub(crate) fn emit( // overflow. 
let inst = Inst::xmm_to_gpr(trunc_op, src.to_reg(), *dst, *dst_size); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(0), dst.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); one_way_jmp(sink, CC::NL, done); // if dst >= 0, jump to done @@ -2519,14 +2519,14 @@ pub(crate) fn emit( RegMemImm::reg(dst.to_reg()), *dst, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { // Trap. let inst = Inst::trap(*srcloc, TrapCode::IntegerOverflow); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } // Now handle large inputs. @@ -2534,13 +2534,13 @@ pub(crate) fn emit( sink.bind_label(handle_large); let inst = Inst::xmm_rm_r(sub_op, RegMem::reg(tmp_xmm.to_reg()), *src); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::xmm_to_gpr(trunc_op, src.to_reg(), *dst, *dst_size); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(0), dst.to_reg()); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let next_is_large = sink.get_label(); one_way_jmp(sink, CC::NL, next_is_large); // if dst >= 0, jump to next_is_large @@ -2557,20 +2557,20 @@ pub(crate) fn emit( }, *dst, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::jmp_known(BranchTarget::Label(done)); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { let inst = Inst::trap(*srcloc, TrapCode::IntegerOverflow); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } sink.bind_label(next_is_large); if *dst_size == OperandSize::Size64 { let inst = Inst::imm(OperandSize::Size64, 1 << 63, *tmp_gpr); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); let inst = Inst::alu_rmi_r( true, @@ -2578,11 +2578,11 @@ pub(crate) fn emit( RegMemImm::reg(tmp_gpr.to_reg()), *dst, ); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } else { let inst = Inst::alu_rmi_r(false, AluRmiROpcode::Add, RegMemImm::imm(1 << 31), *dst); - inst.emit(sink, flags, state); + inst.emit(sink, info, state); } sink.bind_label(done); @@ -2600,7 +2600,7 @@ pub(crate) fn emit( sink.put1(0x48 | ((enc_dst >> 3) & 1)); sink.put1(0xB8 | (enc_dst & 7)); sink.add_reloc(*srcloc, Reloc::Abs8, name, *offset); - if flags.emit_all_ones_funcaddrs() { + if info.flags().emit_all_ones_funcaddrs() { sink.put8(u64::max_value()); } else { sink.put8(0); @@ -2664,14 +2664,14 @@ pub(crate) fn emit( // mov{zbq,zwq,zlq,q} (%r9), %rax // No need to call `add_trap` here, since the `i1` emit will do that. let i1 = Inst::load(*ty, amode.clone(), rax_w, ExtKind::ZeroExtend, *srcloc); - i1.emit(sink, flags, state); + i1.emit(sink, info, state); // again: sink.bind_label(again_label); // movq %rax, %r11 let i2 = Inst::mov_r_r(true, rax, r11_w); - i2.emit(sink, flags, state); + i2.emit(sink, info, state); // opq %r10, %r11 let r10_rmi = RegMemImm::reg(r10); @@ -2688,7 +2688,7 @@ pub(crate) fn emit( }; Inst::alu_rmi_r(true, alu_op, r10_rmi, r11_w) }; - i3.emit(sink, flags, state); + i3.emit(sink, info, state); // lock cmpxchg{b,w,l,q} %r11, (%r9) // No need to call `add_trap` here, since the `i4` emit will do that. 
@@ -2698,7 +2698,7 @@ pub(crate) fn emit( dst: amode.into(), srcloc: *srcloc, }; - i4.emit(sink, flags, state); + i4.emit(sink, info, state); // jnz again one_way_jmp(sink, CC::NZ, again_label); diff --git a/cranelift/codegen/src/isa/x64/inst/emit_tests.rs b/cranelift/codegen/src/isa/x64/inst/emit_tests.rs index e2f3c9d089..4bcf1d52f0 100644 --- a/cranelift/codegen/src/isa/x64/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/x64/inst/emit_tests.rs @@ -14,6 +14,7 @@ use super::*; use crate::isa::test_utils; +use crate::isa::x64; use alloc::vec::Vec; #[test] @@ -3660,14 +3661,22 @@ fn test_x64_emit() { // ======================================================== // Actually run the tests! let flags = settings::Flags::new(settings::builder()); + + use crate::settings::Configurable; + let mut isa_flag_builder = x64::settings::builder(); + isa_flag_builder.enable("has_ssse3").unwrap(); + isa_flag_builder.enable("has_sse41").unwrap(); + let isa_flags = x64::settings::Flags::new(&flags, isa_flag_builder); + let rru = regs::create_reg_universe_systemv(&flags); + let emit_info = EmitInfo::new(flags, isa_flags); for (insn, expected_encoding, expected_printing) in insns { // Check the printed text is as expected. let actual_printing = insn.show_rru(Some(&rru)); assert_eq!(expected_printing, actual_printing); let mut sink = test_utils::TestCodeSink::new(); let mut buffer = MachBuffer::new(); - insn.emit(&mut buffer, &flags, &mut Default::default()); + insn.emit(&mut buffer, &emit_info, &mut Default::default()); let buffer = buffer.finish(); buffer.emit(&mut sink); let actual_encoding = &sink.stringify(); diff --git a/cranelift/codegen/src/isa/x64/inst/mod.rs b/cranelift/codegen/src/isa/x64/inst/mod.rs index e2f8a6c611..d57c01ced5 100644 --- a/cranelift/codegen/src/isa/x64/inst/mod.rs +++ b/cranelift/codegen/src/isa/x64/inst/mod.rs @@ -3,6 +3,7 @@ use crate::binemit::{CodeOffset, StackMap}; use crate::ir::{types, ExternalName, Opcode, SourceLoc, TrapCode, Type}; +use crate::isa::x64::settings as x64_settings; use crate::machinst::*; use crate::{settings, settings::Flags, CodegenError, CodegenResult}; use alloc::boxed::Box; @@ -2559,11 +2560,30 @@ pub struct EmitState { stack_map: Option, } +/// Constant state used during emissions of a sequence of instructions. +pub struct EmitInfo { + flags: settings::Flags, + isa_flags: x64_settings::Flags, +} + +impl EmitInfo { + pub(crate) fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self { + Self { flags, isa_flags } + } +} + +impl MachInstEmitInfo for EmitInfo { + fn flags(&self) -> &Flags { + &self.flags + } +} + impl MachInstEmit for Inst { type State = EmitState; + type Info = EmitInfo; - fn emit(&self, sink: &mut MachBuffer, flags: &settings::Flags, state: &mut Self::State) { - emit::emit(self, sink, flags, state); + fn emit(&self, sink: &mut MachBuffer, info: &Self::Info, state: &mut Self::State) { + emit::emit(self, sink, info, state); } fn pretty_print(&self, mb_rru: Option<&RealRegUniverse>, _: &mut Self::State) -> String { diff --git a/cranelift/codegen/src/isa/x64/mod.rs b/cranelift/codegen/src/isa/x64/mod.rs index 211bc50081..366765c6aa 100644 --- a/cranelift/codegen/src/isa/x64/mod.rs +++ b/cranelift/codegen/src/isa/x64/mod.rs @@ -1,5 +1,7 @@ //! X86_64-bit Instruction Set Architecture. 
+use self::inst::EmitInfo; + use super::TargetIsa; use crate::ir::{condcodes::IntCC, Function}; use crate::isa::x64::{inst::regs::create_reg_universe_systemv, settings as x64_settings}; @@ -20,7 +22,7 @@ mod settings; pub(crate) struct X64Backend { triple: Triple, flags: Flags, - _x64_flags: x64_settings::Flags, + x64_flags: x64_settings::Flags, reg_universe: RealRegUniverse, } @@ -31,7 +33,7 @@ impl X64Backend { Self { triple, flags, - _x64_flags: x64_flags, + x64_flags, reg_universe, } } @@ -39,8 +41,9 @@ impl X64Backend { fn compile_vcode(&self, func: &Function, flags: Flags) -> CodegenResult> { // This performs lowering to VCode, register-allocates the code, computes // block layout and finalizes branches. The result is ready for binary emission. + let emit_info = EmitInfo::new(flags.clone(), self.x64_flags.clone()); let abi = Box::new(abi::X64ABICallee::new(&func, flags)?); - compile::compile::(&func, self, abi) + compile::compile::(&func, self, abi, emit_info) } } @@ -52,6 +55,7 @@ impl MachBackend for X64Backend { ) -> CodegenResult { let flags = self.flags(); let vcode = self.compile_vcode(func, flags.clone())?; + let buffer = vcode.emit(); let buffer = buffer.finish(); let frame_size = vcode.frame_size(); diff --git a/cranelift/codegen/src/machinst/buffer.rs b/cranelift/codegen/src/machinst/buffer.rs index ec9f296402..d0189bfa3a 100644 --- a/cranelift/codegen/src/machinst/buffer.rs +++ b/cranelift/codegen/src/machinst/buffer.rs @@ -1421,7 +1421,7 @@ impl MachBranch { mod test { use super::*; use crate::isa::aarch64::inst::xreg; - use crate::isa::aarch64::inst::{BranchTarget, CondBrKind, Inst}; + use crate::isa::aarch64::inst::{BranchTarget, CondBrKind, EmitInfo, Inst}; use crate::machinst::MachInstEmit; use crate::settings; use std::default::Default; @@ -1435,14 +1435,14 @@ mod test { #[test] fn test_elide_jump_to_next() { - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); buf.reserve_labels_for_blocks(2); buf.bind_label(label(0)); let inst = Inst::Jump { dest: target(1) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); let buf = buf.finish(); assert_eq!(0, buf.total_size()); @@ -1450,7 +1450,7 @@ mod test { #[test] fn test_elide_trivial_jump_blocks() { - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1462,15 +1462,15 @@ mod test { taken: target(1), not_taken: target(2), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); let inst = Inst::Jump { dest: target(3) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(2)); let inst = Inst::Jump { dest: target(3) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(3)); @@ -1480,7 +1480,7 @@ mod test { #[test] fn test_flip_cond() { - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1492,17 +1492,17 @@ mod test { taken: target(1), not_taken: target(2), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); 
let inst = Inst::Udf { trap_info: (SourceLoc::default(), TrapCode::Interrupt), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(2)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(3)); @@ -1514,9 +1514,9 @@ mod test { kind: CondBrKind::NotZero(xreg(0)), trap_info: (SourceLoc::default(), TrapCode::Interrupt), }; - inst.emit(&mut buf2, &flags, &mut state); + inst.emit(&mut buf2, &info, &mut state); let inst = Inst::Nop4; - inst.emit(&mut buf2, &flags, &mut state); + inst.emit(&mut buf2, &info, &mut state); let buf2 = buf2.finish(); @@ -1525,7 +1525,7 @@ mod test { #[test] fn test_island() { - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1537,7 +1537,7 @@ mod test { taken: target(2), not_taken: target(3), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); while buf.cur_offset() < 2000000 { @@ -1545,16 +1545,16 @@ mod test { buf.emit_island(); } let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); } buf.bind_label(label(2)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(3)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); let buf = buf.finish(); @@ -1567,7 +1567,7 @@ mod test { taken: BranchTarget::ResolvedOffset(1048576 - 4), not_taken: BranchTarget::ResolvedOffset(2000000 + 4 - 4), }; - inst.emit(&mut buf2, &flags, &mut state); + inst.emit(&mut buf2, &info, &mut state); let buf2 = buf2.finish(); @@ -1576,7 +1576,7 @@ mod test { #[test] fn test_island_backward() { - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1584,16 +1584,16 @@ mod test { buf.bind_label(label(0)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(2)); while buf.cur_offset() < 2000000 { let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); } buf.bind_label(label(3)); @@ -1602,7 +1602,7 @@ mod test { taken: target(0), not_taken: target(1), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); let buf = buf.finish(); @@ -1615,11 +1615,11 @@ mod test { taken: BranchTarget::ResolvedOffset(8), not_taken: BranchTarget::ResolvedOffset(4 - (2000000 + 4)), }; - inst.emit(&mut buf2, &flags, &mut state); + inst.emit(&mut buf2, &info, &mut state); let inst = Inst::Jump { dest: BranchTarget::ResolvedOffset(-(2000000 + 8)), }; - inst.emit(&mut buf2, &flags, &mut state); + inst.emit(&mut buf2, &info, &mut state); let buf2 = buf2.finish(); @@ -1661,7 +1661,7 @@ mod test { // label7: // ret - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1673,38 +1673,38 @@ mod test { taken: target(1), not_taken: 
target(2), }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); let inst = Inst::Jump { dest: target(3) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(2)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); + inst.emit(&mut buf, &info, &mut state); let inst = Inst::Jump { dest: target(0) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(3)); let inst = Inst::Jump { dest: target(4) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(4)); let inst = Inst::Jump { dest: target(5) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(5)); let inst = Inst::Jump { dest: target(7) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(6)); let inst = Inst::Nop4; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(7)); let inst = Inst::Ret; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); let buf = buf.finish(); @@ -1737,7 +1737,7 @@ mod test { // // label0, label1, ..., label4: // b label0 - let flags = settings::Flags::new(settings::builder()); + let info = EmitInfo::new(settings::Flags::new(settings::builder())); let mut buf = MachBuffer::new(); let mut state = Default::default(); @@ -1745,23 +1745,23 @@ mod test { buf.bind_label(label(0)); let inst = Inst::Jump { dest: target(1) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(1)); let inst = Inst::Jump { dest: target(2) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(2)); let inst = Inst::Jump { dest: target(3) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(3)); let inst = Inst::Jump { dest: target(4) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); buf.bind_label(label(4)); let inst = Inst::Jump { dest: target(1) }; - inst.emit(&mut buf, &flags, &mut state); + inst.emit(&mut buf, &info, &mut state); let buf = buf.finish(); diff --git a/cranelift/codegen/src/machinst/compile.rs b/cranelift/codegen/src/machinst/compile.rs index 1264340a30..9a00cee805 100644 --- a/cranelift/codegen/src/machinst/compile.rs +++ b/cranelift/codegen/src/machinst/compile.rs @@ -14,6 +14,7 @@ pub fn compile( f: &Function, b: &B, abi: Box>, + emit_info: ::Info, ) -> CodegenResult> where B::MInst: PrettyPrint, @@ -21,7 +22,7 @@ where // Compute lowered block order. let block_order = BlockLoweringOrder::new(f); // Build the lowering context. - let lower = Lower::new(f, abi, block_order)?; + let lower = Lower::new(f, abi, emit_info, block_order)?; // Lower the IR. 
let (mut vcode, stack_map_request_info) = { let _tt = timing::vcode_lower(); diff --git a/cranelift/codegen/src/machinst/lower.rs b/cranelift/codegen/src/machinst/lower.rs index 38732fc6e9..abac6fa37d 100644 --- a/cranelift/codegen/src/machinst/lower.rs +++ b/cranelift/codegen/src/machinst/lower.rs @@ -315,9 +315,10 @@ impl<'func, I: VCodeInst> Lower<'func, I> { pub fn new( f: &'func Function, abi: Box>, + emit_info: I::Info, block_order: BlockLoweringOrder, ) -> CodegenResult> { - let mut vcode = VCodeBuilder::new(abi, block_order); + let mut vcode = VCodeBuilder::new(abi, emit_info, block_order); let mut next_vreg: u32 = 0; diff --git a/cranelift/codegen/src/machinst/mod.rs b/cranelift/codegen/src/machinst/mod.rs index 3a470d852c..d86e09ddbc 100644 --- a/cranelift/codegen/src/machinst/mod.rs +++ b/cranelift/codegen/src/machinst/mod.rs @@ -275,12 +275,21 @@ pub enum MachTerminator<'a> { pub trait MachInstEmit: MachInst { /// Persistent state carried across `emit` invocations. type State: MachInstEmitState; + /// Constant information used in `emit` invocations. + type Info: MachInstEmitInfo; /// Emit the instruction. - fn emit(&self, code: &mut MachBuffer, flags: &Flags, state: &mut Self::State); + fn emit(&self, code: &mut MachBuffer, info: &Self::Info, state: &mut Self::State); /// Pretty-print the instruction. fn pretty_print(&self, mb_rru: Option<&RealRegUniverse>, state: &mut Self::State) -> String; } +/// Constant information used to emit an instruction. +pub trait MachInstEmitInfo { + /// Return the target-independent settings used for the compilation of this + /// particular function. + fn flags(&self) -> &Flags; +} + /// A trait describing the emission state carried between MachInsts when /// emitting a function body. pub trait MachInstEmitState: Default + Clone + Debug { diff --git a/cranelift/codegen/src/machinst/vcode.rs b/cranelift/codegen/src/machinst/vcode.rs index 26176a7411..aa9b4d4d45 100644 --- a/cranelift/codegen/src/machinst/vcode.rs +++ b/cranelift/codegen/src/machinst/vcode.rs @@ -88,6 +88,10 @@ pub struct VCode { /// ABI object. abi: Box>, + /// Constant information used during code emission. This should be + /// immutable across function compilations within the same module. + emit_info: I::Info, + /// Safepoint instruction indices. Filled in post-regalloc. (Prior to /// regalloc, the safepoint instructions are listed in the separate /// `StackmapRequestInfo` held separate from the `VCode`.) @@ -132,9 +136,13 @@ pub struct VCodeBuilder { impl VCodeBuilder { /// Create a new VCodeBuilder. - pub fn new(abi: Box>, block_order: BlockLoweringOrder) -> VCodeBuilder { + pub fn new( + abi: Box>, + emit_info: I::Info, + block_order: BlockLoweringOrder, + ) -> VCodeBuilder { let reftype_class = I::ref_type_regclass(abi.flags()); - let vcode = VCode::new(abi, block_order); + let vcode = VCode::new(abi, emit_info, block_order); let stack_map_info = StackmapRequestInfo { reftype_class, reftyped_vregs: vec![], @@ -263,7 +271,11 @@ fn is_reftype(ty: Type) -> bool { impl VCode { /// New empty VCode. 
- fn new(abi: Box>, block_order: BlockLoweringOrder) -> VCode { + fn new( + abi: Box>, + emit_info: I::Info, + block_order: BlockLoweringOrder, + ) -> VCode { VCode { liveins: abi.liveins(), liveouts: abi.liveouts(), @@ -277,6 +289,7 @@ impl VCode { block_succs: vec![], block_order, abi, + emit_info, safepoint_insns: vec![], safepoint_slots: vec![], } @@ -431,7 +444,6 @@ impl VCode { buffer.reserve_labels_for_blocks(self.num_blocks() as BlockIndex); // first N MachLabels are simply block indices. - let flags = self.abi.flags(); let mut safepoint_idx = 0; let mut cur_srcloc = None; for block in 0..self.num_blocks() { @@ -440,7 +452,7 @@ impl VCode { while new_offset > buffer.cur_offset() { // Pad with NOPs up to the aligned block offset. let nop = I::gen_nop((new_offset - buffer.cur_offset()) as usize); - nop.emit(&mut buffer, flags, &mut Default::default()); + nop.emit(&mut buffer, &self.emit_info, &mut Default::default()); } assert_eq!(buffer.cur_offset(), new_offset); @@ -469,7 +481,7 @@ impl VCode { safepoint_idx += 1; } - self.insts[iix as usize].emit(&mut buffer, flags, &mut state); + self.insts[iix as usize].emit(&mut buffer, &self.emit_info, &mut state); } if cur_srcloc.is_some() {
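
Taken together, the hunks above replace the bare `&settings::Flags` parameter of `MachInstEmit::emit` with an ISA-defined `EmitInfo` value: `machinst/mod.rs` gains a `MachInstEmitInfo` trait plus an `Info` associated type on `MachInstEmit`, each backend defines its own `EmitInfo`, and the value is built once per function, handed to `compile::compile`, threaded through `Lower::new` and `VCodeBuilder::new`, stored on `VCode`, and finally used by `VCode::emit()` in place of `self.abi.flags()`. The following is a self-contained sketch of that shape; `Flags`, `MachBuffer`, and the surrounding machinery are reduced to stand-ins, and only the names that also appear in the hunks are taken from the patch.

```rust
use core::marker::PhantomData;

// Stand-ins for `settings::Flags` and `MachBuffer<I>`; only the shape matters here.
pub struct Flags;
pub struct MachBuffer<I>(PhantomData<I>);

/// Constant information used to emit an instruction (the trait added to
/// `machinst/mod.rs`).
pub trait MachInstEmitInfo {
    /// Target-independent settings used for the compilation of this function.
    fn flags(&self) -> &Flags;
}

pub trait MachInstEmit: Sized {
    /// Persistent, mutable state carried across `emit` invocations (unchanged
    /// by the patch; its real `MachInstEmitState` bound is elided here).
    type State: Default;
    /// Constant information used in `emit` invocations (new in the patch).
    type Info: MachInstEmitInfo;
    /// `&Flags` is gone from the signature; the info object replaces it.
    fn emit(&self, sink: &mut MachBuffer<Self>, info: &Self::Info, state: &mut Self::State);
}

/// AArch64 and ARM32 only need the shared flags, so their `EmitInfo` is a
/// thin wrapper around them.
pub struct EmitInfo(Flags);

impl EmitInfo {
    pub fn new(flags: Flags) -> Self {
        Self(flags)
    }
}

impl MachInstEmitInfo for EmitInfo {
    fn flags(&self) -> &Flags {
        &self.0
    }
}

/// Minimal model of the `VCode` side: the info is stored once and reused for
/// every instruction, where the old code re-read `self.abi.flags()`.
pub struct VCode<I: MachInstEmit> {
    insts: Vec<I>,
    emit_info: I::Info,
}

impl<I: MachInstEmit> VCode<I> {
    pub fn new(emit_info: I::Info) -> Self {
        VCode { insts: vec![], emit_info }
    }

    pub fn emit(&self) -> MachBuffer<I> {
        let mut buffer = MachBuffer(PhantomData);
        let mut state = I::State::default();
        for inst in &self.insts {
            // Before this patch: inst.emit(&mut buffer, flags, &mut state);
            inst.emit(&mut buffer, &self.emit_info, &mut state);
        }
        buffer
    }
}
```

Storing the info on `VCode` rather than re-deriving flags at every call site keeps emission-time configuration in one place (the field's doc comment notes it should be immutable across function compilations within a module) and lets an ISA attach extra data to emission without touching the shared trait again; the unit tests in `emit_tests.rs` and `machinst/buffer.rs` are updated accordingly to build an `EmitInfo` once and pass `&emit_info` instead of `&flags`.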
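
On the backend side, each `compile_vcode` now constructs the `EmitInfo` before lowering and passes it as a new fourth argument to `compile::compile`. The fragment below restates the aarch64 hunk for readability; it is not standalone, and the generic arguments (`CodegenResult<VCode<inst::Inst>>`, `compile::compile::<AArch64Backend>`), which the hunks abbreviate, are assumptions rather than text taken verbatim from the patch. The arm32 and x64 backends follow the same pattern.

```rust
// Fragment of `AArch64Backend::compile_vcode` after the patch; generic
// arguments are assumed, not quoted from the hunks.
fn compile_vcode(
    &self,
    func: &Function,
    flags: settings::Flags,
) -> CodegenResult<VCode<inst::Inst>> {
    // The flags are cloned once into the per-compilation EmitInfo; the ABI
    // object keeps its own copy exactly as before.
    let emit_info = EmitInfo::new(flags.clone());
    let abi = Box::new(abi::AArch64ABICallee::new(func, flags)?);
    // `compile::compile` grew a fourth parameter, which it forwards through
    // `Lower::new` and `VCodeBuilder::new` into the `VCode::emit_info` field.
    compile::compile::<AArch64Backend>(func, self, abi, emit_info)
}
```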
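
x64 is the one backend whose `EmitInfo` carries more than the shared flags: it also stores the ISA-specific `x64_settings::Flags` (hence the `_x64_flags` field being renamed to `x64_flags` and cloned into `EmitInfo::new(flags.clone(), self.x64_flags.clone())`), presumably so that emission can later consult CPU feature settings such as `has_ssse3` and `has_sse41`, which the updated `emit_tests.rs` enables when building its `EmitInfo`. A sketch of that variant, continuing the stand-ins from the first sketch; the struct is named `X64EmitInfo` here only to avoid clashing with the generic wrapper above, while in the patch it is simply `isa::x64::inst::EmitInfo`.

```rust
// Continues the stand-in types from the first sketch (`Flags`,
// `MachInstEmitInfo`). `X64Flags` stands in for `isa::x64::settings::Flags`.
pub struct X64Flags;

/// The x64 emission info bundles shared and ISA-specific settings; only the
/// shared flags are exposed through the `MachInstEmitInfo` trait.
pub struct X64EmitInfo {
    flags: Flags,
    isa_flags: X64Flags,
}

impl X64EmitInfo {
    pub fn new(flags: Flags, isa_flags: X64Flags) -> Self {
        Self { flags, isa_flags }
    }
}

impl MachInstEmitInfo for X64EmitInfo {
    fn flags(&self) -> &Flags {
        &self.flags
    }
}
```

In the test update, the ISA flags are built from `x64::settings::builder()` with `has_ssse3` and `has_sse41` enabled, combined with the shared flags via `x64::settings::Flags::new(&flags, isa_flag_builder)`, and the resulting `EmitInfo::new(flags, isa_flags)` replaces the plain `&flags` argument in every `insn.emit(...)` call.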