From 9c5c872b3b134db21a5112135c83b6fd90045400 Mon Sep 17 00:00:00 2001 From: Ulrich Weigand Date: Tue, 8 Feb 2022 22:48:44 +0100 Subject: [PATCH] s390x: Add support for all remaining atomic operations (#3746) This adds support for all atomic operations that were unimplemented so far in the s390x back end: - atomic_rmw operations xchg, nand, smin, smax, umin, umax - $I8 and $I16 versions of atomic_rmw and atomic_cas - little endian versions of atomic_rmw and atomic_cas All of these have to be implemented by a compare-and-swap loop; and for the $I8 and $I16 versions the actual atomic instruction needs to operate on the surrounding aligned 32-bit word. Since we cannot emit new control flow during ISLE instruction selection, these compare-and-swap loops are emitted as a single meta-instruction to be expanded at emit time. However, since there is a large number of different versions of the loop required to implement all the above operations, I've implemented a facility to allow specifying the loop bodies from within ISLE after all, by creating a vector of MInst structures that will be emitted as part of the meta-instruction. There are still restrictions, in particular instructions that are part of the loop body may not modify any virtual register. But even so, this approach looks preferable to doing everything in emit.rs. A few instructions needed in those compare-and-swap loop bodies were added as well, in particular the RxSBG family of instructions as well as the LOAD REVERSED in-register byte-swap instructions. This patch also adds filetest runtests to verify the semantics of all operations, in particular the subword and little-endian variants (those are currently only executed on s390x). 
--- build.rs | 2 - cranelift/codegen/src/isa/s390x/inst.isle | 306 ++ cranelift/codegen/src/isa/s390x/inst/emit.rs | 117 + .../codegen/src/isa/s390x/inst/emit_tests.rs | 131 + cranelift/codegen/src/isa/s390x/inst/mod.rs | 109 +- cranelift/codegen/src/isa/s390x/lower.isle | 297 +- cranelift/codegen/src/isa/s390x/lower/isle.rs | 38 + .../s390x/lower/isle/generated_code.manifest | 4 +- .../isa/s390x/lower/isle/generated_code.rs | 3040 +++++++++++++---- .../isa/s390x/atomic_cas-little.clif | 63 + .../filetests/isa/s390x/atomic_cas.clif | 29 + .../isa/s390x/atomic_rmw-arch13.clif | 105 + .../isa/s390x/atomic_rmw-little.clif | 620 ++++ .../filetests/isa/s390x/atomic_rmw.clif | 466 +++ .../filetests/runtests/atomic-cas-little.clif | 40 + .../runtests/atomic-cas-subword.clif | 86 + .../filetests/runtests/atomic-cas.clif | 43 + .../filetests/runtests/atomic-rmw-2.clif | 238 -- .../filetests/runtests/atomic-rmw-little.clif | 429 +++ .../runtests/atomic-rmw-subword.clif | 907 +++++ .../filetests/runtests/atomic-rmw.clif | 234 ++ 21 files changed, 6413 insertions(+), 891 deletions(-) create mode 100644 cranelift/filetests/filetests/isa/s390x/atomic_cas-little.clif create mode 100644 cranelift/filetests/filetests/isa/s390x/atomic_rmw-arch13.clif create mode 100644 cranelift/filetests/filetests/isa/s390x/atomic_rmw-little.clif create mode 100644 cranelift/filetests/filetests/runtests/atomic-cas-little.clif create mode 100644 cranelift/filetests/filetests/runtests/atomic-cas-subword.clif create mode 100644 cranelift/filetests/filetests/runtests/atomic-cas.clif delete mode 100644 cranelift/filetests/filetests/runtests/atomic-rmw-2.clif create mode 100644 cranelift/filetests/filetests/runtests/atomic-rmw-little.clif create mode 100644 cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif diff --git a/build.rs b/build.rs index ee11d9572b..ddf4fbfe4a 100644 --- a/build.rs +++ b/build.rs @@ -173,8 +173,6 @@ fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool { 
// No simd support yet for s390x. ("simd", _) if platform_is_s390x() => return true, ("memory64", "simd") if platform_is_s390x() => return true, - // No full atomics support yet for s390x. - ("memory64", "threads") if platform_is_s390x() => return true, _ => {} }, _ => panic!("unrecognized strategy"), diff --git a/cranelift/codegen/src/isa/s390x/inst.isle b/cranelift/codegen/src/isa/s390x/inst.isle index f5122148bc..33b233758f 100644 --- a/cranelift/codegen/src/isa/s390x/inst.isle +++ b/cranelift/codegen/src/isa/s390x/inst.isle @@ -112,6 +112,26 @@ (shift_imm u8) (shift_reg Reg)) + ;; A rotate-then--selected-bits instruction with a register + ;; in/out-operand, another register source, and three immediates. + (RxSBG + (op RxSBGOp) + (rd WritableReg) + (rn Reg) + (start_bit u8) + (end_bit u8) + (rotate_amt i8)) + + ;; The test-only version of RxSBG, which does not modify any register + ;; but only sets the condition code. + (RxSBGTest + (op RxSBGOp) + (rd Reg) + (rn Reg) + (start_bit u8) + (end_bit u8) + (rotate_amt i8)) + ;; An unary operation with a register source and a register destination. (UnaryRR (op UnaryOp) @@ -658,6 +678,19 @@ (rd WritableReg) (mem MemArg)) + ;; Meta-instruction to emit a loop around a sequence of instructions. + ;; This control flow is not visible to the compiler core, in particular + ;; the register allocator. Therefore, instructions in the loop may not + ;; write to any virtual register, so any writes must use reserved hard + ;; registers (e.g. %r0, %r1). *Reading* virtual registers is OK. + (Loop + (body VecMInst) + (cond Cond)) + + ;; Conditional branch breaking out of a loop emitted via Loop. + (CondBreak + (cond Cond)) + ;; Marker, no-op in generated code SP "virtual offset" is adjusted. This ;; controls how MemArg::NominalSPOffset args are lowered. (VirtualSPOffsetAdj @@ -732,6 +765,8 @@ (Neg64Ext32) (PopcntByte) (PopcntReg) + (BSwap32) + (BSwap64) )) ;; A shift operation. 
@@ -747,6 +782,15 @@ (AShR64) )) +;; A rotate-then--selected-bits operation. +(type RxSBGOp + (enum + (Insert) + (And) + (Or) + (Xor) +)) + ;; An integer comparison operation. (type CmpOp (enum @@ -1395,6 +1439,13 @@ (_ Unit (emit (MInst.ShiftRR op dst src shift_imm shift_reg)))) (writable_reg_to_reg dst))) +;; Helper for emitting `MInst.RxSBGTest` instructions. +(decl rxsbg_test (RxSBGOp Reg Reg u8 u8 i8) ProducesFlags) +(rule (rxsbg_test op src1 src2 start_bit end_bit rotate_amt) + (ProducesFlags.ProducesFlags (MInst.RxSBGTest op src1 src2 + start_bit end_bit rotate_amt) + (invalid_reg))) + ;; Helper for emitting `MInst.UnaryRR` instructions. (decl unary_rr (Type UnaryOp Reg) Reg) (rule (unary_rr ty op src) @@ -1719,6 +1770,95 @@ result)) +;; Helpers for instruction sequences ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +;; Completed instruction sequence for use in MInst.Loop. +(type VecMInst (primitive VecMInst)) + +;; Partial (mutable) instruction sequence in the process of being created. +(type VecMInstBuilder extern (enum)) + +;; Create a new empty instruction sequence builder. +(decl inst_builder_new () VecMInstBuilder) +(extern constructor inst_builder_new inst_builder_new) + +;; Push an instruction to a sequence under construction. +(decl inst_builder_push (VecMInstBuilder MInst) Unit) +(extern constructor inst_builder_push inst_builder_push) + +;; Complete the sequence under construction. +(decl inst_builder_finish (VecMInstBuilder) VecMInst) +(extern constructor inst_builder_finish inst_builder_finish) + +;; It is not safe to write to virtual registers in the loop, so all destination +;; registers must be real. This must be handled by the user of these helpers, +;; so we simply verify this constraint here. 
+(decl real_reg (WritableReg) WritableReg) +(extern extractor real_reg real_reg) + +;; Similarly, because we cannot allocate temp registers, if an instruction +;; requires matching source and destination registers, this needs to be handled +;; by the user. Another helper to verify that constraint. +(decl same_reg (WritableReg) Reg) +(extern extractor same_reg same_reg (in)) + +;; Push a `MInst.AluRRR` instruction to a sequence. +(decl push_alu_reg (VecMInstBuilder ALUOp WritableReg Reg Reg) Reg) +(rule (push_alu_reg ib op (real_reg dst) src1 src2) + (let ((_ Unit (inst_builder_push ib (MInst.AluRRR op dst src1 src2)))) + (writable_reg_to_reg dst))) + +;; Push a `MInst.AluRUImm32Shifted` instruction to a sequence. +(decl push_alu_uimm32shifted (VecMInstBuilder ALUOp WritableReg Reg UImm32Shifted) Reg) +(rule (push_alu_uimm32shifted ib op (real_reg dst) (same_reg [u8; 6] { enc } +/// RIEf-type instructions. +/// +/// 47 39 35 31 23 15 7 +/// opcode1 r1 r2 i3 i4 i5 opcode2 +/// 40 36 32 24 16 8 0 +/// +fn enc_rie_f(opcode: u16, r1: Reg, r2: Reg, i3: u8, i4: u8, i5: u8) -> [u8; 6] { + let mut enc: [u8; 6] = [0; 6]; + let opcode1 = ((opcode >> 8) & 0xff) as u8; + let opcode2 = (opcode & 0xff) as u8; + let r1 = machreg_to_gpr(r1) & 0x0f; + let r2 = machreg_to_gpr(r2) & 0x0f; + + enc[0] = opcode1; + enc[1] = r1 << 4 | r2; + enc[2] = i3; + enc[3] = i4; + enc[4] = i5; + enc[5] = opcode2; + enc +} + /// RIEg-type instructions. 
/// /// 47 39 35 31 15 7 @@ -1188,6 +1210,60 @@ impl MachInstEmit for Inst { ); } + &Inst::RxSBG { + op, + rd, + rn, + start_bit, + end_bit, + rotate_amt, + } => { + let opcode = match op { + RxSBGOp::Insert => 0xec59, // RISBGN + RxSBGOp::And => 0xec54, // RNSBG + RxSBGOp::Or => 0xec56, // ROSBG + RxSBGOp::Xor => 0xec57, // RXSBG + }; + put( + sink, + &enc_rie_f( + opcode, + rd.to_reg(), + rn, + start_bit, + end_bit, + (rotate_amt as u8) & 63, + ), + ); + } + + &Inst::RxSBGTest { + op, + rd, + rn, + start_bit, + end_bit, + rotate_amt, + } => { + let opcode = match op { + RxSBGOp::And => 0xec54, // RNSBG + RxSBGOp::Or => 0xec56, // ROSBG + RxSBGOp::Xor => 0xec57, // RXSBG + _ => unreachable!(), + }; + put( + sink, + &enc_rie_f( + opcode, + rd, + rn, + start_bit | 0x80, + end_bit, + (rotate_amt as u8) & 63, + ), + ); + } + &Inst::UnaryRR { op, rd, rn } => { match op { UnaryOp::Abs32 => { @@ -1222,6 +1298,14 @@ impl MachInstEmit for Inst { let opcode = 0xb9e1; // POPCNT put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 8, 0)); } + UnaryOp::BSwap32 => { + let opcode = 0xb91f; // LRVR + put(sink, &enc_rre(opcode, rd.to_reg(), rn)); + } + UnaryOp::BSwap64 => { + let opcode = 0xb90f; // LRVRG + put(sink, &enc_rre(opcode, rd.to_reg(), rn)); + } } } @@ -1406,6 +1490,39 @@ impl MachInstEmit for Inst { state, ); } + &Inst::Loop { ref body, cond } => { + // This sequence is *one* instruction in the vcode, and is expanded only here at + // emission time, because it requires branching to internal labels. + let loop_label = sink.get_label(); + let done_label = sink.get_label(); + + // Emit label at the start of the loop. + sink.bind_label(loop_label); + + for inst in (&body).into_iter() { + match &inst { + // Replace a CondBreak with a branch to done_label. 
+ &Inst::CondBreak { cond } => { + let inst = Inst::OneWayCondBr { + target: done_label, + cond: *cond, + }; + inst.emit(sink, emit_info, state); + } + _ => inst.emit(sink, emit_info, state), + }; + } + + let inst = Inst::OneWayCondBr { + target: loop_label, + cond, + }; + inst.emit(sink, emit_info, state); + + // Emit label at the end of the loop. + sink.bind_label(done_label); + } + &Inst::CondBreak { .. } => unreachable!(), // Only valid inside a Loop. &Inst::AtomicCas32 { rd, rn, ref mem } | &Inst::AtomicCas64 { rd, rn, ref mem } => { let (opcode_rs, opcode_rsy) = match self { &Inst::AtomicCas32 { .. } => (Some(0xba), Some(0xeb14)), // CS(Y) diff --git a/cranelift/codegen/src/isa/s390x/inst/emit_tests.rs b/cranelift/codegen/src/isa/s390x/inst/emit_tests.rs index 1af858ce2f..ed2310e288 100644 --- a/cranelift/codegen/src/isa/s390x/inst/emit_tests.rs +++ b/cranelift/codegen/src/isa/s390x/inst/emit_tests.rs @@ -1478,6 +1478,24 @@ fn test_s390x_binemit() { "B9E1801A", "popcnt %r1, %r10, 8", )); + insns.push(( + Inst::UnaryRR { + op: UnaryOp::BSwap32, + rd: writable_gpr(1), + rn: gpr(10), + }, + "B91F001A", + "lrvr %r1, %r10", + )); + insns.push(( + Inst::UnaryRR { + op: UnaryOp::BSwap64, + rd: writable_gpr(1), + rn: gpr(10), + }, + "B90F001A", + "lrvgr %r1, %r10", + )); insns.push(( Inst::CmpRR { @@ -2410,6 +2428,91 @@ fn test_s390x_binemit() { "srag %r4, %r5, 63(%r6)", )); + insns.push(( + Inst::RxSBG { + op: RxSBGOp::Insert, + rd: writable_gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: -16, + }, + "EC4508203059", + "risbgn %r4, %r5, 8, 32, 48", + )); + insns.push(( + Inst::RxSBG { + op: RxSBGOp::And, + rd: writable_gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4508203F54", + "rnsbg %r4, %r5, 8, 32, 63", + )); + insns.push(( + Inst::RxSBG { + op: RxSBGOp::Or, + rd: writable_gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4508203F56", + "rosbg %r4, %r5, 8, 32, 63", + )); + 
insns.push(( + Inst::RxSBG { + op: RxSBGOp::Xor, + rd: writable_gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4508203F57", + "rxsbg %r4, %r5, 8, 32, 63", + )); + insns.push(( + Inst::RxSBGTest { + op: RxSBGOp::And, + rd: gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4588203F54", + "rnsbg %r4, %r5, 136, 32, 63", + )); + insns.push(( + Inst::RxSBGTest { + op: RxSBGOp::Or, + rd: gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4588203F56", + "rosbg %r4, %r5, 136, 32, 63", + )); + insns.push(( + Inst::RxSBGTest { + op: RxSBGOp::Xor, + rd: gpr(4), + rn: gpr(5), + start_bit: 8, + end_bit: 32, + rotate_amt: 63, + }, + "EC4588203F57", + "rxsbg %r4, %r5, 136, 32, 63", + )); + insns.push(( Inst::AtomicRmw { alu_op: ALUOp::Add32, @@ -6699,6 +6802,34 @@ fn test_s390x_binemit() { "jno 6 ; trap", )); + insns.push(( + Inst::Loop { + body: vec![ + Inst::CmpRR { + op: CmpOp::CmpS32, + rn: gpr(2), + rm: gpr(3), + }, + Inst::CondBreak { + cond: Cond::from_mask(13), + }, + Inst::AtomicCas32 { + rd: writable_gpr(4), + rn: gpr(5), + mem: MemArg::BXD12 { + base: gpr(6), + index: zero_reg(), + disp: UImm12::maybe_from_u64(0).unwrap(), + flags: MemFlags::trusted(), + }, + }, + ], + cond: Cond::from_mask(6), + }, + "1923C0D400000008BA456000C064FFFFFFFA", + "0: cr %r2, %r3 ; jgnh 1f ; cs %r4, %r5, 0(%r6) ; jglh 0b ; 1:", + )); + insns.push(( Inst::FpuMove32 { rd: writable_fpr(8), diff --git a/cranelift/codegen/src/isa/s390x/inst/mod.rs b/cranelift/codegen/src/isa/s390x/inst/mod.rs index 896fad9664..adff4a4d95 100644 --- a/cranelift/codegen/src/isa/s390x/inst/mod.rs +++ b/cranelift/codegen/src/isa/s390x/inst/mod.rs @@ -35,7 +35,7 @@ mod emit_tests; pub use crate::isa::s390x::lower::isle::generated_code::{ ALUOp, CmpOp, FPUOp1, FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, - ShiftOp, UnaryOp, + RxSBGOp, ShiftOp, UnaryOp, }; /// Additional information for (direct) Call 
instructions, left out of line to lower the size of @@ -93,6 +93,8 @@ impl Inst { | Inst::AluRUImm16Shifted { .. } | Inst::AluRUImm32Shifted { .. } | Inst::ShiftRR { .. } + | Inst::RxSBG { .. } + | Inst::RxSBGTest { .. } | Inst::SMulWide { .. } | Inst::UMulWide { .. } | Inst::SDivMod32 { .. } @@ -191,6 +193,8 @@ impl Inst { | Inst::JTSequence { .. } | Inst::LoadExtNameFar { .. } | Inst::LoadAddr { .. } + | Inst::Loop { .. } + | Inst::CondBreak { .. } | Inst::VirtualSPOffsetAdj { .. } | Inst::ValueLabelMarker { .. } | Inst::Unwind { .. } => InstructionSet::Base, @@ -437,6 +441,14 @@ fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) { collector.add_use(shift_reg); } } + &Inst::RxSBG { rd, rn, .. } => { + collector.add_mod(rd); + collector.add_use(rn); + } + &Inst::RxSBGTest { rd, rn, .. } => { + collector.add_use(rd); + collector.add_use(rn); + } &Inst::UnaryRR { rd, rn, .. } => { collector.add_def(rd); collector.add_use(rn); @@ -687,6 +699,12 @@ fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) { collector.add_def(rd); memarg_regs(mem, collector); } + &Inst::Loop { ref body, .. } => { + for inst in body.iter() { + s390x_get_regs(inst, collector); + } + } + &Inst::CondBreak { .. } => {} &Inst::VirtualSPOffsetAdj { .. } => {} &Inst::ValueLabelMarker { reg, .. } => { collector.add_use(reg); @@ -812,6 +830,22 @@ pub fn s390x_map_regs(inst: &mut Inst, mapper: &RM) { mapper.map_use(shift_reg); } } + &mut Inst::RxSBG { + ref mut rd, + ref mut rn, + .. + } => { + mapper.map_mod(rd); + mapper.map_use(rn); + } + &mut Inst::RxSBGTest { + ref mut rd, + ref mut rn, + .. + } => { + mapper.map_use(rd); + mapper.map_use(rn); + } &mut Inst::UnaryRR { ref mut rd, ref mut rn, @@ -1408,6 +1442,12 @@ pub fn s390x_map_regs(inst: &mut Inst, mapper: &RM) { mapper.map_def(rd); map_mem(mapper, mem); } + &mut Inst::Loop { ref mut body, .. } => { + for inst in body.iter_mut() { + s390x_map_regs(inst, mapper); + } + } + &mut Inst::CondBreak { .. 
} => {} &mut Inst::VirtualSPOffsetAdj { .. } => {} &mut Inst::ValueLabelMarker { ref mut reg, .. } => { mapper.map_use(reg); @@ -1909,6 +1949,58 @@ impl Inst { }; format!("{} {}, {}, {}{}", op, rd, rn, shift_imm, shift_reg) } + &Inst::RxSBG { + op, + rd, + rn, + start_bit, + end_bit, + rotate_amt, + } => { + let op = match op { + RxSBGOp::Insert => "risbgn", + RxSBGOp::And => "rnsbg", + RxSBGOp::Or => "rosbg", + RxSBGOp::Xor => "rxsbg", + }; + let rd = rd.to_reg().show_rru(mb_rru); + let rn = rn.show_rru(mb_rru); + format!( + "{} {}, {}, {}, {}, {}", + op, + rd, + rn, + start_bit, + end_bit, + (rotate_amt as u8) & 63 + ) + } + &Inst::RxSBGTest { + op, + rd, + rn, + start_bit, + end_bit, + rotate_amt, + } => { + let op = match op { + RxSBGOp::And => "rnsbg", + RxSBGOp::Or => "rosbg", + RxSBGOp::Xor => "rxsbg", + _ => unreachable!(), + }; + let rd = rd.show_rru(mb_rru); + let rn = rn.show_rru(mb_rru); + format!( + "{} {}, {}, {}, {}, {}", + op, + rd, + rn, + start_bit | 0x80, + end_bit, + (rotate_amt as u8) & 63 + ) + } &Inst::UnaryRR { op, rd, rn } => { let (op, extra) = match op { UnaryOp::Abs32 => ("lpr", ""), @@ -1919,6 +2011,8 @@ impl Inst { UnaryOp::Neg64Ext32 => ("lcgfr", ""), UnaryOp::PopcntByte => ("popcnt", ""), UnaryOp::PopcntReg => ("popcnt", ", 8"), + UnaryOp::BSwap32 => ("lrvr", ""), + UnaryOp::BSwap64 => ("lrvgr", ""), }; let rd = rd.to_reg().show_rru(mb_rru); let rn = rn.show_rru(mb_rru); @@ -2644,6 +2738,19 @@ impl Inst { let mem = mem.show_rru(mb_rru); format!("{}{} {}, {}", mem_str, op, rd, mem) } + &Inst::Loop { ref body, cond } => { + let body = body + .into_iter() + .map(|inst| inst.show_rru(mb_rru)) + .collect::>() + .join(" ; "); + let cond = cond.show_rru(mb_rru); + format!("0: {} ; jg{} 0b ; 1:", body, cond) + } + &Inst::CondBreak { cond } => { + let cond = cond.show_rru(mb_rru); + format!("jg{} 1f", cond) + } &Inst::VirtualSPOffsetAdj { offset } => { state.virtual_sp_offset += offset; format!("virtual_sp_offset_adjust {}", offset) diff 
--git a/cranelift/codegen/src/isa/s390x/lower.isle b/cranelift/codegen/src/isa/s390x/lower.isle index dbd24fbdf0..2fc051d836 100644 --- a/cranelift/codegen/src/isa/s390x/lower.isle +++ b/cranelift/codegen/src/isa/s390x/lower.isle @@ -1497,24 +1497,44 @@ ;;;; Rules for `atomic_rmw` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Atomic operations that do not require a compare-and-swap loop. + ;; Atomic AND for 32/64-bit big-endian types, using a single instruction. (rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags @ (bigendian) (AtomicRmwOp.And) addr src))) (value_reg (atomic_rmw_and ty (put_in_reg src) (lower_address flags addr (zero_offset))))) +;; Atomic AND for 32/64-bit big-endian types, using byte-swapped input/output. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (littleendian) (AtomicRmwOp.And) addr src))) + (value_reg (bswap_reg ty (atomic_rmw_and ty (bswap_reg ty (put_in_reg src)) + (lower_address flags addr (zero_offset)))))) + ;; Atomic OR for 32/64-bit big-endian types, using a single instruction. (rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Or) addr src))) (value_reg (atomic_rmw_or ty (put_in_reg src) (lower_address flags addr (zero_offset))))) +;; Atomic OR for 32/64-bit little-endian types, using byte-swapped input/output. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (littleendian) (AtomicRmwOp.Or) addr src))) + (value_reg (bswap_reg ty (atomic_rmw_or ty (bswap_reg ty (put_in_reg src)) + (lower_address flags addr (zero_offset)))))) + ;; Atomic XOR for 32/64-bit big-endian types, using a single instruction. (rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Xor) addr src))) (value_reg (atomic_rmw_xor ty (put_in_reg src) (lower_address flags addr (zero_offset))))) +;; Atomic XOR for 32/64-bit little-endian types, using byte-swapped input/output. 
+(rule (lower (has_type (ty_32_or_64 ty) + (atomic_rmw flags @ (littleendian) (AtomicRmwOp.Xor) addr src))) + (value_reg (bswap_reg ty (atomic_rmw_xor ty (bswap_reg ty (put_in_reg src)) + (lower_address flags addr (zero_offset)))))) + ;; Atomic ADD for 32/64-bit big-endian types, using a single instruction. (rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags @ (bigendian) (AtomicRmwOp.Add) addr src))) @@ -1528,17 +1548,278 @@ (lower_address flags addr (zero_offset))))) +;; Atomic operations that require a compare-and-swap loop. + +;; Operations for 32/64-bit types can use a fullword compare-and-swap loop. +(rule (lower (has_type (ty_32_or_64 ty) (atomic_rmw flags op addr src))) + (let ((src_reg Reg (put_in_reg src)) + (addr_reg Reg (put_in_reg addr)) + ;; Create body of compare-and-swap loop. + (ib VecMInstBuilder (inst_builder_new)) + (val0 Reg (writable_reg_to_reg (casloop_val_reg))) + (val1 Reg (atomic_rmw_body ib ty flags op + (casloop_tmp_reg) val0 src_reg))) + ;; Emit compare-and-swap loop and extract final result. + (value_reg (casloop ib ty flags addr_reg val1)))) + +;; Operations for 8/16-bit types must operate on the surrounding aligned word. +(rule (lower (has_type (ty_8_or_16 ty) (atomic_rmw flags op addr src))) + (let ((src_reg Reg (put_in_reg src)) + (addr_reg Reg (put_in_reg addr)) + ;; Prepare access to surrounding aligned word. + (bitshift Reg (casloop_bitshift addr_reg)) + (aligned_addr Reg (casloop_aligned_addr addr_reg)) + ;; Create body of compare-and-swap loop. + (ib VecMInstBuilder (inst_builder_new)) + (val0 Reg (writable_reg_to_reg (casloop_val_reg))) + (val1 Reg (casloop_rotate_in ib ty flags bitshift val0)) + (val2 Reg (atomic_rmw_body ib ty flags op + (casloop_tmp_reg) val1 src_reg)) + (val3 Reg (casloop_rotate_out ib ty flags bitshift val2))) + ;; Emit compare-and-swap loop and extract final result. + (value_reg (casloop_subword ib ty flags aligned_addr bitshift val3)))) + +;; Loop bodies for atomic read-modify-write operations. 
+(decl atomic_rmw_body (VecMInstBuilder Type MemFlags AtomicRmwOp + WritableReg Reg Reg) Reg) + +;; Loop bodies for 32-/64-bit atomic XCHG operations. +;; Simply use the source (possibly byte-swapped) as new target value. +(rule (atomic_rmw_body ib (ty_32_or_64 ty) (bigendian) + (AtomicRmwOp.Xchg) tmp val src) + src) +(rule (atomic_rmw_body ib (ty_32_or_64 ty) (littleendian) + (AtomicRmwOp.Xchg) tmp val src) + (bswap_reg ty src)) + +;; Loop bodies for 32-/64-bit atomic NAND operations. +;; On z15 this can use the NN(G)RK instruction. On z14, perform an And +;; operation and invert the result. In the little-endian case, we can +;; simply byte-swap the source operand. +(rule (atomic_rmw_body ib (and (mie2_enabled) (ty_32_or_64 ty)) (bigendian) + (AtomicRmwOp.Nand) tmp val src) + (push_alu_reg ib (aluop_and_not ty) tmp val src)) +(rule (atomic_rmw_body ib (and (mie2_enabled) (ty_32_or_64 ty)) (littleendian) + (AtomicRmwOp.Nand) tmp val src) + (push_alu_reg ib (aluop_and_not ty) tmp val (bswap_reg ty src))) +(rule (atomic_rmw_body ib (and (mie2_disabled) (ty_32_or_64 ty)) (bigendian) + (AtomicRmwOp.Nand) tmp val src) + (push_not_reg ib ty tmp + (push_alu_reg ib (aluop_and ty) tmp val src))) +(rule (atomic_rmw_body ib (and (mie2_disabled) (ty_32_or_64 ty)) (littleendian) + (AtomicRmwOp.Nand) tmp val src) + (push_not_reg ib ty tmp + (push_alu_reg ib (aluop_and ty) tmp val (bswap_reg ty src)))) + +;; Loop bodies for 8-/16-bit atomic bit operations. +;; These use the "rotate-then--selected bits" family of instructions. +;; For the Nand operation, we again perform And and invert the result. 
+(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Xchg) tmp val src) + (atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Insert) tmp val src)) +(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.And) tmp val src) + (atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.And) tmp val src)) +(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Or) tmp val src) + (atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Or) tmp val src)) +(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Xor) tmp val src) + (atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.Xor) tmp val src)) +(rule (atomic_rmw_body ib (ty_8_or_16 ty) flags (AtomicRmwOp.Nand) tmp val src) + (atomic_rmw_body_invert ib ty flags tmp + (atomic_rmw_body_rxsbg ib ty flags (RxSBGOp.And) tmp val src))) + +;; RxSBG subword operation. +(decl atomic_rmw_body_rxsbg (VecMInstBuilder Type MemFlags RxSBGOp + WritableReg Reg Reg) Reg) +;; 8-bit case: use the low byte of "src" and the high byte of "val". +(rule (atomic_rmw_body_rxsbg ib $I8 _ op tmp val src) + (push_rxsbg ib op tmp val src 32 40 24)) +;; 16-bit big-endian case: use the low two bytes of "src" and the +;; high two bytes of "val". +(rule (atomic_rmw_body_rxsbg ib $I16 (bigendian) op tmp val src) + (push_rxsbg ib op tmp val src 32 48 16)) +;; 16-bit little-endian case: use the low two bytes of "src", byte-swapped +;; so they end up in the high two bytes, and the low two bytes of "val". +(rule (atomic_rmw_body_rxsbg ib $I16 (littleendian) op tmp val src) + (push_rxsbg ib op tmp val (bswap_reg $I32 src) 48 64 -16)) + +;; Invert a subword. +(decl atomic_rmw_body_invert (VecMInstBuilder Type MemFlags WritableReg Reg) Reg) +;; 8-bit case: invert the high byte. +(rule (atomic_rmw_body_invert ib $I8 _ tmp val) + (push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xff000000 0))) +;; 16-bit big-endian case: invert the two high bytes. 
+(rule (atomic_rmw_body_invert ib $I16 (bigendian) tmp val) + (push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xffff0000 0))) +;; 16-bit little-endian case: invert the two low bytes. +(rule (atomic_rmw_body_invert ib $I16 (littleendian) tmp val) + (push_xor_uimm32shifted ib $I32 tmp val (uimm32shifted 0xffff 0))) + +;; Loop bodies for atomic ADD/SUB operations. +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Add) tmp val src) + (atomic_rmw_body_addsub ib ty flags (aluop_add (ty_ext32 ty)) tmp val src)) +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Sub) tmp val src) + (atomic_rmw_body_addsub ib ty flags (aluop_sub (ty_ext32 ty)) tmp val src)) + +;; Addition or subtraction operation. +(decl atomic_rmw_body_addsub (VecMInstBuilder Type MemFlags ALUOp + WritableReg Reg Reg) Reg) +;; 32/64-bit big-endian case: just a regular add/sub operation. +(rule (atomic_rmw_body_addsub ib (ty_32_or_64 ty) (bigendian) op tmp val src) + (push_alu_reg ib op tmp val src)) +;; 32/64-bit little-endian case: byte-swap the value loaded from memory before +;; and after performing the operation in native endianness. +(rule (atomic_rmw_body_addsub ib (ty_32_or_64 ty) (littleendian) op tmp val src) + (let ((val_swapped Reg (push_bswap_reg ib ty tmp val)) + (res_swapped Reg (push_alu_reg ib op tmp val_swapped src))) + (push_bswap_reg ib ty tmp res_swapped))) +;; 8-bit case: perform a 32-bit addition of the source value shifted by 24 bits +;; to the memory value, which contains the target in its high byte. +(rule (atomic_rmw_body_addsub ib $I8 _ op tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 24))) + (push_alu_reg ib op tmp val src_shifted))) +;; 16-bit big-endian case: similar, just shift the source by 16 bits. 
+(rule (atomic_rmw_body_addsub ib $I16 (bigendian) op tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 16))) + (push_alu_reg ib op tmp val src_shifted))) +;; 16-bit little-endian case: the same, but in addition we need to byte-swap +;; the memory value before and after the operation. Since the value was placed +;; in the low two bytes by our standard rotation, we can use a 32-bit byte-swap +;; and the native-endian value will end up in the high bytes where we need it +;; to perform the operation. +(rule (atomic_rmw_body_addsub ib $I16 (littleendian) op tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 16)) + (val_swapped Reg (push_bswap_reg ib $I32 tmp val)) + (res_swapped Reg (push_alu_reg ib op tmp val_swapped src_shifted))) + (push_bswap_reg ib $I32 tmp res_swapped))) + +;; Loop bodies for atomic MIN/MAX operations. +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Smin) tmp val src) + (atomic_rmw_body_minmax ib ty flags (cmpop_cmps (ty_ext32 ty)) + (intcc_as_cond (IntCC.SignedLessThan)) tmp val src)) +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Smax) tmp val src) + (atomic_rmw_body_minmax ib ty flags (cmpop_cmps (ty_ext32 ty)) + (intcc_as_cond (IntCC.SignedGreaterThan)) tmp val src)) +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Umin) tmp val src) + (atomic_rmw_body_minmax ib ty flags (cmpop_cmpu (ty_ext32 ty)) + (intcc_as_cond (IntCC.UnsignedLessThan)) tmp val src)) +(rule (atomic_rmw_body ib ty flags (AtomicRmwOp.Umax) tmp val src) + (atomic_rmw_body_minmax ib ty flags (cmpop_cmpu (ty_ext32 ty)) + (intcc_as_cond (IntCC.UnsignedGreaterThan)) tmp val src)) + +;; Minimum or maximum operation. +(decl atomic_rmw_body_minmax (VecMInstBuilder Type MemFlags CmpOp Cond + WritableReg Reg Reg) Reg) +;; 32/64-bit big-endian case: just a comparison followed by a conditional +;; break out of the loop if the memory value does not need to change. +;; If it does need to change, the new value is simply the source operand. 
+(rule (atomic_rmw_body_minmax ib (ty_32_or_64 ty) (bigendian) + op cond tmp val src) + (let ((_ Reg (push_break_if ib (cmp_rr op src val) (invert_cond cond)))) + src)) +;; 32/64-bit little-endian case: similar, but we need to byte-swap the +;; memory value before the comparison. If we need to store the new value, +;; it also needs to be byte-swapped. +(rule (atomic_rmw_body_minmax ib (ty_32_or_64 ty) (littleendian) + op cond tmp val src) + (let ((val_swapped Reg (push_bswap_reg ib ty tmp val)) + (_ Reg (push_break_if ib (cmp_rr op src val_swapped) + (invert_cond cond)))) + (push_bswap_reg ib ty tmp src))) +;; 8-bit case: compare the memory value (which contains the target in the +;; high byte) with the source operand shifted by 24 bits. Note that in +;; the case where the high bytes are equal, the comparison may succeed +;; or fail depending on the unrelated low bits of the memory value, and +;; so we either may or may not perform the update. But it would be an +;; update with the same value in any case, so this does not matter. +(rule (atomic_rmw_body_minmax ib $I8 _ op cond tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 24)) + (_ Reg (push_break_if ib (cmp_rr op src_shifted val) + (invert_cond cond)))) + (push_rxsbg ib (RxSBGOp.Insert) tmp val src_shifted 32 40 0))) +;; 16-bit big-endian case: similar, just shift the source by 16 bits. +(rule (atomic_rmw_body_minmax ib $I16 (bigendian) op cond tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 16)) + (_ Reg (push_break_if ib (cmp_rr op src_shifted val) + (invert_cond cond)))) + (push_rxsbg ib (RxSBGOp.Insert) tmp val src_shifted 32 48 0))) +;; 16-bit little-endian case: similar, but in addition byte-swap the +;; memory value before and after the operation, like for _addsub_. 
+(rule (atomic_rmw_body_minmax ib $I16 (littleendian) op cond tmp val src) + (let ((src_shifted Reg (lshl_imm $I32 src 16)) + (val_swapped Reg (push_bswap_reg ib $I32 tmp val)) + (_ Reg (push_break_if ib (cmp_rr op src_shifted val_swapped) + (invert_cond cond))) + (res_swapped Reg (push_rxsbg ib (RxSBGOp.Insert) + tmp val_swapped src_shifted 32 48 0))) + (push_bswap_reg ib $I32 tmp res_swapped))) + + ;;;; Rules for `atomic_cas` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; 32-bit big-endian atomic compare-and-swap instruction. -(rule (lower (has_type $I32 (atomic_cas flags @ (bigendian) addr old new))) - (value_reg (atomic_cas32 (put_in_reg old) (put_in_reg new) - (lower_address flags addr (zero_offset))))) +;; 32/64-bit big-endian atomic compare-and-swap instruction. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_cas flags @ (bigendian) addr src1 src2))) + (value_reg (atomic_cas_impl ty (put_in_reg src1) (put_in_reg src2) + (lower_address flags addr (zero_offset))))) -;; 64-bit big-endian atomic compare-and-swap instruction. -(rule (lower (has_type $I64 (atomic_cas flags @ (bigendian) addr old new))) - (value_reg (atomic_cas64 (put_in_reg old) (put_in_reg new) - (lower_address flags addr (zero_offset))))) +;; 32/64-bit little-endian atomic compare-and-swap instruction. +;; Implemented by byte-swapping old/new inputs and the output. +(rule (lower (has_type (ty_32_or_64 ty) + (atomic_cas flags @ (littleendian) addr src1 src2))) + (value_reg (bswap_reg ty (atomic_cas_impl ty (bswap_reg ty (put_in_reg src1)) + (bswap_reg ty (put_in_reg src2)) + (lower_address flags addr (zero_offset)))))) + +;; 8/16-bit atomic compare-and-swap implemented via loop. +(rule (lower (has_type (ty_8_or_16 ty) (atomic_cas flags addr src1 src2))) + (let ((src1_reg Reg (put_in_reg src1)) + (src2_reg Reg (put_in_reg src2)) + (addr_reg Reg (put_in_reg addr)) + ;; Prepare access to the surrounding aligned word. 
+ (bitshift Reg (casloop_bitshift addr_reg)) + (aligned_addr Reg (casloop_aligned_addr addr_reg)) + ;; Create body of compare-and-swap loop. + (ib VecMInstBuilder (inst_builder_new)) + (val0 Reg (writable_reg_to_reg (casloop_val_reg))) + (val1 Reg (casloop_rotate_in ib ty flags bitshift val0)) + (val2 Reg (atomic_cas_body ib ty flags + (casloop_tmp_reg) val1 src1_reg src2_reg)) + (val3 Reg (casloop_rotate_out ib ty flags bitshift val2))) + ;; Emit compare-and-swap loop and extract final result. + (value_reg (casloop_subword ib ty flags aligned_addr bitshift val3)))) + +;; Emit loop body instructions to perform a subword compare-and-swap. +(decl atomic_cas_body (VecMInstBuilder Type MemFlags + WritableReg Reg Reg Reg) Reg) + +;; 8-bit case: "val" contains the value loaded from memory in the high byte. +;; Compare with the comparison value in the low byte of "src1". If unequal, +;; break out of the loop, otherwise replace the target byte in "val" with +;; the low byte of "src2". +(rule (atomic_cas_body ib $I8 _ tmp val src1 src2) + (let ((_ Reg (push_break_if ib (rxsbg_test (RxSBGOp.Xor) val src1 32 40 24) + (intcc_as_cond (IntCC.NotEqual))))) + (push_rxsbg ib (RxSBGOp.Insert) tmp val src2 32 40 24))) + +;; 16-bit big-endian case: Same as above, except with values in the high +;; two bytes of "val" and low two bytes of "src1" and "src2". +(rule (atomic_cas_body ib $I16 (bigendian) tmp val src1 src2) + (let ((_ Reg (push_break_if ib (rxsbg_test (RxSBGOp.Xor) val src1 32 48 16) + (intcc_as_cond (IntCC.NotEqual))))) + (push_rxsbg ib (RxSBGOp.Insert) tmp val src2 32 48 16))) + +;; 16-bit little-endian case: "val" here contains a little-endian value in the +;; *low* two bytes. "src1" and "src2" contain native (i.e. big-endian) values +;; in their low two bytes. Perform the operation in little-endian mode by +;; byte-swapping "src1" and "src" ahead of the loop. 
Note that this is a +;; 32-bit operation so the little-endian 16-bit values end up in the *high* +;; two bytes of the swapped values. +(rule (atomic_cas_body ib $I16 (littleendian) tmp val src1 src2) + (let ((src1_swapped Reg (bswap_reg $I32 src1)) + (src2_swapped Reg (bswap_reg $I32 src2)) + (_ Reg (push_break_if ib + (rxsbg_test (RxSBGOp.Xor) val src1_swapped 48 64 -16) + (intcc_as_cond (IntCC.NotEqual))))) + (push_rxsbg ib (RxSBGOp.Insert) tmp val src2_swapped 48 64 -16))) ;;;; Rules for `atomic_load` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; diff --git a/cranelift/codegen/src/isa/s390x/lower/isle.rs b/cranelift/codegen/src/isa/s390x/lower/isle.rs index 32672cf0d3..1ad7758b64 100644 --- a/cranelift/codegen/src/isa/s390x/lower/isle.rs +++ b/cranelift/codegen/src/isa/s390x/lower/isle.rs @@ -21,6 +21,7 @@ use crate::{ machinst::{InsnOutput, LowerCtx, RelocDistance}, }; use std::boxed::Box; +use std::cell::Cell; use std::convert::TryFrom; use std::vec::Vec; @@ -28,6 +29,8 @@ type BoxCallInfo = Box; type BoxCallIndInfo = Box; type VecMachLabel = Vec; type BoxExternalName = Box; +type VecMInst = Vec; +type VecMInstBuilder = Cell>; /// The main entry point for lowering with ISLE. 
pub(crate) fn lower( @@ -485,6 +488,41 @@ where self.lower_ctx.abi().stackslot_addr(stack_slot, offset, dst) } + #[inline] + fn inst_builder_new(&mut self) -> VecMInstBuilder { + Cell::new(Vec::::new()) + } + + #[inline] + fn inst_builder_push(&mut self, builder: &VecMInstBuilder, inst: &MInst) -> Unit { + let mut vec = builder.take(); + vec.push(inst.clone()); + builder.set(vec); + } + + #[inline] + fn inst_builder_finish(&mut self, builder: &VecMInstBuilder) -> Vec { + builder.take() + } + + #[inline] + fn real_reg(&mut self, reg: WritableReg) -> Option { + if reg.to_reg().is_real() { + Some(reg) + } else { + None + } + } + + #[inline] + fn same_reg(&mut self, src: Reg, dst: WritableReg) -> Option<()> { + if dst.to_reg() == src { + Some(()) + } else { + None + } + } + #[inline] fn sinkable_inst(&mut self, val: Value) -> Option { let input = self.lower_ctx.get_value_as_source_or_const(val); diff --git a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest index eee9fc5608..283389b954 100644 --- a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest +++ b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.manifest @@ -1,4 +1,4 @@ src/clif.isle 9ea75a6f790b5c03 src/prelude.isle 6aaf8ce0f5a5c2ec -src/isa/s390x/inst.isle f5af3708848ef1aa -src/isa/s390x/lower.isle 57dcc39cbab2d1c6 +src/isa/s390x/inst.isle 1ae3c0f9c956affd +src/isa/s390x/lower.isle d18ee0bff12cad4e diff --git a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs index 2c52f5f2b9..36a0d2a434 100644 --- a/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs +++ b/cranelift/codegen/src/isa/s390x/lower/isle/generated_code.rs @@ -125,6 +125,11 @@ pub trait Context { fn abi_stackslot_addr(&mut self, arg0: WritableReg, arg1: StackSlot, arg2: Offset32) -> MInst; fn sinkable_inst(&mut self, arg0: Value) -> Option; fn sink_inst(&mut 
self, arg0: Inst) -> Unit; + fn inst_builder_new(&mut self) -> VecMInstBuilder; + fn inst_builder_push(&mut self, arg0: &VecMInstBuilder, arg1: &MInst) -> Unit; + fn inst_builder_finish(&mut self, arg0: &VecMInstBuilder) -> VecMInst; + fn real_reg(&mut self, arg0: WritableReg) -> Option; + fn same_reg(&mut self, arg0: Reg, arg1: WritableReg) -> Option<()>; } /// Internal type SideEffectNoResult: defined at src/prelude.isle line 307. @@ -226,6 +231,22 @@ pub enum MInst { shift_imm: u8, shift_reg: Reg, }, + RxSBG { + op: RxSBGOp, + rd: WritableReg, + rn: Reg, + start_bit: u8, + end_bit: u8, + rotate_amt: i8, + }, + RxSBGTest { + op: RxSBGOp, + rd: Reg, + rn: Reg, + start_bit: u8, + end_bit: u8, + rotate_amt: i8, + }, UnaryRR { op: UnaryOp, rd: WritableReg, @@ -640,6 +661,13 @@ pub enum MInst { rd: WritableReg, mem: MemArg, }, + Loop { + body: VecMInst, + cond: Cond, + }, + CondBreak { + cond: Cond, + }, VirtualSPOffsetAdj { offset: i64, }, @@ -652,7 +680,7 @@ pub enum MInst { }, } -/// Internal type ALUOp: defined at src/isa/s390x/inst.isle line 684. +/// Internal type ALUOp: defined at src/isa/s390x/inst.isle line 717. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ALUOp { Add32, @@ -690,7 +718,7 @@ pub enum ALUOp { XorNot64, } -/// Internal type UnaryOp: defined at src/isa/s390x/inst.isle line 725. +/// Internal type UnaryOp: defined at src/isa/s390x/inst.isle line 758. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum UnaryOp { Abs32, @@ -701,9 +729,11 @@ pub enum UnaryOp { Neg64Ext32, PopcntByte, PopcntReg, + BSwap32, + BSwap64, } -/// Internal type ShiftOp: defined at src/isa/s390x/inst.isle line 738. +/// Internal type ShiftOp: defined at src/isa/s390x/inst.isle line 773. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ShiftOp { RotL32, @@ -716,7 +746,16 @@ pub enum ShiftOp { AShR64, } -/// Internal type CmpOp: defined at src/isa/s390x/inst.isle line 751. +/// Internal type RxSBGOp: defined at src/isa/s390x/inst.isle line 786. 
+#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum RxSBGOp { + Insert, + And, + Or, + Xor, +} + +/// Internal type CmpOp: defined at src/isa/s390x/inst.isle line 795. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum CmpOp { CmpS32, @@ -731,7 +770,7 @@ pub enum CmpOp { CmpL64Ext32, } -/// Internal type FPUOp1: defined at src/isa/s390x/inst.isle line 766. +/// Internal type FPUOp1: defined at src/isa/s390x/inst.isle line 810. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp1 { Abs32, @@ -746,7 +785,7 @@ pub enum FPUOp1 { Cvt64To32, } -/// Internal type FPUOp2: defined at src/isa/s390x/inst.isle line 781. +/// Internal type FPUOp2: defined at src/isa/s390x/inst.isle line 825. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp2 { Add32, @@ -763,7 +802,7 @@ pub enum FPUOp2 { Min64, } -/// Internal type FPUOp3: defined at src/isa/s390x/inst.isle line 798. +/// Internal type FPUOp3: defined at src/isa/s390x/inst.isle line 842. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FPUOp3 { MAdd32, @@ -772,7 +811,7 @@ pub enum FPUOp3 { MSub64, } -/// Internal type FpuToIntOp: defined at src/isa/s390x/inst.isle line 807. +/// Internal type FpuToIntOp: defined at src/isa/s390x/inst.isle line 851. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FpuToIntOp { F32ToU32, @@ -785,7 +824,7 @@ pub enum FpuToIntOp { F64ToI64, } -/// Internal type IntToFpuOp: defined at src/isa/s390x/inst.isle line 820. +/// Internal type IntToFpuOp: defined at src/isa/s390x/inst.isle line 864. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum IntToFpuOp { U32ToF32, @@ -798,7 +837,7 @@ pub enum IntToFpuOp { I64ToF64, } -/// Internal type FpuRoundMode: defined at src/isa/s390x/inst.isle line 834. +/// Internal type FpuRoundMode: defined at src/isa/s390x/inst.isle line 878. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum FpuRoundMode { Minus32, @@ -811,19 +850,19 @@ pub enum FpuRoundMode { Nearest64, } -/// Internal type WritableRegPair: defined at src/isa/s390x/inst.isle line 1240. +/// Internal type WritableRegPair: defined at src/isa/s390x/inst.isle line 1284. #[derive(Clone, Debug)] pub enum WritableRegPair { WritableRegPair { hi: WritableReg, lo: WritableReg }, } -/// Internal type RegPair: defined at src/isa/s390x/inst.isle line 1262. +/// Internal type RegPair: defined at src/isa/s390x/inst.isle line 1306. #[derive(Clone, Debug)] pub enum RegPair { RegPair { hi: Reg, lo: Reg }, } -/// Internal type ProducesBool: defined at src/isa/s390x/inst.isle line 2187. +/// Internal type ProducesBool: defined at src/isa/s390x/inst.isle line 2334. #[derive(Clone, Debug)] pub enum ProducesBool { ProducesBool { producer: ProducesFlags, cond: Cond }, @@ -981,7 +1020,7 @@ pub fn constructor_mask_amt_reg(ctx: &mut C, arg0: Type, arg1: Reg) let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1009. + // Rule at src/isa/s390x/inst.isle line 1053. let expr0_0: i64 = -1; let expr1_0 = C::mask_amt_imm(ctx, pattern1_0, expr0_0); let expr2_0 = C::u8_as_u16(ctx, expr1_0); @@ -992,7 +1031,7 @@ pub fn constructor_mask_amt_reg(ctx: &mut C, arg0: Type, arg1: Reg) } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1012. + // Rule at src/isa/s390x/inst.isle line 1056. return Some(pattern2_0); } return None; @@ -1019,7 +1058,7 @@ pub fn constructor_lower_address( let pattern7_0 = arg2; let pattern8_0 = C::i64_from_offset(ctx, pattern7_0); if pattern8_0 == 0 { - // Rule at src/isa/s390x/inst.isle line 1104. + // Rule at src/isa/s390x/inst.isle line 1148. 
let expr0_0 = C::put_in_reg(ctx, pattern6_0); let expr1_0 = C::put_in_reg(ctx, pattern6_1); let expr2_0 = C::memarg_reg_plus_reg(ctx, expr0_0, expr1_0, pattern0_0); @@ -1038,7 +1077,7 @@ pub fn constructor_lower_address( if let Some(pattern8_0) = C::memarg_symbol_offset_sum(ctx, pattern6_0, pattern7_0) { - // Rule at src/isa/s390x/inst.isle line 1107. + // Rule at src/isa/s390x/inst.isle line 1151. let expr0_0 = C::memarg_symbol(ctx, pattern3_0, pattern8_0, pattern0_0); return Some(expr0_0); } @@ -1048,7 +1087,7 @@ pub fn constructor_lower_address( } let pattern2_0 = arg2; let pattern3_0 = C::i64_from_offset(ctx, pattern2_0); - // Rule at src/isa/s390x/inst.isle line 1101. + // Rule at src/isa/s390x/inst.isle line 1145. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = C::memarg_reg_plus_off(ctx, expr0_0, pattern3_0, pattern0_0); return Some(expr1_0); @@ -1064,7 +1103,7 @@ pub fn constructor_stack_addr_impl( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1134. + // Rule at src/isa/s390x/inst.isle line 1178. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = C::abi_stackslot_addr(ctx, expr0_0, pattern1_0, pattern2_0); let expr2_0 = C::emit(ctx, &expr1_0); @@ -1084,7 +1123,7 @@ pub fn constructor_sink_load(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C) -> Option { - // Rule at src/isa/s390x/inst.isle line 1245. + // Rule at src/isa/s390x/inst.isle line 1289. let expr0_0: u8 = 0; let expr1_0 = C::writable_gpr(ctx, expr0_0); let expr2_0: u8 = 1; @@ -1197,7 +1236,7 @@ pub fn constructor_copy_writable_regpair( arg0: &RegPair, ) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1251. + // Rule at src/isa/s390x/inst.isle line 1295. 
let expr0_0 = constructor_temp_writable_regpair(ctx)?; return Some(expr0_0); } @@ -1213,7 +1252,7 @@ pub fn constructor_writable_regpair_hi( lo: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1255. + // Rule at src/isa/s390x/inst.isle line 1299. return Some(pattern1_0); } return None; @@ -1230,7 +1269,7 @@ pub fn constructor_writable_regpair_lo( lo: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1259. + // Rule at src/isa/s390x/inst.isle line 1303. return Some(pattern1_1); } return None; @@ -1247,7 +1286,7 @@ pub fn constructor_writable_regpair_to_regpair( lo: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1266. + // Rule at src/isa/s390x/inst.isle line 1310. let expr0_0 = C::writable_reg_to_reg(ctx, pattern1_0); let expr1_0 = C::writable_reg_to_reg(ctx, pattern1_1); let expr2_0 = RegPair::RegPair { @@ -1261,7 +1300,7 @@ pub fn constructor_writable_regpair_to_regpair( // Generated as internal constructor for term uninitialized_regpair. pub fn constructor_uninitialized_regpair(ctx: &mut C) -> Option { - // Rule at src/isa/s390x/inst.isle line 1271. + // Rule at src/isa/s390x/inst.isle line 1315. let expr0_0 = constructor_temp_writable_regpair(ctx)?; let expr1_0 = constructor_writable_regpair_to_regpair(ctx, &expr0_0)?; return Some(expr1_0); @@ -1275,7 +1314,7 @@ pub fn constructor_regpair_hi(ctx: &mut C, arg0: &RegPair) -> Option lo: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1276. + // Rule at src/isa/s390x/inst.isle line 1320. return Some(pattern1_0); } return None; @@ -1289,7 +1328,7 @@ pub fn constructor_regpair_lo(ctx: &mut C, arg0: &RegPair) -> Option lo: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1280. + // Rule at src/isa/s390x/inst.isle line 1324. 
return Some(pattern1_1); } return None; @@ -1307,7 +1346,7 @@ pub fn constructor_alu_rrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1287. + // Rule at src/isa/s390x/inst.isle line 1331. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::AluRRR { alu_op: pattern1_0.clone(), @@ -1332,7 +1371,7 @@ pub fn constructor_alu_rrsimm16( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1294. + // Rule at src/isa/s390x/inst.isle line 1338. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::AluRRSImm16 { alu_op: pattern1_0.clone(), @@ -1357,7 +1396,7 @@ pub fn constructor_alu_rr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1301. + // Rule at src/isa/s390x/inst.isle line 1345. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRR { alu_op: pattern1_0.clone(), @@ -1381,7 +1420,7 @@ pub fn constructor_alu_rx( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1308. + // Rule at src/isa/s390x/inst.isle line 1352. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRX { alu_op: pattern1_0.clone(), @@ -1405,7 +1444,7 @@ pub fn constructor_alu_rsimm16( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1315. + // Rule at src/isa/s390x/inst.isle line 1359. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRSImm16 { alu_op: pattern1_0.clone(), @@ -1429,7 +1468,7 @@ pub fn constructor_alu_rsimm32( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1322. + // Rule at src/isa/s390x/inst.isle line 1366. 
let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRSImm32 { alu_op: pattern1_0.clone(), @@ -1453,7 +1492,7 @@ pub fn constructor_alu_ruimm32( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1329. + // Rule at src/isa/s390x/inst.isle line 1373. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRUImm32 { alu_op: pattern1_0.clone(), @@ -1477,7 +1516,7 @@ pub fn constructor_alu_ruimm16shifted( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1336. + // Rule at src/isa/s390x/inst.isle line 1380. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRUImm16Shifted { alu_op: pattern1_0.clone(), @@ -1501,7 +1540,7 @@ pub fn constructor_alu_ruimm32shifted( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1343. + // Rule at src/isa/s390x/inst.isle line 1387. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::AluRUImm32Shifted { alu_op: pattern1_0.clone(), @@ -1517,7 +1556,7 @@ pub fn constructor_alu_ruimm32shifted( pub fn constructor_smul_wide(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1350. + // Rule at src/isa/s390x/inst.isle line 1394. let expr0_0 = constructor_temp_writable_regpair(ctx)?; let expr1_0 = MInst::SMulWide { rn: pattern0_0, @@ -1532,7 +1571,7 @@ pub fn constructor_smul_wide(ctx: &mut C, arg0: Reg, arg1: Reg) -> O pub fn constructor_umul_wide(ctx: &mut C, arg0: Reg, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1357. + // Rule at src/isa/s390x/inst.isle line 1401. 
let expr0_0 = constructor_temp_writable_regpair(ctx)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = MInst::Mov64 { @@ -1554,7 +1593,7 @@ pub fn constructor_sdivmod32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1365. + // Rule at src/isa/s390x/inst.isle line 1409. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; let expr1_0 = MInst::SDivMod32 { rn: pattern1_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -1570,7 +1609,7 @@ pub fn constructor_sdivmod64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1372. + // Rule at src/isa/s390x/inst.isle line 1416. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; let expr1_0 = MInst::SDivMod64 { rn: pattern1_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -1586,7 +1625,7 @@ pub fn constructor_udivmod32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1379. + // Rule at src/isa/s390x/inst.isle line 1423. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; let expr1_0 = MInst::UDivMod32 { rn: pattern1_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -1602,7 +1641,7 @@ pub fn constructor_udivmod64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1386. + // Rule at src/isa/s390x/inst.isle line 1430. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern0_0)?; let expr1_0 = MInst::UDivMod64 { rn: pattern1_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -1624,7 +1663,7 @@ pub fn constructor_shift_rr( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 1393. + // Rule at src/isa/s390x/inst.isle line 1437. 
let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::ShiftRR { shift_op: pattern1_0.clone(), @@ -1638,6 +1677,39 @@ pub fn constructor_shift_rr( return Some(expr3_0); } +// Generated as internal constructor for term rxsbg_test. +pub fn constructor_rxsbg_test( + ctx: &mut C, + arg0: &RxSBGOp, + arg1: Reg, + arg2: Reg, + arg3: u8, + arg4: u8, + arg5: i8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + let pattern5_0 = arg5; + // Rule at src/isa/s390x/inst.isle line 1444. + let expr0_0 = MInst::RxSBGTest { + op: pattern0_0.clone(), + rd: pattern1_0, + rn: pattern2_0, + start_bit: pattern3_0, + end_bit: pattern4_0, + rotate_amt: pattern5_0, + }; + let expr1_0 = C::invalid_reg(ctx); + let expr2_0 = ProducesFlags::ProducesFlags { + inst: expr0_0, + result: expr1_0, + }; + return Some(expr2_0); +} + // Generated as internal constructor for term unary_rr. pub fn constructor_unary_rr( ctx: &mut C, @@ -1648,7 +1720,7 @@ pub fn constructor_unary_rr( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1400. + // Rule at src/isa/s390x/inst.isle line 1451. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::UnaryRR { op: pattern1_0.clone(), @@ -1670,7 +1742,7 @@ pub fn constructor_cmp_rr( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1407. + // Rule at src/isa/s390x/inst.isle line 1458. let expr0_0 = MInst::CmpRR { op: pattern0_0.clone(), rn: pattern1_0, @@ -1694,7 +1766,7 @@ pub fn constructor_cmp_rx( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1413. + // Rule at src/isa/s390x/inst.isle line 1464. 
let expr0_0 = MInst::CmpRX { op: pattern0_0.clone(), rn: pattern1_0, @@ -1718,7 +1790,7 @@ pub fn constructor_cmp_rsimm16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1419. + // Rule at src/isa/s390x/inst.isle line 1470. let expr0_0 = MInst::CmpRSImm16 { op: pattern0_0.clone(), rn: pattern1_0, @@ -1742,7 +1814,7 @@ pub fn constructor_cmp_rsimm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1425. + // Rule at src/isa/s390x/inst.isle line 1476. let expr0_0 = MInst::CmpRSImm32 { op: pattern0_0.clone(), rn: pattern1_0, @@ -1766,7 +1838,7 @@ pub fn constructor_cmp_ruimm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1431. + // Rule at src/isa/s390x/inst.isle line 1482. let expr0_0 = MInst::CmpRUImm32 { op: pattern0_0.clone(), rn: pattern1_0, @@ -1792,7 +1864,7 @@ pub fn constructor_atomic_rmw_impl( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1437. + // Rule at src/isa/s390x/inst.isle line 1488. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::AtomicRmw { alu_op: pattern1_0.clone(), @@ -1815,7 +1887,7 @@ pub fn constructor_atomic_cas32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1444. + // Rule at src/isa/s390x/inst.isle line 1495. let expr0_0: Type = I32; let expr1_0 = constructor_copy_writable_reg(ctx, expr0_0, pattern0_0)?; let expr2_0 = MInst::AtomicCas32 { @@ -1838,7 +1910,7 @@ pub fn constructor_atomic_cas64( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1451. + // Rule at src/isa/s390x/inst.isle line 1502. 
let expr0_0: Type = I64; let expr1_0 = constructor_copy_writable_reg(ctx, expr0_0, pattern0_0)?; let expr2_0 = MInst::AtomicCas64 { @@ -1853,7 +1925,7 @@ pub fn constructor_atomic_cas64( // Generated as internal constructor for term fence_impl. pub fn constructor_fence_impl(ctx: &mut C) -> Option { - // Rule at src/isa/s390x/inst.isle line 1458. + // Rule at src/isa/s390x/inst.isle line 1509. let expr0_0 = MInst::Fence; let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; return Some(expr1_0); @@ -1862,7 +1934,7 @@ pub fn constructor_fence_impl(ctx: &mut C) -> Option(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1463. + // Rule at src/isa/s390x/inst.isle line 1514. let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::Load32 { @@ -1877,7 +1949,7 @@ pub fn constructor_load32(ctx: &mut C, arg0: &MemArg) -> Option // Generated as internal constructor for term load64. pub fn constructor_load64(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1470. + // Rule at src/isa/s390x/inst.isle line 1521. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::Load64 { @@ -1892,7 +1964,7 @@ pub fn constructor_load64(ctx: &mut C, arg0: &MemArg) -> Option // Generated as internal constructor for term loadrev16. pub fn constructor_loadrev16(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1477. + // Rule at src/isa/s390x/inst.isle line 1528. let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadRev16 { @@ -1907,7 +1979,7 @@ pub fn constructor_loadrev16(ctx: &mut C, arg0: &MemArg) -> Option(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1484. + // Rule at src/isa/s390x/inst.isle line 1535. 
let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadRev32 { @@ -1922,7 +1994,7 @@ pub fn constructor_loadrev32(ctx: &mut C, arg0: &MemArg) -> Option(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1491. + // Rule at src/isa/s390x/inst.isle line 1542. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadRev64 { @@ -1942,7 +2014,7 @@ pub fn constructor_store8( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1498. + // Rule at src/isa/s390x/inst.isle line 1549. let expr0_0 = MInst::Store8 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -1959,7 +2031,7 @@ pub fn constructor_store16( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1503. + // Rule at src/isa/s390x/inst.isle line 1554. let expr0_0 = MInst::Store16 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -1976,7 +2048,7 @@ pub fn constructor_store32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1508. + // Rule at src/isa/s390x/inst.isle line 1559. let expr0_0 = MInst::Store32 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -1993,7 +2065,7 @@ pub fn constructor_store64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1513. + // Rule at src/isa/s390x/inst.isle line 1564. let expr0_0 = MInst::Store64 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2010,7 +2082,7 @@ pub fn constructor_store8_imm( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1518. + // Rule at src/isa/s390x/inst.isle line 1569. 
let expr0_0 = MInst::StoreImm8 { imm: pattern0_0, mem: pattern1_0.clone(), @@ -2027,7 +2099,7 @@ pub fn constructor_store16_imm( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1523. + // Rule at src/isa/s390x/inst.isle line 1574. let expr0_0 = MInst::StoreImm16 { imm: pattern0_0, mem: pattern1_0.clone(), @@ -2044,7 +2116,7 @@ pub fn constructor_store32_simm16( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1528. + // Rule at src/isa/s390x/inst.isle line 1579. let expr0_0 = MInst::StoreImm32SExt16 { imm: pattern0_0, mem: pattern1_0.clone(), @@ -2061,7 +2133,7 @@ pub fn constructor_store64_simm16( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1533. + // Rule at src/isa/s390x/inst.isle line 1584. let expr0_0 = MInst::StoreImm64SExt16 { imm: pattern0_0, mem: pattern1_0.clone(), @@ -2078,7 +2150,7 @@ pub fn constructor_storerev16( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1538. + // Rule at src/isa/s390x/inst.isle line 1589. let expr0_0 = MInst::StoreRev16 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2095,7 +2167,7 @@ pub fn constructor_storerev32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1543. + // Rule at src/isa/s390x/inst.isle line 1594. let expr0_0 = MInst::StoreRev32 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2112,7 +2184,7 @@ pub fn constructor_storerev64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1548. + // Rule at src/isa/s390x/inst.isle line 1599. let expr0_0 = MInst::StoreRev64 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2131,7 +2203,7 @@ pub fn constructor_fpu_rr( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1553. 
+ // Rule at src/isa/s390x/inst.isle line 1604. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::FpuRR { fpu_op: pattern1_0.clone(), @@ -2155,7 +2227,7 @@ pub fn constructor_fpu_rrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1560. + // Rule at src/isa/s390x/inst.isle line 1611. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::FpuRRR { fpu_op: pattern1_0.clone(), @@ -2181,7 +2253,7 @@ pub fn constructor_fpu_rrrr( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 1567. + // Rule at src/isa/s390x/inst.isle line 1618. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern2_0)?; let expr1_0 = MInst::FpuRRRR { fpu_op: pattern1_0.clone(), @@ -2204,7 +2276,7 @@ pub fn constructor_fpu_copysign( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1574. + // Rule at src/isa/s390x/inst.isle line 1625. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::FpuCopysign { rd: expr0_0, @@ -2224,7 +2296,7 @@ pub fn constructor_fpu_cmp32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1581. + // Rule at src/isa/s390x/inst.isle line 1632. let expr0_0 = MInst::FpuCmp32 { rn: pattern0_0, rm: pattern1_0, @@ -2245,7 +2317,7 @@ pub fn constructor_fpu_cmp64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1587. + // Rule at src/isa/s390x/inst.isle line 1638. let expr0_0 = MInst::FpuCmp64 { rn: pattern0_0, rm: pattern1_0, @@ -2268,7 +2340,7 @@ pub fn constructor_fpu_to_int( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1593. + // Rule at src/isa/s390x/inst.isle line 1644. 
let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::FpuToInt { op: pattern1_0.clone(), @@ -2293,7 +2365,7 @@ pub fn constructor_int_to_fpu( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1600. + // Rule at src/isa/s390x/inst.isle line 1651. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::IntToFpu { op: pattern1_0.clone(), @@ -2315,7 +2387,7 @@ pub fn constructor_fpu_round( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1607. + // Rule at src/isa/s390x/inst.isle line 1658. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::FpuRound { op: pattern1_0.clone(), @@ -2339,7 +2411,7 @@ pub fn constructor_fpuvec_rrr( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 1614. + // Rule at src/isa/s390x/inst.isle line 1665. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = MInst::FpuVecRRR { fpu_op: pattern1_0.clone(), @@ -2355,7 +2427,7 @@ pub fn constructor_fpuvec_rrr( // Generated as internal constructor for term mov_to_fpr. pub fn constructor_mov_to_fpr(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1621. + // Rule at src/isa/s390x/inst.isle line 1672. let expr0_0: Type = F64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovToFpr { @@ -2370,7 +2442,7 @@ pub fn constructor_mov_to_fpr(ctx: &mut C, arg0: Reg) -> Option // Generated as internal constructor for term mov_from_fpr. pub fn constructor_mov_from_fpr(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1628. + // Rule at src/isa/s390x/inst.isle line 1679. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::MovFromFpr { @@ -2385,7 +2457,7 @@ pub fn constructor_mov_from_fpr(ctx: &mut C, arg0: Reg) -> Option(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1635. + // Rule at src/isa/s390x/inst.isle line 1686. let expr0_0: Type = F32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuLoad32 { @@ -2400,7 +2472,7 @@ pub fn constructor_fpu_load32(ctx: &mut C, arg0: &MemArg) -> Option< // Generated as internal constructor for term fpu_load64. pub fn constructor_fpu_load64(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1642. + // Rule at src/isa/s390x/inst.isle line 1693. let expr0_0: Type = F64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuLoad64 { @@ -2415,7 +2487,7 @@ pub fn constructor_fpu_load64(ctx: &mut C, arg0: &MemArg) -> Option< // Generated as internal constructor for term fpu_loadrev32. pub fn constructor_fpu_loadrev32(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1649. + // Rule at src/isa/s390x/inst.isle line 1700. let expr0_0: Type = F32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuLoadRev32 { @@ -2430,7 +2502,7 @@ pub fn constructor_fpu_loadrev32(ctx: &mut C, arg0: &MemArg) -> Opti // Generated as internal constructor for term fpu_loadrev64. pub fn constructor_fpu_loadrev64(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1656. + // Rule at src/isa/s390x/inst.isle line 1707. let expr0_0: Type = F64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::FpuLoadRev64 { @@ -2450,7 +2522,7 @@ pub fn constructor_fpu_store32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1663. 
+ // Rule at src/isa/s390x/inst.isle line 1714. let expr0_0 = MInst::FpuStore32 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2467,7 +2539,7 @@ pub fn constructor_fpu_store64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1668. + // Rule at src/isa/s390x/inst.isle line 1719. let expr0_0 = MInst::FpuStore64 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2484,7 +2556,7 @@ pub fn constructor_fpu_storerev32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1673. + // Rule at src/isa/s390x/inst.isle line 1724. let expr0_0 = MInst::FpuStoreRev32 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2501,7 +2573,7 @@ pub fn constructor_fpu_storerev64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1678. + // Rule at src/isa/s390x/inst.isle line 1729. let expr0_0 = MInst::FpuStoreRev64 { rd: pattern0_0, mem: pattern1_0.clone(), @@ -2518,7 +2590,7 @@ pub fn constructor_load_ext_name_far( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1683. + // Rule at src/isa/s390x/inst.isle line 1734. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadExtNameFar { @@ -2534,7 +2606,7 @@ pub fn constructor_load_ext_name_far( // Generated as internal constructor for term load_addr. pub fn constructor_load_addr(ctx: &mut C, arg0: &MemArg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1690. + // Rule at src/isa/s390x/inst.isle line 1741. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = MInst::LoadAddr { @@ -2552,7 +2624,7 @@ pub fn constructor_jump_impl( arg0: MachLabel, ) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 1697. + // Rule at src/isa/s390x/inst.isle line 1748. 
let expr0_0 = MInst::Jump { dest: pattern0_0 }; let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; return Some(expr1_0); @@ -2568,7 +2640,7 @@ pub fn constructor_cond_br( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1702. + // Rule at src/isa/s390x/inst.isle line 1753. let expr0_0 = MInst::CondBr { taken: pattern0_0, not_taken: pattern1_0, @@ -2586,7 +2658,7 @@ pub fn constructor_oneway_cond_br( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1707. + // Rule at src/isa/s390x/inst.isle line 1758. let expr0_0 = MInst::OneWayCondBr { target: pattern0_0, cond: pattern1_0.clone(), @@ -2603,7 +2675,7 @@ pub fn constructor_jt_sequence( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1712. + // Rule at src/isa/s390x/inst.isle line 1763. let expr0_0 = MInst::JTSequence { ridx: pattern0_0, targets: pattern1_0.clone(), @@ -2620,13 +2692,275 @@ pub fn constructor_drop_flags(ctx: &mut C, arg0: &ProducesFlags) -> result: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 1717. + // Rule at src/isa/s390x/inst.isle line 1768. let expr0_0 = C::emit(ctx, &pattern1_0); return Some(pattern1_1); } return None; } +// Generated as internal constructor for term push_alu_reg. +pub fn constructor_push_alu_reg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &ALUOp, + arg2: WritableReg, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + if let Some(pattern3_0) = C::real_reg(ctx, pattern2_0) { + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 1807. 
+ let expr0_0 = MInst::AluRRR { + alu_op: pattern1_0.clone(), + rd: pattern3_0, + rn: pattern4_0, + rm: pattern5_0, + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern3_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term push_alu_uimm32shifted. +pub fn constructor_push_alu_uimm32shifted( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &ALUOp, + arg2: WritableReg, + arg3: Reg, + arg4: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + if let Some(pattern3_0) = C::real_reg(ctx, pattern2_0) { + let pattern4_0 = arg3; + let closure5 = || { + return Some(pattern3_0); + }; + if let Some(pattern5_0) = closure5() { + if let Some(()) = C::same_reg(ctx, pattern4_0, pattern5_0) { + let pattern7_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 1813. + let expr0_0 = MInst::AluRUImm32Shifted { + alu_op: pattern1_0.clone(), + rd: pattern3_0, + imm: pattern7_0, + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern3_0); + return Some(expr2_0); + } + } + } + return None; +} + +// Generated as internal constructor for term push_shift. +pub fn constructor_push_shift( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &ShiftOp, + arg2: WritableReg, + arg3: Reg, + arg4: u8, + arg5: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + if let Some(pattern3_0) = C::real_reg(ctx, pattern2_0) { + let pattern4_0 = arg3; + let pattern5_0 = arg4; + let pattern6_0 = arg5; + // Rule at src/isa/s390x/inst.isle line 1819. 
+ let expr0_0 = MInst::ShiftRR { + shift_op: pattern1_0.clone(), + rd: pattern3_0, + rn: pattern4_0, + shift_imm: pattern5_0, + shift_reg: pattern6_0, + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern3_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term push_rxsbg. +pub fn constructor_push_rxsbg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &RxSBGOp, + arg2: WritableReg, + arg3: Reg, + arg4: Reg, + arg5: u8, + arg6: u8, + arg7: i8, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + if let Some(pattern3_0) = C::real_reg(ctx, pattern2_0) { + let pattern4_0 = arg3; + let closure5 = || { + return Some(pattern3_0); + }; + if let Some(pattern5_0) = closure5() { + if let Some(()) = C::same_reg(ctx, pattern4_0, pattern5_0) { + let pattern7_0 = arg4; + let pattern8_0 = arg5; + let pattern9_0 = arg6; + let pattern10_0 = arg7; + // Rule at src/isa/s390x/inst.isle line 1826. + let expr0_0 = MInst::RxSBG { + op: pattern1_0.clone(), + rd: pattern3_0, + rn: pattern7_0, + start_bit: pattern8_0, + end_bit: pattern9_0, + rotate_amt: pattern10_0, + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern3_0); + return Some(expr2_0); + } + } + } + return None; +} + +// Generated as internal constructor for term push_unary. +pub fn constructor_push_unary( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &UnaryOp, + arg2: WritableReg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + if let Some(pattern3_0) = C::real_reg(ctx, pattern2_0) { + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1833. 
+ let expr0_0 = MInst::UnaryRR { + op: pattern1_0.clone(), + rd: pattern3_0, + rn: pattern4_0, + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern3_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term push_atomic_cas32. +pub fn constructor_push_atomic_cas32( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: WritableReg, + arg2: Reg, + arg3: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(pattern2_0) = C::real_reg(ctx, pattern1_0) { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1839. + let expr0_0 = MInst::AtomicCas32 { + rd: pattern2_0, + rn: pattern3_0, + mem: pattern4_0.clone(), + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern2_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term push_atomic_cas64. +pub fn constructor_push_atomic_cas64( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: WritableReg, + arg2: Reg, + arg3: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(pattern2_0) = C::real_reg(ctx, pattern1_0) { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 1845. + let expr0_0 = MInst::AtomicCas64 { + rd: pattern2_0, + rn: pattern3_0, + mem: pattern4_0.clone(), + }; + let expr1_0 = C::inst_builder_push(ctx, pattern0_0, &expr0_0); + let expr2_0 = C::writable_reg_to_reg(ctx, pattern2_0); + return Some(expr2_0); + } + return None; +} + +// Generated as internal constructor for term push_break_if. 
+pub fn constructor_push_break_if( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &ProducesFlags, + arg2: &Cond, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let &ProducesFlags::ProducesFlags { + inst: ref pattern2_0, + result: pattern2_1, + } = pattern1_0 + { + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1851. + let expr0_0 = C::inst_builder_push(ctx, pattern0_0, &pattern2_0); + let expr1_0 = MInst::CondBreak { + cond: pattern3_0.clone(), + }; + let expr2_0 = C::inst_builder_push(ctx, pattern0_0, &expr1_0); + return Some(pattern2_1); + } + return None; +} + +// Generated as internal constructor for term emit_loop. +pub fn constructor_emit_loop( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: &Cond, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 1858. + let expr0_0 = C::inst_builder_finish(ctx, pattern0_0); + let expr1_0 = MInst::Loop { + body: expr0_0, + cond: pattern1_0.clone(), + }; + let expr2_0 = C::emit(ctx, &expr1_0); + return Some(expr2_0); +} + // Generated as internal constructor for term emit_mov. pub fn constructor_emit_mov( ctx: &mut C, @@ -2638,7 +2972,7 @@ pub fn constructor_emit_mov( if pattern0_0 == F32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1733. + // Rule at src/isa/s390x/inst.isle line 1873. let expr0_0 = MInst::FpuMove32 { rd: pattern2_0, rn: pattern3_0, @@ -2649,7 +2983,7 @@ pub fn constructor_emit_mov( if pattern0_0 == F64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1736. + // Rule at src/isa/s390x/inst.isle line 1876. let expr0_0 = MInst::FpuMove64 { rd: pattern2_0, rn: pattern3_0, @@ -2660,7 +2994,7 @@ pub fn constructor_emit_mov( if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1727. + // Rule at src/isa/s390x/inst.isle line 1867. 
let expr0_0 = MInst::Mov32 { rd: pattern2_0, rm: pattern3_0, @@ -2671,7 +3005,7 @@ pub fn constructor_emit_mov( if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1730. + // Rule at src/isa/s390x/inst.isle line 1870. let expr0_0 = MInst::Mov64 { rd: pattern2_0, rm: pattern3_0, @@ -2690,7 +3024,7 @@ pub fn constructor_copy_writable_reg( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1741. + // Rule at src/isa/s390x/inst.isle line 1881. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = constructor_emit_mov(ctx, pattern0_0, expr0_0, pattern1_0)?; return Some(expr0_0); @@ -2700,12 +3034,45 @@ pub fn constructor_copy_writable_reg( pub fn constructor_copy_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1748. + // Rule at src/isa/s390x/inst.isle line 1888. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern1_0)?; let expr1_0 = C::writable_reg_to_reg(ctx, expr0_0); return Some(expr1_0); } +// Generated as internal constructor for term emit_load. +pub fn constructor_emit_load( + ctx: &mut C, + arg0: Type, + arg1: WritableReg, + arg2: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1892. + let expr0_0 = MInst::Load32 { + rd: pattern2_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 1894. + let expr0_0 = MInst::Load64 { + rd: pattern2_0, + mem: pattern3_0.clone(), + }; + let expr1_0 = C::emit(ctx, &expr0_0); + return Some(expr1_0); + } + return None; +} + // Generated as internal constructor for term emit_imm. 
pub fn constructor_emit_imm( ctx: &mut C, @@ -2717,7 +3084,7 @@ pub fn constructor_emit_imm( if pattern0_0 == F32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1803. + // Rule at src/isa/s390x/inst.isle line 1950. let expr0_0 = C::u64_as_u32(ctx, pattern3_0); let expr1_0 = MInst::LoadFpuConst32 { rd: pattern2_0, @@ -2729,7 +3096,7 @@ pub fn constructor_emit_imm( if pattern0_0 == F64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1808. + // Rule at src/isa/s390x/inst.isle line 1955. let expr0_0 = MInst::LoadFpuConst64 { rd: pattern2_0, const_data: pattern3_0, @@ -2740,7 +3107,7 @@ pub fn constructor_emit_imm( if let Some(pattern1_0) = C::fits_in_16(ctx, pattern0_0) { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1757. + // Rule at src/isa/s390x/inst.isle line 1904. let expr0_0 = C::u64_as_i16(ctx, pattern3_0); let expr1_0 = MInst::Mov32SImm16 { rd: pattern2_0, @@ -2753,7 +3120,7 @@ pub fn constructor_emit_imm( let pattern2_0 = arg1; let pattern3_0 = arg2; if let Some(pattern4_0) = C::i16_from_u64(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1761. + // Rule at src/isa/s390x/inst.isle line 1908. let expr0_0 = MInst::Mov32SImm16 { rd: pattern2_0, imm: pattern4_0, @@ -2761,7 +3128,7 @@ pub fn constructor_emit_imm( let expr1_0 = C::emit(ctx, &expr0_0); return Some(expr1_0); } - // Rule at src/isa/s390x/inst.isle line 1765. + // Rule at src/isa/s390x/inst.isle line 1912. let expr0_0 = C::u64_as_u32(ctx, pattern3_0); let expr1_0 = MInst::Mov32Imm { rd: pattern2_0, @@ -2775,14 +3142,14 @@ pub fn constructor_emit_imm( let pattern3_0 = arg2; if let Some(pattern4_0) = C::u64_nonzero_hipart(ctx, pattern3_0) { if let Some(pattern5_0) = C::u64_nonzero_lopart(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1785. + // Rule at src/isa/s390x/inst.isle line 1932. 
let expr0_0 = constructor_emit_imm(ctx, pattern1_0, pattern2_0, pattern4_0)?; let expr1_0 = constructor_emit_insert_imm(ctx, pattern2_0, pattern5_0)?; return Some(expr1_0); } } if let Some(pattern4_0) = C::i16_from_u64(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1769. + // Rule at src/isa/s390x/inst.isle line 1916. let expr0_0 = MInst::Mov64SImm16 { rd: pattern2_0, imm: pattern4_0, @@ -2791,7 +3158,7 @@ pub fn constructor_emit_imm( return Some(expr1_0); } if let Some(pattern4_0) = C::i32_from_u64(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1773. + // Rule at src/isa/s390x/inst.isle line 1920. let expr0_0 = MInst::Mov64SImm32 { rd: pattern2_0, imm: pattern4_0, @@ -2800,7 +3167,7 @@ pub fn constructor_emit_imm( return Some(expr1_0); } if let Some(pattern4_0) = C::uimm32shifted_from_u64(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1781. + // Rule at src/isa/s390x/inst.isle line 1928. let expr0_0 = MInst::Mov64UImm32Shifted { rd: pattern2_0, imm: pattern4_0, @@ -2809,7 +3176,7 @@ pub fn constructor_emit_imm( return Some(expr1_0); } if let Some(pattern4_0) = C::uimm16shifted_from_u64(ctx, pattern3_0) { - // Rule at src/isa/s390x/inst.isle line 1777. + // Rule at src/isa/s390x/inst.isle line 1924. let expr0_0 = MInst::Mov64UImm16Shifted { rd: pattern2_0, imm: pattern4_0, @@ -2830,7 +3197,7 @@ pub fn constructor_emit_insert_imm( let pattern0_0 = arg0; let pattern1_0 = arg1; if let Some(pattern2_0) = C::uimm32shifted_from_u64(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1798. + // Rule at src/isa/s390x/inst.isle line 1945. let expr0_0 = MInst::Insert64UImm32Shifted { rd: pattern0_0, imm: pattern2_0, @@ -2839,7 +3206,7 @@ pub fn constructor_emit_insert_imm( return Some(expr1_0); } if let Some(pattern2_0) = C::uimm16shifted_from_u64(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1794. + // Rule at src/isa/s390x/inst.isle line 1941. 
let expr0_0 = MInst::Insert64UImm16Shifted { rd: pattern0_0, imm: pattern2_0, @@ -2854,7 +3221,7 @@ pub fn constructor_emit_insert_imm( pub fn constructor_imm(ctx: &mut C, arg0: Type, arg1: u64) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1813. + // Rule at src/isa/s390x/inst.isle line 1960. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = constructor_emit_imm(ctx, pattern0_0, expr0_0, pattern1_0)?; let expr2_0 = C::writable_reg_to_reg(ctx, expr0_0); @@ -2871,7 +3238,7 @@ pub fn constructor_imm_regpair_lo( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1821. + // Rule at src/isa/s390x/inst.isle line 1968. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern2_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr1_0, pattern1_0)?; @@ -2889,7 +3256,7 @@ pub fn constructor_imm_regpair_hi( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1829. + // Rule at src/isa/s390x/inst.isle line 1976. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern2_0)?; let expr1_0 = constructor_writable_regpair_hi(ctx, &expr0_0)?; let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr1_0, pattern1_0)?; @@ -2901,22 +3268,22 @@ pub fn constructor_imm_regpair_hi( pub fn constructor_ty_ext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 1839. + // Rule at src/isa/s390x/inst.isle line 1986. let expr0_0: Type = I32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 1840. + // Rule at src/isa/s390x/inst.isle line 1987. let expr0_0: Type = I32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 1841. + // Rule at src/isa/s390x/inst.isle line 1988. 
let expr0_0: Type = I32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 1842. + // Rule at src/isa/s390x/inst.isle line 1989. let expr0_0: Type = I64; return Some(expr0_0); } @@ -2927,22 +3294,22 @@ pub fn constructor_ty_ext32(ctx: &mut C, arg0: Type) -> Option pub fn constructor_ty_ext64(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 1846. + // Rule at src/isa/s390x/inst.isle line 1993. let expr0_0: Type = I64; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 1847. + // Rule at src/isa/s390x/inst.isle line 1994. let expr0_0: Type = I64; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 1848. + // Rule at src/isa/s390x/inst.isle line 1995. let expr0_0: Type = I64; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 1849. + // Rule at src/isa/s390x/inst.isle line 1996. let expr0_0: Type = I64; return Some(expr0_0); } @@ -2959,7 +3326,7 @@ pub fn constructor_emit_zext32_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1854. + // Rule at src/isa/s390x/inst.isle line 2001. let expr0_0: bool = false; let expr1_0 = C::ty_bits(ctx, pattern1_0); let expr2_0: u8 = 32; @@ -2984,7 +3351,7 @@ pub fn constructor_emit_sext32_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1860. + // Rule at src/isa/s390x/inst.isle line 2007. let expr0_0: bool = true; let expr1_0 = C::ty_bits(ctx, pattern1_0); let expr2_0: u8 = 32; @@ -3009,7 +3376,7 @@ pub fn constructor_emit_zext64_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1866. + // Rule at src/isa/s390x/inst.isle line 2013. 
let expr0_0: bool = false; let expr1_0 = C::ty_bits(ctx, pattern1_0); let expr2_0: u8 = 64; @@ -3034,7 +3401,7 @@ pub fn constructor_emit_sext64_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1872. + // Rule at src/isa/s390x/inst.isle line 2019. let expr0_0: bool = true; let expr1_0 = C::ty_bits(ctx, pattern1_0); let expr2_0: u8 = 64; @@ -3053,7 +3420,7 @@ pub fn constructor_emit_sext64_reg( pub fn constructor_zext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1878. + // Rule at src/isa/s390x/inst.isle line 2025. let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_zext32_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3065,7 +3432,7 @@ pub fn constructor_zext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> pub fn constructor_sext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1886. + // Rule at src/isa/s390x/inst.isle line 2033. let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_sext32_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3077,7 +3444,7 @@ pub fn constructor_sext32_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> pub fn constructor_zext64_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1894. + // Rule at src/isa/s390x/inst.isle line 2041. 
let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_zext64_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3089,7 +3456,7 @@ pub fn constructor_zext64_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> pub fn constructor_sext64_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1902. + // Rule at src/isa/s390x/inst.isle line 2049. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_sext64_reg(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3108,7 +3475,7 @@ pub fn constructor_emit_zext32_mem( let pattern1_0 = arg1; if pattern1_0 == I8 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1910. + // Rule at src/isa/s390x/inst.isle line 2057. let expr0_0 = MInst::Load32ZExt8 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3118,7 +3485,7 @@ pub fn constructor_emit_zext32_mem( } if pattern1_0 == I16 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1911. + // Rule at src/isa/s390x/inst.isle line 2058. let expr0_0 = MInst::Load32ZExt16 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3140,7 +3507,7 @@ pub fn constructor_emit_sext32_mem( let pattern1_0 = arg1; if pattern1_0 == I8 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1915. + // Rule at src/isa/s390x/inst.isle line 2062. let expr0_0 = MInst::Load32SExt8 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3150,7 +3517,7 @@ pub fn constructor_emit_sext32_mem( } if pattern1_0 == I16 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1916. + // Rule at src/isa/s390x/inst.isle line 2063. let expr0_0 = MInst::Load32SExt16 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3172,7 +3539,7 @@ pub fn constructor_emit_zext64_mem( let pattern1_0 = arg1; if pattern1_0 == I8 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1920. 
+ // Rule at src/isa/s390x/inst.isle line 2067. let expr0_0 = MInst::Load64ZExt8 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3182,7 +3549,7 @@ pub fn constructor_emit_zext64_mem( } if pattern1_0 == I16 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1921. + // Rule at src/isa/s390x/inst.isle line 2068. let expr0_0 = MInst::Load64ZExt16 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3192,7 +3559,7 @@ pub fn constructor_emit_zext64_mem( } if pattern1_0 == I32 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1922. + // Rule at src/isa/s390x/inst.isle line 2069. let expr0_0 = MInst::Load64ZExt32 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3214,7 +3581,7 @@ pub fn constructor_emit_sext64_mem( let pattern1_0 = arg1; if pattern1_0 == I8 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1926. + // Rule at src/isa/s390x/inst.isle line 2073. let expr0_0 = MInst::Load64SExt8 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3224,7 +3591,7 @@ pub fn constructor_emit_sext64_mem( } if pattern1_0 == I16 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1927. + // Rule at src/isa/s390x/inst.isle line 2074. let expr0_0 = MInst::Load64SExt16 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3234,7 +3601,7 @@ pub fn constructor_emit_sext64_mem( } if pattern1_0 == I32 { let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 1928. + // Rule at src/isa/s390x/inst.isle line 2075. let expr0_0 = MInst::Load64SExt32 { rd: pattern0_0, mem: pattern3_0.clone(), @@ -3249,7 +3616,7 @@ pub fn constructor_emit_sext64_mem( pub fn constructor_zext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1932. + // Rule at src/isa/s390x/inst.isle line 2079. 
let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_zext32_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3261,7 +3628,7 @@ pub fn constructor_zext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg pub fn constructor_sext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1939. + // Rule at src/isa/s390x/inst.isle line 2086. let expr0_0: Type = I32; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_sext32_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3273,7 +3640,7 @@ pub fn constructor_sext32_mem(ctx: &mut C, arg0: Type, arg1: &MemArg pub fn constructor_zext64_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1946. + // Rule at src/isa/s390x/inst.isle line 2093. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_zext64_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3285,7 +3652,7 @@ pub fn constructor_zext64_mem(ctx: &mut C, arg0: Type, arg1: &MemArg pub fn constructor_sext64_mem(ctx: &mut C, arg0: Type, arg1: &MemArg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 1953. + // Rule at src/isa/s390x/inst.isle line 2100. let expr0_0: Type = I64; let expr1_0 = C::temp_writable_reg(ctx, expr0_0); let expr2_0 = constructor_emit_sext64_mem(ctx, expr1_0, pattern0_0, pattern1_0)?; @@ -3303,7 +3670,7 @@ pub fn constructor_emit_put_in_reg_zext32( let pattern1_0 = arg1; let pattern2_0 = C::value_type(ctx, pattern1_0); if let Some(pattern3_0) = C::u64_from_value(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1961. + // Rule at src/isa/s390x/inst.isle line 2108. 
let expr0_0 = constructor_ty_ext32(ctx, pattern2_0)?; let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; return Some(expr1_0); @@ -3320,7 +3687,7 @@ pub fn constructor_emit_put_in_reg_zext32( { if let &Opcode::Load = &pattern6_0 { if let Some(()) = C::bigendian(ctx, pattern6_2) { - // Rule at src/isa/s390x/inst.isle line 1963. + // Rule at src/isa/s390x/inst.isle line 2110. let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; let expr1_0 = constructor_emit_zext32_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; @@ -3329,13 +3696,13 @@ pub fn constructor_emit_put_in_reg_zext32( } } } - // Rule at src/isa/s390x/inst.isle line 1965. + // Rule at src/isa/s390x/inst.isle line 2112. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_zext32_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern2_0) { - // Rule at src/isa/s390x/inst.isle line 1967. + // Rule at src/isa/s390x/inst.isle line 2114. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; return Some(expr1_0); @@ -3353,7 +3720,7 @@ pub fn constructor_emit_put_in_reg_sext32( let pattern1_0 = arg1; let pattern2_0 = C::value_type(ctx, pattern1_0); if let Some(pattern3_0) = C::u64_from_signed_value(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1972. + // Rule at src/isa/s390x/inst.isle line 2119. let expr0_0 = constructor_ty_ext32(ctx, pattern2_0)?; let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; return Some(expr1_0); @@ -3370,7 +3737,7 @@ pub fn constructor_emit_put_in_reg_sext32( { if let &Opcode::Load = &pattern6_0 { if let Some(()) = C::bigendian(ctx, pattern6_2) { - // Rule at src/isa/s390x/inst.isle line 1974. + // Rule at src/isa/s390x/inst.isle line 2121. 
let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; let expr1_0 = constructor_emit_sext32_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; @@ -3379,13 +3746,13 @@ pub fn constructor_emit_put_in_reg_sext32( } } } - // Rule at src/isa/s390x/inst.isle line 1976. + // Rule at src/isa/s390x/inst.isle line 2123. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_sext32_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern2_0) { - // Rule at src/isa/s390x/inst.isle line 1978. + // Rule at src/isa/s390x/inst.isle line 2125. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; return Some(expr1_0); @@ -3403,7 +3770,7 @@ pub fn constructor_emit_put_in_reg_zext64( let pattern1_0 = arg1; let pattern2_0 = C::value_type(ctx, pattern1_0); if let Some(pattern3_0) = C::u64_from_value(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1983. + // Rule at src/isa/s390x/inst.isle line 2130. let expr0_0 = constructor_ty_ext64(ctx, pattern2_0)?; let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; return Some(expr1_0); @@ -3420,7 +3787,7 @@ pub fn constructor_emit_put_in_reg_zext64( { if let &Opcode::Load = &pattern6_0 { if let Some(()) = C::bigendian(ctx, pattern6_2) { - // Rule at src/isa/s390x/inst.isle line 1985. + // Rule at src/isa/s390x/inst.isle line 2132. let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; let expr1_0 = constructor_emit_zext64_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; @@ -3429,13 +3796,13 @@ pub fn constructor_emit_put_in_reg_zext64( } } } - // Rule at src/isa/s390x/inst.isle line 1987. + // Rule at src/isa/s390x/inst.isle line 2134. 
let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_zext64_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::gpr64_ty(ctx, pattern2_0) { - // Rule at src/isa/s390x/inst.isle line 1989. + // Rule at src/isa/s390x/inst.isle line 2136. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; return Some(expr1_0); @@ -3453,7 +3820,7 @@ pub fn constructor_emit_put_in_reg_sext64( let pattern1_0 = arg1; let pattern2_0 = C::value_type(ctx, pattern1_0); if let Some(pattern3_0) = C::u64_from_signed_value(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 1994. + // Rule at src/isa/s390x/inst.isle line 2141. let expr0_0 = constructor_ty_ext64(ctx, pattern2_0)?; let expr1_0 = constructor_emit_imm(ctx, expr0_0, pattern0_0, pattern3_0)?; return Some(expr1_0); @@ -3470,7 +3837,7 @@ pub fn constructor_emit_put_in_reg_sext64( { if let &Opcode::Load = &pattern6_0 { if let Some(()) = C::bigendian(ctx, pattern6_2) { - // Rule at src/isa/s390x/inst.isle line 1996. + // Rule at src/isa/s390x/inst.isle line 2143. let expr0_0 = constructor_sink_load(ctx, pattern4_0)?; let expr1_0 = constructor_emit_sext64_mem(ctx, pattern0_0, pattern3_0, &expr0_0)?; @@ -3479,13 +3846,13 @@ pub fn constructor_emit_put_in_reg_sext64( } } } - // Rule at src/isa/s390x/inst.isle line 1998. + // Rule at src/isa/s390x/inst.isle line 2145. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_sext64_reg(ctx, pattern0_0, pattern3_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern3_0) = C::gpr64_ty(ctx, pattern2_0) { - // Rule at src/isa/s390x/inst.isle line 2000. + // Rule at src/isa/s390x/inst.isle line 2147. 
let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = constructor_emit_mov(ctx, pattern3_0, pattern0_0, expr0_0)?; return Some(expr1_0); @@ -3498,7 +3865,7 @@ pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if let Some(pattern2_0) = C::u64_from_value(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2005. + // Rule at src/isa/s390x/inst.isle line 2152. let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; return Some(expr1_0); @@ -3515,7 +3882,7 @@ pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Op { if let &Opcode::Load = &pattern5_0 { if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/inst.isle line 2007. + // Rule at src/isa/s390x/inst.isle line 2154. let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; let expr1_0 = constructor_zext32_mem(ctx, pattern2_0, &expr0_0)?; return Some(expr1_0); @@ -3523,13 +3890,13 @@ pub fn constructor_put_in_reg_zext32(ctx: &mut C, arg0: Value) -> Op } } } - // Rule at src/isa/s390x/inst.isle line 2009. + // Rule at src/isa/s390x/inst.isle line 2156. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0 = constructor_zext32_reg(ctx, pattern2_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 2011. + // Rule at src/isa/s390x/inst.isle line 2158. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } @@ -3541,7 +3908,7 @@ pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if let Some(pattern2_0) = C::u64_from_signed_value(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2016. + // Rule at src/isa/s390x/inst.isle line 2163. 
let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; return Some(expr1_0); @@ -3558,7 +3925,7 @@ pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Op { if let &Opcode::Load = &pattern5_0 { if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/inst.isle line 2018. + // Rule at src/isa/s390x/inst.isle line 2165. let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; let expr1_0 = constructor_sext32_mem(ctx, pattern2_0, &expr0_0)?; return Some(expr1_0); @@ -3566,13 +3933,13 @@ pub fn constructor_put_in_reg_sext32(ctx: &mut C, arg0: Value) -> Op } } } - // Rule at src/isa/s390x/inst.isle line 2020. + // Rule at src/isa/s390x/inst.isle line 2167. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0 = constructor_sext32_reg(ctx, pattern2_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 2022. + // Rule at src/isa/s390x/inst.isle line 2169. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } @@ -3584,7 +3951,7 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if let Some(pattern2_0) = C::u64_from_value(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2027. + // Rule at src/isa/s390x/inst.isle line 2174. let expr0_0 = constructor_ty_ext64(ctx, pattern1_0)?; let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; return Some(expr1_0); @@ -3601,7 +3968,7 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op { if let &Opcode::Load = &pattern5_0 { if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/inst.isle line 2029. + // Rule at src/isa/s390x/inst.isle line 2176. 
let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; let expr1_0 = constructor_zext64_mem(ctx, pattern2_0, &expr0_0)?; return Some(expr1_0); @@ -3609,13 +3976,13 @@ pub fn constructor_put_in_reg_zext64(ctx: &mut C, arg0: Value) -> Op } } } - // Rule at src/isa/s390x/inst.isle line 2031. + // Rule at src/isa/s390x/inst.isle line 2178. let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0 = constructor_zext64_reg(ctx, pattern2_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 2033. + // Rule at src/isa/s390x/inst.isle line 2180. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } @@ -3627,7 +3994,7 @@ pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Op let pattern0_0 = arg0; let pattern1_0 = C::value_type(ctx, pattern0_0); if let Some(pattern2_0) = C::u64_from_signed_value(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2038. + // Rule at src/isa/s390x/inst.isle line 2185. let expr0_0 = constructor_ty_ext64(ctx, pattern1_0)?; let expr1_0 = constructor_imm(ctx, expr0_0, pattern2_0)?; return Some(expr1_0); @@ -3644,7 +4011,7 @@ pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Op { if let &Opcode::Load = &pattern5_0 { if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/inst.isle line 2040. + // Rule at src/isa/s390x/inst.isle line 2187. let expr0_0 = constructor_sink_load(ctx, pattern3_0)?; let expr1_0 = constructor_sext64_mem(ctx, pattern2_0, &expr0_0)?; return Some(expr1_0); @@ -3652,13 +4019,13 @@ pub fn constructor_put_in_reg_sext64(ctx: &mut C, arg0: Value) -> Op } } } - // Rule at src/isa/s390x/inst.isle line 2042. + // Rule at src/isa/s390x/inst.isle line 2189. 
let expr0_0 = C::put_in_reg(ctx, pattern0_0); let expr1_0 = constructor_sext64_reg(ctx, pattern2_0, expr0_0)?; return Some(expr1_0); } if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { - // Rule at src/isa/s390x/inst.isle line 2044. + // Rule at src/isa/s390x/inst.isle line 2191. let expr0_0 = C::put_in_reg(ctx, pattern0_0); return Some(expr0_0); } @@ -3673,7 +4040,7 @@ pub fn constructor_put_in_regpair_lo_zext32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2050. + // Rule at src/isa/s390x/inst.isle line 2197. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_put_in_reg_zext32(ctx, expr1_0, pattern0_0)?; @@ -3689,7 +4056,7 @@ pub fn constructor_put_in_regpair_lo_sext32( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2058. + // Rule at src/isa/s390x/inst.isle line 2205. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_put_in_reg_sext32(ctx, expr1_0, pattern0_0)?; @@ -3705,7 +4072,7 @@ pub fn constructor_put_in_regpair_lo_zext64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2066. + // Rule at src/isa/s390x/inst.isle line 2213. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_put_in_reg_zext64(ctx, expr1_0, pattern0_0)?; @@ -3721,7 +4088,7 @@ pub fn constructor_put_in_regpair_lo_sext64( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2074. + // Rule at src/isa/s390x/inst.isle line 2221. 
let expr0_0 = constructor_copy_writable_regpair(ctx, pattern1_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_put_in_reg_sext64(ctx, expr1_0, pattern0_0)?; @@ -3742,7 +4109,7 @@ pub fn constructor_emit_cmov_imm( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2084. + // Rule at src/isa/s390x/inst.isle line 2231. let expr0_0 = MInst::CMov32SImm16 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3759,7 +4126,7 @@ pub fn constructor_emit_cmov_imm( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2087. + // Rule at src/isa/s390x/inst.isle line 2234. let expr0_0 = MInst::CMov64SImm16 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3787,7 +4154,7 @@ pub fn constructor_cmov_imm( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2093. + // Rule at src/isa/s390x/inst.isle line 2240. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern3_0)?; let expr1_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3807,7 +4174,7 @@ pub fn constructor_cmov_imm_regpair_lo( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2100. + // Rule at src/isa/s390x/inst.isle line 2247. let expr0_0 = constructor_copy_writable_regpair(ctx, pattern4_0)?; let expr1_0 = constructor_writable_regpair_lo(ctx, &expr0_0)?; let expr2_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr1_0, pattern2_0, pattern3_0)?; @@ -3830,7 +4197,7 @@ pub fn constructor_cmov_imm_regpair_hi( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2109. + // Rule at src/isa/s390x/inst.isle line 2256. 
let expr0_0 = constructor_copy_writable_regpair(ctx, pattern4_0)?; let expr1_0 = constructor_writable_regpair_hi(ctx, &expr0_0)?; let expr2_0 = constructor_emit_cmov_imm(ctx, pattern0_0, expr1_0, pattern2_0, pattern3_0)?; @@ -3852,7 +4219,7 @@ pub fn constructor_emit_cmov_reg( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2123. + // Rule at src/isa/s390x/inst.isle line 2270. let expr0_0 = MInst::FpuCMov32 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3869,7 +4236,7 @@ pub fn constructor_emit_cmov_reg( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2126. + // Rule at src/isa/s390x/inst.isle line 2273. let expr0_0 = MInst::FpuCMov64 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3886,7 +4253,7 @@ pub fn constructor_emit_cmov_reg( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2117. + // Rule at src/isa/s390x/inst.isle line 2264. let expr0_0 = MInst::CMov32 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3903,7 +4270,7 @@ pub fn constructor_emit_cmov_reg( let pattern2_0 = arg1; let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2120. + // Rule at src/isa/s390x/inst.isle line 2267. let expr0_0 = MInst::CMov64 { rd: pattern2_0, cond: pattern3_0.clone(), @@ -3931,7 +4298,7 @@ pub fn constructor_cmov_reg( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2132. + // Rule at src/isa/s390x/inst.isle line 2279. let expr0_0 = constructor_copy_writable_reg(ctx, pattern0_0, pattern3_0)?; let expr1_0 = constructor_emit_cmov_reg(ctx, pattern0_0, expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -3952,7 +4319,7 @@ pub fn constructor_trap_if( { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2140. + // Rule at src/isa/s390x/inst.isle line 2287. 
let expr0_0 = C::emit(ctx, &pattern1_0); let expr1_0 = MInst::TrapIf { cond: pattern2_0.clone(), @@ -3978,7 +4345,7 @@ pub fn constructor_icmps_reg_and_trap( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2146. + // Rule at src/isa/s390x/inst.isle line 2293. let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = MInst::CmpTrapRR { op: expr0_0, @@ -4006,7 +4373,7 @@ pub fn constructor_icmps_simm16_and_trap( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2152. + // Rule at src/isa/s390x/inst.isle line 2299. let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = MInst::CmpTrapRSImm16 { op: expr0_0, @@ -4034,7 +4401,7 @@ pub fn constructor_icmpu_reg_and_trap( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2158. + // Rule at src/isa/s390x/inst.isle line 2305. let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; let expr1_0 = MInst::CmpTrapRR { op: expr0_0, @@ -4062,7 +4429,7 @@ pub fn constructor_icmpu_uimm16_and_trap( let pattern2_0 = arg2; let pattern3_0 = arg3; let pattern4_0 = arg4; - // Rule at src/isa/s390x/inst.isle line 2164. + // Rule at src/isa/s390x/inst.isle line 2311. let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; let expr1_0 = MInst::CmpTrapRUImm16 { op: expr0_0, @@ -4082,7 +4449,7 @@ pub fn constructor_trap_impl( arg0: &TrapCode, ) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 2170. + // Rule at src/isa/s390x/inst.isle line 2317. let expr0_0 = MInst::Trap { trap_code: pattern0_0.clone(), }; @@ -4098,7 +4465,7 @@ pub fn constructor_trap_if_impl( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2174. + // Rule at src/isa/s390x/inst.isle line 2321. 
let expr0_0 = MInst::TrapIf { cond: pattern0_0.clone(), trap_code: pattern1_0.clone(), @@ -4109,7 +4476,7 @@ pub fn constructor_trap_if_impl( // Generated as internal constructor for term debugtrap_impl. pub fn constructor_debugtrap_impl(ctx: &mut C) -> Option { - // Rule at src/isa/s390x/inst.isle line 2178. + // Rule at src/isa/s390x/inst.isle line 2325. let expr0_0 = MInst::Debugtrap; let expr1_0 = SideEffectNoResult::Inst { inst: expr0_0 }; return Some(expr1_0); @@ -4123,7 +4490,7 @@ pub fn constructor_bool( ) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2189. + // Rule at src/isa/s390x/inst.isle line 2336. let expr0_0 = ProducesBool::ProducesBool { producer: pattern0_0.clone(), cond: pattern1_0.clone(), @@ -4142,7 +4509,7 @@ pub fn constructor_invert_bool( cond: ref pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 2193. + // Rule at src/isa/s390x/inst.isle line 2340. let expr0_0 = C::invert_cond(ctx, &pattern1_1); let expr1_0 = constructor_bool(ctx, &pattern1_0, &expr0_0)?; return Some(expr1_0); @@ -4158,7 +4525,7 @@ pub fn constructor_emit_producer(ctx: &mut C, arg0: &ProducesFlags) result: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 2202. + // Rule at src/isa/s390x/inst.isle line 2349. let expr0_0 = C::emit(ctx, &pattern1_0); return Some(expr0_0); } @@ -4173,7 +4540,7 @@ pub fn constructor_emit_consumer(ctx: &mut C, arg0: &ConsumesFlags) result: pattern1_1, } = pattern0_0 { - // Rule at src/isa/s390x/inst.isle line 2204. + // Rule at src/isa/s390x/inst.isle line 2351. let expr0_0 = C::emit(ctx, &pattern1_0); return Some(expr0_0); } @@ -4197,7 +4564,7 @@ pub fn constructor_select_bool_reg( { let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2208. + // Rule at src/isa/s390x/inst.isle line 2355. 
let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = constructor_emit_producer(ctx, &pattern2_0)?; let expr2_0 = constructor_emit_mov(ctx, pattern0_0, expr0_0, pattern4_0)?; @@ -4226,7 +4593,7 @@ pub fn constructor_select_bool_imm( { let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2217. + // Rule at src/isa/s390x/inst.isle line 2364. let expr0_0 = C::temp_writable_reg(ctx, pattern0_0); let expr1_0 = constructor_emit_producer(ctx, &pattern2_0)?; let expr2_0 = constructor_emit_imm(ctx, pattern0_0, expr0_0, pattern4_0)?; @@ -4247,7 +4614,7 @@ pub fn constructor_lower_bool( let pattern0_0 = arg0; if pattern0_0 == B1 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2227. + // Rule at src/isa/s390x/inst.isle line 2374. let expr0_0: Type = B1; let expr1_0: i16 = 1; let expr2_0: u64 = 0; @@ -4256,7 +4623,7 @@ pub fn constructor_lower_bool( } if pattern0_0 == B8 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2228. + // Rule at src/isa/s390x/inst.isle line 2375. let expr0_0: Type = B8; let expr1_0: i16 = -1; let expr2_0: u64 = 0; @@ -4265,7 +4632,7 @@ pub fn constructor_lower_bool( } if pattern0_0 == B16 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2229. + // Rule at src/isa/s390x/inst.isle line 2376. let expr0_0: Type = B16; let expr1_0: i16 = -1; let expr2_0: u64 = 0; @@ -4274,7 +4641,7 @@ pub fn constructor_lower_bool( } if pattern0_0 == B32 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2230. + // Rule at src/isa/s390x/inst.isle line 2377. let expr0_0: Type = B32; let expr1_0: i16 = -1; let expr2_0: u64 = 0; @@ -4283,7 +4650,7 @@ pub fn constructor_lower_bool( } if pattern0_0 == B64 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2231. + // Rule at src/isa/s390x/inst.isle line 2378. 
let expr0_0: Type = B64; let expr1_0: i16 = -1; let expr2_0: u64 = 0; @@ -4308,7 +4675,7 @@ pub fn constructor_cond_br_bool( { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2235. + // Rule at src/isa/s390x/inst.isle line 2382. let expr0_0 = constructor_emit_producer(ctx, &pattern1_0)?; let expr1_0 = constructor_cond_br(ctx, pattern2_0, pattern3_0, &pattern1_1)?; return Some(expr1_0); @@ -4329,7 +4696,7 @@ pub fn constructor_oneway_cond_br_bool( } = pattern0_0 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2241. + // Rule at src/isa/s390x/inst.isle line 2388. let expr0_0 = constructor_emit_producer(ctx, &pattern1_0)?; let expr1_0 = constructor_oneway_cond_br(ctx, pattern2_0, &pattern1_1)?; return Some(expr1_0); @@ -4350,7 +4717,7 @@ pub fn constructor_trap_if_bool( } = pattern0_0 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2247. + // Rule at src/isa/s390x/inst.isle line 2394. let expr0_0 = constructor_emit_producer(ctx, &pattern1_0)?; let expr1_0 = constructor_trap_if_impl(ctx, &pattern1_1, pattern2_0)?; return Some(expr1_0); @@ -4358,12 +4725,307 @@ pub fn constructor_trap_if_bool( return None; } +// Generated as internal constructor for term casloop_val_reg. +pub fn constructor_casloop_val_reg(ctx: &mut C) -> Option { + // Rule at src/isa/s390x/inst.isle line 2408. + let expr0_0: u8 = 0; + let expr1_0 = C::writable_gpr(ctx, expr0_0); + return Some(expr1_0); +} + +// Generated as internal constructor for term casloop_tmp_reg. +pub fn constructor_casloop_tmp_reg(ctx: &mut C) -> Option { + // Rule at src/isa/s390x/inst.isle line 2412. + let expr0_0: u8 = 1; + let expr1_0 = C::writable_gpr(ctx, expr0_0); + return Some(expr1_0); +} + +// Generated as internal constructor for term casloop_emit. 
+pub fn constructor_casloop_emit( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2421. + let expr0_0: i64 = 0; + let expr1_0 = C::memarg_reg_plus_off(ctx, pattern3_0, expr0_0, pattern2_0); + let expr2_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr3_0 = constructor_casloop_val_reg(ctx)?; + let expr4_0 = + constructor_push_atomic_cas(ctx, pattern0_0, expr2_0, expr3_0, pattern4_0, &expr1_0)?; + let expr5_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr6_0 = constructor_casloop_val_reg(ctx)?; + let expr7_0 = constructor_emit_load(ctx, expr5_0, expr6_0, &expr1_0)?; + let expr8_0 = IntCC::NotEqual; + let expr9_0 = C::intcc_as_cond(ctx, &expr8_0); + let expr10_0 = constructor_emit_loop(ctx, pattern0_0, &expr9_0)?; + return Some(expr4_0); +} + +// Generated as internal constructor for term casloop_result. +pub fn constructor_casloop_result( + ctx: &mut C, + arg0: Type, + arg1: MemFlags, + arg2: Reg, +) -> Option { + let pattern0_0 = arg0; + if let Some(pattern1_0) = C::ty_32_or_64(ctx, pattern0_0) { + let pattern2_0 = arg1; + if let Some(()) = C::littleendian(ctx, pattern2_0) { + let pattern4_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2439. + let expr0_0 = constructor_bswap_reg(ctx, pattern1_0, pattern4_0)?; + return Some(expr0_0); + } + if let Some(()) = C::bigendian(ctx, pattern2_0) { + let pattern4_0 = arg2; + // Rule at src/isa/s390x/inst.isle line 2437. + let expr0_0 = constructor_copy_reg(ctx, pattern1_0, pattern4_0)?; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term casloop. 
+pub fn constructor_casloop( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2444. + let expr0_0 = constructor_casloop_emit( + ctx, pattern0_0, pattern1_0, pattern2_0, pattern3_0, pattern4_0, + )?; + let expr1_0 = constructor_casloop_result(ctx, pattern1_0, pattern2_0, expr0_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term casloop_bitshift. +pub fn constructor_casloop_bitshift(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 2459. + let expr0_0: Type = I32; + let expr1_0: u8 = 3; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern0_0, expr1_0)?; + return Some(expr2_0); +} + +// Generated as internal constructor for term casloop_aligned_addr. +pub fn constructor_casloop_aligned_addr(ctx: &mut C, arg0: Reg) -> Option { + let pattern0_0 = arg0; + // Rule at src/isa/s390x/inst.isle line 2464. + let expr0_0: Type = I64; + let expr1_0: u16 = 65532; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm16shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_and_uimm16shifted(ctx, expr0_0, pattern0_0, expr3_0)?; + return Some(expr4_0); +} + +// Generated as internal constructor for term casloop_rotate_in. +pub fn constructor_casloop_rotate_in( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2474. 
+ let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 0; + let expr3_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern5_0, expr2_0, pattern4_0, + )?; + return Some(expr3_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2478. + let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 16; + let expr3_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern6_0, expr2_0, pattern5_0, + )?; + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2476. + let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 0; + let expr3_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern6_0, expr2_0, pattern5_0, + )?; + return Some(expr3_0); + } + } + return None; +} + +// Generated as internal constructor for term casloop_rotate_out. +pub fn constructor_casloop_rotate_out( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: Reg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2487. 
+ let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 0; + let expr3_0: Type = I32; + let expr4_0 = constructor_neg_reg(ctx, expr3_0, pattern4_0)?; + let expr5_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern5_0, expr2_0, expr4_0, + )?; + return Some(expr5_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2491. + let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 16; + let expr3_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern6_0, expr2_0, pattern5_0, + )?; + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2489. + let expr0_0: Type = I32; + let expr1_0 = constructor_casloop_tmp_reg(ctx)?; + let expr2_0: u8 = 0; + let expr3_0 = constructor_push_rot_imm_reg( + ctx, pattern0_0, expr0_0, expr1_0, pattern6_0, expr2_0, pattern5_0, + )?; + return Some(expr3_0); + } + } + return None; +} + +// Generated as internal constructor for term casloop_rotate_result. +pub fn constructor_casloop_rotate_result( + ctx: &mut C, + arg0: Type, + arg1: MemFlags, + arg2: Reg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I8 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2502. + let expr0_0: Type = I32; + let expr1_0: u8 = 8; + let expr2_0 = constructor_rot_imm_reg(ctx, expr0_0, pattern4_0, expr1_0, pattern3_0)?; + return Some(expr2_0); + } + if pattern0_0 == I16 { + let pattern2_0 = arg1; + if let Some(()) = C::littleendian(ctx, pattern2_0) { + let pattern4_0 = arg2; + let pattern5_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2506. 
+ let expr0_0: Type = I32; + let expr1_0: Type = I32; + let expr2_0 = constructor_rot_reg(ctx, expr1_0, pattern5_0, pattern4_0)?; + let expr3_0 = constructor_bswap_reg(ctx, expr0_0, expr2_0)?; + return Some(expr3_0); + } + if let Some(()) = C::bigendian(ctx, pattern2_0) { + let pattern4_0 = arg2; + let pattern5_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2504. + let expr0_0: Type = I32; + let expr1_0: u8 = 16; + let expr2_0 = constructor_rot_imm_reg(ctx, expr0_0, pattern5_0, expr1_0, pattern4_0)?; + return Some(expr2_0); + } + } + return None; +} + +// Generated as internal constructor for term casloop_subword. +pub fn constructor_casloop_subword( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: Reg, + arg4: Reg, + arg5: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + let pattern5_0 = arg5; + // Rule at src/isa/s390x/inst.isle line 2511. + let expr0_0 = constructor_casloop_emit( + ctx, pattern0_0, pattern1_0, pattern2_0, pattern3_0, pattern5_0, + )?; + let expr1_0 = + constructor_casloop_rotate_result(ctx, pattern1_0, pattern2_0, pattern4_0, expr0_0)?; + return Some(expr1_0); +} + // Generated as internal constructor for term clz_reg. pub fn constructor_clz_reg(ctx: &mut C, arg0: i16, arg1: Reg) -> Option { let pattern0_0 = arg0; if pattern0_0 == 64 { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2258. + // Rule at src/isa/s390x/inst.isle line 2522. let expr0_0 = constructor_temp_writable_regpair(ctx)?; let expr1_0 = MInst::Flogr { rn: pattern2_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -4371,7 +5033,7 @@ pub fn constructor_clz_reg(ctx: &mut C, arg0: i16, arg1: Reg) -> Opt return Some(expr3_0); } let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2267. + // Rule at src/isa/s390x/inst.isle line 2531. 
let expr0_0 = constructor_temp_writable_regpair(ctx)?; let expr1_0 = MInst::Flogr { rn: pattern1_0 }; let expr2_0 = C::emit(ctx, &expr1_0); @@ -4392,22 +5054,22 @@ pub fn constructor_clz_reg(ctx: &mut C, arg0: i16, arg1: Reg) -> Opt pub fn constructor_aluop_add(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 2278. + // Rule at src/isa/s390x/inst.isle line 2542. let expr0_0 = ALUOp::Add32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2279. + // Rule at src/isa/s390x/inst.isle line 2543. let expr0_0 = ALUOp::Add32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2280. + // Rule at src/isa/s390x/inst.isle line 2544. let expr0_0 = ALUOp::Add32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2281. + // Rule at src/isa/s390x/inst.isle line 2545. let expr0_0 = ALUOp::Add64; return Some(expr0_0); } @@ -4418,17 +5080,17 @@ pub fn constructor_aluop_add(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2284. + // Rule at src/isa/s390x/inst.isle line 2548. let expr0_0 = ALUOp::Add32Ext16; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2285. + // Rule at src/isa/s390x/inst.isle line 2549. let expr0_0 = ALUOp::Add32Ext16; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2286. + // Rule at src/isa/s390x/inst.isle line 2550. let expr0_0 = ALUOp::Add64Ext16; return Some(expr0_0); } @@ -4439,7 +5101,7 @@ pub fn constructor_aluop_add_sext16(ctx: &mut C, arg0: Type) -> Opti pub fn constructor_aluop_add_sext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2289. + // Rule at src/isa/s390x/inst.isle line 2553. 
let expr0_0 = ALUOp::Add64Ext32; return Some(expr0_0); } @@ -4456,7 +5118,7 @@ pub fn constructor_add_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2292. + // Rule at src/isa/s390x/inst.isle line 2556. let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4472,7 +5134,7 @@ pub fn constructor_add_reg_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2295. + // Rule at src/isa/s390x/inst.isle line 2559. let expr0_0 = constructor_aluop_add_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4488,7 +5150,7 @@ pub fn constructor_add_simm16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2298. + // Rule at src/isa/s390x/inst.isle line 2562. let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrsimm16(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4504,7 +5166,7 @@ pub fn constructor_add_simm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2301. + // Rule at src/isa/s390x/inst.isle line 2565. let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rsimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4520,7 +5182,7 @@ pub fn constructor_add_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2304. + // Rule at src/isa/s390x/inst.isle line 2568. 
let expr0_0 = constructor_aluop_add(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4536,7 +5198,7 @@ pub fn constructor_add_mem_sext16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2307. + // Rule at src/isa/s390x/inst.isle line 2571. let expr0_0 = constructor_aluop_add_sext16(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4552,7 +5214,7 @@ pub fn constructor_add_mem_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2310. + // Rule at src/isa/s390x/inst.isle line 2574. let expr0_0 = constructor_aluop_add_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4562,12 +5224,12 @@ pub fn constructor_add_mem_sext32( pub fn constructor_aluop_add_logical(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2316. + // Rule at src/isa/s390x/inst.isle line 2580. let expr0_0 = ALUOp::AddLogical32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2317. + // Rule at src/isa/s390x/inst.isle line 2581. let expr0_0 = ALUOp::AddLogical64; return Some(expr0_0); } @@ -4578,7 +5240,7 @@ pub fn constructor_aluop_add_logical(ctx: &mut C, arg0: Type) -> Opt pub fn constructor_aluop_add_logical_zext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2320. + // Rule at src/isa/s390x/inst.isle line 2584. 
let expr0_0 = ALUOp::AddLogical64Ext32; return Some(expr0_0); } @@ -4595,7 +5257,7 @@ pub fn constructor_add_logical_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2323. + // Rule at src/isa/s390x/inst.isle line 2587. let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4611,7 +5273,7 @@ pub fn constructor_add_logical_reg_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2326. + // Rule at src/isa/s390x/inst.isle line 2590. let expr0_0 = constructor_aluop_add_logical_zext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4627,7 +5289,7 @@ pub fn constructor_add_logical_zimm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2329. + // Rule at src/isa/s390x/inst.isle line 2593. let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4643,7 +5305,7 @@ pub fn constructor_add_logical_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2332. + // Rule at src/isa/s390x/inst.isle line 2596. let expr0_0 = constructor_aluop_add_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4659,7 +5321,7 @@ pub fn constructor_add_logical_mem_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2335. + // Rule at src/isa/s390x/inst.isle line 2599. 
let expr0_0 = constructor_aluop_add_logical_zext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4669,22 +5331,22 @@ pub fn constructor_add_logical_mem_zext32( pub fn constructor_aluop_sub(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 2341. + // Rule at src/isa/s390x/inst.isle line 2605. let expr0_0 = ALUOp::Sub32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2342. + // Rule at src/isa/s390x/inst.isle line 2606. let expr0_0 = ALUOp::Sub32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2343. + // Rule at src/isa/s390x/inst.isle line 2607. let expr0_0 = ALUOp::Sub32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2344. + // Rule at src/isa/s390x/inst.isle line 2608. let expr0_0 = ALUOp::Sub64; return Some(expr0_0); } @@ -4695,17 +5357,17 @@ pub fn constructor_aluop_sub(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2347. + // Rule at src/isa/s390x/inst.isle line 2611. let expr0_0 = ALUOp::Sub32Ext16; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2348. + // Rule at src/isa/s390x/inst.isle line 2612. let expr0_0 = ALUOp::Sub32Ext16; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2349. + // Rule at src/isa/s390x/inst.isle line 2613. let expr0_0 = ALUOp::Sub64Ext16; return Some(expr0_0); } @@ -4716,7 +5378,7 @@ pub fn constructor_aluop_sub_sext16(ctx: &mut C, arg0: Type) -> Opti pub fn constructor_aluop_sub_sext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2352. + // Rule at src/isa/s390x/inst.isle line 2616. 
let expr0_0 = ALUOp::Sub64Ext32; return Some(expr0_0); } @@ -4733,7 +5395,7 @@ pub fn constructor_sub_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2355. + // Rule at src/isa/s390x/inst.isle line 2619. let expr0_0 = constructor_aluop_sub(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4749,7 +5411,7 @@ pub fn constructor_sub_reg_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2358. + // Rule at src/isa/s390x/inst.isle line 2622. let expr0_0 = constructor_aluop_sub_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4765,7 +5427,7 @@ pub fn constructor_sub_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2361. + // Rule at src/isa/s390x/inst.isle line 2625. let expr0_0 = constructor_aluop_sub(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4781,7 +5443,7 @@ pub fn constructor_sub_mem_sext16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2364. + // Rule at src/isa/s390x/inst.isle line 2628. let expr0_0 = constructor_aluop_sub_sext16(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4797,7 +5459,7 @@ pub fn constructor_sub_mem_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2367. + // Rule at src/isa/s390x/inst.isle line 2631. 
let expr0_0 = constructor_aluop_sub_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4807,12 +5469,12 @@ pub fn constructor_sub_mem_sext32( pub fn constructor_aluop_sub_logical(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2373. + // Rule at src/isa/s390x/inst.isle line 2637. let expr0_0 = ALUOp::SubLogical32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2374. + // Rule at src/isa/s390x/inst.isle line 2638. let expr0_0 = ALUOp::SubLogical64; return Some(expr0_0); } @@ -4823,7 +5485,7 @@ pub fn constructor_aluop_sub_logical(ctx: &mut C, arg0: Type) -> Opt pub fn constructor_aluop_sub_logical_zext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2377. + // Rule at src/isa/s390x/inst.isle line 2641. let expr0_0 = ALUOp::SubLogical64Ext32; return Some(expr0_0); } @@ -4840,7 +5502,7 @@ pub fn constructor_sub_logical_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2380. + // Rule at src/isa/s390x/inst.isle line 2644. let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4856,7 +5518,7 @@ pub fn constructor_sub_logical_reg_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2383. + // Rule at src/isa/s390x/inst.isle line 2647. 
let expr0_0 = constructor_aluop_sub_logical_zext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4872,7 +5534,7 @@ pub fn constructor_sub_logical_zimm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2386. + // Rule at src/isa/s390x/inst.isle line 2650. let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4888,7 +5550,7 @@ pub fn constructor_sub_logical_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2389. + // Rule at src/isa/s390x/inst.isle line 2653. let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4904,7 +5566,7 @@ pub fn constructor_sub_logical_mem_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2392. + // Rule at src/isa/s390x/inst.isle line 2656. let expr0_0 = constructor_aluop_sub_logical(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4914,22 +5576,22 @@ pub fn constructor_sub_logical_mem_zext32( pub fn constructor_aluop_mul(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 2398. + // Rule at src/isa/s390x/inst.isle line 2662. let expr0_0 = ALUOp::Mul32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2399. + // Rule at src/isa/s390x/inst.isle line 2663. let expr0_0 = ALUOp::Mul32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2400. + // Rule at src/isa/s390x/inst.isle line 2664. 
let expr0_0 = ALUOp::Mul32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2401. + // Rule at src/isa/s390x/inst.isle line 2665. let expr0_0 = ALUOp::Mul64; return Some(expr0_0); } @@ -4940,17 +5602,17 @@ pub fn constructor_aluop_mul(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2404. + // Rule at src/isa/s390x/inst.isle line 2668. let expr0_0 = ALUOp::Mul32Ext16; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2405. + // Rule at src/isa/s390x/inst.isle line 2669. let expr0_0 = ALUOp::Mul32Ext16; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2406. + // Rule at src/isa/s390x/inst.isle line 2670. let expr0_0 = ALUOp::Mul64Ext16; return Some(expr0_0); } @@ -4961,7 +5623,7 @@ pub fn constructor_aluop_mul_sext16(ctx: &mut C, arg0: Type) -> Opti pub fn constructor_aluop_mul_sext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2409. + // Rule at src/isa/s390x/inst.isle line 2673. let expr0_0 = ALUOp::Mul64Ext32; return Some(expr0_0); } @@ -4978,7 +5640,7 @@ pub fn constructor_mul_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2412. + // Rule at src/isa/s390x/inst.isle line 2676. let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -4994,7 +5656,7 @@ pub fn constructor_mul_reg_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2415. + // Rule at src/isa/s390x/inst.isle line 2679. 
let expr0_0 = constructor_aluop_mul_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5010,7 +5672,7 @@ pub fn constructor_mul_simm16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2418. + // Rule at src/isa/s390x/inst.isle line 2682. let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rsimm16(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5026,7 +5688,7 @@ pub fn constructor_mul_simm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2421. + // Rule at src/isa/s390x/inst.isle line 2685. let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rsimm32(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5042,7 +5704,7 @@ pub fn constructor_mul_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2424. + // Rule at src/isa/s390x/inst.isle line 2688. let expr0_0 = constructor_aluop_mul(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5058,7 +5720,7 @@ pub fn constructor_mul_mem_sext16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2427. + // Rule at src/isa/s390x/inst.isle line 2691. let expr0_0 = constructor_aluop_mul_sext16(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5074,7 +5736,7 @@ pub fn constructor_mul_mem_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2430. + // Rule at src/isa/s390x/inst.isle line 2694. 
let expr0_0 = constructor_aluop_mul_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5091,14 +5753,14 @@ pub fn constructor_udivmod( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2436. + // Rule at src/isa/s390x/inst.isle line 2700. let expr0_0 = constructor_udivmod32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2437. + // Rule at src/isa/s390x/inst.isle line 2701. let expr0_0 = constructor_udivmod64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -5116,14 +5778,14 @@ pub fn constructor_sdivmod( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2443. + // Rule at src/isa/s390x/inst.isle line 2707. let expr0_0 = constructor_sdivmod32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2444. + // Rule at src/isa/s390x/inst.isle line 2708. let expr0_0 = constructor_sdivmod64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -5134,12 +5796,12 @@ pub fn constructor_sdivmod( pub fn constructor_aluop_and(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2450. + // Rule at src/isa/s390x/inst.isle line 2714. let expr0_0 = ALUOp::And32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2451. + // Rule at src/isa/s390x/inst.isle line 2715. 
let expr0_0 = ALUOp::And64; return Some(expr0_0); } @@ -5156,7 +5818,7 @@ pub fn constructor_and_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2454. + // Rule at src/isa/s390x/inst.isle line 2718. let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5172,7 +5834,7 @@ pub fn constructor_and_uimm16shifted( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2457. + // Rule at src/isa/s390x/inst.isle line 2721. let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm16shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; @@ -5189,7 +5851,7 @@ pub fn constructor_and_uimm32shifted( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2460. + // Rule at src/isa/s390x/inst.isle line 2724. let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; @@ -5206,7 +5868,7 @@ pub fn constructor_and_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2463. + // Rule at src/isa/s390x/inst.isle line 2727. let expr0_0 = constructor_aluop_and(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5216,12 +5878,12 @@ pub fn constructor_and_mem( pub fn constructor_aluop_or(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2469. + // Rule at src/isa/s390x/inst.isle line 2733. 
let expr0_0 = ALUOp::Orr32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2470. + // Rule at src/isa/s390x/inst.isle line 2734. let expr0_0 = ALUOp::Orr64; return Some(expr0_0); } @@ -5238,7 +5900,7 @@ pub fn constructor_or_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2473. + // Rule at src/isa/s390x/inst.isle line 2737. let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5254,7 +5916,7 @@ pub fn constructor_or_uimm16shifted( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2476. + // Rule at src/isa/s390x/inst.isle line 2740. let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm16shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; @@ -5271,7 +5933,7 @@ pub fn constructor_or_uimm32shifted( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2479. + // Rule at src/isa/s390x/inst.isle line 2743. let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; @@ -5288,7 +5950,7 @@ pub fn constructor_or_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2482. + // Rule at src/isa/s390x/inst.isle line 2746. 
let expr0_0 = constructor_aluop_or(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5298,12 +5960,12 @@ pub fn constructor_or_mem( pub fn constructor_aluop_xor(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2488. + // Rule at src/isa/s390x/inst.isle line 2752. let expr0_0 = ALUOp::Xor32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2489. + // Rule at src/isa/s390x/inst.isle line 2753. let expr0_0 = ALUOp::Xor64; return Some(expr0_0); } @@ -5320,7 +5982,7 @@ pub fn constructor_xor_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2492. + // Rule at src/isa/s390x/inst.isle line 2756. let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5336,7 +5998,7 @@ pub fn constructor_xor_uimm32shifted( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2495. + // Rule at src/isa/s390x/inst.isle line 2759. let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; let expr1_0 = constructor_alu_ruimm32shifted(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; @@ -5353,18 +6015,40 @@ pub fn constructor_xor_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2498. + // Rule at src/isa/s390x/inst.isle line 2762. let expr0_0 = constructor_aluop_xor(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rx(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); } +// Generated as internal constructor for term push_xor_uimm32shifted. 
+pub fn constructor_push_xor_uimm32shifted( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: WritableReg, + arg3: Reg, + arg4: UImm32Shifted, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2765. + let expr0_0 = constructor_aluop_xor(ctx, pattern1_0)?; + let expr1_0 = constructor_push_alu_uimm32shifted( + ctx, pattern0_0, &expr0_0, pattern2_0, pattern3_0, pattern4_0, + )?; + return Some(expr1_0); +} + // Generated as internal constructor for term not_reg. pub fn constructor_not_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2504. + // Rule at src/isa/s390x/inst.isle line 2771. let expr0_0: u32 = 4294967295; let expr1_0: u8 = 0; let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); @@ -5373,7 +6057,7 @@ pub fn constructor_not_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Op } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { let pattern2_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2506. + // Rule at src/isa/s390x/inst.isle line 2773. let expr0_0: u32 = 4294967295; let expr1_0: u8 = 0; let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); @@ -5387,16 +6071,59 @@ pub fn constructor_not_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Op return None; } +// Generated as internal constructor for term push_not_reg. +pub fn constructor_push_not_reg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: WritableReg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(pattern2_0) = C::gpr32_ty(ctx, pattern1_0) { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2779. 
+ let expr0_0: u32 = 4294967295; + let expr1_0: u8 = 0; + let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); + let expr3_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, pattern2_0, pattern3_0, pattern4_0, expr2_0, + )?; + return Some(expr3_0); + } + if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2781. + let expr0_0: u32 = 4294967295; + let expr1_0: u8 = 0; + let expr2_0 = C::uimm32shifted(ctx, expr0_0, expr1_0); + let expr3_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, pattern2_0, pattern3_0, pattern4_0, expr2_0, + )?; + let expr4_0: u32 = 4294967295; + let expr5_0: u8 = 32; + let expr6_0 = C::uimm32shifted(ctx, expr4_0, expr5_0); + let expr7_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, pattern2_0, pattern3_0, expr3_0, expr6_0, + )?; + return Some(expr7_0); + } + return None; +} + // Generated as internal constructor for term aluop_and_not. pub fn constructor_aluop_and_not(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2515. + // Rule at src/isa/s390x/inst.isle line 2789. let expr0_0 = ALUOp::AndNot32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2516. + // Rule at src/isa/s390x/inst.isle line 2790. let expr0_0 = ALUOp::AndNot64; return Some(expr0_0); } @@ -5413,7 +6140,7 @@ pub fn constructor_and_not_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2519. + // Rule at src/isa/s390x/inst.isle line 2793. 
let expr0_0 = constructor_aluop_and_not(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5423,12 +6150,12 @@ pub fn constructor_and_not_reg( pub fn constructor_aluop_or_not(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2525. + // Rule at src/isa/s390x/inst.isle line 2799. let expr0_0 = ALUOp::OrrNot32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2526. + // Rule at src/isa/s390x/inst.isle line 2800. let expr0_0 = ALUOp::OrrNot64; return Some(expr0_0); } @@ -5445,7 +6172,7 @@ pub fn constructor_or_not_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2529. + // Rule at src/isa/s390x/inst.isle line 2803. let expr0_0 = constructor_aluop_or_not(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5455,12 +6182,12 @@ pub fn constructor_or_not_reg( pub fn constructor_aluop_xor_not(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if let Some(pattern1_0) = C::gpr32_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2535. + // Rule at src/isa/s390x/inst.isle line 2809. let expr0_0 = ALUOp::XorNot32; return Some(expr0_0); } if let Some(pattern1_0) = C::gpr64_ty(ctx, pattern0_0) { - // Rule at src/isa/s390x/inst.isle line 2536. + // Rule at src/isa/s390x/inst.isle line 2810. let expr0_0 = ALUOp::XorNot64; return Some(expr0_0); } @@ -5477,7 +6204,7 @@ pub fn constructor_xor_not_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2539. + // Rule at src/isa/s390x/inst.isle line 2813. 
let expr0_0 = constructor_aluop_xor_not(ctx, pattern0_0)?; let expr1_0 = constructor_alu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5487,12 +6214,12 @@ pub fn constructor_xor_not_reg( pub fn constructor_unaryop_abs(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2545. + // Rule at src/isa/s390x/inst.isle line 2819. let expr0_0 = UnaryOp::Abs32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2546. + // Rule at src/isa/s390x/inst.isle line 2820. let expr0_0 = UnaryOp::Abs64; return Some(expr0_0); } @@ -5503,7 +6230,7 @@ pub fn constructor_unaryop_abs(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2549. + // Rule at src/isa/s390x/inst.isle line 2823. let expr0_0 = UnaryOp::Abs64Ext32; return Some(expr0_0); } @@ -5514,7 +6241,7 @@ pub fn constructor_unaryop_abs_sext32(ctx: &mut C, arg0: Type) -> Op pub fn constructor_abs_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2552. + // Rule at src/isa/s390x/inst.isle line 2826. let expr0_0 = constructor_unaryop_abs(ctx, pattern0_0)?; let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -5524,7 +6251,7 @@ pub fn constructor_abs_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Op pub fn constructor_abs_reg_sext32(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2555. + // Rule at src/isa/s390x/inst.isle line 2829. 
let expr0_0 = constructor_unaryop_abs_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -5534,22 +6261,22 @@ pub fn constructor_abs_reg_sext32(ctx: &mut C, arg0: Type, arg1: Reg pub fn constructor_unaryop_neg(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 2561. + // Rule at src/isa/s390x/inst.isle line 2835. let expr0_0 = UnaryOp::Neg32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2562. + // Rule at src/isa/s390x/inst.isle line 2836. let expr0_0 = UnaryOp::Neg32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2563. + // Rule at src/isa/s390x/inst.isle line 2837. let expr0_0 = UnaryOp::Neg32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2564. + // Rule at src/isa/s390x/inst.isle line 2838. let expr0_0 = UnaryOp::Neg64; return Some(expr0_0); } @@ -5560,7 +6287,7 @@ pub fn constructor_unaryop_neg(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2567. + // Rule at src/isa/s390x/inst.isle line 2841. let expr0_0 = UnaryOp::Neg64Ext32; return Some(expr0_0); } @@ -5571,7 +6298,7 @@ pub fn constructor_unaryop_neg_sext32(ctx: &mut C, arg0: Type) -> Op pub fn constructor_neg_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2570. + // Rule at src/isa/s390x/inst.isle line 2844. 
let expr0_0 = constructor_unaryop_neg(ctx, pattern0_0)?; let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -5581,22 +6308,66 @@ pub fn constructor_neg_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Op pub fn constructor_neg_reg_sext32(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2573. + // Rule at src/isa/s390x/inst.isle line 2847. let expr0_0 = constructor_unaryop_neg_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); } +// Generated as internal constructor for term unaryop_bswap. +pub fn constructor_unaryop_bswap(ctx: &mut C, arg0: Type) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + // Rule at src/isa/s390x/inst.isle line 2853. + let expr0_0 = UnaryOp::BSwap32; + return Some(expr0_0); + } + if pattern0_0 == I64 { + // Rule at src/isa/s390x/inst.isle line 2854. + let expr0_0 = UnaryOp::BSwap64; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term bswap_reg. +pub fn constructor_bswap_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + // Rule at src/isa/s390x/inst.isle line 2857. + let expr0_0 = constructor_unaryop_bswap(ctx, pattern0_0)?; + let expr1_0 = constructor_unary_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; + return Some(expr1_0); +} + +// Generated as internal constructor for term push_bswap_reg. +pub fn constructor_push_bswap_reg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: WritableReg, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2860. 
+ let expr0_0 = constructor_unaryop_bswap(ctx, pattern1_0)?; + let expr1_0 = constructor_push_unary(ctx, pattern0_0, &expr0_0, pattern2_0, pattern3_0)?; + return Some(expr1_0); +} + // Generated as internal constructor for term shiftop_rot. pub fn constructor_shiftop_rot(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2579. + // Rule at src/isa/s390x/inst.isle line 2866. let expr0_0 = ShiftOp::RotL32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2580. + // Rule at src/isa/s390x/inst.isle line 2867. let expr0_0 = ShiftOp::RotL64; return Some(expr0_0); } @@ -5613,7 +6384,7 @@ pub fn constructor_rot_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2583. + // Rule at src/isa/s390x/inst.isle line 2870. let expr0_0 = constructor_shiftop_rot(ctx, pattern0_0)?; let expr1_0: u8 = 0; let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; @@ -5630,33 +6401,77 @@ pub fn constructor_rot_imm( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2587. + // Rule at src/isa/s390x/inst.isle line 2874. let expr0_0 = constructor_shiftop_rot(ctx, pattern0_0)?; let expr1_0 = C::zero_reg(ctx); let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; return Some(expr2_0); } +// Generated as internal constructor for term rot_imm_reg. +pub fn constructor_rot_imm_reg( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: u8, + arg3: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2878. 
+ let expr0_0 = constructor_shiftop_rot(ctx, pattern0_0)?; + let expr1_0 = constructor_shift_rr( + ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, pattern3_0, + )?; + return Some(expr1_0); +} + +// Generated as internal constructor for term push_rot_imm_reg. +pub fn constructor_push_rot_imm_reg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: WritableReg, + arg3: Reg, + arg4: u8, + arg5: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + let pattern2_0 = arg2; + let pattern3_0 = arg3; + let pattern4_0 = arg4; + let pattern5_0 = arg5; + // Rule at src/isa/s390x/inst.isle line 2882. + let expr0_0 = constructor_shiftop_rot(ctx, pattern1_0)?; + let expr1_0 = constructor_push_shift( + ctx, pattern0_0, &expr0_0, pattern2_0, pattern3_0, pattern4_0, pattern5_0, + )?; + return Some(expr1_0); +} + // Generated as internal constructor for term shiftop_lshl. pub fn constructor_shiftop_lshl(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I8 { - // Rule at src/isa/s390x/inst.isle line 2594. + // Rule at src/isa/s390x/inst.isle line 2889. let expr0_0 = ShiftOp::LShL32; return Some(expr0_0); } if pattern0_0 == I16 { - // Rule at src/isa/s390x/inst.isle line 2595. + // Rule at src/isa/s390x/inst.isle line 2890. let expr0_0 = ShiftOp::LShL32; return Some(expr0_0); } if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2596. + // Rule at src/isa/s390x/inst.isle line 2891. let expr0_0 = ShiftOp::LShL32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2597. + // Rule at src/isa/s390x/inst.isle line 2892. let expr0_0 = ShiftOp::LShL64; return Some(expr0_0); } @@ -5673,7 +6488,7 @@ pub fn constructor_lshl_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2600. + // Rule at src/isa/s390x/inst.isle line 2895. 
let expr0_0 = constructor_shiftop_lshl(ctx, pattern0_0)?; let expr1_0: u8 = 0; let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; @@ -5690,7 +6505,7 @@ pub fn constructor_lshl_imm( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2604. + // Rule at src/isa/s390x/inst.isle line 2899. let expr0_0 = constructor_shiftop_lshl(ctx, pattern0_0)?; let expr1_0 = C::zero_reg(ctx); let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; @@ -5701,12 +6516,12 @@ pub fn constructor_lshl_imm( pub fn constructor_shiftop_lshr(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2611. + // Rule at src/isa/s390x/inst.isle line 2906. let expr0_0 = ShiftOp::LShR32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2612. + // Rule at src/isa/s390x/inst.isle line 2907. let expr0_0 = ShiftOp::LShR64; return Some(expr0_0); } @@ -5723,7 +6538,7 @@ pub fn constructor_lshr_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2615. + // Rule at src/isa/s390x/inst.isle line 2910. let expr0_0 = constructor_shiftop_lshr(ctx, pattern0_0)?; let expr1_0: u8 = 0; let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; @@ -5740,7 +6555,7 @@ pub fn constructor_lshr_imm( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2619. + // Rule at src/isa/s390x/inst.isle line 2914. 
let expr0_0 = constructor_shiftop_lshr(ctx, pattern0_0)?; let expr1_0 = C::zero_reg(ctx); let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; @@ -5751,12 +6566,12 @@ pub fn constructor_lshr_imm( pub fn constructor_shiftop_ashr(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2626. + // Rule at src/isa/s390x/inst.isle line 2921. let expr0_0 = ShiftOp::AShR32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2627. + // Rule at src/isa/s390x/inst.isle line 2922. let expr0_0 = ShiftOp::AShR64; return Some(expr0_0); } @@ -5773,7 +6588,7 @@ pub fn constructor_ashr_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2630. + // Rule at src/isa/s390x/inst.isle line 2925. let expr0_0 = constructor_shiftop_ashr(ctx, pattern0_0)?; let expr1_0: u8 = 0; let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, expr1_0, pattern2_0)?; @@ -5790,7 +6605,7 @@ pub fn constructor_ashr_imm( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2634. + // Rule at src/isa/s390x/inst.isle line 2929. let expr0_0 = constructor_shiftop_ashr(ctx, pattern0_0)?; let expr1_0 = C::zero_reg(ctx); let expr2_0 = constructor_shift_rr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0, expr1_0)?; @@ -5800,7 +6615,7 @@ pub fn constructor_ashr_imm( // Generated as internal constructor for term popcnt_byte. pub fn constructor_popcnt_byte(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 2641. + // Rule at src/isa/s390x/inst.isle line 2936. 
let expr0_0: Type = I64; let expr1_0 = UnaryOp::PopcntByte; let expr2_0 = constructor_unary_rr(ctx, expr0_0, &expr1_0, pattern0_0)?; @@ -5810,7 +6625,7 @@ pub fn constructor_popcnt_byte(ctx: &mut C, arg0: Reg) -> Option(ctx: &mut C, arg0: Reg) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/inst.isle line 2644. + // Rule at src/isa/s390x/inst.isle line 2939. let expr0_0: Type = I64; let expr1_0 = UnaryOp::PopcntReg; let expr2_0 = constructor_unary_rr(ctx, expr0_0, &expr1_0, pattern0_0)?; @@ -5828,7 +6643,7 @@ pub fn constructor_atomic_rmw_and( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2650. + // Rule at src/isa/s390x/inst.isle line 2945. let expr0_0: Type = I32; let expr1_0 = ALUOp::And32; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5837,7 +6652,7 @@ pub fn constructor_atomic_rmw_and( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2651. + // Rule at src/isa/s390x/inst.isle line 2946. let expr0_0: Type = I64; let expr1_0 = ALUOp::And64; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5857,7 +6672,7 @@ pub fn constructor_atomic_rmw_or( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2654. + // Rule at src/isa/s390x/inst.isle line 2949. let expr0_0: Type = I32; let expr1_0 = ALUOp::Orr32; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5866,7 +6681,7 @@ pub fn constructor_atomic_rmw_or( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2655. + // Rule at src/isa/s390x/inst.isle line 2950. 
let expr0_0: Type = I64; let expr1_0 = ALUOp::Orr64; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5886,7 +6701,7 @@ pub fn constructor_atomic_rmw_xor( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2658. + // Rule at src/isa/s390x/inst.isle line 2953. let expr0_0: Type = I32; let expr1_0 = ALUOp::Xor32; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5895,7 +6710,7 @@ pub fn constructor_atomic_rmw_xor( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2659. + // Rule at src/isa/s390x/inst.isle line 2954. let expr0_0: Type = I64; let expr1_0 = ALUOp::Xor64; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5915,7 +6730,7 @@ pub fn constructor_atomic_rmw_add( if pattern0_0 == I32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2662. + // Rule at src/isa/s390x/inst.isle line 2957. let expr0_0: Type = I32; let expr1_0 = ALUOp::Add32; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5924,7 +6739,7 @@ pub fn constructor_atomic_rmw_add( if pattern0_0 == I64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2663. + // Rule at src/isa/s390x/inst.isle line 2958. let expr0_0: Type = I64; let expr1_0 = ALUOp::Add64; let expr2_0 = constructor_atomic_rmw_impl(ctx, expr0_0, &expr1_0, pattern2_0, pattern3_0)?; @@ -5933,16 +6748,76 @@ pub fn constructor_atomic_rmw_add( return None; } +// Generated as internal constructor for term atomic_cas_impl. 
+pub fn constructor_atomic_cas_impl( + ctx: &mut C, + arg0: Type, + arg1: Reg, + arg2: Reg, + arg3: &MemArg, +) -> Option { + let pattern0_0 = arg0; + if pattern0_0 == I32 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2964. + let expr0_0 = constructor_atomic_cas32(ctx, pattern2_0, pattern3_0, pattern4_0)?; + return Some(expr0_0); + } + if pattern0_0 == I64 { + let pattern2_0 = arg1; + let pattern3_0 = arg2; + let pattern4_0 = arg3; + // Rule at src/isa/s390x/inst.isle line 2965. + let expr0_0 = constructor_atomic_cas64(ctx, pattern2_0, pattern3_0, pattern4_0)?; + return Some(expr0_0); + } + return None; +} + +// Generated as internal constructor for term push_atomic_cas. +pub fn constructor_push_atomic_cas( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: WritableReg, + arg3: Reg, + arg4: &MemArg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I32 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2968. + let expr0_0 = + constructor_push_atomic_cas32(ctx, pattern0_0, pattern3_0, pattern4_0, pattern5_0)?; + return Some(expr0_0); + } + if pattern1_0 == I64 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/inst.isle line 2969. + let expr0_0 = + constructor_push_atomic_cas64(ctx, pattern0_0, pattern3_0, pattern4_0, pattern5_0)?; + return Some(expr0_0); + } + return None; +} + // Generated as internal constructor for term fpuop2_add. pub fn constructor_fpuop2_add(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2669. + // Rule at src/isa/s390x/inst.isle line 2975. let expr0_0 = FPUOp2::Add32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2670. + // Rule at src/isa/s390x/inst.isle line 2976. 
let expr0_0 = FPUOp2::Add64; return Some(expr0_0); } @@ -5959,7 +6834,7 @@ pub fn constructor_fadd_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2673. + // Rule at src/isa/s390x/inst.isle line 2979. let expr0_0 = constructor_fpuop2_add(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -5969,12 +6844,12 @@ pub fn constructor_fadd_reg( pub fn constructor_fpuop2_sub(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2679. + // Rule at src/isa/s390x/inst.isle line 2985. let expr0_0 = FPUOp2::Sub32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2680. + // Rule at src/isa/s390x/inst.isle line 2986. let expr0_0 = FPUOp2::Sub64; return Some(expr0_0); } @@ -5991,7 +6866,7 @@ pub fn constructor_fsub_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2683. + // Rule at src/isa/s390x/inst.isle line 2989. let expr0_0 = constructor_fpuop2_sub(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6001,12 +6876,12 @@ pub fn constructor_fsub_reg( pub fn constructor_fpuop2_mul(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2689. + // Rule at src/isa/s390x/inst.isle line 2995. let expr0_0 = FPUOp2::Mul32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2690. + // Rule at src/isa/s390x/inst.isle line 2996. let expr0_0 = FPUOp2::Mul64; return Some(expr0_0); } @@ -6023,7 +6898,7 @@ pub fn constructor_fmul_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2693. 
+ // Rule at src/isa/s390x/inst.isle line 2999. let expr0_0 = constructor_fpuop2_mul(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6033,12 +6908,12 @@ pub fn constructor_fmul_reg( pub fn constructor_fpuop2_div(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2699. + // Rule at src/isa/s390x/inst.isle line 3005. let expr0_0 = FPUOp2::Div32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2700. + // Rule at src/isa/s390x/inst.isle line 3006. let expr0_0 = FPUOp2::Div64; return Some(expr0_0); } @@ -6055,7 +6930,7 @@ pub fn constructor_fdiv_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2703. + // Rule at src/isa/s390x/inst.isle line 3009. let expr0_0 = constructor_fpuop2_div(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6065,12 +6940,12 @@ pub fn constructor_fdiv_reg( pub fn constructor_fpuop2_min(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2709. + // Rule at src/isa/s390x/inst.isle line 3015. let expr0_0 = FPUOp2::Min32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2710. + // Rule at src/isa/s390x/inst.isle line 3016. let expr0_0 = FPUOp2::Min64; return Some(expr0_0); } @@ -6087,7 +6962,7 @@ pub fn constructor_fmin_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2713. + // Rule at src/isa/s390x/inst.isle line 3019. 
let expr0_0 = constructor_fpuop2_min(ctx, pattern0_0)?; let expr1_0 = constructor_fpuvec_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6097,12 +6972,12 @@ pub fn constructor_fmin_reg( pub fn constructor_fpuop2_max(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2719. + // Rule at src/isa/s390x/inst.isle line 3025. let expr0_0 = FPUOp2::Max32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2720. + // Rule at src/isa/s390x/inst.isle line 3026. let expr0_0 = FPUOp2::Max64; return Some(expr0_0); } @@ -6119,7 +6994,7 @@ pub fn constructor_fmax_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2723. + // Rule at src/isa/s390x/inst.isle line 3029. let expr0_0 = constructor_fpuop2_max(ctx, pattern0_0)?; let expr1_0 = constructor_fpuvec_rrr(ctx, pattern0_0, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6129,12 +7004,12 @@ pub fn constructor_fmax_reg( pub fn constructor_fpuop3_fma(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2729. + // Rule at src/isa/s390x/inst.isle line 3035. let expr0_0 = FPUOp3::MAdd32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2730. + // Rule at src/isa/s390x/inst.isle line 3036. let expr0_0 = FPUOp3::MAdd64; return Some(expr0_0); } @@ -6153,7 +7028,7 @@ pub fn constructor_fma_reg( let pattern1_0 = arg1; let pattern2_0 = arg2; let pattern3_0 = arg3; - // Rule at src/isa/s390x/inst.isle line 2733. + // Rule at src/isa/s390x/inst.isle line 3039. 
let expr0_0 = constructor_fpuop3_fma(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rrrr( ctx, pattern0_0, &expr0_0, pattern3_0, pattern1_0, pattern2_0, @@ -6165,12 +7040,12 @@ pub fn constructor_fma_reg( pub fn constructor_fpuop1_sqrt(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2739. + // Rule at src/isa/s390x/inst.isle line 3045. let expr0_0 = FPUOp1::Sqrt32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2740. + // Rule at src/isa/s390x/inst.isle line 3046. let expr0_0 = FPUOp1::Sqrt64; return Some(expr0_0); } @@ -6181,7 +7056,7 @@ pub fn constructor_fpuop1_sqrt(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2743. + // Rule at src/isa/s390x/inst.isle line 3049. let expr0_0 = constructor_fpuop1_sqrt(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6191,12 +7066,12 @@ pub fn constructor_sqrt_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> O pub fn constructor_fpuop1_neg(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2749. + // Rule at src/isa/s390x/inst.isle line 3055. let expr0_0 = FPUOp1::Neg32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2750. + // Rule at src/isa/s390x/inst.isle line 3056. let expr0_0 = FPUOp1::Neg64; return Some(expr0_0); } @@ -6207,7 +7082,7 @@ pub fn constructor_fpuop1_neg(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2753. + // Rule at src/isa/s390x/inst.isle line 3059. 
let expr0_0 = constructor_fpuop1_neg(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6217,12 +7092,12 @@ pub fn constructor_fneg_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> O pub fn constructor_fpuop1_abs(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2759. + // Rule at src/isa/s390x/inst.isle line 3065. let expr0_0 = FPUOp1::Abs32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2760. + // Rule at src/isa/s390x/inst.isle line 3066. let expr0_0 = FPUOp1::Abs64; return Some(expr0_0); } @@ -6233,7 +7108,7 @@ pub fn constructor_fpuop1_abs(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2763. + // Rule at src/isa/s390x/inst.isle line 3069. let expr0_0 = constructor_fpuop1_abs(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6243,12 +7118,12 @@ pub fn constructor_fabs_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> O pub fn constructor_fpuroundmode_ceil(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2769. + // Rule at src/isa/s390x/inst.isle line 3075. let expr0_0 = FpuRoundMode::Plus32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2770. + // Rule at src/isa/s390x/inst.isle line 3076. let expr0_0 = FpuRoundMode::Plus64; return Some(expr0_0); } @@ -6259,7 +7134,7 @@ pub fn constructor_fpuroundmode_ceil(ctx: &mut C, arg0: Type) -> Opt pub fn constructor_ceil_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2773. + // Rule at src/isa/s390x/inst.isle line 3079. 
let expr0_0 = constructor_fpuroundmode_ceil(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6269,12 +7144,12 @@ pub fn constructor_ceil_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> O pub fn constructor_fpuroundmode_floor(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2779. + // Rule at src/isa/s390x/inst.isle line 3085. let expr0_0 = FpuRoundMode::Minus32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2780. + // Rule at src/isa/s390x/inst.isle line 3086. let expr0_0 = FpuRoundMode::Minus64; return Some(expr0_0); } @@ -6285,7 +7160,7 @@ pub fn constructor_fpuroundmode_floor(ctx: &mut C, arg0: Type) -> Op pub fn constructor_floor_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2783. + // Rule at src/isa/s390x/inst.isle line 3089. let expr0_0 = constructor_fpuroundmode_floor(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6295,12 +7170,12 @@ pub fn constructor_floor_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> pub fn constructor_fpuroundmode_trunc(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2789. + // Rule at src/isa/s390x/inst.isle line 3095. let expr0_0 = FpuRoundMode::Zero32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2790. + // Rule at src/isa/s390x/inst.isle line 3096. let expr0_0 = FpuRoundMode::Zero64; return Some(expr0_0); } @@ -6311,7 +7186,7 @@ pub fn constructor_fpuroundmode_trunc(ctx: &mut C, arg0: Type) -> Op pub fn constructor_trunc_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2793. 
+ // Rule at src/isa/s390x/inst.isle line 3099. let expr0_0 = constructor_fpuroundmode_trunc(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6324,12 +7199,12 @@ pub fn constructor_fpuroundmode_nearest( ) -> Option { let pattern0_0 = arg0; if pattern0_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2799. + // Rule at src/isa/s390x/inst.isle line 3105. let expr0_0 = FpuRoundMode::Nearest32; return Some(expr0_0); } if pattern0_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2800. + // Rule at src/isa/s390x/inst.isle line 3106. let expr0_0 = FpuRoundMode::Nearest64; return Some(expr0_0); } @@ -6340,7 +7215,7 @@ pub fn constructor_fpuroundmode_nearest( pub fn constructor_nearest_reg(ctx: &mut C, arg0: Type, arg1: Reg) -> Option { let pattern0_0 = arg0; let pattern1_0 = arg1; - // Rule at src/isa/s390x/inst.isle line 2803. + // Rule at src/isa/s390x/inst.isle line 3109. let expr0_0 = constructor_fpuroundmode_nearest(ctx, pattern0_0)?; let expr1_0 = constructor_fpu_round(ctx, pattern0_0, &expr0_0, pattern1_0)?; return Some(expr1_0); @@ -6356,7 +7231,7 @@ pub fn constructor_fpuop1_promote( if pattern0_0 == F64 { let pattern2_0 = arg1; if pattern2_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2809. + // Rule at src/isa/s390x/inst.isle line 3115. let expr0_0 = FPUOp1::Cvt32To64; return Some(expr0_0); } @@ -6374,7 +7249,7 @@ pub fn constructor_fpromote_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2812. + // Rule at src/isa/s390x/inst.isle line 3118. let expr0_0 = constructor_fpuop1_promote(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6390,7 +7265,7 @@ pub fn constructor_fpuop1_demote( if pattern0_0 == F32 { let pattern2_0 = arg1; if pattern2_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2819. 
+ // Rule at src/isa/s390x/inst.isle line 3125. let expr0_0 = FPUOp1::Cvt64To32; return Some(expr0_0); } @@ -6408,7 +7283,7 @@ pub fn constructor_fdemote_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2822. + // Rule at src/isa/s390x/inst.isle line 3128. let expr0_0 = constructor_fpuop1_demote(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_fpu_rr(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6424,12 +7299,12 @@ pub fn constructor_uint_to_fpu_op( if pattern0_0 == F32 { let pattern2_0 = arg1; if pattern2_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2829. + // Rule at src/isa/s390x/inst.isle line 3135. let expr0_0 = IntToFpuOp::U32ToF32; return Some(expr0_0); } if pattern2_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2831. + // Rule at src/isa/s390x/inst.isle line 3137. let expr0_0 = IntToFpuOp::U64ToF32; return Some(expr0_0); } @@ -6437,12 +7312,12 @@ pub fn constructor_uint_to_fpu_op( if pattern0_0 == F64 { let pattern2_0 = arg1; if pattern2_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2830. + // Rule at src/isa/s390x/inst.isle line 3136. let expr0_0 = IntToFpuOp::U32ToF64; return Some(expr0_0); } if pattern2_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2832. + // Rule at src/isa/s390x/inst.isle line 3138. let expr0_0 = IntToFpuOp::U64ToF64; return Some(expr0_0); } @@ -6460,7 +7335,7 @@ pub fn constructor_fcvt_from_uint_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2835. + // Rule at src/isa/s390x/inst.isle line 3141. let expr0_0 = constructor_uint_to_fpu_op(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_int_to_fpu(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6476,12 +7351,12 @@ pub fn constructor_sint_to_fpu_op( if pattern0_0 == F32 { let pattern2_0 = arg1; if pattern2_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2842. 
+ // Rule at src/isa/s390x/inst.isle line 3148. let expr0_0 = IntToFpuOp::I32ToF32; return Some(expr0_0); } if pattern2_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2844. + // Rule at src/isa/s390x/inst.isle line 3150. let expr0_0 = IntToFpuOp::I64ToF32; return Some(expr0_0); } @@ -6489,12 +7364,12 @@ pub fn constructor_sint_to_fpu_op( if pattern0_0 == F64 { let pattern2_0 = arg1; if pattern2_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2843. + // Rule at src/isa/s390x/inst.isle line 3149. let expr0_0 = IntToFpuOp::I32ToF64; return Some(expr0_0); } if pattern2_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2845. + // Rule at src/isa/s390x/inst.isle line 3151. let expr0_0 = IntToFpuOp::I64ToF64; return Some(expr0_0); } @@ -6512,7 +7387,7 @@ pub fn constructor_fcvt_from_sint_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2848. + // Rule at src/isa/s390x/inst.isle line 3154. let expr0_0 = constructor_sint_to_fpu_op(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_int_to_fpu(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6528,12 +7403,12 @@ pub fn constructor_fpu_to_uint_op( if pattern0_0 == I32 { let pattern2_0 = arg1; if pattern2_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2855. + // Rule at src/isa/s390x/inst.isle line 3161. let expr0_0 = FpuToIntOp::F32ToU32; return Some(expr0_0); } if pattern2_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2856. + // Rule at src/isa/s390x/inst.isle line 3162. let expr0_0 = FpuToIntOp::F64ToU32; return Some(expr0_0); } @@ -6541,12 +7416,12 @@ pub fn constructor_fpu_to_uint_op( if pattern0_0 == I64 { let pattern2_0 = arg1; if pattern2_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2857. + // Rule at src/isa/s390x/inst.isle line 3163. let expr0_0 = FpuToIntOp::F32ToU64; return Some(expr0_0); } if pattern2_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2858. 
+ // Rule at src/isa/s390x/inst.isle line 3164. let expr0_0 = FpuToIntOp::F64ToU64; return Some(expr0_0); } @@ -6564,7 +7439,7 @@ pub fn constructor_fcvt_to_uint_reg_with_flags( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2861. + // Rule at src/isa/s390x/inst.isle line 3167. let expr0_0 = constructor_fpu_to_uint_op(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_fpu_to_int(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6580,7 +7455,7 @@ pub fn constructor_fcvt_to_uint_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2865. + // Rule at src/isa/s390x/inst.isle line 3171. let expr0_0 = constructor_fcvt_to_uint_reg_with_flags(ctx, pattern0_0, pattern1_0, pattern2_0)?; let expr1_0 = constructor_drop_flags(ctx, &expr0_0)?; return Some(expr1_0); @@ -6596,12 +7471,12 @@ pub fn constructor_fpu_to_sint_op( if pattern0_0 == I32 { let pattern2_0 = arg1; if pattern2_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2872. + // Rule at src/isa/s390x/inst.isle line 3178. let expr0_0 = FpuToIntOp::F32ToI32; return Some(expr0_0); } if pattern2_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2873. + // Rule at src/isa/s390x/inst.isle line 3179. let expr0_0 = FpuToIntOp::F64ToI32; return Some(expr0_0); } @@ -6609,12 +7484,12 @@ pub fn constructor_fpu_to_sint_op( if pattern0_0 == I64 { let pattern2_0 = arg1; if pattern2_0 == F32 { - // Rule at src/isa/s390x/inst.isle line 2874. + // Rule at src/isa/s390x/inst.isle line 3180. let expr0_0 = FpuToIntOp::F32ToI64; return Some(expr0_0); } if pattern2_0 == F64 { - // Rule at src/isa/s390x/inst.isle line 2875. + // Rule at src/isa/s390x/inst.isle line 3181. 
let expr0_0 = FpuToIntOp::F64ToI64; return Some(expr0_0); } @@ -6632,7 +7507,7 @@ pub fn constructor_fcvt_to_sint_reg_with_flags( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2878. + // Rule at src/isa/s390x/inst.isle line 3184. let expr0_0 = constructor_fpu_to_sint_op(ctx, pattern0_0, pattern1_0)?; let expr1_0 = constructor_fpu_to_int(ctx, pattern0_0, &expr0_0, pattern2_0)?; return Some(expr1_0); @@ -6648,7 +7523,7 @@ pub fn constructor_fcvt_to_sint_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2882. + // Rule at src/isa/s390x/inst.isle line 3188. let expr0_0 = constructor_fcvt_to_sint_reg_with_flags(ctx, pattern0_0, pattern1_0, pattern2_0)?; let expr1_0 = constructor_drop_flags(ctx, &expr0_0)?; return Some(expr1_0); @@ -6658,12 +7533,12 @@ pub fn constructor_fcvt_to_sint_reg( pub fn constructor_cmpop_cmps(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2889. + // Rule at src/isa/s390x/inst.isle line 3195. let expr0_0 = CmpOp::CmpS32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2890. + // Rule at src/isa/s390x/inst.isle line 3196. let expr0_0 = CmpOp::CmpS64; return Some(expr0_0); } @@ -6674,12 +7549,12 @@ pub fn constructor_cmpop_cmps(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2893. + // Rule at src/isa/s390x/inst.isle line 3199. let expr0_0 = CmpOp::CmpS32Ext16; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2894. + // Rule at src/isa/s390x/inst.isle line 3200. 
let expr0_0 = CmpOp::CmpS64Ext16; return Some(expr0_0); } @@ -6690,7 +7565,7 @@ pub fn constructor_cmpop_cmps_sext16(ctx: &mut C, arg0: Type) -> Opt pub fn constructor_cmpop_cmps_sext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2897. + // Rule at src/isa/s390x/inst.isle line 3203. let expr0_0 = CmpOp::CmpS64Ext32; return Some(expr0_0); } @@ -6707,7 +7582,7 @@ pub fn constructor_icmps_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2900. + // Rule at src/isa/s390x/inst.isle line 3206. let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6723,7 +7598,7 @@ pub fn constructor_icmps_reg_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2903. + // Rule at src/isa/s390x/inst.isle line 3209. let expr0_0 = constructor_cmpop_cmps_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6739,7 +7614,7 @@ pub fn constructor_icmps_simm16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2906. + // Rule at src/isa/s390x/inst.isle line 3212. let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rsimm16(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6755,7 +7630,7 @@ pub fn constructor_icmps_simm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2909. + // Rule at src/isa/s390x/inst.isle line 3215. 
let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rsimm32(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6771,7 +7646,7 @@ pub fn constructor_icmps_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2912. + // Rule at src/isa/s390x/inst.isle line 3218. let expr0_0 = constructor_cmpop_cmps(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6787,7 +7662,7 @@ pub fn constructor_icmps_mem_sext16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2915. + // Rule at src/isa/s390x/inst.isle line 3221. let expr0_0 = constructor_cmpop_cmps_sext16(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6803,7 +7678,7 @@ pub fn constructor_icmps_mem_sext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2918. + // Rule at src/isa/s390x/inst.isle line 3224. let expr0_0 = constructor_cmpop_cmps_sext32(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6813,12 +7688,12 @@ pub fn constructor_icmps_mem_sext32( pub fn constructor_cmpop_cmpu(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2924. + // Rule at src/isa/s390x/inst.isle line 3230. let expr0_0 = CmpOp::CmpL32; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2925. + // Rule at src/isa/s390x/inst.isle line 3231. 
let expr0_0 = CmpOp::CmpL64; return Some(expr0_0); } @@ -6829,12 +7704,12 @@ pub fn constructor_cmpop_cmpu(ctx: &mut C, arg0: Type) -> Option(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I32 { - // Rule at src/isa/s390x/inst.isle line 2928. + // Rule at src/isa/s390x/inst.isle line 3234. let expr0_0 = CmpOp::CmpL32Ext16; return Some(expr0_0); } if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2929. + // Rule at src/isa/s390x/inst.isle line 3235. let expr0_0 = CmpOp::CmpL64Ext16; return Some(expr0_0); } @@ -6845,7 +7720,7 @@ pub fn constructor_cmpop_cmpu_zext16(ctx: &mut C, arg0: Type) -> Opt pub fn constructor_cmpop_cmpu_zext32(ctx: &mut C, arg0: Type) -> Option { let pattern0_0 = arg0; if pattern0_0 == I64 { - // Rule at src/isa/s390x/inst.isle line 2932. + // Rule at src/isa/s390x/inst.isle line 3238. let expr0_0 = CmpOp::CmpL64Ext32; return Some(expr0_0); } @@ -6862,7 +7737,7 @@ pub fn constructor_icmpu_reg( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2935. + // Rule at src/isa/s390x/inst.isle line 3241. let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6878,7 +7753,7 @@ pub fn constructor_icmpu_reg_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2938. + // Rule at src/isa/s390x/inst.isle line 3244. let expr0_0 = constructor_cmpop_cmpu_zext32(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rr(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6894,7 +7769,7 @@ pub fn constructor_icmpu_uimm32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2941. + // Rule at src/isa/s390x/inst.isle line 3247. 
let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_ruimm32(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6910,7 +7785,7 @@ pub fn constructor_icmpu_mem( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2944. + // Rule at src/isa/s390x/inst.isle line 3250. let expr0_0 = constructor_cmpop_cmpu(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6926,7 +7801,7 @@ pub fn constructor_icmpu_mem_zext16( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2947. + // Rule at src/isa/s390x/inst.isle line 3253. let expr0_0 = constructor_cmpop_cmpu_zext16(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6942,7 +7817,7 @@ pub fn constructor_icmpu_mem_zext32( let pattern0_0 = arg0; let pattern1_0 = arg1; let pattern2_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2950. + // Rule at src/isa/s390x/inst.isle line 3256. let expr0_0 = constructor_cmpop_cmpu_zext32(ctx, pattern0_0)?; let expr1_0 = constructor_cmp_rx(ctx, &expr0_0, pattern1_0, pattern2_0)?; return Some(expr1_0); @@ -6959,14 +7834,14 @@ pub fn constructor_fcmp_reg( if pattern0_0 == F32 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2956. + // Rule at src/isa/s390x/inst.isle line 3262. let expr0_0 = constructor_fpu_cmp32(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } if pattern0_0 == F64 { let pattern2_0 = arg1; let pattern3_0 = arg2; - // Rule at src/isa/s390x/inst.isle line 2957. + // Rule at src/isa/s390x/inst.isle line 3263. 
let expr0_0 = constructor_fpu_cmp64(ctx, pattern2_0, pattern3_0)?; return Some(expr0_0); } @@ -6983,7 +7858,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { match &pattern2_0 { &Opcode::Debugtrap => { - // Rule at src/isa/s390x/lower.isle line 1892. + // Rule at src/isa/s390x/lower.isle line 2173. let expr0_0 = constructor_debugtrap_impl(ctx)?; let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; return Some(expr1_0); @@ -6995,7 +7870,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1605. + // Rule at src/isa/s390x/lower.isle line 1886. let expr0_0 = constructor_fence_impl(ctx)?; let expr1_0 = constructor_value_regs_none(ctx, &expr0_0)?; return Some(expr1_0); @@ -7086,13 +7961,13 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { match &pattern2_0 { &Opcode::Trap => { - // Rule at src/isa/s390x/lower.isle line 1862. + // Rule at src/isa/s390x/lower.isle line 2143. let expr0_0 = constructor_trap_impl(ctx, &pattern2_1)?; let expr1_0 = constructor_safepoint(ctx, &expr0_0)?; return Some(expr1_0); } &Opcode::ResumableTrap => { - // Rule at src/isa/s390x/lower.isle line 1868. + // Rule at src/isa/s390x/lower.isle line 2149. let expr0_0 = constructor_trap_impl(ctx, &pattern2_1)?; let expr1_0 = constructor_safepoint(ctx, &expr0_0)?; return Some(expr1_0); @@ -7109,7 +7984,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { let (pattern8_0, pattern8_1) = C::unpack_value_array_2(ctx, &pattern6_1); - // Rule at src/isa/s390x/lower.isle line 1904. + // Rule at src/isa/s390x/lower.isle line 2185. 
let expr0_0: bool = false; let expr1_0 = constructor_icmp_val( ctx, @@ -7362,7 +8237,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { match &pattern2_0 { &Opcode::Trapz => { - // Rule at src/isa/s390x/lower.isle line 1874. + // Rule at src/isa/s390x/lower.isle line 2155. let expr0_0 = constructor_value_nonzero(ctx, pattern2_1)?; let expr1_0 = constructor_invert_bool(ctx, &expr0_0)?; let expr2_0 = constructor_trap_if_bool(ctx, &expr1_0, &pattern2_2)?; @@ -7392,14 +8267,14 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1880. + // Rule at src/isa/s390x/lower.isle line 2161. let expr0_0 = constructor_value_nonzero(ctx, pattern2_1)?; let expr1_0 = constructor_trap_if_bool(ctx, &expr0_0, &pattern2_2)?; let expr2_0 = constructor_safepoint(ctx, &expr1_0)?; return Some(expr2_0); } &Opcode::ResumableTrapnz => { - // Rule at src/isa/s390x/lower.isle line 1886. + // Rule at src/isa/s390x/lower.isle line 2167. let expr0_0 = constructor_value_nonzero(ctx, pattern2_1)?; let expr1_0 = constructor_trap_if_bool(ctx, &expr0_0, &pattern2_2)?; let expr2_0 = constructor_safepoint(ctx, &expr1_0)?; @@ -7423,7 +8298,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let pattern7_0 = C::value_type(ctx, pattern5_1); if pattern7_0 == R64 { - // Rule at src/isa/s390x/lower.isle line 1740. + // Rule at src/isa/s390x/lower.isle line 2021. let expr0_0: Type = B1; let expr1_0: Type = I64; let expr2_0 = C::put_in_reg(ctx, pattern5_1); @@ -7440,7 +8315,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let pattern7_0 = C::value_type(ctx, pattern5_1); if pattern7_0 == R64 { - // Rule at src/isa/s390x/lower.isle line 1746. + // Rule at src/isa/s390x/lower.isle line 2027. 
let expr0_0: Type = B1; let expr1_0: Type = I64; let expr2_0 = C::put_in_reg(ctx, pattern5_1); @@ -7479,7 +8354,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::AtomicLoad = &pattern5_0 { - // Rule at src/isa/s390x/lower.isle line 1549. + // Rule at src/isa/s390x/lower.isle line 1830. let expr0_0: Type = I8; let expr1_0 = C::zero_offset(ctx); let expr2_0 = @@ -7518,7 +8393,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::AtomicLoad = &pattern5_0 { if let Some(()) = C::littleendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/lower.isle line 1557. + // Rule at src/isa/s390x/lower.isle line 1838. let expr0_0 = C::zero_offset(ctx); let expr1_0 = constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; @@ -7527,7 +8402,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option {} } } - &InstructionData::AtomicCas { - opcode: ref pattern5_0, - args: ref pattern5_1, - flags: pattern5_2, - } => { - if let &Opcode::AtomicCas = &pattern5_0 { - let (pattern7_0, pattern7_1, pattern7_2) = - C::unpack_value_array_3(ctx, &pattern5_1); - if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/lower.isle line 1534. - let expr0_0 = C::put_in_reg(ctx, pattern7_1); - let expr1_0 = C::put_in_reg(ctx, pattern7_2); - let expr2_0 = C::zero_offset(ctx); - let expr3_0 = - constructor_lower_address(ctx, pattern5_2, pattern7_0, expr2_0)?; - let expr4_0 = - constructor_atomic_cas32(ctx, expr0_0, expr1_0, &expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); - } - } - } &InstructionData::Unary { opcode: ref pattern5_0, arg: pattern5_1, @@ -7653,7 +8506,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::AtomicLoad = &pattern5_0 { if let Some(()) = C::littleendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/lower.isle line 1565. + // Rule at src/isa/s390x/lower.isle line 1846. 
let expr0_0 = C::zero_offset(ctx); let expr1_0 = constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; @@ -7662,7 +8515,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option {} } } - &InstructionData::AtomicCas { - opcode: ref pattern5_0, - args: ref pattern5_1, - flags: pattern5_2, - } => { - if let &Opcode::AtomicCas = &pattern5_0 { - let (pattern7_0, pattern7_1, pattern7_2) = - C::unpack_value_array_3(ctx, &pattern5_1); - if let Some(()) = C::bigendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/lower.isle line 1539. - let expr0_0 = C::put_in_reg(ctx, pattern7_1); - let expr1_0 = C::put_in_reg(ctx, pattern7_2); - let expr2_0 = C::zero_offset(ctx); - let expr3_0 = - constructor_lower_address(ctx, pattern5_2, pattern7_0, expr2_0)?; - let expr4_0 = - constructor_atomic_cas64(ctx, expr0_0, expr1_0, &expr3_0)?; - let expr5_0 = C::value_reg(ctx, expr4_0); - return Some(expr5_0); - } - } - } &InstructionData::Unary { opcode: ref pattern5_0, arg: pattern5_1, @@ -7781,7 +8612,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::AtomicLoad = &pattern5_0 { if let Some(()) = C::littleendian(ctx, pattern5_2) { - // Rule at src/isa/s390x/lower.isle line 1573. + // Rule at src/isa/s390x/lower.isle line 1854. let expr0_0 = C::zero_offset(ctx); let expr1_0 = constructor_lower_address(ctx, pattern5_2, pattern5_1, expr0_0)?; @@ -7790,7 +8621,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::Fcmp = &pattern4_0 { let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); - // Rule at src/isa/s390x/lower.isle line 1727. + // Rule at src/isa/s390x/lower.isle line 2008. 
let expr0_0 = constructor_fcmp_val(ctx, &pattern4_2, pattern6_0, pattern6_1)?; let expr1_0 = constructor_lower_bool(ctx, pattern2_0, &expr0_0)?; let expr2_0 = C::value_reg(ctx, expr1_0); @@ -8087,7 +8918,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::Icmp = &pattern4_0 { let (pattern6_0, pattern6_1) = C::unpack_value_array_2(ctx, &pattern4_1); - // Rule at src/isa/s390x/lower.isle line 1638. + // Rule at src/isa/s390x/lower.isle line 1919. let expr0_0: bool = true; let expr1_0 = constructor_icmp_val(ctx, expr0_0, &pattern4_2, pattern6_0, pattern6_1)?; @@ -8104,7 +8935,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { let (pattern6_0, pattern6_1, pattern6_2) = C::unpack_value_array_3(ctx, &pattern4_1); - // Rule at src/isa/s390x/lower.isle line 1769. + // Rule at src/isa/s390x/lower.isle line 2050. let expr0_0 = constructor_value_nonzero(ctx, pattern6_0)?; let expr1_0 = C::put_in_reg(ctx, pattern6_1); let expr2_0 = C::put_in_reg(ctx, pattern6_2); @@ -8147,7 +8978,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option(ctx: &mut C, arg0: Inst) -> Option { if let &Opcode::AtomicRmw = &pattern5_0 { let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(()) = C::littleendian(ctx, pattern5_2) { + match &pattern5_3 { + &AtomicRmwOp::And => { + // Rule at src/isa/s390x/lower.isle line 1509. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr2_0, + )?; + let expr4_0 = constructor_atomic_rmw_and( + ctx, pattern3_0, expr1_0, &expr3_0, + )?; + let expr5_0 = constructor_bswap_reg(ctx, pattern3_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + &AtomicRmwOp::Or => { + // Rule at src/isa/s390x/lower.isle line 1521. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr2_0, + )?; + let expr4_0 = constructor_atomic_rmw_or( + ctx, pattern3_0, expr1_0, &expr3_0, + )?; + let expr5_0 = constructor_bswap_reg(ctx, pattern3_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + &AtomicRmwOp::Xor => { + // Rule at src/isa/s390x/lower.isle line 1533. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = constructor_lower_address( + ctx, pattern5_2, pattern7_0, expr2_0, + )?; + let expr4_0 = constructor_atomic_rmw_xor( + ctx, pattern3_0, expr1_0, &expr3_0, + )?; + let expr5_0 = constructor_bswap_reg(ctx, pattern3_0, expr4_0)?; + let expr6_0 = C::value_reg(ctx, expr5_0); + return Some(expr6_0); + } + _ => {} + } + } if let Some(()) = C::bigendian(ctx, pattern5_2) { match &pattern5_3 { &AtomicRmwOp::Add => { - // Rule at src/isa/s390x/lower.isle line 1519. + // Rule at src/isa/s390x/lower.isle line 1539. let expr0_0 = C::put_in_reg(ctx, pattern7_1); let expr1_0 = C::zero_offset(ctx); let expr2_0 = constructor_lower_address( @@ -10165,7 +11046,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1501. + // Rule at src/isa/s390x/lower.isle line 1503. let expr0_0 = C::put_in_reg(ctx, pattern7_1); let expr1_0 = C::zero_offset(ctx); let expr2_0 = constructor_lower_address( @@ -10178,7 +11059,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1507. + // Rule at src/isa/s390x/lower.isle line 1515. 
let expr0_0 = C::put_in_reg(ctx, pattern7_1); let expr1_0 = C::zero_offset(ctx); let expr2_0 = constructor_lower_address( @@ -10191,7 +11072,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1525. + // Rule at src/isa/s390x/lower.isle line 1545. let expr0_0 = C::put_in_reg(ctx, pattern7_1); let expr1_0 = constructor_neg_reg(ctx, pattern3_0, expr0_0)?; let expr2_0 = C::zero_offset(ctx); @@ -10205,7 +11086,7 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - // Rule at src/isa/s390x/lower.isle line 1513. + // Rule at src/isa/s390x/lower.isle line 1527. let expr0_0 = C::put_in_reg(ctx, pattern7_1); let expr1_0 = C::zero_offset(ctx); let expr2_0 = constructor_lower_address( @@ -10220,6 +11101,67 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option {} } } + // Rule at src/isa/s390x/lower.isle line 1554. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = C::inst_builder_new(ctx); + let expr3_0 = constructor_casloop_val_reg(ctx)?; + let expr4_0 = C::writable_reg_to_reg(ctx, expr3_0); + let expr5_0 = constructor_casloop_tmp_reg(ctx)?; + let expr6_0 = constructor_atomic_rmw_body( + ctx, + &expr2_0, + pattern3_0, + pattern5_2, + &pattern5_3, + expr5_0, + expr4_0, + expr0_0, + )?; + let expr7_0 = constructor_casloop( + ctx, &expr2_0, pattern3_0, pattern5_2, expr1_0, expr6_0, + )?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); + } + } + &InstructionData::AtomicCas { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicCas = &pattern5_0 { + let (pattern7_0, pattern7_1, pattern7_2) = + C::unpack_value_array_3(ctx, &pattern5_1); + if let Some(()) = C::littleendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1766. 
+ let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, expr0_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_2); + let expr3_0 = constructor_bswap_reg(ctx, pattern3_0, expr2_0)?; + let expr4_0 = C::zero_offset(ctx); + let expr5_0 = + constructor_lower_address(ctx, pattern5_2, pattern7_0, expr4_0)?; + let expr6_0 = constructor_atomic_cas_impl( + ctx, pattern3_0, expr1_0, expr3_0, &expr5_0, + )?; + let expr7_0 = constructor_bswap_reg(ctx, pattern3_0, expr6_0)?; + let expr8_0 = C::value_reg(ctx, expr7_0); + return Some(expr8_0); + } + if let Some(()) = C::bigendian(ctx, pattern5_2) { + // Rule at src/isa/s390x/lower.isle line 1759. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_2); + let expr2_0 = C::zero_offset(ctx); + let expr3_0 = + constructor_lower_address(ctx, pattern5_2, pattern7_0, expr2_0)?; + let expr4_0 = constructor_atomic_cas_impl( + ctx, pattern3_0, expr0_0, expr1_0, &expr3_0, + )?; + let expr5_0 = C::value_reg(ctx, expr4_0); + return Some(expr5_0); + } } } &InstructionData::Unary { @@ -10305,100 +11247,187 @@ pub fn constructor_lower(ctx: &mut C, arg0: Inst) -> Option { - let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); - // Rule at src/isa/s390x/lower.isle line 245. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; - let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern7_1)?; - let expr2_0: Type = I32; - let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; - let expr4_0: Type = I32; - let expr5_0 = C::ty_bits(ctx, pattern3_0); - let expr6_0 = constructor_lshr_imm(ctx, expr4_0, expr3_0, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); - } - &Opcode::Smulhi => { - let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); - // Rule at src/isa/s390x/lower.isle line 267. 
- let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; - let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern7_1)?; - let expr2_0: Type = I32; - let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; - let expr4_0: Type = I32; - let expr5_0 = C::ty_bits(ctx, pattern3_0); - let expr6_0 = constructor_ashr_imm(ctx, expr4_0, expr3_0, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); - } - &Opcode::Rotl => { - let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); - if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { - if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) { - // Rule at src/isa/s390x/lower.isle line 546. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; - let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; - let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); - let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); - let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr0_0, expr2_0)?; - let expr5_0 = constructor_lshr_imm(ctx, expr1_0, expr0_0, expr3_0)?; - let expr6_0 = - constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); - } + match &pattern4_0 { + &InstructionData::Binary { + opcode: ref pattern5_0, + args: ref pattern5_1, + } => { + match &pattern5_0 { + &Opcode::Umulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 245. 
+ let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern7_1)?; + let expr2_0: Type = I32; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I32; + let expr5_0 = C::ty_bits(ctx, pattern3_0); + let expr6_0 = constructor_lshr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); } - // Rule at src/isa/s390x/lower.isle line 534. - let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; - let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; - let expr2_0 = C::put_in_reg(ctx, pattern7_1); - let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; - let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; - let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; - let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr4_0)?; - let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr5_0)?; - let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; - let expr9_0 = C::value_reg(ctx, expr8_0); - return Some(expr9_0); - } - &Opcode::Rotr => { - let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); - if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { - if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) { - // Rule at src/isa/s390x/lower.isle line 584. 
- let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; - let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; - let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); - let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); - let expr4_0 = constructor_lshl_imm(ctx, expr1_0, expr0_0, expr3_0)?; - let expr5_0 = constructor_lshr_imm(ctx, expr1_0, expr0_0, expr2_0)?; - let expr6_0 = - constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; - let expr7_0 = C::value_reg(ctx, expr6_0); - return Some(expr7_0); - } + &Opcode::Smulhi => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 267. + let expr0_0 = constructor_put_in_reg_sext32(ctx, pattern7_0)?; + let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern7_1)?; + let expr2_0: Type = I32; + let expr3_0 = constructor_mul_reg(ctx, expr2_0, expr0_0, expr1_0)?; + let expr4_0: Type = I32; + let expr5_0 = C::ty_bits(ctx, pattern3_0); + let expr6_0 = constructor_ashr_imm(ctx, expr4_0, expr3_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); } - // Rule at src/isa/s390x/lower.isle line 572. 
- let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; - let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; - let expr2_0 = C::put_in_reg(ctx, pattern7_1); - let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; - let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; - let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; - let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr5_0)?; - let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr4_0)?; - let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; - let expr9_0 = C::value_reg(ctx, expr8_0); - return Some(expr9_0); + &Opcode::Rotl => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) + { + // Rule at src/isa/s390x/lower.isle line 546. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); + let expr4_0 = + constructor_lshl_imm(ctx, expr1_0, expr0_0, expr2_0)?; + let expr5_0 = + constructor_lshr_imm(ctx, expr1_0, expr0_0, expr3_0)?; + let expr6_0 = + constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + } + // Rule at src/isa/s390x/lower.isle line 534. 
+ let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_1); + let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; + let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; + let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; + let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr4_0)?; + let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr5_0)?; + let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + &Opcode::Rotr => { + let (pattern7_0, pattern7_1) = + C::unpack_value_array_2(ctx, &pattern5_1); + if let Some(pattern8_0) = C::i64_from_value(ctx, pattern7_1) { + if let Some(pattern9_0) = C::i64_from_negated_value(ctx, pattern7_1) + { + // Rule at src/isa/s390x/lower.isle line 584. + let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::mask_amt_imm(ctx, pattern3_0, pattern8_0); + let expr3_0 = C::mask_amt_imm(ctx, pattern3_0, pattern9_0); + let expr4_0 = + constructor_lshl_imm(ctx, expr1_0, expr0_0, expr3_0)?; + let expr5_0 = + constructor_lshr_imm(ctx, expr1_0, expr0_0, expr2_0)?; + let expr6_0 = + constructor_or_reg(ctx, pattern3_0, expr4_0, expr5_0)?; + let expr7_0 = C::value_reg(ctx, expr6_0); + return Some(expr7_0); + } + } + // Rule at src/isa/s390x/lower.isle line 572. 
+ let expr0_0 = constructor_put_in_reg_zext32(ctx, pattern7_0)?; + let expr1_0 = constructor_ty_ext32(ctx, pattern3_0)?; + let expr2_0 = C::put_in_reg(ctx, pattern7_1); + let expr3_0 = constructor_neg_reg(ctx, pattern3_0, expr2_0)?; + let expr4_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr2_0)?; + let expr5_0 = constructor_mask_amt_reg(ctx, pattern3_0, expr3_0)?; + let expr6_0 = constructor_lshl_reg(ctx, expr1_0, expr0_0, expr5_0)?; + let expr7_0 = constructor_lshr_reg(ctx, expr1_0, expr0_0, expr4_0)?; + let expr8_0 = constructor_or_reg(ctx, pattern3_0, expr6_0, expr7_0)?; + let expr9_0 = C::value_reg(ctx, expr8_0); + return Some(expr9_0); + } + _ => {} } - _ => {} } + &InstructionData::AtomicRmw { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + op: ref pattern5_3, + } => { + if let &Opcode::AtomicRmw = &pattern5_0 { + let (pattern7_0, pattern7_1) = C::unpack_value_array_2(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1566. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_0); + let expr2_0 = constructor_casloop_bitshift(ctx, expr1_0)?; + let expr3_0 = constructor_casloop_aligned_addr(ctx, expr1_0)?; + let expr4_0 = C::inst_builder_new(ctx); + let expr5_0 = constructor_casloop_val_reg(ctx)?; + let expr6_0 = C::writable_reg_to_reg(ctx, expr5_0); + let expr7_0 = constructor_casloop_rotate_in( + ctx, &expr4_0, pattern3_0, pattern5_2, expr2_0, expr6_0, + )?; + let expr8_0 = constructor_casloop_tmp_reg(ctx)?; + let expr9_0 = constructor_atomic_rmw_body( + ctx, + &expr4_0, + pattern3_0, + pattern5_2, + &pattern5_3, + expr8_0, + expr7_0, + expr0_0, + )?; + let expr10_0 = constructor_casloop_rotate_out( + ctx, &expr4_0, pattern3_0, pattern5_2, expr2_0, expr9_0, + )?; + let expr11_0 = constructor_casloop_subword( + ctx, &expr4_0, pattern3_0, pattern5_2, expr3_0, expr2_0, expr10_0, + )?; + let expr12_0 = C::value_reg(ctx, expr11_0); + return Some(expr12_0); + } + } + 
&InstructionData::AtomicCas { + opcode: ref pattern5_0, + args: ref pattern5_1, + flags: pattern5_2, + } => { + if let &Opcode::AtomicCas = &pattern5_0 { + let (pattern7_0, pattern7_1, pattern7_2) = + C::unpack_value_array_3(ctx, &pattern5_1); + // Rule at src/isa/s390x/lower.isle line 1773. + let expr0_0 = C::put_in_reg(ctx, pattern7_1); + let expr1_0 = C::put_in_reg(ctx, pattern7_2); + let expr2_0 = C::put_in_reg(ctx, pattern7_0); + let expr3_0 = constructor_casloop_bitshift(ctx, expr2_0)?; + let expr4_0 = constructor_casloop_aligned_addr(ctx, expr2_0)?; + let expr5_0 = C::inst_builder_new(ctx); + let expr6_0 = constructor_casloop_val_reg(ctx)?; + let expr7_0 = C::writable_reg_to_reg(ctx, expr6_0); + let expr8_0 = constructor_casloop_rotate_in( + ctx, &expr5_0, pattern3_0, pattern5_2, expr3_0, expr7_0, + )?; + let expr9_0 = constructor_casloop_tmp_reg(ctx)?; + let expr10_0 = constructor_atomic_cas_body( + ctx, &expr5_0, pattern3_0, pattern5_2, expr9_0, expr8_0, expr0_0, + expr1_0, + )?; + let expr11_0 = constructor_casloop_rotate_out( + ctx, &expr5_0, pattern3_0, pattern5_2, expr3_0, expr10_0, + )?; + let expr12_0 = constructor_casloop_subword( + ctx, &expr5_0, pattern3_0, pattern5_2, expr4_0, expr3_0, expr11_0, + )?; + let expr13_0 = C::value_reg(ctx, expr12_0); + return Some(expr13_0); + } + } + _ => {} } } if let Some(pattern3_0) = C::gpr32_ty(ctx, pattern2_0) { @@ -10707,7 +11736,7 @@ pub fn constructor_lower_branch( } => { if let &Opcode::BrTable = &pattern2_0 { let pattern4_0 = arg1; - // Rule at src/isa/s390x/lower.isle line 1799. + // Rule at src/isa/s390x/lower.isle line 2080. let expr0_0 = constructor_put_in_reg_zext64(ctx, pattern2_1)?; let expr1_0: Type = I64; let expr2_0 = C::vec_length_minus1(ctx, pattern4_0); @@ -10736,7 +11765,7 @@ pub fn constructor_lower_branch( &Opcode::Brz => { let (pattern4_0, pattern4_1) = C::unwrap_head_value_list_1(ctx, pattern2_1); let pattern5_0 = arg1; - // Rule at src/isa/s390x/lower.isle line 1832. 
+ // Rule at src/isa/s390x/lower.isle line 2113. let expr0_0 = constructor_value_nonzero(ctx, pattern4_0)?; let expr1_0 = constructor_invert_bool(ctx, &expr0_0)?; let expr2_0: u8 = 0; @@ -10750,7 +11779,7 @@ pub fn constructor_lower_branch( &Opcode::Brnz => { let (pattern4_0, pattern4_1) = C::unwrap_head_value_list_1(ctx, pattern2_1); let pattern5_0 = arg1; - // Rule at src/isa/s390x/lower.isle line 1843. + // Rule at src/isa/s390x/lower.isle line 2124. let expr0_0 = constructor_value_nonzero(ctx, pattern4_0)?; let expr1_0: u8 = 0; let expr2_0 = C::vec_element(ctx, pattern5_0, expr1_0); @@ -10771,7 +11800,7 @@ pub fn constructor_lower_branch( if let &Opcode::Jump = &pattern2_0 { let pattern4_0 = C::value_list_slice(ctx, pattern2_1); let pattern5_0 = arg1; - // Rule at src/isa/s390x/lower.isle line 1791. + // Rule at src/isa/s390x/lower.isle line 2072. let expr0_0: u8 = 0; let expr1_0 = C::vec_element(ctx, pattern5_0, expr0_0); let expr2_0 = constructor_jump_impl(ctx, expr1_0)?; @@ -10798,7 +11827,7 @@ pub fn constructor_lower_branch( let (pattern9_0, pattern9_1) = C::unpack_value_array_2(ctx, &pattern7_1); let pattern10_0 = arg1; - // Rule at src/isa/s390x/lower.isle line 1854. + // Rule at src/isa/s390x/lower.isle line 2135. let expr0_0: bool = false; let expr1_0 = constructor_icmp_val( ctx, @@ -11366,13 +12395,744 @@ pub fn constructor_istore64_impl( return None; } +// Generated as internal constructor for term atomic_rmw_body. 
+pub fn constructor_atomic_rmw_body( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: &AtomicRmwOp, + arg4: WritableReg, + arg5: Reg, + arg6: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if let Some(()) = C::mie2_enabled(ctx, pattern1_0) { + if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern1_0) { + let pattern4_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern4_0) { + let pattern6_0 = arg3; + if let &AtomicRmwOp::Nand = pattern6_0 { + let pattern8_0 = arg4; + let pattern9_0 = arg5; + let pattern10_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1602. + let expr0_0 = constructor_aluop_and_not(ctx, pattern3_0)?; + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, pattern10_0)?; + let expr2_0 = constructor_push_alu_reg( + ctx, pattern0_0, &expr0_0, pattern8_0, pattern9_0, expr1_0, + )?; + return Some(expr2_0); + } + } + if let Some(()) = C::bigendian(ctx, pattern4_0) { + let pattern6_0 = arg3; + if let &AtomicRmwOp::Nand = pattern6_0 { + let pattern8_0 = arg4; + let pattern9_0 = arg5; + let pattern10_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1599. + let expr0_0 = constructor_aluop_and_not(ctx, pattern3_0)?; + let expr1_0 = constructor_push_alu_reg( + ctx, + pattern0_0, + &expr0_0, + pattern8_0, + pattern9_0, + pattern10_0, + )?; + return Some(expr1_0); + } + } + } + } + if let Some(()) = C::mie2_disabled(ctx, pattern1_0) { + if let Some(pattern3_0) = C::ty_32_or_64(ctx, pattern1_0) { + let pattern4_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern4_0) { + let pattern6_0 = arg3; + if let &AtomicRmwOp::Nand = pattern6_0 { + let pattern8_0 = arg4; + let pattern9_0 = arg5; + let pattern10_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1609. 
+ let expr0_0 = constructor_aluop_and(ctx, pattern3_0)?; + let expr1_0 = constructor_bswap_reg(ctx, pattern3_0, pattern10_0)?; + let expr2_0 = constructor_push_alu_reg( + ctx, pattern0_0, &expr0_0, pattern8_0, pattern9_0, expr1_0, + )?; + let expr3_0 = + constructor_push_not_reg(ctx, pattern0_0, pattern3_0, pattern8_0, expr2_0)?; + return Some(expr3_0); + } + } + if let Some(()) = C::bigendian(ctx, pattern4_0) { + let pattern6_0 = arg3; + if let &AtomicRmwOp::Nand = pattern6_0 { + let pattern8_0 = arg4; + let pattern9_0 = arg5; + let pattern10_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1605. + let expr0_0 = constructor_aluop_and(ctx, pattern3_0)?; + let expr1_0 = constructor_push_alu_reg( + ctx, + pattern0_0, + &expr0_0, + pattern8_0, + pattern9_0, + pattern10_0, + )?; + let expr2_0 = + constructor_push_not_reg(ctx, pattern0_0, pattern3_0, pattern8_0, expr1_0)?; + return Some(expr2_0); + } + } + } + } + if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + if let &AtomicRmwOp::Xchg = pattern5_0 { + let pattern7_0 = arg4; + let pattern8_0 = arg5; + let pattern9_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1591. + let expr0_0 = constructor_bswap_reg(ctx, pattern2_0, pattern9_0)?; + return Some(expr0_0); + } + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + if let &AtomicRmwOp::Xchg = pattern5_0 { + let pattern7_0 = arg4; + let pattern8_0 = arg5; + let pattern9_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1588. + return Some(pattern9_0); + } + } + } + if let Some(pattern2_0) = C::ty_8_or_16(ctx, pattern1_0) { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + match pattern4_0 { + &AtomicRmwOp::And => { + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1619. 
+ let expr0_0 = RxSBGOp::And; + let expr1_0 = constructor_atomic_rmw_body_rxsbg( + ctx, pattern0_0, pattern2_0, pattern3_0, &expr0_0, pattern6_0, pattern7_0, + pattern8_0, + )?; + return Some(expr1_0); + } + &AtomicRmwOp::Nand => { + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1625. + let expr0_0 = RxSBGOp::And; + let expr1_0 = constructor_atomic_rmw_body_rxsbg( + ctx, pattern0_0, pattern2_0, pattern3_0, &expr0_0, pattern6_0, pattern7_0, + pattern8_0, + )?; + let expr2_0 = constructor_atomic_rmw_body_invert( + ctx, pattern0_0, pattern2_0, pattern3_0, pattern6_0, expr1_0, + )?; + return Some(expr2_0); + } + &AtomicRmwOp::Or => { + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1621. + let expr0_0 = RxSBGOp::Or; + let expr1_0 = constructor_atomic_rmw_body_rxsbg( + ctx, pattern0_0, pattern2_0, pattern3_0, &expr0_0, pattern6_0, pattern7_0, + pattern8_0, + )?; + return Some(expr1_0); + } + &AtomicRmwOp::Xchg => { + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1617. + let expr0_0 = RxSBGOp::Insert; + let expr1_0 = constructor_atomic_rmw_body_rxsbg( + ctx, pattern0_0, pattern2_0, pattern3_0, &expr0_0, pattern6_0, pattern7_0, + pattern8_0, + )?; + return Some(expr1_0); + } + &AtomicRmwOp::Xor => { + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1623. + let expr0_0 = RxSBGOp::Xor; + let expr1_0 = constructor_atomic_rmw_body_rxsbg( + ctx, pattern0_0, pattern2_0, pattern3_0, &expr0_0, pattern6_0, pattern7_0, + pattern8_0, + )?; + return Some(expr1_0); + } + _ => {} + } + } + let pattern2_0 = arg2; + let pattern3_0 = arg3; + match pattern3_0 { + &AtomicRmwOp::Add => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1657. 
+ let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_aluop_add(ctx, expr0_0)?; + let expr2_0 = constructor_atomic_rmw_body_addsub( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, pattern5_0, pattern6_0, + pattern7_0, + )?; + return Some(expr2_0); + } + &AtomicRmwOp::Smax => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1698. + let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_cmpop_cmps(ctx, expr0_0)?; + let expr2_0 = IntCC::SignedGreaterThan; + let expr3_0 = C::intcc_as_cond(ctx, &expr2_0); + let expr4_0 = constructor_atomic_rmw_body_minmax( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, &expr3_0, pattern5_0, + pattern6_0, pattern7_0, + )?; + return Some(expr4_0); + } + &AtomicRmwOp::Smin => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1695. + let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_cmpop_cmps(ctx, expr0_0)?; + let expr2_0 = IntCC::SignedLessThan; + let expr3_0 = C::intcc_as_cond(ctx, &expr2_0); + let expr4_0 = constructor_atomic_rmw_body_minmax( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, &expr3_0, pattern5_0, + pattern6_0, pattern7_0, + )?; + return Some(expr4_0); + } + &AtomicRmwOp::Sub => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1659. + let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_aluop_sub(ctx, expr0_0)?; + let expr2_0 = constructor_atomic_rmw_body_addsub( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, pattern5_0, pattern6_0, + pattern7_0, + )?; + return Some(expr2_0); + } + &AtomicRmwOp::Umax => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1704. 
+ let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_cmpop_cmpu(ctx, expr0_0)?; + let expr2_0 = IntCC::UnsignedGreaterThan; + let expr3_0 = C::intcc_as_cond(ctx, &expr2_0); + let expr4_0 = constructor_atomic_rmw_body_minmax( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, &expr3_0, pattern5_0, + pattern6_0, pattern7_0, + )?; + return Some(expr4_0); + } + &AtomicRmwOp::Umin => { + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1701. + let expr0_0 = constructor_ty_ext32(ctx, pattern1_0)?; + let expr1_0 = constructor_cmpop_cmpu(ctx, expr0_0)?; + let expr2_0 = IntCC::UnsignedLessThan; + let expr3_0 = C::intcc_as_cond(ctx, &expr2_0); + let expr4_0 = constructor_atomic_rmw_body_minmax( + ctx, pattern0_0, pattern1_0, pattern2_0, &expr1_0, &expr3_0, pattern5_0, + pattern6_0, pattern7_0, + )?; + return Some(expr4_0); + } + _ => {} + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_body_rxsbg. +pub fn constructor_atomic_rmw_body_rxsbg( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: &RxSBGOp, + arg4: WritableReg, + arg5: Reg, + arg6: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1633. + let expr0_0: u8 = 32; + let expr1_0: u8 = 40; + let expr2_0: i8 = 24; + let expr3_0 = constructor_push_rxsbg( + ctx, pattern0_0, pattern4_0, pattern5_0, pattern6_0, pattern7_0, expr0_0, expr1_0, + expr2_0, + )?; + return Some(expr3_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1641. 
+ let expr0_0: Type = I32; + let expr1_0 = constructor_bswap_reg(ctx, expr0_0, pattern8_0)?; + let expr2_0: u8 = 48; + let expr3_0: u8 = 64; + let expr4_0: i8 = -16; + let expr5_0 = constructor_push_rxsbg( + ctx, pattern0_0, pattern5_0, pattern6_0, pattern7_0, expr1_0, expr2_0, expr3_0, + expr4_0, + )?; + return Some(expr5_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1637. + let expr0_0: u8 = 32; + let expr1_0: u8 = 48; + let expr2_0: i8 = 16; + let expr3_0 = constructor_push_rxsbg( + ctx, pattern0_0, pattern5_0, pattern6_0, pattern7_0, pattern8_0, expr0_0, expr1_0, + expr2_0, + )?; + return Some(expr3_0); + } + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_body_invert. +pub fn constructor_atomic_rmw_body_invert( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: WritableReg, + arg4: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + // Rule at src/isa/s390x/lower.isle line 1647. + let expr0_0: Type = I32; + let expr1_0: u32 = 4278190080; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm32shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, expr0_0, pattern4_0, pattern5_0, expr3_0, + )?; + return Some(expr4_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/lower.isle line 1653. 
+ let expr0_0: Type = I32; + let expr1_0: u32 = 65535; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm32shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, expr0_0, pattern5_0, pattern6_0, expr3_0, + )?; + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + // Rule at src/isa/s390x/lower.isle line 1650. + let expr0_0: Type = I32; + let expr1_0: u32 = 4294901760; + let expr2_0: u8 = 0; + let expr3_0 = C::uimm32shifted(ctx, expr1_0, expr2_0); + let expr4_0 = constructor_push_xor_uimm32shifted( + ctx, pattern0_0, expr0_0, pattern5_0, pattern6_0, expr3_0, + )?; + return Some(expr4_0); + } + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_body_addsub. +pub fn constructor_atomic_rmw_body_addsub( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: &ALUOp, + arg4: WritableReg, + arg5: Reg, + arg6: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1676. + let expr0_0: Type = I32; + let expr1_0: u8 = 24; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern7_0, expr1_0)?; + let expr3_0 = + constructor_push_alu_reg(ctx, pattern0_0, pattern4_0, pattern5_0, pattern6_0, expr2_0)?; + return Some(expr3_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1688. 
+ let expr0_0: Type = I32; + let expr1_0: u8 = 16; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern8_0, expr1_0)?; + let expr3_0: Type = I32; + let expr4_0 = + constructor_push_bswap_reg(ctx, pattern0_0, expr3_0, pattern6_0, pattern7_0)?; + let expr5_0 = constructor_push_alu_reg( + ctx, pattern0_0, pattern5_0, pattern6_0, expr4_0, expr2_0, + )?; + let expr6_0: Type = I32; + let expr7_0 = + constructor_push_bswap_reg(ctx, pattern0_0, expr6_0, pattern6_0, expr5_0)?; + return Some(expr7_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1680. + let expr0_0: Type = I32; + let expr1_0: u8 = 16; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern8_0, expr1_0)?; + let expr3_0 = constructor_push_alu_reg( + ctx, pattern0_0, pattern5_0, pattern6_0, pattern7_0, expr2_0, + )?; + return Some(expr3_0); + } + } + if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1670. + let expr0_0 = + constructor_push_bswap_reg(ctx, pattern0_0, pattern2_0, pattern6_0, pattern7_0)?; + let expr1_0 = constructor_push_alu_reg( + ctx, pattern0_0, pattern5_0, pattern6_0, expr0_0, pattern8_0, + )?; + let expr2_0 = + constructor_push_bswap_reg(ctx, pattern0_0, pattern2_0, pattern6_0, expr1_0)?; + return Some(expr2_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1666. 
+ let expr0_0 = constructor_push_alu_reg( + ctx, pattern0_0, pattern5_0, pattern6_0, pattern7_0, pattern8_0, + )?; + return Some(expr0_0); + } + } + return None; +} + +// Generated as internal constructor for term atomic_rmw_body_minmax. +pub fn constructor_atomic_rmw_body_minmax( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: &CmpOp, + arg4: &Cond, + arg5: WritableReg, + arg6: Reg, + arg7: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + let pattern8_0 = arg7; + // Rule at src/isa/s390x/lower.isle line 1733. + let expr0_0: Type = I32; + let expr1_0: u8 = 24; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern8_0, expr1_0)?; + let expr3_0 = constructor_cmp_rr(ctx, pattern4_0, expr2_0, pattern7_0)?; + let expr4_0 = C::invert_cond(ctx, pattern5_0); + let expr5_0 = constructor_push_break_if(ctx, pattern0_0, &expr3_0, &expr4_0)?; + let expr6_0 = RxSBGOp::Insert; + let expr7_0: u8 = 32; + let expr8_0: u8 = 40; + let expr9_0: i8 = 0; + let expr10_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr6_0, pattern6_0, pattern7_0, expr2_0, expr7_0, expr8_0, expr9_0, + )?; + return Some(expr10_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + let pattern9_0 = arg7; + // Rule at src/isa/s390x/lower.isle line 1746. 
+ let expr0_0: Type = I32; + let expr1_0: u8 = 16; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern9_0, expr1_0)?; + let expr3_0: Type = I32; + let expr4_0 = + constructor_push_bswap_reg(ctx, pattern0_0, expr3_0, pattern7_0, pattern8_0)?; + let expr5_0 = constructor_cmp_rr(ctx, pattern5_0, expr2_0, expr4_0)?; + let expr6_0 = C::invert_cond(ctx, pattern6_0); + let expr7_0 = constructor_push_break_if(ctx, pattern0_0, &expr5_0, &expr6_0)?; + let expr8_0 = RxSBGOp::Insert; + let expr9_0: u8 = 32; + let expr10_0: u8 = 48; + let expr11_0: i8 = 0; + let expr12_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr8_0, pattern7_0, expr4_0, expr2_0, expr9_0, expr10_0, + expr11_0, + )?; + let expr13_0: Type = I32; + let expr14_0 = + constructor_push_bswap_reg(ctx, pattern0_0, expr13_0, pattern7_0, expr12_0)?; + return Some(expr14_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + let pattern9_0 = arg7; + // Rule at src/isa/s390x/lower.isle line 1739. + let expr0_0: Type = I32; + let expr1_0: u8 = 16; + let expr2_0 = constructor_lshl_imm(ctx, expr0_0, pattern9_0, expr1_0)?; + let expr3_0 = constructor_cmp_rr(ctx, pattern5_0, expr2_0, pattern8_0)?; + let expr4_0 = C::invert_cond(ctx, pattern6_0); + let expr5_0 = constructor_push_break_if(ctx, pattern0_0, &expr3_0, &expr4_0)?; + let expr6_0 = RxSBGOp::Insert; + let expr7_0: u8 = 32; + let expr8_0: u8 = 48; + let expr9_0: i8 = 0; + let expr10_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr6_0, pattern7_0, pattern8_0, expr2_0, expr7_0, expr8_0, + expr9_0, + )?; + return Some(expr10_0); + } + } + if let Some(pattern2_0) = C::ty_32_or_64(ctx, pattern1_0) { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + let pattern9_0 = arg7; + // Rule at src/isa/s390x/lower.isle line 1721. 
+ let expr0_0 = + constructor_push_bswap_reg(ctx, pattern0_0, pattern2_0, pattern7_0, pattern8_0)?; + let expr1_0 = constructor_cmp_rr(ctx, pattern5_0, pattern9_0, expr0_0)?; + let expr2_0 = C::invert_cond(ctx, pattern6_0); + let expr3_0 = constructor_push_break_if(ctx, pattern0_0, &expr1_0, &expr2_0)?; + let expr4_0 = + constructor_push_bswap_reg(ctx, pattern0_0, pattern2_0, pattern7_0, pattern9_0)?; + return Some(expr4_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + let pattern9_0 = arg7; + // Rule at src/isa/s390x/lower.isle line 1714. + let expr0_0 = constructor_cmp_rr(ctx, pattern5_0, pattern9_0, pattern8_0)?; + let expr1_0 = C::invert_cond(ctx, pattern6_0); + let expr2_0 = constructor_push_break_if(ctx, pattern0_0, &expr0_0, &expr1_0)?; + return Some(pattern9_0); + } + } + return None; +} + +// Generated as internal constructor for term atomic_cas_body. +pub fn constructor_atomic_cas_body( + ctx: &mut C, + arg0: &VecMInstBuilder, + arg1: Type, + arg2: MemFlags, + arg3: WritableReg, + arg4: Reg, + arg5: Reg, + arg6: Reg, +) -> Option { + let pattern0_0 = arg0; + let pattern1_0 = arg1; + if pattern1_0 == I8 { + let pattern3_0 = arg2; + let pattern4_0 = arg3; + let pattern5_0 = arg4; + let pattern6_0 = arg5; + let pattern7_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1798. 
+ let expr0_0 = RxSBGOp::Xor; + let expr1_0: u8 = 32; + let expr2_0: u8 = 40; + let expr3_0: i8 = 24; + let expr4_0 = constructor_rxsbg_test( + ctx, &expr0_0, pattern5_0, pattern6_0, expr1_0, expr2_0, expr3_0, + )?; + let expr5_0 = IntCC::NotEqual; + let expr6_0 = C::intcc_as_cond(ctx, &expr5_0); + let expr7_0 = constructor_push_break_if(ctx, pattern0_0, &expr4_0, &expr6_0)?; + let expr8_0 = RxSBGOp::Insert; + let expr9_0: u8 = 32; + let expr10_0: u8 = 40; + let expr11_0: i8 = 24; + let expr12_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr8_0, pattern4_0, pattern5_0, pattern7_0, expr9_0, expr10_0, + expr11_0, + )?; + return Some(expr12_0); + } + if pattern1_0 == I16 { + let pattern3_0 = arg2; + if let Some(()) = C::littleendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1816. + let expr0_0: Type = I32; + let expr1_0 = constructor_bswap_reg(ctx, expr0_0, pattern7_0)?; + let expr2_0: Type = I32; + let expr3_0 = constructor_bswap_reg(ctx, expr2_0, pattern8_0)?; + let expr4_0 = RxSBGOp::Xor; + let expr5_0: u8 = 48; + let expr6_0: u8 = 64; + let expr7_0: i8 = -16; + let expr8_0 = constructor_rxsbg_test( + ctx, &expr4_0, pattern6_0, expr1_0, expr5_0, expr6_0, expr7_0, + )?; + let expr9_0 = IntCC::NotEqual; + let expr10_0 = C::intcc_as_cond(ctx, &expr9_0); + let expr11_0 = constructor_push_break_if(ctx, pattern0_0, &expr8_0, &expr10_0)?; + let expr12_0 = RxSBGOp::Insert; + let expr13_0: u8 = 48; + let expr14_0: u8 = 64; + let expr15_0: i8 = -16; + let expr16_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr12_0, pattern5_0, pattern6_0, expr3_0, expr13_0, expr14_0, + expr15_0, + )?; + return Some(expr16_0); + } + if let Some(()) = C::bigendian(ctx, pattern3_0) { + let pattern5_0 = arg3; + let pattern6_0 = arg4; + let pattern7_0 = arg5; + let pattern8_0 = arg6; + // Rule at src/isa/s390x/lower.isle line 1805. 
+ let expr0_0 = RxSBGOp::Xor; + let expr1_0: u8 = 32; + let expr2_0: u8 = 48; + let expr3_0: i8 = 16; + let expr4_0 = constructor_rxsbg_test( + ctx, &expr0_0, pattern6_0, pattern7_0, expr1_0, expr2_0, expr3_0, + )?; + let expr5_0 = IntCC::NotEqual; + let expr6_0 = C::intcc_as_cond(ctx, &expr5_0); + let expr7_0 = constructor_push_break_if(ctx, pattern0_0, &expr4_0, &expr6_0)?; + let expr8_0 = RxSBGOp::Insert; + let expr9_0: u8 = 32; + let expr10_0: u8 = 48; + let expr11_0: i8 = 16; + let expr12_0 = constructor_push_rxsbg( + ctx, pattern0_0, &expr8_0, pattern5_0, pattern6_0, pattern8_0, expr9_0, expr10_0, + expr11_0, + )?; + return Some(expr12_0); + } + } + return None; +} + // Generated as internal constructor for term atomic_store_impl. pub fn constructor_atomic_store_impl( ctx: &mut C, arg0: &SideEffectNoResult, ) -> Option { let pattern0_0 = arg0; - // Rule at src/isa/s390x/lower.isle line 1581. + // Rule at src/isa/s390x/lower.isle line 1862. let expr0_0 = constructor_value_regs_none(ctx, pattern0_0)?; let expr1_0 = constructor_fence_impl(ctx)?; let expr2_0 = constructor_value_regs_none(ctx, &expr1_0)?; @@ -11392,7 +13152,7 @@ pub fn constructor_icmp_val( if let Some(()) = C::signed(ctx, pattern1_0) { let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/lower.isle line 1648. + // Rule at src/isa/s390x/lower.isle line 1929. let expr0_0 = constructor_icmps_val(ctx, pattern0_0, pattern3_0, pattern4_0)?; let expr1_0 = C::intcc_as_cond(ctx, pattern1_0); let expr2_0 = constructor_bool(ctx, &expr0_0, &expr1_0)?; @@ -11401,7 +13161,7 @@ pub fn constructor_icmp_val( if let Some(()) = C::unsigned(ctx, pattern1_0) { let pattern3_0 = arg2; let pattern4_0 = arg3; - // Rule at src/isa/s390x/lower.isle line 1651. + // Rule at src/isa/s390x/lower.isle line 1932. 
let expr0_0 = constructor_icmpu_val(ctx, pattern0_0, pattern3_0, pattern4_0)?; let expr1_0 = C::intcc_as_cond(ctx, pattern1_0); let expr2_0 = constructor_bool(ctx, &expr0_0, &expr1_0)?; @@ -11435,7 +13195,7 @@ pub fn constructor_icmps_val( match &pattern8_0 { &Opcode::Sload16 => { if let Some(()) = C::bigendian(ctx, pattern8_2) { - // Rule at src/isa/s390x/lower.isle line 1681. + // Rule at src/isa/s390x/lower.isle line 1962. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_sload16(ctx, pattern6_0)?; let expr2_0 = constructor_icmps_mem_sext16( @@ -11446,7 +13206,7 @@ pub fn constructor_icmps_val( } &Opcode::Sload32 => { if let Some(()) = C::bigendian(ctx, pattern8_2) { - // Rule at src/isa/s390x/lower.isle line 1683. + // Rule at src/isa/s390x/lower.isle line 1964. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_sload32(ctx, pattern6_0)?; let expr2_0 = constructor_icmps_mem_sext32( @@ -11472,7 +13232,7 @@ pub fn constructor_icmps_val( { if let &Opcode::Load = &pattern10_0 { if let Some(()) = C::bigendian(ctx, pattern10_2) { - // Rule at src/isa/s390x/lower.isle line 1677. + // Rule at src/isa/s390x/lower.isle line 1958. let expr0_0 = constructor_ty_ext32(ctx, pattern4_0)?; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern2_0)?; let expr2_0 = constructor_sink_load(ctx, pattern8_0)?; @@ -11496,7 +13256,7 @@ pub fn constructor_icmps_val( { if let &Opcode::Load = &pattern10_0 { if let Some(()) = C::bigendian(ctx, pattern10_2) { - // Rule at src/isa/s390x/lower.isle line 1673. + // Rule at src/isa/s390x/lower.isle line 1954. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_load(ctx, pattern8_0)?; let expr2_0 = @@ -11514,14 +13274,14 @@ pub fn constructor_icmps_val( if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) { let pattern4_0 = arg2; if let Some(pattern5_0) = C::i16_from_value(ctx, pattern4_0) { - // Rule at src/isa/s390x/lower.isle line 1667. 
+ // Rule at src/isa/s390x/lower.isle line 1948. let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; let expr2_0 = constructor_icmps_simm16(ctx, expr0_0, expr1_0, pattern5_0)?; return Some(expr2_0); } if let Some(pattern5_0) = C::i32_from_value(ctx, pattern4_0) { - // Rule at src/isa/s390x/lower.isle line 1669. + // Rule at src/isa/s390x/lower.isle line 1950. let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; let expr2_0 = constructor_icmps_simm32(ctx, expr0_0, expr1_0, pattern5_0)?; @@ -11537,7 +13297,7 @@ pub fn constructor_icmps_val( if let &Opcode::Sextend = &pattern7_0 { let pattern9_0 = C::value_type(ctx, pattern7_1); if pattern9_0 == I32 { - // Rule at src/isa/s390x/lower.isle line 1663. + // Rule at src/isa/s390x/lower.isle line 1944. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = C::put_in_reg(ctx, pattern7_1); let expr2_0 = @@ -11547,7 +13307,7 @@ pub fn constructor_icmps_val( } } } - // Rule at src/isa/s390x/lower.isle line 1659. + // Rule at src/isa/s390x/lower.isle line 1940. let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern1_0)?; let expr2_0 = constructor_put_in_reg_sext32(ctx, pattern4_0)?; @@ -11608,7 +13368,7 @@ pub fn constructor_icmpu_val( if let Some(()) = C::bigendian(ctx, pattern17_2) { - // Rule at src/isa/s390x/lower.isle line 1716. + // Rule at src/isa/s390x/lower.isle line 1997. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_uload16( @@ -11631,7 +13391,7 @@ pub fn constructor_icmpu_val( } &Opcode::Uload32 => { if let Some(()) = C::bigendian(ctx, pattern8_2) { - // Rule at src/isa/s390x/lower.isle line 1719. + // Rule at src/isa/s390x/lower.isle line 2000. 
let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_uload32(ctx, pattern6_0)?; let expr2_0 = constructor_icmpu_mem_zext32( @@ -11683,7 +13443,7 @@ pub fn constructor_icmpu_val( if let Some(()) = C::bigendian(ctx, pattern19_2) { - // Rule at src/isa/s390x/lower.isle line 1709. + // Rule at src/isa/s390x/lower.isle line 1990. let expr0_0 = constructor_ty_ext32( ctx, pattern4_0, )?; @@ -11723,7 +13483,7 @@ pub fn constructor_icmpu_val( { if let &Opcode::Load = &pattern10_0 { if let Some(()) = C::bigendian(ctx, pattern10_2) { - // Rule at src/isa/s390x/lower.isle line 1703. + // Rule at src/isa/s390x/lower.isle line 1984. let expr0_0 = C::put_in_reg(ctx, pattern2_0); let expr1_0 = constructor_sink_load(ctx, pattern8_0)?; let expr2_0 = @@ -11741,7 +13501,7 @@ pub fn constructor_icmpu_val( if let Some(pattern3_0) = C::fits_in_64(ctx, pattern2_0) { let pattern4_0 = arg2; if let Some(pattern5_0) = C::u32_from_value(ctx, pattern4_0) { - // Rule at src/isa/s390x/lower.isle line 1699. + // Rule at src/isa/s390x/lower.isle line 1980. let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern1_0)?; let expr2_0 = constructor_icmpu_uimm32(ctx, expr0_0, expr1_0, pattern5_0)?; @@ -11757,7 +13517,7 @@ pub fn constructor_icmpu_val( if let &Opcode::Uextend = &pattern7_0 { let pattern9_0 = C::value_type(ctx, pattern7_1); if pattern9_0 == I32 { - // Rule at src/isa/s390x/lower.isle line 1695. + // Rule at src/isa/s390x/lower.isle line 1976. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = C::put_in_reg(ctx, pattern7_1); let expr2_0 = @@ -11767,7 +13527,7 @@ pub fn constructor_icmpu_val( } } } - // Rule at src/isa/s390x/lower.isle line 1691. + // Rule at src/isa/s390x/lower.isle line 1972. 
let expr0_0 = constructor_ty_ext32(ctx, pattern3_0)?; let expr1_0 = constructor_put_in_reg_zext32(ctx, pattern1_0)?; let expr2_0 = constructor_put_in_reg_zext32(ctx, pattern4_0)?; @@ -11788,7 +13548,7 @@ pub fn constructor_fcmp_val( let pattern1_0 = arg1; let pattern2_0 = C::value_type(ctx, pattern1_0); let pattern3_0 = arg2; - // Rule at src/isa/s390x/lower.isle line 1732. + // Rule at src/isa/s390x/lower.isle line 2013. let expr0_0 = C::put_in_reg(ctx, pattern1_0); let expr1_0 = C::put_in_reg(ctx, pattern3_0); let expr2_0 = constructor_fcmp_reg(ctx, pattern2_0, expr0_0, expr1_0)?; @@ -11810,7 +13570,7 @@ pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option } => { if let &Opcode::Fcmp = &pattern3_0 { let (pattern5_0, pattern5_1) = C::unpack_value_array_2(ctx, &pattern3_1); - // Rule at src/isa/s390x/lower.isle line 1760. + // Rule at src/isa/s390x/lower.isle line 2041. let expr0_0 = constructor_fcmp_val(ctx, &pattern3_2, pattern5_0, pattern5_1)?; return Some(expr0_0); } @@ -11822,7 +13582,7 @@ pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option } => { if let &Opcode::Icmp = &pattern3_0 { let (pattern5_0, pattern5_1) = C::unpack_value_array_2(ctx, &pattern3_1); - // Rule at src/isa/s390x/lower.isle line 1759. + // Rule at src/isa/s390x/lower.isle line 2040. let expr0_0: bool = false; let expr1_0 = constructor_icmp_val(ctx, expr0_0, &pattern3_2, pattern5_0, pattern5_1)?; @@ -11834,7 +13594,7 @@ pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option arg: pattern3_1, } => { if let &Opcode::Bint = &pattern3_0 { - // Rule at src/isa/s390x/lower.isle line 1758. + // Rule at src/isa/s390x/lower.isle line 2039. 
let expr0_0 = constructor_value_nonzero(ctx, pattern3_1)?; return Some(expr0_0); } @@ -11844,7 +13604,7 @@ pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option } let pattern1_0 = C::value_type(ctx, pattern0_0); if let Some(pattern2_0) = C::gpr32_ty(ctx, pattern1_0) { - // Rule at src/isa/s390x/lower.isle line 1761. + // Rule at src/isa/s390x/lower.isle line 2042. let expr0_0: Type = I32; let expr1_0 = constructor_put_in_reg_sext32(ctx, pattern0_0)?; let expr2_0: i16 = 0; @@ -11855,7 +13615,7 @@ pub fn constructor_value_nonzero(ctx: &mut C, arg0: Value) -> Option return Some(expr6_0); } if let Some(pattern2_0) = C::gpr64_ty(ctx, pattern1_0) { - // Rule at src/isa/s390x/lower.isle line 1764. + // Rule at src/isa/s390x/lower.isle line 2045. let expr0_0: Type = I64; let expr1_0 = C::put_in_reg(ctx, pattern0_0); let expr2_0: i16 = 0; diff --git a/cranelift/filetests/filetests/isa/s390x/atomic_cas-little.clif b/cranelift/filetests/filetests/isa/s390x/atomic_cas-little.clif new file mode 100644 index 0000000000..53eed240c6 --- /dev/null +++ b/cranelift/filetests/filetests/isa/s390x/atomic_cas-little.clif @@ -0,0 +1,63 @@ +test compile +target s390x + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_CAS +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_cas_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_cas.i64 little v2, v0, v1 + return v3 +} + +; check: lrvgr %r2, %r2 +; nextln: lrvgr %r3, %r3 +; nextln: csg %r2, %r3, 0(%r4) +; nextln: lrvgr %r2, %r2 +; nextln: br %r14 + +function %atomic_cas_i32(i32, i32, i64) -> i32 { +block0(v0: i32, v1: i32, v2: i64): + v3 = atomic_cas.i32 little v2, v0, v1 + return v3 +} + +; check: lrvr %r2, %r2 +; nextln: lrvr %r3, %r3 +; nextln: cs %r2, %r3, 0(%r4) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_cas_i16(i64, i16, i16, i64) -> i16 { +block0(v0: i64, v1: i16, v2: i16, v3: i64): + v4 = atomic_cas.i16 little v3, v1, v2 + return v4 +} + +; check: sllk 
%r2, %r5, 3 +; nextln: nill %r5, 65532 +; nextln: lrvr %r3, %r3 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r5) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r3, 176, 64, 48 ; jglh 1f ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_cas_i8(i64, i8, i8, i64) -> i8 { +block0(v0: i64, v1: i8, v2: i8, v3: i64): + v4 = atomic_cas.i8 little v3, v1, v2 + return v4 +} + +; check: stmg %r14, %r15, 112(%r15) +; nextln: sllk %r2, %r5, 3 +; nextln: nill %r5, 65532 +; nextln: lcr %r14, %r2 +; nextln: l %r0, 0(%r5) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b +; nextln: rll %r2, %r0, 8(%r2) +; nextln: lmg %r14, %r15, 112(%r15) +; nextln: br %r14 + diff --git a/cranelift/filetests/filetests/isa/s390x/atomic_cas.clif b/cranelift/filetests/filetests/isa/s390x/atomic_cas.clif index 5d3929f79a..5d9e79786f 100644 --- a/cranelift/filetests/filetests/isa/s390x/atomic_cas.clif +++ b/cranelift/filetests/filetests/isa/s390x/atomic_cas.clif @@ -23,3 +23,32 @@ block0(v0: i32, v1: i32, v2: i64): ; check: cs %r2, %r3, 0(%r4) ; nextln: br %r14 +function %atomic_cas_i16(i64, i16, i16, i64) -> i16 { +block0(v0: i64, v1: i16, v2: i16, v3: i64): + v4 = atomic_cas.i16 v3, v1, v2 + return v4 +} + +; check: sllk %r2, %r5, 3 +; nextln: nill %r5, 65532 +; nextln: l %r0, 0(%r5) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 48, 16 ; jglh 1f ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r5) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_cas_i8(i64, i8, i8, i64) -> i8 { +block0(v0: i64, v1: i8, v2: i8, v3: i64): + v4 = atomic_cas.i8 v3, v1, v2 + return v4 +} + +; check: stmg %r14, %r15, 112(%r15) +; nextln: sllk %r2, %r5, 3 +; nextln: nill %r5, 65532 +; nextln: lcr %r14, %r2 
+; nextln: l %r0, 0(%r5) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r3, 160, 40, 24 ; jglh 1f ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r14) ; cs %r0, %r1, 0(%r5) ; jglh 0b +; nextln: rll %r2, %r0, 8(%r2) +; nextln: lmg %r14, %r15, 112(%r15) +; nextln: br %r14 + diff --git a/cranelift/filetests/filetests/isa/s390x/atomic_rmw-arch13.clif b/cranelift/filetests/filetests/isa/s390x/atomic_rmw-arch13.clif new file mode 100644 index 0000000000..28cf8be01e --- /dev/null +++ b/cranelift/filetests/filetests/isa/s390x/atomic_rmw-arch13.clif @@ -0,0 +1,105 @@ +test compile +target s390x arch13 + +function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 nand v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: nngrk %r1, %r0, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 nand v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: nnrk %r1, %r0, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, 
%r0, 8(%r2) +; nextln: br %r14 + +function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little nand v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: lg %r0, 0(%r3) +; nextln: 0: nngrk %r1, %r0, %r2 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little nand v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: nnrk %r1, %r0, %r2 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + diff --git a/cranelift/filetests/filetests/isa/s390x/atomic_rmw-little.clif b/cranelift/filetests/filetests/isa/s390x/atomic_rmw-little.clif new file mode 100644 index 0000000000..783c70a9cd --- /dev/null +++ b/cranelift/filetests/filetests/isa/s390x/atomic_rmw-little.clif @@ -0,0 +1,620 @@ +test compile +target s390x + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (XCHG) 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little xchg v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: lg %r0, 0(%r3) +; nextln: 0: csg %r0, %r2, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little xchg v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: cs %r0, %r2, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little xchg v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; risbgn %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little xchg v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (ADD) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_add_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little add v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; agr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_add_i32(i64, i64, i32) 
-> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little add v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; ar %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_add_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little add v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; ar %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_add_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little add v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (SUB) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_sub_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little sub v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; sgr %r1, %r4 ; lrvgr %r1, %r1 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_sub_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little sub v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; sr %r1, %r4 ; lrvr %r1, %r1 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: 
i64, v2: i16): + v3 = atomic_rmw.i16 little sub v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; sr %r1, %r4 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little sub v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (AND) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_and_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little and v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: lang %r2, %r2, 0(%r3) +; nextln: lrvgr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_and_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little and v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: lan %r2, %r2, 0(%r3) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_and_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little and v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_and_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = 
atomic_rmw.i8 little and v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (OR) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_or_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little or v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: laog %r2, %r2, 0(%r3) +; nextln: lrvgr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_or_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little or v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: lao %r2, %r2, 0(%r3) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_or_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little or v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rosbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_or_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little or v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (XOR) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_xor_i64(i64, i64, i64) -> i64 { +block0(v0: i64, 
v1: i64, v2: i64): + v3 = atomic_rmw.i64 little xor v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: laxg %r2, %r2, 0(%r3) +; nextln: lrvgr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_xor_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little xor v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: lax %r2, %r2, 0(%r3) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little xor v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rxsbg %r1, %r4, 48, 64, 48 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little xor v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (NAND) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little nand v1, v2 + return v3 +} + +; check: lrvgr %r2, %r4 +; nextln: lg %r0, 0(%r3) +; nextln: 0: ngrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little nand v1, v2 + return v3 +} + +; check: lrvr %r2, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: 
nrk %r1, %r0, %r2 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lrvr %r4, %r4 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; rnsbg %r1, %r4, 48, 64, 48 ; xilf %r1, 65535 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (SMIN) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little smin v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little smin v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little smin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; 
nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little smin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (SMAX) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little smax v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; cgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little smax v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; cr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little smax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 
0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little smax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (UMIN) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little umin v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnl 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little umin v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnl 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little umin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little umin v1, v2 + 
return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (UMAX) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 little umax v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: lrvgr %r1, %r0 ; clgr %r4, %r1 ; jgnh 1f ; lrvgr %r1, %r4 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 little umax v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: lrvr %r1, %r0 ; clr %r4, %r1 ; jgnh 1f ; lrvr %r1, %r4 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lrvr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 little umax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 16(%r2) ; lrvr %r1, %r1 ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; lrvr %r1, %r1 ; rll %r1, %r1, 16(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 0(%r2) +; nextln: lrvr %r2, %r2 +; nextln: br %r14 + +function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 little umax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 
0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + diff --git a/cranelift/filetests/filetests/isa/s390x/atomic_rmw.clif b/cranelift/filetests/filetests/isa/s390x/atomic_rmw.clif index 2c1e02d72b..13b3adb1cd 100644 --- a/cranelift/filetests/filetests/isa/s390x/atomic_rmw.clif +++ b/cranelift/filetests/filetests/isa/s390x/atomic_rmw.clif @@ -1,6 +1,59 @@ test compile target s390x +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (XCHG) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_xchg_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 xchg v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: csg %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_xchg_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 xchg v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: cs %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_xchg_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 xchg v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_xchg_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 xchg v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; risbgn %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ATOMIC_RMW (ADD) 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -23,6 +76,35 @@ block0(v0: i64, v1: i32): ; check: laa %r2, %r3, 0(%r2) ; nextln: br %r14 +function %atomic_rmw_add_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 add v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_add_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 add v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; ar %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ATOMIC_RMW (SUB) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -47,6 +129,35 @@ block0(v0: i64, v1: i32): ; nextln: laa %r2, %r3, 0(%r2) ; nextln: br %r14 +function %atomic_rmw_sub_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 sub v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_sub_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 sub v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; sr %r1, %r4 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) 
+; nextln: br %r14 + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ATOMIC_RMW (AND) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -69,6 +180,33 @@ block0(v0: i64, v1: i32): ; check: lan %r2, %r3, 0(%r2) ; nextln: br %r14 +function %atomic_rmw_and_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 and v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_and_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 and v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ATOMIC_RMW (OR) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -91,6 +229,33 @@ block0(v0: i64, v1: i32): ; check: lao %r2, %r3, 0(%r2) ; nextln: br %r14 +function %atomic_rmw_or_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 or v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_or_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 or v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rosbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; 
nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ATOMIC_RMW (XOR) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -112,3 +277,304 @@ block0(v0: i64, v1: i32): ; check: lax %r2, %r3, 0(%r2) ; nextln: br %r14 + +function %atomic_rmw_xor_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 xor v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 48, 16 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_xor_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 xor v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rxsbg %r1, %r4, 32, 40, 24 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (NAND) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_nand_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 nand v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: ngrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; xihf %r1, 4294967295 ; csg %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 nand v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: nrk %r1, %r0, %r4 ; xilf %r1, 4294967295 ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_nand_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill 
%r3, 65532 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 48, 16 ; xilf %r1, 4294901760 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_nand_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 nand v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; rnsbg %r1, %r4, 32, 40, 24 ; xilf %r1, 4278190080 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (SMIN) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_smin_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 smin v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: cgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smin_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 smin v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: cr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smin_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 smin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_smin_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 smin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 
65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (SMAX) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_smax_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 smax v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: cgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smax_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 smax v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: cr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_smax_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 smax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_smax_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 smax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; cr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (UMIN) 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_umin_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 umin v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: clgr %r4, %r0 ; jgnl 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umin_i32(i64, i64, i32) -> i32 { +block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 umin v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: clr %r4, %r0 ; jgnl 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umin_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 umin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_umin_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 umin v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnl 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; ATOMIC_RMW (UMAX) +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +function %atomic_rmw_umax_i64(i64, i64, i64) -> i64 { +block0(v0: i64, v1: i64, v2: i64): + v3 = atomic_rmw.i64 umax v1, v2 + return v3 +} + +; check: lg %r0, 0(%r3) +; nextln: 0: clgr %r4, %r0 ; jgnh 1f ; csg %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lgr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umax_i32(i64, i64, i32) -> i32 { 
+block0(v0: i64, v1: i64, v2: i32): + v3 = atomic_rmw.i32 umax v1, v2 + return v3 +} + +; check: l %r0, 0(%r3) +; nextln: 0: clr %r4, %r0 ; jgnh 1f ; cs %r0, %r4, 0(%r3) ; jglh 0b ; 1: +; nextln: lr %r2, %r0 +; nextln: br %r14 + +function %atomic_rmw_umax_i16(i64, i64, i16) -> i16 { +block0(v0: i64, v1: i64, v2: i16): + v3 = atomic_rmw.i16 umax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 16 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 48, 0 ; rll %r1, %r1, 0(%r2) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 16(%r2) +; nextln: br %r14 + +function %atomic_rmw_umax_i8(i64, i64, i8) -> i8 { +block0(v0: i64, v1: i64, v2: i8): + v3 = atomic_rmw.i8 umax v1, v2 + return v3 +} + +; check: sllk %r2, %r3, 3 +; nextln: nill %r3, 65532 +; nextln: sllk %r4, %r4, 24 +; nextln: lcr %r5, %r2 +; nextln: l %r0, 0(%r3) +; nextln: 0: rll %r1, %r0, 0(%r2) ; clr %r4, %r1 ; jgnh 1f ; risbgn %r1, %r4, 32, 40, 0 ; rll %r1, %r1, 0(%r5) ; cs %r0, %r1, 0(%r3) ; jglh 0b ; 1: +; nextln: rll %r2, %r0, 8(%r2) +; nextln: br %r14 + diff --git a/cranelift/filetests/filetests/runtests/atomic-cas-little.clif b/cranelift/filetests/filetests/runtests/atomic-cas-little.clif new file mode 100644 index 0000000000..d80c6dac57 --- /dev/null +++ b/cranelift/filetests/filetests/runtests/atomic-cas-little.clif @@ -0,0 +1,40 @@ +test run +target s390x + +; We can't test that these instructions are right regarding atomicity, but we can +; test if they perform their operation correctly + +function %atomic_cas_i64(i64, i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64, v2: i64): + v3 = stack_addr.i64 ss0 + store.i64 little v0, v3 + + v4 = atomic_cas.i64 little v3, v1, v2 + + v5 = load.i64 little v3 + return v5 +} +; run: %atomic_cas_i64(0, 0, 2) == 2 +; run: %atomic_cas_i64(1, 0, 2) == 1 +; run: %atomic_cas_i64(0, 1, 2) == 0 +; run: %atomic_cas_i64(0, 0, 
0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF + +function %atomic_cas_i32(i32, i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32, v2: i32): + v3 = stack_addr.i32 ss0 + store.i32 little v0, v3 + + v4 = atomic_cas.i32 little v3, v1, v2 + + v5 = load.i32 little v3 + return v5 +} +; run: %atomic_cas_i32(0, 0, 2) == 2 +; run: %atomic_cas_i32(1, 0, 2) == 1 +; run: %atomic_cas_i32(0, 1, 2) == 0 +; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE + diff --git a/cranelift/filetests/filetests/runtests/atomic-cas-subword.clif b/cranelift/filetests/filetests/runtests/atomic-cas-subword.clif new file mode 100644 index 0000000000..02aeaafecd --- /dev/null +++ b/cranelift/filetests/filetests/runtests/atomic-cas-subword.clif @@ -0,0 +1,86 @@ +test run +target s390x + +; We can't test that these instructions are right regarding atomicity, but we can +; test if they perform their operation correctly + +function %atomic_cas_big_i16(i32, i64, i16, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16, v3: i16): + v4 = stack_addr.i64 ss0 + store.i32 big v0, v4 + + v5 = iadd.i64 v4, v1 + v6 = atomic_cas.i16 big v5, v2, v3 + + v7 = load.i32 big v4 + return v7 +} +; run: %atomic_cas_big_i16(0x12345678, 0, 0x1234, 0xabcd) == 0xabcd5678 +; run: %atomic_cas_big_i16(0x12345678, 0, 0x4321, 0xabcd) == 0x12345678 +; run: %atomic_cas_big_i16(0x12345678, 2, 0x5678, 0xabcd) == 0x1234abcd +; run: %atomic_cas_big_i16(0x12345678, 2, 0x8765, 0xabcd) == 0x12345678 + +function %atomic_cas_little_i16(i32, i64, i16, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16, v3: i16): + v4 = stack_addr.i64 ss0 + store.i32 little v0, v4 + + v5 = iadd.i64 v4, v1 + v6 = atomic_cas.i16 little v5, v2, v3 + + v7 = load.i32 little v4 + return v7 +} +; run: %atomic_cas_little_i16(0x12345678, 2, 0x1234, 0xabcd) == 0xabcd5678 +; run: %atomic_cas_little_i16(0x12345678, 2, 0x4321, 0xabcd) == 0x12345678 +; run: %atomic_cas_little_i16(0x12345678, 0, 0x5678, 
0xabcd) == 0x1234abcd +; run: %atomic_cas_little_i16(0x12345678, 0, 0x8765, 0xabcd) == 0x12345678 + +function %atomic_cas_big_i8(i32, i64, i8, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8, v3: i8): + v4 = stack_addr.i64 ss0 + store.i32 big v0, v4 + + v5 = iadd.i64 v4, v1 + v6 = atomic_cas.i8 big v5, v2, v3 + + v7 = load.i32 big v4 + return v7 +} +; run: %atomic_cas_big_i8(0x12345678, 0, 0x12, 0xab) == 0xab345678 +; run: %atomic_cas_big_i8(0x12345678, 0, 0x21, 0xab) == 0x12345678 +; run: %atomic_cas_big_i8(0x12345678, 1, 0x34, 0xab) == 0x12ab5678 +; run: %atomic_cas_big_i8(0x12345678, 1, 0x43, 0xab) == 0x12345678 +; run: %atomic_cas_big_i8(0x12345678, 2, 0x56, 0xab) == 0x1234ab78 +; run: %atomic_cas_big_i8(0x12345678, 2, 0x65, 0xab) == 0x12345678 +; run: %atomic_cas_big_i8(0x12345678, 3, 0x78, 0xab) == 0x123456ab +; run: %atomic_cas_big_i8(0x12345678, 3, 0x87, 0xab) == 0x12345678 + +function %atomic_cas_little_i8(i32, i64, i8, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8, v3: i8): + v4 = stack_addr.i64 ss0 + store.i32 little v0, v4 + + v5 = iadd.i64 v4, v1 + v6 = atomic_cas.i8 little v5, v2, v3 + + v7 = load.i32 little v4 + return v7 +} +; run: %atomic_cas_little_i8(0x12345678, 3, 0x12, 0xab) == 0xab345678 +; run: %atomic_cas_little_i8(0x12345678, 3, 0x21, 0xab) == 0x12345678 +; run: %atomic_cas_little_i8(0x12345678, 2, 0x34, 0xab) == 0x12ab5678 +; run: %atomic_cas_little_i8(0x12345678, 2, 0x43, 0xab) == 0x12345678 +; run: %atomic_cas_little_i8(0x12345678, 1, 0x56, 0xab) == 0x1234ab78 +; run: %atomic_cas_little_i8(0x12345678, 1, 0x65, 0xab) == 0x12345678 +; run: %atomic_cas_little_i8(0x12345678, 0, 0x78, 0xab) == 0x123456ab +; run: %atomic_cas_little_i8(0x12345678, 0, 0x87, 0xab) == 0x12345678 + diff --git a/cranelift/filetests/filetests/runtests/atomic-cas.clif b/cranelift/filetests/filetests/runtests/atomic-cas.clif new file mode 100644 index 0000000000..7e95568d5f --- /dev/null +++ 
b/cranelift/filetests/filetests/runtests/atomic-cas.clif @@ -0,0 +1,43 @@ +test run +target aarch64 +target aarch64 has_lse +target x86_64 +target s390x + +; We can't test that these instructions are right regarding atomicity, but we can +; test if they perform their operation correctly + +function %atomic_cas_i64(i64, i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64, v2: i64): + stack_store.i64 v0, ss0 + + v3 = stack_addr.i64 ss0 + v4 = atomic_cas.i64 v3, v1, v2 + + v5 = stack_load.i64 ss0 + return v5 +} +; run: %atomic_cas_i64(0, 0, 2) == 2 +; run: %atomic_cas_i64(1, 0, 2) == 1 +; run: %atomic_cas_i64(0, 1, 2) == 0 +; run: %atomic_cas_i64(0, 0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF + +function %atomic_cas_i32(i32, i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32, v2: i32): + stack_store.i32 v0, ss0 + + v3 = stack_addr.i32 ss0 + v4 = atomic_cas.i32 v3, v1, v2 + + v5 = stack_load.i32 ss0 + return v5 +} +; run: %atomic_cas_i32(0, 0, 2) == 2 +; run: %atomic_cas_i32(1, 0, 2) == 1 +; run: %atomic_cas_i32(0, 1, 2) == 0 +; run: %atomic_cas_i32(0, 0, 0xC0FFEEEE) == 0xC0FFEEEE + diff --git a/cranelift/filetests/filetests/runtests/atomic-rmw-2.clif b/cranelift/filetests/filetests/runtests/atomic-rmw-2.clif deleted file mode 100644 index b697a9279e..0000000000 --- a/cranelift/filetests/filetests/runtests/atomic-rmw-2.clif +++ /dev/null @@ -1,238 +0,0 @@ -test run -target aarch64 -target aarch64 has_lse -target x86_64 -; TODO: Merge this with atomic-rmw.clif when s390x supports it - - -function %atomic_rmw_nand_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 nand v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_nand_i64(0, 0) == -1 -; run: %atomic_rmw_nand_i64(1, 0) == -1 -; run: %atomic_rmw_nand_i64(0, 1) == -1 -; run: %atomic_rmw_nand_i64(1, 1) == -2 -; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 
0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E - -function %atomic_rmw_nand_i32(i32, i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): - stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 nand v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_nand_i32(0, 0) == -1 -; run: %atomic_rmw_nand_i32(1, 0) == -1 -; run: %atomic_rmw_nand_i32(0, 1) == -1 -; run: %atomic_rmw_nand_i32(1, 1) == -2 -; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F - - - -function %atomic_rmw_umin_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 umin v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_umin_i64(0, 0) == 0 -; run: %atomic_rmw_umin_i64(1, 0) == 0 -; run: %atomic_rmw_umin_i64(0, 1) == 0 -; run: %atomic_rmw_umin_i64(1, 1) == 1 -; run: %atomic_rmw_umin_i64(-1, 1) == 1 -; run: %atomic_rmw_umin_i64(-1, -3) == -3 - -function %atomic_rmw_umin_i32(i32, i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): - stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 umin v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_umin_i32(0, 0) == 0 -; run: %atomic_rmw_umin_i32(1, 0) == 0 -; run: %atomic_rmw_umin_i32(0, 1) == 0 -; run: %atomic_rmw_umin_i32(1, 1) == 1 -; run: %atomic_rmw_umin_i32(-1, 1) == 1 -; run: %atomic_rmw_umin_i32(-1, -3) == -3 - - - -function %atomic_rmw_umax_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 umax v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_umax_i64(0, 0) == 0 -; run: %atomic_rmw_umax_i64(1, 0) == 1 -; run: %atomic_rmw_umax_i64(0, 1) == 1 -; run: %atomic_rmw_umax_i64(1, 1) == 1 -; run: %atomic_rmw_umax_i64(-1, 1) == -1 -; run: %atomic_rmw_umax_i64(-1, -3) == -1 - -function %atomic_rmw_umax_i32(i32, 
i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): - stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 umax v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_umax_i32(0, 0) == 0 -; run: %atomic_rmw_umax_i32(1, 0) == 1 -; run: %atomic_rmw_umax_i32(0, 1) == 1 -; run: %atomic_rmw_umax_i32(1, 1) == 1 -; run: %atomic_rmw_umax_i32(-1, 1) == -1 -; run: %atomic_rmw_umax_i32(-1, -3) == -1 - - - -function %atomic_rmw_smin_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 smin v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_smin_i64(0, 0) == 0 -; run: %atomic_rmw_smin_i64(1, 0) == 0 -; run: %atomic_rmw_smin_i64(0, 1) == 0 -; run: %atomic_rmw_smin_i64(1, 1) == 1 -; run: %atomic_rmw_smin_i64(-1, 1) == -1 -; run: %atomic_rmw_smin_i64(-1, -3) == -3 - -function %atomic_rmw_smin_i32(i32, i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): - stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 smin v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_smin_i32(0, 0) == 0 -; run: %atomic_rmw_smin_i32(1, 0) == 0 -; run: %atomic_rmw_smin_i32(0, 1) == 0 -; run: %atomic_rmw_smin_i32(1, 1) == 1 -; run: %atomic_rmw_smin_i32(-1, -1) == -1 -; run: %atomic_rmw_smin_i32(-1, -3) == -3 - - - -function %atomic_rmw_smax_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 smax v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_smax_i64(0, 0) == 0 -; run: %atomic_rmw_smax_i64(1, 0) == 1 -; run: %atomic_rmw_smax_i64(0, 1) == 1 -; run: %atomic_rmw_smax_i64(1, 1) == 1 -; run: %atomic_rmw_smax_i64(-1, 1) == 1 -; run: %atomic_rmw_smax_i64(-1, -3) == -1 - -function %atomic_rmw_smax_i32(i32, i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): 
- stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 smax v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_smax_i32(0, 0) == 0 -; run: %atomic_rmw_smax_i32(1, 0) == 1 -; run: %atomic_rmw_smax_i32(0, 1) == 1 -; run: %atomic_rmw_smax_i32(1, 1) == 1 -; run: %atomic_rmw_smax_i32(-1, 1) == 1 -; run: %atomic_rmw_smax_i32(-1, -3) == -1 - - - -function %atomic_rmw_xchg_i64(i64, i64) -> i64 { - ss0 = explicit_slot 8 - -block0(v0: i64, v1: i64): - stack_store.i64 v0, ss0 - - v2 = stack_addr.i64 ss0 - v3 = atomic_rmw.i64 xchg v2, v1 - - v4 = stack_load.i64 ss0 - return v4 -} -; run: %atomic_rmw_xchg_i64(0, 0) == 0 -; run: %atomic_rmw_xchg_i64(1, 0) == 0 -; run: %atomic_rmw_xchg_i64(0, 1) == 1 -; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF - -function %atomic_rmw_xchg_i32(i32, i32) -> i32 { - ss0 = explicit_slot 4 - -block0(v0: i32, v1: i32): - stack_store.i32 v0, ss0 - - v2 = stack_addr.i32 ss0 - v3 = atomic_rmw.i32 xchg v2, v1 - - v4 = stack_load.i32 ss0 - return v4 -} -; run: %atomic_rmw_xchg_i32(0, 0) == 0 -; run: %atomic_rmw_xchg_i32(1, 0) == 0 -; run: %atomic_rmw_xchg_i32(0, 1) == 1 -; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE diff --git a/cranelift/filetests/filetests/runtests/atomic-rmw-little.clif b/cranelift/filetests/filetests/runtests/atomic-rmw-little.clif new file mode 100644 index 0000000000..cd6731cc4f --- /dev/null +++ b/cranelift/filetests/filetests/runtests/atomic-rmw-little.clif @@ -0,0 +1,429 @@ +test run +target s390x + +; We can't test that these instructions are right regarding atomicity, but we can +; test if they perform their operation correctly + +function %atomic_rmw_add_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little add v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_add_i64(0, 0) == 0 +; run: %atomic_rmw_add_i64(1, 0) == 1 +; run: 
%atomic_rmw_add_i64(0, 1) == 1 +; run: %atomic_rmw_add_i64(1, 1) == 2 +; run: %atomic_rmw_add_i64(0xC0FFEEEE_C0FFEEEE, 0x1DCB1111_1DCB1111) == 0xDECAFFFF_DECAFFFF + +function %atomic_rmw_add_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little add v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_add_i32(0, 0) == 0 +; run: %atomic_rmw_add_i32(1, 0) == 1 +; run: %atomic_rmw_add_i32(0, 1) == 1 +; run: %atomic_rmw_add_i32(1, 1) == 2 +; run: %atomic_rmw_add_i32(0xC0FFEEEE, 0x1DCB1111) == 0xDECAFFFF + + + +function %atomic_rmw_sub_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little sub v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_sub_i64(0, 0) == 0 +; run: %atomic_rmw_sub_i64(1, 0) == 1 +; run: %atomic_rmw_sub_i64(0, 1) == -1 +; run: %atomic_rmw_sub_i64(1, 1) == 0 +; run: %atomic_rmw_sub_i64(0xDECAFFFF_DECAFFFF, 0x1DCB1111_1DCB1111) == 0xC0FFEEEE_C0FFEEEE + +function %atomic_rmw_sub_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little sub v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_sub_i32(0, 0) == 0 +; run: %atomic_rmw_sub_i32(1, 0) == 1 +; run: %atomic_rmw_sub_i32(0, 1) == -1 +; run: %atomic_rmw_sub_i32(1, 1) == 0 +; run: %atomic_rmw_sub_i32(0xDECAFFFF, 0x1DCB1111) == 0xC0FFEEEE + + + +function %atomic_rmw_and_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little and v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_and_i64(0, 0) == 0 +; run: %atomic_rmw_and_i64(1, 0) == 0 +; run: %atomic_rmw_and_i64(0, 1) == 0 +; run: %atomic_rmw_and_i64(1, 1) == 1 +; run: 
%atomic_rmw_and_i64(0xF1FFFEFE_FEEEFFFF, 0xCEFFEFEF_DFDBFFFF) == 0xC0FFEEEE_DECAFFFF + +function %atomic_rmw_and_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little and v2, v1 + + v4 = load.i32 little v2 + return v4 +} + +; run: %atomic_rmw_and_i32(0, 0) == 0 +; run: %atomic_rmw_and_i32(1, 0) == 0 +; run: %atomic_rmw_and_i32(0, 1) == 0 +; run: %atomic_rmw_and_i32(1, 1) == 1 +; run: %atomic_rmw_and_i32(0xF1FFFEFE, 0xCEFFEFEF) == 0xC0FFEEEE + + + +function %atomic_rmw_or_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little or v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_or_i64(0, 0) == 0 +; run: %atomic_rmw_or_i64(1, 0) == 1 +; run: %atomic_rmw_or_i64(0, 1) == 1 +; run: %atomic_rmw_or_i64(1, 1) == 1 +; run: %atomic_rmw_or_i64(0x80AAAAAA_8A8AAAAA, 0x40554444_54405555) == 0xC0FFEEEE_DECAFFFF + +function %atomic_rmw_or_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little or v2, v1 + + v4 = load.i32 little v2 + return v4 +} + +; run: %atomic_rmw_or_i32(0, 0) == 0 +; run: %atomic_rmw_or_i32(1, 0) == 1 +; run: %atomic_rmw_or_i32(0, 1) == 1 +; run: %atomic_rmw_or_i32(1, 1) == 1 +; run: %atomic_rmw_or_i32(0x80AAAAAA, 0x40554444) == 0xC0FFEEEE + + + +function %atomic_rmw_xor_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little xor v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_xor_i64(0, 0) == 0 +; run: %atomic_rmw_xor_i64(1, 0) == 1 +; run: %atomic_rmw_xor_i64(0, 1) == 1 +; run: %atomic_rmw_xor_i64(1, 1) == 0 +; run: %atomic_rmw_xor_i64(0x8FA50A64_9440A07D, 0x4F5AE48A_4A8A5F82) == 0xC0FFEEEE_DECAFFFF + 
+function %atomic_rmw_xor_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little xor v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_xor_i32(0, 0) == 0 +; run: %atomic_rmw_xor_i32(1, 0) == 1 +; run: %atomic_rmw_xor_i32(0, 1) == 1 +; run: %atomic_rmw_xor_i32(1, 1) == 0 +; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE + + + +function %atomic_rmw_nand_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little nand v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_nand_i64(0, 0) == -1 +; run: %atomic_rmw_nand_i64(1, 0) == -1 +; run: %atomic_rmw_nand_i64(0, 1) == -1 +; run: %atomic_rmw_nand_i64(1, 1) == -2 +; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E + +function %atomic_rmw_nand_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little nand v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_nand_i32(0, 0) == -1 +; run: %atomic_rmw_nand_i32(1, 0) == -1 +; run: %atomic_rmw_nand_i32(0, 1) == -1 +; run: %atomic_rmw_nand_i32(1, 1) == -2 +; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F + + + +function %atomic_rmw_umin_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little umin v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_umin_i64(0, 0) == 0 +; run: %atomic_rmw_umin_i64(1, 0) == 0 +; run: %atomic_rmw_umin_i64(0, 1) == 0 +; run: %atomic_rmw_umin_i64(1, 1) == 1 +; run: %atomic_rmw_umin_i64(-1, 1) == 1 +; run: %atomic_rmw_umin_i64(-1, -3) == -3 + +function %atomic_rmw_umin_i32(i32, i32) -> i32 { + ss0 = 
explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little umin v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_umin_i32(0, 0) == 0 +; run: %atomic_rmw_umin_i32(1, 0) == 0 +; run: %atomic_rmw_umin_i32(0, 1) == 0 +; run: %atomic_rmw_umin_i32(1, 1) == 1 +; run: %atomic_rmw_umin_i32(-1, 1) == 1 +; run: %atomic_rmw_umin_i32(-1, -3) == -3 + + + +function %atomic_rmw_umax_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little umax v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_umax_i64(0, 0) == 0 +; run: %atomic_rmw_umax_i64(1, 0) == 1 +; run: %atomic_rmw_umax_i64(0, 1) == 1 +; run: %atomic_rmw_umax_i64(1, 1) == 1 +; run: %atomic_rmw_umax_i64(-1, 1) == -1 +; run: %atomic_rmw_umax_i64(-1, -3) == -1 + +function %atomic_rmw_umax_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little umax v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_umax_i32(0, 0) == 0 +; run: %atomic_rmw_umax_i32(1, 0) == 1 +; run: %atomic_rmw_umax_i32(0, 1) == 1 +; run: %atomic_rmw_umax_i32(1, 1) == 1 +; run: %atomic_rmw_umax_i32(-1, 1) == -1 +; run: %atomic_rmw_umax_i32(-1, -3) == -1 + + + +function %atomic_rmw_smin_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little smin v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_smin_i64(0, 0) == 0 +; run: %atomic_rmw_smin_i64(1, 0) == 0 +; run: %atomic_rmw_smin_i64(0, 1) == 0 +; run: %atomic_rmw_smin_i64(1, 1) == 1 +; run: %atomic_rmw_smin_i64(-1, 1) == -1 +; run: %atomic_rmw_smin_i64(-1, -3) == -3 + +function %atomic_rmw_smin_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: 
i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little smin v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_smin_i32(0, 0) == 0 +; run: %atomic_rmw_smin_i32(1, 0) == 0 +; run: %atomic_rmw_smin_i32(0, 1) == 0 +; run: %atomic_rmw_smin_i32(1, 1) == 1 +; run: %atomic_rmw_smin_i32(-1, -1) == -1 +; run: %atomic_rmw_smin_i32(-1, -3) == -3 + + + +function %atomic_rmw_smax_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little smax v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_smax_i64(0, 0) == 0 +; run: %atomic_rmw_smax_i64(1, 0) == 1 +; run: %atomic_rmw_smax_i64(0, 1) == 1 +; run: %atomic_rmw_smax_i64(1, 1) == 1 +; run: %atomic_rmw_smax_i64(-1, 1) == 1 +; run: %atomic_rmw_smax_i64(-1, -3) == -1 + +function %atomic_rmw_smax_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little smax v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_smax_i32(0, 0) == 0 +; run: %atomic_rmw_smax_i32(1, 0) == 1 +; run: %atomic_rmw_smax_i32(0, 1) == 1 +; run: %atomic_rmw_smax_i32(1, 1) == 1 +; run: %atomic_rmw_smax_i32(-1, 1) == 1 +; run: %atomic_rmw_smax_i32(-1, -3) == -1 + + + +function %atomic_rmw_xchg_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + v2 = stack_addr.i64 ss0 + store.i64 little v0, v2 + + v3 = atomic_rmw.i64 little xchg v2, v1 + + v4 = load.i64 little v2 + return v4 +} +; run: %atomic_rmw_xchg_i64(0, 0) == 0 +; run: %atomic_rmw_xchg_i64(1, 0) == 0 +; run: %atomic_rmw_xchg_i64(0, 1) == 1 +; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF + +function %atomic_rmw_xchg_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + v2 = stack_addr.i32 ss0 + store.i32 little v0, v2 + + v3 = atomic_rmw.i32 little 
xchg v2, v1 + + v4 = load.i32 little v2 + return v4 +} +; run: %atomic_rmw_xchg_i32(0, 0) == 0 +; run: %atomic_rmw_xchg_i32(1, 0) == 0 +; run: %atomic_rmw_xchg_i32(0, 1) == 1 +; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE diff --git a/cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif b/cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif new file mode 100644 index 0000000000..efc3068b3f --- /dev/null +++ b/cranelift/filetests/filetests/runtests/atomic-rmw-subword.clif @@ -0,0 +1,907 @@ +test run +target s390x + +; We can't test that these instructions are right regarding atomicity, but we can +; test if they perform their operation correctly + +function %atomic_rmw_add_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big add v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0x1111) == 0x23455678 +; run: %atomic_rmw_add_big_i16(0x12345678, 0, 0xffff) == 0x12335678 +; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0x1111) == 0x12346789 +; run: %atomic_rmw_add_big_i16(0x12345678, 2, 0xffff) == 0x12345677 + +function %atomic_rmw_add_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little add v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0x1111) == 0x23455678 +; run: %atomic_rmw_add_little_i16(0x12345678, 2, 0xffff) == 0x12335678 +; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0x1111) == 0x12346789 +; run: %atomic_rmw_add_little_i16(0x12345678, 0, 0xffff) == 0x12345677 + +function %atomic_rmw_add_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = 
iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big add v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0x11) == 0x23345678 +; run: %atomic_rmw_add_big_i8(0x12345678, 0, 0xff) == 0x11345678 +; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0x11) == 0x12455678 +; run: %atomic_rmw_add_big_i8(0x12345678, 1, 0xff) == 0x12335678 +; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0x11) == 0x12346778 +; run: %atomic_rmw_add_big_i8(0x12345678, 2, 0xff) == 0x12345578 +; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0x11) == 0x12345689 +; run: %atomic_rmw_add_big_i8(0x12345678, 3, 0xff) == 0x12345677 + +function %atomic_rmw_add_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little add v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0x11) == 0x23345678 +; run: %atomic_rmw_add_little_i8(0x12345678, 3, 0xff) == 0x11345678 +; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0x11) == 0x12455678 +; run: %atomic_rmw_add_little_i8(0x12345678, 2, 0xff) == 0x12335678 +; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0x11) == 0x12346778 +; run: %atomic_rmw_add_little_i8(0x12345678, 1, 0xff) == 0x12345578 +; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0x11) == 0x12345689 +; run: %atomic_rmw_add_little_i8(0x12345678, 0, 0xff) == 0x12345677 + + + +function %atomic_rmw_sub_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big sub v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0x1111) == 0x01235678 +; run: %atomic_rmw_sub_big_i16(0x12345678, 0, 0xffff) == 0x12355678 +; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0x1111) == 0x12344567 +; run: %atomic_rmw_sub_big_i16(0x12345678, 2, 0xffff) == 
0x12345679 + +function %atomic_rmw_sub_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little sub v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0x1111) == 0x01235678 +; run: %atomic_rmw_sub_little_i16(0x12345678, 2, 0xffff) == 0x12355678 +; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0x1111) == 0x12344567 +; run: %atomic_rmw_sub_little_i16(0x12345678, 0, 0xffff) == 0x12345679 + +function %atomic_rmw_sub_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big sub v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0x11) == 0x01345678 +; run: %atomic_rmw_sub_big_i8(0x12345678, 0, 0xff) == 0x13345678 +; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0x11) == 0x12235678 +; run: %atomic_rmw_sub_big_i8(0x12345678, 1, 0xff) == 0x12355678 +; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0x11) == 0x12344578 +; run: %atomic_rmw_sub_big_i8(0x12345678, 2, 0xff) == 0x12345778 +; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0x11) == 0x12345667 +; run: %atomic_rmw_sub_big_i8(0x12345678, 3, 0xff) == 0x12345679 + +function %atomic_rmw_sub_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little sub v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0x11) == 0x01345678 +; run: %atomic_rmw_sub_little_i8(0x12345678, 3, 0xff) == 0x13345678 +; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0x11) == 0x12235678 +; run: %atomic_rmw_sub_little_i8(0x12345678, 2, 0xff) == 0x12355678 +; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0x11) == 
0x12344578 +; run: %atomic_rmw_sub_little_i8(0x12345678, 1, 0xff) == 0x12345778 +; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0x11) == 0x12345667 +; run: %atomic_rmw_sub_little_i8(0x12345678, 0, 0xff) == 0x12345679 + + + +function %atomic_rmw_and_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big and v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0xf000) == 0x10005678 +; run: %atomic_rmw_and_big_i16(0x12345678, 0, 0x000f) == 0x00045678 +; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0xf000) == 0x12345000 +; run: %atomic_rmw_and_big_i16(0x12345678, 2, 0x000f) == 0x12340008 + +function %atomic_rmw_and_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little and v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0xf000) == 0x10005678 +; run: %atomic_rmw_and_little_i16(0x12345678, 2, 0x000f) == 0x00045678 +; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0xf000) == 0x12345000 +; run: %atomic_rmw_and_little_i16(0x12345678, 0, 0x000f) == 0x12340008 + +function %atomic_rmw_and_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big and v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0xf0) == 0x10345678 +; run: %atomic_rmw_and_big_i8(0x12345678, 0, 0x0f) == 0x02345678 +; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0xf0) == 0x12305678 +; run: %atomic_rmw_and_big_i8(0x12345678, 1, 0x0f) == 0x12045678 +; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0xf0) == 0x12345078 +; run: %atomic_rmw_and_big_i8(0x12345678, 2, 0x0f) == 
0x12340678 +; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0xf0) == 0x12345670 +; run: %atomic_rmw_and_big_i8(0x12345678, 3, 0x0f) == 0x12345608 + +function %atomic_rmw_and_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little and v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0xf0) == 0x10345678 +; run: %atomic_rmw_and_little_i8(0x12345678, 3, 0x0f) == 0x02345678 +; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0xf0) == 0x12305678 +; run: %atomic_rmw_and_little_i8(0x12345678, 2, 0x0f) == 0x12045678 +; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0xf0) == 0x12345078 +; run: %atomic_rmw_and_little_i8(0x12345678, 1, 0x0f) == 0x12340678 +; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0xf0) == 0x12345670 +; run: %atomic_rmw_and_little_i8(0x12345678, 0, 0x0f) == 0x12345608 + + + +function %atomic_rmw_or_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big or v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0xf000) == 0xf2345678 +; run: %atomic_rmw_or_big_i16(0x12345678, 0, 0x000f) == 0x123f5678 +; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0xf000) == 0x1234f678 +; run: %atomic_rmw_or_big_i16(0x12345678, 2, 0x000f) == 0x1234567f + +function %atomic_rmw_or_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little or v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0xf000) == 0xf2345678 +; run: %atomic_rmw_or_little_i16(0x12345678, 2, 0x000f) == 0x123f5678 +; run: %atomic_rmw_or_little_i16(0x12345678, 0, 
0xf000) == 0x1234f678 +; run: %atomic_rmw_or_little_i16(0x12345678, 0, 0x000f) == 0x1234567f + +function %atomic_rmw_or_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big or v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0xf0) == 0xf2345678 +; run: %atomic_rmw_or_big_i8(0x12345678, 0, 0x0f) == 0x1f345678 +; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0xf0) == 0x12f45678 +; run: %atomic_rmw_or_big_i8(0x12345678, 1, 0x0f) == 0x123f5678 +; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0xf0) == 0x1234f678 +; run: %atomic_rmw_or_big_i8(0x12345678, 2, 0x0f) == 0x12345f78 +; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0xf0) == 0x123456f8 +; run: %atomic_rmw_or_big_i8(0x12345678, 3, 0x0f) == 0x1234567f + +function %atomic_rmw_or_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little or v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0xf0) == 0xf2345678 +; run: %atomic_rmw_or_little_i8(0x12345678, 3, 0x0f) == 0x1f345678 +; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0xf0) == 0x12f45678 +; run: %atomic_rmw_or_little_i8(0x12345678, 2, 0x0f) == 0x123f5678 +; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0xf0) == 0x1234f678 +; run: %atomic_rmw_or_little_i8(0x12345678, 1, 0x0f) == 0x12345f78 +; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0xf0) == 0x123456f8 +; run: %atomic_rmw_or_little_i8(0x12345678, 0, 0x0f) == 0x1234567f + + + + +function %atomic_rmw_xor_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big xor v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: 
%atomic_rmw_xor_big_i16(0x12345678, 0, 0xf000) == 0xe2345678 +; run: %atomic_rmw_xor_big_i16(0x12345678, 0, 0x000f) == 0x123b5678 +; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0xf000) == 0x1234a678 +; run: %atomic_rmw_xor_big_i16(0x12345678, 2, 0x000f) == 0x12345677 + +function %atomic_rmw_xor_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little xor v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0xf000) == 0xe2345678 +; run: %atomic_rmw_xor_little_i16(0x12345678, 2, 0x000f) == 0x123b5678 +; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0xf000) == 0x1234a678 +; run: %atomic_rmw_xor_little_i16(0x12345678, 0, 0x000f) == 0x12345677 + +function %atomic_rmw_xor_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big xor v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0xf0) == 0xe2345678 +; run: %atomic_rmw_xor_big_i8(0x12345678, 0, 0x0f) == 0x1d345678 +; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0xf0) == 0x12c45678 +; run: %atomic_rmw_xor_big_i8(0x12345678, 1, 0x0f) == 0x123b5678 +; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0xf0) == 0x1234a678 +; run: %atomic_rmw_xor_big_i8(0x12345678, 2, 0x0f) == 0x12345978 +; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0xf0) == 0x12345688 +; run: %atomic_rmw_xor_big_i8(0x12345678, 3, 0x0f) == 0x12345677 + +function %atomic_rmw_xor_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little xor v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_xor_little_i8(0x12345678, 3, 0xf0) == 0xe2345678 +; run: 
%atomic_rmw_xor_little_i8(0x12345678, 3, 0x0f) == 0x1d345678 +; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0xf0) == 0x12c45678 +; run: %atomic_rmw_xor_little_i8(0x12345678, 2, 0x0f) == 0x123b5678 +; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0xf0) == 0x1234a678 +; run: %atomic_rmw_xor_little_i8(0x12345678, 1, 0x0f) == 0x12345978 +; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0xf0) == 0x12345688 +; run: %atomic_rmw_xor_little_i8(0x12345678, 0, 0x0f) == 0x12345677 + + + +function %atomic_rmw_nand_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big nand v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0xf000) == 0xefff5678 +; run: %atomic_rmw_nand_big_i16(0x12345678, 0, 0x000f) == 0xfffb5678 +; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0xf000) == 0x1234afff +; run: %atomic_rmw_nand_big_i16(0x12345678, 2, 0x000f) == 0x1234fff7 + +function %atomic_rmw_nand_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little nand v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0xf000) == 0xefff5678 +; run: %atomic_rmw_nand_little_i16(0x12345678, 2, 0x000f) == 0xfffb5678 +; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0xf000) == 0x1234afff +; run: %atomic_rmw_nand_little_i16(0x12345678, 0, 0x000f) == 0x1234fff7 + +function %atomic_rmw_nand_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big nand v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 0xf0) == 0xef345678 +; run: %atomic_rmw_nand_big_i8(0x12345678, 0, 
0x0f) == 0xfd345678 +; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0xf0) == 0x12cf5678 +; run: %atomic_rmw_nand_big_i8(0x12345678, 1, 0x0f) == 0x12fb5678 +; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0xf0) == 0x1234af78 +; run: %atomic_rmw_nand_big_i8(0x12345678, 2, 0x0f) == 0x1234f978 +; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0xf0) == 0x1234568f +; run: %atomic_rmw_nand_big_i8(0x12345678, 3, 0x0f) == 0x123456f7 + +function %atomic_rmw_nand_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little nand v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0xf0) == 0xef345678 +; run: %atomic_rmw_nand_little_i8(0x12345678, 3, 0x0f) == 0xfd345678 +; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0xf0) == 0x12cf5678 +; run: %atomic_rmw_nand_little_i8(0x12345678, 2, 0x0f) == 0x12fb5678 +; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0xf0) == 0x1234af78 +; run: %atomic_rmw_nand_little_i8(0x12345678, 1, 0x0f) == 0x1234f978 +; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0xf0) == 0x1234568f +; run: %atomic_rmw_nand_little_i8(0x12345678, 0, 0x0f) == 0x123456f7 + + + +function %atomic_rmw_umin_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big umin v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0x1111) == 0x11115678 +; run: %atomic_rmw_umin_big_i16(0x12345678, 0, 0xffff) == 0x12345678 +; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0x1111) == 0x12341111 +; run: %atomic_rmw_umin_big_i16(0x12345678, 2, 0xffff) == 0x12345678 + +function %atomic_rmw_umin_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, 
v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little umin v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0x1111) == 0x11115678 +; run: %atomic_rmw_umin_little_i16(0x12345678, 2, 0xffff) == 0x12345678 +; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0x1111) == 0x12341111 +; run: %atomic_rmw_umin_little_i16(0x12345678, 0, 0xffff) == 0x12345678 + +function %atomic_rmw_umin_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big umin v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0x11) == 0x11345678 +; run: %atomic_rmw_umin_big_i8(0x12345678, 0, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0x11) == 0x12115678 +; run: %atomic_rmw_umin_big_i8(0x12345678, 1, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0x11) == 0x12341178 +; run: %atomic_rmw_umin_big_i8(0x12345678, 2, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0x11) == 0x12345611 +; run: %atomic_rmw_umin_big_i8(0x12345678, 3, 0xff) == 0x12345678 + +function %atomic_rmw_umin_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little umin v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0x11) == 0x11345678 +; run: %atomic_rmw_umin_little_i8(0x12345678, 3, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0x11) == 0x12115678 +; run: %atomic_rmw_umin_little_i8(0x12345678, 2, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0x11) == 0x12341178 +; run: %atomic_rmw_umin_little_i8(0x12345678, 1, 0xff) == 0x12345678 +; run: %atomic_rmw_umin_little_i8(0x12345678, 0, 0x11) == 0x12345611 +; run: 
%atomic_rmw_umin_little_i8(0x12345678, 0, 0xff) == 0x12345678 + + + +function %atomic_rmw_umax_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big umax v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0x1111) == 0x12345678 +; run: %atomic_rmw_umax_big_i16(0x12345678, 0, 0xffff) == 0xffff5678 +; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0x1111) == 0x12345678 +; run: %atomic_rmw_umax_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff + +function %atomic_rmw_umax_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little umax v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0x1111) == 0x12345678 +; run: %atomic_rmw_umax_little_i16(0x12345678, 2, 0xffff) == 0xffff5678 +; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0x1111) == 0x12345678 +; run: %atomic_rmw_umax_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff + +function %atomic_rmw_umax_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big umax v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 0, 0xff) == 0xff345678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 1, 0xff) == 0x12ff5678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 2, 0xff) == 0x1234ff78 +; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_big_i8(0x12345678, 3, 0xff) == 
0x123456ff + +function %atomic_rmw_umax_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little umax v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 3, 0xff) == 0xff345678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 2, 0xff) == 0x12ff5678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 1, 0xff) == 0x1234ff78 +; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0x11) == 0x12345678 +; run: %atomic_rmw_umax_little_i8(0x12345678, 0, 0xff) == 0x123456ff + + + +function %atomic_rmw_smin_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big smin v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0x1111) == 0x11115678 +; run: %atomic_rmw_smin_big_i16(0x12345678, 0, 0xffff) == 0xffff5678 +; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0x1111) == 0x12341111 +; run: %atomic_rmw_smin_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff + +function %atomic_rmw_smin_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little smin v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0x1111) == 0x11115678 +; run: %atomic_rmw_smin_little_i16(0x12345678, 2, 0xffff) == 0xffff5678 +; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0x1111) == 0x12341111 +; run: %atomic_rmw_smin_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff + 
+function %atomic_rmw_smin_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big smin v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0x11) == 0x11345678 +; run: %atomic_rmw_smin_big_i8(0x12345678, 0, 0xff) == 0xff345678 +; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0x11) == 0x12115678 +; run: %atomic_rmw_smin_big_i8(0x12345678, 1, 0xff) == 0x12ff5678 +; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0x11) == 0x12341178 +; run: %atomic_rmw_smin_big_i8(0x12345678, 2, 0xff) == 0x1234ff78 +; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0x11) == 0x12345611 +; run: %atomic_rmw_smin_big_i8(0x12345678, 3, 0xff) == 0x123456ff + +function %atomic_rmw_smin_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little smin v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0x11) == 0x11345678 +; run: %atomic_rmw_smin_little_i8(0x12345678, 3, 0xff) == 0xff345678 +; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0x11) == 0x12115678 +; run: %atomic_rmw_smin_little_i8(0x12345678, 2, 0xff) == 0x12ff5678 +; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0x11) == 0x12341178 +; run: %atomic_rmw_smin_little_i8(0x12345678, 1, 0xff) == 0x1234ff78 +; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0x11) == 0x12345611 +; run: %atomic_rmw_smin_little_i8(0x12345678, 0, 0xff) == 0x123456ff + + + +function %atomic_rmw_smax_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big smax v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0xffff) == 0x12345678 +; 
run: %atomic_rmw_smax_big_i16(0x12345678, 0, 0x7fff) == 0x7fff5678 +; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0xffff) == 0x12345678 +; run: %atomic_rmw_smax_big_i16(0x12345678, 2, 0x7fff) == 0x12347fff + +function %atomic_rmw_smax_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little smax v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0xffff) == 0x12345678 +; run: %atomic_rmw_smax_little_i16(0x12345678, 2, 0x7fff) == 0x7fff5678 +; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0xffff) == 0x12345678 +; run: %atomic_rmw_smax_little_i16(0x12345678, 0, 0x7fff) == 0x12347fff + +function %atomic_rmw_smax_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big smax v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 0, 0x7f) == 0x7f345678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 1, 0x7f) == 0x127f5678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 2, 0x7f) == 0x12347f78 +; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_big_i8(0x12345678, 3, 0x7f) == 0x1234567f + +function %atomic_rmw_smax_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little smax v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 3, 
0x7f) == 0x7f345678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 2, 0x7f) == 0x127f5678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 1, 0x7f) == 0x12347f78 +; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0xff) == 0x12345678 +; run: %atomic_rmw_smax_little_i8(0x12345678, 0, 0x7f) == 0x1234567f + + + +function %atomic_rmw_xchg_big_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 big xchg v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0x1111) == 0x11115678 +; run: %atomic_rmw_xchg_big_i16(0x12345678, 0, 0xffff) == 0xffff5678 +; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0x1111) == 0x12341111 +; run: %atomic_rmw_xchg_big_i16(0x12345678, 2, 0xffff) == 0x1234ffff + +function %atomic_rmw_xchg_little_i16(i32, i64, i16) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i16): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i16 little xchg v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0x1111) == 0x11115678 +; run: %atomic_rmw_xchg_little_i16(0x12345678, 2, 0xffff) == 0xffff5678 +; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0x1111) == 0x12341111 +; run: %atomic_rmw_xchg_little_i16(0x12345678, 0, 0xffff) == 0x1234ffff + +function %atomic_rmw_xchg_big_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 big v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 big xchg v4, v2 + + v6 = load.i32 big v3 + return v6 +} +; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0x11) == 0x11345678 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 0, 0xff) == 0xff345678 +;
run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0x11) == 0x12115678 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 1, 0xff) == 0x12ff5678 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0x11) == 0x12341178 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 2, 0xff) == 0x1234ff78 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0x11) == 0x12345611 +; run: %atomic_rmw_xchg_big_i8(0x12345678, 3, 0xff) == 0x123456ff + +function %atomic_rmw_xchg_little_i8(i32, i64, i8) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i64, v2: i8): + v3 = stack_addr.i64 ss0 + store.i32 little v0, v3 + + v4 = iadd.i64 v3, v1 + v5 = atomic_rmw.i8 little xchg v4, v2 + + v6 = load.i32 little v3 + return v6 +} +; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0x11) == 0x11345678 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 3, 0xff) == 0xff345678 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0x11) == 0x12115678 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 2, 0xff) == 0x12ff5678 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0x11) == 0x12341178 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 1, 0xff) == 0x1234ff78 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0x11) == 0x12345611 +; run: %atomic_rmw_xchg_little_i8(0x12345678, 0, 0xff) == 0x123456ff + diff --git a/cranelift/filetests/filetests/runtests/atomic-rmw.clif b/cranelift/filetests/filetests/runtests/atomic-rmw.clif index 57e18a0dbe..26c466c5e8 100644 --- a/cranelift/filetests/filetests/runtests/atomic-rmw.clif +++ b/cranelift/filetests/filetests/runtests/atomic-rmw.clif @@ -196,3 +196,237 @@ block0(v0: i32, v1: i32): ; run: %atomic_rmw_xor_i32(0, 1) == 1 ; run: %atomic_rmw_xor_i32(1, 1) == 0 ; run: %atomic_rmw_xor_i32(0x8FA50A64, 0x4F5AE48A) == 0xC0FFEEEE + + + +function %atomic_rmw_nand_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 nand v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_nand_i64(0, 0) == -1 +; 
run: %atomic_rmw_nand_i64(1, 0) == -1 +; run: %atomic_rmw_nand_i64(0, 1) == -1 +; run: %atomic_rmw_nand_i64(1, 1) == -2 +; run: %atomic_rmw_nand_i64(0xC0FFEEEE_DECAFFFF, 0x7DCB5691_7DCB5691) == 0xBF34B97F_A335A96E + +function %atomic_rmw_nand_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 nand v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_nand_i32(0, 0) == -1 +; run: %atomic_rmw_nand_i32(1, 0) == -1 +; run: %atomic_rmw_nand_i32(0, 1) == -1 +; run: %atomic_rmw_nand_i32(1, 1) == -2 +; run: %atomic_rmw_nand_i32(0xC0FFEEEE, 0x7DCB5691) == 0xBF34B97F + + + +function %atomic_rmw_umin_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 umin v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_umin_i64(0, 0) == 0 +; run: %atomic_rmw_umin_i64(1, 0) == 0 +; run: %atomic_rmw_umin_i64(0, 1) == 0 +; run: %atomic_rmw_umin_i64(1, 1) == 1 +; run: %atomic_rmw_umin_i64(-1, 1) == 1 +; run: %atomic_rmw_umin_i64(-1, -3) == -3 + +function %atomic_rmw_umin_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 umin v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_umin_i32(0, 0) == 0 +; run: %atomic_rmw_umin_i32(1, 0) == 0 +; run: %atomic_rmw_umin_i32(0, 1) == 0 +; run: %atomic_rmw_umin_i32(1, 1) == 1 +; run: %atomic_rmw_umin_i32(-1, 1) == 1 +; run: %atomic_rmw_umin_i32(-1, -3) == -3 + + + +function %atomic_rmw_umax_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 umax v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_umax_i64(0, 0) == 0 +; run: %atomic_rmw_umax_i64(1, 0) == 1 +; run: %atomic_rmw_umax_i64(0, 1) 
== 1 +; run: %atomic_rmw_umax_i64(1, 1) == 1 +; run: %atomic_rmw_umax_i64(-1, 1) == -1 +; run: %atomic_rmw_umax_i64(-1, -3) == -1 + +function %atomic_rmw_umax_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 umax v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_umax_i32(0, 0) == 0 +; run: %atomic_rmw_umax_i32(1, 0) == 1 +; run: %atomic_rmw_umax_i32(0, 1) == 1 +; run: %atomic_rmw_umax_i32(1, 1) == 1 +; run: %atomic_rmw_umax_i32(-1, 1) == -1 +; run: %atomic_rmw_umax_i32(-1, -3) == -1 + + + +function %atomic_rmw_smin_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 smin v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_smin_i64(0, 0) == 0 +; run: %atomic_rmw_smin_i64(1, 0) == 0 +; run: %atomic_rmw_smin_i64(0, 1) == 0 +; run: %atomic_rmw_smin_i64(1, 1) == 1 +; run: %atomic_rmw_smin_i64(-1, 1) == -1 +; run: %atomic_rmw_smin_i64(-1, -3) == -3 + +function %atomic_rmw_smin_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 smin v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_smin_i32(0, 0) == 0 +; run: %atomic_rmw_smin_i32(1, 0) == 0 +; run: %atomic_rmw_smin_i32(0, 1) == 0 +; run: %atomic_rmw_smin_i32(1, 1) == 1 +; run: %atomic_rmw_smin_i32(-1, -1) == -1 +; run: %atomic_rmw_smin_i32(-1, -3) == -3 + + + +function %atomic_rmw_smax_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 smax v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_smax_i64(0, 0) == 0 +; run: %atomic_rmw_smax_i64(1, 0) == 1 +; run: %atomic_rmw_smax_i64(0, 1) == 1 +; run: %atomic_rmw_smax_i64(1, 1) == 1 +; run: 
%atomic_rmw_smax_i64(-1, 1) == 1 +; run: %atomic_rmw_smax_i64(-1, -3) == -1 + +function %atomic_rmw_smax_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 smax v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_smax_i32(0, 0) == 0 +; run: %atomic_rmw_smax_i32(1, 0) == 1 +; run: %atomic_rmw_smax_i32(0, 1) == 1 +; run: %atomic_rmw_smax_i32(1, 1) == 1 +; run: %atomic_rmw_smax_i32(-1, 1) == 1 +; run: %atomic_rmw_smax_i32(-1, -3) == -1 + + + +function %atomic_rmw_xchg_i64(i64, i64) -> i64 { + ss0 = explicit_slot 8 + +block0(v0: i64, v1: i64): + stack_store.i64 v0, ss0 + + v2 = stack_addr.i64 ss0 + v3 = atomic_rmw.i64 xchg v2, v1 + + v4 = stack_load.i64 ss0 + return v4 +} +; run: %atomic_rmw_xchg_i64(0, 0) == 0 +; run: %atomic_rmw_xchg_i64(1, 0) == 0 +; run: %atomic_rmw_xchg_i64(0, 1) == 1 +; run: %atomic_rmw_xchg_i64(0, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF + +function %atomic_rmw_xchg_i32(i32, i32) -> i32 { + ss0 = explicit_slot 4 + +block0(v0: i32, v1: i32): + stack_store.i32 v0, ss0 + + v2 = stack_addr.i32 ss0 + v3 = atomic_rmw.i32 xchg v2, v1 + + v4 = stack_load.i32 ss0 + return v4 +} +; run: %atomic_rmw_xchg_i32(0, 0) == 0 +; run: %atomic_rmw_xchg_i32(1, 0) == 0 +; run: %atomic_rmw_xchg_i32(0, 1) == 1 +; run: %atomic_rmw_xchg_i32(0, 0xC0FFEEEE) == 0xC0FFEEEE