Revert IR changes

Along with the x64 and s390x changes. The uextend(atomic_load) pattern is
now matched directly in the aarch64 backend.
This commit is contained in:
Sam Parker
2021-08-05 09:35:32 +01:00
parent cbb7229457
commit b6f6ac116a
10 changed files with 164 additions and 255 deletions

View File

@@ -1740,6 +1740,22 @@ pub(crate) fn is_valid_atomic_transaction_ty(ty: Type) -> bool {
}
}
/// Lower a CLIF `AtomicLoad` instruction into an AArch64 `LoadAcquire`,
/// placing the loaded value in `rt`.
///
/// Panics (via `assert!`) if `insn` is not an `AtomicLoad`, or if its
/// result type is not a legal atomic access type.
pub(crate) fn emit_atomic_load<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    rt: Writable<Reg>,
    insn: IRInst,
) {
    assert!(ctx.data(insn).opcode() == Opcode::AtomicLoad);
    let ins = insn_inputs(ctx, insn);
    let addr = put_input_in_reg(ctx, ins[0], NarrowValueMode::None);
    let ty = ctx.output_ty(insn, 0);
    assert!(is_valid_atomic_transaction_ty(ty));
    // The load's result type can be ignored here: `LoadAcquire` explicitly
    // zero-extends to the nearest word and also zeroes the high half of an
    // X register.
    ctx.emit(Inst::LoadAcquire {
        access_ty: ty,
        rt,
        rn: addr,
    });
}
fn load_op_to_ty(op: Opcode) -> Option<Type> {
match op {
Opcode::Sload8 | Opcode::Uload8 | Opcode::Sload8Complex | Opcode::Uload8Complex => Some(I8),

View File

@@ -521,6 +521,19 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
Opcode::Uextend | Opcode::Sextend => {
if op == Opcode::Uextend {
let inputs = ctx.get_input_as_source_or_const(inputs[0].insn, inputs[0].input);
if let Some((atomic_load, 0)) = inputs.inst {
if ctx.data(atomic_load).opcode() == Opcode::AtomicLoad {
let output_ty = ty.unwrap();
assert!(output_ty == I32 || output_ty == I64);
let rt = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
emit_atomic_load(ctx, rt, atomic_load);
ctx.sink_inst(atomic_load);
return Ok(());
}
}
}
let output_ty = ty.unwrap();
let input_ty = ctx.input_ty(insn, 0);
let from_bits = ty_bits(input_ty) as u8;
@@ -1522,38 +1535,15 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
}
Opcode::AtomicLoad
| Opcode::AtomicUload8
| Opcode::AtomicUload16
| Opcode::AtomicUload32 => {
Opcode::AtomicLoad => {
let rt = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let ty = ty.unwrap();
let access_ty = match op {
Opcode::AtomicLoad => ty,
Opcode::AtomicUload8 => I8,
Opcode::AtomicUload16 => I16,
Opcode::AtomicUload32 => I32,
_ => panic!(),
};
assert!(is_valid_atomic_transaction_ty(access_ty));
ctx.emit(Inst::LoadAcquire { access_ty, rt, rn });
emit_atomic_load(ctx, rt, insn);
}
Opcode::AtomicStore
| Opcode::AtomicStore32
| Opcode::AtomicStore16
| Opcode::AtomicStore8 => {
Opcode::AtomicStore => {
let rt = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
let ty = ctx.input_ty(insn, 0);
let access_ty = match op {
Opcode::AtomicStore => ty,
Opcode::AtomicStore32 => I32,
Opcode::AtomicStore16 => I16,
Opcode::AtomicStore8 => I8,
_ => unreachable!(),
};
let access_ty = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(access_ty));
ctx.emit(Inst::StoreRelease { access_ty, rt, rn });
}

View File

@@ -2734,61 +2734,37 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::AtomicCas64 { rd, rn, mem });
}
}
Opcode::AtomicLoad
| Opcode::AtomicUload8
| Opcode::AtomicUload16
| Opcode::AtomicUload32 => {
Opcode::AtomicLoad => {
let flags = ctx.memflags(insn).unwrap();
let endianness = flags.endianness(Endianness::Big);
let ty = ty.unwrap();
let access_ty = match op {
Opcode::AtomicLoad => ty,
Opcode::AtomicUload8 => types::I8,
Opcode::AtomicUload16 => types::I16,
Opcode::AtomicUload32 => types::I32,
_ => unreachable!(),
};
assert!(is_valid_atomic_transaction_ty(access_ty));
assert!(is_valid_atomic_transaction_ty(ty));
let mem = lower_address(ctx, &inputs[..], 0, flags);
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
if endianness == Endianness::Big {
ctx.emit(match (ty_bits(access_ty), ty_bits(ty)) {
(8, 32) => Inst::Load32ZExt8 { rd, mem },
(8, 64) => Inst::Load64ZExt8 { rd, mem },
(16, 32) => Inst::Load32ZExt16 { rd, mem },
(16, 64) => Inst::Load64ZExt16 { rd, mem },
(32, 32) => Inst::Load32 { rd, mem },
(32, 64) => Inst::Load64ZExt32 { rd, mem },
(64, 64) => Inst::Load64 { rd, mem },
ctx.emit(match ty_bits(ty) {
8 => Inst::Load32ZExt8 { rd, mem },
16 => Inst::Load32ZExt16 { rd, mem },
32 => Inst::Load32 { rd, mem },
64 => Inst::Load64 { rd, mem },
_ => panic!("Unsupported size in load"),
});
} else {
ctx.emit(match (ty_bits(access_ty), ty_bits(ty)) {
(8, 32) => Inst::Load32ZExt8 { rd, mem },
(8, 64) => Inst::Load64ZExt8 { rd, mem },
(16, 32) => Inst::LoadRev16 { rd, mem },
(32, 32) => Inst::LoadRev32 { rd, mem },
(64, 64) => Inst::LoadRev64 { rd, mem },
ctx.emit(match ty_bits(ty) {
8 => Inst::Load32ZExt8 { rd, mem },
16 => Inst::LoadRev16 { rd, mem },
32 => Inst::LoadRev32 { rd, mem },
64 => Inst::LoadRev64 { rd, mem },
_ => panic!("Unsupported size in load"),
});
}
}
Opcode::AtomicStore
| Opcode::AtomicStore32
| Opcode::AtomicStore16
| Opcode::AtomicStore8 => {
Opcode::AtomicStore => {
let flags = ctx.memflags(insn).unwrap();
let endianness = flags.endianness(Endianness::Big);
let data_ty = ctx.input_ty(insn, 0);
let ty = match op {
Opcode::AtomicStore => data_ty,
Opcode::AtomicStore32 => types::I32,
Opcode::AtomicStore16 => types::I16,
Opcode::AtomicStore8 => types::I8,
_ => unreachable!(),
};
let ty = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty));
let mem = lower_address(ctx, &inputs[1..], 0, flags);

View File

@@ -5825,10 +5825,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicLoad
| Opcode::AtomicUload8
| Opcode::AtomicUload16
| Opcode::AtomicUload32 => {
Opcode::AtomicLoad => {
// This is a normal load. The x86-TSO memory model provides sufficient sequencing
// to satisfy the CLIF synchronisation requirements for `AtomicLoad` without the
// need for any fence instructions.
@@ -5850,21 +5847,11 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
}
Opcode::AtomicStore
| Opcode::AtomicStore32
| Opcode::AtomicStore16
| Opcode::AtomicStore8 => {
Opcode::AtomicStore => {
// This is a normal store, followed by an `mfence` instruction.
let data = put_input_in_reg(ctx, inputs[0]);
let addr = lower_to_amode(ctx, inputs[1], 0);
let data_ty = ctx.input_ty(insn, 0);
let ty_access = match op {
Opcode::AtomicStore => data_ty,
Opcode::AtomicStore32 => types::I32,
Opcode::AtomicStore16 => types::I16,
Opcode::AtomicStore8 => types::I8,
_ => unreachable!(),
};
let ty_access = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty_access));
ctx.emit(Inst::store(ty_access, data, addr));