aarch64: fix up regalloc2 semantics. (#4830)

This PR removes all uses of modify-operands in the aarch64 backend,
replacing them with reused-input operands instead. This has the nice
effect of removing a bunch of move instructions and more clearly
representing inputs and outputs.

This PR also removes the explicit use of pinned vregs in the aarch64
backend, instead using fixed-register constraints on the operands when
insts or pseudo-inst sequences require certain registers.

This is the second PR in the regalloc-semantics cleanup series; after
the remaining backend (s390x) and the ABI code are cleaned up as well,
we'll be able to simplify the regalloc2 frontend.
This commit is contained in:
Chris Fallin
2022-09-01 14:25:20 -07:00
committed by GitHub
parent ac2d4c4818
commit ae5fe8a728
25 changed files with 1098 additions and 886 deletions

View File

@@ -560,10 +560,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts.push(Inst::StoreP64 {
 rt: fp_reg(),
 rt2: link_reg(),
-mem: PairAMode::PreIndexed(
-writable_stack_reg(),
-SImm7Scaled::maybe_from_i64(-16, types::I64).unwrap(),
-),
+mem: PairAMode::SPPreIndexed(SImm7Scaled::maybe_from_i64(-16, types::I64).unwrap()),
 flags: MemFlags::trusted(),
 });
@@ -601,10 +598,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts.push(Inst::LoadP64 {
 rt: writable_fp_reg(),
 rt2: writable_link_reg(),
-mem: PairAMode::PostIndexed(
-writable_stack_reg(),
-SImm7Scaled::maybe_from_i64(16, types::I64).unwrap(),
-),
+mem: PairAMode::SPPostIndexed(SImm7Scaled::maybe_from_i64(16, types::I64).unwrap()),
 flags: MemFlags::trusted(),
 });
 insts
@@ -676,10 +670,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 // str rd, [sp, #-16]!
 insts.push(Inst::Store64 {
 rd,
-mem: AMode::PreIndexed(
-writable_stack_reg(),
-SImm9::maybe_from_i64(-clobber_offset_change).unwrap(),
-),
+mem: AMode::SPPreIndexed(SImm9::maybe_from_i64(-clobber_offset_change).unwrap()),
 flags: MemFlags::trusted(),
 });
@@ -708,8 +699,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts.push(Inst::StoreP64 {
 rt,
 rt2,
-mem: PairAMode::PreIndexed(
-writable_stack_reg(),
+mem: PairAMode::SPPreIndexed(
 SImm7Scaled::maybe_from_i64(-clobber_offset_change, types::I64).unwrap(),
 ),
 flags: MemFlags::trusted(),
@@ -734,10 +724,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 let store_vec_reg = |rd| Inst::FpuStore64 {
 rd,
-mem: AMode::PreIndexed(
-writable_stack_reg(),
-SImm9::maybe_from_i64(-clobber_offset_change).unwrap(),
-),
+mem: AMode::SPPreIndexed(SImm9::maybe_from_i64(-clobber_offset_change).unwrap()),
 flags: MemFlags::trusted(),
 };
let iter = clobbered_vec.chunks_exact(2);
@@ -766,8 +753,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 Inst::FpuStoreP64 {
 rt,
 rt2,
-mem: PairAMode::PreIndexed(
-writable_stack_reg(),
+mem: PairAMode::SPPreIndexed(
 SImm7Scaled::maybe_from_i64(-clobber_offset_change, F64).unwrap(),
 ),
 flags: MemFlags::trusted(),
@@ -831,16 +817,13 @@ impl ABIMachineSpec for AArch64MachineDeps {
 let load_vec_reg = |rd| Inst::FpuLoad64 {
 rd,
-mem: AMode::PostIndexed(writable_stack_reg(), SImm9::maybe_from_i64(16).unwrap()),
+mem: AMode::SPPostIndexed(SImm9::maybe_from_i64(16).unwrap()),
 flags: MemFlags::trusted(),
 };
 let load_vec_reg_pair = |rt, rt2| Inst::FpuLoadP64 {
 rt,
 rt2,
-mem: PairAMode::PostIndexed(
-writable_stack_reg(),
-SImm7Scaled::maybe_from_i64(16, F64).unwrap(),
-),
+mem: PairAMode::SPPostIndexed(SImm7Scaled::maybe_from_i64(16, F64).unwrap()),
 flags: MemFlags::trusted(),
 };
@@ -876,10 +859,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts.push(Inst::LoadP64 {
 rt,
 rt2,
-mem: PairAMode::PostIndexed(
-writable_stack_reg(),
-SImm7Scaled::maybe_from_i64(16, I64).unwrap(),
-),
+mem: PairAMode::SPPostIndexed(SImm7Scaled::maybe_from_i64(16, I64).unwrap()),
 flags: MemFlags::trusted(),
 });
 }
@@ -893,7 +873,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 // ldr rd, [sp], #16
 insts.push(Inst::ULoad64 {
 rd,
-mem: AMode::PostIndexed(writable_stack_reg(), SImm9::maybe_from_i64(16).unwrap()),
+mem: AMode::SPPostIndexed(SImm9::maybe_from_i64(16).unwrap()),
 flags: MemFlags::trusted(),
 });
 }