machinst x64: use the (base,offset) addressing mode even in the presence of a uextend;
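In other words: when a load or store address is computed as iadd x, (uextend (iconst k)), the zero-extended constant can be folded directly into the x86-64 displacement instead of materializing the add, as long as the final offset still sign-extends from 32 bits. The following standalone Rust sketch only illustrates that decision; the Fold type and fold_uextended_constant helper are invented for this example and are not the actual Cranelift lowering API.

// Illustration only -- a simplified stand-in for the Amode choice made in
// lower_to_amode; none of these names are real Cranelift APIs.
#[derive(Debug, PartialEq)]
enum Fold {
    // Fold the constant into disp32(base), e.g. `movq 42(%rdi), %r12`.
    IntoDisplacement(i32),
    // Keep the add: the folded offset does not fit a signed 32-bit displacement.
    Materialize,
}

// Decide whether `offset + uextend(k)`, with `k` taken from a `from_bits`-wide
// constant, can become a single base+displacement addressing mode.
fn fold_uextended_constant(offset: i64, k: u64, from_bits: u32) -> Fold {
    debug_assert!(from_bits < 64);
    // Zero the upper bits, mirroring the `(cst << shift) >> shift` step below.
    let shift = 64 - u64::from(from_bits);
    let zext_k = (k << shift) >> shift;
    let total = offset.wrapping_add(zext_k as i64);
    // The condition that `low32_will_sign_extend_to_64` enforces in the diff:
    // the final offset must be representable as a sign-extended 32-bit value.
    if total == i64::from(total as i32) {
        Fold::IntoDisplacement(total as i32)
    } else {
        Fold::Materialize
    }
}

fn main() {
    // `uextend.i64 (iconst.i32 42)` folds into the displacement.
    assert_eq!(fold_uextended_constant(0, 42, 32), Fold::IntoDisplacement(42));
    // 0xffff_ffff zero-extends to 4294967295, which is not a valid disp32,
    // so the add has to be materialized instead.
    assert_eq!(fold_uextended_constant(0, 0xffff_ffff, 32), Fold::Materialize);
}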
@@ -449,6 +449,7 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i
     // We now either have an add that we must materialize, or some other input; as well as the
     // final offset.
     if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
+        debug_assert_eq!(ctx.output_ty(add, 0), types::I64);
         let add_inputs = &[
             InsnInput {
                 insn: add,
@@ -480,7 +481,33 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i
             )
         } else {
             for i in 0..=1 {
-                if let Some(cst) = ctx.get_input(add, i).constant {
+                let input = ctx.get_input(add, i);
+
+                // Try to pierce through uextend.
+                if let Some(uextend) = matches_input(
+                    ctx,
+                    InsnInput {
+                        insn: add,
+                        input: i,
+                    },
+                    Opcode::Uextend,
+                ) {
+                    if let Some(cst) = ctx.get_input(uextend, 0).constant {
+                        // Zero the upper bits.
+                        let input_size = ctx.input_ty(uextend, 0).bits() as u64;
+                        let shift: u64 = 64 - input_size;
+                        let uext_cst: u64 = (cst << shift) >> shift;
+
+                        let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
+                        if low32_will_sign_extend_to_64(final_offset as u64) {
+                            let base = put_input_in_reg(ctx, add_inputs[1 - i]);
+                            return Amode::imm_reg(final_offset as u32, base);
+                        }
+                    }
+                }
+
+                // If it's a constant, add it directly!
+                if let Some(cst) = input.constant {
                     let final_offset = (offset as i64).wrapping_add(cst as i64);
                     if low32_will_sign_extend_to_64(final_offset as u64) {
                         let base = put_input_in_reg(ctx, add_inputs[1 - i]);
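A note on the masking step above (a standalone sketch, not repository code): the raw constant attached to an input may carry set upper bits, for example a 32-bit -42 held in a 64-bit slot, and since uextend is a zero extension those bits must be cleared before the value is folded into the displacement. The shift pair used in the diff is equivalent to masking with the low input_size bits, as the hypothetical values below illustrate.

fn main() {
    // Hypothetical 32-bit constant whose 64-bit carrier has the upper bits set
    // (-42i32 widened to 64 bits).
    let cst: u64 = (-42i32 as i64) as u64; // 0xffff_ffff_ffff_ffd6
    let input_size: u64 = 32;

    // The `(cst << shift) >> shift` step from the diff: drop everything above
    // the low `input_size` bits.
    let shift = 64 - input_size;
    let uext_cst = (cst << shift) >> shift;

    // Equivalent mask formulation (valid for input_size < 64).
    let mask = (1u64 << input_size) - 1;
    assert_eq!(uext_cst, cst & mask);

    // The zero-extended value is 4_294_967_254 (0xffff_ffd6), not -42.
    assert_eq!(uext_cst, 0xffff_ffd6);
    println!("zero-extended constant = {:#x}", uext_cst);
}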
cranelift/filetests/filetests/isa/x64/amode-opt.clif (new file, 41 lines added)
@@ -0,0 +1,41 @@
+test compile
+target x86_64
+feature "experimental_x64"
+
+function %amode_add(i64, i64) -> i64 {
+block0(v0: i64, v1: i64):
+    v2 = iadd v0, v1
+    v3 = load.i64 v2
+    return v3
+    ; check: movq 0(%rdi,%rsi,1), %r12
+}
+
+function %amode_add_imm(i64) -> i64 {
+block0(v0: i64):
+    v1 = iconst.i64 42
+    v2 = iadd v0, v1
+    v3 = load.i64 v2
+    return v3
+    ; check: movq 42(%rdi), %r12
+}
+
+;; Same as above, but add operands have been reversed.
+function %amode_add_imm_order(i64) -> i64 {
+block0(v0: i64):
+    v1 = iconst.i64 42
+    v2 = iadd v1, v0
+    v3 = load.i64 v2
+    return v3
+    ; check: movq 42(%rdi), %r12
+}
+
+;; Make sure that uextend(cst) is ignored when the cst will naturally sign-extend.
+function %amode_add_uext_imm(i64) -> i64 {
+block0(v0: i64):
+    v1 = iconst.i32 42
+    v2 = uextend.i64 v1
+    v3 = iadd v2, v0
+    v4 = load.i64 v3
+    return v4
+    ; check: movq 42(%rdi), %r12
+}