;; wasmtime/cranelift/filetests/filetests/isa/aarch64/amodes.clif
;;
;; Addressing-mode (amode) lowering tests for the aarch64 backend. The expected
;; output below is as of commit 2e9b0802ab by Trevor Elliott, "aarch64: Rework
;; amode compilation to produce SSA code" (#5369, 2022-12-02): amode lowering
;; now generates fresh virtual registers for intermediates instead of reusing
;; registers, which resolves some SSA checker violations in the aarch64 backend
;; and, as a nice side effect, removes some unnecessary movs in the generated
;; code.

test compile precise-output
set unwind_info=false
target aarch64
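
;; A 32-bit index that is sign-extended and added to a 64-bit base folds into
;; the register-extended [Xn, Wm, SXTW] addressing mode.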
function %f5(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
  v2 = sextend.i64 v1
  v3 = iadd.i64 v0, v2
  v4 = load.i32 v3
  return v4
}

; block0:
;   ldr w0, [x0, w1, SXTW]
;   ret
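
;; Same as %f5 with the iadd operands swapped; the extend still folds.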
function %f6(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
  v2 = sextend.i64 v1
  v3 = iadd.i64 v2, v0
  v4 = load.i32 v3
  return v4
}

; block0:
;   ldr w0, [x0, w1, SXTW]
;   ret
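
;; Two zero-extended 32-bit operands: one is zero-extended into the base with a
;; 32-bit mov, the other folds in as a UXTW-extended index.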
function %f7(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = uextend.i64 v0
  v3 = uextend.i64 v1
  v4 = iadd.i64 v2, v3
  v5 = load.i32 v4
  return v5
}

; block0:
;   mov w3, w0
;   ldr w0, [x3, w1, UXTW]
;   ret
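
;; The constant parts fold into a single add of #68 (2 * 32 + 4); the doubled
;; sign-extended index contributes once via an extended add and once via the
;; SXTW amode.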
function %f8(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
  v2 = sextend.i64 v1
  v3 = iconst.i64 32
  v4 = iadd.i64 v2, v3
  v5 = iadd.i64 v4, v0
  v6 = iadd.i64 v5, v5
  v7 = load.i32 v6+4
  return v7
}

; block0:
;   add x3, x0, #68
;   add x5, x3, x0
;   add x7, x5, x1, SXTW
;   ldr w0, [x7, w1, SXTW]
;   ret
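
;; A small constant offset (48) is encoded directly in the load's immediate
;; offset; the register operands are summed with adds.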
function %f9(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i64 48
  v4 = iadd.i64 v0, v1
  v5 = iadd.i64 v4, v2
  v6 = iadd.i64 v5, v3
  v7 = load.i32 v6
  return v7
}

; block0:
;   add x4, x0, x2
;   add x6, x4, x1
;   ldr w0, [x6, #48]
;   ret
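
;; A larger constant offset (4100) is materialized with movz and combined with
;; register adds instead of being folded into the load's immediate offset.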
function %f10(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i64 4100
  v4 = iadd.i64 v0, v1
  v5 = iadd.i64 v4, v2
  v6 = iadd.i64 v5, v3
  v7 = load.i32 v6
  return v7
}

; block0:
;   movz x5, #4100
;   add x5, x5, x1
;   add x8, x5, x2
;   ldr w0, [x8, x0]
;   ret
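
;; A load from a constant address: the address is materialized with movz.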
function %f10() -> i32 {
block0:
  v1 = iconst.i64 1234
  v2 = load.i32 v1
  return v2
}

; block0:
;   movz x0, #1234
;   ldr w0, [x0]
;   ret
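
;; A constant that fits the shifted 12-bit immediate form (0x800 << 12) is
;; added with a single add-immediate.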
function %f11(i64) -> i32 {
block0(v0: i64):
  v1 = iconst.i64 8388608 ;; Imm12: 0x800 << 12
  v2 = iadd.i64 v0, v1
  v3 = load.i32 v2
  return v3
}

; block0:
;   add x2, x0, #8388608
;   ldr w0, [x2]
;   ret
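
;; A small negative offset becomes a sub-immediate.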
function %f12(i64) -> i32 {
block0(v0: i64):
  v1 = iconst.i64 -4
  v2 = iadd.i64 v0, v1
  v3 = load.i32 v2
  return v3
}

; block0:
;   sub x2, x0, #4
;   ldr w0, [x2]
;   ret
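
;; A constant too large for an add-immediate is built with movz/movk before the
;; add.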
function %f13(i64) -> i32 {
block0(v0: i64):
  v1 = iconst.i64 1000000000
  v2 = iadd.i64 v0, v1
  v3 = load.i32 v2
  return v3
}

; block0:
;   movz w3, #51712
;   movk w3, w3, #15258, LSL #16
;   add x4, x3, x0
;   ldr w0, [x4]
;   ret
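
;; A sign-extended 32-bit value used directly as an address gets an sxtw.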
function %f14(i32) -> i32 {
block0(v0: i32):
  v1 = sextend.i64 v0
  v2 = load.i32 v1
  return v2
}

; block0:
;   sxtw x2, w0
;   ldr w0, [x2]
;   ret
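
;; With two sign-extended operands, one becomes the base via sxtw and the other
;; folds in as an SXTW-extended index.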
function %f15(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = sextend.i64 v0
  v3 = sextend.i64 v1
  v4 = iadd.i64 v2, v3
  v5 = load.i32 v4
  return v5
}

; block0:
;   sxtw x3, w0
;   ldr w0, [x3, w1, SXTW]
;   ret
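
;; uextend of the negative constant -4098 yields the 32-bit pattern 0xffff_effe,
;; which movn materializes in the 32-bit register (zero-extending into x4).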
function %f18(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i32 -4098
  v6 = uextend.i64 v3
  v5 = sload16.i32 v6+0
  return v5
}

; block0:
;   movn w4, #4097
;   ldrsh x0, [x4]
;   ret
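
;; The positive constant 4098 is materialized directly with movz.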
function %f19(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i32 4098
  v6 = uextend.i64 v3
  v5 = sload16.i32 v6+0
  return v5
}

; block0:
;   movz x4, #4098
;   ldrsh x0, [x4]
;   ret
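
;; sextend of the negative constant: the 32-bit value is materialized with movn
;; and then sign-extended with sxtw.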
function %f20(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i32 -4098
  v6 = sextend.i64 v3
  v5 = sload16.i32 v6+0
  return v5
}

; block0:
;   movn w4, #4097
;   sxtw x6, w4
;   ldrsh x0, [x6]
;   ret
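
;; Same with a positive constant; the sxtw for the sextend is still emitted.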
function %f21(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = iconst.i32 4098
  v6 = sextend.i64 v3
  v5 = sload16.i32 v6+0
  return v5
}

; block0:
;   movz x4, #4098
;   sxtw x6, w4
;   ldrsh x0, [x6]
;   ret
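
;; 128-bit loads and stores lower to ldp/stp. The base is moved out of x0 first
;; because the ldp result clobbers x0/x1.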
function %i128(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 v0
  store.i128 v1, v0
  return v1
}

; block0:
;   mov x5, x0
;   ldp x0, x1, [x5]
;   stp x0, x1, [x5]
;   ret
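
;; A small immediate offset folds into the ldp/stp offset field.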
function %i128_imm_offset(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 v0+16
  store.i128 v1, v0+16
  return v1
}

; block0:
;   mov x5, x0
;   ldp x0, x1, [x5, #16]
;   stp x0, x1, [x5, #16]
;   ret
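
;; 504 is the largest positive offset encodable by a 64-bit ldp/stp
;; (signed imm7 scaled by 8).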
function %i128_imm_offset_large(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 v0+504
  store.i128 v1, v0+504
  return v1
}

; block0:
;   mov x5, x0
;   ldp x0, x1, [x5, #504]
;   stp x0, x1, [x5, #504]
;   ret
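
;; -512 is the most negative offset encodable by a 64-bit ldp/stp.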
function %i128_imm_offset_negative_large(i64) -> i128 {
block0(v0: i64):
  v1 = load.i128 v0-512
  store.i128 v1, v0-512
  return v1
}

; block0:
;   mov x5, x0
;   ldp x0, x1, [x5, #-512]
;   stp x0, x1, [x5, #-512]
;   ret
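
;; An iadd_imm feeding the address folds into the ldp/stp offset.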
function %i128_add_offset(i64) -> i128 {
block0(v0: i64):
  v1 = iadd_imm v0, 32
  v2 = load.i128 v1
  store.i128 v2, v1
  return v2
}

; block0:
;   mov x5, x0
;   ldp x0, x1, [x5, #32]
;   stp x0, x1, [x5, #32]
;   ret
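
;; The sign-extended address is recomputed with a second sxtw for the store,
;; since the ldp result clobbers x0.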
function %i128_32bit_sextend_simple(i32) -> i128 {
block0(v0: i32):
  v1 = sextend.i64 v0
  v2 = load.i128 v1
  store.i128 v2, v1
  return v2
}

; block0:
;   sxtw x3, w0
;   mov x8, x0
;   ldp x0, x1, [x3]
;   sxtw x4, w8
;   stp x0, x1, [x4]
;   ret
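
;; Base plus SXTW index plus an immediate: the extend folds into an extended
;; add, the +24 folds into the ldp/stp offset, and the address is recomputed
;; for the store.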
function %i128_32bit_sextend(i64, i32) -> i128 {
block0(v0: i64, v1: i32):
  v2 = sextend.i64 v1
  v3 = iadd.i64 v0, v2
  v4 = iadd_imm.i64 v3, 24
  v5 = load.i128 v4
  store.i128 v5, v4
  return v5
}

; block0:
;   add x4, x0, x1, SXTW
;   mov x11, x0
;   mov x9, x1
;   ldp x0, x1, [x4, #24]
;   add x5, x11, x9, SXTW
;   stp x0, x1, [x5, #24]
;   ret