add riscv64 backend for cranelift. (#4271)

Add a RISC-V 64 (`riscv64`, RV64GC) backend.

Co-authored-by: yuyang <756445638@qq.com>
Co-authored-by: Chris Fallin <chris@cfallin.org>
Co-authored-by: Afonso Bordado <afonsobordado@az8.co>
Author: yuyang-ok
Date: 2022-09-28 08:30:31 +08:00 (committed by GitHub)
Parent: 9715d91c50
Commit: cdecc858b4
182 files changed, 21024 insertions(+), 36 deletions(-)
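
Once this lands, the backend is reachable through Cranelift's ordinary ISA lookup. A minimal sketch of selecting it (hedged: the triple string and the `finish` signature reflect the crate APIs of this era and may differ in later releases):

    use std::str::FromStr;

    use cranelift_codegen::isa;
    use cranelift_codegen::settings::{self, Configurable};
    use target_lexicon::Triple;

    fn build_riscv64_isa() -> Box<dyn isa::TargetIsa> {
        // Any riscv64 triple selects the new backend; this one is an example.
        let triple = Triple::from_str("riscv64gc-unknown-linux-gnu").expect("valid triple");
        let mut flags = settings::builder();
        // Mirrors the `set unwind_info=false` line in the filetests below.
        flags.set("unwind_info", "false").expect("known flag");
        isa::lookup(triple)
            .expect("riscv64 backend compiled in")
            .finish(settings::Flags::new(flags))
            .expect("ISA construction")
    }

The filetests below are run with `clif-util test <file>.clif`; `test compile precise-output` pins the exact lowered VCode shown in the `;` comment lines.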


@@ -0,0 +1,365 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f5(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
v4 = load.i32 v3
return v4
}
; block0:
; sext.w a3,a1
; add a3,a0,a3
; lw a0,0(a3)
; ret
function %f6(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v2, v0
v4 = load.i32 v3
return v4
}
; block0:
; sext.w a3,a1
; add a3,a3,a0
; lw a0,0(a3)
; ret
function %f7(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = uextend.i64 v0
v3 = uextend.i64 v1
v4 = iadd.i64 v2, v3
v5 = load.i32 v4
return v5
}
; block0:
; uext.w a4,a0
; uext.w a5,a1
; add a4,a4,a5
; lw a0,0(a4)
; ret
function %f8(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iconst.i64 32
v4 = iadd.i64 v2, v3
v5 = iadd.i64 v4, v0
v6 = iadd.i64 v5, v5
v7 = load.i32 v6+4
return v7
}
; block0:
; sext.w a5,a1
; addi a5,a5,32
; add a5,a5,a0
; add a5,a5,a5
; lw a0,4(a5)
; ret
function %f9(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 48
v4 = iadd.i64 v0, v1
v5 = iadd.i64 v4, v2
v6 = iadd.i64 v5, v3
v7 = load.i32 v6
return v7
}
; block0:
; add a5,a0,a1
; add a5,a5,a2
; addi a5,a5,48
; lw a0,0(a5)
; ret
function %f10(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 4100
v4 = iadd.i64 v0, v1
v5 = iadd.i64 v4, v2
v6 = iadd.i64 v5, v3
v7 = load.i32 v6
return v7
}
; block0:
; add a6,a0,a1
; add a6,a6,a2
; lui a5,1
; addi a5,a5,4
; add t3,a6,a5
; lw a0,0(t3)
; ret
function %f10() -> i32 {
block0:
v1 = iconst.i64 1234
v2 = load.i32 v1
return v2
}
; block0:
; li t1,1234
; lw a0,0(t1)
; ret
function %f11(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 8388608 ;; Imm12: 0x800 << 12
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; block0:
; lui a1,2048
; add a3,a0,a1
; lw a0,0(a3)
; ret
function %f12(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 -4
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; block0:
; addi a1,a0,-4
; lw a0,0(a1)
; ret
function %f13(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 1000000000
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; block0:
; lui a1,244141
; addi a1,a1,2560
; add a4,a0,a1
; lw a0,0(a4)
; ret
function %f14(i32) -> i32 {
block0(v0: i32):
v1 = sextend.i64 v0
v2 = load.i32 v1
return v2
}
; block0:
; sext.w a1,a0
; lw a0,0(a1)
; ret
function %f15(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sextend.i64 v0
v3 = sextend.i64 v1
v4 = iadd.i64 v2, v3
v5 = load.i32 v4
return v5
}
; block0:
; sext.w a4,a0
; sext.w a5,a1
; add a4,a4,a5
; lw a0,0(a4)
; ret
function %f18(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i32 -4098
v6 = uextend.i64 v3
v5 = sload16.i32 v6+0
return v5
}
; block0:
; lui a3,1048575
; addi a3,a3,4094
; uext.w a6,a3
; lh a0,0(a6)
; ret
function %f19(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i32 4098
v6 = uextend.i64 v3
v5 = sload16.i32 v6+0
return v5
}
; block0:
; lui a3,1
; addi a3,a3,2
; uext.w a6,a3
; lh a0,0(a6)
; ret
function %f20(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i32 -4098
v6 = sextend.i64 v3
v5 = sload16.i32 v6+0
return v5
}
; block0:
; lui a3,1048575
; addi a3,a3,4094
; sext.w a6,a3
; lh a0,0(a6)
; ret
function %f21(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i32 4098
v6 = sextend.i64 v3
v5 = sload16.i32 v6+0
return v5
}
; block0:
; lui a3,1
; addi a3,a3,2
; sext.w a6,a3
; lh a0,0(a6)
; ret
function %i128(i64) -> i128 {
block0(v0: i64):
v1 = load.i128 v0
store.i128 v1, v0
return v1
}
; block0:
; ld a1,0(a0)
; mv a3,a1
; ld a1,8(a0)
; mv a5,a3
; sd a5,0(a0)
; sd a1,8(a0)
; mv a0,a3
; ret
function %i128_imm_offset(i64) -> i128 {
block0(v0: i64):
v1 = load.i128 v0+16
store.i128 v1, v0+16
return v1
}
; block0:
; ld a1,16(a0)
; mv a3,a1
; ld a1,24(a0)
; mv a5,a3
; sd a5,16(a0)
; sd a1,24(a0)
; mv a0,a3
; ret
function %i128_imm_offset_large(i64) -> i128 {
block0(v0: i64):
v1 = load.i128 v0+504
store.i128 v1, v0+504
return v1
}
; block0:
; ld a1,504(a0)
; mv a3,a1
; ld a1,512(a0)
; mv a5,a3
; sd a5,504(a0)
; sd a1,512(a0)
; mv a0,a3
; ret
function %i128_imm_offset_negative_large(i64) -> i128 {
block0(v0: i64):
v1 = load.i128 v0-512
store.i128 v1, v0-512
return v1
}
; block0:
; ld a1,-512(a0)
; mv a3,a1
; ld a1,-504(a0)
; mv a5,a3
; sd a5,-512(a0)
; sd a1,-504(a0)
; mv a0,a3
; ret
function %i128_add_offset(i64) -> i128 {
block0(v0: i64):
v1 = iadd_imm v0, 32
v2 = load.i128 v1
store.i128 v2, v1
return v2
}
; block0:
; addi a3,a0,32
; ld a0,0(a3)
; ld a1,8(a3)
; sd a0,0(a3)
; sd a1,8(a3)
; ret
function %i128_32bit_sextend_simple(i32) -> i128 {
block0(v0: i32):
v1 = sextend.i64 v0
v2 = load.i128 v1
store.i128 v2, v1
return v2
}
; block0:
; sext.w a3,a0
; ld a0,0(a3)
; ld a1,8(a3)
; sd a0,0(a3)
; sd a1,8(a3)
; ret
function %i128_32bit_sextend(i64, i32) -> i128 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
v4 = iadd_imm.i64 v3, 24
v5 = load.i128 v4
store.i128 v5, v4
return v5
}
; block0:
; sext.w a6,a1
; add a6,a0,a6
; addi a6,a6,24
; ld a0,0(a6)
; ld a1,8(a6)
; sd a0,0(a6)
; sd a1,8(a6)
; ret
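
These address-mode tests pin down a simple rule: RV64 loads and stores support only a base register plus a 12-bit signed displacement, so offsets in [-2048, 2047] fold into the addressing sequence while anything larger (e.g. 4100 in %f10) is materialized with `lui`/`addi` first. A one-line model of the fold test (illustrative; the backend's actual helper may be named differently):

    /// RV64 I-type/S-type displacements are 12-bit signed immediates.
    fn fits_imm12(offset: i64) -> bool {
        (-2048..=2047).contains(&offset)
    }

    fn main() {
        assert!(fits_imm12(48));     // %f9: a single `addi` suffices
        assert!(fits_imm12(-512));   // %i128_imm_offset_negative_large folds directly
        assert!(!fits_imm12(4100));  // %f10: needs `lui a5,1; addi a5,a5,4`
    }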


@@ -0,0 +1,509 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f1(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iadd.i64 v0, v1
return v2
}
; block0:
; add a0,a0,a1
; ret
function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = isub.i64 v0, v1
return v2
}
; block0:
; sub a0,a0,a1
; ret
function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = imul.i64 v0, v1
return v2
}
; block0:
; mul a0,a0,a1
; ret
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = umulhi.i64 v0, v1
return v2
}
; block0:
; mulhu a0,a0,a1
; ret
function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = smulhi.i64 v0, v1
return v2
}
; block0:
; mulh a0,a0,a1
; ret
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sdiv.i64 v0, v1
return v2
}
; block0:
; li a2,-1
; li a3,1
; slli a5,a3,63
; eq a7,a2,a1##ty=i64
; eq t4,a5,a0##ty=i64
; and t1,a7,t4
; trap_if t1,int_ovf
; trap_ifc int_divz##(zero eq a1)
; div a0,a0,a1
; ret
function %f7(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = sdiv.i64 v0, v1
return v2
}
; block0:
; li a1,2
; li a2,-1
; li a4,1
; slli a6,a4,63
; eq t3,a2,a1##ty=i64
; eq t0,a6,a0##ty=i64
; and t2,t3,t0
; trap_if t2,int_ovf
; li a2,2
; trap_ifc int_divz##(zero eq a2)
; li a5,2
; div a0,a0,a5
; ret
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = udiv.i64 v0, v1
return v2
}
; block0:
; trap_ifc int_divz##(zero eq a1)
; divu a0,a0,a1
; ret
function %f9(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = udiv.i64 v0, v1
return v2
}
; block0:
; li a1,2
; trap_ifc int_divz##(zero eq a1)
; li a3,2
; divu a0,a0,a3
; ret
function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = srem.i64 v0, v1
return v2
}
; block0:
; trap_ifc int_divz##(zero eq a1)
; rem a0,a0,a1
; ret
function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = urem.i64 v0, v1
return v2
}
; block0:
; trap_ifc int_divz##(zero eq a1)
; remu a0,a0,a1
; ret
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sdiv.i32 v0, v1
return v2
}
; block0:
; sext.w a2,a0
; sext.w a3,a1
; li a5,-1
; li a7,1
; slli t4,a7,63
; slli t1,a2,32
; eq a0,a5,a3##ty=i32
; eq a4,t4,t1##ty=i32
; and a4,a0,a4
; trap_if a4,int_ovf
; trap_ifc int_divz##(zero eq a3)
; divw a0,a2,a3
; ret
function %f13(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 2
v2 = sdiv.i32 v0, v1
return v2
}
; block0:
; sext.w a0,a0
; li a2,2
; sext.w a4,a2
; li a6,-1
; li t3,1
; slli t0,t3,63
; slli t2,a0,32
; eq a1,a6,a4##ty=i32
; eq a3,t0,t2##ty=i32
; and a5,a1,a3
; trap_if a5,int_ovf
; trap_ifc int_divz##(zero eq a4)
; divw a0,a0,a4
; ret
function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = udiv.i32 v0, v1
return v2
}
; block0:
; uext.w a1,a1
; trap_ifc int_divz##(zero eq a1)
; uext.w a4,a0
; divuw a0,a4,a1
; ret
function %f15(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 2
v2 = udiv.i32 v0, v1
return v2
}
; block0:
; li a1,2
; uext.w a2,a1
; trap_ifc int_divz##(zero eq a2)
; uext.w a5,a0
; divuw a0,a5,a2
; ret
function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = srem.i32 v0, v1
return v2
}
; block0:
; sext.w a1,a1
; trap_ifc int_divz##(zero eq a1)
; remw a0,a0,a1
; ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = urem.i32 v0, v1
return v2
}
; block0:
; uext.w a1,a1
; trap_ifc int_divz##(zero eq a1)
; remuw a0,a0,a1
; ret
function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band.i64 v0, v1
return v2
}
; block0:
; and a0,a0,a1
; ret
function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor.i64 v0, v1
return v2
}
; block0:
; or a0,a0,a1
; ret
function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor.i64 v0, v1
return v2
}
; block0:
; xor a0,a0,a1
; ret
function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band_not.i64 v0, v1
return v2
}
; block0:
; not a1,a1
; and a0,a0,a1
; ret
function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor_not.i64 v0, v1
return v2
}
; block0:
; not a1,a1
; or a0,a0,a1
; ret
function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor_not.i64 v0, v1
return v2
}
; block0:
; not a1,a1
; xor a0,a0,a1
; ret
function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bnot.i64 v0
return v2
}
; block0:
; not a0,a0
; ret
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iconst.i32 53
v3 = ishl.i32 v0, v2
v4 = isub.i32 v1, v3
return v4
}
; block0:
; slliw a2,a0,53
; subw a0,a1,a2
; ret
function %f26(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 -1
v2 = iadd.i32 v0, v1
return v2
}
; block0:
; li a1,-1
; addw a0,a0,a1
; ret
function %f27(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 -1
v2 = isub.i32 v0, v1
return v2
}
; block0:
; li a1,-1
; subw a0,a0,a1
; ret
function %f28(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 -1
v2 = isub.i64 v0, v1
return v2
}
; block0:
; li a1,-1
; sub a0,a0,a1
; ret
function %f29(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 1
v2 = ineg v1
return v2
}
; block0:
; li a0,1
; sub a0,zero,a0
; ret
function %add_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = iadd v0, v1
return v2
}
; block0:
; add a0,a0,a2
; sltu a6,a0,a2
; add t3,a1,a3
; add a1,t3,a6
; ret
function %sub_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = isub v0, v1
return v2
}
; block0:
; sub a4,a0,a2
; mv t4,a4
; sltu a6,a0,t4
; sub t3,a1,a3
; sub a1,t3,a6
; mv a0,a4
; ret
function %add_mul_2(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = imul v1, v2
v4 = iadd v3, v0
return v4
}
; block0:
; mulw a3,a1,a2
; addw a0,a3,a0
; ret
function %msub_i32(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = imul v1, v2
v4 = isub v0, v3
return v4
}
; block0:
; mulw a3,a1,a2
; subw a0,a0,a3
; ret
function %msub_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = imul v1, v2
v4 = isub v0, v3
return v4
}
; block0:
; mul a3,a1,a2
; sub a0,a0,a3
; ret
function %imul_sub_i32(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = imul v1, v2
v4 = isub v3, v0
return v4
}
; block0:
; mulw a3,a1,a2
; subw a0,a3,a0
; ret
function %imul_sub_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
v3 = imul v1, v2
v4 = isub v3, v0
return v4
}
; block0:
; mul a3,a1,a2
; sub a0,a3,a0
; ret
function %srem_const (i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = srem.i64 v0, v1
return v2
}
; block0:
; li a1,2
; trap_ifc int_divz##(zero eq a1)
; li a3,2
; rem a0,a0,a3
; ret
function %urem_const (i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = urem.i64 v0, v1
return v2
}
; block0:
; li a1,2
; trap_ifc int_divz##(zero eq a1)
; li a3,2
; remu a0,a0,a3
; ret
function %sdiv_minus_one(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 -1
v2 = sdiv.i64 v0, v1
return v2
}
; block0:
; li a1,-1
; li a2,-1
; li a4,1
; slli a6,a4,63
; eq t3,a2,a1##ty=i64
; eq t0,a6,a0##ty=i64
; and t2,t3,t0
; trap_if t2,int_ovf
; li a2,-1
; trap_ifc int_divz##(zero eq a2)
; li a5,-1
; div a0,a0,a5
; ret
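
The long `sdiv` sequences exist because RISC-V integer division never traps: dividing by zero returns all ones and `i64::MIN / -1` silently wraps, so Cranelift's trapping semantics are restored with explicit `trap_ifc int_divz` and `trap_if ... int_ovf` guards; the `slli` of 1 by 63 builds the `i64::MIN` constant for the overflow compare. A scalar model of what the guarded `div` enforces (a sketch, not the backend's code):

    /// What the guarded i64 `sdiv` sequence above enforces.
    fn sdiv_checked(x: i64, y: i64) -> Result<i64, &'static str> {
        if y == 0 {
            return Err("int_divz");          // trap_ifc int_divz##(zero eq a1)
        }
        if x == i64::MIN && y == -1 {
            return Err("int_ovf");           // trap_if t1,int_ovf
        }
        Ok(x.wrapping_div(y))                // the plain `div` instruction
    }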


@@ -0,0 +1,210 @@
test compile precise-output
set unwind_info=false
target riscv64
function %atomic_rmw_add_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 add v0, v1
return
}
; block0:
; amoadd.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_add_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 add v0, v1
return
}
; block0:
; amoadd.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_sub_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 sub v0, v1
return
}
; block0:
; sub a1,zero,a1
; amoadd.d.aqrl a2,a1,(a0)
; ret
function %atomic_rmw_sub_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 sub v0, v1
return
}
; block0:
; sub a1,zero,a1
; amoadd.w.aqrl a2,a1,(a0)
; ret
function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 and v0, v1
return
}
; block0:
; amoand.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_and_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 and v0, v1
return
}
; block0:
; amoand.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_nand_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 nand v0, v1
return
}
; block0:
; mv a3,a0
; mv a2,a1
; atomic_rmw.i64 nand a0,a2,(a3)##t0=a1 offset=zero
; ret
function %atomic_rmw_nand_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 nand v0, v1
return
}
; block0:
; mv a3,a0
; mv a2,a1
; atomic_rmw.i32 nand a0,a2,(a3)##t0=a1 offset=zero
; ret
function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 or v0, v1
return
}
; block0:
; amoor.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_or_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 or v0, v1
return
}
; block0:
; amoor.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 xor v0, v1
return
}
; block0:
; amoxor.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_xor_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 xor v0, v1
return
}
; block0:
; amoxor.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smax v0, v1
return
}
; block0:
; amomax.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_smax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smax v0, v1
return
}
; block0:
; amomax.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umax v0, v1
return
}
; block0:
; amomaxu.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_umax_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umax v0, v1
return
}
; block0:
; amomaxu.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smin v0, v1
return
}
; block0:
; amomin.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_smin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 smin v0, v1
return
}
; block0:
; amomin.w.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umin v0, v1
return
}
; block0:
; amominu.d.aqrl a0,a1,(a0)
; ret
function %atomic_rmw_umin_i32(i64, i32) {
block0(v0: i64, v1: i32):
v2 = atomic_rmw.i32 umin v0, v1
return
}
; block0:
; amominu.w.aqrl a0,a1,(a0)
; ret
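
Most of these RMW ops map one-to-one onto A-extension `amo*.aqrl` instructions; `sub` is rewritten as `amoadd` of a negated operand (`sub a1,zero,a1`), and `nand`, which has no AMO encoding, falls back to the `atomic_rmw` pseudo-instruction, an `lr`/`sc` retry loop. The same split shows up with plain Rust atomics on riscv64 (illustrative):

    use std::sync::atomic::{AtomicU64, Ordering};

    // A single amoadd.d on riscv64.
    fn rmw_add(x: &AtomicU64, v: u64) -> u64 {
        x.fetch_add(v, Ordering::SeqCst)
    }

    // No nand AMO exists, so this becomes an lr.d/sc.d retry loop.
    fn rmw_nand(x: &AtomicU64, v: u64) -> u64 {
        x.fetch_nand(v, Ordering::SeqCst)
    }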


@@ -0,0 +1,36 @@
test compile precise-output
set unwind_info=false
target riscv64
function %atomic_load_i64(i64) -> i64 {
block0(v0: i64):
v1 = atomic_load.i64 v0
return v1
}
; block0:
; atomic_load.i64 a0,(a0)
; ret
function %atomic_load_i32(i64) -> i32 {
block0(v0: i64):
v1 = atomic_load.i32 v0
return v1
}
; block0:
; atomic_load.i32 a0,(a0)
; ret
function %atomic_load_i32_i64(i64) -> i64 {
block0(v0: i64):
v1 = atomic_load.i32 v0
v2 = uextend.i64 v1
return v2
}
; block0:
; atomic_load.i32 a1,(a0)
; uext.w a0,a1
; ret


@@ -0,0 +1,76 @@
test compile precise-output
set unwind_info=false
target riscv64
function %atomic_store_i64(i64, i64) {
block0(v0: i64, v1: i64):
atomic_store.i64 v0, v1
return
}
; block0:
; atomic_store.i64 a0,(a1)
; ret
function %atomic_store_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
atomic_store.i64 v0, v1
return
}
; block0:
; load_sym t2,%sym+0
; atomic_store.i64 a0,(t2)
; ret
function %atomic_store_imm_i64(i64) {
block0(v0: i64):
v1 = iconst.i64 12345
atomic_store.i64 v1, v0
return
}
; block0:
; lui t2,3
; addi t2,t2,57
; atomic_store.i64 t2,(a0)
; ret
function %atomic_store_i32(i32, i64) {
block0(v0: i32, v1: i64):
atomic_store.i32 v0, v1
return
}
; block0:
; atomic_store.i32 a0,(a1)
; ret
function %atomic_store_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
atomic_store.i32 v0, v1
return
}
; block0:
; load_sym t2,%sym+0
; atomic_store.i32 a0,(t2)
; ret
function %atomic_store_imm_i32(i64) {
block0(v0: i64):
v1 = iconst.i32 12345
atomic_store.i32 v1, v0
return
}
; block0:
; lui t2,3
; addi t2,t2,57
; atomic_store.i32 t2,(a0)
; ret


@@ -0,0 +1,929 @@
test compile precise-output
set unwind_info=false
target riscv64
function %a(i8) -> i8 {
block0(v0: i8):
v1 = bitrev v0
return v1
}
; block0:
; brev8 a4,a0##tmp=a3 tmp2=a1 step=a2 ty=i8
; mv a0,a4
; ret
function %a(i16) -> i16 {
block0(v0: i16):
v1 = bitrev v0
return v1
}
; block0:
; mv t3,a0
; brev8 a3,t3##tmp=a0 tmp2=a1 step=a2 ty=i16
; rev8 a5,a3##step=a7 tmp=a6
; srli a0,a5,48
; ret
function %a(i32) -> i32 {
block0(v0: i32):
v1 = bitrev v0
return v1
}
; block0:
; mv t3,a0
; brev8 a3,t3##tmp=a0 tmp2=a1 step=a2 ty=i32
; rev8 a5,a3##step=a7 tmp=a6
; srli a0,a5,32
; ret
function %a(i64) -> i64 {
block0(v0: i64):
v1 = bitrev v0
return v1
}
; block0:
; rev8 a3,a0##step=a2 tmp=a1
; brev8 a0,a3##tmp=a4 tmp2=a5 step=a6 ty=i64
; ret
function %a(i128) -> i128 {
block0(v0: i128):
v1 = bitrev v0
return v1
}
; block0:
; rev8 a2,a0##step=a4 tmp=a3
; brev8 t4,a2##tmp=a6 tmp2=a7 step=t3 ty=i64
; rev8 t1,a1##step=a0 tmp=t2
; brev8 a0,t1##tmp=a2 tmp2=a3 step=a4 ty=i64
; mv a1,t4
; ret
function %b(i8) -> i8 {
block0(v0: i8):
v1 = clz v0
return v1
}
; block0:
; clz a3,a0##ty=i8 tmp=a2 step=a1
; mv a0,a3
; ret
function %b(i16) -> i16 {
block0(v0: i16):
v1 = clz v0
return v1
}
; block0:
; clz a3,a0##ty=i16 tmp=a2 step=a1
; mv a0,a3
; ret
function %b(i32) -> i32 {
block0(v0: i32):
v1 = clz v0
return v1
}
; block0:
; clz a3,a0##ty=i32 tmp=a2 step=a1
; mv a0,a3
; ret
function %b(i64) -> i64 {
block0(v0: i64):
v1 = clz v0
return v1
}
; block0:
; clz a3,a0##ty=i64 tmp=a2 step=a1
; mv a0,a3
; ret
function %b(i128) -> i128 {
block0(v0: i128):
v1 = clz v0
return v1
}
; block0:
; clz a4,a1##ty=i64 tmp=a2 step=a3
; clz t3,a0##ty=i64 tmp=a6 step=a7
; li t0,64
; select_reg t2,t3,zero##condition=(t0 eq a4)
; add a0,a4,t2
; mv a1,zero
; ret
function %c(i8) -> i8 {
block0(v0: i8):
v1 = cls v0
return v1
}
; block0:
; sext.b a1,a0
; not a2,a0
; select_reg a4,a2,a0##condition=(a1 slt zero)
; clz t3,a4##ty=i8 tmp=a6 step=a7
; addi a0,t3,-1
; ret
function %c(i16) -> i16 {
block0(v0: i16):
v1 = cls v0
return v1
}
; block0:
; sext.h a1,a0
; not a2,a0
; select_reg a4,a2,a0##condition=(a1 slt zero)
; clz t3,a4##ty=i16 tmp=a6 step=a7
; addi a0,t3,-1
; ret
function %c(i32) -> i32 {
block0(v0: i32):
v1 = cls v0
return v1
}
; block0:
; sext.w a1,a0
; not a2,a0
; select_reg a4,a2,a0##condition=(a1 slt zero)
; clz t3,a4##ty=i32 tmp=a6 step=a7
; addi a0,t3,-1
; ret
function %c(i64) -> i64 {
block0(v0: i64):
v1 = cls v0
return v1
}
; block0:
; not a1,a0
; select_reg a2,a1,a0##condition=(a0 slt zero)
; clz a6,a2##ty=i64 tmp=a4 step=a5
; addi a0,a6,-1
; ret
function %c(i128) -> i128 {
block0(v0: i128):
v1 = cls v0
return v1
}
; block0:
; not a2,a0
; select_reg a4,a2,a0##condition=(a1 slt zero)
; not a6,a1
; select_reg t3,a6,a1##condition=(a1 slt zero)
; clz t2,t3##ty=i64 tmp=t0 step=t1
; clz a3,a4##ty=i64 tmp=a1 step=a2
; li a5,64
; select_reg a7,a3,zero##condition=(a5 eq t2)
; add t4,t2,a7
; addi a0,t4,-1
; mv a1,zero
; ret
function %d(i8) -> i8 {
block0(v0: i8):
v1 = ctz v0
return v1
}
; block0:
; ctz a3,a0##ty=i8 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i16) -> i16 {
block0(v0: i16):
v1 = ctz v0
return v1
}
; block0:
; ctz a3,a0##ty=i16 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = ctz v0
return v1
}
; block0:
; ctz a3,a0##ty=i32 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i64) -> i64 {
block0(v0: i64):
v1 = ctz v0
return v1
}
; block0:
; ctz a3,a0##ty=i64 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i128) -> i128 {
block0(v0: i128):
v1 = ctz v0
return v1
}
; block0:
; ctz a4,a0##ty=i64 tmp=a2 step=a3
; ctz t3,a1##ty=i64 tmp=a6 step=a7
; li t0,64
; select_reg t2,t3,zero##condition=(t0 eq a4)
; add a0,a4,t2
; mv a1,zero
; ret
function %d(i128) -> i128 {
block0(v0: i128):
v1 = popcnt v0
return v1
}
; block0:
; popcnt a4,a0##ty=i64 tmp=a2 step=a3
; popcnt t3,a1##ty=i64 tmp=a6 step=a7
; add a0,a4,t3
; mv a1,zero
; ret
function %d(i64) -> i64 {
block0(v0: i64):
v1 = popcnt v0
return v1
}
; block0:
; popcnt a3,a0##ty=i64 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = popcnt v0
return v1
}
; block0:
; popcnt a3,a0##ty=i32 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i16) -> i16 {
block0(v0: i16):
v1 = popcnt v0
return v1
}
; block0:
; popcnt a3,a0##ty=i16 tmp=a2 step=a1
; mv a0,a3
; ret
function %d(i8) -> i8 {
block0(v0: i8):
v1 = popcnt v0
return v1
}
; block0:
; popcnt a3,a0##ty=i8 tmp=a2 step=a1
; mv a0,a3
; ret
function %bextend_b8() -> b32 {
block0:
v1 = bconst.b8 true
v2 = bextend.b32 v1
return v2
}
; block0:
; li a0,-1
; ret
function %bextend_b1() -> b32 {
block0:
v1 = bconst.b1 true
v2 = bextend.b32 v1
return v2
}
; block0:
; li a0,-1
; ret
function %bnot_i32(i32) -> i32 {
block0(v0: i32):
v1 = bnot v0
return v1
}
; block0:
; not a0,a0
; ret
function %bnot_i64(i64) -> i64 {
block0(v0: i64):
v1 = bnot v0
return v1
}
; block0:
; not a0,a0
; ret
function %bnot_i64_with_shift(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = ishl.i64 v0, v1
v3 = bnot v2
return v3
}
; block0:
; slli a1,a0,3
; not a0,a1
; ret
function %bnot_i128(i128) -> i128 {
block0(v0: i128):
v1 = bnot v0
return v1
}
; block0:
; not a0,a0
; not a1,a1
; ret
function %band_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band v0, v1
return v2
}
; block0:
; and a0,a0,a1
; ret
function %band_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band v0, v1
return v2
}
; block0:
; and a0,a0,a1
; ret
function %band_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = band v0, v1
return v2
}
; block0:
; and a0,a0,a2
; and a1,a1,a3
; ret
function %band_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = band v0, v1
return v2
}
; block0:
; andi a0,a0,3
; ret
function %band_i64_constant2(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = band v1, v0
return v2
}
; block0:
; andi a0,a0,3
; ret
function %band_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = band v0, v3
return v4
}
; block0:
; slli a2,a1,3
; and a0,a0,a2
; ret
function %band_i64_constant_shift2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = band v3, v0
return v4
}
; block0:
; slli a2,a1,3
; and a0,a2,a0
; ret
function %bor_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor v0, v1
return v2
}
; block0:
; or a0,a0,a1
; ret
function %bor_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor v0, v1
return v2
}
; block0:
; or a0,a0,a1
; ret
function %bor_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = bor v0, v1
return v2
}
; block0:
; or a0,a0,a2
; or a1,a1,a3
; ret
function %bor_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = bor v0, v1
return v2
}
; block0:
; ori a0,a0,3
; ret
function %bor_i64_constant2(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = bor v1, v0
return v2
}
; block0:
; ori a0,a0,3
; ret
function %bor_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = bor v0, v3
return v4
}
; block0:
; slli a2,a1,3
; or a0,a0,a2
; ret
function %bor_i64_constant_shift2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = bor v3, v0
return v4
}
; block0:
; slli a2,a1,3
; or a0,a2,a0
; ret
function %bxor_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bxor v0, v1
return v2
}
; block0:
; xor a0,a0,a1
; ret
function %bxor_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor v0, v1
return v2
}
; block0:
; xor a0,a0,a1
; ret
function %bxor_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = bxor v0, v1
return v2
}
; block0:
; xor a0,a0,a2
; xor a1,a1,a3
; ret
function %bxor_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = bxor v0, v1
return v2
}
; block0:
; xori a0,a0,3
; ret
function %bxor_i64_constant2(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = bxor v1, v0
return v2
}
; block0:
; xori a0,a0,3
; ret
function %bxor_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = bxor v0, v3
return v4
}
; block0:
; slli a2,a1,3
; xor a0,a0,a2
; ret
function %bxor_i64_constant_shift2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 3
v3 = ishl.i64 v1, v2
v4 = bxor v3, v0
return v4
}
; block0:
; slli a2,a1,3
; xor a0,a2,a0
; ret
function %band_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band_not v0, v1
return v2
}
; block0:
; not a1,a1
; and a0,a0,a1
; ret
function %band_not_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band_not v0, v1
return v2
}
; block0:
; not a1,a1
; and a0,a0,a1
; ret
function %band_not_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = band_not v0, v1
return v2
}
; block0:
; not a4,a2
; and a0,a0,a4
; not t3,a3
; and a1,a1,t3
; ret
function %band_not_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 4
v2 = band_not v0, v1
return v2
}
; block0:
; li a1,4
; not a2,a1
; and a0,a0,a2
; ret
function %band_not_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 4
v3 = ishl.i64 v1, v2
v4 = band_not v0, v3
return v4
}
; block0:
; slli a3,a1,4
; not a2,a3
; and a0,a0,a2
; ret
function %bor_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor_not v0, v1
return v2
}
; block0:
; not a1,a1
; or a0,a0,a1
; ret
function %bor_not_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor_not v0, v1
return v2
}
; block0:
; not a1,a1
; or a0,a0,a1
; ret
function %bor_not_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = bor_not v0, v1
return v2
}
; block0:
; not a4,a2
; or a0,a0,a4
; not t3,a3
; or a1,a1,t3
; ret
function %bor_not_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 4
v2 = bor_not v0, v1
return v2
}
; block0:
; li a1,4
; not a2,a1
; or a0,a0,a2
; ret
function %bor_not_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 4
v3 = ishl.i64 v1, v2
v4 = bor_not v0, v3
return v4
}
; block0:
; slli a3,a1,4
; not a2,a3
; or a0,a0,a2
; ret
function %bxor_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bxor_not v0, v1
return v2
}
; block0:
; not a1,a1
; xor a0,a0,a1
; ret
function %bxor_not_i64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor_not v0, v1
return v2
}
; block0:
; not a1,a1
; xor a0,a0,a1
; ret
function %bxor_not_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = bxor_not v0, v1
return v2
}
; block0:
; not a4,a2
; xor a0,a0,a4
; not t3,a3
; xor a1,a1,t3
; ret
function %bxor_not_i64_constant(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 4
v2 = bxor_not v0, v1
return v2
}
; block0:
; li a1,4
; not a2,a1
; xor a0,a0,a2
; ret
function %bxor_not_i64_constant_shift(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconst.i64 4
v3 = ishl.i64 v1, v2
v4 = bxor_not v0, v3
return v4
}
; block0:
; slli a3,a1,4
; not a2,a3
; xor a0,a0,a2
; ret
function %ishl_i128_i8(i128, i8) -> i128 {
block0(v0: i128, v1: i8):
v2 = ishl.i128 v0, v1
return v2
}
; block0:
; andi a3,a2,127
; li a5,128
; sub a5,a5,a3
; sll t3,a0,a3
; srl t0,a0,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; sll a1,a1,a3
; or a4,t2,a1
; li a5,64
; select_reg a0,zero,t3##condition=(a3 uge a5)
; select_reg a1,t3,a4##condition=(a3 uge a5)
; ret
function %ishl_i128_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = ishl.i128 v0, v1
return v2
}
; block0:
; andi a4,a2,127
; li a6,128
; sub a6,a6,a4
; sll t4,a0,a4
; srl t1,a0,a6
; select_reg a0,zero,t1##condition=(a4 eq zero)
; sll a2,a1,a4
; or a5,a0,a2
; li a6,64
; select_reg a0,zero,t4##condition=(a4 uge a6)
; select_reg a1,t4,a5##condition=(a4 uge a6)
; ret
function %ushr_i128_i8(i128, i8) -> i128 {
block0(v0: i128, v1: i8):
v2 = ushr.i128 v0, v1
return v2
}
; block0:
; andi a3,a2,127
; li a5,128
; sub a5,a5,a3
; sll t3,a1,a5
; select_reg t0,zero,t3##condition=(a3 eq zero)
; srl t2,a0,a3
; or a2,t0,t2
; li a4,64
; srl a5,a1,a3
; select_reg a0,a5,a2##condition=(a3 uge a4)
; select_reg a1,zero,a5##condition=(a3 uge a4)
; ret
function %ushr_i128_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = ushr.i128 v0, v1
return v2
}
; block0:
; andi a4,a2,127
; li a6,128
; sub a6,a6,a4
; sll t4,a1,a6
; select_reg t1,zero,t4##condition=(a4 eq zero)
; srl a0,a0,a4
; or a2,t1,a0
; li a5,64
; srl a6,a1,a4
; select_reg a0,a6,a2##condition=(a4 uge a5)
; select_reg a1,zero,a6##condition=(a4 uge a5)
; ret
function %sshr_i128_i8(i128, i8) -> i128 {
block0(v0: i128, v1: i8):
v2 = sshr.i128 v0, v1
return v2
}
; block0:
; andi a3,a2,127
; li a5,128
; sub a5,a5,a3
; sll t3,a1,a5
; select_reg t0,zero,t3##condition=(a3 eq zero)
; srl t2,a0,a3
; or a2,t0,t2
; li a4,64
; sra a5,a1,a3
; li a7,-1
; select_reg t4,a7,zero##condition=(a1 slt zero)
; select_reg a0,a5,a2##condition=(a3 uge a4)
; select_reg a1,t4,a5##condition=(a3 uge a4)
; ret
function %sshr_i128_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = sshr.i128 v0, v1
return v2
}
; block0:
; andi a4,a2,127
; li a6,128
; sub a6,a6,a4
; sll t4,a1,a6
; select_reg t1,zero,t4##condition=(a4 eq zero)
; srl a0,a0,a4
; or a2,t1,a0
; li a5,64
; sra a6,a1,a4
; li t3,-1
; select_reg t0,t3,zero##condition=(a1 slt zero)
; select_reg a0,a6,a2##condition=(a4 uge a5)
; select_reg a1,t0,a6##condition=(a4 uge a5)
; ret
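
The i128 shifts are lowered branch-free: both the `amount < 64` and `amount >= 64` results are computed and `select_reg` picks one, with an extra select because the cross-half carry uses `lo >> (128 - s)`, a shift that collapses to zero (mod 64) when `s == 0`. A scalar model of `ishl.i128` (a sketch under the same mod-64 shift semantics as the hardware):

    /// Scalar model of the branch-free ishl.i128 lowering above.
    fn shl128(lo: u64, hi: u64, amt: u32) -> (u64, u64) {
        let s = amt & 127;                               // andi a3,a2,127
        // Hardware sll/srl use only the low 6 bits of the amount.
        let lo_shifted = lo.wrapping_shl(s);             // sll t3,a0,a3
        // Bits carried from lo into hi; special-case s == 0 because
        // (128 - 0) & 63 == 0 would leave all of lo in the carry.
        let carry = if s == 0 { 0 } else { lo.wrapping_shr(128 - s) };
        let hi_candidate = carry | hi.wrapping_shl(s);   // or a4,t2,a1
        if s >= 64 {
            (0, lo_shifted)          // the whole lo half lands in hi
        } else {
            (lo_shifted, hi_candidate)
        }
    }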


@@ -0,0 +1,22 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f(i64, i64) -> i64 {
sig0 = (i64) -> i64
block0(v0: i64, v1: i64):
v2 = call_indirect.i64 sig0, v1(v0)
return v2
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; callind a1
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret


@@ -0,0 +1,424 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f1(i64) -> i64 {
fn0 = %g(i64) -> i64
block0(v0: i64):
v1 = call fn0(v0)
return v1
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; load_sym a1,%g+0
; callind a1
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f2(i32) -> i64 {
fn0 = %g(i32 uext) -> i64
block0(v0: i32):
v1 = call fn0(v0)
return v1
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; uext.w a0,a0
; load_sym a3,%g+0
; callind a3
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f3(i32) -> i32 uext {
block0(v0: i32):
return v0
}
; block0:
; uext.w a0,a0
; ret
function %f4(i32) -> i64 {
fn0 = %g(i32 sext) -> i64
block0(v0: i32):
v1 = call fn0(v0)
return v1
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; sext.w a0,a0
; load_sym a3,%g+0
; callind a3
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f5(i32) -> i32 sext {
block0(v0: i32):
return v0
}
; block0:
; sext.w a0,a0
; ret
function %f6(i8) -> i64 {
fn0 = %g(i32, i32, i32, i32, i32, i32, i32, i32, i8 sext) -> i64
block0(v0: i8):
v1 = iconst.i32 42
v2 = call fn0(v1, v1, v1, v1, v1, v1, v1, v1, v0)
return v2
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv t3,a0
; add sp,-16
; virtual_sp_offset_adj +16
; li a0,42
; li a1,42
; li a2,42
; li a3,42
; li a4,42
; li a5,42
; li a6,42
; li a7,42
; sext.b t3,t3
; sd t3,0(sp)
; load_sym t4,%g+0
; callind t4
; add sp,+16
; virtual_sp_offset_adj -16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f7(i8) -> i32, i32, i32, i32, i32, i32, i32, i32, i8 sext {
block0(v0: i8):
v1 = iconst.i32 42
return v1, v1, v1, v1, v1, v1, v1, v1, v0
}
; block0:
; mv a7,a0
; mv a6,a1
; li a0,42
; li a1,42
; li a2,42
; li a5,42
; li t3,42
; li t1,42
; li a3,42
; li a4,42
; mv t2,a7
; mv t0,a6
; sw a2,0(t0)
; sw a5,8(t0)
; sw t3,16(t0)
; sw t1,24(t0)
; sw a3,32(t0)
; sw a4,40(t0)
; sext.b t2,t2
; sd t2,48(t0)
; ret
function %f8() {
fn0 = %g0() -> f32
fn1 = %g1() -> f64
fn2 = %g2()
fn3 = %g3(f32)
fn4 = %g4(f64)
block0:
v0 = call fn0()
v1 = call fn1()
v2 = call fn1()
call fn2()
call fn3(v0)
call fn4(v1)
call fn4(v2)
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; fsd fs2,-8(sp)
; fsd fs3,-16(sp)
; fsd fs11,-24(sp)
; add sp,-32
; block0:
; load_sym a6,%g0+0
; callind a6
; fmv.d fs11,fa0
; load_sym a6,%g1+0
; callind a6
; fmv.d fs2,fa0
; load_sym a6,%g1+0
; callind a6
; fmv.d fs3,fa0
; load_sym a6,%g2+0
; callind a6
; load_sym a7,%g3+0
; fmv.d fa0,fs11
; callind a7
; load_sym t3,%g4+0
; fmv.d fa0,fs2
; callind t3
; load_sym t4,%g4+0
; fmv.d fa0,fs3
; callind t4
; add sp,+32
; fld fs2,-8(sp)
; fld fs3,-16(sp)
; fld fs11,-24(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f11(i128, i64) -> i64 {
block0(v0: i128, v1: i64):
v2, v3 = isplit v0
return v3
}
; block0:
; mv a2,a0
; mv a0,a1
; ret
function %f11_call(i64) -> i64 {
fn0 = %f11(i128, i64) -> i64
block0(v0: i64):
v1 = iconst.i64 42
v2 = iconcat v1, v0
v3 = call fn0(v2, v1)
return v3
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv a6,a0
; li a0,42
; mv a1,a6
; li a2,42
; load_sym a6,%f11+0
; callind a6
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f12(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
v2, v3 = isplit v1
return v2
}
; block0:
; mv a0,a1
; ret
function %f12_call(i64) -> i64 {
fn0 = %f12(i64, i128) -> i64
block0(v0: i64):
v1 = iconst.i64 42
v2 = iconcat v0, v1
v3 = call fn0(v1, v2)
return v3
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv a1,a0
; li a2,42
; li a0,42
; load_sym a6,%f12+0
; callind a6
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f13(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
v2, v3 = isplit v1
return v2
}
; block0:
; mv a0,a1
; ret
function %f13_call(i64) -> i64 {
fn0 = %f13(i64, i128) -> i64
block0(v0: i64):
v1 = iconst.i64 42
v2 = iconcat v0, v1
v3 = call fn0(v1, v2)
return v3
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv a1,a0
; li a2,42
; li a0,42
; load_sym a6,%f13+0
; callind a6
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f14(i128, i128, i128, i64, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
return v4
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; ld a1,16(fp)
; mv a0,a7
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f14_call(i128, i64) -> i128 {
fn0 = %f14(i128, i128, i128, i64, i128) -> i128
block0(v0: i128, v1: i64):
v2 = call fn0(v0, v0, v0, v1, v0)
return v2
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv a7,a0
; mv a6,a2
; add sp,-16
; virtual_sp_offset_adj +16
; sd a1,0(sp)
; mv a5,a1
; load_sym t3,%f14+0
; mv a1,a5
; mv a3,a5
; mv a0,a7
; mv a2,a7
; mv a4,a7
; callind t3
; add sp,+16
; virtual_sp_offset_adj -16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f15(i128, i128, i128, i64, i128) -> i128{
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
return v4
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; ld a1,16(fp)
; mv a0,a7
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f15_call(i128, i64) -> i128 {
fn0 = %f15(i128, i128, i128, i64, i128) -> i128
block0(v0: i128, v1: i64):
v2 = call fn0(v0, v0, v0, v1, v0)
return v2
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; block0:
; mv a7,a0
; mv a6,a2
; add sp,-16
; virtual_sp_offset_adj +16
; sd a1,0(sp)
; mv a5,a1
; load_sym t3,%f15+0
; mv a1,a5
; mv a3,a5
; mv a0,a7
; mv a2,a7
; mv a4,a7
; callind t3
; add sp,+16
; virtual_sp_offset_adj -16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f16() -> i32, i32 {
block0:
v0 = iconst.i32 0
v1 = iconst.i32 1
return v0, v1
}
; block0:
; li a0,0
; li a1,1
; ret
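
These tests pin the standard RV64 calling convention: the first eight integer arguments travel in a0–a7, an i128 occupies a register pair, and overflow arguments spill to the caller's outgoing-argument area; note in %f6 how the caller honors the `sext` attribute (`sext.b t3,t3`) before storing the ninth argument with `sd t3,0(sp)`. The same ABI rule, seen from Rust (illustrative; the function name is made up):

    // On riscv64, the first eight integer args ride in a0-a7; the ninth (an
    // i8 here) is passed on the stack, widened by the caller per the psABI.
    #[no_mangle]
    pub extern "C" fn nine_args(
        _a: i32, _b: i32, _c: i32, _d: i32,
        _e: i32, _f: i32, _g: i32, _h: i32,
        ninth: i8,
    ) -> i64 {
        ninth as i64
    }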


@@ -0,0 +1,391 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f(i64, i64) -> b1 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2
}
; block0:
; eq a0,a0,a1##ty=i64
; ret
function %icmp_eq_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp eq v0, v1
return v2
}
; block0:
; eq a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ne_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp ne v0, v1
return v2
}
; block0:
; ne a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_slt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp slt v0, v1
return v2
}
; block0:
; slt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ult_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp ult v0, v1
return v2
}
; block0:
; ult a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sle_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp sle v0, v1
return v2
}
; block0:
; sle a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ule_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp ule v0, v1
return v2
}
; block0:
; ule a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sgt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp sgt v0, v1
return v2
}
; block0:
; sgt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_ugt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp ugt v0, v1
return v2
}
; block0:
; ugt a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_sge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp sge v0, v1
return v2
}
; block0:
; sge a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %icmp_uge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
v2 = icmp uge v0, v1
return v2
}
; block0:
; uge a0,[a0,a1],[a2,a3]##ty=i128
; ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block2
block1:
v4 = iconst.i64 1
return v4
block2:
v5 = iconst.i64 2
return v5
}
; block0:
; eq a3,a0,a1##ty=i64
; bne a3,zero,taken(label1),not_taken(label2)
; block1:
; li a0,1
; ret
; block2:
; li a0,2
; ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block1
block1:
v4 = iconst.i64 1
return v4
}
; block0:
; eq a2,a0,a1##ty=i64
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; li a0,1
; ret
function %i128_brz(i128){
block0(v0: i128):
brz v0, block1
jump block1
block1:
nop
return
}
; block0:
; bne a1,zero,taken(label2),not_taken(0)
; beq a0,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_brnz(i128){
block0(v0: i128):
brnz v0, block1
jump block1
block1:
nop
return
}
; block0:
; bne a1,zero,taken(label1),not_taken(0)
; bne a0,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_eq(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp eq v0, v1, block1
jump block1
block1:
return
}
; block0:
; eq a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_ne(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp ne v0, v1, block1
jump block1
block1:
return
}
; block0:
; ne a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_slt(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp slt v0, v1, block1
jump block1
block1:
return
}
; block0:
; slt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_ult(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp ult v0, v1, block1
jump block1
block1:
return
}
; block0:
; ult a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_sle(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp sle v0, v1, block1
jump block1
block1:
return
}
; block0:
; sle a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_ule(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp ule v0, v1, block1
jump block1
block1:
return
}
; block0:
; ule a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_sgt(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp sgt v0, v1, block1
jump block1
block1:
return
}
; block0:
; sgt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_ugt(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp ugt v0, v1, block1
jump block1
block1:
return
}
; block0:
; ugt a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_sge(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp sge v0, v1, block1
jump block1
block1:
return
}
; block0:
; sge a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
function %i128_bricmp_uge(i128, i128) {
block0(v0: i128, v1: i128):
br_icmp uge v0, v1, block1
jump block1
block1:
return
}
; block0:
; uge a2,[a0,a1],[a2,a3]##ty=i128
; bne a2,zero,taken(label1),not_taken(label2)
; block1:
; j label3
; block2:
; j label3
; block3:
; ret
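
The i128 comparisons are emitted as single pseudo-instructions over register pairs (`ult a0,[a0,a1],[a2,a3]##ty=i128`), which expand to the usual compare-high-then-low sequence. In scalar form (illustrative):

    /// What the `ult ... ty=i128` pseudo-instruction computes over halves.
    fn ult_i128(x_lo: u64, x_hi: u64, y_lo: u64, y_hi: u64) -> bool {
        x_hi < y_hi || (x_hi == y_hi && x_lo < y_lo)
    }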


@@ -0,0 +1,86 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f(i8, i64, i64) -> i64 {
block0(v0: i8, v1: i64, v2: i64):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = selectif.i64 eq v4, v1, v2
return v5
}
; block0:
; li a3,42
; uext.b a5,a0
; uext.b a7,a3
; eq t4,a5,a7##ty=i8
; selectif a0,a1,a2##test=t4
; ret
function %g(i8) -> b1 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = trueif eq v4
return v5
}
; block0:
; mv a5,a0
; li a0,42
; uext.b a2,a5
; uext.b a4,a0
; eq a0,a2,a4##ty=i8
; ret
function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = bitselect.i8 v0, v1, v2
return v3
}
; block0:
; mv t3,a2
; and a2,a0,a1
; not a4,a0
; and a6,a4,t3
; or a0,a2,a6
; ret
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; block0:
; select_i8 a0,a1,a2##condition=a0
; ret
function %i(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
v3 = iconst.i32 42
v4 = icmp.i32 eq v0, v3
v5 = select.i8 v4, v1, v2
return v5
}
; block0:
; li a3,42
; uext.w a5,a0
; uext.w a7,a3
; eq t4,a5,a7##ty=i32
; select_i8 a0,a1,a2##condition=t4
; ret
function %i128_select(b1, i128, i128) -> i128 {
block0(v0: b1, v1: i128, v2: i128):
v3 = select.i128 v0, v1, v2
return v3
}
; block0:
; select_i128 [a0,a1],[a1,a2],[a3,a4]##condition=a0
; ret
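
`bitselect` has no direct RISC-V instruction, so %h lowers it to the classic mask blend, visible above as `and`/`not`/`and`/`or`. The same computation in scalar form:

    /// Scalar form of the %h bitselect lowering: bits of `c` choose
    /// between the corresponding bits of `x` and `y`.
    fn bitselect(c: u8, x: u8, y: u8) -> u8 {
        (c & x) | (!c & y)
    }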


@@ -0,0 +1,328 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f() -> b8 {
block0:
v0 = bconst.b8 true
return v0
}
; block0:
; li a0,-1
; ret
function %f() -> b16 {
block0:
v0 = bconst.b16 false
return v0
}
; block0:
; li a0,0
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0
return v0
}
; block0:
; li a0,0
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff
return v0
}
; block0:
; lui a0,16
; addi a0,a0,4095
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffff0000
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff00000000
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffff00000000
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff000000000000
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffff000000000000
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffffffff
return v0
}
; block0:
; li a0,-1
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffff0000
return v0
}
; block0:
; lui a0,1048560
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffff0000ffff
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffffffff0000ffff
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000ffffffff
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffff0000ffffffff
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x0000ffffffffffff
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xffffffffffff
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xf34bf0a31212003a ;; random digits
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xf34bf0a31212003a
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e900001ef40000 ;; random digits with 2 clear half words
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0x12e900001ef40000
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e9ffff1ef4ffff ;; random digits with 2 full half words
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0x12e9ffff1ef4ffff
; ret
function %f() -> i32 {
block0:
v0 = iconst.i32 -1
return v0
}
; block0:
; li a0,-1
; ret
function %f() -> i32 {
block0:
v0 = iconst.i32 0xfffffff7
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xfffffff7
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xfffffff7
return v0
}
; block0:
; auipc a0,0
; ld a0,12(a0)
; j 12
; .8byte 0xfffffff7
; ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xfffffffffffffff7
return v0
}
; block0:
; li a0,-9
; ret
function %f() -> f64 {
block0:
v0 = f64const 0x1.0
return v0
}
; block0:
; auipc t2,0
; ld t2,12(t2)
; j 12
; .8byte 0x3ff0000000000000
; fmv.d.x fa0,t2
; ret
function %f() -> f32 {
block0:
v0 = f32const 0x5.0
return v0
}
; block0:
; lui t2,264704
; fmv.w.x fa0,t2
; ret
function %f() -> f64 {
block0:
v0 = f64const 0x32.0
return v0
}
; block0:
; auipc t2,0
; ld t2,12(t2)
; j 12
; .8byte 0x4049000000000000
; fmv.d.x fa0,t2
; ret
function %f() -> f32 {
block0:
v0 = f32const 0x32.0
return v0
}
; block0:
; lui t2,271488
; fmv.w.x fa0,t2
; ret
function %f() -> f64 {
block0:
v0 = f64const 0x0.0
return v0
}
; block0:
; li t2,0
; fmv.d.x fa0,t2
; ret
function %f() -> f32 {
block0:
v0 = f32const 0x0.0
return v0
}
; block0:
; li t2,0
; fmv.w.x fa0,t2
; ret
function %f() -> f64 {
block0:
v0 = f64const -0x10.0
return v0
}
; block0:
; auipc t2,0
; ld t2,12(t2)
; j 12
; .8byte 0xc030000000000000
; fmv.d.x fa0,t2
; ret
function %f() -> f32 {
block0:
v0 = f32const -0x10.0
return v0
}
; block0:
; auipc t2,0
; lwu t2,12(t2)
; j 8
; .4byte 0xc1800000
; fmv.w.x fa0,t2
; ret
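
Constant materialization picks among three strategies: a bare `li` for 12-bit signed values, `lui` (optionally plus `addi`) for any sign-extended 32-bit pattern, and otherwise an inline literal pool read PC-relatively (`auipc`/`ld` jumping over the `.8byte` island). A sketch of a selection predicate consistent with the outputs above (illustrative, not the backend's code):

    enum Strategy {
        Li,           // single li (addi from x0)
        LuiAddi,      // lui, optionally followed by addi
        ConstantPool, // auipc / ld / j / .8byte island
    }

    fn pick(value: i64) -> Strategy {
        if (-2048..=2047).contains(&value) {
            Strategy::Li
        } else if value == (value as i32 as i64) {
            Strategy::LuiAddi // sign-extended 32-bit pattern
        } else {
            Strategy::ConstantPool
        }
    }

    fn main() {
        assert!(matches!(pick(-1), Strategy::Li));
        assert!(matches!(pick(0xffff), Strategy::LuiAddi));
        assert!(matches!(pick(0xffff0000), Strategy::ConstantPool));
        assert!(matches!(pick(0xffffffffffff0000u64 as i64), Strategy::LuiAddi));
    }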


@@ -0,0 +1,119 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
v2 = iconst.i64 42
v3 = iadd.i64 v2, v1
return v3
}
; block0:
; sext.b a1,a0
; addi a0,a1,42
; ret
function %f2(i8, i64) -> i64 {
block0(v0: i8, v1: i64):
v2 = sextend.i64 v0
v3 = iadd.i64 v2, v1
return v3
}
; block0:
; sext.b a2,a0
; add a0,a2,a1
; ret
function %i128_uextend_i64(i64) -> i128 {
block0(v0: i64):
v1 = uextend.i128 v0
return v1
}
; block0:
; mv a1,zero
; ret
function %i128_sextend_i64(i64) -> i128 {
block0(v0: i64):
v1 = sextend.i128 v0
return v1
}
; block0:
; slt a1,a0,zero
; sext.b1 a1,a1
; ret
function %i128_uextend_i32(i32) -> i128 {
block0(v0: i32):
v1 = uextend.i128 v0
return v1
}
; block0:
; uext.w a0,a0
; mv a1,zero
; ret
function %i128_sextend_i32(i32) -> i128 {
block0(v0: i32):
v1 = sextend.i128 v0
return v1
}
; block0:
; sext.w a1,a0
; slt a3,a1,zero
; sext.b1 a1,a3
; ret
function %i128_uextend_i16(i16) -> i128 {
block0(v0: i16):
v1 = uextend.i128 v0
return v1
}
; block0:
; uext.h a0,a0
; mv a1,zero
; ret
function %i128_sextend_i16(i16) -> i128 {
block0(v0: i16):
v1 = sextend.i128 v0
return v1
}
; block0:
; sext.h a1,a0
; slt a3,a1,zero
; sext.b1 a1,a3
; ret
function %i128_uextend_i8(i8) -> i128 {
block0(v0: i8):
v1 = uextend.i128 v0
return v1
}
; block0:
; uext.b a0,a0
; mv a1,zero
; ret
function %i128_sextend_i8(i8) -> i128 {
block0(v0: i8):
v1 = sextend.i128 v0
return v1
}
; block0:
; sext.b a1,a0
; slt a3,a1,zero
; sext.b1 a1,a3
; ret
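
Extension to i128 only ever touches the high half: zero-extension writes `mv a1,zero`, while sign-extension derives the high half from the sign bit via `slt a1,a0,zero` followed by `sext.b1` (booleans are materialized as 0 or all-ones). In scalar form (illustrative):

    /// High half produced by sextend.i128 above: 0 or all-ones from the sign bit.
    fn sext_i64_to_i128(x: i64) -> (u64, u64) {
        let hi = if x < 0 { u64::MAX } else { 0 }; // slt + sext.b1
        (x as u64, hi)
    }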


@@ -0,0 +1,84 @@
test compile precise-output
set unwind_info=false
target riscv64
function u0:0(i8) -> f32 {
block0(v0: i8):
v1 = fcvt_from_uint.f32 v0
return v1
}
; block0:
; fcvt.s.lu fa0,a0
; ret
function u0:0(i8) -> f64 {
block0(v0: i8):
v1 = fcvt_from_uint.f64 v0
return v1
}
; block0:
; fcvt.d.lu fa0,a0
; ret
function u0:0(i16) -> f32 {
block0(v0: i16):
v1 = fcvt_from_uint.f32 v0
return v1
}
; block0:
; fcvt.s.lu fa0,a0
; ret
function u0:0(i16) -> f64 {
block0(v0: i16):
v1 = fcvt_from_uint.f64 v0
return v1
}
; block0:
; fcvt.d.lu fa0,a0
; ret
function u0:0(f32) -> i8 {
block0(v0: f32):
v1 = fcvt_to_uint.i8 v0
return v1
}
; block0:
; fcvt_to_uint.i8 a0,fa0##in_ty=f32 tmp=ft4
; ret
function u0:0(f64) -> i8 {
block0(v0: f64):
v1 = fcvt_to_uint.i8 v0
return v1
}
; block0:
; fcvt_to_uint.i8 a0,fa0##in_ty=f64 tmp=ft4
; ret
function u0:0(f32) -> i16 {
block0(v0: f32):
v1 = fcvt_to_uint.i16 v0
return v1
}
; block0:
; fcvt_to_uint.i16 a0,fa0##in_ty=f32 tmp=ft4
; ret
function u0:0(f64) -> i16 {
block0(v0: f64):
v1 = fcvt_to_uint.i16 v0
return v1
}
; block0:
; fcvt_to_uint.i16 a0,fa0##in_ty=f64 tmp=ft4
; ret


@@ -0,0 +1,576 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f1(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fadd v0, v1
return v2
}
; block0:
; fadd.s fa0,fa0,fa1
; ret
function %f2(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fadd v0, v1
return v2
}
; block0:
; fadd.d fa0,fa0,fa1
; ret
function %f3(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fsub v0, v1
return v2
}
; block0:
; fsub.s fa0,fa0,fa1
; ret
function %f4(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fsub v0, v1
return v2
}
; block0:
; fsub.d fa0,fa0,fa1
; ret
function %f5(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmul v0, v1
return v2
}
; block0:
; fmul.s fa0,fa0,fa1
; ret
function %f6(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmul v0, v1
return v2
}
; block0:
; fmul.d fa0,fa0,fa1
; ret
function %f7(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fdiv v0, v1
return v2
}
; block0:
; fdiv.s fa0,fa0,fa1
; ret
function %f8(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fdiv v0, v1
return v2
}
; block0:
; fdiv.d fa0,fa0,fa1
; ret
function %f9(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
}
; block0:
; fmin.s ft4,fa0,fa1##tmp=a2 ty=f32
; fmv.d fa0,ft4
; ret
function %f10(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmin v0, v1
return v2
}
; block0:
; fmin.d ft4,fa0,fa1##tmp=a2 ty=f64
; fmv.d fa0,ft4
; ret
function %f11(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmax v0, v1
return v2
}
; block0:
; fmax.s ft4,fa0,fa1##tmp=a2 ty=f32
; fmv.d fa0,ft4
; ret
function %f12(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmax v0, v1
return v2
}
; block0:
; fmax.d ft4,fa0,fa1##tmp=a2 ty=f64
; fmv.d fa0,ft4
; ret
function %f13(f32) -> f32 {
block0(v0: f32):
v1 = sqrt v0
return v1
}
; block0:
; fsqrt.s fa0,fa0
; ret
function %f15(f64) -> f64 {
block0(v0: f64):
v1 = sqrt v0
return v1
}
; block0:
; fsqrt.d fa0,fa0
; ret
function %f16(f32) -> f32 {
block0(v0: f32):
v1 = fabs v0
return v1
}
; block0:
; fabs.s fa0,fa0
; ret
function %f17(f64) -> f64 {
block0(v0: f64):
v1 = fabs v0
return v1
}
; block0:
; fabs.d fa0,fa0
; ret
function %f18(f32) -> f32 {
block0(v0: f32):
v1 = fneg v0
return v1
}
; block0:
; fneg.s fa0,fa0
; ret
function %f19(f64) -> f64 {
block0(v0: f64):
v1 = fneg v0
return v1
}
; block0:
; fneg.d fa0,fa0
; ret
function %f20(f32) -> f64 {
block0(v0: f32):
v1 = fpromote.f64 v0
return v1
}
; block0:
; fcvt.d.s fa0,fa0
; ret
function %f21(f64) -> f32 {
block0(v0: f64):
v1 = fdemote.f32 v0
return v1
}
; block0:
; fcvt.s.d fa0,fa0
; ret
function %f22(f32) -> f32 {
block0(v0: f32):
v1 = ceil v0
return v1
}
; block0:
; ceil ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f32
; fmv.d fa0,ft3
; ret
function %f22(f64) -> f64 {
block0(v0: f64):
v1 = ceil v0
return v1
}
; block0:
; ceil ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f64
; fmv.d fa0,ft3
; ret
function %f23(f32) -> f32 {
block0(v0: f32):
v1 = floor v0
return v1
}
; block0:
; floor ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f32
; fmv.d fa0,ft3
; ret
function %f24(f64) -> f64 {
block0(v0: f64):
v1 = floor v0
return v1
}
; block0:
; floor ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f64
; fmv.d fa0,ft3
; ret
function %f25(f32) -> f32 {
block0(v0: f32):
v1 = trunc v0
return v1
}
; block0:
; trunc ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f32
; fmv.d fa0,ft3
; ret
function %f26(f64) -> f64 {
block0(v0: f64):
v1 = trunc v0
return v1
}
; block0:
; trunc ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f64
; fmv.d fa0,ft3
; ret
function %f27(f32) -> f32 {
block0(v0: f32):
v1 = nearest v0
return v1
}
; block0:
; nearest ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f32
; fmv.d fa0,ft3
; ret
function %f28(f64) -> f64 {
block0(v0: f64):
v1 = nearest v0
return v1
}
; block0:
; nearest ft3,fa0##int_tmp=a1 f_tmp=ft5 ty=f64
; fmv.d fa0,ft3
; ret
function %f29(f32, f32, f32) -> f32 {
block0(v0: f32, v1: f32, v2: f32):
v3 = fma v0, v1, v2
return v3
}
; block0:
; fmadd.s fa0,fa0,fa1,fa2
; ret
function %f30(f64, f64, f64) -> f64 {
block0(v0: f64, v1: f64, v2: f64):
v3 = fma v0, v1, v2
return v3
}
; block0:
; fmadd.d fa0,fa0,fa1,fa2
; ret
function %f31(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fcopysign v0, v1
return v2
}
; block0:
; fsgnj.s fa0,fa0,fa1
; ret
function %f32(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fcopysign v0, v1
return v2
}
; block0:
; fsgnj.d fa0,fa0,fa1
; ret
function %f33(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint.i32 v0
return v1
}
; block0:
; fcvt_to_uint.i32 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f34(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint.i32 v0
return v1
}
; block0:
; fcvt_to_sint.i32 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f35(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint.i64 v0
return v1
}
; block0:
; fcvt_to_uint.i64 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f36(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint.i64 v0
return v1
}
; block0:
; fcvt_to_sint.i64 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f37(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint.i32 v0
return v1
}
; block0:
; fcvt_to_uint.i32 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f38(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint.i32 v0
return v1
}
; block0:
; fcvt_to_sint.i32 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f39(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint.i64 v0
return v1
}
; block0:
; fcvt_to_uint.i64 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f40(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint.i64 v0
return v1
}
; block0:
; fcvt_to_sint.i64 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f41(i32) -> f32 {
block0(v0: i32):
v1 = fcvt_from_uint.f32 v0
return v1
}
; block0:
; fcvt.s.wu fa0,a0
; ret
function %f42(i32) -> f32 {
block0(v0: i32):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; fcvt.s.w fa0,a0
; ret
function %f43(i64) -> f32 {
block0(v0: i64):
v1 = fcvt_from_uint.f32 v0
return v1
}
; block0:
; fcvt.s.lu fa0,a0
; ret
function %f44(i64) -> f32 {
block0(v0: i64):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; fcvt.s.l fa0,a0
; ret
function %f45(i32) -> f64 {
block0(v0: i32):
v1 = fcvt_from_uint.f64 v0
return v1
}
; block0:
; fcvt.d.wu fa0,a0
; ret
function %f46(i32) -> f64 {
block0(v0: i32):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; fcvt.d.w fa0,a0
; ret
function %f47(i64) -> f64 {
block0(v0: i64):
v1 = fcvt_from_uint.f64 v0
return v1
}
; block0:
; fcvt.d.lu fa0,a0
; ret
function %f48(i64) -> f64 {
block0(v0: i64):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; fcvt.d.l fa0,a0
; ret
function %f49(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; block0:
; fcvt_to_uint_sat.i32 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f50(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; block0:
; fcvt_to_sint_sat.i32 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f51(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; block0:
; fcvt_to_uint_sat.i64 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f52(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; block0:
; fcvt_to_sint_sat.i64 a0,fa0##in_ty=f32 tmp=ft4
; ret
function %f53(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; block0:
; fcvt_to_uint_sat.i32 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f54(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; block0:
; fcvt_to_sint_sat.i32 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f55(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; block0:
; fcvt_to_uint_sat.i64 a0,fa0##in_ty=f64 tmp=ft4
; ret
function %f56(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; block0:
; fcvt_to_sint_sat.i64 a0,fa0##in_ty=f64 tmp=ft4
; ret


@@ -0,0 +1,53 @@
test compile precise-output
set unwind_info=false
target riscv64
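;; heap_addr expands to an explicit bounds check: the i32 index is
;; zero-extended and compared against the heap bound (loaded from vmctx for
;; the dynamic heap, a constant for the static one); out-of-bounds indices
;; trap via `udf##trap_code=heap_oob`, and `selectif_spectre_guard` forces the
;; computed address to zero on the failing path so speculative out-of-bounds
;; loads cannot observe heap memory.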
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0
return v2
}
; block0:
; uext.w t3,a1
; ld t4,0(a0)
; addi t4,t4,0
; ugt t0,t3,t4##ty=i64
; beq t0,zero,taken(label1),not_taken(label2)
; block1:
; add t0,a0,t3
; ugt t3,t3,t4##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t0##test=t3
; ret
; block2:
; udf##trap_code=heap_oob
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0
return v2
}
; block0:
; uext.w t3,a1
; lui a7,16
; ugt t4,t3,a7##ty=i64
; beq t4,zero,taken(label1),not_taken(label2)
; block1:
; add t4,a0,t3
; lui a7,16
; ugt t0,t3,a7##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t4##test=t0
; ret
; block2:
; udf##trap_code=heap_oob


@@ -0,0 +1,24 @@
test compile precise-output
set unwind_info=false
target riscv64
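;; The i16 constant 0xddcc takes a `lui`+`addi` pair to materialize and is
;; built once per compare operand; both operands are zero-extended to i16
;; before the compare, and `bint` masks the boolean down to one bit with
;; `andi`.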
function u0:0() -> i8 system_v {
block0:
v0 = iconst.i16 0xddcc
v1 = icmp.i16 ne v0, v0
v2 = bint.i8 v1
return v2
}
; block0:
; lui t2,14
; addi t2,t2,3532
; lui a2,14
; addi a2,a2,3532
; uext.h a5,t2
; uext.h a7,a2
; ne t4,a5,a7##ty=i16
; andi a0,t4,1
; ret


@@ -0,0 +1,17 @@
test compile precise-output
set unwind_info=false
target riscv64
;; Test default (non-SpiderMonkey) ABI.
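;; Both i64 return values travel in registers (a0 and a1) under the standard
;; RISC-V calling convention.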
function %f() -> i64, i64 {
block1:
v0 = iconst.i64 1
v1 = iconst.i64 2
return v0, v1
}
; block0:
; li a0,1
; li a1,2
; ret


@@ -0,0 +1,58 @@
test compile precise-output
set unwind_info=false
target riscv64
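;; iadd on i8, i16, and i32 all lower to `addw`: Cranelift leaves the upper
;; bits of narrow integer results unspecified, so no masking is needed after
;; the add, and only explicit `sextend` operands get an extend instruction.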
function %add8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = iadd.i8 v0, v1
return v2
}
; block0:
; addw a0,a0,a1
; ret
function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = iadd.i16 v0, v1
return v2
}
; block0:
; addw a0,a0,a1
; ret
function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iadd.i32 v0, v1
return v2
}
; block0:
; addw a0,a0,a1
; ret
function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
v2 = sextend.i32 v1
v3 = iadd.i32 v0, v2
return v3
}
; block0:
; sext.b a2,a1
; addw a0,a0,a2
; ret
function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
return v3
}
; block0:
; sext.w a2,a1
; add a0,a0,a2
; ret


@@ -0,0 +1,279 @@
test compile precise-output
set unwind_info=false
target riscv64
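;; A register-pressure test: 32 f64 values are live simultaneously, pushing
;; the allocator into the callee-saved fs* registers and producing the
;; save/restore traffic visible in the prologue and epilogue.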
function %f(f64) -> f64 {
block0(v0: f64):
v1 = fadd.f64 v0, v0
v2 = fadd.f64 v0, v0
v3 = fadd.f64 v0, v0
v4 = fadd.f64 v0, v0
v5 = fadd.f64 v0, v0
v6 = fadd.f64 v0, v0
v7 = fadd.f64 v0, v0
v8 = fadd.f64 v0, v0
v9 = fadd.f64 v0, v0
v10 = fadd.f64 v0, v0
v11 = fadd.f64 v0, v0
v12 = fadd.f64 v0, v0
v13 = fadd.f64 v0, v0
v14 = fadd.f64 v0, v0
v15 = fadd.f64 v0, v0
v16 = fadd.f64 v0, v0
v17 = fadd.f64 v0, v0
v18 = fadd.f64 v0, v0
v19 = fadd.f64 v0, v0
v20 = fadd.f64 v0, v0
v21 = fadd.f64 v0, v0
v22 = fadd.f64 v0, v0
v23 = fadd.f64 v0, v0
v24 = fadd.f64 v0, v0
v25 = fadd.f64 v0, v0
v26 = fadd.f64 v0, v0
v27 = fadd.f64 v0, v0
v28 = fadd.f64 v0, v0
v29 = fadd.f64 v0, v0
v30 = fadd.f64 v0, v0
v31 = fadd.f64 v0, v0
v32 = fadd.f64 v0, v1
v33 = fadd.f64 v2, v3
v34 = fadd.f64 v4, v5
v35 = fadd.f64 v6, v7
v36 = fadd.f64 v8, v9
v37 = fadd.f64 v10, v11
v38 = fadd.f64 v12, v13
v39 = fadd.f64 v14, v15
v40 = fadd.f64 v16, v17
v41 = fadd.f64 v18, v19
v42 = fadd.f64 v20, v21
v43 = fadd.f64 v22, v23
v44 = fadd.f64 v24, v25
v45 = fadd.f64 v26, v27
v46 = fadd.f64 v28, v29
v47 = fadd.f64 v30, v31
v48 = fadd.f64 v32, v33
v49 = fadd.f64 v34, v35
v50 = fadd.f64 v36, v37
v51 = fadd.f64 v38, v39
v52 = fadd.f64 v40, v41
v53 = fadd.f64 v42, v43
v54 = fadd.f64 v44, v45
v55 = fadd.f64 v46, v47
v56 = fadd.f64 v48, v49
v57 = fadd.f64 v50, v51
v58 = fadd.f64 v52, v53
v59 = fadd.f64 v54, v55
v60 = fadd.f64 v56, v57
v61 = fadd.f64 v58, v59
v62 = fadd.f64 v60, v61
return v62
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; fsd fs0,-8(sp)
; fsd fs2,-16(sp)
; fsd fs3,-24(sp)
; fsd fs4,-32(sp)
; fsd fs5,-40(sp)
; fsd fs6,-48(sp)
; fsd fs7,-56(sp)
; fsd fs8,-64(sp)
; fsd fs9,-72(sp)
; fsd fs10,-80(sp)
; fsd fs11,-88(sp)
; add sp,-96
; block0:
; fadd.d ft4,fa0,fa0
; fadd.d ft5,fa0,fa0
; fadd.d ft6,fa0,fa0
; fadd.d ft7,fa0,fa0
; fadd.d fa1,fa0,fa0
; fadd.d fa2,fa0,fa0
; fadd.d fa3,fa0,fa0
; fadd.d fa4,fa0,fa0
; fadd.d fa5,fa0,fa0
; fadd.d fa6,fa0,fa0
; fadd.d fa7,fa0,fa0
; fadd.d ft8,fa0,fa0
; fadd.d ft9,fa0,fa0
; fadd.d ft10,fa0,fa0
; fadd.d ft11,fa0,fa0
; fadd.d ft0,fa0,fa0
; fadd.d ft1,fa0,fa0
; fadd.d ft2,fa0,fa0
; fadd.d ft3,fa0,fa0
; fadd.d fs4,fa0,fa0
; fadd.d fs5,fa0,fa0
; fadd.d fs6,fa0,fa0
; fadd.d fs7,fa0,fa0
; fadd.d fs8,fa0,fa0
; fadd.d fs9,fa0,fa0
; fadd.d fs10,fa0,fa0
; fadd.d fs11,fa0,fa0
; fadd.d fs0,fa0,fa0
; fadd.d fs1,fa0,fa0
; fadd.d fs2,fa0,fa0
; fadd.d fs3,fa0,fa0
; fadd.d ft4,fa0,ft4
; fadd.d ft5,ft5,ft6
; fadd.d ft6,ft7,fa1
; fadd.d ft7,fa2,fa3
; fadd.d fa0,fa4,fa5
; fadd.d fa1,fa6,fa7
; fadd.d fa2,ft8,ft9
; fadd.d fa3,ft10,ft11
; fadd.d fa4,ft0,ft1
; fadd.d fa5,ft2,ft3
; fadd.d fa6,fs4,fs5
; fadd.d fa7,fs6,fs7
; fadd.d ft8,fs8,fs9
; fadd.d ft9,fs10,fs11
; fadd.d ft10,fs0,fs1
; fadd.d ft11,fs2,fs3
; fadd.d ft4,ft4,ft5
; fadd.d ft5,ft6,ft7
; fadd.d ft6,fa0,fa1
; fadd.d ft7,fa2,fa3
; fadd.d fa0,fa4,fa5
; fadd.d fa1,fa6,fa7
; fadd.d fa2,ft8,ft9
; fadd.d fa3,ft10,ft11
; fadd.d ft4,ft4,ft5
; fadd.d ft5,ft6,ft7
; fadd.d ft6,fa0,fa1
; fadd.d ft7,fa2,fa3
; fadd.d ft4,ft4,ft5
; fadd.d ft5,ft6,ft7
; fadd.d fa0,ft4,ft5
; add sp,+96
; fld fs0,-8(sp)
; fld fs2,-16(sp)
; fld fs3,-24(sp)
; fld fs4,-32(sp)
; fld fs5,-40(sp)
; fld fs6,-48(sp)
; fld fs7,-56(sp)
; fld fs8,-64(sp)
; fld fs9,-72(sp)
; fld fs10,-80(sp)
; fld fs11,-88(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %f2(i64) -> i64 {
block0(v0: i64):
v1 = iadd.i64 v0, v0
v2 = iadd.i64 v0, v1
v3 = iadd.i64 v0, v2
v4 = iadd.i64 v0, v3
v5 = iadd.i64 v0, v4
v6 = iadd.i64 v0, v5
v7 = iadd.i64 v0, v6
v8 = iadd.i64 v0, v7
v9 = iadd.i64 v0, v8
v10 = iadd.i64 v0, v9
v11 = iadd.i64 v0, v10
v12 = iadd.i64 v0, v11
v13 = iadd.i64 v0, v12
v14 = iadd.i64 v0, v13
v15 = iadd.i64 v0, v14
v16 = iadd.i64 v0, v15
v17 = iadd.i64 v0, v16
v18 = iadd.i64 v0, v17
v19 = iadd.i64 v0, v1
v20 = iadd.i64 v2, v3
v21 = iadd.i64 v4, v5
v22 = iadd.i64 v6, v7
v23 = iadd.i64 v8, v9
v24 = iadd.i64 v10, v11
v25 = iadd.i64 v12, v13
v26 = iadd.i64 v14, v15
v27 = iadd.i64 v16, v17
v28 = iadd.i64 v18, v19
v29 = iadd.i64 v20, v21
v30 = iadd.i64 v22, v23
v31 = iadd.i64 v24, v25
v32 = iadd.i64 v26, v27
v33 = iadd.i64 v28, v29
v34 = iadd.i64 v30, v31
v35 = iadd.i64 v32, v33
v36 = iadd.i64 v34, v35
return v36
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; sd s6,-8(sp)
; sd s7,-16(sp)
; sd s8,-24(sp)
; sd s9,-32(sp)
; sd s10,-40(sp)
; sd s11,-48(sp)
; add sp,-48
; block0:
; add t4,a0,a0
; add t0,a0,t4
; add t1,a0,t0
; add t2,a0,t1
; add a1,a0,t2
; add a2,a0,a1
; add a3,a0,a2
; add a4,a0,a3
; add a5,a0,a4
; add a6,a0,a5
; add a7,a0,a6
; add t3,a0,a7
; add s6,a0,t3
; add s7,a0,s6
; add s8,a0,s7
; add s9,a0,s8
; add s10,a0,s9
; add s11,a0,s10
; add t4,a0,t4
; add t0,t0,t1
; add t1,t2,a1
; add t2,a2,a3
; add a0,a4,a5
; add a1,a6,a7
; add a2,t3,s6
; add a3,s7,s8
; add a4,s9,s10
; add t4,s11,t4
; add t0,t0,t1
; add t1,t2,a0
; add t2,a1,a2
; add a0,a3,a4
; add t4,t4,t0
; add t0,t1,t2
; add t4,a0,t4
; add a0,t0,t4
; add sp,+48
; ld s6,-8(sp)
; ld s7,-16(sp)
; ld s8,-24(sp)
; ld s9,-32(sp)
; ld s10,-40(sp)
; ld s11,-48(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret

View File

@@ -0,0 +1,40 @@
test compile precise-output
set unwind_info=false
target riscv64
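;; ireduce from i128 is free on this backend: the low 64 bits already sit in
;; a0, so every case compiles to a bare `ret`.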
function %ireduce_128_64(i128) -> i64 {
block0(v0: i128):
v1 = ireduce.i64 v0
return v1
}
; block0:
; ret
function %ireduce_128_32(i128) -> i32 {
block0(v0: i128):
v1 = ireduce.i32 v0
return v1
}
; block0:
; ret
function %ireduce_128_16(i128) -> i16 {
block0(v0: i128):
v1 = ireduce.i16 v0
return v1
}
; block0:
; ret
function %ireduce_128_8(i128) -> i8 {
block0(v0: i128):
v1 = ireduce.i8 v0
return v1
}
; block0:
; ret


@@ -0,0 +1,103 @@
test compile precise-output
set unwind_info=false
target riscv64
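;; Reference values (r64) are carried in ordinary integer registers;
;; `is_null` and `is_invalid` get dedicated pseudo-instructions, and `null`
;; is simply `li a0,0`.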
function %f0(r64) -> r64 {
block0(v0: r64):
return v0
}
; block0:
; ret
function %f1(r64) -> b1 {
block0(v0: r64):
v1 = is_null v0
return v1
}
; block0:
; is_null a0,a0
; ret
function %f2(r64) -> b1 {
block0(v0: r64):
v1 = is_invalid v0
return v1
}
; block0:
; is_invalid a0,a0
; ret
function %f3() -> r64 {
block0:
v0 = null.r64
return v0
}
; block0:
; li a0,0
; ret
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
ss0 = explicit_slot 8
block0(v0: r64, v1: r64):
v2 = call fn0(v0)
stack_store.r64 v0, ss0
brz v2, block1(v1, v0)
jump block2(v0, v1)
block1(v3: r64, v4: r64):
jump block3(v3, v4)
block2(v5: r64, v6: r64):
jump block3(v5, v6)
block3(v7: r64, v8: r64):
v9 = stack_load.r64 ss0
return v7, v8, v9
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; sd s9,-8(sp)
; add sp,-48
; block0:
; sd a0,8(nominal_sp)
; sd a1,16(nominal_sp)
; mv s9,a2
; load_sym a3,%f+0
; callind a3
; load_addr a2,nsp+0
; ld t1,8(nominal_sp)
; sd t1,0(a2)
; beq a0,zero,taken(label1),not_taken(label3)
; block1:
; j label2
; block2:
; mv a1,t1
; ld a0,16(nominal_sp)
; j label5
; block3:
; j label4
; block4:
; mv a0,t1
; ld a1,16(nominal_sp)
; j label5
; block5:
; load_addr a4,nsp+0
; ld a4,0(a4)
; mv a2,s9
; sd a4,0(a2)
; add sp,+48
; ld s9,-8(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret


@@ -0,0 +1,28 @@
test compile precise-output
set unwind_info=false
target riscv64
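;; Constant shift amounts fold into the immediate forms (`slli`/`slliw`)
;; rather than being materialized into a register first.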
function %f(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = ishl.i64 v0, v1
v3 = iadd.i64 v0, v2
return v3
}
; block0:
; slli a1,a0,3
; add a0,a0,a1
; ret
function %f(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 53
v2 = ishl.i32 v0, v1
return v2
}
; block0:
; slliw a0,a0,53
; ret


@@ -0,0 +1,451 @@
test compile precise-output
set unwind_info=false
target riscv64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
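;; Base RV64 (without the bit-manipulation extension) has no rotate
;; instructions, so rotates expand to a pair of opposing shifts combined with
;; `or`. The `select_reg ...##condition=(x eq zero)` guards the zero-amount
;; case: hardware shifts read only the low bits of the shift amount, so a
;; shift by the full type width would otherwise behave as a shift by zero.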
function %i128_rotr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = rotr.i128 v0, v1
return v2
}
; block0:
; andi a4,a2,127
; li a6,128
; sub a6,a6,a4
; srl t4,a0,a4
; sll t1,a1,a6
; select_reg a2,zero,t1##condition=(a4 eq zero)
; or a2,t4,a2
; srl a5,a1,a4
; sll a6,a0,a6
; select_reg t3,zero,a6##condition=(a4 eq zero)
; or t0,a5,t3
; li t2,64
; select_reg a0,t0,a2##condition=(a4 uge t2)
; select_reg a1,a2,t0##condition=(a4 uge t2)
; ret
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotr.i64 v0, v1
return v2
}
; block0:
; andi a1,a1,63
; li a3,64
; sub a3,a3,a1
; srl a6,a0,a1
; sll t3,a0,a3
; select_reg t0,zero,t3##condition=(a1 eq zero)
; or a0,a6,t0
; ret
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotr.i32 v0, v1
return v2
}
; block0:
; uext.w a2,a0
; andi a3,a1,31
; li a5,32
; sub a5,a5,a3
; srl t3,a2,a3
; sll t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotr.i16 v0, v1
return v2
}
; block0:
; uext.h a2,a0
; andi a3,a1,15
; li a5,16
; sub a5,a5,a3
; srl t3,a2,a3
; sll t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotr.i8 v0, v1
return v2
}
; block0:
; uext.b a2,a0
; andi a3,a1,7
; li a5,8
; sub a5,a5,a3
; srl t3,a2,a3
; sll t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %i128_rotl(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
v2 = rotl.i128 v0, v1
return v2
}
; block0:
; andi a4,a2,127
; li a6,128
; sub a6,a6,a4
; sll t4,a0,a4
; srl t1,a1,a6
; select_reg a2,zero,t1##condition=(a4 eq zero)
; or a2,t4,a2
; sll a5,a1,a4
; srl a6,a0,a6
; select_reg t3,zero,a6##condition=(a4 eq zero)
; or t0,a5,t3
; li t2,64
; select_reg a0,t0,a2##condition=(a4 uge t2)
; select_reg a1,a2,t0##condition=(a4 uge t2)
; ret
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotl.i64 v0, v1
return v2
}
; block0:
; andi a1,a1,63
; li a3,64
; sub a3,a3,a1
; sll a6,a0,a1
; srl t3,a0,a3
; select_reg t0,zero,t3##condition=(a1 eq zero)
; or a0,a6,t0
; ret
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotl.i32 v0, v1
return v2
}
; block0:
; uext.w a2,a0
; andi a3,a1,31
; li a5,32
; sub a5,a5,a3
; sll t3,a2,a3
; srl t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotl.i16 v0, v1
return v2
}
; block0:
; uext.h a2,a0
; andi a3,a1,15
; li a5,16
; sub a5,a5,a3
; sll t3,a2,a3
; srl t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotl.i8 v0, v1
return v2
}
; block0:
; uext.b a2,a0
; andi a3,a1,7
; li a5,8
; sub a5,a5,a3
; sll t3,a2,a3
; srl t0,a2,a5
; select_reg t2,zero,t0##condition=(a3 eq zero)
; or a0,t3,t2
; ret
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ushr.i64 v0, v1
return v2
}
; block0:
; srl a0,a0,a1
; ret
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ushr.i32 v0, v1
return v2
}
; block0:
; srlw a0,a0,a1
; ret
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ushr.i16 v0, v1
return v2
}
; block0:
; mv a5,a1
; uext.h a1,a0
; andi a3,a5,15
; srlw a0,a1,a3
; ret
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ushr.i8 v0, v1
return v2
}
; block0:
; mv a5,a1
; uext.b a1,a0
; andi a3,a5,7
; srlw a0,a1,a3
; ret
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ishl.i64 v0, v1
return v2
}
; block0:
; sll a0,a0,a1
; ret
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ishl.i32 v0, v1
return v2
}
; block0:
; sllw a0,a0,a1
; ret
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ishl.i16 v0, v1
return v2
}
; block0:
; andi a1,a1,15
; sllw a0,a0,a1
; ret
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ishl.i8 v0, v1
return v2
}
; block0:
; andi a1,a1,7
; sllw a0,a0,a1
; ret
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sshr.i64 v0, v1
return v2
}
; block0:
; sra a0,a0,a1
; ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sshr.i32 v0, v1
return v2
}
; block0:
; sraw a0,a0,a1
; ret
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = sshr.i16 v0, v1
return v2
}
; block0:
; mv a5,a1
; sext.h a1,a0
; andi a3,a5,15
; sra a0,a1,a3
; ret
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = sshr.i8 v0, v1
return v2
}
; block0:
; mv a5,a1
; sext.b a1,a0
; andi a3,a5,7
; sra a0,a1,a3
; ret
function %f20(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotr.i64 v0, v1
return v2
}
; block0:
; li a1,17
; andi a2,a1,63
; li a4,64
; sub a4,a4,a2
; srl a7,a0,a2
; sll t4,a0,a4
; select_reg t1,zero,t4##condition=(a2 eq zero)
; or a0,a7,t1
; ret
function %f21(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotl.i64 v0, v1
return v2
}
; block0:
; li a1,17
; andi a2,a1,63
; li a4,64
; sub a4,a4,a2
; sll a7,a0,a2
; srl t4,a0,a4
; select_reg t1,zero,t4##condition=(a2 eq zero)
; or a0,a7,t1
; ret
function %f22(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 17
v2 = rotl.i32 v0, v1
return v2
}
; block0:
; mv t4,a0
; li a0,17
; uext.w a2,t4
; andi a4,a0,31
; li a6,32
; sub a6,a6,a4
; sll t4,a2,a4
; srl t1,a2,a6
; select_reg a0,zero,t1##condition=(a4 eq zero)
; or a0,t4,a0
; ret
function %f23(i16) -> i16 {
block0(v0: i16):
v1 = iconst.i32 10
v2 = rotl.i16 v0, v1
return v2
}
; block0:
; mv t4,a0
; li a0,10
; uext.h a2,t4
; andi a4,a0,15
; li a6,16
; sub a6,a6,a4
; sll t4,a2,a4
; srl t1,a2,a6
; select_reg a0,zero,t1##condition=(a4 eq zero)
; or a0,t4,a0
; ret
function %f24(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i32 3
v2 = rotl.i8 v0, v1
return v2
}
; block0:
; mv t4,a0
; li a0,3
; uext.b a2,t4
; andi a4,a0,7
; li a6,8
; sub a6,a6,a4
; sll t4,a2,a4
; srl t1,a2,a6
; select_reg a0,zero,t1##condition=(a4 eq zero)
; or a0,t4,a0
; ret
function %f25(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ushr.i64 v0, v1
return v2
}
; block0:
; srli a0,a0,17
; ret
function %f26(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = sshr.i64 v0, v1
return v2
}
; block0:
; srai a0,a0,17
; ret
function %f27(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ishl.i64 v0, v1
return v2
}
; block0:
; slli a0,a0,17
; ret


@@ -0,0 +1,206 @@
test compile precise-output
set unwind_info=false
target riscv64
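;; Stack-limit checks compare sp against the limit (an incoming register or a
;; value loaded through the vmctx global) using `trap_ifc stk_ovf`;
;; sufficiently large frames also call %Probestack before adjusting sp.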
function %foo() {
block0:
return
}
; block0:
; ret
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
return
}
; block0:
; ret
function %stack_limit_gv_leaf_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
block0(v0: i64):
return
}
; block0:
; ret
function %stack_limit_call_zero(i64 stack_limit) {
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; trap_ifc stk_ovf##(sp ult a0)
; block0:
; load_sym t2,%foo+0
; callind t2
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_limit_gv_call_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; ld t6,0(a0)
; ld t6,4(t6)
; trap_ifc stk_ovf##(sp ult t6)
; block0:
; load_sym t2,%foo+0
; callind t2
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_limit(i64 stack_limit) {
ss0 = explicit_slot 168
block0(v0: i64):
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; andi t6,a0,176
; trap_ifc stk_ovf##(sp ult t6)
; add sp,-176
; block0:
; add sp,+176
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %huge_stack_limit(i64 stack_limit) {
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; trap_ifc stk_ovf##(sp ult a0)
; lui t5,98
; addi t5,t5,2688
; add t6,t5,a0
; trap_ifc stk_ovf##(sp ult t6)
; lui a0,98
; addi a0,a0,2688
; call %Probestack
; add sp,-400000
; block0:
; add sp,+400000
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %limit_preamble(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; ld t6,0(a0)
; ld t6,4(t6)
; andi t6,t6,32
; trap_ifc stk_ovf##(sp ult t6)
; add sp,-32
; block0:
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %limit_preamble_huge(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; ld t6,0(a0)
; ld t6,4(t6)
; trap_ifc stk_ovf##(sp ult t6)
; lui t5,98
; addi t5,t5,2688
; add t6,t5,t6
; trap_ifc stk_ovf##(sp ult t6)
; lui a0,98
; addi a0,a0,2688
; call %Probestack
; add sp,-400000
; block0:
; add sp,+400000
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %limit_preamble_huge_offset(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+400000
stack_limit = gv1
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; ld t6,400000(a0)
; andi t6,t6,32
; trap_ifc stk_ovf##(sp ult t6)
; add sp,-32
; block0:
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret


@@ -0,0 +1,630 @@
test compile precise-output
set unwind_info=false
target riscv64
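;; Stack-slot accesses are addressed relative to the nominal stack pointer
;; via `load_addr ...,nsp+off`; the 100000-byte frames additionally call
;; %Probestack in the prologue before sp is moved.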
function %stack_addr_small() -> i64 {
ss0 = explicit_slot 8
block0:
v0 = stack_addr.i64 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr a0,nsp+0
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_addr_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0:
v0 = stack_addr.i64 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr a0,nsp+0
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_load_small() -> i64 {
ss0 = explicit_slot 8
block0:
v0 = stack_load.i64 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr t2,nsp+0
; ld a0,0(t2)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_load_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0:
v0 = stack_load.i64 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr t2,nsp+0
; ld a0,0(t2)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_store_small(i64) {
ss0 = explicit_slot 8
block0(v0: i64):
stack_store.i64 v0, ss0
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr t2,nsp+0
; sd a0,0(t2)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %stack_store_big(i64) {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0(v0: i64):
stack_store.i64 v0, ss0
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr t2,nsp+0
; sd a0,0(t2)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %b1_spill_slot(b1) -> b1, i64 {
ss0 = explicit_slot 1000
block0(v0: b1):
v1 = iconst.i64 1
v2 = iconst.i64 2
v3 = iconst.i64 3
v4 = iconst.i64 4
v5 = iconst.i64 5
v6 = iconst.i64 6
v7 = iconst.i64 7
v8 = iconst.i64 8
v9 = iconst.i64 9
v10 = iconst.i64 10
v11 = iconst.i64 11
v12 = iconst.i64 12
v13 = iconst.i64 13
v14 = iconst.i64 14
v15 = iconst.i64 15
v16 = iconst.i64 16
v17 = iconst.i64 17
v18 = iconst.i64 18
v19 = iconst.i64 19
v20 = iconst.i64 20
v21 = iconst.i64 21
v22 = iconst.i64 22
v23 = iconst.i64 23
v24 = iconst.i64 24
v25 = iconst.i64 25
v26 = iconst.i64 26
v27 = iconst.i64 27
v28 = iconst.i64 28
v29 = iconst.i64 29
v30 = iconst.i64 30
v31 = iconst.i64 31
v32 = iconst.i64 32
v33 = iconst.i64 33
v34 = iconst.i64 34
v35 = iconst.i64 35
v36 = iconst.i64 36
v37 = iconst.i64 37
v38 = iconst.i64 38
v39 = iconst.i64 39
v40 = iconst.i64 30
v41 = iconst.i64 31
v42 = iconst.i64 32
v43 = iconst.i64 33
v44 = iconst.i64 34
v45 = iconst.i64 35
v46 = iconst.i64 36
v47 = iconst.i64 37
v48 = iconst.i64 38
v49 = iconst.i64 39
v50 = iconst.i64 30
v51 = iconst.i64 31
v52 = iconst.i64 32
v53 = iconst.i64 33
v54 = iconst.i64 34
v55 = iconst.i64 35
v56 = iconst.i64 36
v57 = iconst.i64 37
v58 = iconst.i64 38
v59 = iconst.i64 39
v60 = iconst.i64 30
v61 = iconst.i64 31
v62 = iconst.i64 32
v63 = iconst.i64 33
v64 = iconst.i64 34
v65 = iconst.i64 35
v66 = iconst.i64 36
v67 = iconst.i64 37
v68 = iconst.i64 38
v69 = iconst.i64 39
v70 = iadd.i64 v1, v2
v71 = iadd.i64 v3, v4
v72 = iadd.i64 v5, v6
v73 = iadd.i64 v7, v8
v74 = iadd.i64 v9, v10
v75 = iadd.i64 v11, v12
v76 = iadd.i64 v13, v14
v77 = iadd.i64 v15, v16
v78 = iadd.i64 v17, v18
v79 = iadd.i64 v19, v20
v80 = iadd.i64 v21, v22
v81 = iadd.i64 v23, v24
v82 = iadd.i64 v25, v26
v83 = iadd.i64 v27, v28
v84 = iadd.i64 v29, v30
v85 = iadd.i64 v31, v32
v86 = iadd.i64 v33, v34
v87 = iadd.i64 v35, v36
v88 = iadd.i64 v37, v38
v89 = iadd.i64 v39, v40
v90 = iadd.i64 v41, v42
v91 = iadd.i64 v43, v44
v92 = iadd.i64 v45, v46
v93 = iadd.i64 v47, v48
v94 = iadd.i64 v49, v50
v95 = iadd.i64 v51, v52
v96 = iadd.i64 v53, v54
v97 = iadd.i64 v55, v56
v98 = iadd.i64 v57, v58
v99 = iadd.i64 v59, v60
v100 = iadd.i64 v61, v62
v101 = iadd.i64 v63, v64
v102 = iadd.i64 v65, v66
v103 = iadd.i64 v67, v68
v104 = iadd.i64 v69, v70
v105 = iadd.i64 v71, v72
v106 = iadd.i64 v73, v74
v107 = iadd.i64 v75, v76
v108 = iadd.i64 v77, v78
v109 = iadd.i64 v79, v80
v110 = iadd.i64 v81, v82
v111 = iadd.i64 v83, v84
v112 = iadd.i64 v85, v86
v113 = iadd.i64 v87, v88
v114 = iadd.i64 v89, v90
v115 = iadd.i64 v91, v92
v116 = iadd.i64 v93, v94
v117 = iadd.i64 v95, v96
v118 = iadd.i64 v97, v98
v119 = iadd.i64 v99, v100
v120 = iadd.i64 v101, v102
v121 = iadd.i64 v103, v104
v122 = iadd.i64 v105, v106
v123 = iadd.i64 v107, v108
v124 = iadd.i64 v109, v110
v125 = iadd.i64 v111, v112
v126 = iadd.i64 v113, v114
v127 = iadd.i64 v115, v116
v128 = iadd.i64 v117, v118
v129 = iadd.i64 v119, v120
v130 = iadd.i64 v121, v122
v131 = iadd.i64 v123, v124
v132 = iadd.i64 v125, v126
v133 = iadd.i64 v127, v128
v134 = iadd.i64 v129, v130
v135 = iadd.i64 v131, v132
v136 = iadd.i64 v133, v134
v137 = iadd.i64 v135, v136
return v0, v137
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; sd s1,-8(sp)
; sd s2,-16(sp)
; sd s3,-24(sp)
; sd s4,-32(sp)
; sd s5,-40(sp)
; sd s6,-48(sp)
; sd s7,-56(sp)
; sd s8,-64(sp)
; sd s9,-72(sp)
; sd s10,-80(sp)
; sd s11,-88(sp)
; add sp,-1280
; block0:
; sd a0,1000(nominal_sp)
; li t0,2
; addi a1,t0,1
; sd a1,1176(nominal_sp)
; li t0,4
; addi a2,t0,3
; sd a2,1168(nominal_sp)
; li t0,6
; addi a3,t0,5
; sd a3,1160(nominal_sp)
; li t0,8
; addi a4,t0,7
; sd a4,1152(nominal_sp)
; li t0,10
; addi a5,t0,9
; sd a5,1144(nominal_sp)
; li t0,12
; addi a6,t0,11
; sd a6,1136(nominal_sp)
; li t0,14
; addi a7,t0,13
; sd a7,1128(nominal_sp)
; li t0,16
; addi t3,t0,15
; sd t3,1120(nominal_sp)
; li t0,18
; addi t4,t0,17
; sd t4,1112(nominal_sp)
; li t0,20
; addi t0,t0,19
; sd t0,1104(nominal_sp)
; li t0,22
; addi t1,t0,21
; sd t1,1096(nominal_sp)
; li t0,24
; addi s8,t0,23
; sd s8,1088(nominal_sp)
; li t0,26
; addi s9,t0,25
; sd s9,1080(nominal_sp)
; li t0,28
; addi s10,t0,27
; sd s10,1072(nominal_sp)
; li t0,30
; addi s11,t0,29
; sd s11,1064(nominal_sp)
; li t0,32
; addi s1,t0,31
; sd s1,1056(nominal_sp)
; li t0,34
; addi s2,t0,33
; sd s2,1048(nominal_sp)
; li t0,36
; addi s3,t0,35
; sd s3,1040(nominal_sp)
; li t0,38
; addi s4,t0,37
; sd s4,1032(nominal_sp)
; li t0,30
; addi s5,t0,39
; sd s5,1024(nominal_sp)
; li t0,32
; addi s6,t0,31
; sd s6,1016(nominal_sp)
; li t0,34
; addi s7,t0,33
; sd s7,1008(nominal_sp)
; li t0,36
; addi s7,t0,35
; li t0,38
; addi a0,t0,37
; li t0,30
; addi t2,t0,39
; li t0,32
; addi a1,t0,31
; li t0,34
; addi a2,t0,33
; li t0,36
; addi a3,t0,35
; li t0,38
; addi a4,t0,37
; li t0,30
; addi a5,t0,39
; li t0,32
; addi a6,t0,31
; li t0,34
; addi a7,t0,33
; li t0,36
; addi t3,t0,35
; li t0,38
; addi t4,t0,37
; ld t0,1176(nominal_sp)
; addi t0,t0,39
; ld t1,1160(nominal_sp)
; ld s4,1168(nominal_sp)
; add t1,s4,t1
; ld s11,1144(nominal_sp)
; ld s9,1152(nominal_sp)
; add s8,s9,s11
; ld s5,1128(nominal_sp)
; ld s3,1136(nominal_sp)
; add s9,s3,s5
; ld s10,1112(nominal_sp)
; ld s11,1120(nominal_sp)
; add s10,s11,s10
; ld s4,1096(nominal_sp)
; ld s2,1104(nominal_sp)
; add s11,s2,s4
; ld s1,1080(nominal_sp)
; ld s2,1088(nominal_sp)
; add s1,s2,s1
; ld s3,1064(nominal_sp)
; ld s2,1072(nominal_sp)
; add s2,s2,s3
; ld s3,1048(nominal_sp)
; ld s6,1056(nominal_sp)
; add s3,s6,s3
; ld s4,1032(nominal_sp)
; ld s5,1040(nominal_sp)
; add s4,s5,s4
; ld s6,1016(nominal_sp)
; ld s5,1024(nominal_sp)
; add s5,s5,s6
; ld s6,1008(nominal_sp)
; add s7,s6,s7
; add t2,a0,t2
; add a0,a1,a2
; add a1,a3,a4
; add a2,a5,a6
; add a3,a7,t3
; add a4,t4,t0
; add t1,t1,s8
; add a5,s9,s10
; add a6,s11,s1
; add a7,s2,s3
; add t3,s4,s5
; add t2,s7,t2
; add a0,a0,a1
; add a1,a2,a3
; add t1,a4,t1
; add a2,a5,a6
; add a3,a7,t3
; add t2,t2,a0
; add t1,a1,t1
; add a0,a2,a3
; add t1,t2,t1
; add a1,a0,t1
; ld a0,1000(nominal_sp)
; add sp,+1280
; ld s1,-8(sp)
; ld s2,-16(sp)
; ld s3,-24(sp)
; ld s4,-32(sp)
; ld s5,-40(sp)
; ld s6,-48(sp)
; ld s7,-56(sp)
; ld s8,-64(sp)
; ld s9,-72(sp)
; ld s10,-80(sp)
; ld s11,-88(sp)
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_store(i128) {
ss0 = explicit_slot 16
block0(v0: i128):
stack_store.i128 v0, ss0
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; mv a2,a0
; load_addr a0,nsp+0
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_store_inst_offset(i128) {
ss0 = explicit_slot 16
ss1 = explicit_slot 16
block0(v0: i128):
stack_store.i128 v0, ss1+16
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-32
; block0:
; mv a2,a0
; load_addr a0,nsp+32
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_store_big(i128) {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0(v0: i128):
stack_store.i128 v0, ss0
return
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; mv a2,a0
; load_addr a0,nsp+0
; sd a2,0(a0)
; sd a1,8(a0)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_load() -> i128 {
ss0 = explicit_slot 16
block0:
v0 = stack_load.i128 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-16
; block0:
; load_addr a1,nsp+0
; ld a0,0(a1)
; ld a1,8(a1)
; add sp,+16
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_load_inst_offset() -> i128 {
ss0 = explicit_slot 16
ss1 = explicit_slot 16
block0:
v0 = stack_load.i128 ss1+16
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; add sp,-32
; block0:
; load_addr a1,nsp+32
; ld a0,0(a1)
; ld a1,8(a1)
; add sp,+32
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret
function %i128_stack_load_big() -> i128 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0:
v0 = stack_load.i128 ss0
return v0
}
; add sp,-16
; sd ra,8(sp)
; sd fp,0(sp)
; mv fp,sp
; lui a0,24
; addi a0,a0,1712
; call %Probestack
; add sp,-100016
; block0:
; load_addr a1,nsp+0
; ld a0,0(a1)
; ld a1,8(a1)
; add sp,+100016
; ld ra,8(sp)
; ld fp,0(sp)
; add sp,+16
; ret


@@ -0,0 +1,16 @@
test compile precise-output
set unwind_info=false
target riscv64
function %f() -> i64 {
gv0 = symbol %my_global
block0:
v0 = symbol_value.i64 gv0
return v0
}
; block0:
; load_sym a0,%my_global+0
; ret


@@ -0,0 +1,36 @@
test compile precise-output
set unwind_info=false
target riscv64
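;; `trap` lowers to the `udf` pseudo-instruction tagged with its trap code,
;; `trapif` to an integer compare feeding `trap_if`, and `debugtrap` to the
;; standard `ebreak` instruction.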
function %f() {
block0:
trap user0
}
; block0:
; udf##trap_code=user0
function %g(i64) {
block0(v0: i64):
v1 = iconst.i64 42
v2 = ifcmp v0, v1
trapif eq v2, user0
return
}
; block0:
; li t2,42
; eq a1,a0,t2##ty=i64
; trap_if a1,user0
; ret
function %h() {
block0:
debugtrap
return
}
; block0:
; ebreak
; ret


@@ -0,0 +1,124 @@
test compile precise-output
set unwind_info=false
target riscv64
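;; Integer extensions are emitted as `sext.{b,h,w}`/`uext.{b,h,w}`
;; pseudo-instructions; only the source width matters, so extending i8 to
;; i16, i32, or i64 all produce the same instruction.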
function %f_u_8_64(i8) -> i64 {
block0(v0: i8):
v1 = uextend.i64 v0
return v1
}
; block0:
; uext.b a0,a0
; ret
function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
v1 = uextend.i32 v0
return v1
}
; block0:
; uext.b a0,a0
; ret
function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
v1 = uextend.i16 v0
return v1
}
; block0:
; uext.b a0,a0
; ret
function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
return v1
}
; block0:
; sext.b a0,a0
; ret
function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
v1 = sextend.i32 v0
return v1
}
; block0:
; sext.b a0,a0
; ret
function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
v1 = sextend.i16 v0
return v1
}
; block0:
; sext.b a0,a0
; ret
function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
v1 = uextend.i64 v0
return v1
}
; block0:
; uext.h a0,a0
; ret
function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
v1 = uextend.i32 v0
return v1
}
; block0:
; uext.h a0,a0
; ret
function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
v1 = sextend.i64 v0
return v1
}
; block0:
; sext.h a0,a0
; ret
function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
v1 = sextend.i32 v0
return v1
}
; block0:
; sext.h a0,a0
; ret
function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
v1 = uextend.i64 v0
return v1
}
; block0:
; uext.w a0,a0
; ret
function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
v1 = sextend.i64 v0
return v1
}
; block0:
; sext.w a0,a0
; ret


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %alias(i8) -> i8 {
block0(v0: i8):


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64 has_m
function %add_i64(i64, i64) -> i64 {
block0(v0: i64,v1: i64):


@@ -3,6 +3,7 @@ target s390x
target aarch64
target aarch64 has_lse
target x86_64
target riscv64
; We can't test that these instructions are actually atomic, but we can
; test that they perform their operation correctly


@@ -2,7 +2,8 @@ test run
target aarch64
target aarch64 has_lse
target x86_64
target s390x
target riscv64 has_a
; We can't test that these instructions are actually atomic, but we can
; test that they perform their operation correctly


@@ -4,6 +4,7 @@ target s390x has_mie2
target aarch64
target aarch64 has_lse
target x86_64
target riscv64 has_a
; We can't test that these instructions are actually atomic, but we can
; test that they perform their operation correctly


@@ -4,6 +4,7 @@ target s390x has_mie2
target aarch64
target aarch64 has_lse
target x86_64
target riscv64
; We can't test that these instructions are actually atomic, but we can
; test that they perform their operation correctly


@@ -3,6 +3,7 @@ test run
target aarch64
target x86_64
target s390x
target riscv64
function %bextend_b1_b8(b1) -> b8 {
block0(v0: b1):


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %bint_b1_i8_true() -> i8 {
block0:


@@ -1,6 +1,7 @@
test run
target aarch64
target s390x
target riscv64
target s390x has_mie2
; target x86_64 TODO: Not yet implemented on x86_64


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %bitrev_i8(i8) -> i8 {
block0(v0: i8):


@@ -2,6 +2,7 @@ test interpret
test run
target aarch64
target s390x
target riscv64
function %bmask_b64_i64(b64) -> i64 {
block0(v0: b64):


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %jump() -> b1 {
block0:


@@ -3,7 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %bricmp_eq_i64(i64, i64) -> b1 {
block0(v0: i64, v1: i64):


@@ -4,6 +4,7 @@ target aarch64
target aarch64 use_bti
target x86_64
target s390x
target riscv64
function %br_table_i32(i32) -> i32 {
jt0 = jump_table [block1, block2, block2, block3]
@@ -38,4 +39,4 @@ block5(v5: i32):
; run: %br_table_i32(4) == 8
; run: %br_table_i32(5) == 9
; run: %br_table_i32(6) == 10
; run: %br_table_i32(-1) == 3


@@ -3,6 +3,7 @@ test run
target aarch64
target x86_64
target s390x
target riscv64
function %breduce_b8_b1(b8) -> b1 {
block0(v0: b8):


@@ -4,6 +4,7 @@ target x86_64
target x86_64 has_sse41=false
target aarch64
target s390x
target riscv64
function %ceil_f32(f32) -> f32 {
block0(v0: f32):


@@ -1,6 +1,7 @@
test interpret
test run
target aarch64
target riscv64
target s390x
; not implemented on `x86_64`


@@ -4,6 +4,7 @@ target aarch64
target s390x
target x86_64
target x86_64 has_lzcnt
target riscv64
function %clz_i8(i8) -> i8 {
block0(v0: i8):


@@ -2,6 +2,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %i8_iconst_0() -> i8 {
block0:


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %fcvt_to_sint(f32) -> i32 {
block0(v0: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target s390x
target aarch64
;; target riscv64: vector types are not supported.
function %fpromote_f32_f64(i64 vmctx, i64, f32) -> f64 {
gv0 = vmctx


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
target x86_64 has_bmi1
function %ctz_i8(i8) -> i8 {


@@ -3,6 +3,8 @@ set avoid_div_traps=false
target aarch64
target s390x
target x86_64
target riscv64
; Tests that the `avoid_div_traps` flag prevents a trap when {s,u}rem is called
; with INT_MIN % -1.


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
;;;; basic uextend


@@ -3,6 +3,7 @@ test run
target aarch64
target x86_64
target s390x
target riscv64
function %fabs_f32(f32) -> f32 {
block0(v0: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fadd_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_eq_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_ge_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_gt_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_le_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_lt_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fcmp_ne_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_one_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_ord_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_ueq_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_uge_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_ugt_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_ule_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_ult_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,8 @@ test interpret
test run
target x86_64
target s390x
target riscv64
function %fcmp_uno_f32(f32, f32) -> b1 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target aarch64
target x86_64
target s390x
target riscv64
function %fcopysign_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fdiv_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -4,6 +4,7 @@ target x86_64
target x86_64 has_sse41=false
target aarch64
target s390x
target riscv64
function %floor_f32(f32) -> f32 {
block0(v0: f32):


@@ -4,6 +4,7 @@ target aarch64
target s390x
target x86_64 has_avx has_fma
target x86_64 has_avx=false has_fma=false
target riscv64
function %fma_f32(f32, f32, f32) -> f32 {
block0(v0: f32, v1: f32, v2: f32):
@@ -148,4 +149,4 @@ block0(v0: f32, v1: f32, v2: f32):
v4 = fma v0, v1, v3
return v4
}
; run: %fma_load_f32(0x9.0, 0x9.0, 0x9.0) == 0x1.680000p6


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target aarch64
target riscv64
; target s390x FIXME: This currently fails under qemu due to a qemu bug
function %fmax_p_f32(f32, f32) -> f32 {


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fmax_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -2,6 +2,7 @@ test interpret
test run
target x86_64
target aarch64
target riscv64
; target s390x FIXME: This currently fails under qemu due to a qemu bug
function %fmin_p_f32(f32, f32) -> f32 {


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fmin_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fmul_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target aarch64
target x86_64
target s390x
target riscv64
function %fneg_f32(f32) -> f32 {
block0(v0: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target aarch64
target s390x
target riscv64
function %fsub_f32(f32, f32) -> f32 {
block0(v0: f32, v1: f32):


@@ -3,6 +3,7 @@ test run
target x86_64
target s390x
target aarch64
target riscv64
; Store a value in the heap using `heap_addr` and load it using `global_value`
function %store_load(i64 vmctx, i64, i32) -> i32 {


@@ -3,7 +3,7 @@ test run
target x86_64
target s390x
target aarch64
target riscv64
function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %add_i128(i128, i128) -> i128 {
block0(v0: i128,v1: i128):


@@ -1,5 +1,6 @@
test run
target aarch64
target riscv64
target s390x
function %band_not_i128(i128, i128) -> i128 {


@@ -2,6 +2,7 @@ test interpret
test run
target aarch64
target s390x
target riscv64
function %bextend_b1_b128(b1) -> b128 {
block0(v0: b1):


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %bint_b1_i128_true() -> i128 {
block0:


@@ -3,6 +3,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %ctz_i128(i128) -> i128 {
block0(v0: i128):


@@ -3,6 +3,8 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %bnot_i128(i128) -> i128 {
block0(v0: i128):


@@ -3,6 +3,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %reverse_bits_zero() -> b1 {
block0:


@@ -1,6 +1,7 @@
test interpret
test run
target aarch64
target riscv64
target s390x
function %bmask_b128_i128(b128) -> i128 {


@@ -1,5 +1,6 @@
test run
target aarch64
target riscv64
target s390x
function %bor_not_i128(i128, i128) -> i128 {


@@ -3,7 +3,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %i128_brz(i128) -> b1 {
block0(v0: i128):


@@ -1,4 +1,5 @@
test interpret
target riscv64
function %breduce_b128_b1(b128) -> b1 {
block0(v0: b128):


@@ -1,5 +1,6 @@
test run
target aarch64
target riscv64
target s390x
function %i128_bricmp_eq(i128, i128) -> b1 {


@@ -1,5 +1,6 @@
test run
target aarch64
target riscv64
target s390x
function %bxor_not_i128(i128, i128) -> i128 {


@@ -1,5 +1,6 @@
test run
target aarch64
target riscv64
target s390x
function %cls_i128(i128) -> i128 {


@@ -3,6 +3,7 @@ test run
target aarch64
target s390x
target x86_64
target riscv64
function %iconcat_isplit(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):
@@ -15,3 +16,4 @@ block0(v0: i64, v1: i64):
; run: %iconcat_isplit(0xFFFFFFFF_FFFFFFFF, 0) == [0xFFFFFFFF_FFFFFFFF, 0]
; run: %iconcat_isplit(0, 0xFFFFFFFF_FFFFFFFF) == [0, 0xFFFFFFFF_FFFFFFFF]
; run: %iconcat_isplit(0x01010101_01010101, 0x02020202_02020202) == [0x01010101_01010101, 0x02020202_02020202]


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %i128_const_0() -> i128 {
block0:


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %i128_uextend_i64(i64) -> i128 {
block0(v0: i64):


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %icmp_eq_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):


@@ -4,6 +4,7 @@ set enable_llvm_abi_extensions=true
target aarch64
target s390x
target x86_64
target riscv64
function %ireduce_128_64(i128) -> i64 {
block0(v0: i128):

Some files were not shown because too many files have changed in this diff.