test compile precise-output
set unwind_info=false
target riscv64

function %f1(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = iadd.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   add a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   add a0, a0, a1
;   ret

function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = isub.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   sub a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   sub a0, a0, a1
;   ret

function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = imul.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   mul a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mul a0, a0, a1
;   ret

function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = umulhi.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   mulhu a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mulhu a0, a0, a1
;   ret

function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = smulhi.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   mulh a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mulh a0, a0, a1
;   ret

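;; sdiv must trap on both overflow and a zero divisor: the guard below
;; materializes INT64_MIN (1 << 63), traps with int_ovf when the dividend is
;; INT64_MIN and the divisor is -1, then with int_divz on a zero divisor,
;; before emitting the bare `div`.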
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = sdiv.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li a2,-1
;   li a3,1
;   slli a4,a3,63
;   eq a6,a2,a1##ty=i64
;   eq t3,a4,a0##ty=i64
;   and t0,a6,t3
;   trap_if t0,int_ovf
;   trap_ifc int_divz##(zero eq a1)
;   div a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi a2, zero, -1
;   addi a3, zero, 1
;   slli a4, a3, 0x3f
;   bne a2, a1, 0xc
;   addi a6, zero, 1
;   j 8
;   mv a6, zero
;   bne a4, a0, 0xc
;   addi t3, zero, 1
;   j 8
;   mv t3, zero
;   and t0, a6, t3
;   beqz t0, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_ovf
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   div a0, a0, a1
;   ret

function %f7(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = sdiv.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,2
;   li a1,-1
;   li a3,1
;   slli a5,a3,63
;   eq a7,a1,t2##ty=i64
;   eq t4,a5,a0##ty=i64
;   and t1,a7,t4
;   trap_if t1,int_ovf
;   li a1,2
;   trap_ifc int_divz##(zero eq a1)
;   li a4,2
;   div a0,a0,a4
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 2
;   addi a1, zero, -1
;   addi a3, zero, 1
;   slli a5, a3, 0x3f
;   bne a1, t2, 0xc
;   addi a7, zero, 1
;   j 8
;   mv a7, zero
;   bne a5, a0, 0xc
;   addi t4, zero, 1
;   j 8
;   mv t4, zero
;   and t1, a7, t4
;   beqz t1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_ovf
;   addi a1, zero, 2
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   addi a4, zero, 2
;   div a0, a0, a4
;   ret

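;; Unsigned division cannot overflow, so udiv only needs the int_divz check.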
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = udiv.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   trap_ifc int_divz##(zero eq a1)
;   divu a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   divu a0, a0, a1
;   ret

function %f9(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = udiv.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,2
;   trap_ifc int_divz##(zero eq t2)
;   li a2,2
;   divu a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 2
;   bne zero, t2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   addi a2, zero, 2
;   divu a0, a0, a2
;   ret

function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = srem.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   trap_ifc int_divz##(zero eq a1)
;   rem a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   rem a0, a0, a1
;   ret

function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = urem.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   trap_ifc int_divz##(zero eq a1)
;   remu a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   remu a0, a0, a1
;   ret

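;; For i32 sdiv both operands are sign-extended first; the overflow check
;; shifts the dividend left by 32 and compares against 1 << 63, i.e. it tests
;; dividend == INT32_MIN while the other compare tests divisor == -1.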
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = sdiv.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   sext.w a0,a0
;   sext.w a2,a1
;   li a4,-1
;   li a6,1
;   slli t3,a6,63
;   slli t0,a0,32
;   eq t2,a4,a2##ty=i32
;   eq a1,t3,t0##ty=i32
;   and a3,t2,a1
;   trap_if a3,int_ovf
;   trap_ifc int_divz##(zero eq a2)
;   divw a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   sext.w a0, a0
;   sext.w a2, a1
;   addi a4, zero, -1
;   addi a6, zero, 1
;   slli t3, a6, 0x3f
;   slli t0, a0, 0x20
;   bne a4, a2, 0xc
;   addi t2, zero, 1
;   j 8
;   mv t2, zero
;   bne t3, t0, 0xc
;   addi a1, zero, 1
;   j 8
;   mv a1, zero
;   and a3, t2, a1
;   beqz a3, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_ovf
;   bne zero, a2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   divw a0, a0, a2
;   ret

function %f13(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 2
  v2 = sdiv.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   sext.w t2,a0
;   li a1,2
;   sext.w a3,a1
;   li a5,-1
;   li a7,1
;   slli t4,a7,63
;   slli t1,t2,32
;   eq a0,a5,a3##ty=i32
;   eq a2,t4,t1##ty=i32
;   and a4,a0,a2
;   trap_if a4,int_ovf
;   trap_ifc int_divz##(zero eq a3)
;   divw a0,t2,a3
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   sext.w t2, a0
;   addi a1, zero, 2
;   sext.w a3, a1
;   addi a5, zero, -1
;   addi a7, zero, 1
;   slli t4, a7, 0x3f
;   slli t1, t2, 0x20
;   bne a5, a3, 0xc
;   addi a0, zero, 1
;   j 8
;   mv a0, zero
;   bne t4, t1, 0xc
;   addi a2, zero, 1
;   j 8
;   mv a2, zero
;   and a4, a0, a2
;   beqz a4, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_ovf
;   bne zero, a3, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   divw a0, t2, a3
;   ret

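;; Base RV64I has no single zero-extend-word instruction (that is Zba's
;; zext.w/add.uw), so i32 unsigned operands are widened with a slli/srli
;; pair by 32 before the 32-bit divide.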
function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = udiv.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   slli a1,a1,32
;   srli a2,a1,32
;   trap_ifc int_divz##(zero eq a2)
;   slli a5,a0,32
;   srli a7,a5,32
;   divuw a0,a7,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   slli a1, a1, 0x20
;   srli a2, a1, 0x20
;   bne zero, a2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   slli a5, a0, 0x20
;   srli a7, a5, 0x20
;   divuw a0, a7, a2
;   ret

function %f15(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 2
  v2 = udiv.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,2
;   slli a1,t2,32
;   srli a3,a1,32
;   trap_ifc int_divz##(zero eq a3)
;   slli a6,a0,32
;   srli t3,a6,32
;   divuw a0,t3,a3
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 2
;   slli a1, t2, 0x20
;   srli a3, a1, 0x20
;   bne zero, a3, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   slli a6, a0, 0x20
;   srli t3, a6, 0x20
;   divuw a0, t3, a3
;   ret

function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = srem.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   sext.w a1,a1
;   trap_ifc int_divz##(zero eq a1)
;   remw a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   sext.w a1, a1
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   remw a0, a0, a1
;   ret

function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = urem.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   slli a1,a1,32
;   srli a2,a1,32
;   trap_ifc int_divz##(zero eq a2)
;   remuw a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   slli a1, a1, 0x20
;   srli a2, a1, 0x20
;   bne zero, a2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   remuw a0, a0, a2
;   ret

function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = band.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   and a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   and a0, a0, a1
;   ret

function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bor.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   or a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   or a0, a0, a1
;   ret

function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bxor.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   xor a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   xor a0, a0, a1
;   ret

function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = band_not.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   not a1,a1
;   and a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   not a1, a1
;   and a0, a0, a1
;   ret

function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bor_not.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   not a1,a1
;   or a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   not a1, a1
;   or a0, a0, a1
;   ret

function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bxor_not.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   not a1,a1
;   xor a0,a0,a1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   not a1, a1
;   xor a0, a0, a1
;   ret

function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bnot.i64 v0
  return v2
}

; VCode:
; block0:
;   not a0,a0
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   not a0, a0
;   ret

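;; CLIF masks an i32 shift amount to five bits: 53 & 31 == 21 (0x15), which
;; is the immediate the encoded slliw actually carries.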
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = iconst.i32 53
  v3 = ishl.i32 v0, v2
  v4 = isub.i32 v1, v3
  return v4
}

; VCode:
; block0:
;   slliw a2,a0,53
;   subw a0,a1,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   slliw a2, a0, 0x15
;   subw a0, a1, a2
;   ret

function %f26(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 -1
  v2 = iadd.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   addiw a0,a0,-1
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addiw a0, a0, -1
;   ret

function %f27(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 -1
  v2 = isub.i32 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,-1
;   subw a0,a0,t2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, -1
;   subw a0, a0, t2
;   ret

function %f28(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 -1
  v2 = isub.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,-1
;   sub a0,a0,t2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, -1
;   sub a0, a0, t2
;   ret

function %f29(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 1
  v2 = ineg v1
  return v2
}

; VCode:
; block0:
;   li t2,1
;   sub a0,zero,t2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 1
;   neg a0, t2
;   ret

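;; i128 add: sltu recovers the carry out of the low halves (the low sum is
;; smaller than an addend exactly when it wrapped), and the carry is then
;; added into the sum of the high halves.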
function %add_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
  v2 = iadd v0, v1
  return v2
}

; VCode:
; block0:
;   add a0,a0,a2
;   sltu a4,a0,a2
;   add a6,a1,a3
;   add a1,a6,a4
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   add a0, a0, a2
;   sltu a4, a0, a2
;   add a6, a1, a3
;   add a1, a6, a4
;   ret

function %sub_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
  v2 = isub v0, v1
  return v2
}

; VCode:
; block0:
;   sub a2,a0,a2
;   mv a7,a2
;   sltu a4,a0,a7
;   mv a0,a7
;   sub a6,a1,a3
;   sub a1,a6,a4
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   sub a2, a0, a2
;   ori a7, a2, 0
;   sltu a4, a0, a7
;   ori a0, a7, 0
;   sub a6, a1, a3
;   sub a1, a6, a4
;   ret

function %add_mul_2(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
  v3 = imul v1, v2
  v4 = iadd v3, v0
  return v4
}

; VCode:
; block0:
;   mulw a2,a1,a2
;   add a0,a2,a0
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mulw a2, a1, a2
;   add a0, a2, a0
;   ret

function %msub_i32(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
  v3 = imul v1, v2
  v4 = isub v0, v3
  return v4
}

; VCode:
; block0:
;   mulw a2,a1,a2
;   subw a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mulw a2, a1, a2
;   subw a0, a0, a2
;   ret

function %msub_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = imul v1, v2
  v4 = isub v0, v3
  return v4
}

; VCode:
; block0:
;   mul a2,a1,a2
;   sub a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mul a2, a1, a2
;   sub a0, a0, a2
;   ret

function %imul_sub_i32(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
  v3 = imul v1, v2
  v4 = isub v3, v0
  return v4
}

; VCode:
; block0:
;   mulw a2,a1,a2
;   subw a0,a2,a0
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mulw a2, a1, a2
;   subw a0, a2, a0
;   ret

function %imul_sub_i64(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
  v3 = imul v1, v2
  v4 = isub v3, v0
  return v4
}

; VCode:
; block0:
;   mul a2,a1,a2
;   sub a0,a2,a0
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   mul a2, a1, a2
;   sub a0, a2, a0
;   ret

function %srem_const (i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = srem.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,2
;   trap_ifc int_divz##(zero eq t2)
;   li a2,2
;   rem a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 2
;   bne zero, t2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   addi a2, zero, 2
;   rem a0, a0, a2
;   ret

function %urem_const (i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = urem.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,2
;   trap_ifc int_divz##(zero eq t2)
;   li a2,2
;   remu a0,a0,a2
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, 2
;   bne zero, t2, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   addi a2, zero, 2
;   remu a0, a0, a2
;   ret

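;; Even with the divisor known to be the constant -1, the full
;; INT64_MIN / -1 overflow guard and the int_divz check are still emitted.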
function %sdiv_minus_one(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 -1
  v2 = sdiv.i64 v0, v1
  return v2
}

; VCode:
; block0:
;   li t2,-1
;   li a1,-1
;   li a3,1
;   slli a5,a3,63
;   eq a7,a1,t2##ty=i64
;   eq t4,a5,a0##ty=i64
;   and t1,a7,t4
;   trap_if t1,int_ovf
;   li a1,-1
;   trap_ifc int_divz##(zero eq a1)
;   li a4,-1
;   div a0,a0,a4
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   addi t2, zero, -1
;   addi a1, zero, -1
;   addi a3, zero, 1
;   slli a5, a3, 0x3f
;   bne a1, t2, 0xc
;   addi a7, zero, 1
;   j 8
;   mv a7, zero
;   bne a5, a0, 0xc
;   addi t4, zero, 1
;   j 8
;   mv t4, zero
;   and t1, a7, t4
;   beqz t1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_ovf
;   addi a1, zero, -1
;   bne zero, a1, 8
;   .byte 0x00, 0x00, 0x00, 0x00 ; trap: int_divz
;   addi a4, zero, -1
;   div a0, a0, a4
;   ret