* x64: Add precise-output tests for div traps
This adds a suite of `*.clif` files intended to test compilation of the `{s,u}{div,rem}` instructions with `avoid_div_traps=true`; one such file is reproduced in full below, and a small model of the guarded lowering is sketched just after this commit list.
* x64: Remove conditional regalloc in `Div` instruction
Move the 8-bit `Div` lowering into a dedicated `Div8` instruction so that, from regalloc's perspective, no register operands are conditionally used (see the sketch after this commit list).
* x64: Migrate non-trapping `udiv`/`urem` to ISLE
* x64: Port checked `udiv` to ISLE
* x64: Migrate `urem` entirely to ISLE
* x64: Use `test` instead of `cmp` to compare-to-zero
* x64: Port `sdiv` lowering to ISLE
* x64: Port `srem` lowering to ISLE
* Tidy up regalloc behavior and fix tests
* Update docs and winch
* Review comments
* Reword again
* More refactoring and test fixes
* More test fixes
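For context on the first commit above: with `avoid_div_traps=true`, the lowering checks the divisor explicitly and traps via `ud2` instead of letting the `div` instruction itself fault, which is why the expected output below contains a `test`/`jne`/`ud2` sequence ahead of every division whose divisor is not a known non-zero constant. A minimal model of that guarded `urem`, written in plain Rust rather than Cranelift code:

```rust
/// A minimal model (plain Rust, not Cranelift code) of the guarded `urem`
/// lowering that `avoid_div_traps=true` requests: the divisor is tested
/// against zero up front, mirroring the `testl %esi, %esi` / `jne` / `ud2`
/// sequence in the expected output below, so the hardware `div` never faults.
fn urem32_guarded(dividend: u32, divisor: u32) -> u32 {
    // `testl %esi, %esi; jne skip; ud2` -- trap explicitly on a zero divisor.
    if divisor == 0 {
        panic!("int_divz"); // stands in for the `ud2` trap
    }
    // `divl %esi`: quotient ends up in %eax (discarded here), remainder in
    // %edx, and the remainder is what `urem` returns (`movq %rdx, %rax`).
    dividend % divisor
}

fn main() {
    assert_eq!(urem32_guarded(100, 17), 15);
    println!("ok");
}
```

The constant-divisor functions (`%i8_imm` and friends) skip the check entirely, since a divisor of `17` can never be zero.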
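The `Div8` commit is about register-allocator bookkeeping: the 8-bit form of x86 `div` keeps the dividend, quotient, and remainder entirely within `rax` (AL/AH), while the wider forms use the `rdx:rax` pair, so folding both shapes into one `Div` instruction meant the fixed registers reported to regalloc depended on the operand size. The sketch below is a rough, hypothetical model of why a separate variant removes that conditionality; none of the type or field names are Cranelift's.

```rust
// Hypothetical, simplified model of the `Div8` split; these are not
// Cranelift's real types. The point is only that each variant's fixed
// register uses/defs are unconditional once the 8-bit case has its own
// instruction.

#[derive(Clone, Copy, Debug)]
enum Reg {
    Rax,
    Rdx,
    Other(u8),
}

#[derive(Debug)]
enum Inst {
    /// 16/32/64-bit divide: dividend in rdx:rax, quotient -> rax, remainder -> rdx.
    Div { divisor: Reg },
    /// 8-bit divide: dividend in AX, quotient -> AL, remainder -> AH, all within rax.
    Div8 { divisor: Reg },
}

/// The fixed registers each instruction reads and writes, in the style of
/// the operand constraints a backend hands to the register allocator.
fn fixed_uses_and_defs(inst: &Inst) -> (Vec<Reg>, Vec<Reg>) {
    match inst {
        // Always uses and clobbers both rax and rdx -- no conditions.
        Inst::Div { divisor } => (vec![Reg::Rax, Reg::Rdx, *divisor], vec![Reg::Rax, Reg::Rdx]),
        // Only rax is involved; rdx stays available -- again unconditional.
        Inst::Div8 { divisor } => (vec![Reg::Rax, *divisor], vec![Reg::Rax]),
    }
}

fn main() {
    for inst in [Inst::Div { divisor: Reg::Other(6) }, Inst::Div8 { divisor: Reg::Other(6) }] {
        let (uses, defs) = fixed_uses_and_defs(&inst);
        println!("{inst:?}: uses {uses:?}, defs {defs:?}");
    }
}
```

The effect is visible in `%f1` below: the 8-bit `urem` never touches `%rdx`, and the remainder is recovered from AH with `shrq $8, %rax`.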
test compile precise-output
set avoid_div_traps=true
target x86_64

function %f1(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movzbl %dil, %eax
; testb %sil, %sil
; jnz ; ud2 int_divz ;
; div %al, %sil, %al
; shrq $8, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movzbl %dil, %eax
; testb %sil, %sil
; jne 0x13
; ud2 ; trap: int_divz
; divb %sil ; trap: int_divz
; shrq $8, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; testw %si, %si
; jnz ; ud2 int_divz ;
; div %ax, %dx, %si, %ax, %dx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; testw %si, %si
; jne 0x15
; ud2 ; trap: int_divz
; divw %si ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %f3(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; testl %esi, %esi
; jnz ; ud2 int_divz ;
; div %eax, %edx, %esi, %eax, %edx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; testl %esi, %esi
; jne 0x14
; ud2 ; trap: int_divz
; divl %esi ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; testq %rsi, %rsi
; jnz ; ud2 int_divz ;
; div %rax, %rdx, %rsi, %rax, %rdx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; testq %rsi, %rsi
; jne 0x15
; ud2 ; trap: int_divz
; divq %rsi ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %i8_imm(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i8 17
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movzbl %dil, %eax
; movl $17, %edx
; div %al, %dl, %al
; shrq $8, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movzbl %dil, %eax
; movl $0x11, %edx
; divb %dl ; trap: int_divz
; shrq $8, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %i16_imm(i16) -> i16 {
block0(v0: i16):
v1 = iconst.i16 17
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; movl $17, %r8d
; div %ax, %dx, %r8w, %ax, %dx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; movl $0x11, %r8d
; divw %r8w ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %i32_imm(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 17
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; movl $17, %r8d
; div %eax, %edx, %r8d, %eax, %edx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; movl $0x11, %r8d
; divl %r8d ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq

function %i64_imm(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 17
v2 = urem v0, v1
return v2
}

; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rax
; xorq %rdx, %rdx, %rdx
; movl $17, %r8d
; div %rax, %rdx, %r8, %rax, %rdx
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movq %rdi, %rax
; xorq %rdx, %rdx
; movl $0x11, %r8d
; divq %r8 ; trap: int_divz
; movq %rdx, %rax
; movq %rbp, %rsp
; popq %rbp
; retq