; Test the division legalizations.
; See also legalize-div.cton.
test legalizer
set is_64bit
set avoid_div_traps=1
isa intel
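
; With avoid_div_traps=1, the expansions below insert explicit ifcmp/trapif
; checks (int_divz for a zero divisor, int_ovf for signed INT_MIN / -1)
; instead of relying on the hardware divide trap.
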
; regex: V=v\d+
; regex: EBB=ebb\d+
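
; Unsigned division and remainder: trap explicitly on a zero divisor, then
; zero the high half of the dividend (iconst.i64 0) and use x86_udivmodx;
; udiv keeps the quotient, urem the remainder.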
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
    v2 = udiv v0, v1
    ; nextln: $(fz=$V) = ifcmp_imm v1, 0
    ; nextln: trapif eq $fz, int_divz
    ; nextln: $(hi=$V) = iconst.i64 0
    ; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
    return v2
    ; nextln: return $d
}

function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
    v2 = urem v0, v1
    ; nextln: $(fz=$V) = ifcmp_imm v1, 0
    ; nextln: trapif eq $fz, int_divz
    ; nextln: $(hi=$V) = iconst.i64 0
    ; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
    return v2
    ; nextln: return $r
}
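
; Signed division additionally branches to a special case when the divisor is
; -1, trapping with int_ovf if the dividend is INT_MIN. The high half of the
; dividend comes from sign extension (sshr_imm) rather than a zero constant.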
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
    v2 = sdiv v0, v1
    ; nextln: $(fm1=$V) = ifcmp_imm v1, -1
    ; nextln: brif eq $fm1, $(m1=$EBB)
    ; nextln: $(fz=$V) = ifcmp_imm v1, 0
    ; nextln: trapif eq $fz, int_divz
    ; check: $(hi=$V) = sshr_imm
    ; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
    ; nextln: jump $(done=$EBB)($q)
    ; check: $m1:
    ; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
    ; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
    ; nextln: trapif eq $fm, int_ovf
    ; check: $done(v2: i64):
    return v2
    ; nextln: return v2
}

; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
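; (x86_sdivmodx lowers to the hardware divide instruction, which faults when
; the quotient overflows: INT_MIN / -1 would be +2^63, which does not fit in
; i64. Since x % -1 is 0 for every x, the expansion skips the divide for a -1
; divisor and jumps to the merge block with the constant 0 instead.)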
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
    v2 = srem v0, v1
    ; nextln: $(fm1=$V) = ifcmp_imm v1, -1
    ; nextln: brif eq $fm1, $(m1=$EBB)
    ; check: $(hi=$V) = sshr_imm
    ; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
    ; nextln: jump $(done=$EBB)($r)
    ; check: $m1:
    ; nextln: $(zero=$V) = iconst.i64 0
    ; nextln: jump $(done=$EBB)($zero)
    ; check: $done(v2: i64):
    return v2
    ; nextln: return v2
}