Remove support for entity variables in filecheck.

Now that the parser doesn't renumber indices, there's no need for entity
variables like $v0.
Dan Gohman
2018-02-20 14:31:01 -08:00
parent a5b00b173e
commit 10dcfcacdb
29 changed files with 343 additions and 395 deletions
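
The change throughout this diff is mechanical: checks that previously matched
entities through entity variables now name them literally, while regex variable
bindings such as $(fz=$V) are unaffected. A representative before/after pair,
in the same directive syntax as the tests below (illustrative excerpt, not an
additional hunk):

; old style: entity variables tracked whatever indices the parser assigned
; check: $ebb0(
; nextln: $(fz=$V) = ifcmp_imm $v1, 0
; new style: indices are stable, so entities are named directly
; check: ebb0(
; nextln: $(fz=$V) = ifcmp_imm v1, 0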

@@ -10,63 +10,63 @@ isa intel
 function %udiv(i64, i64) -> i64 {
 ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
 v2 = udiv v0, v1
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
 ; nextln: trapif eq $fz, int_divz
 ; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
 return v2
 ; nextln: return $d
 }
 function %urem(i64, i64) -> i64 {
 ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
 v2 = urem v0, v1
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
 ; nextln: trapif eq $fz, int_divz
 ; nextln: $(hi=$V) = iconst.i64 0
-; nextln: $(d=$V), $(r=$V) = x86_udivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
 return v2
 ; nextln: return $r
 }
 function %sdiv(i64, i64) -> i64 {
 ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
 v2 = sdiv v0, v1
-; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
+; nextln: $(fm1=$V) = ifcmp_imm v1, -1
 ; nextln: brif eq $fm1, $(m1=$EBB)
-; nextln: $(fz=$V) = ifcmp_imm $v1, 0
+; nextln: $(fz=$V) = ifcmp_imm v1, 0
 ; nextln: trapif eq $fz, int_divz
 ; check: $(hi=$V) = sshr
-; nextln: $(q=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
 ; nextln: jump $(done=$EBB)($q)
 ; check: $m1:
 ; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
-; nextln: $(fm=$V) = ifcmp.i64 $v0, $imin
+; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
 ; nextln: trapif eq $fm, int_ovf
-; check: $done($v2: i64):
+; check: $done(v2: i64):
 return v2
-; nextln: return $v2
+; nextln: return v2
 }
 ; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
 ; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
 function %srem(i64, i64) -> i64 {
 ebb0(v0: i64, v1: i64):
-; check: $ebb0(
+; check: ebb0(
 v2 = srem v0, v1
-; nextln: $(fm1=$V) = ifcmp_imm $v1, -1
+; nextln: $(fm1=$V) = ifcmp_imm v1, -1
 ; nextln: brif eq $fm1, $(m1=$EBB)
 ; check: $(hi=$V) = sshr
-; nextln: $(d=$V), $(r=$V) = x86_sdivmodx $v0, $hi, $v1
+; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
 ; nextln: jump $(done=$EBB)($r)
 ; check: $m1:
 ; nextln: $(zero=$V) = iconst.i64 0
 ; nextln: jump $(done=$EBB)($zero)
-; check: $done($v2: i64):
+; check: $done(v2: i64):
 return v2
-; nextln: return $v2
+; nextln: return v2
 }