Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) used to represent the basic-block
subcomponent of an Ebb that is a predecessor of another Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated comments that refer to BasicBlock: eventually we
will remove BlockPredecessor and replace it with Block, which is a
true basic block, at which point those comments become correct.
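
For orientation, the shape of the renamed struct is roughly the
following (a sketch only: the placeholder entity types and field names
are assumptions for illustration, not the exact source):

// Placeholder entity references standing in for Cranelift's real
// index types (assumed shapes, for illustration only).
pub struct Ebb(u32);
pub struct Inst(u32);

/// The basic-block subcomponent of an Ebb that is a predecessor of
/// another Ebb: the Ebb it lives in, paired with the branch
/// instruction that transfers control out of it.
pub struct BlockPredecessor {
    pub ebb: Ebb,   // hypothetical field name
    pub inst: Inst, // hypothetical field name
}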

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
their associated identifiers, would conflict with a later commit, so
they are given more verbose names here.
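
As a rough sketch of the kind of renaming this means (the names below
are illustrative; the commit may have chosen different ones):

// SSABuilder's private block entity and its per-block data must not
// collide with the IR-level `Block` that `Ebb` is about to become,
// so they get more verbose names, e.g.:
pub struct SSABlock(u32); // was SSABuilder's own `Block`

pub enum SSABlockData {   // was SSABuilder's own `BlockData`
    /// A block sitting at the head of an Ebb.
    Header,
    /// A block created by splitting an Ebb at an internal branch.
    Body,
}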

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs

* Manually update comment for `Block`

The Wikipedia article linked from `Block`'s doc comment required an
update (a `Block` is now a plain basic block, not an extended one).
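
The fix amounts to pointing the doc comment at the right article,
roughly like this (a sketch; the real comment's wording may differ):

/// An opaque reference to a basic block in a function.
///
/// Links to Wikipedia's "Basic block" article,
/// https://en.wikipedia.org/wiki/Basic_block, rather than the
/// "Extended basic block" article the `Ebb` doc comment linked to.
pub struct Block(u32);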

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' and 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters, so the new, longer
names are truncated (e.g. %icmp_to_brz_inverted_fold prints as
%icmp_to_brz_inve), which made a filecheck expectation fail. An
outdated comment was also fixed.
Authored by Ryan Hunt on 2020-02-07 10:46:47 -06:00; committed by GitHub.
parent a136d1cb00, commit 832666c45e
370 changed files with 8090 additions and 7988 deletions

@@ -2,80 +2,80 @@ test simple_preopt
 target x86_64
 function %icmp_to_brz_fold(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = icmp_imm eq v0, 0
-brnz v1, ebb1
-jump ebb2
-ebb1:
+brnz v1, block1
+jump block2
+block1:
 v3 = iconst.i32 1
 return v3
-ebb2:
+block2:
 v4 = iconst.i32 2
 return v4
 }
 ; sameln: function %icmp_to_brz_fold
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = icmp_imm eq v0, 0
-; nextln: brnz v0, ebb2
-; nextln: jump ebb1
+; nextln: brnz v0, block2
+; nextln: jump block1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v3 = iconst.i32 1
 ; nextln: return v3
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v4 = iconst.i32 2
 ; nextln: return v4
 ; nextln: }
 function %icmp_to_brz_inverted_fold(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = icmp_imm ne v0, 0
-brz v1, ebb1
-jump ebb2
-ebb1:
+brz v1, block1
+jump block2
+block1:
 v3 = iconst.i32 1
 return v3
-ebb2:
+block2:
 v4 = iconst.i32 2
 return v4
 }
 ; sameln: function %icmp_to_brz_inve
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = icmp_imm ne v0, 0
-; nextln: brnz v0, ebb2
-; nextln: jump ebb1
+; nextln: brnz v0, block2
+; nextln: jump block1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v3 = iconst.i32 1
 ; nextln: return v3
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v4 = iconst.i32 2
 ; nextln: return v4
 ; nextln: }
 function %br_icmp_inversion(i32, i32) -> i32 {
-ebb0(v0: i32, v1: i32):
-br_icmp ugt v0, v1, ebb1
-jump ebb2
-ebb1:
+block0(v0: i32, v1: i32):
+br_icmp ugt v0, v1, block1
+jump block2
+block1:
 v2 = iconst.i32 1
 return v2
-ebb2:
+block2:
 v3 = iconst.i32 2
 return v3
 }
 ; sameln: function %br_icmp_inversio
-; nextln: ebb0(v0: i32, v1: i32):
-; nextln: br_icmp ule v0, v1, ebb2
-; nextln: jump ebb1
+; nextln: block0(v0: i32, v1: i32):
+; nextln: br_icmp ule v0, v1, block2
+; nextln: jump block1
 ; nextln:
-; nextln: ebb1:
+; nextln: block1:
 ; nextln: v2 = iconst.i32 1
 ; nextln: return v2
 ; nextln:
-; nextln: ebb2:
+; nextln: block2:
 ; nextln: v3 = iconst.i32 2
 ; nextln: return v3
 ; nextln: }

@@ -4,7 +4,7 @@ target x86_64 baseline
 ; Cases where the denominator is created by an iconst
 function %indir_udiv32(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 7
 v2 = udiv v0, v1
 ; check: iconst.i32 7
@@ -19,7 +19,7 @@ ebb0(v0: i32):
 }
 function %indir_sdiv32(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 -17
 v2 = sdiv v0, v1
 ; check: iconst.i32 -17
@@ -33,7 +33,7 @@ ebb0(v0: i32):
 }
 function %indir_udiv64(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = iconst.i64 1337
 v2 = udiv v0, v1
 ; check: iconst.i64 1337
@@ -45,7 +45,7 @@ ebb0(v0: i64):
 }
 function %indir_sdiv64(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = iconst.i64 -90210
 v2 = sdiv v0, v1
 ; check: iconst.i64 0xffff_ffff_fffe_9f9e

@@ -5,7 +5,7 @@ target i686 baseline
 ; complex case (mul, sub, shift, add, shift)
 function %t_udiv32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 7
 ; check: iconst.i32 0x2492_4925
 ; check: umulhi v0, v2
@@ -19,7 +19,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift)
 function %t_udiv32_p125(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 125
 ; check: iconst.i32 0x1062_4dd3
 ; check: umulhi v0, v2
@@ -30,7 +30,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul)
 function %t_udiv32_p641(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 641
 ; check: iconst.i32 0x0066_3d81
 ; check: v3 = umulhi v0, v2
@@ -43,7 +43,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul, add-sign-bit)
 function %t_sdiv32_n6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -6
 ; check: iconst.i32 0xffff_ffff_d555_5555
 ; check: smulhi v0, v2
@@ -55,7 +55,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift, add-sign-bit)
 function %t_sdiv32_n5(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -5
 ; check: iconst.i32 0xffff_ffff_9999_9999
 ; check: smulhi v0, v2
@@ -68,7 +68,7 @@ ebb0(v0: i32):
 ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
 function %t_sdiv32_n3(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -3
 ; check: iconst.i32 0x5555_5555
 ; check: smulhi v0, v2
@@ -82,7 +82,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul, add-sign-bit)
 function %t_sdiv32_p6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 6
 ; check: iconst.i32 0x2aaa_aaab
 ; check: smulhi v0, v2
@@ -94,7 +94,7 @@ ebb0(v0: i32):
 ; case d > 0 && M < 0 (mull, add, shift, add-sign-bit)
 function %t_sdiv32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 7
 ; check: iconst.i32 0xffff_ffff_9249_2493
 ; check: smulhi v0, v2
@@ -108,7 +108,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift, add-sign-bit)
 function %t_sdiv32_p625(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 625
 ; check: iconst.i32 0x68db_8bad
 ; check: smulhi v0, v2
@@ -124,7 +124,7 @@ ebb0(v0: i32):
 ; complex case (mul, sub, shift, add, shift)
 function %t_udiv64_p7(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 7
 ; check: iconst.i64 0x2492_4924_9249_2493
 ; check: umulhi v0, v2
@@ -138,7 +138,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift)
 function %t_udiv64_p9(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 9
 ; check: iconst.i64 0xe38e_38e3_8e38_e38f
 ; check: umulhi v0, v2
@@ -149,7 +149,7 @@ ebb0(v0: i64):
 ; complex case (mul, sub, shift, add, shift)
 function %t_udiv64_p125(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 125
 ; check: iconst.i64 0x0624_dd2f_1a9f_be77
 ; check: umulhi v0, v2
@@ -163,7 +163,7 @@ ebb0(v0: i64):
 ; simple case w/ shift by zero (mul)
 function %t_udiv64_p274177(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 274177
 ; check: iconst.i64 0x3d30_f19c_d101
 ; check: v3 = umulhi v0, v2
@@ -176,7 +176,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift, add-sign-bit)
 function %t_sdiv64_n625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -625
 ; check: iconst.i64 0xcb92_3a29_c779_a6b5
 ; check: smulhi v0, v2
@@ -189,7 +189,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_sdiv64_n6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -6
 ; check: iconst.i64 0xd555_5555_5555_5555
 ; check: smulhi v0, v2
@@ -201,7 +201,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_sdiv64_n5(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -5
 ; check: iconst.i64 0x9999_9999_9999_9999
 ; check: smulhi v0, v2
@@ -214,7 +214,7 @@ ebb0(v0: i64):
 ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
 function %t_sdiv64_n3(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -3
 ; check: iconst.i64 0x5555_5555_5555_5555
 ; check: smulhi v0, v2
@@ -228,7 +228,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_sdiv64_p6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 6
 ; check: iconst.i64 0x2aaa_aaaa_aaaa_aaab
 ; check: smulhi v0, v2
@@ -240,7 +240,7 @@ ebb0(v0: i64):
 ; case d > 0 && M < 0 (mul, add, shift, add-sign-bit)
 function %t_sdiv64_p15(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 15
 ; check: iconst.i64 0x8888_8888_8888_8889
 ; check: smulhi v0, v2
@@ -254,7 +254,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift, add-sign-bit)
 function %t_sdiv64_p625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 625
 ; check: iconst.i64 0x346d_c5d6_3886_594b
 ; check: smulhi v0, v2

@@ -5,7 +5,7 @@ target i686 baseline
 ; ignored
 function %t_udiv32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 0
 ; check: udiv_imm v0, 0
 return v1
@@ -13,7 +13,7 @@ ebb0(v0: i32):
 ; converted to a nop
 function %t_udiv32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 1
 ; check: nop
 return v1
@@ -21,7 +21,7 @@ ebb0(v0: i32):
 ; shift
 function %t_udiv32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 2
 ; check: ushr_imm v0, 1
 return v1
@@ -29,7 +29,7 @@ ebb0(v0: i32):
 ; shift
 function %t_udiv32_p2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = udiv_imm v0, 0x8000_0000
 ; check: ushr_imm v0, 31
 return v1
@@ -40,7 +40,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_udiv64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 0
 ; check: udiv_imm v0, 0
 return v1
@@ -48,7 +48,7 @@ ebb0(v0: i64):
 ; converted to a nop
 function %t_udiv64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 1
 ; check: nop
 return v1
@@ -56,7 +56,7 @@ ebb0(v0: i64):
 ; shift
 function %t_udiv64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 2
 ; check: ushr_imm v0, 1
 return v1
@@ -64,7 +64,7 @@ ebb0(v0: i64):
 ; shift
 function %t_udiv64_p2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = udiv_imm v0, 0x8000_0000_0000_0000
 ; check: ushr_imm v0, 63
 return v1
@@ -75,7 +75,7 @@ ebb0(v0: i64):
 ; ignored
 function %t_sdiv32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 0
 ; check: sdiv_imm v0, 0
 return v1
@@ -83,7 +83,7 @@ ebb0(v0: i32):
 ; converted to a nop
 function %t_sdiv32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 1
 ; check: nop
 return v1
@@ -91,7 +91,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_sdiv32_n1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -1
 ; check: sdiv_imm v0, -1
 return v1
@@ -99,7 +99,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 2
 ; check: ushr_imm v0, 31
 ; check: iadd v0, v2
@@ -110,7 +110,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_n2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -2
 ; check: ushr_imm v0, 31
 ; check: iadd v0, v2
@@ -121,7 +121,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_p4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 4
 ; check: v2 = sshr_imm v0, 1
 ; check: ushr_imm v2, 30
@@ -134,7 +134,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_n4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 30
@@ -146,7 +146,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_p2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, 0x4000_0000
 ; check: sshr_imm v0, 29
 ; check: ushr_imm v2, 2
@@ -158,7 +158,7 @@ ebb0(v0: i32):
 ; shift
 function %t_sdiv32_n2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -0x4000_0000
 ; check: sshr_imm v0, 29
 ; check: ushr_imm v2, 2
@@ -171,7 +171,7 @@ ebb0(v0: i32):
 ; there's no positive version of this, since -(-0x8000_0000) isn't
 ; representable.
 function %t_sdiv32_n2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = sdiv_imm v0, -0x8000_0000
 ; check: sshr_imm v0, 30
 ; check: ushr_imm v2, 1
@@ -186,7 +186,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_sdiv64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 0
 ; check: sdiv_imm v0, 0
 return v1
@@ -194,7 +194,7 @@ ebb0(v0: i64):
 ; converted to a nop
 function %t_sdiv64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 1
 ; check: nop
 return v1
@@ -202,7 +202,7 @@ ebb0(v0: i64):
 ; ignored
 function %t_sdiv64_n1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -1
 ; check: sdiv_imm v0, -1
 return v1
@@ -210,7 +210,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 2
 ; check: ushr_imm v0, 63
 ; check: iadd v0, v2
@@ -221,7 +221,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_n2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -2
 ; check: ushr_imm v0, 63
 ; check: iadd v0, v2
@@ -232,7 +232,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_p4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 62
@@ -244,7 +244,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_n4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 62
@@ -256,7 +256,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_p2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, 0x4000_0000_0000_0000
 ; check: sshr_imm v0, 61
 ; check: ushr_imm v2, 2
@@ -268,7 +268,7 @@ ebb0(v0: i64):
 ; shift
 function %t_sdiv64_n2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -0x4000_0000_0000_0000
 ; check: sshr_imm v0, 61
 ; check: ushr_imm v2, 2
@@ -281,7 +281,7 @@ ebb0(v0: i64):
 ; there's no positive version of this, since -(-0x8000_0000_0000_0000) isn't
 ; representable.
 function %t_sdiv64_n2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = sdiv_imm v0, -0x8000_0000_0000_0000
 ; check: sshr_imm v0, 62
 ; check: ushr_imm v2, 1

@@ -5,7 +5,7 @@ function %wraparound(i64 vmctx) -> f32 system_v {
 gv0 = vmctx
 gv1 = iadd_imm.i64 gv0, 48
-ebb35(v0: i64):
+block35(v0: i64):
 v88 = iconst.i64 0
 v89 = iconst.i64 0x8000_0000_0000_0000
 v90 = ishl_imm v88, 0x8000_0000_0000_0000

@@ -5,7 +5,7 @@ target i686 baseline
 ; complex case (mul, sub, shift, add, shift)
 function %t_urem32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 7
 ; check: iconst.i32 0x2492_4925
 ; check: umulhi v0, v2
@@ -20,7 +20,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift)
 function %t_urem32_p125(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 125
 ; check: iconst.i32 0x1062_4dd3
 ; check: umulhi v0, v2
@@ -32,7 +32,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul)
 function %t_urem32_p641(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 641
 ; check: iconst.i32 0x0066_3d81
 ; check: umulhi v0, v2
@@ -46,7 +46,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul, add-sign-bit)
 function %t_srem32_n6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -6
 ; check: iconst.i32 0xffff_ffff_d555_5555
 ; check: smulhi v0, v2
@@ -59,7 +59,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift, add-sign-bit)
 function %t_srem32_n5(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -5
 ; check: iconst.i32 0xffff_ffff_9999_9999
 ; check: smulhi v0, v2
@@ -73,7 +73,7 @@ ebb0(v0: i32):
 ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
 function %t_srem32_n3(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -3
 ; check: iconst.i32 0x5555_5555
 ; check: smulhi v0, v2
@@ -88,7 +88,7 @@ ebb0(v0: i32):
 ; simple case w/ shift by zero (mul, add-sign-bit)
 function %t_srem32_p6(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 6
 ; check: iconst.i32 0x2aaa_aaab
 ; check: smulhi v0, v2
@@ -101,7 +101,7 @@ ebb0(v0: i32):
 ; case d > 0 && M < 0 (mull, add, shift, add-sign-bit)
 function %t_srem32_p7(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 7
 ; check: iconst.i32 0xffff_ffff_9249_2493
 ; check: smulhi v0, v2
@@ -116,7 +116,7 @@ ebb0(v0: i32):
 ; simple case (mul, shift, add-sign-bit)
 function %t_srem32_p625(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 625
 ; check: iconst.i32 0x68db_8bad
 ; check: smulhi v0, v2
@@ -133,7 +133,7 @@ ebb0(v0: i32):
 ; complex case (mul, sub, shift, add, shift)
 function %t_urem64_p7(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 7
 ; check: umulhi v0, v2
 ; check: isub v0, v3
@@ -147,7 +147,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift)
 function %t_urem64_p9(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 9
 ; check: iconst.i64 0xe38e_38e3_8e38_e38f
 ; check: umulhi v0, v2
@@ -159,7 +159,7 @@ ebb0(v0: i64):
 ; complex case (mul, sub, shift, add, shift)
 function %t_urem64_p125(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 125
 ; check: iconst.i64 0x0624_dd2f_1a9f_be77
 ; check: umulhi v0, v2
@@ -174,7 +174,7 @@ ebb0(v0: i64):
 ; simple case w/ shift by zero (mul)
 function %t_urem64_p274177(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 274177
 ; check: iconst.i64 0x3d30_f19c_d101
 ; check: umulhi v0, v2
@@ -188,7 +188,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift, add-sign-bit)
 function %t_srem64_n625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -625
 ; check: iconst.i64 0xcb92_3a29_c779_a6b5
 ; check: smulhi v0, v2
@@ -202,7 +202,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_srem64_n6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -6
 ; check: iconst.i64 0xd555_5555_5555_5555
 ; check: smulhi v0, v2
@@ -215,7 +215,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_srem64_n5(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -5
 ; check: iconst.i64 0x9999_9999_9999_9999
 ; check: smulhi v0, v2
@@ -229,7 +229,7 @@ ebb0(v0: i64):
 ; case d < 0 && M > 0 (mul, sub, shift, add-sign-bit)
 function %t_srem64_n3(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -3
 ; check: iconst.i64 0x5555_5555_5555_5555
 ; check: smulhi v0, v2
@@ -244,7 +244,7 @@ ebb0(v0: i64):
 ; simple case w/ zero shift (mul, add-sign-bit)
 function %t_srem64_p6(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 6
 ; check: iconst.i64 0x2aaa_aaaa_aaaa_aaab
 ; check: smulhi v0, v2
@@ -257,7 +257,7 @@ ebb0(v0: i64):
 ; case d > 0 && M < 0 (mul, add, shift, add-sign-bit)
 function %t_srem64_p15(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 15
 ; check: iconst.i64 0x8888_8888_8888_8889
 ; check: smulhi v0, v2
@@ -272,7 +272,7 @@ ebb0(v0: i64):
 ; simple case (mul, shift, add-sign-bit)
 function %t_srem64_p625(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 625
 ; check: iconst.i64 0x346d_c5d6_3886_594b
 ; check: smulhi v0, v2

@@ -5,7 +5,7 @@ target i686 baseline
 ; ignored
 function %t_urem32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 0
 ; check: urem_imm v0, 0
 return v1
@@ -13,7 +13,7 @@ ebb0(v0: i32):
 ; converted to constant zero
 function %t_urem32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 1
 ; check: iconst.i32 0
 return v1
@@ -21,7 +21,7 @@ ebb0(v0: i32):
 ; shift
 function %t_urem32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 2
 ; check: band_imm v0, 1
 return v1
@@ -29,7 +29,7 @@ ebb0(v0: i32):
 ; shift
 function %t_urem32_p2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = urem_imm v0, 0x8000_0000
 ; check: band_imm v0, 0x7fff_ffff
 return v1
@@ -40,7 +40,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_urem64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 0
 ; check: urem_imm v0, 0
 return v1
@@ -48,7 +48,7 @@ ebb0(v0: i64):
 ; converted to constant zero
 function %t_urem64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 1
 ; check: iconst.i64 0
 return v1
@@ -56,7 +56,7 @@ ebb0(v0: i64):
 ; shift
 function %t_urem64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 2
 ; check: band_imm v0, 1
 return v1
@@ -64,7 +64,7 @@ ebb0(v0: i64):
 ; shift
 function %t_urem64_p2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = urem_imm v0, 0x8000_0000_0000_0000
 ; check: band_imm v0, 0x7fff_ffff_ffff_ffff
 return v1
@@ -75,7 +75,7 @@ ebb0(v0: i64):
 ; ignored
 function %t_srem32_n1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -1
 ; check: srem_imm v0, -1
 return v1
@@ -83,7 +83,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_srem32_p0(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 0
 ; check: srem_imm v0, 0
 return v1
@@ -91,7 +91,7 @@ ebb0(v0: i32):
 ; converted to constant zero
 function %t_srem32_p1(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 1
 ; check: iconst.i32 0
 return v1
@@ -99,7 +99,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_p2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 2
 ; check: ushr_imm v0, 31
 ; check: iadd v0, v2
@@ -110,7 +110,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_n2(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -2
 ; check: ushr_imm v0, 31
 ; check: iadd v0, v2
@@ -121,7 +121,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_p4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 30
@@ -133,7 +133,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_n4(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 30
@@ -145,7 +145,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_p2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, 0x4000_0000
 ; check: sshr_imm v0, 29
 ; check: ushr_imm v2, 2
@@ -157,7 +157,7 @@ ebb0(v0: i32):
 ; shift
 function %t_srem32_n2p30(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -0x4000_0000
 ; check: sshr_imm v0, 29
 ; check: ushr_imm v2, 2
@@ -170,7 +170,7 @@ ebb0(v0: i32):
 ; there's no positive version of this, since -(-0x8000_0000) isn't
 ; representable.
 function %t_srem32_n2p31(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = srem_imm v0, -0x8000_0000
 ; check: sshr_imm v0, 30
 ; check: ushr_imm v2, 1
@@ -185,7 +185,7 @@ ebb0(v0: i32):
 ; ignored
 function %t_srem64_n1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -1
 ; check: srem_imm v0, -1
 return v1
@@ -193,7 +193,7 @@ ebb0(v0: i64):
 ; ignored
 function %t_srem64_p0(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 0
 ; check: srem_imm v0, 0
 return v1
@@ -201,7 +201,7 @@ ebb0(v0: i64):
 ; converted to constant zero
 function %t_srem64_p1(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 1
 ; check: iconst.i64 0
 return v1
@@ -209,7 +209,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_p2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 2
 ; check: ushr_imm v0, 63
 ; check: iadd v0, v2
@@ -220,7 +220,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_n2(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -2
 ; check: ushr_imm v0, 63
 ; check: iadd v0, v2
@@ -231,7 +231,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_p4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 62
@@ -243,7 +243,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_n4(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -4
 ; check: sshr_imm v0, 1
 ; check: ushr_imm v2, 62
@@ -255,7 +255,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_p2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, 0x4000_0000_0000_0000
 ; check: sshr_imm v0, 61
 ; check: ushr_imm v2, 2
@@ -267,7 +267,7 @@ ebb0(v0: i64):
 ; shift
 function %t_srem64_n2p62(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -0x4000_0000_0000_0000
 ; check: sshr_imm v0, 61
 ; check: ushr_imm v2, 2
@@ -280,7 +280,7 @@ ebb0(v0: i64):
 ; there's no positive version of this, since -(-0x8000_0000_0000_0000) isn't
 ; representable.
 function %t_srem64_n2p63(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = srem_imm v0, -0x8000_0000_0000_0000
 ; check: sshr_imm v0, 62
 ; check: ushr_imm v2, 1

@@ -4,40 +4,40 @@ target i686
 ;; 32-bits platforms.
 function %iadd_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = iadd v0, v1
 return v2
 }
 ; sameln: function %iadd_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = iadd_imm v0, 2
 ; nextln: return v2
 ; nextln: }
 function %isub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = isub v0, v1
 return v2
 }
 ; sameln: function %isub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = iadd_imm v0, -2
 ; nextln: return v2
 ; nextln: }
 function %icmp_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = icmp slt v0, v1
 v3 = bint.i32 v2
 return v3
 }
 ; sameln: function %icmp_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = icmp_imm slt v0, 2
 ; nextln: v3 = bint.i32 v2
@@ -47,13 +47,13 @@ ebb0(v0: i32):
 ;; Don't simplify operations that would get illegal because of lack of native
 ;; support.
 function %iadd_imm(i64) -> i64 {
-ebb0(v0: i64):
+block0(v0: i64):
 v1 = iconst.i64 2
 v2 = iadd v0, v1
 return v2
 }
 ; sameln: function %iadd_imm
-; nextln: ebb0(v0: i64):
+; nextln: block0(v0: i64):
 ; nextln: v1 = iconst.i64 2
 ; nextln: v2 = iadd v0, v1
 ; nextln: return v2

@@ -4,40 +4,40 @@ target x86_64
 ;; 64-bits platforms.
 function %iadd_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = iadd v0, v1
 return v2
 }
 ; sameln: function %iadd_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = iadd_imm v0, 2
 ; nextln: return v2
 ; nextln: }
 function %isub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = isub v0, v1
 return v2
 }
 ; sameln: function %isub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = iadd_imm v0, -2
 ; nextln: return v2
 ; nextln: }
 function %icmp_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = icmp slt v0, v1
 v3 = bint.i32 v2
 return v3
 }
 ; sameln: function %icmp_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = icmp_imm slt v0, 2
 ; nextln: v3 = bint.i32 v2
@@ -45,18 +45,18 @@ ebb0(v0: i32):
 ; nextln: }
 function %brz_bint(i32) {
-ebb0(v0: i32):
+block0(v0: i32):
 v3 = icmp_imm slt v0, 0
 v1 = bint.i32 v3
 v2 = select v1, v1, v1
 trapz v1, user0
-brz v1, ebb1
-jump ebb2
+brz v1, block1
+jump block2
-ebb1:
+block1:
 return
-ebb2:
+block2:
 return
 }
 ; sameln: function %brz_bint
@@ -65,17 +65,17 @@ ebb2:
 ; nextln: v1 = bint.i32 v3
 ; nextln: v2 = select v3, v1, v1
 ; nextln: trapz v3, user0
-; nextln: brnz v3, ebb2
-; nextln: jump ebb1
+; nextln: brnz v3, block2
+; nextln: jump block1
 function %irsub_imm(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iconst.i32 2
 v2 = isub v1, v0
 return v2
 }
 ; sameln: function %irsub_imm
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v1 = iconst.i32 2
 ; nextln: v2 = irsub_imm v0, 2
 ; nextln: return v2
@@ -85,14 +85,14 @@ ebb0(v0: i32):
 ;; 8 -> 16
 function %uextend_8_16() -> i16 {
-ebb0:
+block0:
 v0 = iconst.i16 37
 v1 = ishl_imm v0, 8
 v2 = ushr_imm v1, 8
 return v2
 }
 ; sameln: function %uextend_8_16
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i16 37
 ; nextln: v1 = ishl_imm v0, 8
 ; nextln: v3 = ireduce.i8 v0
@@ -101,14 +101,14 @@ ebb0:
 ; nextln: }
 function %sextend_8_16() -> i16 {
-ebb0:
+block0:
 v0 = iconst.i16 37
 v1 = ishl_imm v0, 8
 v2 = sshr_imm v1, 8
 return v2
 }
 ; sameln: function %sextend_8_16
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i16 37
 ; nextln: v1 = ishl_imm v0, 8
 ; nextln: v3 = ireduce.i8 v0
@@ -118,14 +118,14 @@ ebb0:
 ;; 8 -> 32
 function %uextend_8_32() -> i32 {
-ebb0:
+block0:
 v0 = iconst.i32 37
 v1 = ishl_imm v0, 24
 v2 = ushr_imm v1, 24
 return v2
 }
 ; sameln: function %uextend_8_32
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 37
 ; nextln: v1 = ishl_imm v0, 24
 ; nextln: v3 = ireduce.i8 v0
@@ -134,14 +134,14 @@ ebb0:
 ; nextln: }
 function %sextend_8_32() -> i32 {
-ebb0:
+block0:
 v0 = iconst.i32 37
 v1 = ishl_imm v0, 24
 v2 = sshr_imm v1, 24
 return v2
 }
 ; sameln: function %sextend_8_32
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 37
 ; nextln: v1 = ishl_imm v0, 24
 ; nextln: v3 = ireduce.i8 v0
@@ -151,14 +151,14 @@ ebb0:
 ;; 16 -> 32
 function %uextend_16_32() -> i32 {
-ebb0:
+block0:
 v0 = iconst.i32 37
 v1 = ishl_imm v0, 16
 v2 = ushr_imm v1, 16
 return v2
 }
 ; sameln: function %uextend_16_32
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 37
 ; nextln: v1 = ishl_imm v0, 16
 ; nextln: v3 = ireduce.i16 v0
@@ -167,14 +167,14 @@ ebb0:
 ; nextln: }
 function %sextend_16_32() -> i32 {
-ebb0:
+block0:
 v0 = iconst.i32 37
 v1 = ishl_imm v0, 16
 v2 = sshr_imm v1, 16
 return v2
 }
 ; sameln: function %sextend_16_32
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i32 37
 ; nextln: v1 = ishl_imm v0, 16
 ; nextln: v3 = ireduce.i16 v0
@@ -184,14 +184,14 @@ ebb0:
 ;; 8 -> 64
 function %uextend_8_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 56
 v2 = ushr_imm v1, 56
 return v2
 }
 ; sameln: function %uextend_8_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 56
 ; nextln: v3 = ireduce.i8 v0
@@ -200,14 +200,14 @@ ebb0:
 ; nextln: }
 function %sextend_8_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 56
 v2 = sshr_imm v1, 56
 return v2
 }
 ; sameln: function %sextend_8_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 56
 ; nextln: v3 = ireduce.i8 v0
@@ -217,14 +217,14 @@ ebb0:
 ;; 16 -> 64
 function %uextend_16_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 48
 v2 = ushr_imm v1, 48
 return v2
 }
 ; sameln: function %uextend_16_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 48
 ; nextln: v3 = ireduce.i16 v0
@@ -233,14 +233,14 @@ ebb0:
 ; nextln: }
 function %sextend_16_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 48
 v2 = sshr_imm v1, 48
 return v2
 }
 ; sameln: function %sextend_16_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 48
 ; nextln: v3 = ireduce.i16 v0
@@ -250,14 +250,14 @@ ebb0:
 ;; 32 -> 64
 function %uextend_32_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 32
 v2 = ushr_imm v1, 32
 return v2
 }
 ; sameln: function %uextend_32_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 32
 ; nextln: v3 = ireduce.i32 v0
@@ -266,14 +266,14 @@ ebb0:
 ; nextln: }
 function %sextend_32_64() -> i64 {
-ebb0:
+block0:
 v0 = iconst.i64 37
 v1 = ishl_imm v0, 32
 v2 = sshr_imm v1, 32
 return v2
 }
 ; sameln: function %sextend_32_64
-; nextln: ebb0:
+; nextln: block0:
 ; nextln: v0 = iconst.i64 37
 ; nextln: v1 = ishl_imm v0, 32
 ; nextln: v3 = ireduce.i32 v0
@@ -282,13 +282,13 @@ ebb0:
 ; nextln: }
 function %add_imm_fold(i32) -> i32 {
-ebb0(v0: i32):
+block0(v0: i32):
 v1 = iadd_imm v0, 42
 v2 = iadd_imm v1, -42
 return v2
 }
 ; sameln: function %add_imm_fold(i32)
-; nextln: ebb0(v0: i32):
+; nextln: block0(v0: i32):
 ; nextln: v2 -> v0
 ; nextln: v1 = iadd_imm v0, 42
 ; nextln: nop