Mass rename Ebb and relatives to Block (#1365)

* Manually rename BasicBlock to BlockPredecessor

BasicBlock is a pair of (Ebb, Inst) that is used to represent the
basic block subcomponent of an Ebb that is a predecessor to an Ebb.

Eventually we will be able to remove this struct, but for now it
makes sense to give it a non-conflicting name so that we can start
to transition Ebb to represent a basic block.

I have not updated any comments that refer to BasicBlock, as
eventually we will remove BlockPredecessor and replace it with
Block, which is a basic block, so the comments will become correct.

* Manually rename SSABuilder block types to avoid conflict

SSABuilder has its own Block and BlockData types. These, along with
associated identifiers, will cause conflicts in a later commit, so
they are renamed to be more verbose here.

* Automatically rename 'Ebb' to 'Block' in *.rs

* Automatically rename 'EBB' to 'block' in *.rs

* Automatically rename 'ebb' to 'block' in *.rs

* Automatically rename 'extended basic block' to 'basic block' in *.rs

* Automatically rename 'an basic block' to 'a basic block' in *.rs

* Manually update comment for `Block`

`Block`'s wikipedia article required an update.

* Automatically rename 'an `Block`' to 'a `Block`' in *.rs

* Automatically rename 'extended_basic_block' to 'basic_block' in *.rs

* Automatically rename 'ebb' to 'block' in *.clif

* Manually rename clif constant that contains 'ebb' as substring to avoid conflict

* Automatically rename filecheck uses of 'EBB' to 'BB'

'regex: EBB' -> 'regex: BB'
'$EBB' -> '$BB'

* Automatically rename 'EBB' 'Ebb' to 'block' in *.clif

* Automatically rename 'an block' to 'a block' in *.clif

* Fix broken testcase when function name length increases

Test function names are limited to 16 characters. This causes the
new, longer name to be truncated, which fails a filecheck test. An
outdated comment was also fixed.
This commit is contained in:
Ryan Hunt
2020-02-07 10:46:47 -06:00
committed by GitHub
parent a136d1cb00
commit 832666c45e
370 changed files with 8090 additions and 7988 deletions

View File

@@ -5,33 +5,33 @@ test verifier
function %nonsense(i32, i32) -> f32 {
; regex: I=\binst\d+\b
; check: digraph "%nonsense" {
; check: ebb0 [shape=record, label="{ebb0(v1: i32, v2: i32):
; check: | <$(BRZ=$I)>brz v2, ebb2
; nextln: | <$(JUMP0=$I)>jump ebb3
; check: block0 [shape=record, label="{block0(v1: i32, v2: i32):
; check: | <$(BRZ=$I)>brz v2, block2
; nextln: | <$(JUMP0=$I)>jump block3
; nextln: }"]
; nextln: ebb3 [shape=record, label="{ebb3:
; check: | <$(JUMP3=$I)>jump ebb1(v4)
; nextln: block3 [shape=record, label="{block3:
; check: | <$(JUMP3=$I)>jump block1(v4)
; nextln: }"]
; nextln: ebb1 [shape=record, label="{ebb1(v5: i32):
; check: | <$(BRNZ1=$I)>brnz v13, ebb1(v12)
; nextln: | <$(JUMP1=$I)>jump ebb4
; nextln: block1 [shape=record, label="{block1(v5: i32):
; check: | <$(BRNZ1=$I)>brnz v13, block1(v12)
; nextln: | <$(JUMP1=$I)>jump block4
; nextln: }"]
; nextln: ebb4 [shape=record, label="{ebb4:
; nextln: block4 [shape=record, label="{block4:
; check: | <$I>return v17
; nextln: }"]
; nextln: ebb2 [shape=record, label="{ebb2:
; nextln: block2 [shape=record, label="{block2:
; check: | <$I>return v100
; check:}"]
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v3 = f64const 0x0.0
brz v2, ebb2 ; unordered: ebb0:$BRZ -> ebb2
jump ebb3 ; unordered: ebb0:$JUMP0 -> ebb3
brz v2, block2 ; unordered: block0:$BRZ -> block2
jump block3 ; unordered: block0:$JUMP0 -> block3
ebb3:
block3:
v4 = iconst.i32 0
jump ebb1(v4) ; unordered: ebb3:$JUMP3 -> ebb1
jump block1(v4) ; unordered: block3:$JUMP3 -> block1
ebb1(v5: i32):
block1(v5: i32):
v6 = imul_imm v5, 4
v7 = iadd v1, v6
v8 = f32const 0.0
@@ -40,17 +40,17 @@ ebb1(v5: i32):
v11 = fadd v9, v10
v12 = iadd_imm v5, 1
v13 = icmp ult v12, v2
brnz v13, ebb1(v12) ; unordered: ebb1:$BRNZ1 -> ebb1
jump ebb4 ; unordered: ebb1:$JUMP1 -> ebb4
brnz v13, block1(v12) ; unordered: block1:$BRNZ1 -> block1
jump block4 ; unordered: block1:$JUMP1 -> block4
ebb4:
block4:
v14 = f64const 0.0
v15 = f64const 0.0
v16 = fdiv v14, v15
v17 = f32const 0.0
return v17
ebb2:
block2:
v100 = f32const 0.0
return v100
}

View File

@@ -6,16 +6,16 @@ test verifier
function %nonsense(i32) {
; check: digraph "%nonsense" {
ebb0(v1: i32):
block0(v1: i32):
trap user0 ; error: terminator instruction was encountered before the end
brnz v1, ebb2 ; unordered: ebb0:inst1 -> ebb2
jump ebb1 ; unordered: ebb0:inst2 -> ebb1
brnz v1, block2 ; unordered: block0:inst1 -> block2
jump block1 ; unordered: block0:inst2 -> block1
ebb1:
block1:
v2 = iconst.i32 0
v3 = iadd v1, v3
jump ebb0(v3) ; unordered: ebb1:inst5 -> ebb0
jump block0(v3) ; unordered: block1:inst5 -> block0
ebb2:
block2:
return v1
}

View File

@@ -3,25 +3,25 @@ test print-cfg
function %not_reached(i32) -> i32 {
; check: digraph "%not_reached" {
; check: ebb0 [shape=record, label="{ebb0(v0: i32):
; check: | <inst0>brnz v0, ebb2
; check: block0 [shape=record, label="{block0(v0: i32):
; check: | <inst0>brnz v0, block2
; check: | <inst1>trap user0
; check: }"]
; check: ebb1 [shape=record, label="{ebb1:
; check: | <inst4>jump ebb0(v2)
; check: block1 [shape=record, label="{block1:
; check: | <inst4>jump block0(v2)
; check: }"]
; check: ebb2 [shape=record, label="{ebb2:
; check: block2 [shape=record, label="{block2:
; check: | <inst5>return v0
; check: }"]
ebb0(v0: i32):
brnz v0, ebb2 ; unordered: ebb0:inst0 -> ebb2
block0(v0: i32):
brnz v0, block2 ; unordered: block0:inst0 -> block2
trap user0
ebb1:
block1:
v1 = iconst.i32 1
v2 = iadd v0, v1
jump ebb0(v2) ; unordered: ebb1:inst4 -> ebb0
jump block0(v2) ; unordered: block1:inst4 -> block0
ebb2:
block2:
return v0
}

View File

@@ -1,46 +1,46 @@
test dce
function %simple() -> i32 {
ebb0:
block0:
v2 = iconst.i32 2
v3 = iconst.i32 3
return v3
}
; sameln: function %simple
; nextln: ebb0:
; nextln: block0:
; nextln: v3 = iconst.i32 3
; nextln: return v3
; nextln: }
function %some_branching(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
v3 = iconst.i32 70
v4 = iconst.i32 71
v5 = iconst.i32 72
v8 = iconst.i32 73
brz v0, ebb1
jump ebb2(v8)
brz v0, block1
jump block2(v8)
ebb1:
block1:
v2 = iadd v0, v3
return v0
ebb2(v9: i32):
block2(v9: i32):
v6 = iadd v1, v4
v7 = iadd v6, v9
return v7
}
; sameln: function %some_branching
; nextln: ebb0(v0: i32, v1: i32):
; nextln: block0(v0: i32, v1: i32):
; nextln: v4 = iconst.i32 71
; nextln: v8 = iconst.i32 73
; nextln: brz v0, ebb1
; nextln: jump ebb2(v8)
; nextln: brz v0, block1
; nextln: jump block2(v8)
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: return v0
; nextln:
; nextln: ebb2(v9: i32):
; nextln: block2(v9: i32):
; nextln: v6 = iadd.i32 v1, v4
; nextln: v7 = iadd v6, v9
; nextln: return v7

View File

@@ -1,25 +1,25 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb3 ; dominates: ebb3
jump ebb2 ; dominates: ebb2
ebb2:
jump ebb3
ebb3:
block0(v0: i32):
jump block1 ; dominates: block1
block1:
brz v0, block3 ; dominates: block3
jump block2 ; dominates: block2
block2:
jump block3
block3:
return
}
; check: cfg_postorder:
; sameln: ebb2
; sameln: ebb3
; sameln: ebb1
; sameln: ebb0
; sameln: block2
; sameln: block3
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1
; nextln: ebb1: ebb3 ebb2
; nextln: ebb3:
; nextln: ebb2:
; nextln: block0: block1
; nextln: block1: block3 block2
; nextln: block3:
; nextln: block2:
; nextln: }

View File

@@ -1,118 +1,118 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb5
ebb5:
brz v0, ebb4
jump ebb6 ; dominates: ebb6
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block5
block5:
brz v0, block4
jump block6 ; dominates: block6
block6:
return
}
; Fall-through-first, prune-at-source DFT:
;
; ebb0 {
; ebb0:brz v0, ebb1 {
; ebb0:jump ebb2 {
; ebb2 {
; ebb2:brz v2, ebb2 -
; ebb2:brz v3, ebb1 -
; ebb2:brz v4, ebb4 {
; ebb2: jump ebb5 {
; ebb5: jump ebb6 {
; ebb6 {}
; block0 {
; block0:brz v0, block1 {
; block0:jump block2 {
; block2 {
; block2:brz v2, block2 -
; block2:brz v3, block1 -
; block2:brz v4, block4 {
; block2: jump block5 {
; block5: jump block6 {
; block6 {}
; }
; }
; ebb4 {}
; block4 {}
; }
; } ebb2
; } block2
; }
; ebb1 {
; ebb1:jump ebb3 {
; ebb3 {}
; block1 {
; block1:jump block3 {
; block3 {}
; }
; } ebb1
; } block1
; }
; } ebb0
; } block0
;
; check: cfg_postorder:
; sameln: ebb6
; sameln: ebb5
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block6
; sameln: block5
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4:
; nextln: ebb3:
; nextln: ebb5: ebb6
; nextln: ebb6:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4:
; nextln: block3:
; nextln: block5: block6
; nextln: block6:
; nextln: }
function %loop2(i32) system_v {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb8 ; dominates: ebb8
ebb8:
brnz v0, ebb5
jump ebb6 ; dominates: ebb6
ebb5:
brz v0, ebb4
jump ebb9 ; dominates: ebb9
ebb9:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block8 ; dominates: block8
block8:
brnz v0, block5
jump block6 ; dominates: block6
block5:
brz v0, block4
jump block9 ; dominates: block9
block9:
trap user0
ebb6:
jump ebb7 ; dominates: ebb7
ebb7:
block6:
jump block7 ; dominates: block7
block7:
return
}
; check: cfg_postorder:
; sameln: ebb9
; sameln: ebb5
; sameln: ebb7
; sameln: ebb6
; sameln: ebb8
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block9
; sameln: block5
; sameln: block7
; sameln: block6
; sameln: block8
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4: ebb8
; nextln: ebb8: ebb6
; nextln: ebb6: ebb7
; nextln: ebb7:
; nextln: ebb3:
; nextln: ebb5: ebb9
; nextln: ebb9:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4: block8
; nextln: block8: block6
; nextln: block6: block7
; nextln: block7:
; nextln: block3:
; nextln: block5: block9
; nextln: block9:
; nextln: }

View File

@@ -1,92 +1,92 @@
test domtree
function %loop1(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb6
jump ebb10 ; dominates: ebb10
ebb10:
brnz v0, ebb2 ; dominates: ebb2 ebb9
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb6
ebb2:
brz v0, ebb4 ; dominates: ebb4 ebb7 ebb8
jump ebb5 ; dominates: ebb5
ebb3:
jump ebb9
ebb4:
brz v0, ebb4
jump ebb11 ; dominates: ebb11
ebb11:
brnz v0, ebb6
jump ebb7
ebb5:
brz v0, ebb7
jump ebb12 ; dominates: ebb12
ebb12:
brnz v0, ebb8
jump ebb9
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block6
jump block10 ; dominates: block10
block10:
brnz v0, block2 ; dominates: block2 block9
jump block3 ; dominates: block3
block1:
jump block6
block2:
brz v0, block4 ; dominates: block4 block7 block8
jump block5 ; dominates: block5
block3:
jump block9
block4:
brz v0, block4
jump block11 ; dominates: block11
block11:
brnz v0, block6
jump block7
block5:
brz v0, block7
jump block12 ; dominates: block12
block12:
brnz v0, block8
jump block9
block6:
return
ebb7:
jump ebb8
ebb8:
block7:
jump block8
block8:
return
ebb9:
block9:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb10 ebb6
; nextln: ebb1:
; nextln: ebb10: ebb2 ebb3 ebb9
; nextln: ebb2: ebb4 ebb5 ebb7 ebb8
; nextln: ebb4: ebb11
; nextln: ebb11:
; nextln: ebb5: ebb12
; nextln: ebb12:
; nextln: ebb7:
; nextln: ebb8:
; nextln: ebb3:
; nextln: ebb9:
; nextln: ebb6:
; nextln: block0: block1 block10 block6
; nextln: block1:
; nextln: block10: block2 block3 block9
; nextln: block2: block4 block5 block7 block8
; nextln: block4: block11
; nextln: block11:
; nextln: block5: block12
; nextln: block12:
; nextln: block7:
; nextln: block8:
; nextln: block3:
; nextln: block9:
; nextln: block6:
; nextln: }
function %loop2(i32) system_v {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb5
ebb5:
brz v0, ebb4
jump ebb6 ; dominates: ebb6
ebb6:
block0(v0: i32):
brz v0, block1 ; dominates: block1 block3 block4 block5
jump block2 ; dominates: block2
block1:
jump block3
block2:
brz v0, block4
jump block5
block3:
jump block4
block4:
brz v0, block3
jump block5
block5:
brz v0, block4
jump block6 ; dominates: block6
block6:
return
}
; check: cfg_postorder:
; sameln: ebb6
; sameln: ebb5
; sameln: ebb3
; sameln: ebb4
; sameln: ebb2
; sameln: ebb1
; sameln: ebb0
; sameln: block6
; sameln: block5
; sameln: block3
; sameln: block4
; sameln: block2
; sameln: block1
; sameln: block0
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb2 ebb4 ebb3 ebb5
; nextln: ebb1:
; nextln: ebb2:
; nextln: ebb4:
; nextln: ebb3:
; nextln: ebb5: ebb6
; nextln: ebb6:
; nextln: block0: block1 block2 block4 block3 block5
; nextln: block1:
; nextln: block2:
; nextln: block4:
; nextln: block3:
; nextln: block5: block6
; nextln: block6:
; nextln: }

View File

@@ -1,54 +1,54 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1
jump ebb12 ; dominates: ebb12
ebb12:
brnz v0, ebb2 ; dominates: ebb2 ebb5
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb4 ; dominates: ebb4
ebb2:
jump ebb5
ebb3:
jump ebb5
ebb4:
brz v0, ebb6 ; dominates: ebb6 ebb10
jump ebb7 ; dominates: ebb7
ebb5:
block0(v0: i32):
brz v0, block1 ; dominates: block1
jump block12 ; dominates: block12
block12:
brnz v0, block2 ; dominates: block2 block5
jump block3 ; dominates: block3
block1:
jump block4 ; dominates: block4
block2:
jump block5
block3:
jump block5
block4:
brz v0, block6 ; dominates: block6 block10
jump block7 ; dominates: block7
block5:
return
ebb6:
brz v0, ebb8 ; dominates: ebb11 ebb8
jump ebb13 ; dominates: ebb13
ebb13:
brnz v0, ebb9 ; dominates: ebb9
jump ebb10
ebb7:
jump ebb10
ebb8:
jump ebb11
ebb9:
jump ebb11
ebb10:
block6:
brz v0, block8 ; dominates: block11 block8
jump block13 ; dominates: block13
block13:
brnz v0, block9 ; dominates: block9
jump block10
block7:
jump block10
block8:
jump block11
block9:
jump block11
block10:
return
ebb11:
block11:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb1 ebb12
; nextln: ebb1: ebb4
; nextln: ebb4: ebb6 ebb7 ebb10
; nextln: ebb6: ebb8 ebb13 ebb11
; nextln: ebb8:
; nextln: ebb13: ebb9
; nextln: ebb9:
; nextln: ebb11:
; nextln: ebb7:
; nextln: ebb10:
; nextln: ebb12: ebb2 ebb3 ebb5
; nextln: ebb2:
; nextln: ebb3:
; nextln: ebb5:
; nextln: block0: block1 block12
; nextln: block1: block4
; nextln: block4: block6 block7 block10
; nextln: block6: block8 block13 block11
; nextln: block8:
; nextln: block13: block9
; nextln: block9:
; nextln: block11:
; nextln: block7:
; nextln: block10:
; nextln: block12: block2 block3 block5
; nextln: block2:
; nextln: block3:
; nextln: block5:
; nextln: }

View File

@@ -1,73 +1,73 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb13 ; dominates: ebb13
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb2 ; dominates: ebb2 ebb7
jump ebb20 ; dominates: ebb20
ebb20:
brnz v0, ebb3 ; dominates: ebb3
jump ebb21 ; dominates: ebb21
ebb21:
brz v0, ebb4 ; dominates: ebb4
jump ebb22 ; dominates: ebb22
ebb22:
brnz v0, ebb5 ; dominates: ebb5
jump ebb6 ; dominates: ebb6
ebb2:
jump ebb7
ebb3:
jump ebb7
ebb4:
jump ebb7
ebb5:
jump ebb7
ebb6:
jump ebb7
ebb7:
brnz v0, ebb8 ; dominates: ebb8 ebb12
jump ebb23 ; dominates: ebb23
ebb23:
brz v0, ebb9 ; dominates: ebb9
jump ebb24 ; dominates: ebb24
ebb24:
brnz v0, ebb10 ; dominates: ebb10
jump ebb11 ; dominates: ebb11
ebb8:
jump ebb12
ebb9:
jump ebb12
ebb10:
brz v0, ebb13
jump ebb12
ebb11:
jump ebb13
ebb12:
block0(v0: i32):
brz v0, block13 ; dominates: block13
jump block1 ; dominates: block1
block1:
brz v0, block2 ; dominates: block2 block7
jump block20 ; dominates: block20
block20:
brnz v0, block3 ; dominates: block3
jump block21 ; dominates: block21
block21:
brz v0, block4 ; dominates: block4
jump block22 ; dominates: block22
block22:
brnz v0, block5 ; dominates: block5
jump block6 ; dominates: block6
block2:
jump block7
block3:
jump block7
block4:
jump block7
block5:
jump block7
block6:
jump block7
block7:
brnz v0, block8 ; dominates: block8 block12
jump block23 ; dominates: block23
block23:
brz v0, block9 ; dominates: block9
jump block24 ; dominates: block24
block24:
brnz v0, block10 ; dominates: block10
jump block11 ; dominates: block11
block8:
jump block12
block9:
jump block12
block10:
brz v0, block13
jump block12
block11:
jump block13
block12:
return
ebb13:
block13:
return
}
; check: domtree_preorder {
; nextln: ebb0: ebb13 ebb1
; nextln: ebb13:
; nextln: ebb1: ebb2 ebb20 ebb7
; nextln: ebb2:
; nextln: ebb20: ebb3 ebb21
; nextln: ebb3:
; nextln: ebb21: ebb4 ebb22
; nextln: ebb4:
; nextln: ebb22: ebb5 ebb6
; nextln: ebb5:
; nextln: ebb6:
; nextln: ebb7: ebb8 ebb23 ebb12
; nextln: ebb8:
; nextln: ebb23: ebb9 ebb24
; nextln: ebb9:
; nextln: ebb24: ebb10 ebb11
; nextln: ebb10:
; nextln: ebb11:
; nextln: ebb12:
; nextln: block0: block13 block1
; nextln: block13:
; nextln: block1: block2 block20 block7
; nextln: block2:
; nextln: block20: block3 block21
; nextln: block3:
; nextln: block21: block4 block22
; nextln: block4:
; nextln: block22: block5 block6
; nextln: block5:
; nextln: block6:
; nextln: block7: block8 block23 block12
; nextln: block8:
; nextln: block23: block9 block24
; nextln: block9:
; nextln: block24: block10 block11
; nextln: block10:
; nextln: block11:
; nextln: block12:
; nextln: }

View File

@@ -9,6 +9,6 @@ function %f() {
; available in RV32E.
sig0 = (i64, i64, i64, i64) -> i64 system_v
; check: sig0 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11] system_v
ebb0:
block0:
return
}

View File

@@ -27,6 +27,6 @@ function %f() {
sig5 = (i64x4) system_v
; check: sig5 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17]) system_v
ebb0:
block0:
return
}

View File

@@ -6,7 +6,7 @@ function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
sig0 = ()
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x21] v2 = iconst.i32 2
@@ -94,96 +94,96 @@ ebb0(v9999: i32):
call_indirect sig0, v1() ; bin: 000500e7
call_indirect sig0, v2() ; bin: 000a80e7
brz v1, ebb3
fallthrough ebb4
brz v1, block3
fallthrough block4
ebb4:
brnz v1, ebb1
fallthrough ebb5
block4:
brnz v1, block1
fallthrough block5
ebb5:
block5:
; jalr %x0, %x1, 0
return v9999 ; bin: 00008067
ebb1:
block1:
; beq 0x000
br_icmp eq v1, v2, ebb1 ; bin: 01550063
fallthrough ebb100
br_icmp eq v1, v2, block1 ; bin: 01550063
fallthrough block100
ebb100:
block100:
; bne 0xffc
br_icmp ne v1, v2, ebb1 ; bin: ff551ee3
fallthrough ebb101
br_icmp ne v1, v2, block1 ; bin: ff551ee3
fallthrough block101
ebb101:
block101:
; blt 0xff8
br_icmp slt v1, v2, ebb1 ; bin: ff554ce3
fallthrough ebb102
br_icmp slt v1, v2, block1 ; bin: ff554ce3
fallthrough block102
ebb102:
block102:
; bge 0xff4
br_icmp sge v1, v2, ebb1 ; bin: ff555ae3
fallthrough ebb103
br_icmp sge v1, v2, block1 ; bin: ff555ae3
fallthrough block103
ebb103:
block103:
; bltu 0xff0
br_icmp ult v1, v2, ebb1 ; bin: ff5568e3
fallthrough ebb104
br_icmp ult v1, v2, block1 ; bin: ff5568e3
fallthrough block104
ebb104:
block104:
; bgeu 0xfec
br_icmp uge v1, v2, ebb1 ; bin: ff5576e3
fallthrough ebb105
br_icmp uge v1, v2, block1 ; bin: ff5576e3
fallthrough block105
ebb105:
block105:
; Forward branches.
fallthrough ebb106
fallthrough block106
ebb106:
block106:
; beq 0x018
br_icmp eq v2, v1, ebb2 ; bin: 00aa8c63
fallthrough ebb107
br_icmp eq v2, v1, block2 ; bin: 00aa8c63
fallthrough block107
ebb107:
block107:
; bne 0x014
br_icmp ne v2, v1, ebb2 ; bin: 00aa9a63
fallthrough ebb108
br_icmp ne v2, v1, block2 ; bin: 00aa9a63
fallthrough block108
ebb108:
block108:
; blt 0x010
br_icmp slt v2, v1, ebb2 ; bin: 00aac863
fallthrough ebb109
br_icmp slt v2, v1, block2 ; bin: 00aac863
fallthrough block109
ebb109:
block109:
; bge 0x00c
br_icmp sge v2, v1, ebb2 ; bin: 00aad663
fallthrough ebb110
br_icmp sge v2, v1, block2 ; bin: 00aad663
fallthrough block110
ebb110:
block110:
; bltu 0x008
br_icmp ult v2, v1, ebb2 ; bin: 00aae463
fallthrough ebb111
br_icmp ult v2, v1, block2 ; bin: 00aae463
fallthrough block111
ebb111:
block111:
; bgeu 0x004
br_icmp uge v2, v1, ebb2 ; bin: 00aaf263
br_icmp uge v2, v1, block2 ; bin: 00aaf263
fallthrough ebb2
fallthrough block2
ebb2:
block2:
; jal %x0, 0x00000
jump ebb2 ; bin: 0000006f
jump block2 ; bin: 0000006f
ebb3:
block3:
; beq x, %x0
brz v1, ebb3 ; bin: 00050063
fallthrough ebb6
brz v1, block3 ; bin: 00050063
fallthrough block6
ebb6:
block6:
; bne x, %x0
brnz v1, ebb3 ; bin: fe051ee3
brnz v1, block3 ; bin: fe051ee3
; jal %x0, 0x1ffff4
jump ebb2 ; bin: ff5ff06f
jump block2 ; bin: ff5ff06f
}

View File

@@ -2,7 +2,7 @@ test legalizer
target riscv32 supports_m=1
function %int32(i32, i32) {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v10 = iadd v1, v2
; check: [R#0c]
; sameln: v10 = iadd

View File

@@ -8,7 +8,7 @@ target riscv64 supports_m=1
; regex: V=v\d+
function %carry_out(i32, i32) -> i32, b1 {
ebb0(v1: i32, v2: i32):
block0(v1: i32, v2: i32):
v3, v4 = iadd_cout v1, v2
return v3, v4
}
@@ -19,7 +19,7 @@ ebb0(v1: i32, v2: i32):
; Expanding illegal immediate constants.
; Note that at some point we'll probably expand the iconst as well.
function %large_imm(i32) -> i32 {
ebb0(v0: i32):
block0(v0: i32):
v1 = iadd_imm v0, 1000000000
return v1
}
@@ -28,7 +28,7 @@ ebb0(v0: i32):
; check: return v1
function %bitclear(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
v2 = band_not v0, v1
; check: iconst.i32 -1
; check: bxor

View File

@@ -7,8 +7,8 @@ target riscv32
; regex: WS=\s+
function %int_split_args(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
block0(v0: i64):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: v0 = iconcat $v0l, $v0h
v1 = iadd_imm v0, 1
; check: $(v1l=$V), $(v1h=$V) = isplit v1
@@ -19,7 +19,7 @@ ebb0(v0: i64):
function %split_call_arg(i32) {
fn1 = %foo(i64)
fn2 = %foo(i32, i64)
ebb0(v0: i32):
block0(v0: i32):
v1 = uextend.i64 v0
call fn1(v1)
; check: $(v1l=$V), $(v1h=$V) = isplit v1
@@ -31,36 +31,36 @@ ebb0(v0: i32):
function %split_ret_val() {
fn1 = %foo() -> i64
ebb0:
block0:
v1 = call fn1()
; check: ebb0($(link=$V): i32):
; check: block0($(link=$V): i32):
; nextln: $(v1l=$V), $(v1h=$V) = call fn1()
; check: v1 = iconcat $v1l, $v1h
jump ebb1(v1)
; check: jump ebb1(v1)
jump block1(v1)
; check: jump block1(v1)
ebb1(v10: i64):
jump ebb1(v10)
block1(v10: i64):
jump block1(v10)
}
; First return value is fine, second one is expanded.
function %split_ret_val2() {
fn1 = %foo() -> i32, i64
ebb0:
block0:
v1, v2 = call fn1()
; check: ebb0($(link=$V): i32):
; check: block0($(link=$V): i32):
; nextln: v1, $(v2l=$V), $(v2h=$V) = call fn1()
; check: v2 = iconcat $v2l, $v2h
jump ebb1(v1, v2)
; check: jump ebb1(v1, v2)
jump block1(v1, v2)
; check: jump block1(v1, v2)
ebb1(v9: i32, v10: i64):
jump ebb1(v9, v10)
block1(v9: i32, v10: i64):
jump block1(v9, v10)
}
function %int_ext(i8, i8 sext, i8 uext) -> i8 uext {
ebb0(v1: i8, v2: i8, v3: i8):
; check: ebb0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
block0(v1: i8, v2: i8, v3: i8):
; check: block0(v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
; check: v2 = ireduce.i8 $v2x
; check: v3 = ireduce.i8 $v3x
; check: $(v1x=$V) = uextend.i32 v1
@@ -71,21 +71,21 @@ ebb0(v1: i8, v2: i8, v3: i8):
; Function produces single return value, still need to copy.
function %ext_ret_val() {
fn1 = %foo() -> i8 sext
ebb0:
block0:
v1 = call fn1()
; check: ebb0($V: i32):
; check: block0($V: i32):
; nextln: $(rv=$V) = call fn1()
; check: v1 = ireduce.i8 $rv
jump ebb1(v1)
; check: jump ebb1(v1)
jump block1(v1)
; check: jump block1(v1)
ebb1(v10: i8):
jump ebb1(v10)
block1(v10: i8):
jump block1(v10)
}
function %vector_split_args(i64x4) -> i64x4 {
ebb0(v0: i64x4):
; check: ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
block0(v0: i64x4):
; check: block0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
; check: $(v0a=$V) = iconcat $v0al, $v0ah
; check: $(v0b=$V) = iconcat $v0bl, $v0bh
; check: $(v0ab=$V) = vconcat $v0a, $v0b
@@ -107,7 +107,7 @@ ebb0(v0: i64x4):
function %indirect(i32) {
sig1 = () system_v
ebb0(v0: i32):
block0(v0: i32):
call_indirect sig1, v0()
return
}
@@ -115,7 +115,7 @@ ebb0(v0: i32):
; The first argument to call_indirect doesn't get altered.
function %indirect_arg(i32, f32x2) {
sig1 = (f32x2) system_v
ebb0(v0: i32, v1: f32x2):
block0(v0: i32, v1: f32x2):
call_indirect sig1, v0(v1)
; check: call_indirect sig1, v0($V, $V)
return
@@ -125,7 +125,7 @@ ebb0(v0: i32, v1: f32x2):
function %stack_args(i32) {
; check: $(ss0=$SS) = outgoing_arg 4
fn1 = %foo(i64, i64, i64, i64, i32)
ebb0(v0: i32):
block0(v0: i32):
v1 = iconst.i64 1
call fn1(v1, v1, v1, v1, v0)
; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill v0

View File

@@ -5,11 +5,11 @@ target riscv32 supports_m=1
; regex: V=v\d+
function %bitwise_and(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = band v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#ec
; sameln: $(v3l=$V) = band $v1l, $v2l
; check: [R#ec
@@ -18,11 +18,11 @@ ebb0(v1: i64, v2: i64):
; check: return $v3l, $v3h, $link
function %bitwise_or(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = bor v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#cc
; sameln: $(v3l=$V) = bor $v1l, $v2l
; check: [R#cc
@@ -31,11 +31,11 @@ ebb0(v1: i64, v2: i64):
; check: return $v3l, $v3h, $link
function %bitwise_xor(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = bxor v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#8c
; sameln: $(v3l=$V) = bxor $v1l, $v2l
; check: [R#8c
@@ -47,11 +47,11 @@ function %arith_add(i64, i64) -> i64 {
; Legalizing iadd.i64 requires two steps:
; 1. Narrow to iadd_cout.i32, then
; 2. Expand iadd_cout.i32 since RISC-V has no carry flag.
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v3 = iadd v1, v2
return v3
}
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#0c
; sameln: $(v3l=$V) = iadd $v1l, $v2l
; check: $(c=$V) = icmp ult $v3l, $v1l

View File

@@ -4,11 +4,11 @@ target riscv32
; regex: V=v\d+
function %icmp_imm_eq(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm eq v0, 0x20202020_10101010
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)
@@ -20,11 +20,11 @@ ebb0(v0: i64):
; nextln: return v1, $(link)
function %icmp_imm_ne(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm ne v0, 0x33333333_44444444
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)
@@ -36,11 +36,11 @@ ebb0(v0: i64):
; nextln: return v1, $(link)
function %icmp_imm_sge(i64) -> b1 {
ebb0(v0: i64):
block0(v0: i64):
v1 = icmp_imm sge v0, 0x01020304_05060708
return v1
}
; check: ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: block0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; nextln: $(v2l=$V) -> $(v0l)
; nextln: $(v2h=$V) -> $(v0h)
; nextln: v0 = iconcat $(v0l), $(v0h)

View File

@@ -31,6 +31,6 @@ function %parse_encoding(i32 [%x5]) -> i32 [%x10] {
; check: sig6 = (i32 [%x10]) -> b1 [%x10] system_v
; nextln: fn0 = %bar sig6
ebb0(v0: i32):
block0(v0: i32):
return v0
}

View File

@@ -3,7 +3,7 @@ test binemit
target riscv32
function %regmoves(i32 link [%x1]) -> i32 link [%x1] {
ebb0(v9999: i32):
block0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x7] v2 = iadd_imm v1, 1000 ; bin: 3e850393
regmove v1, %x10 -> %x11 ; bin: 00050593

View File

@@ -1,17 +1,17 @@
; Test the legalization of EBB arguments that are split.
; Test the legalization of block arguments that are split.
test legalizer
target riscv32
; regex: V=v\d+
function %simple(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump ebb1($v1l, $v1h)
block0(v1: i64, v2: i64):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump block1(v1)
; check: jump block1($v1l, $v1h)
ebb1(v3: i64):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
block1(v3: i64):
; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
@@ -20,18 +20,18 @@ ebb1(v3: i64):
}
function %multi(i64) -> i64 {
ebb1(v1: i64):
; check: ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump ebb2(v1, v1)
; check: jump ebb2($v1l, $v1l, $v1h, $v1h)
block1(v1: i64):
; check: block1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump block2(v1, v1)
; check: jump block2($v1l, $v1l, $v1h, $v1h)
ebb2(v2: i64, v3: i64):
; check: ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump ebb3(v2)
; check: jump ebb3($v2l, $v2h)
block2(v2: i64, v3: i64):
; check: block2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump block3(v2)
; check: jump block3($v2l, $v2h)
ebb3(v4: i64):
; check: ebb3($(v4l=$V): i32, $(v4h=$V): i32):
block3(v4: i64):
; check: block3($(v4l=$V): i32, $(v4h=$V): i32):
v5 = band v4, v3
; check: $(v5l=$V) = band $v4l, $v3l
; check: $(v5h=$V) = band $v4h, $v3h
@@ -40,16 +40,16 @@ ebb3(v4: i64):
}
function %loop(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump ebb1($v1l, $v1h)
block0(v1: i64, v2: i64):
; check: block0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump block1(v1)
; check: jump block1($v1l, $v1h)
ebb1(v3: i64):
; check: ebb1($(v3l=$V): i32, $(v3h=$V): i32):
block1(v3: i64):
; check: block1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
jump ebb1(v4)
; check: jump ebb1($v4l, $v4h)
jump block1(v4)
; check: jump block1($v4l, $v4h)
}

View File

@@ -4,7 +4,7 @@ target riscv32
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
; iconst.i32 needs legalizing, so it should throw a
[R#0,-] v1 = iconst.i32 0xf0f0f0f0f0 ; error: Instruction failed to re-encode
[Iret#19] return v9999
@@ -13,7 +13,7 @@ ebb0(v9999: i32):
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = %foo()
ebb0(v9999: i32):
block0(v9999: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
[R#0,-] v3 = iadd v1, v2 ; error: encoding R#00 should be R#0c

View File

@@ -5,7 +5,7 @@ target i686
; allocator can move it to a register that can be.
function %test(i32 [%rdi]) -> i32 system_v {
ebb0(v0: i32 [%rdi]):
block0(v0: i32 [%rdi]):
v1 = ireduce.i8 v0
v2 = sextend.i32 v1
return v2

View File

@@ -2,18 +2,18 @@ test compile
target x86_64 haswell
function %foo(i64, i64, i64, i32) -> b1 system_v {
ebb3(v0: i64, v1: i64, v2: i64, v3: i32):
block3(v0: i64, v1: i64, v2: i64, v3: i32):
v5 = icmp ne v2, v2
v8 = iconst.i64 0
jump ebb2(v8, v3, v5)
jump block2(v8, v3, v5)
ebb2(v10: i64, v30: i32, v37: b1):
block2(v10: i64, v30: i32, v37: b1):
v18 = load.i32 notrap aligned v2
v27 = iadd.i64 v10, v10
v31 = icmp eq v30, v30
brz v31, ebb2(v27, v30, v37)
jump ebb0(v37)
brz v31, block2(v27, v30, v37)
jump block0(v37)
ebb0(v35: b1):
block0(v35: b1):
return v35
}

View File

@@ -14,7 +14,7 @@ function %f() {
sig2 = (f32, i64) -> f64 system_v
; check: sig2 = (f32 [0], i32 [4], i32 [8]) -> f64 [%xmm0] system_v
ebb0:
block0:
return
}

View File

@@ -14,7 +14,7 @@ function %f() {
sig2 = (f32, i64) -> f64 system_v
; check: sig2 = (f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0] system_v
ebb0:
block0:
return
}
@@ -22,10 +22,10 @@ function %pass_stack_int64(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64
sig0 = (i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 vmctx) baldrdash_system_v
fn0 = u0:0 sig0
ebb0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64, v6: i64, v7: i64, v8: i64, v9: i64, v10: i64, v11: i64, v12: i64, v13: i64, v14: i64, v15: i64, v16: i64, v17: i64, v18: i64, v19: i64, v20: i64):
block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64, v6: i64, v7: i64, v8: i64, v9: i64, v10: i64, v11: i64, v12: i64, v13: i64, v14: i64, v15: i64, v16: i64, v17: i64, v18: i64, v19: i64, v20: i64):
call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20)
jump ebb1
jump block1
ebb1:
block1:
return
}

View File

@@ -14,7 +14,7 @@ function %I32() {
sig0 = ()
fn0 = %foo()
ebb0:
block0:
; asm: movl $-1, %ecx
[-,%rcx] v400 = func_addr.i32 fn0 ; bin: b9 Abs4(%foo) ffffffff

View File

@@ -14,7 +14,7 @@ function %I64() {
sig0 = ()
fn0 = %foo()
ebb0:
block0:
; asm: movabsq $-1, %rcx
[-,%rcx] v400 = func_addr.i64 fn0 ; bin: 48 b9 Abs8(%foo) ffffffffffffffff

View File

@@ -5,7 +5,7 @@ target i686
function u0:0(i32 vmctx) baldrdash_system_v {
sig0 = (i32 vmctx, i32 sigid) baldrdash_system_v
ebb0(v0: i32):
block0(v0: i32):
v2 = iconst.i32 0
v8 = iconst.i32 0
v9 = iconst.i32 0

View File

@@ -5,7 +5,7 @@ target x86_64 baseline
; clz/ctz on 64 bit operands
function %i64_clz(i64) -> i64 {
ebb0(v10: i64):
block0(v10: i64):
v11 = clz v10
; check: x86_bsr
; check: selectif.i64
@@ -13,7 +13,7 @@ ebb0(v10: i64):
}
function %i64_ctz(i64) -> i64 {
ebb1(v20: i64):
block1(v20: i64):
v21 = ctz v20
; check: x86_bsf
; check: selectif.i64
@@ -24,7 +24,7 @@ ebb1(v20: i64):
; clz/ctz on 32 bit operands
function %i32_clz(i32) -> i32 {
ebb0(v10: i32):
block0(v10: i32):
v11 = clz v10
; check: x86_bsr
; check: selectif.i32
@@ -32,7 +32,7 @@ ebb0(v10: i32):
}
function %i32_ctz(i32) -> i32 {
ebb1(v20: i32):
block1(v20: i32):
v21 = ctz v20
; check: x86_bsf
; check: selectif.i32
@@ -43,7 +43,7 @@ ebb1(v20: i32):
; popcount on 64 bit operands
function %i64_popcount(i64) -> i64 {
ebb0(v30: i64):
block0(v30: i64):
v31 = popcnt v30;
; check: ushr_imm
; check: iconst.i64
@@ -69,7 +69,7 @@ ebb0(v30: i64):
; popcount on 32 bit operands
function %i32_popcount(i32) -> i32 {
ebb0(v40: i32):
block0(v40: i32):
v41 = popcnt v40;
; check: ushr_imm
; check: iconst.i32

View File

@@ -8,7 +8,7 @@ target x86_64 baseline
;
function %Foo() {
ebb0:
block0:
; 64-bit wide bsf
[-,%r11] v10 = iconst.i64 0x1234

View File

@@ -13,7 +13,7 @@ function %F32() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
@@ -262,7 +262,7 @@ function %F64() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
@@ -472,53 +472,53 @@ ebb0:
}
function %cpuflags_float(f32 [%xmm0]) {
ebb0(v0: f32 [%xmm0]):
block0(v0: f32 [%xmm0]):
; asm: ucomiss %xmm0, %xmm0
[-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0
jump ebb1
jump block1
ebb1:
; asm: jnp ebb1
brff ord v1, ebb1 ; bin: 7b fe
jump ebb2
block1:
; asm: jnp block1
brff ord v1, block1 ; bin: 7b fe
jump block2
ebb2:
; asm: jp ebb1
brff uno v1, ebb1 ; bin: 7a fc
jump ebb3
block2:
; asm: jp block1
brff uno v1, block1 ; bin: 7a fc
jump block3
ebb3:
; asm: jne ebb1
brff one v1, ebb1 ; bin: 75 fa
jump ebb4
block3:
; asm: jne block1
brff one v1, block1 ; bin: 75 fa
jump block4
ebb4:
; asm: je ebb1
brff ueq v1, ebb1 ; bin: 74 f8
jump ebb5
block4:
; asm: je block1
brff ueq v1, block1 ; bin: 74 f8
jump block5
ebb5:
; asm: ja ebb1
brff gt v1, ebb1 ; bin: 77 f6
jump ebb6
block5:
; asm: ja block1
brff gt v1, block1 ; bin: 77 f6
jump block6
ebb6:
; asm: jae ebb1
brff ge v1, ebb1 ; bin: 73 f4
jump ebb7
block6:
; asm: jae block1
brff ge v1, block1 ; bin: 73 f4
jump block7
ebb7:
; asm: jb ebb1
brff ult v1, ebb1 ; bin: 72 f2
jump ebb8
block7:
; asm: jb block1
brff ult v1, block1 ; bin: 72 f2
jump block8
ebb8:
; asm: jbe ebb1
brff ule v1, ebb1 ; bin: 76 f0
jump ebb9
block8:
; asm: jbe block1
brff ule v1, block1 ; bin: 76 f0
jump block9
ebb9:
block9:
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2

View File

@@ -19,7 +19,7 @@ function %I32() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
; asm: movl $1, %ecx
[-,%rcx] v1 = iconst.i32 1 ; bin: b9 00000001
; asm: movl $2, %esi
@@ -486,141 +486,141 @@ ebb0:
[-,%rcx,%rflags] v709, v710 = isub_ifborrow v1, v2, v707 ; bin: 19 f1
; asm: testl %ecx, %ecx
; asm: je ebb1
brz v1, ebb1 ; bin: 85 c9 74 0e
fallthrough ebb3
; asm: je block1
brz v1, block1 ; bin: 85 c9 74 0e
fallthrough block3
ebb3:
block3:
; asm: testl %esi, %esi
; asm: je ebb1
brz v2, ebb1 ; bin: 85 f6 74 0a
fallthrough ebb4
; asm: je block1
brz v2, block1 ; bin: 85 f6 74 0a
fallthrough block4
ebb4:
block4:
; asm: testl %ecx, %ecx
; asm: jne ebb1
brnz v1, ebb1 ; bin: 85 c9 75 06
fallthrough ebb5
; asm: jne block1
brnz v1, block1 ; bin: 85 c9 75 06
fallthrough block5
ebb5:
block5:
; asm: testl %esi, %esi
; asm: jne ebb1
brnz v2, ebb1 ; bin: 85 f6 75 02
; asm: jne block1
brnz v2, block1 ; bin: 85 f6 75 02
; asm: jmp ebb2
jump ebb2 ; bin: eb 01
; asm: jmp block2
jump block2 ; bin: eb 01
; asm: ebb1:
ebb1:
; asm: block1:
block1:
; asm: ret
return ; bin: c3
; asm: ebb2:
ebb2:
; asm: block2:
block2:
trap user0 ; bin: user0 0f 0b
}
; Special branch encodings only for I32 mode.
function %special_branches() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%rdi] v3 = icmp eq v1, v2
[-,%rbx] v4 = icmp ugt v1, v2
; asm: testl $0xff, %edi
; asm: je ebb1
brz v3, ebb1 ; bin: f7 c7 000000ff 0f 84 00000015
fallthrough ebb2
; asm: je block1
brz v3, block1 ; bin: f7 c7 000000ff 0f 84 00000015
fallthrough block2
ebb2:
block2:
; asm: testb %bl, %bl
; asm: je ebb1
brz v4, ebb1 ; bin: 84 db 74 11
fallthrough ebb3
; asm: je block1
brz v4, block1 ; bin: 84 db 74 11
fallthrough block3
ebb3:
block3:
; asm: testl $0xff, %edi
; asm: jne ebb1
brnz v3, ebb1 ; bin: f7 c7 000000ff 0f 85 00000005
fallthrough ebb4
; asm: jne block1
brnz v3, block1 ; bin: f7 c7 000000ff 0f 85 00000005
fallthrough block4
ebb4:
block4:
; asm: testb %bl, %bl
; asm: jne ebb1
brnz v4, ebb1 ; bin: 84 db 75 01
fallthrough ebb5
; asm: jne block1
brnz v4, block1 ; bin: 84 db 75 01
fallthrough block5
ebb5:
block5:
return
ebb1:
block1:
return
}
; CPU flag instructions.
function %cpu_flags() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
jump ebb1
jump block1
ebb1:
block1:
; asm: cmpl %esi, %ecx
[-,%rflags] v10 = ifcmp v1, v2 ; bin: 39 f1
; asm: cmpl %ecx, %esi
[-,%rflags] v11 = ifcmp v2, v1 ; bin: 39 ce
; asm: je ebb1
brif eq v11, ebb1 ; bin: 74 fa
jump ebb2
; asm: je block1
brif eq v11, block1 ; bin: 74 fa
jump block2
ebb2:
; asm: jne ebb1
brif ne v11, ebb1 ; bin: 75 f8
jump ebb3
block2:
; asm: jne block1
brif ne v11, block1 ; bin: 75 f8
jump block3
ebb3:
; asm: jl ebb1
brif slt v11, ebb1 ; bin: 7c f6
jump ebb4
block3:
; asm: jl block1
brif slt v11, block1 ; bin: 7c f6
jump block4
ebb4:
; asm: jge ebb1
brif sge v11, ebb1 ; bin: 7d f4
jump ebb5
block4:
; asm: jge block1
brif sge v11, block1 ; bin: 7d f4
jump block5
ebb5:
; asm: jg ebb1
brif sgt v11, ebb1 ; bin: 7f f2
jump ebb6
block5:
; asm: jg block1
brif sgt v11, block1 ; bin: 7f f2
jump block6
ebb6:
; asm: jle ebb1
brif sle v11, ebb1 ; bin: 7e f0
jump ebb7
block6:
; asm: jle block1
brif sle v11, block1 ; bin: 7e f0
jump block7
ebb7:
; asm: jb ebb1
brif ult v11, ebb1 ; bin: 72 ee
jump ebb8
block7:
; asm: jb block1
brif ult v11, block1 ; bin: 72 ee
jump block8
ebb8:
; asm: jae ebb1
brif uge v11, ebb1 ; bin: 73 ec
jump ebb9
block8:
; asm: jae block1
brif uge v11, block1 ; bin: 73 ec
jump block9
ebb9:
; asm: ja ebb1
brif ugt v11, ebb1 ; bin: 77 ea
jump ebb10
block9:
; asm: ja block1
brif ugt v11, block1 ; bin: 77 ea
jump block10
ebb10:
; asm: jbe ebb1
brif ule v11, ebb1 ; bin: 76 e8
jump ebb11
block10:
; asm: jbe block1
brif ule v11, block1 ; bin: 76 e8
jump block11
ebb11:
block11:
; asm: sete %bl
[-,%rbx] v20 = trueif eq v11 ; bin: 0f 94 c3
@@ -690,7 +690,7 @@ ebb11:
; Tests for i32/i8 conversion instructions.
function %I32_I8() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rcx] v11 = ireduce.i8 v1 ; bin:
@@ -706,7 +706,7 @@ ebb0:
; Tests for i32/i16 conversion instructions.
function %I32_I16() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rcx] v11 = ireduce.i16 v1 ; bin:

View File

@@ -14,7 +14,7 @@ function %F32() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
@@ -297,7 +297,7 @@ function %F64() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
@@ -553,53 +553,53 @@ ebb0:
}
function %cpuflags_float(f32 [%xmm0]) {
ebb0(v0: f32 [%xmm0]):
block0(v0: f32 [%xmm0]):
; asm: ucomiss %xmm0, %xmm0
[-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0
jump ebb1
jump block1
ebb1:
; asm: jnp ebb1
brff ord v1, ebb1 ; bin: 7b fe
jump ebb2
block1:
; asm: jnp block1
brff ord v1, block1 ; bin: 7b fe
jump block2
ebb2:
; asm: jp ebb1
brff uno v1, ebb1 ; bin: 7a fc
jump ebb3
block2:
; asm: jp block1
brff uno v1, block1 ; bin: 7a fc
jump block3
ebb3:
; asm: jne ebb1
brff one v1, ebb1 ; bin: 75 fa
jump ebb4
block3:
; asm: jne block1
brff one v1, block1 ; bin: 75 fa
jump block4
ebb4:
; asm: je ebb1
brff ueq v1, ebb1 ; bin: 74 f8
jump ebb5
block4:
; asm: je block1
brff ueq v1, block1 ; bin: 74 f8
jump block5
ebb5:
; asm: ja ebb1
brff gt v1, ebb1 ; bin: 77 f6
jump ebb6
block5:
; asm: ja block1
brff gt v1, block1 ; bin: 77 f6
jump block6
ebb6:
; asm: jae ebb1
brff ge v1, ebb1 ; bin: 73 f4
jump ebb7
block6:
; asm: jae block1
brff ge v1, block1 ; bin: 73 f4
jump block7
ebb7:
; asm: jb ebb1
brff ult v1, ebb1 ; bin: 72 f2
jump ebb8
block7:
; asm: jb block1
brff ult v1, block1 ; bin: 72 f2
jump block8
ebb8:
; asm: jbe ebb1
brff ule v1, ebb1 ; bin: 76 f0
jump ebb9
block8:
; asm: jbe block1
brff ule v1, block1 ; bin: 76 f0
jump block9
ebb9:
block9:
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2

View File

@@ -25,7 +25,7 @@ function %I64() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
; Colocated functions.

View File

@@ -3,7 +3,7 @@ target x86_64
; this verifies that returning b64 immediates does not result in a segmentation fault, see https://github.com/bytecodealliance/cranelift/issues/911
function %test_b64() -> b64 {
ebb0:
block0:
[-, %r10] v0 = bconst.b64 true
return v0
}

View File

@@ -23,7 +23,7 @@ function %I64() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
; Integer Constants.
@@ -708,117 +708,117 @@ ebb0:
istore8_complex v601, v521+v522 ; bin: heap_oob 88 0c 18
; asm: testq %rcx, %rcx
; asm: je ebb1
brz v1, ebb1 ; bin: 48 85 c9 74 1b
fallthrough ebb3
; asm: je block1
brz v1, block1 ; bin: 48 85 c9 74 1b
fallthrough block3
ebb3:
block3:
; asm: testq %rsi, %rsi
; asm: je ebb1
brz v2, ebb1 ; bin: 48 85 f6 74 16
fallthrough ebb4
; asm: je block1
brz v2, block1 ; bin: 48 85 f6 74 16
fallthrough block4
ebb4:
block4:
; asm: testq %r10, %r10
; asm: je ebb1
brz v3, ebb1 ; bin: 4d 85 d2 74 11
fallthrough ebb5
; asm: je block1
brz v3, block1 ; bin: 4d 85 d2 74 11
fallthrough block5
ebb5:
block5:
; asm: testq %rcx, %rcx
; asm: jne ebb1
brnz v1, ebb1 ; bin: 48 85 c9 75 0c
fallthrough ebb6
; asm: jne block1
brnz v1, block1 ; bin: 48 85 c9 75 0c
fallthrough block6
ebb6:
block6:
; asm: testq %rsi, %rsi
; asm: jne ebb1
brnz v2, ebb1 ; bin: 48 85 f6 75 07
fallthrough ebb7
; asm: jne block1
brnz v2, block1 ; bin: 48 85 f6 75 07
fallthrough block7
ebb7:
block7:
; asm: testq %r10, %r10
; asm: jne ebb1
brnz v3, ebb1 ; bin: 4d 85 d2 75 02
; asm: jne block1
brnz v3, block1 ; bin: 4d 85 d2 75 02
; asm: jmp ebb2
jump ebb2 ; bin: eb 01
; asm: jmp block2
jump block2 ; bin: eb 01
; asm: ebb1:
ebb1:
; asm: block1:
block1:
return ; bin: c3
; asm: ebb2:
ebb2:
; asm: block2:
block2:
; Add a no-op instruction to prevent fold_redundant_jump from removing this block.
; asm: notq %rcx
[-,%rcx] v5000 = bnot v1 ; bin: 48 f7 d1
jump ebb1 ; bin: eb fa
jump block1 ; bin: eb fa
}
; CPU flag instructions.
function %cpu_flags_I64() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i64 1
[-,%r10] v2 = iconst.i64 2
jump ebb1
jump block1
ebb1:
block1:
; asm: cmpq %r10, %rcx
[-,%rflags] v10 = ifcmp v1, v2 ; bin: 4c 39 d1
; asm: cmpq %rcx, %r10
[-,%rflags] v11 = ifcmp v2, v1 ; bin: 49 39 ca
; asm: je ebb1
brif eq v11, ebb1 ; bin: 74 f8
jump ebb2
; asm: je block1
brif eq v11, block1 ; bin: 74 f8
jump block2
ebb2:
; asm: jne ebb1
brif ne v11, ebb1 ; bin: 75 f6
jump ebb3
block2:
; asm: jne block1
brif ne v11, block1 ; bin: 75 f6
jump block3
ebb3:
; asm: jl ebb1
brif slt v11, ebb1 ; bin: 7c f4
jump ebb4
block3:
; asm: jl block1
brif slt v11, block1 ; bin: 7c f4
jump block4
ebb4:
; asm: jge ebb1
brif sge v11, ebb1 ; bin: 7d f2
jump ebb5
block4:
; asm: jge block1
brif sge v11, block1 ; bin: 7d f2
jump block5
ebb5:
; asm: jg ebb1
brif sgt v11, ebb1 ; bin: 7f f0
jump ebb6
block5:
; asm: jg block1
brif sgt v11, block1 ; bin: 7f f0
jump block6
ebb6:
; asm: jle ebb1
brif sle v11, ebb1 ; bin: 7e ee
jump ebb7
block6:
; asm: jle block1
brif sle v11, block1 ; bin: 7e ee
jump block7
ebb7:
; asm: jb ebb1
brif ult v11, ebb1 ; bin: 72 ec
jump ebb8
block7:
; asm: jb block1
brif ult v11, block1 ; bin: 72 ec
jump block8
ebb8:
; asm: jae ebb1
brif uge v11, ebb1 ; bin: 73 ea
jump ebb9
block8:
; asm: jae block1
brif uge v11, block1 ; bin: 73 ea
jump block9
ebb9:
; asm: ja ebb1
brif ugt v11, ebb1 ; bin: 77 e8
jump ebb10
block9:
; asm: ja block1
brif ugt v11, block1 ; bin: 77 e8
jump block10
ebb10:
; asm: jbe ebb1
brif ule v11, ebb1 ; bin: 76 e6
jump ebb11
block10:
; asm: jbe block1
brif ule v11, block1 ; bin: 76 e6
jump block11
ebb11:
block11:
; asm: sete %bl
[-,%rbx] v20 = trueif eq v11 ; bin: 0f 94 c3
@@ -896,7 +896,7 @@ function %outargs() {
ss1 = outgoing_arg 8, offset 8
ss2 = outgoing_arg 8, offset 0
ebb0:
block0:
[-,%rcx] v1 = iconst.i64 1
; asm: movq %rcx, 8(%rsp)
@@ -922,7 +922,7 @@ function %I32() {
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
block0:
; Integer Constants.
@@ -1318,58 +1318,58 @@ ebb0:
[-,%r10] v533 = ushr_imm v3, 31 ; bin: 41 c1 ea 1f
; asm: testl %ecx, %ecx
; asm: je ebb1x
brz v1, ebb1 ; bin: 85 c9 74 18
fallthrough ebb3
; asm: je block1x
brz v1, block1 ; bin: 85 c9 74 18
fallthrough block3
ebb3:
block3:
; asm: testl %esi, %esi
; asm: je ebb1x
brz v2, ebb1 ; bin: 85 f6 74 14
fallthrough ebb4
; asm: je block1x
brz v2, block1 ; bin: 85 f6 74 14
fallthrough block4
ebb4:
block4:
; asm: testl %r10d, %r10d
; asm: je ebb1x
brz v3, ebb1 ; bin: 45 85 d2 74 0f
fallthrough ebb5
; asm: je block1x
brz v3, block1 ; bin: 45 85 d2 74 0f
fallthrough block5
ebb5:
block5:
; asm: testl %ecx, %ecx
; asm: jne ebb1x
brnz v1, ebb1 ; bin: 85 c9 75 0b
fallthrough ebb6
; asm: jne block1x
brnz v1, block1 ; bin: 85 c9 75 0b
fallthrough block6
ebb6:
block6:
; asm: testl %esi, %esi
; asm: jne ebb1x
brnz v2, ebb1 ; bin: 85 f6 75 07
fallthrough ebb7
; asm: jne block1x
brnz v2, block1 ; bin: 85 f6 75 07
fallthrough block7
ebb7:
block7:
; asm: testl %r10d, %r10d
; asm: jne ebb1x
brnz v3, ebb1 ; bin: 45 85 d2 75 02
; asm: jne block1x
brnz v3, block1 ; bin: 45 85 d2 75 02
; asm: jmp ebb2x
jump ebb2 ; bin: eb 01
; asm: jmp block2x
jump block2 ; bin: eb 01
; asm: ebb1x:
ebb1:
; asm: block1x:
block1:
return ; bin: c3
; asm: ebb2x:
ebb2:
; asm: block2x:
block2:
; Add a no-op instruction to prevent fold_redundant_jump from removing this block.
; asm: notl %ecx
[-,%rcx] v5000 = bnot v1 ; bin: f7 d1
jump ebb1 ; bin: eb fb
jump block1 ; bin: eb fb
}
; Tests for i32/i8 conversion instructions.
function %I32_I8() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%r10] v3 = iconst.i32 3
@@ -1397,7 +1397,7 @@ ebb0:
; Tests for i32/i16 conversion instructions.
function %I32_I16() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%r10] v3 = iconst.i32 3
@@ -1425,7 +1425,7 @@ ebb0:
; Tests for i64/i8 conversion instructions.
function %I64_I8() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
@@ -1453,7 +1453,7 @@ ebb0:
; Tests for i64/i16 conversion instructions.
function %I64_I16() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
@@ -1481,7 +1481,7 @@ ebb0:
; Tests for i64/i32 conversion instructions.
function %I64_I32() {
ebb0:
block0:
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
@@ -1509,9 +1509,9 @@ ebb0:
; Tests for i64 jump table instructions.
function %I64_JT(i64 [%rdi]) {
jt0 = jump_table [ebb1, ebb2, ebb3]
jt0 = jump_table [block1, block2, block3]
ebb0(v0: i64 [%rdi]):
block0(v0: i64 [%rdi]):
; Note: The next two lines will need to change whenever instructions are
; added or removed from this test.
[-, %rax] v1 = jump_table_base.i64 jt0 ; bin: 48 8d 05 00000039 PCRelRodata4(jt0)
@@ -1530,23 +1530,23 @@ ebb0(v0: i64 [%rdi]):
[-, %r10] v32 = jump_table_entry.i64 v11, v1, 4, jt0 ; bin: 4e 63 14 a8
[-, %r10] v33 = jump_table_entry.i64 v11, v2, 4, jt0 ; bin: 4f 63 14 aa
fallthrough ebb10
fallthrough block10
ebb10:
block10:
indirect_jump_table_br v10, jt0 ; bin: ff e3
ebb11:
block11:
indirect_jump_table_br v11, jt0 ; bin: 41 ff e5
ebb1:
fallthrough ebb2
ebb2:
fallthrough ebb3
ebb3:
block1:
fallthrough block2
block2:
fallthrough block3
block3:
trap user0
}
function %r12_r13_loads() {
ebb0:
block0:
[-,%r12] v1 = iconst.i64 0x0123_4567_89ab_cdef
[-,%r13] v2 = iconst.i64 0xfedc_ba98_7654_3210
[-,%rax] v3 = iconst.i64 0x1
@@ -1603,7 +1603,7 @@ ebb0:
}
function %r12_r13_stores() {
ebb0:
block0:
[-,%r12] v1 = iconst.i64 0x0123_4567_89ab_cdef
[-,%r13] v2 = iconst.i64 0xfedc_ba98_7654_3210
[-,%rax] v3 = iconst.i64 0x1
@@ -1674,7 +1674,7 @@ ebb0:
}
function %B64() {
ebb0:
block0:
[-, %rax] v1 = bconst.b64 true ; bin: 40 b8 00000001
[-, %r10] v0 = bconst.b64 true ; bin: 41 ba 00000001
return

View File

@@ -2,7 +2,7 @@ test run
target x86_64
function %reverse_bits_zero() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0
v1 = iconcat v0, v0
v2 = bitrev.i128 v1
@@ -12,7 +12,7 @@ ebb0:
; run
function %reverse_bits_one() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0
v1 = iconst.i64 1
v2 = iconcat v0, v1
@@ -29,7 +29,7 @@ ebb0:
; run
function %reverse_bits() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0x06AD_8667_69EC_41BA
v1 = iconst.i64 0x6C83_D81A_6E28_83AB
v2 = iconcat v0, v1

View File

@@ -2,36 +2,36 @@ test run
target x86_64
function %br_false() -> b1 {
ebb0:
block0:
v10 = iconst.i64 0x42
v11 = iconst.i64 0x00
v0 = iconcat v10, v11
brz v0, ebb2
jump ebb1
brz v0, block2
jump block1
ebb1:
block1:
v1 = bconst.b1 true
return v1
ebb2:
block2:
v2 = bconst.b1 false
return v2
}
; run
function %br_true() -> b1 {
ebb0:
block0:
v10 = iconst.i64 0x00
v11 = iconst.i64 0x00
v0 = iconcat v10, v11
brz v0, ebb2
jump ebb1
brz v0, block2
jump block1
ebb1:
block1:
v1 = bconst.b1 false
return v1
ebb2:
block2:
v2 = bconst.b1 true
return v2
}

View File

@@ -2,41 +2,41 @@ test compile
target x86_64
function u0:0(i128) -> i8 fast {
ebb0(v0: i128):
brz v0, ebb2
block0(v0: i128):
brz v0, block2
; check: v0 = iconcat v3, v4
; nextln: v5 = icmp_imm eq v3, 0
; nextln: v6 = icmp_imm eq v4, 0
; nextln: v7 = band v5, v6
; nextln: brnz v7, ebb2
jump ebb1
; nextln: brnz v7, block2
jump block1
ebb1:
block1:
v1 = iconst.i8 0
return v1
ebb2:
block2:
v2 = iconst.i8 1
return v2
}
function u0:1(i128) -> i8 fast {
ebb0(v0: i128):
brnz v0, ebb2
block0(v0: i128):
brnz v0, block2
; check: v0 = iconcat v3, v4
; nextln: brnz v3, ebb2
; nextln: fallthrough ebb3
; nextln: brnz v3, block2
; nextln: fallthrough block3
; check: ebb3:
; nextln: brnz.i64 v4, ebb2
jump ebb1
; nextln: fallthrough ebb1
; check: block3:
; nextln: brnz.i64 v4, block2
jump block1
; nextln: fallthrough block1
ebb1:
block1:
v1 = iconst.i8 0
return v1
ebb2:
block2:
v2 = iconst.i8 1
return v2
}

View File

@@ -2,32 +2,32 @@ test run
target x86_64
function u0:0() -> b1 {
ebb0:
block0:
v0 = iconst.i8 0
brz v0, ebb1
jump ebb2
brz v0, block1
jump block2
ebb1:
block1:
v1 = bconst.b1 true
return v1
ebb2:
block2:
v2 = bconst.b1 false
return v2
}
; run
function u0:1() -> b1 {
ebb0:
block0:
v0 = iconst.i8 0
brnz v0, ebb1
jump ebb2
brnz v0, block1
jump block2
ebb1:
block1:
v1 = bconst.b1 false
return v1
ebb2:
block2:
v2 = bconst.b1 true
return v2
}

View File

@@ -2,37 +2,37 @@ test compile
target x86_64
function u0:0() -> b1 {
ebb0:
block0:
v0 = iconst.i8 0
; check: v0 = iconst.i8 0
brz v0, ebb1
brz v0, block1
; nextln: v3 = uextend.i32 v0
; nextln: brz v3, ebb1
jump ebb2
; nextln: brz v3, block1
jump block2
ebb1:
block1:
v1 = bconst.b1 true
return v1
ebb2:
block2:
v2 = bconst.b1 false
return v2
}
function u0:1() -> b1 {
ebb0:
block0:
v0 = iconst.i8 0
; check: v0 = iconst.i8 0
brnz v0, ebb1
brnz v0, block1
; nextln: v3 = uextend.i32 v0
; nextln: brnz v3, ebb1
jump ebb2
; nextln: brnz v3, block1
jump block2
ebb1:
block1:
v1 = bconst.b1 false
return v1
ebb2:
block2:
v2 = bconst.b1 true
return v2
}

View File

@@ -5,12 +5,12 @@ target x86_64 haswell
; use baldrdash calling convention here for simplicity (avoids prologue, epilogue)
function %test_vconst_i32() -> i32x4 baldrdash_system_v {
ebb0:
block0:
v0 = vconst.i32x4 0x1234
return v0
}
; check: ebb0:
; check: block0:
; nextln: v0 = vconst.i32x4 0x1234
; nextln: return v0
; nextln: }

View File

@@ -2,7 +2,7 @@ test run
target x86_64
function u0:0() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
v1 = uextend.i128 v0
v2, v3 = isplit v1
@@ -14,7 +14,7 @@ ebb0:
; run
function u0:1() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
v1 = sextend.i128 v0
v2, v3 = isplit v1

View File

@@ -2,7 +2,7 @@ test compile
target x86_64
function u0:0() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
; check: v0 = iconst.i64 0xffff_ffff_eeee_0000
; nextln: v2 -> v0
@@ -20,7 +20,7 @@ ebb0:
}
function u0:1() -> b1 {
ebb0:
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
; check: v0 = iconst.i64 0xffff_ffff_eeee_0000
; nextln: v2 -> v0

View File

@@ -6,7 +6,7 @@ target x86_64 haswell
; booleans use x86_pextr which is manually placed in the IR so that it can be binemit-tested
function %test_extractlane_b8() {
ebb0:
block0:
[-, %rax] v0 = bconst.b8 true
[-, %xmm0] v1 = splat.b8x16 v0
[-, %rax] v2 = x86_pextr v1, 10 ; bin: 66 0f 3a 14 c0 0a
@@ -14,7 +14,7 @@ ebb0:
}
function %test_extractlane_i16() {
ebb0:
block0:
[-, %rax] v0 = iconst.i16 4
[-, %xmm1] v1 = splat.i16x8 v0
[-, %rax] v2 = x86_pextr v1, 4 ; bin: 66 0f 3a 15 c8 04
@@ -22,7 +22,7 @@ ebb0:
}
function %test_extractlane_i32() {
ebb0:
block0:
[-, %rax] v0 = iconst.i32 42
[-, %xmm4] v1 = splat.i32x4 v0
[-, %rcx] v2 = x86_pextr v1, 2 ; bin: 66 0f 3a 16 e1 02
@@ -30,7 +30,7 @@ ebb0:
}
function %test_extractlane_b64() {
ebb0:
block0:
[-, %rax] v0 = bconst.b64 false
[-, %xmm2] v1 = splat.b64x2 v0
[-, %rbx] v2 = x86_pextr v1, 1 ; bin: 66 48 0f 3a 16 d3 01

View File

@@ -2,7 +2,7 @@ test run
set enable_simd
function %test_extractlane_b8() -> b8 {
ebb0:
block0:
v1 = vconst.b8x16 [false false false false false false false false false false true false false
false false false]
v2 = extractlane v1, 10
@@ -11,7 +11,7 @@ ebb0:
; run
function %test_extractlane_i16() -> b1 {
ebb0:
block0:
v0 = vconst.i16x8 0x00080007000600050004000300020001
v1 = extractlane v0, 1
v2 = icmp_imm eq v1, 2
@@ -20,7 +20,7 @@ ebb0:
; run
function %test_extractlane_f32() -> b1 {
ebb0:
block0:
v0 = f32const 0x42.42
v1 = vconst.f32x4 [0x00.00 0x00.00 0x00.00 0x42.42]
v2 = extractlane v1, 3
@@ -30,7 +30,7 @@ ebb0:
; run
function %test_extractlane_i32_with_vector_reuse() -> b1 {
ebb0:
block0:
v0 = iconst.i32 42
v1 = iconst.i32 99
@@ -49,7 +49,7 @@ ebb0:
; run
function %test_extractlane_f32_with_vector_reuse() -> b1 {
ebb0:
block0:
v0 = f32const 0x42.42
v1 = f32const 0x99.99

View File

@@ -3,14 +3,14 @@ test binemit
target i686
function %foo() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %bar() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0
return v1

View File

@@ -3,28 +3,28 @@ test binemit
target x86_64
function %zero_const_32bit_no_rex() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 40 0f 57 c0
return v0
}
function %zero_const_32bit_rex() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm8, %xmm8
[-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0
return v1
}
function %zero_const_64bit_no_rex() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v0 = f64const 0.0 ; bin: 66 40 0f 57 c0
return v0
}
function %zero_const_64bit_rex() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm8, %xmm8
[-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0
return v1

View File

@@ -2,24 +2,24 @@ test compile
target x86_64
function u0:0() -> i128 system_v {
ebb0:
block0:
v0 = iconst.i64 0
v1 = iconst.i64 0
v2 = iconcat v0, v1
jump ebb5
jump block5
ebb2:
jump ebb4(v27)
block2:
jump block4(v27)
ebb4(v23: i128):
block4(v23: i128):
return v23
ebb5:
block5:
v27 = bxor.i128 v2, v2
v32 = iconst.i32 0
brz v32, ebb2
jump ebb6
brz v32, block2
jump block6
ebb6:
block6:
trap user0
}

View File

@@ -2,8 +2,8 @@ test compile
target x86_64
function u0:0(i64, i64) -> i128 fast {
ebb0(v0: i64, v1: i64):
;check: ebb0(v0: i64 [%rdi], v1: i64 [%rsi], v3: i64 [%rbp]):
block0(v0: i64, v1: i64):
;check: block0(v0: i64 [%rdi], v1: i64 [%rsi], v3: i64 [%rbp]):
v2 = iconcat.i64 v0, v1
; check: regmove v0, %rdi -> %rax
@@ -15,8 +15,8 @@ ebb0(v0: i64, v1: i64):
}
function u0:1(i128) -> i64, i64 fast {
ebb0(v0: i128):
; check: ebb0(v3: i64 [%rdi], v4: i64 [%rsi], v5: i64 [%rbp]):
block0(v0: i128):
; check: block0(v3: i64 [%rdi], v4: i64 [%rsi], v5: i64 [%rbp]):
v1, v2 = isplit v0
; check: regmove v3, %rdi -> %rax
@@ -28,8 +28,8 @@ ebb0(v0: i128):
}
function u0:2(i64, i128) fast {
; check: ebb0(v0: i64 [%rdi], v2: i64 [%rsi], v3: i64 [%rdx], v6: i64 [%rbp]):
ebb0(v0: i64, v1: i128):
; check: block0(v0: i64 [%rdi], v2: i64 [%rsi], v3: i64 [%rdx], v6: i64 [%rbp]):
block0(v0: i64, v1: i128):
; check: store v2, v0+8
; check: store v3, v0+16
store v1, v0+8
@@ -37,7 +37,7 @@ ebb0(v0: i64, v1: i128):
}
function u0:3(i64) -> i128 fast {
ebb0(v0: i64):
block0(v0: i64):
; check: v2 = load.i64 v0+8
; check: v3 = load.i64 v0+16
v1 = load.i128 v0+8

View File

@@ -3,7 +3,7 @@ set enable_simd
target x86_64 skylake
function %icmp_i8x16() {
ebb0:
block0:
[-, %xmm3] v0 = vconst.i8x16 0x00 ; bin: 66 0f ef db
[-, %xmm4] v1 = vconst.i8x16 0xffffffffffffffffffffffffffffffff ; bin: 66 0f 74 e4
[-, %xmm3] v2 = icmp eq v0, v1 ; bin: 66 0f 74 dc
@@ -11,7 +11,7 @@ ebb0:
}
function %icmp_i16x8() {
ebb0:
block0:
[-, %xmm0] v0 = vconst.i16x8 0x00
[-, %xmm7] v1 = vconst.i16x8 0xffffffffffffffffffffffffffffffff
[-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 75 c7
@@ -19,7 +19,7 @@ ebb0:
}
function %icmp_i32x4() {
ebb0:
block0:
[-, %xmm0] v0 = vconst.i32x4 0x00
[-, %xmm4] v1 = vconst.i32x4 0xffffffffffffffffffffffffffffffff
[-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 76 c4
@@ -27,7 +27,7 @@ ebb0:
}
function %icmp_i64x2() {
ebb0:
block0:
[-, %xmm0] v0 = vconst.i64x2 0x00
[-, %xmm1] v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
[-, %xmm0] v2 = icmp eq v0, v1 ; bin: 66 0f 38 29 c1

View File

@@ -2,7 +2,7 @@ test run
target x86_64 haswell
function %test_icmp_eq_i128() -> b1 {
ebb0:
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
@@ -16,7 +16,7 @@ ebb0:
; run
function %test_icmp_imm_eq_i128() -> b1 {
ebb0:
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
@@ -27,7 +27,7 @@ ebb0:
; run
function %test_icmp_ne_i128() -> b1 {
ebb0:
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12
@@ -41,7 +41,7 @@ ebb0:
; run
function %test_icmp_imm_ne_i128() -> b1 {
ebb0:
block0:
v11 = iconst.i64 0x0
v12 = iconst.i64 0x0
v1 = iconcat v11, v12

View File

@@ -2,7 +2,7 @@ test run
set enable_simd
function %run_icmp_i8x16() -> b8 {
ebb0:
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 0x00
v2 = icmp eq v0, v1
@@ -13,7 +13,7 @@ ebb0:
; run
function %run_icmp_i64x2() -> b64 {
ebb0:
block0:
v0 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
v1 = vconst.i64x2 0xffffffffffffffffffffffffffffffff
v2 = icmp eq v0, v1

View File

@@ -2,7 +2,7 @@ test run
target x86_64 haswell
function %test_imul_i128() -> b1 {
ebb0:
block0:
v11 = iconst.i64 0xf2347ac4503f1e24
v12 = iconst.i64 0x0098fe985354ab06
v1 = iconcat v11, v12

View File

@@ -6,7 +6,7 @@ target x86_64 haswell
; booleans use x86_pinsr which is manually placed in the IR so that it can be binemit-tested
function %test_insertlane_b8() {
ebb0:
block0:
[-, %rax] v0 = bconst.b8 true
[-, %rbx] v1 = bconst.b8 false
[-, %xmm0] v2 = splat.b8x16 v0
@@ -15,7 +15,7 @@ ebb0:
}
function %test_insertlane_i16() {
ebb0:
block0:
[-, %rax] v0 = iconst.i16 4
[-, %rbx] v1 = iconst.i16 5
[-, %xmm1] v2 = splat.i16x8 v0
@@ -24,7 +24,7 @@ ebb0:
}
function %test_insertlane_i32() {
ebb0:
block0:
[-, %rax] v0 = iconst.i32 42
[-, %rbx] v1 = iconst.i32 99
[-, %xmm4] v2 = splat.i32x4 v0
@@ -33,7 +33,7 @@ ebb0:
}
function %test_insertlane_b64() {
ebb0:
block0:
[-, %rax] v0 = bconst.b64 true
[-, %rbx] v1 = bconst.b64 false
[-, %xmm2] v2 = splat.b64x2 v0

View File

@@ -4,7 +4,7 @@ set enable_simd
; TODO once SIMD vector comparison is implemented, remove use of extractlane below
function %test_insertlane_b8() -> b8 {
ebb0:
block0:
v1 = bconst.b8 true
v2 = vconst.b8x16 [false false false false false false false false false false false false false
false false false]
@@ -15,7 +15,7 @@ ebb0:
; run
function %test_insertlane_f32() -> b1 {
ebb0:
block0:
v0 = f32const 0x42.42
v1 = vconst.f32x4 0x00
v2 = insertlane v1, 1, v0
@@ -26,7 +26,7 @@ ebb0:
; run
function %test_insertlane_f64_lane1() -> b1 {
ebb0:
block0:
v0 = f64const 0x42.42
v1 = vconst.f64x2 0x00
v2 = insertlane v1, 1, v0
@@ -37,7 +37,7 @@ ebb0:
; run
function %test_insertlane_f64_lane0() -> b1 {
ebb0:
block0:
v0 = f64const 0x42.42
v1 = vconst.f64x2 0x00
v2 = insertlane v1, 0, v0

View File

@@ -2,7 +2,7 @@ test compile
target x86_64
function u0:0(i16) -> i8 fast {
ebb0(v0: i16):
block0(v0: i16):
v1 = ireduce.i8 v0
return v1
}

View File

@@ -2,10 +2,10 @@ test compile
target x86_64
function u0:0(i64, i64) -> i128 system_v {
ebb0(v0: i64, v1: i64):
block0(v0: i64, v1: i64):
trap user0
ebb30:
block30:
v245 = iconst.i64 0
v246 = iconcat v245, v245
; The next instruction used to be legalized twice, causing a panic the second time.
@@ -13,7 +13,7 @@ ebb30:
v252, v253 = isplit v246
trap user0
ebb45:
block45:
v369 = iconst.i64 0
v370 = load.i128 v369
trap user0

View File

@@ -3,7 +3,7 @@ set opt_level=speed_and_size
target x86_64
function u0:0(i8) -> i8 fast {
ebb0(v0: i8):
block0(v0: i8):
v1 = iconst.i8 0
v2 = isub v1, v0
; check: v3 = uextend.i32 v0

View File

@@ -2,9 +2,9 @@ test compile
target x86_64
function u0:0(i128) system_v {
ebb0(v0: i128):
jump ebb1(v0)
block0(v0: i128):
jump block1(v0)
ebb1(v1: i128):
block1(v1: i128):
return
}

View File

@@ -3,7 +3,7 @@ test compile
target x86_64
function u0:0() -> i8 fast {
ebb0:
block0:
v14 = bconst.b1 false
v15 = bint.i8 v14
return v15

View File

@@ -8,15 +8,15 @@ function u0:51(i64, i64) system_v {
ss2 = explicit_slot 1
ss3 = explicit_slot 1
ebb0(v0: i64, v1: i64):
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss1
v3 = load.i8 v1
store v3, v2
v4 = stack_addr.i64 ss2
v5 = stack_addr.i64 ss3
jump ebb1
jump block1
ebb1:
block1:
v6 = load.i8 v2
store v6, v5
v7 = load.i8 v5

View File

@@ -3,44 +3,44 @@ test legalizer
target x86_64
function %br_icmp(i64) fast {
ebb0(v0: i64):
block0(v0: i64):
v1 = iconst.i64 0
br_icmp eq v0, v1, ebb1
jump ebb1
br_icmp eq v0, v1, block1
jump block1
ebb1:
block1:
return
}
; sameln: function %br_icmp(i64 [%rdi]) fast {
; nextln: ebb0(v0: i64):
; nextln: block0(v0: i64):
; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0
; nextln: [DynRexOp1icscc#8039] v2 = icmp eq v0, v1
; nextln: [RexOp1t8jccb#75] brnz v2, ebb1
; nextln: [Op1jmpb#eb] jump ebb1
; nextln: [RexOp1t8jccb#75] brnz v2, block1
; nextln: [Op1jmpb#eb] jump block1
; nextln:
; nextln: ebb1:
; nextln: block1:
; nextln: [Op1ret#c3] return
; nextln: }
function %br_icmp_ebb_args(i64) fast {
ebb0(v0: i64):
function %br_icmp_args(i64) fast {
block0(v0: i64):
v1 = iconst.i64 0
br_icmp eq v0, v1, ebb1(v0)
jump ebb1(v0)
br_icmp eq v0, v1, block1(v0)
jump block1(v0)
ebb1(v2: i64):
block1(v2: i64):
return
}
; sameln: function %br_icmp_ebb_args(i64 [%rdi]) fast {
; nextln: ebb0(v0: i64):
; sameln: function %br_icmp_args(i64 [%rdi]) fast {
; nextln: block0(v0: i64):
; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0
; nextln: [DynRexOp1icscc#8039] v3 = icmp eq v0, v1
; nextln: [RexOp1t8jccb#75] brnz v3, ebb1(v0)
; nextln: [Op1jmpb#eb] jump ebb1(v0)
; nextln: [RexOp1t8jccb#75] brnz v3, block1(v0)
; nextln: [Op1jmpb#eb] jump block1(v0)
; nextln:
; nextln: ebb1(v2: i64):
; nextln: block1(v2: i64):
; nextln: [Op1ret#c3] return
; nextln: }

View File

@@ -2,20 +2,20 @@ test compile
set opt_level=speed_and_size
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
function u0:0(i64) system_v {
ss0 = explicit_slot 1
jt0 = jump_table [ebb1]
jt0 = jump_table [block1]
ebb0(v0: i64):
block0(v0: i64):
v1 = stack_addr.i64 ss0
v2 = load.i8 v1
br_table v2, ebb2, jt0
br_table v2, block2, jt0
; check: $(oob=$V) = ifcmp_imm $(idx=$V), 1
; ebb2 is replaced by ebb1 by fold_redundant_jump
; nextln: brif uge $oob, ebb1
; nextln: fallthrough $(inb=$EBB)
; block2 is replaced by block1 by fold_redundant_jump
; nextln: brif uge $oob, block1
; nextln: fallthrough $(inb=$BB)
; check: $inb:
; nextln: $(final_idx=$V) = uextend.i64 $idx
; nextln: $(base=$V) = jump_table_base.i64 jt0
@@ -23,9 +23,9 @@ ebb0(v0: i64):
; nextln: $(addr=$V) = iadd $base, $rel_addr
; nextln: indirect_jump_table_br $addr, jt0
ebb2:
jump ebb1
block2:
jump block1
ebb1:
block1:
return
}

View File

@@ -7,7 +7,7 @@ function u0:0(i8, i8) fast {
fn0 = %black_box(i8)
ss0 = explicit_slot 1 ; black box
ebb0(v0: i8, v1: i8):
block0(v0: i8, v1: i8):
v99 = stack_addr.i64 ss0
; check: istore8 $(V), $(V)

View File

@@ -5,7 +5,7 @@ target x86_64 haswell
function %call() {
fn0 = %foo()
ebb0:
block0:
call fn0()
return
}

View File

@@ -4,7 +4,7 @@ target x86_64
; regex: V=v\d+
function u0:0(i8) -> i8, i8 fast {
ebb0(v0: i8):
block0(v0: i8):
v1 = clz v0
; check: v3 = uextend.i32 v0
; nextln: v6 = iconst.i32 -1

View File

@@ -4,36 +4,36 @@ target i686
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %cond_trap(i32) {
ebb0(v1: i32):
block0(v1: i32):
trapz v1, user67
return
; check: ebb0(v1: i32
; check: block0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $f, user67
; nextln: return
}
function %cond_trap2(i32) {
ebb0(v1: i32):
block0(v1: i32):
trapnz v1, int_ovf
return
; check: ebb0(v1: i32
; check: block0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif ne $f, int_ovf
; nextln: return
}
function %cond_trap_b1(i32) {
ebb0(v1: i32):
block0(v1: i32):
v2 = icmp_imm eq v1, 6
trapz v2, user7
return
; check: ebb0(v1: i32
; check: brnz v2, $(new=$EBB)
; check: jump $(trap=$EBB)
; check: block0(v1: i32
; check: brnz v2, $(new=$BB)
; check: jump $(trap=$BB)
; check: $trap:
; nextln: trap user7
; check: $new:
@@ -41,13 +41,13 @@ ebb0(v1: i32):
}
function %cond_trap2_b1(i32) {
ebb0(v1: i32):
block0(v1: i32):
v2 = icmp_imm eq v1, 6
trapnz v2, user9
return
; check: ebb0(v1: i32
; check: brz v2, $(new=$EBB)
; check: jump $(trap=$EBB)
; check: block0(v1: i32
; check: brz v2, $(new=$BB)
; check: jump $(trap=$BB)
; check: $trap:
; nextln: trap user9
; check: $new:
@@ -55,7 +55,7 @@ ebb0(v1: i32):
}
function %f32const() -> f32 {
ebb0:
block0:
v1 = f32const 0x1.0p1
; check: $(tmp=$V) = iconst.i32
; check: v1 = bitcast.f32 $tmp
@@ -63,9 +63,9 @@ ebb0:
}
function %select_f64(f64, f64, i32) -> f64 {
ebb0(v0: f64, v1: f64, v2: i32):
block0(v0: f64, v1: f64, v2: i32):
v3 = select v2, v0, v1
; check: brnz v2, $(new=$EBB)(v0)
; check: brnz v2, $(new=$BB)(v0)
; nextln: jump $new(v1)
; check: $new(v3: f64):
; nextln: return v3
@@ -73,19 +73,19 @@ ebb0(v0: f64, v1: f64, v2: i32):
}
function %f32_min(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
block0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
; check: $(vnat=$V) = x86_fmin.f32 v0, v1
; nextln: jump $(done=$EBB)($vnat)
; nextln: jump $(done=$BB)($vnat)
; check: $(uno=$EBB):
; check: $(uno=$BB):
; nextln: $(vuno=$V) = fadd.f32 v0, v1
; nextln: jump $(done=$EBB)($vuno)
; nextln: jump $(done=$BB)($vuno)
; check: $(ueq=$EBB):
; check: $(ueq=$BB):
; check: $(veq=$V) = bor.f32 v0, v1
; nextln: jump $(done=$EBB)($veq)
; nextln: jump $(done=$BB)($veq)
; check: $done(v2: f32):
; nextln: return v2

View File

@@ -5,11 +5,11 @@ set avoid_div_traps=1
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = udiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
@@ -20,8 +20,8 @@ ebb0(v0: i64, v1: i64):
}
function %udiv_0(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = udiv v0, v1
@@ -34,8 +34,8 @@ ebb0(v0: i64):
}
function %udiv_minus_1(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = udiv v0, v1
@@ -46,8 +46,8 @@ ebb0(v0: i64):
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = urem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
@@ -58,8 +58,8 @@ ebb0(v0: i64, v1: i64):
}
function %urem_0(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = urem v0, v1
@@ -72,8 +72,8 @@ ebb0(v0: i64):
}
function %urem_minus_1(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = urem v0, v1
@@ -84,16 +84,16 @@ ebb0(v0: i64):
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = sdiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($q)
; nextln: jump $(done=$BB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
@@ -104,8 +104,8 @@ ebb0(v0: i64, v1: i64):
}
function %sdiv_0(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = sdiv v0, v1
@@ -118,16 +118,16 @@ ebb0(v0: i64):
}
function %sdiv_minus_1(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = sdiv v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($q)
; nextln: jump $(done=$BB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
@@ -140,27 +140,27 @@ ebb0(v0: i64):
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = srem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2
}
function %srem_0(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = srem v0, v1
@@ -173,19 +173,19 @@ ebb0(v0: i64):
}
function %srem_minus_1(i64) -> i64 {
ebb0(v0: i64):
; check: ebb0(
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2

View File

@@ -5,11 +5,11 @@ set avoid_div_traps=0
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %udiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = udiv v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
@@ -18,8 +18,8 @@ ebb0(v0: i64, v1: i64):
}
function %urem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = urem v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
@@ -28,8 +28,8 @@ ebb0(v0: i64, v1: i64):
}
function %sdiv(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = sdiv v0, v1
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
@@ -40,17 +40,17 @@ ebb0(v0: i64, v1: i64):
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
; check: ebb0(
block0(v0: i64, v1: i64):
; check: block0(
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$EBB)
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$EBB)($r)
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$EBB)($zero)
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2

View File

@@ -5,7 +5,7 @@ target x86_64
; regex: V=v\d+
function %f64const() -> f64 {
ebb0:
block0:
v1 = f64const 0x1.0p1
; check: $(tmp=$V) = iconst.i64
; check: v1 = bitcast.f64 $tmp

View File

@@ -2,13 +2,13 @@ test compile
target x86_64
function u0:0(i16) -> f64 fast {
ebb0(v0: i16):
block0(v0: i16):
v1 = fcvt_from_uint.f64 v0
return v1
}
function u0:1(i16) -> f64 fast {
ebb0(v0: i16):
block0(v0: i16):
v1 = fcvt_from_sint.f64 v0
return v1
}

View File

@@ -2,7 +2,7 @@ test legalizer
target x86_64
; Test legalization for various forms of heap addresses.
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %heap_addrs(i32, i64, i64 vmctx) {
gv4 = vmctx
@@ -29,7 +29,7 @@ function %heap_addrs(i32, i64, i64 vmctx) {
; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
; check: heap7 = dynamic gv1, min 0, bound gv2, offset_guard 4096, index_type i64
ebb0(v0: i32, v1: i64, v3: i64):
block0(v0: i32, v1: i64, v3: i64):
; The fast-path; 32-bit index, static heap with a sufficient bound, no bounds check needed!
v4 = heap_addr.i64 heap0, v0, 0
; check: v12 = uextend.i64 v0
@@ -38,8 +38,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v5 = heap_addr.i64 heap1, v0, 0
; check: v14 = icmp_imm ugt v0, 0x0001_0000
; check: brz v14, $(resume_1=$EBB)
; nextln: jump $(trap_1=$EBB)
; check: brz v14, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap heap_oob
; check: $resume_1:
@@ -50,8 +50,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v6 = heap_addr.i64 heap2, v1, 0
; check: v19 = iconst.i64 0x0001_0000_0000
; check: v17 = icmp.i64 ugt v1, v19
; check: brz v17, $(resume_2=$EBB)
; nextln: jump $(trap_2=$EBB)
; check: brz v17, $(resume_2=$BB)
; nextln: jump $(trap_2=$BB)
; check: $trap_2:
; nextln: trap heap_oob
; check: $resume_2:
@@ -60,8 +60,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v7 = heap_addr.i64 heap3, v1, 0
; check: v20 = icmp_imm.i64 ugt v1, 0x0001_0000
; check: brz v20, $(resume_3=$EBB)
; nextln: jump $(trap_3=$EBB)
; check: brz v20, $(resume_3=$BB)
; nextln: jump $(trap_3=$BB)
; check: $trap_3:
; nextln: trap heap_oob
; check: $resume_3:
@@ -72,8 +72,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v22 = load.i32 notrap aligned v3+88
; check: v23 = iadd_imm v22, 0
; check: v24 = icmp.i32 ugt v0, v23
; check: brz v24, $(resume_4=$EBB)
; nextln: jump $(trap_4=$EBB)
; check: brz v24, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap heap_oob
; check: $resume_4:
@@ -85,8 +85,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v27 = load.i32 notrap aligned v3+88
; check: v28 = iadd_imm v27, 0
; check: v29 = icmp.i32 ugt v0, v28
; check: brz v29, $(resume_5=$EBB)
; nextln: jump $(trap_5=$EBB)
; check: brz v29, $(resume_5=$BB)
; nextln: jump $(trap_5=$BB)
; check: $trap_5:
; nextln: trap heap_oob
; check: $resume_5:
@@ -98,8 +98,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v32 = iadd_imm.i64 v3, 80
; check: v33 = iadd_imm v32, 0
; check: v34 = icmp.i64 ugt v1, v33
; check: brz v34, $(resume_6=$EBB)
; nextln: jump $(trap_6=$EBB)
; check: brz v34, $(resume_6=$BB)
; nextln: jump $(trap_6=$BB)
; check: $trap_6:
; nextln: trap heap_oob
; check: $resume_6:
@@ -110,8 +110,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v36 = iadd_imm.i64 v3, 80
; check: v37 = iadd_imm v36, 0
; check: v38 = icmp.i64 ugt v1, v37
; check: brz v38, $(resume_7=$EBB)
; nextln: jump $(trap_7=$EBB)
; check: brz v38, $(resume_7=$BB)
; nextln: jump $(trap_7=$BB)
; check: $trap_7:
; nextln: trap heap_oob
; check: $resume_7:

View File

@@ -5,7 +5,7 @@ target x86_64 haswell
; regex: V=v\d+
function %imul(i128, i128) -> i128 {
ebb0(v1: i128, v2: i128):
block0(v1: i128, v2: i128):
v10 = imul v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)

View File

@@ -5,7 +5,7 @@ target i686 haswell
; regex: V=v\d+
function %iadd(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = iadd v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -16,7 +16,7 @@ ebb0(v1: i64, v2: i64):
}
function %isub(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = isub v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -27,7 +27,7 @@ ebb0(v1: i64, v2: i64):
}
function %imul(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = imul v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -42,7 +42,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_eq(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp eq v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -53,7 +53,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_eq(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm eq v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -67,7 +67,7 @@ ebb0(v1: i64):
}
function %icmp_ne(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp ne v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -78,7 +78,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_ne(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm ne v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -92,7 +92,7 @@ ebb0(v1: i64):
}
function %icmp_sgt(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp sgt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -106,7 +106,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_sgt(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm sgt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -123,7 +123,7 @@ ebb0(v1: i64):
}
function %icmp_sge(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp sge v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -137,7 +137,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_sge(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm sge v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -154,7 +154,7 @@ ebb0(v1: i64):
}
function %icmp_slt(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp slt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -168,7 +168,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_slt(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm slt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -185,7 +185,7 @@ ebb0(v1: i64):
}
function %icmp_sle(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp sle v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -199,7 +199,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_sle(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm sle v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -216,7 +216,7 @@ ebb0(v1: i64):
}
function %icmp_ugt(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp ugt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -230,7 +230,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_ugt(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm ugt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -247,7 +247,7 @@ ebb0(v1: i64):
}
function %icmp_uge(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp uge v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -261,7 +261,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_uge(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm uge v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -278,7 +278,7 @@ ebb0(v1: i64):
}
function %icmp_ult(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp ult v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -292,7 +292,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_ult(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm ult v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
@@ -309,7 +309,7 @@ ebb0(v1: i64):
}
function %icmp_ule(i64, i64) -> b1 {
ebb0(v1: i64, v2: i64):
block0(v1: i64, v2: i64):
v10 = icmp ule v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
@@ -323,7 +323,7 @@ ebb0(v1: i64, v2: i64):
}
function %icmp_imm_ule(i64) -> b1 {
ebb0(v1: i64):
block0(v1: i64):
v10 = icmp_imm ule v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)

View File

@@ -4,7 +4,7 @@ target x86_64
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
ebb0(v0: i8, v1: i8):
block0(v0: i8, v1: i8):
v2 = icmp_imm sle v0, 0
; check: $(e1=$V) = sextend.i32 v0
; nextln: v2 = icmp_imm sle $e1, 0

View File

@@ -5,10 +5,10 @@ target x86_64
function u0:0(i64) system_v {
ss0 = explicit_slot 0
ebb0(v0: i64):
jump ebb1
block0(v0: i64):
jump block1
ebb1:
block1:
; _0 = const 42u8
v1 = iconst.i8 42
store v1, v0

View File

@@ -4,7 +4,7 @@ target x86_64
function u0:0(i64, i8, i8) system_v {
ebb0(v0: i64, v1: i8, v2: i8):
block0(v0: i64, v1: i8, v2: i8):
v11 = imul v1, v2
store v11, v0
return

View File

@@ -5,7 +5,7 @@ target x86_64
function u0:0(i64, i8) system_v {
ss0 = explicit_slot 1
ebb0(v0: i64, v1: i8):
block0(v0: i64, v1: i8):
v3 = stack_addr.i64 ss0
v5 = load.i8 v3
v6 = iconst.i8 2

View File

@@ -2,11 +2,11 @@ test compile
target x86_64
function u0:0(i128) -> i64, i64 fast {
; check: ebb0(v4: i64 [%rdi], v5: i64 [%rsi], v8: i64 [%rbp]):
ebb0(v0: i128):
jump ebb2
; check: block0(v4: i64 [%rdi], v5: i64 [%rsi], v8: i64 [%rbp]):
block0(v0: i128):
jump block2
ebb1:
block1:
; When this `isplit` is legalized, the bnot below is not yet legalized,
; so there isn't a corresponding `iconcat` yet. We should try legalization
; for this `isplit` again once all instrucions have been legalized.
@@ -14,11 +14,11 @@ ebb1:
; return v6, v7
return v2, v3
ebb2:
block2:
; check: v6 = bnot.i64 v4
; check: v2 -> v6
; check: v7 = bnot.i64 v5
; check: v3 -> v7
v1 = bnot.i128 v0
jump ebb1
jump block1
}

View File

@@ -5,7 +5,7 @@ set is_pic
target x86_64
function %floor(f32) -> f32 {
ebb0(v0: f32):
block0(v0: f32):
v1 = floor v0
return v1
}

View File

@@ -9,16 +9,16 @@ function u0:0(i64, i8, i8) system_v {
ss3 = explicit_slot 1
ss4 = explicit_slot 1
ebb0(v0: i64, v1: i8, v2: i8):
block0(v0: i64, v1: i8, v2: i8):
v3 = stack_addr.i64 ss1
store v1, v3
v4 = stack_addr.i64 ss2
store v2, v4
v5 = stack_addr.i64 ss3
v6 = stack_addr.i64 ss4
jump ebb1
jump block1
ebb1:
block1:
v7 = load.i8 v3
store v7, v5
v8 = load.i8 v4

View File

@@ -3,13 +3,13 @@ test legalizer
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %vmctx(i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, -16
ebb1(v1: i64):
block1(v1: i64):
v2 = global_value.i64 gv1
; check: v2 = iadd_imm v1, -16
return v2
@@ -21,7 +21,7 @@ function %load(i64 vmctx) -> i64 {
gv1 = load.i64 notrap aligned gv0-16
gv2 = iadd_imm.i64 gv1, 32
ebb1(v1: i64):
block1(v1: i64):
v2 = global_value.i64 gv2
; check: $(p1=$V) = load.i64 notrap aligned v1-16
; check: v2 = iadd_imm $p1, 32
@@ -33,7 +33,7 @@ function %symbol() -> i64 {
gv0 = symbol %something
gv1 = symbol u123:456
ebb1:
block1:
v0 = global_value.i64 gv0
; check: v0 = symbol_value.i64 gv0
v1 = global_value.i64 gv1
@@ -49,8 +49,8 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(
block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 1
; Boundscheck should be eliminated.
; Checks here are assuming that no pipehole opts fold the load offsets.
@@ -70,13 +70,13 @@ function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
block0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
; the `trap heap_oob` instruction as the terminator of the Ebb and moving
; the remainder of the instructions into an inaccessible Ebb.
; check: ebb0(
; the `trap heap_oob` instruction as the terminator of the block and moving
; the remainder of the instructions into an inaccessible block.
; check: block0(
; nextln: trap heap_oob
; check: ebb1:
; check: block1:
; nextln: v1 = iconst.i64 0
; nextln: v2 = load.f32 v1+16
; nextln: return v2
@@ -94,13 +94,13 @@ function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(
block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Boundscheck code
; check: $(oob=$V) = icmp
; nextln: brz $oob, $(ok=$EBB)
; nextln: jump $(trap_oob=$EBB)
; nextln: brz $oob, $(ok=$BB)
; nextln: jump $(trap_oob=$BB)
; check: $trap_oob:
; nextln: trap heap_oob
; check: $ok:

View File

@@ -4,7 +4,7 @@ target x86_64 baseline
; umulhi/smulhi on 64 bit operands
function %i64_umulhi(i64, i64) -> i64 {
ebb0(v10: i64, v11: i64):
block0(v10: i64, v11: i64):
v12 = umulhi v10, v11
; check: %rdi -> %rax
; check: x86_umulx
@@ -13,7 +13,7 @@ ebb0(v10: i64, v11: i64):
}
function %i64_smulhi(i64, i64) -> i64 {
ebb0(v20: i64, v21: i64):
block0(v20: i64, v21: i64):
v22 = smulhi v20, v21
; check: %rdi -> %rax
; check: x86_smulx
@@ -25,7 +25,7 @@ ebb0(v20: i64, v21: i64):
; umulhi/smulhi on 32 bit operands
function %i32_umulhi(i32, i32) -> i32 {
ebb0(v30: i32, v31: i32):
block0(v30: i32, v31: i32):
v32 = umulhi v30, v31
; check: %rdi -> %rax
; check: x86_umulx
@@ -34,7 +34,7 @@ ebb0(v30: i32, v31: i32):
}
function %i32_smulhi(i32, i32) -> i32 {
ebb0(v40: i32, v41: i32):
block0(v40: i32, v41: i32):
v42 = smulhi v40, v41
; check: %rdi -> %rax
; check: x86_smulx

View File

@@ -2,7 +2,7 @@ test compile
target x86_64
function u0:0(i8) -> i8 fast {
ebb0(v0: i8):
block0(v0: i8):
v1 = popcnt v0
; check-not: sextend.i32 v0
return v1

View File

@@ -11,16 +11,16 @@ function u0:0(i64, i64, i64) system_v {
sig0 = (i64, i16, i64) system_v
fn0 = colocated u0:11 sig0
ebb0(v0: i64, v1: i64, v2: i64):
block0(v0: i64, v1: i64, v2: i64):
v3 = stack_addr.i64 ss1
store v1, v3
v4 = stack_addr.i64 ss2
store v2, v4
v5 = stack_addr.i64 ss3
v6 = stack_addr.i64 ss4
jump ebb1
jump block1
ebb1:
block1:
v7 = load.i64 v3
v8 = load.i16 v7
store v8, v5
@@ -29,8 +29,8 @@ ebb1:
v10 = load.i16 v5
v11 = load.i64 v6
call fn0(v0, v10, v11)
jump ebb2
jump block2
ebb2:
block2:
return
}

View File

@@ -5,7 +5,7 @@ target x86_64
; regex: R=%[a-z0-9]+
function %i32_rotr(i32, i32) -> i32 fast {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
; check: regmove v1, $R -> %rcx
; check: v2 = rotr v0, v1
v2 = rotr v0, v1
@@ -13,14 +13,14 @@ ebb0(v0: i32, v1: i32):
}
function %i32_rotr_imm_1(i32) -> i32 fast {
ebb0(v0: i32):
block0(v0: i32):
; check: $V = rotr_imm v0, 1
v2 = rotr_imm v0, 1
return v2
}
function %i32_rotl(i32, i32) -> i32 fast {
ebb0(v0: i32, v1: i32):
block0(v0: i32, v1: i32):
; check: regmove v1, $R -> %rcx
; check: v2 = rotl v0, v1
v2 = rotl v0, v1
@@ -28,7 +28,7 @@ ebb0(v0: i32, v1: i32):
}
function %i32_rotl_imm_1(i32) -> i32 fast {
ebb0(v0: i32):
block0(v0: i32):
; check: $V = rotl_imm v0, 1
v2 = rotl_imm v0, 1
return v2

View File

@@ -4,7 +4,7 @@ target x86_64
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
ebb0(v0: i8, v1: i8):
block0(v0: i8, v1: i8):
v2 = ishl v0, v1
; check: $(e1=$V) = uextend.i32 v0
; check: $(r1=$V) = ishl $e1, v1

View File

@@ -5,7 +5,7 @@ target x86_64 haswell
; use baldrdash_system_v calling convention here for simplicity (avoids prologue, epilogue)
function %test_splat_i32() -> i32x4 baldrdash_system_v {
ebb0:
block0:
v0 = iconst.i32 42
v1 = splat.i32x4 v0
return v1
@@ -14,7 +14,7 @@ ebb0:
; sameln: function %test_splat_i32() -> i32x4 [%xmm0] baldrdash_system_v {
; nextln: ss0 = incoming_arg 0, offset 0
; nextln:
; nextln: ebb0:
; nextln: block0:
; nextln: v0 = iconst.i32 42
; nextln: v2 = scalar_to_vector.i32x4 v0
; nextln: v1 = x86_pshufd v2, 0
@@ -24,13 +24,13 @@ ebb0:
function %test_splat_i64() -> i64x2 baldrdash_system_v {
ebb0:
block0:
v0 = iconst.i64 42
v1 = splat.i64x2 v0
return v1
}
; check: ebb0:
; check: block0:
; nextln: v0 = iconst.i64 42
; nextln: v2 = scalar_to_vector.i64x2 v0
; nextln: v1 = x86_pinsr v2, 1, v0
@@ -39,13 +39,13 @@ ebb0:
function %test_splat_b16() -> b16x8 baldrdash_system_v {
ebb0:
block0:
v0 = bconst.b16 true
v1 = splat.b16x8 v0
return v1
}
; check: ebb0:
; check: block0:
; nextln: v0 = bconst.b16 true
; nextln: v2 = scalar_to_vector.b16x8 v0
; nextln: v3 = x86_pinsr v2, 1, v0
@@ -57,13 +57,13 @@ ebb0:
function %test_splat_i8() -> i8x16 baldrdash_system_v {
ebb0:
block0:
v0 = iconst.i8 42
v1 = splat.i8x16 v0
return v1
}
; check: ebb0:
; check: block0:
; nextln: v2 = iconst.i32 42
; nextln: v0 = ireduce.i8 v2
; nextln: v3 = scalar_to_vector.i8x16 v0

View File

@@ -2,7 +2,7 @@ test legalizer
target x86_64
; Test legalization for various forms of table addresses.
; regex: EBB=ebb\d+
; regex: BB=block\d+
function %table_addrs(i32, i64, i64 vmctx) {
gv4 = vmctx
@@ -20,12 +20,12 @@ function %table_addrs(i32, i64, i64 vmctx) {
; check: table2 = dynamic gv0, min 0x0001_0000, bound gv1, element_size 1, index_type i64
; check: table3 = dynamic gv0, min 0, bound gv1, element_size 16, index_type i64
ebb0(v0: i32, v1: i64, v3: i64):
block0(v0: i32, v1: i64, v3: i64):
v4 = table_addr.i64 table0, v0, +0
; check: v8 = load.i32 notrap aligned v3+88
; check: v9 = icmp uge v0, v8
; check: brz v9, $(resume_1=$EBB)
; nextln: jump $(trap_1=$EBB)
; check: brz v9, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap table_oob
; check: $resume_1:
@@ -36,8 +36,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v5 = table_addr.i64 table1, v0, +0
; check: v12 = load.i32 notrap aligned v3+88
; check: v13 = icmp.i32 uge v0, v12
; check: brz v13, $(resume_2=$EBB)
; nextln: jump $(trap_2=$EBB)
; check: brz v13, $(resume_2=$BB)
; nextln: jump $(trap_2=$BB)
; check: $trap_2:
; nextln: trap table_oob
; check: $resume_2:
@@ -49,8 +49,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v6 = table_addr.i64 table2, v1, +0
; check: v17 = iadd_imm.i64 v3, 80
; check: v18 = icmp.i64 uge v1, v17
; check: brz v18, $(resume_3=$EBB)
; nextln: jump $(trap_3=$EBB)
; check: brz v18, $(resume_3=$BB)
; nextln: jump $(trap_3=$BB)
; check: $trap_3:
; nextln: trap table_oob
; check: $resume_3:
@@ -60,8 +60,8 @@ ebb0(v0: i32, v1: i64, v3: i64):
v7 = table_addr.i64 table3, v1, +0
; check: v20 = iadd_imm.i64 v3, 80
; check: v21 = icmp.i64 uge v1, v20
; check: brz v21, $(resume_4=$EBB)
; nextln: jump $(trap_4=$EBB)
; check: brz v21, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap table_oob
; check: $resume_4:

View File

@@ -4,7 +4,7 @@ target x86_64
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
ebb0(v0: i8, v1: i8):
block0(v0: i8, v1: i8):
v2 = urem v0, v1
; check: $(a=$V) = uextend.i32 v0
; nextln: $(b=$V) = uextend.i32 v1

View File

@@ -2,14 +2,14 @@ test compile
target i686
function u0:0(i64, i32) system_v {
ebb0(v0: i64, v1: i32):
block0(v0: i64, v1: i32):
v2 = bor v0, v0
store v2, v1
return
}
function u0:1(i32) -> i64 system_v {
ebb0(v1: i32):
block0(v1: i32):
v0 = load.i64 v1
v2 = bor v0, v0
return v2

View File

@@ -3,7 +3,7 @@ test compile
target x86_64
function %test(i32) -> i32 system_v {
ebb0(v0: i32):
block0(v0: i32):
nop
v1 = iconst.i32 42
return v1

View File

@@ -4,21 +4,21 @@ set opt_level=speed_and_size
target i686
function %foo() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %bar() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0
return v1
}
function %zero_dword() -> i32 fast {
ebb0:
block0:
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i32 0 ; bin: 31 c0
; asm: xor %edi, %edi
@@ -27,7 +27,7 @@ ebb0:
}
function %zero_word() -> i16 fast {
ebb0:
block0:
; while you may expect this to be encoded like 6631c0, aka
; xor %ax, %ax, the upper 16 bits of the register used for
; i16 are left undefined, so it's not wrong to clear them.
@@ -43,7 +43,7 @@ ebb0:
}
function %zero_byte() -> i8 fast {
ebb0:
block0:
; asm: xor %al, %al
[-,%rax] v0 = iconst.i8 0 ; bin: 30 c0
; asm: xor %dh, %dh

View File

@@ -4,35 +4,35 @@ set opt_level=speed_and_size
target x86_64
function %zero_const_32bit_no_rex() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %zero_const_32bit_rex() -> f32 fast {
ebb0:
block0:
; asm: xorps %xmm8, %xmm8
[-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0
return v1
}
function %zero_const_64bit_no_rex() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v0 = f64const 0.0 ; bin: 66 0f 57 c0
return v0
}
function %zero_const_64bit_rex() -> f64 fast {
ebb0:
block0:
; asm: xorpd %xmm8, %xmm8
[-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0
return v1
}
function %imm_zero_register() -> i64 fast {
ebb0:
block0:
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i64 0 ; bin: 31 c0
; asm: xor %edi, %edi
@@ -45,7 +45,7 @@ ebb0:
}
function %zero_word() -> i16 fast {
ebb0:
block0:
; while you may expect this to be encoded like 6631c0, aka
; xor %ax, %ax, the upper 16 bits of the register used for
; i16 are left undefined, so it's not wrong to clear them.
@@ -61,7 +61,7 @@ ebb0:
}
function %zero_byte() -> i8 fast {
ebb0:
block0:
; asm: xor %r8b, %r8b
[-,%r15] v0 = iconst.i8 0 ; bin: 45 30 ff
; asm: xor %al, %al

View File

@@ -11,7 +11,7 @@ target x86_64
; r15 is the pinned heap register. It must not be rewritten, so it must not be
; used as a tied output register.
function %tied_input() -> i64 system_v {
ebb0:
block0:
v1 = get_pinned_reg.i64
v2 = iadd_imm v1, 42
return v2
@@ -25,7 +25,7 @@ ebb0:
;; It musn't be used even if this is a tied input used twice.
function %tied_twice() -> i64 system_v {
ebb0:
block0:
v1 = get_pinned_reg.i64
v2 = iadd v1, v1
return v2
@@ -38,7 +38,7 @@ ebb0:
; sameln: iadd v1, v1
function %uses() -> i64 system_v {
ebb0:
block0:
v1 = get_pinned_reg.i64
v2 = iadd_imm v1, 42
v3 = get_pinned_reg.i64
@@ -62,7 +62,7 @@ function u0:1(i64 vmctx) -> i64 system_v {
gv0 = vmctx
heap0 = static gv0, min 0x000a_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
ebb0(v42: i64):
block0(v42: i64):
v5 = iconst.i32 42
v6 = heap_addr.i64 heap0, v5, 0
v7 = load.i64 v6

View File

@@ -8,7 +8,7 @@ target x86_64
function %big() system_v {
ss0 = explicit_slot 300000
ebb0:
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
@@ -17,7 +17,7 @@ ebb0:
; nextln: sig0 = (i64 [%rax]) probestack
; nextln: fn0 = colocated %Probestack sig0
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0

View File

@@ -7,14 +7,14 @@ target x86_64
function %big() system_v {
ss0 = explicit_slot 300000
ebb0:
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 300000, offset -300016
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 0x0004_93e0

View File

@@ -5,7 +5,7 @@ target x86_64
function %big() system_v {
ss0 = explicit_slot 300000
ebb0:
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
@@ -14,7 +14,7 @@ ebb0:
; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack
; nextln: fn0 = %Probestack sig0
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0

View File

@@ -8,7 +8,7 @@ target x86_64
function %big() system_v {
ss0 = explicit_slot 4097
ebb0:
block0:
return
}
@@ -16,7 +16,7 @@ ebb0:
; nextln: ss0 = explicit_slot 4097, offset -4113
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 4112
@@ -30,7 +30,7 @@ ebb0:
function %bigger() system_v {
ss0 = explicit_slot 8192
ebb0:
block0:
return
}
@@ -38,7 +38,7 @@ ebb0:
; nextln: ss0 = explicit_slot 8192, offset -8208
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 8192
@@ -52,7 +52,7 @@ ebb0:
function %biggest() system_v {
ss0 = explicit_slot 8193
ebb0:
block0:
return
}
@@ -62,7 +62,7 @@ ebb0:
; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack
; nextln: fn0 = colocated %Probestack sig0
; nextln:
; nextln: ebb0(v0: i64 [%rbp]):
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 8208

Some files were not shown because too many files have changed in this diff Show More