Check in the wasmstandalone code.

This is based on the code in https://github.com/denismerigoux/cretonne/commits/wasm2cretonne
before wasmstandalone was removed, with minor updates for the new library structure.
It is not yet updated for the latest cretonne API changes.
Author: Dan Gohman
Date: 2017-09-05 17:06:51 -07:00
commit d0fe50a2a8
parent 8f6957296e
679 changed files with 31 additions and 57859 deletions

filetests/arith.wast (new file)

@@ -0,0 +1,13 @@
(module
(memory 1)
(func $main (local i32)
(set_local 0 (i32.sub (i32.const 4) (i32.const 4)))
(if
(get_local 0)
(then unreachable)
(else (drop (i32.mul (i32.const 6) (get_local 0))))
)
)
(start $main)
(data (i32.const 0) "abcdefgh")
)

filetests/call.wast (new file)

@@ -0,0 +1,10 @@
(module
(func $main (local i32)
(set_local 0 (i32.const 0))
(drop (call $inc))
)
(func $inc (result i32)
(i32.const 1)
)
(start $main)
)


@@ -1,35 +0,0 @@
; For testing cfg generation. This code is nonsense.
test print-cfg
test verifier
function %nonsense(i32, i32) -> f32 {
; check: digraph %nonsense {
; regex: I=\binst\d+\b
; check: label="{ebb0 | <$(BRZ=$I)>brz ebb2 | <$(JUMP=$I)>jump ebb1}"]
ebb0(v1: i32, v2: i32):
v3 = f64const 0x0.0
brz v2, ebb2 ; unordered: ebb0:$BRZ -> ebb2
v4 = iconst.i32 0
jump ebb1(v4) ; unordered: ebb0:$JUMP -> ebb1
ebb1(v5: i32):
v6 = imul_imm v5, 4
v7 = iadd v1, v6
v8 = f32const 0.0
v9 = f32const 0.0
v10 = f32const 0.0
v11 = fadd v9, v10
v12 = iadd_imm v5, 1
v13 = icmp ult v12, v2
brnz v13, ebb1(v12) ; unordered: ebb1:inst12 -> ebb1
v14 = f64const 0.0
v15 = f64const 0.0
v16 = fdiv v14, v15
v17 = f32const 0.0
return v17
ebb2:
v100 = f32const 0.0
return v100
}


@@ -1,21 +0,0 @@
; For testing cfg generation. This code explores the implications of encountering
; a terminating instruction before any connections have been made.
test print-cfg
test verifier
function %nonsense(i32) {
; check: digraph %nonsense {
ebb0(v1: i32):
trap ; error: terminator instruction was encountered before the end
brnz v1, ebb2 ; unordered: ebb0:inst1 -> ebb2
jump ebb1 ; unordered: ebb0:inst2 -> ebb1
ebb1:
v2 = iconst.i32 0
v3 = iadd v1, v3
jump ebb0(v3) ; unordered: ebb1:inst5 -> ebb0
ebb2:
return v1
}


@@ -1,21 +0,0 @@
; For testing cfg generation where some block is never reached.
test print-cfg
function %not_reached(i32) -> i32 {
; check: digraph %not_reached {
; check: ebb0 [shape=record, label="{ebb0 | <inst0>brnz ebb2}"]
; check: ebb1 [shape=record, label="{ebb1 | <inst4>jump ebb0}"]
; check: ebb2 [shape=record, label="{ebb2}"]
ebb0(v0: i32):
brnz v0, ebb2 ; unordered: ebb0:inst0 -> ebb2
trap
ebb1:
v1 = iconst.i32 1
v2 = iadd v0, v1
jump ebb0(v2) ; unordered: ebb1:inst4 -> ebb0
ebb2:
return v0
}


@@ -1,13 +0,0 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb3 ; dominates: ebb3
jump ebb2 ; dominates: ebb2
ebb2:
jump ebb3
ebb3:
return
}


@@ -1,20 +0,0 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb3 ebb4 ebb5
jump ebb2 ; dominates: ebb2
ebb1:
jump ebb3
ebb2:
brz v0, ebb4
jump ebb5
ebb3:
jump ebb4
ebb4:
brz v0, ebb3
jump ebb5
ebb5:
brz v0, ebb4
return
}


@@ -1,31 +0,0 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1 ebb6
brnz v0, ebb2 ; dominates: ebb2 ebb9
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb6
ebb2:
brz v0, ebb4 ; dominates: ebb4 ebb7 ebb8
jump ebb5 ; dominates: ebb5
ebb3:
jump ebb9
ebb4:
brz v0, ebb4
brnz v0, ebb6
jump ebb7
ebb5:
brz v0, ebb7
brnz v0, ebb8
jump ebb9
ebb6:
return
ebb7:
jump ebb8
ebb8:
return
ebb9:
return
}


@@ -1,33 +0,0 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb1 ; dominates: ebb1
brnz v0, ebb2 ; dominates: ebb2 ebb5
jump ebb3 ; dominates: ebb3
ebb1:
jump ebb4 ; dominates: ebb4
ebb2:
jump ebb5
ebb3:
jump ebb5
ebb4:
brz v0, ebb6 ; dominates: ebb6 ebb10
jump ebb7 ; dominates: ebb7
ebb5:
return
ebb6:
brz v0, ebb8 ; dominates: ebb11 ebb8
brnz v0, ebb9 ; dominates: ebb9
jump ebb10
ebb7:
jump ebb10
ebb8:
jump ebb11
ebb9:
jump ebb11
ebb10:
return
ebb11:
return
}


@@ -1,41 +0,0 @@
test domtree
function %test(i32) {
ebb0(v0: i32):
brz v0, ebb13 ; dominates: ebb13
jump ebb1 ; dominates: ebb1
ebb1:
brz v0, ebb2 ; dominates: ebb2 ebb7
brnz v0, ebb3 ; dominates: ebb3
brz v0, ebb4 ; dominates: ebb4
brnz v0, ebb5 ; dominates: ebb5
jump ebb6 ; dominates: ebb6
ebb2:
jump ebb7
ebb3:
jump ebb7
ebb4:
jump ebb7
ebb5:
jump ebb7
ebb6:
jump ebb7
ebb7:
brnz v0, ebb8 ; dominates: ebb8 ebb12
brz v0, ebb9 ; dominates: ebb9
brnz v0, ebb10 ; dominates: ebb10
jump ebb11 ; dominates: ebb11
ebb8:
jump ebb12
ebb9:
jump ebb12
ebb10:
brz v0, ebb13
jump ebb12
ebb11:
jump ebb13
ebb12:
return
ebb13:
return
}

filetests/fibonacci.wast (new file)

@@ -0,0 +1,22 @@
(module
(memory 1)
(func $main (local i32 i32 i32 i32)
(set_local 0 (i32.const 0))
(set_local 1 (i32.const 1))
(set_local 2 (i32.const 1))
(set_local 3 (i32.const 0))
(block
(loop
(br_if 1 (i32.gt_s (get_local 0) (i32.const 5)))
(set_local 3 (get_local 2))
(set_local 2 (i32.add (get_local 2) (get_local 1)))
(set_local 1 (get_local 3))
(set_local 0 (i32.add (get_local 0) (i32.const 1)))
(br 0)
)
)
(i32.store (i32.const 0) (get_local 2))
)
(start $main)
(data (i32.const 0) "0000")
)
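;; A rough trace of the loop above (a sketch, assuming the usual get_local /
;; set_local semantics; not part of the original test): locals 0..3 act as
;; (i, a, b, t) starting at (0, 1, 1, 0); each iteration does t = b;
;; b = b + a; a = t; i = i + 1, and the br_if exits once i > 5. Six
;; iterations leave b = 21, so the i32.store writes 21 little-endian over
;; the "0000" data at address 0.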

filetests/globals.wast (new file)

@@ -0,0 +1,8 @@
(module
(global $x (mut i32) (i32.const 4))
(memory 1)
(func $main (local i32)
(i32.store (i32.const 0) (get_global $x))
)
(start $main)
)


@@ -1,20 +0,0 @@
; Test the legalization of function signatures.
test legalizer
set is_64bit
isa intel
; regex: V=v\d+
function %f() {
sig0 = (i32) -> i32 native
; check: sig0 = (i32 [%rdi]) -> i32 [%rax] native
sig1 = (i64) -> b1 native
; check: sig1 = (i64 [%rdi]) -> b1 [%rax] native
sig2 = (f32, i64) -> f64 native
; check: sig2 = (f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0] native
ebb0:
return
}

filetests/isa/intel/binary32-float.cton (deleted file)

@@ -1,146 +0,0 @@
; Binary emission of 32-bit floating point code.
test binemit
isa intel has_sse2
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/intel/binary32-float.cton | llvm-mc -show-encoding -triple=i386
;
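; For example, the first fadd below should round-trip through llvm-mc as
; something like (illustrative output, not part of the test):
;
;   addss %xmm2, %xmm5              # encoding: [0xf3,0x0f,0x58,0xea]
;
; which matches the "bin: f3 0f 58 ea" annotation on that line.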
function %F32() {
ebb0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
; asm: cvtsi2ss %ecx, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f32 v0 ; bin: f3 0f 2a e9
; asm: cvtsi2ss %esi, %xmm2
[-,%xmm2] v11 = fcvt_from_sint.f32 v1 ; bin: f3 0f 2a d6
; asm: cvtss2sd %xmm2, %xmm5
[-,%xmm5] v12 = fpromote.f64 v11 ; bin: f3 0f 5a ea
; asm: cvtss2sd %xmm5, %xmm2
[-,%xmm2] v13 = fpromote.f64 v10 ; bin: f3 0f 5a d5
; asm: movd %ecx, %xmm5
[-,%xmm5] v14 = bitcast.f32 v0 ; bin: 66 0f 6e e9
; asm: movd %esi, %xmm2
[-,%xmm2] v15 = bitcast.f32 v1 ; bin: 66 0f 6e d6
; asm: movd %xmm5, %ecx
[-,%rcx] v16 = bitcast.i32 v10 ; bin: 66 0f 7e e9
; asm: movd %xmm2, %esi
[-,%rsi] v17 = bitcast.i32 v11 ; bin: 66 0f 7e d6
; Binary arithmetic.
; asm: addss %xmm2, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f3 0f 58 ea
; asm: addss %xmm5, %xmm2
[-,%xmm2] v21 = fadd v11, v10 ; bin: f3 0f 58 d5
; asm: subss %xmm2, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f3 0f 5c ea
; asm: subss %xmm5, %xmm2
[-,%xmm2] v23 = fsub v11, v10 ; bin: f3 0f 5c d5
; asm: mulss %xmm2, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f3 0f 59 ea
; asm: mulss %xmm5, %xmm2
[-,%xmm2] v25 = fmul v11, v10 ; bin: f3 0f 59 d5
; asm: divss %xmm2, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f3 0f 5e ea
; asm: divss %xmm5, %xmm2
[-,%xmm2] v27 = fdiv v11, v10 ; bin: f3 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm2, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 0f 54 ea
; asm: andps %xmm5, %xmm2
[-,%xmm2] v31 = band v11, v10 ; bin: 0f 54 d5
; asm: andnps %xmm2, %xmm5
[-,%xmm5] v32 = band_not v10, v11 ; bin: 0f 55 ea
; asm: andnps %xmm5, %xmm2
[-,%xmm2] v33 = band_not v11, v10 ; bin: 0f 55 d5
; asm: orps %xmm2, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 0f 56 ea
; asm: orps %xmm5, %xmm2
[-,%xmm2] v35 = bor v11, v10 ; bin: 0f 56 d5
; asm: xorps %xmm2, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 0f 57 ea
; asm: xorps %xmm5, %xmm2
[-,%xmm2] v37 = bxor v11, v10 ; bin: 0f 57 d5
return
}
function %F64() {
ebb0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
; asm: cvtsi2sd %ecx, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f64 v0 ; bin: f2 0f 2a e9
; asm: cvtsi2sd %esi, %xmm2
[-,%xmm2] v11 = fcvt_from_sint.f64 v1 ; bin: f2 0f 2a d6
; asm: cvtsd2ss %xmm2, %xmm5
[-,%xmm5] v12 = fdemote.f32 v11 ; bin: f2 0f 5a ea
; asm: cvtsd2ss %xmm5, %xmm2
[-,%xmm2] v13 = fdemote.f32 v10 ; bin: f2 0f 5a d5
; No i64 <-> f64 bitcasts in 32-bit mode.
; Binary arithmetic.
; asm: addsd %xmm2, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f2 0f 58 ea
; asm: addsd %xmm5, %xmm2
[-,%xmm2] v21 = fadd v11, v10 ; bin: f2 0f 58 d5
; asm: subsd %xmm2, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f2 0f 5c ea
; asm: subsd %xmm5, %xmm2
[-,%xmm2] v23 = fsub v11, v10 ; bin: f2 0f 5c d5
; asm: mulsd %xmm2, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f2 0f 59 ea
; asm: mulsd %xmm5, %xmm2
[-,%xmm2] v25 = fmul v11, v10 ; bin: f2 0f 59 d5
; asm: divsd %xmm2, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f2 0f 5e ea
; asm: divsd %xmm5, %xmm2
[-,%xmm2] v27 = fdiv v11, v10 ; bin: f2 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm2, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 0f 54 ea
; asm: andps %xmm5, %xmm2
[-,%xmm2] v31 = band v11, v10 ; bin: 0f 54 d5
; asm: andnps %xmm2, %xmm5
[-,%xmm5] v32 = band_not v10, v11 ; bin: 0f 55 ea
; asm: andnps %xmm5, %xmm2
[-,%xmm2] v33 = band_not v11, v10 ; bin: 0f 55 d5
; asm: orps %xmm2, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 0f 56 ea
; asm: orps %xmm5, %xmm2
[-,%xmm2] v35 = bor v11, v10 ; bin: 0f 56 d5
; asm: xorps %xmm2, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 0f 57 ea
; asm: xorps %xmm5, %xmm2
[-,%xmm2] v37 = bxor v11, v10 ; bin: 0f 57 d5
return
}

filetests/isa/intel/binary32.cton (deleted file)

@@ -1,368 +0,0 @@
; Binary emission of 32-bit code.
test binemit
isa intel haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/intel/binary32.cton | llvm-mc -show-encoding -triple=i386
;
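; Note that multi-byte groups in the "bin:" annotations below are written as
; values, while llvm-mc prints the raw little-endian bytes; e.g. the first
; iconst should come back as (illustrative):
;
;   movl $1, %ecx                   # encoding: [0xb9,0x01,0x00,0x00,0x00]
;
; corresponding to "bin: b9 00000001".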
function %I32() {
fn0 = function %foo()
sig0 = ()
ebb0:
; asm: movl $1, %ecx
[-,%rcx] v1 = iconst.i32 1 ; bin: b9 00000001
; asm: movl $2, %esi
[-,%rsi] v2 = iconst.i32 2 ; bin: be 00000002
; Integer Register-Register Operations.
; asm: addl %esi, %ecx
[-,%rcx] v10 = iadd v1, v2 ; bin: 01 f1
; asm: addl %ecx, %esi
[-,%rsi] v11 = iadd v2, v1 ; bin: 01 ce
; asm: subl %esi, %ecx
[-,%rcx] v12 = isub v1, v2 ; bin: 29 f1
; asm: subl %ecx, %esi
[-,%rsi] v13 = isub v2, v1 ; bin: 29 ce
; asm: andl %esi, %ecx
[-,%rcx] v14 = band v1, v2 ; bin: 21 f1
; asm: andl %ecx, %esi
[-,%rsi] v15 = band v2, v1 ; bin: 21 ce
; asm: orl %esi, %ecx
[-,%rcx] v16 = bor v1, v2 ; bin: 09 f1
; asm: orl %ecx, %esi
[-,%rsi] v17 = bor v2, v1 ; bin: 09 ce
; asm: xorl %esi, %ecx
[-,%rcx] v18 = bxor v1, v2 ; bin: 31 f1
; asm: xorl %ecx, %esi
[-,%rsi] v19 = bxor v2, v1 ; bin: 31 ce
; Dynamic shifts take the shift amount in %rcx.
; asm: shll %cl, %esi
[-,%rsi] v20 = ishl v2, v1 ; bin: d3 e6
; asm: shll %cl, %ecx
[-,%rcx] v21 = ishl v1, v1 ; bin: d3 e1
; asm: shrl %cl, %esi
[-,%rsi] v22 = ushr v2, v1 ; bin: d3 ee
; asm: shrl %cl, %ecx
[-,%rcx] v23 = ushr v1, v1 ; bin: d3 e9
; asm: sarl %cl, %esi
[-,%rsi] v24 = sshr v2, v1 ; bin: d3 fe
; asm: sarl %cl, %ecx
[-,%rcx] v25 = sshr v1, v1 ; bin: d3 f9
; asm: roll %cl, %esi
[-,%rsi] v26 = rotl v2, v1 ; bin: d3 c6
; asm: roll %cl, %ecx
[-,%rcx] v27 = rotl v1, v1 ; bin: d3 c1
; asm: rorl %cl, %esi
[-,%rsi] v28 = rotr v2, v1 ; bin: d3 ce
; asm: rorl %cl, %ecx
[-,%rcx] v29 = rotr v1, v1 ; bin: d3 c9
; Integer Register - Immediate 8-bit operations.
; The 8-bit immediate is sign-extended.
; asm: addl $-128, %ecx
[-,%rcx] v30 = iadd_imm v1, -128 ; bin: 83 c1 80
; asm: addl $10, %esi
[-,%rsi] v31 = iadd_imm v2, 10 ; bin: 83 c6 0a
; asm: andl $-128, %ecx
[-,%rcx] v32 = band_imm v1, -128 ; bin: 83 e1 80
; asm: andl $10, %esi
[-,%rsi] v33 = band_imm v2, 10 ; bin: 83 e6 0a
; asm: orl $-128, %ecx
[-,%rcx] v34 = bor_imm v1, -128 ; bin: 83 c9 80
; asm: orl $10, %esi
[-,%rsi] v35 = bor_imm v2, 10 ; bin: 83 ce 0a
; asm: xorl $-128, %ecx
[-,%rcx] v36 = bxor_imm v1, -128 ; bin: 83 f1 80
; asm: xorl $10, %esi
[-,%rsi] v37 = bxor_imm v2, 10 ; bin: 83 f6 0a
; Integer Register - Immediate 32-bit operations.
; asm: addl $-128000, %ecx
[-,%rcx] v40 = iadd_imm v1, -128000 ; bin: 81 c1 fffe0c00
; asm: addl $1000000, %esi
[-,%rsi] v41 = iadd_imm v2, 1000000 ; bin: 81 c6 000f4240
; asm: andl $-128000, %ecx
[-,%rcx] v42 = band_imm v1, -128000 ; bin: 81 e1 fffe0c00
; asm: andl $1000000, %esi
[-,%rsi] v43 = band_imm v2, 1000000 ; bin: 81 e6 000f4240
; asm: orl $-128000, %ecx
[-,%rcx] v44 = bor_imm v1, -128000 ; bin: 81 c9 fffe0c00
; asm: orl $1000000, %esi
[-,%rsi] v45 = bor_imm v2, 1000000 ; bin: 81 ce 000f4240
; asm: xorl $-128000, %ecx
[-,%rcx] v46 = bxor_imm v1, -128000 ; bin: 81 f1 fffe0c00
; asm: xorl $1000000, %esi
[-,%rsi] v47 = bxor_imm v2, 1000000 ; bin: 81 f6 000f4240
; More arithmetic.
; asm: imull %esi, %ecx
[-,%rcx] v50 = imul v1, v2 ; bin: 0f af ce
; asm: imull %ecx, %esi
[-,%rsi] v51 = imul v2, v1 ; bin: 0f af f1
; asm: movl $1, %eax
[-,%rax] v52 = iconst.i32 1 ; bin: b8 00000001
; asm: movl $2, %edx
[-,%rdx] v53 = iconst.i32 2 ; bin: ba 00000002
; asm: idivl %ecx
[-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: f7 fe
; asm: divl %ecx
[-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: f7 f1
; asm: divl %esi
[-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: f7 f6
; Register copies.
; asm: movl %esi, %ecx
[-,%rcx] v80 = copy v2 ; bin: 89 f1
; asm: movl %ecx, %esi
[-,%rsi] v81 = copy v1 ; bin: 89 ce
; Load/Store instructions.
; Register indirect addressing with no displacement.
; asm: movl %ecx, (%esi)
store v1, v2 ; bin: 89 0e
; asm: movl %esi, (%ecx)
store v2, v1 ; bin: 89 31
; asm: movw %cx, (%esi)
istore16 v1, v2 ; bin: 66 89 0e
; asm: movw %si, (%ecx)
istore16 v2, v1 ; bin: 66 89 31
; asm: movb %cl, (%esi)
istore8 v1, v2 ; bin: 88 0e
; Can't store %sil in 32-bit mode (needs REX prefix).
; asm: movl (%ecx), %edi
[-,%rdi] v100 = load.i32 v1 ; bin: 8b 39
; asm: movl (%esi), %edx
[-,%rdx] v101 = load.i32 v2 ; bin: 8b 16
; asm: movzwl (%ecx), %edi
[-,%rdi] v102 = uload16.i32 v1 ; bin: 0f b7 39
; asm: movzwl (%esi), %edx
[-,%rdx] v103 = uload16.i32 v2 ; bin: 0f b7 16
; asm: movswl (%ecx), %edi
[-,%rdi] v104 = sload16.i32 v1 ; bin: 0f bf 39
; asm: movswl (%esi), %edx
[-,%rdx] v105 = sload16.i32 v2 ; bin: 0f bf 16
; asm: movzbl (%ecx), %edi
[-,%rdi] v106 = uload8.i32 v1 ; bin: 0f b6 39
; asm: movzbl (%esi), %edx
[-,%rdx] v107 = uload8.i32 v2 ; bin: 0f b6 16
; asm: movsbl (%ecx), %edi
[-,%rdi] v108 = sload8.i32 v1 ; bin: 0f be 39
; asm: movsbl (%esi), %edx
[-,%rdx] v109 = sload8.i32 v2 ; bin: 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl %ecx, 100(%esi)
store v1, v2+100 ; bin: 89 4e 64
; asm: movl %esi, -100(%ecx)
store v2, v1-100 ; bin: 89 71 9c
; asm: movw %cx, 100(%esi)
istore16 v1, v2+100 ; bin: 66 89 4e 64
; asm: movw %si, -100(%ecx)
istore16 v2, v1-100 ; bin: 66 89 71 9c
; asm: movb %cl, 100(%esi)
istore8 v1, v2+100 ; bin: 88 4e 64
; asm: movl 50(%ecx), %edi
[-,%rdi] v110 = load.i32 v1+50 ; bin: 8b 79 32
; asm: movl -50(%esi), %edx
[-,%rdx] v111 = load.i32 v2-50 ; bin: 8b 56 ce
; asm: movzwl 50(%ecx), %edi
[-,%rdi] v112 = uload16.i32 v1+50 ; bin: 0f b7 79 32
; asm: movzwl -50(%esi), %edx
[-,%rdx] v113 = uload16.i32 v2-50 ; bin: 0f b7 56 ce
; asm: movswl 50(%ecx), %edi
[-,%rdi] v114 = sload16.i32 v1+50 ; bin: 0f bf 79 32
; asm: movswl -50(%esi), %edx
[-,%rdx] v115 = sload16.i32 v2-50 ; bin: 0f bf 56 ce
; asm: movzbl 50(%ecx), %edi
[-,%rdi] v116 = uload8.i32 v1+50 ; bin: 0f b6 79 32
; asm: movzbl -50(%esi), %edx
[-,%rdx] v117 = uload8.i32 v2-50 ; bin: 0f b6 56 ce
; asm: movsbl 50(%ecx), %edi
[-,%rdi] v118 = sload8.i32 v1+50 ; bin: 0f be 79 32
; asm: movsbl -50(%esi), %edx
[-,%rdx] v119 = sload8.i32 v2-50 ; bin: 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl %ecx, 10000(%esi)
store v1, v2+10000 ; bin: 89 8e 00002710
; asm: movl %esi, -10000(%ecx)
store v2, v1-10000 ; bin: 89 b1 ffffd8f0
; asm: movw %cx, 10000(%esi)
istore16 v1, v2+10000 ; bin: 66 89 8e 00002710
; asm: movw %si, -10000(%ecx)
istore16 v2, v1-10000 ; bin: 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%esi)
istore8 v1, v2+10000 ; bin: 88 8e 00002710
; asm: movl 50000(%ecx), %edi
[-,%rdi] v120 = load.i32 v1+50000 ; bin: 8b b9 0000c350
; asm: movl -50000(%esi), %edx
[-,%rdx] v121 = load.i32 v2-50000 ; bin: 8b 96 ffff3cb0
; asm: movzwl 50000(%ecx), %edi
[-,%rdi] v122 = uload16.i32 v1+50000 ; bin: 0f b7 b9 0000c350
; asm: movzwl -50000(%esi), %edx
[-,%rdx] v123 = uload16.i32 v2-50000 ; bin: 0f b7 96 ffff3cb0
; asm: movswl 50000(%ecx), %edi
[-,%rdi] v124 = sload16.i32 v1+50000 ; bin: 0f bf b9 0000c350
; asm: movswl -50000(%esi), %edx
[-,%rdx] v125 = sload16.i32 v2-50000 ; bin: 0f bf 96 ffff3cb0
; asm: movzbl 50000(%ecx), %edi
[-,%rdi] v126 = uload8.i32 v1+50000 ; bin: 0f b6 b9 0000c350
; asm: movzbl -50000(%esi), %edx
[-,%rdx] v127 = uload8.i32 v2-50000 ; bin: 0f b6 96 ffff3cb0
; asm: movsbl 50000(%ecx), %edi
[-,%rdi] v128 = sload8.i32 v1+50000 ; bin: 0f be b9 0000c350
; asm: movsbl -50000(%esi), %edx
[-,%rdx] v129 = sload8.i32 v2-50000 ; bin: 0f be 96 ffff3cb0
; Bit-counting instructions.
; asm: popcntl %esi, %ecx
[-,%rcx] v200 = popcnt v2 ; bin: f3 0f b8 ce
; asm: popcntl %ecx, %esi
[-,%rsi] v201 = popcnt v1 ; bin: f3 0f b8 f1
; asm: lzcntl %esi, %ecx
[-,%rcx] v202 = clz v2 ; bin: f3 0f bd ce
; asm: lzcntl %ecx, %esi
[-,%rsi] v203 = clz v1 ; bin: f3 0f bd f1
; asm: tzcntl %esi, %ecx
[-,%rcx] v204 = ctz v2 ; bin: f3 0f bc ce
; asm: tzcntl %ecx, %esi
[-,%rsi] v205 = ctz v1 ; bin: f3 0f bc f1
; Integer comparisons.
; asm: cmpl %esi, %ecx
; asm: sete %bl
[-,%rbx] v300 = icmp eq v1, v2 ; bin: 39 f1 0f 94 c3
; asm: cmpl %ecx, %esi
; asm: sete %dl
[-,%rdx] v301 = icmp eq v2, v1 ; bin: 39 ce 0f 94 c2
; asm: cmpl %esi, %ecx
; asm: setne %bl
[-,%rbx] v302 = icmp ne v1, v2 ; bin: 39 f1 0f 95 c3
; asm: cmpl %ecx, %esi
; asm: setne %dl
[-,%rdx] v303 = icmp ne v2, v1 ; bin: 39 ce 0f 95 c2
; asm: cmpl %esi, %ecx
; asm: setl %bl
[-,%rbx] v304 = icmp slt v1, v2 ; bin: 39 f1 0f 9c c3
; asm: cmpl %ecx, %esi
; asm: setl %dl
[-,%rdx] v305 = icmp slt v2, v1 ; bin: 39 ce 0f 9c c2
; asm: cmpl %esi, %ecx
; asm: setge %bl
[-,%rbx] v306 = icmp sge v1, v2 ; bin: 39 f1 0f 9d c3
; asm: cmpl %ecx, %esi
; asm: setge %dl
[-,%rdx] v307 = icmp sge v2, v1 ; bin: 39 ce 0f 9d c2
; asm: cmpl %esi, %ecx
; asm: setg %bl
[-,%rbx] v308 = icmp sgt v1, v2 ; bin: 39 f1 0f 9f c3
; asm: cmpl %ecx, %esi
; asm: setg %dl
[-,%rdx] v309 = icmp sgt v2, v1 ; bin: 39 ce 0f 9f c2
; asm: cmpl %esi, %ecx
; asm: setle %bl
[-,%rbx] v310 = icmp sle v1, v2 ; bin: 39 f1 0f 9e c3
; asm: cmpl %ecx, %esi
; asm: setle %dl
[-,%rdx] v311 = icmp sle v2, v1 ; bin: 39 ce 0f 9e c2
; asm: cmpl %esi, %ecx
; asm: setb %bl
[-,%rbx] v312 = icmp ult v1, v2 ; bin: 39 f1 0f 92 c3
; asm: cmpl %ecx, %esi
; asm: setb %dl
[-,%rdx] v313 = icmp ult v2, v1 ; bin: 39 ce 0f 92 c2
; asm: cmpl %esi, %ecx
; asm: setae %bl
[-,%rbx] v314 = icmp uge v1, v2 ; bin: 39 f1 0f 93 c3
; asm: cmpl %ecx, %esi
; asm: setae %dl
[-,%rdx] v315 = icmp uge v2, v1 ; bin: 39 ce 0f 93 c2
; asm: cmpl %esi, %ecx
; asm: seta %bl
[-,%rbx] v316 = icmp ugt v1, v2 ; bin: 39 f1 0f 97 c3
; asm: cmpl %ecx, %esi
; asm: seta %dl
[-,%rdx] v317 = icmp ugt v2, v1 ; bin: 39 ce 0f 97 c2
; asm: cmpl %esi, %ecx
; asm: setbe %bl
[-,%rbx] v318 = icmp ule v1, v2 ; bin: 39 f1 0f 96 c3
; asm: cmpl %ecx, %esi
; asm: setbe %dl
[-,%rdx] v319 = icmp ule v2, v1 ; bin: 39 ce 0f 96 c2
; Bool-to-int conversions.
; asm: movzbl %bl, %ecx
[-,%rcx] v350 = bint.i32 v300 ; bin: 0f b6 cb
; asm: movzbl %dl, %esi
[-,%rsi] v351 = bint.i32 v301 ; bin: 0f b6 f2
; asm: call foo
call fn0() ; bin: e8 PCRel4(fn0) 00000000
; asm: call *%ecx
call_indirect sig0, v1() ; bin: ff d1
; asm: call *%esi
call_indirect sig0, v2() ; bin: ff d6
; asm: testl %ecx, %ecx
; asm: je ebb1
brz v1, ebb1 ; bin: 85 c9 74 0e
; asm: testl %esi, %esi
; asm: je ebb1
brz v2, ebb1 ; bin: 85 f6 74 0a
; asm: testl %ecx, %ecx
; asm: jne ebb1
brnz v1, ebb1 ; bin: 85 c9 75 06
; asm: testl %esi, %esi
; asm: jne ebb1
brnz v2, ebb1 ; bin: 85 f6 75 02
; asm: jmp ebb2
jump ebb2 ; bin: eb 01
; asm: ebb1:
ebb1:
; asm: ret
return ; bin: c3
; asm: ebb2:
ebb2:
trap ; bin: 0f 0b
}

filetests/isa/intel/binary64-float.cton (deleted file)

@@ -1,169 +0,0 @@
; Binary emission of 64-bit floating point code.
test binemit
set is_64bit
isa intel has_sse2
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/intel/binary64-float.cton | llvm-mc -show-encoding -triple=x86_64
;
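; The REX prefixes needed for %xmm8-%xmm15 show up as an extra byte; e.g.
; the first fadd in %F32 below should print as (illustrative):
;
;   addss %xmm10, %xmm5             # encoding: [0xf3,0x41,0x0f,0x58,0xea]
;
; where 0x41 is REX.B selecting the high register %xmm10.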
function %F32() {
ebb0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
[-,%r14] v3 = iconst.i64 12
; asm: cvtsi2ssl %r11d, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f32 v0 ; bin: f3 41 0f 2a eb
; asm: cvtsi2ssl %esi, %xmm10
[-,%xmm10] v11 = fcvt_from_sint.f32 v1 ; bin: f3 44 0f 2a d6
; asm: cvtsi2ssq %rax, %xmm5
[-,%xmm5] v12 = fcvt_from_sint.f32 v2 ; bin: f3 48 0f 2a e8
; asm: cvtsi2ssq %r14, %xmm10
[-,%xmm10] v13 = fcvt_from_sint.f32 v3 ; bin: f3 4d 0f 2a d6
; asm: cvtss2sd %xmm10, %xmm5
[-,%xmm5] v14 = fpromote.f64 v11 ; bin: f3 41 0f 5a ea
; asm: cvtss2sd %xmm5, %xmm10
[-,%xmm10] v15 = fpromote.f64 v10 ; bin: f3 44 0f 5a d5
; asm: movd %r11d, %xmm5
[-,%xmm5] v16 = bitcast.f32 v0 ; bin: 66 41 0f 6e eb
; asm: movd %esi, %xmm10
[-,%xmm10] v17 = bitcast.f32 v1 ; bin: 66 44 0f 6e d6
; asm: movd %xmm5, %ecx
[-,%rcx] v18 = bitcast.i32 v10 ; bin: 66 40 0f 7e e9
; asm: movd %xmm10, %esi
[-,%rsi] v19 = bitcast.i32 v11 ; bin: 66 44 0f 7e d6
; Binary arithmetic.
; asm: addss %xmm10, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f3 41 0f 58 ea
; asm: addss %xmm5, %xmm10
[-,%xmm10] v21 = fadd v11, v10 ; bin: f3 44 0f 58 d5
; asm: subss %xmm10, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f3 41 0f 5c ea
; asm: subss %xmm5, %xmm10
[-,%xmm10] v23 = fsub v11, v10 ; bin: f3 44 0f 5c d5
; asm: mulss %xmm10, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f3 41 0f 59 ea
; asm: mulss %xmm5, %xmm10
[-,%xmm10] v25 = fmul v11, v10 ; bin: f3 44 0f 59 d5
; asm: divss %xmm10, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f3 41 0f 5e ea
; asm: divss %xmm5, %xmm10
[-,%xmm10] v27 = fdiv v11, v10 ; bin: f3 44 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm10, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 41 0f 54 ea
; asm: andps %xmm5, %xmm10
[-,%xmm10] v31 = band v11, v10 ; bin: 44 0f 54 d5
; asm: andnps %xmm10, %xmm5
[-,%xmm5] v32 = band_not v10, v11 ; bin: 41 0f 55 ea
; asm: andnps %xmm5, %xmm10
[-,%xmm10] v33 = band_not v11, v10 ; bin: 44 0f 55 d5
; asm: orps %xmm10, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 41 0f 56 ea
; asm: orps %xmm5, %xmm10
[-,%xmm10] v35 = bor v11, v10 ; bin: 44 0f 56 d5
; asm: xorps %xmm10, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 41 0f 57 ea
; asm: xorps %xmm5, %xmm10
[-,%xmm10] v37 = bxor v11, v10 ; bin: 44 0f 57 d5
return
}
function %F64() {
ebb0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
[-,%r14] v3 = iconst.i64 12
; asm: cvtsi2sdl %r11d, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f64 v0 ; bin: f2 41 0f 2a eb
; asm: cvtsi2sdl %esi, %xmm10
[-,%xmm10] v11 = fcvt_from_sint.f64 v1 ; bin: f2 44 0f 2a d6
; asm: cvtsi2sdq %rax, %xmm5
[-,%xmm5] v12 = fcvt_from_sint.f64 v2 ; bin: f2 48 0f 2a e8
; asm: cvtsi2sdq %r14, %xmm10
[-,%xmm10] v13 = fcvt_from_sint.f64 v3 ; bin: f2 4d 0f 2a d6
; asm: cvtsd2ss %xmm10, %xmm5
[-,%xmm5] v14 = fdemote.f32 v11 ; bin: f2 41 0f 5a ea
; asm: cvtsd2ss %xmm5, %xmm10
[-,%xmm10] v15 = fdemote.f32 v10 ; bin: f2 44 0f 5a d5
; asm: movq %rax, %xmm5
[-,%xmm5] v16 = bitcast.f64 v2 ; bin: 66 48 0f 6e e8
; asm: movq %r14, %xmm10
[-,%xmm10] v17 = bitcast.f64 v3 ; bin: 66 4d 0f 6e d6
; asm: movq %xmm5, %rcx
[-,%rcx] v18 = bitcast.i64 v10 ; bin: 66 48 0f 7e e9
; asm: movq %xmm10, %rsi
[-,%rsi] v19 = bitcast.i64 v11 ; bin: 66 4c 0f 7e d6
; Binary arithmetic.
; asm: addsd %xmm10, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f2 41 0f 58 ea
; asm: addsd %xmm5, %xmm10
[-,%xmm10] v21 = fadd v11, v10 ; bin: f2 44 0f 58 d5
; asm: subsd %xmm10, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f2 41 0f 5c ea
; asm: subsd %xmm5, %xmm10
[-,%xmm10] v23 = fsub v11, v10 ; bin: f2 44 0f 5c d5
; asm: mulsd %xmm10, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f2 41 0f 59 ea
; asm: mulsd %xmm5, %xmm10
[-,%xmm10] v25 = fmul v11, v10 ; bin: f2 44 0f 59 d5
; asm: divsd %xmm10, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f2 41 0f 5e ea
; asm: divsd %xmm5, %xmm10
[-,%xmm10] v27 = fdiv v11, v10 ; bin: f2 44 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm10, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 41 0f 54 ea
; asm: andps %xmm5, %xmm10
[-,%xmm10] v31 = band v11, v10 ; bin: 44 0f 54 d5
; asm: andnps %xmm10, %xmm5
[-,%xmm5] v32 = band_not v10, v11 ; bin: 41 0f 55 ea
; asm: andnps %xmm5, %xmm10
[-,%xmm10] v33 = band_not v11, v10 ; bin: 44 0f 55 d5
; asm: orps %xmm10, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 41 0f 56 ea
; asm: orps %xmm5, %xmm10
[-,%xmm10] v35 = bor v11, v10 ; bin: 44 0f 56 d5
; asm: xorps %xmm10, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 41 0f 57 ea
; asm: xorps %xmm5, %xmm10
[-,%xmm10] v37 = bxor v11, v10 ; bin: 44 0f 57 d5
return
}

filetests/isa/intel/binary64.cton (deleted file)

@@ -1,848 +0,0 @@
; Binary emission of 64-bit code.
test binemit
set is_64bit
isa intel haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/intel/binary64.cton | llvm-mc -show-encoding -triple=x86_64
;
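; In 64-bit mode most integer ops carry a REX.W prefix; e.g. the register
; copy below should print as (illustrative):
;
;   movq %rsi, %rcx                 # encoding: [0x48,0x89,0xf1]
;
; where 0x48 is REX.W selecting the 64-bit operand size.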
; Tests for i64 instructions.
function %I64() {
fn0 = function %foo()
sig0 = ()
ebb0:
; Integer Constants.
; asm: movq $0x01020304f1f2f3f4, %rcx
[-,%rcx] v1 = iconst.i64 0x0102_0304_f1f2_f3f4 ; bin: 48 b9 01020304f1f2f3f4
; asm: movq $0x11020304f1f2f3f4, %rsi
[-,%rsi] v2 = iconst.i64 0x1102_0304_f1f2_f3f4 ; bin: 48 be 11020304f1f2f3f4
; asm: movq $0x21020304f1f2f3f4, %r10
[-,%r10] v3 = iconst.i64 0x2102_0304_f1f2_f3f4 ; bin: 49 ba 21020304f1f2f3f4
; asm: movl $0xff001122, %r8d # 32-bit zero-extended constant.
[-,%r8] v4 = iconst.i64 0xff00_1122 ; bin: 41 b8 ff001122
; asm: movq $0xffffffff88001122, %r14 # 32-bit sign-extended constant.
[-,%r14] v5 = iconst.i64 0xffff_ffff_8800_1122 ; bin: 49 c7 c6 88001122
; Integer Register-Register Operations.
; asm: addq %rsi, %rcx
[-,%rcx] v10 = iadd v1, v2 ; bin: 48 01 f1
; asm: addq %r10, %rsi
[-,%rsi] v11 = iadd v2, v3 ; bin: 4c 01 d6
; asm: addq %rcx, %r10
[-,%r10] v12 = iadd v3, v1 ; bin: 49 01 ca
; asm: subq %rsi, %rcx
[-,%rcx] v20 = isub v1, v2 ; bin: 48 29 f1
; asm: subq %r10, %rsi
[-,%rsi] v21 = isub v2, v3 ; bin: 4c 29 d6
; asm: subq %rcx, %r10
[-,%r10] v22 = isub v3, v1 ; bin: 49 29 ca
; asm: andq %rsi, %rcx
[-,%rcx] v30 = band v1, v2 ; bin: 48 21 f1
; asm: andq %r10, %rsi
[-,%rsi] v31 = band v2, v3 ; bin: 4c 21 d6
; asm: andq %rcx, %r10
[-,%r10] v32 = band v3, v1 ; bin: 49 21 ca
; asm: orq %rsi, %rcx
[-,%rcx] v40 = bor v1, v2 ; bin: 48 09 f1
; asm: orq %r10, %rsi
[-,%rsi] v41 = bor v2, v3 ; bin: 4c 09 d6
; asm: orq %rcx, %r10
[-,%r10] v42 = bor v3, v1 ; bin: 49 09 ca
; asm: xorq %rsi, %rcx
[-,%rcx] v50 = bxor v1, v2 ; bin: 48 31 f1
; asm: xorq %r10, %rsi
[-,%rsi] v51 = bxor v2, v3 ; bin: 4c 31 d6
; asm: xorq %rcx, %r10
[-,%r10] v52 = bxor v3, v1 ; bin: 49 31 ca
; asm: shlq %cl, %rsi
[-,%rsi] v60 = ishl v2, v1 ; bin: 48 d3 e6
; asm: shlq %cl, %r10
[-,%r10] v61 = ishl v3, v1 ; bin: 49 d3 e2
; asm: sarq %cl, %rsi
[-,%rsi] v62 = sshr v2, v1 ; bin: 48 d3 fe
; asm: sarq %cl, %r10
[-,%r10] v63 = sshr v3, v1 ; bin: 49 d3 fa
; asm: shrq %cl, %rsi
[-,%rsi] v64 = ushr v2, v1 ; bin: 48 d3 ee
; asm: shrq %cl, %r10
[-,%r10] v65 = ushr v3, v1 ; bin: 49 d3 ea
; asm: rolq %cl, %rsi
[-,%rsi] v66 = rotl v2, v1 ; bin: 48 d3 c6
; asm: rolq %cl, %r10
[-,%r10] v67 = rotl v3, v1 ; bin: 49 d3 c2
; asm: rorq %cl, %rsi
[-,%rsi] v68 = rotr v2, v1 ; bin: 48 d3 ce
; asm: rorq %cl, %r10
[-,%r10] v69 = rotr v3, v1 ; bin: 49 d3 ca
; Integer Register-Immediate Operations.
; These 64-bit ops all use a 32-bit immediate that is sign-extended to 64 bits.
; Some take 8-bit immediates that are sign-extended to 64 bits.
; asm: addq $-100000, %rcx
[-,%rcx] v70 = iadd_imm v1, -100000 ; bin: 48 81 c1 fffe7960
; asm: addq $100000, %rsi
[-,%rsi] v71 = iadd_imm v2, 100000 ; bin: 48 81 c6 000186a0
; asm: addq $0x7fffffff, %r10
[-,%r10] v72 = iadd_imm v3, 0x7fff_ffff ; bin: 49 81 c2 7fffffff
; asm: addq $100, %r8
[-,%r8] v73 = iadd_imm v4, 100 ; bin: 49 83 c0 64
; asm: addq $-100, %r14
[-,%r14] v74 = iadd_imm v5, -100 ; bin: 49 83 c6 9c
; asm: andq $-100000, %rcx
[-,%rcx] v80 = band_imm v1, -100000 ; bin: 48 81 e1 fffe7960
; asm: andq $100000, %rsi
[-,%rsi] v81 = band_imm v2, 100000 ; bin: 48 81 e6 000186a0
; asm: andq $0x7fffffff, %r10
[-,%r10] v82 = band_imm v3, 0x7fff_ffff ; bin: 49 81 e2 7fffffff
; asm: andq $100, %r8
[-,%r8] v83 = band_imm v4, 100 ; bin: 49 83 e0 64
; asm: andq $-100, %r14
[-,%r14] v84 = band_imm v5, -100 ; bin: 49 83 e6 9c
; asm: orq $-100000, %rcx
[-,%rcx] v90 = bor_imm v1, -100000 ; bin: 48 81 c9 fffe7960
; asm: orq $100000, %rsi
[-,%rsi] v91 = bor_imm v2, 100000 ; bin: 48 81 ce 000186a0
; asm: orq $0x7fffffff, %r10
[-,%r10] v92 = bor_imm v3, 0x7fff_ffff ; bin: 49 81 ca 7fffffff
; asm: orq $100, %r8
[-,%r8] v93 = bor_imm v4, 100 ; bin: 49 83 c8 64
; asm: orq $-100, %r14
[-,%r14] v94 = bor_imm v5, -100 ; bin: 49 83 ce 9c
; asm: ret
; asm: xorq $-100000, %rcx
[-,%rcx] v100 = bxor_imm v1, -100000 ; bin: 48 81 f1 fffe7960
; asm: xorq $100000, %rsi
[-,%rsi] v101 = bxor_imm v2, 100000 ; bin: 48 81 f6 000186a0
; asm: xorq $0x7fffffff, %r10
[-,%r10] v102 = bxor_imm v3, 0x7fff_ffff ; bin: 49 81 f2 7fffffff
; asm: xorq $100, %r8
[-,%r8] v103 = bxor_imm v4, 100 ; bin: 49 83 f0 64
; asm: xorq $-100, %r14
[-,%r14] v104 = bxor_imm v5, -100 ; bin: 49 83 f6 9c
; Register copies.
; asm: movq %rsi, %rcx
[-,%rcx] v110 = copy v2 ; bin: 48 89 f1
; asm: movq %r10, %rsi
[-,%rsi] v111 = copy v3 ; bin: 4c 89 d6
; asm: movq %rcx, %r10
[-,%r10] v112 = copy v1 ; bin: 49 89 ca
; Load/Store instructions.
; Register indirect addressing with no displacement.
; asm: movq %rcx, (%rsi)
store v1, v2 ; bin: 48 89 0e
; asm: movq %rsi, (%rcx)
store v2, v1 ; bin: 48 89 31
; asm: movl %ecx, (%rsi)
istore32 v1, v2 ; bin: 40 89 0e
; asm: movl %esi, (%rcx)
istore32 v2, v1 ; bin: 40 89 31
; asm: movw %cx, (%rsi)
istore16 v1, v2 ; bin: 66 40 89 0e
; asm: movw %si, (%rcx)
istore16 v2, v1 ; bin: 66 40 89 31
; asm: movb %cl, (%rsi)
istore8 v1, v2 ; bin: 40 88 0e
; asm: movb %sil, (%rcx)
istore8 v2, v1 ; bin: 40 88 31
; asm: movq (%rcx), %rdi
[-,%rdi] v120 = load.i64 v1 ; bin: 48 8b 39
; asm: movq (%rsi), %rdx
[-,%rdx] v121 = load.i64 v2 ; bin: 48 8b 16
; asm: movl (%rcx), %edi
[-,%rdi] v122 = uload32.i64 v1 ; bin: 40 8b 39
; asm: movl (%rsi), %edx
[-,%rdx] v123 = uload32.i64 v2 ; bin: 40 8b 16
; asm: movslq (%rcx), %rdi
[-,%rdi] v124 = sload32.i64 v1 ; bin: 48 63 39
; asm: movslq (%rsi), %rdx
[-,%rdx] v125 = sload32.i64 v2 ; bin: 48 63 16
; asm: movzwq (%rcx), %rdi
[-,%rdi] v126 = uload16.i64 v1 ; bin: 48 0f b7 39
; asm: movzwq (%rsi), %rdx
[-,%rdx] v127 = uload16.i64 v2 ; bin: 48 0f b7 16
; asm: movswq (%rcx), %rdi
[-,%rdi] v128 = sload16.i64 v1 ; bin: 48 0f bf 39
; asm: movswq (%rsi), %rdx
[-,%rdx] v129 = sload16.i64 v2 ; bin: 48 0f bf 16
; asm: movzbq (%rcx), %rdi
[-,%rdi] v130 = uload8.i64 v1 ; bin: 48 0f b6 39
; asm: movzbq (%rsi), %rdx
[-,%rdx] v131 = uload8.i64 v2 ; bin: 48 0f b6 16
; asm: movsbq (%rcx), %rdi
[-,%rdi] v132 = sload8.i64 v1 ; bin: 48 0f be 39
; asm: movsbq (%rsi), %rdx
[-,%rdx] v133 = sload8.i64 v2 ; bin: 48 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movq %rcx, 100(%rsi)
store v1, v2+100 ; bin: 48 89 4e 64
; asm: movq %rsi, -100(%rcx)
store v2, v1-100 ; bin: 48 89 71 9c
; asm: movl %ecx, 100(%rsi)
istore32 v1, v2+100 ; bin: 40 89 4e 64
; asm: movl %esi, -100(%rcx)
istore32 v2, v1-100 ; bin: 40 89 71 9c
; asm: movw %cx, 100(%rsi)
istore16 v1, v2+100 ; bin: 66 40 89 4e 64
; asm: movw %si, -100(%rcx)
istore16 v2, v1-100 ; bin: 66 40 89 71 9c
; asm: movb %cl, 100(%rsi)
istore8 v1, v2+100 ; bin: 40 88 4e 64
; asm: movb %sil, 100(%rcx)
istore8 v2, v1+100 ; bin: 40 88 71 64
; asm: movq 50(%rcx), %rdi
[-,%rdi] v140 = load.i64 v1+50 ; bin: 48 8b 79 32
; asm: movq -50(%rsi), %rdx
[-,%rdx] v141 = load.i64 v2-50 ; bin: 48 8b 56 ce
; asm: movl 50(%rcx), %edi
[-,%rdi] v142 = uload32.i64 v1+50 ; bin: 40 8b 79 32
; asm: movl -50(%rsi), %edx
[-,%rdx] v143 = uload32.i64 v2-50 ; bin: 40 8b 56 ce
; asm: movslq 50(%rcx), %rdi
[-,%rdi] v144 = sload32.i64 v1+50 ; bin: 48 63 79 32
; asm: movslq -50(%rsi), %rdx
[-,%rdx] v145 = sload32.i64 v2-50 ; bin: 48 63 56 ce
; asm: movzwq 50(%rcx), %rdi
[-,%rdi] v146 = uload16.i64 v1+50 ; bin: 48 0f b7 79 32
; asm: movzwq -50(%rsi), %rdx
[-,%rdx] v147 = uload16.i64 v2-50 ; bin: 48 0f b7 56 ce
; asm: movswq 50(%rcx), %rdi
[-,%rdi] v148 = sload16.i64 v1+50 ; bin: 48 0f bf 79 32
; asm: movswq -50(%rsi), %rdx
[-,%rdx] v149 = sload16.i64 v2-50 ; bin: 48 0f bf 56 ce
; asm: movzbq 50(%rcx), %rdi
[-,%rdi] v150 = uload8.i64 v1+50 ; bin: 48 0f b6 79 32
; asm: movzbq -50(%rsi), %rdx
[-,%rdx] v151 = uload8.i64 v2-50 ; bin: 48 0f b6 56 ce
; asm: movsbq 50(%rcx), %rdi
[-,%rdi] v152 = sload8.i64 v1+50 ; bin: 48 0f be 79 32
; asm: movsbq -50(%rsi), %rdx
[-,%rdx] v153 = sload8.i64 v2-50 ; bin: 48 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movq %rcx, 10000(%rsi)
store v1, v2+10000 ; bin: 48 89 8e 00002710
; asm: movq %rsi, -10000(%rcx)
store v2, v1-10000 ; bin: 48 89 b1 ffffd8f0
; asm: movl %ecx, 10000(%rsi)
istore32 v1, v2+10000 ; bin: 40 89 8e 00002710
; asm: movl %esi, -10000(%rcx)
istore32 v2, v1-10000 ; bin: 40 89 b1 ffffd8f0
; asm: movw %cx, 10000(%rsi)
istore16 v1, v2+10000 ; bin: 66 40 89 8e 00002710
; asm: movw %si, -10000(%rcx)
istore16 v2, v1-10000 ; bin: 66 40 89 b1 ffffd8f0
; asm: movb %cl, 10000(%rsi)
istore8 v1, v2+10000 ; bin: 40 88 8e 00002710
; asm: movb %sil, 10000(%rcx)
istore8 v2, v1+10000 ; bin: 40 88 b1 00002710
; asm: movq 50000(%rcx), %rdi
[-,%rdi] v160 = load.i64 v1+50000 ; bin: 48 8b b9 0000c350
; asm: movq -50000(%rsi), %rdx
[-,%rdx] v161 = load.i64 v2-50000 ; bin: 48 8b 96 ffff3cb0
; asm: movl 50000(%rcx), %edi
[-,%rdi] v162 = uload32.i64 v1+50000 ; bin: 40 8b b9 0000c350
; asm: movl -50000(%rsi), %edx
[-,%rdx] v163 = uload32.i64 v2-50000 ; bin: 40 8b 96 ffff3cb0
; asm: movslq 50000(%rcx), %rdi
[-,%rdi] v164 = sload32.i64 v1+50000 ; bin: 48 63 b9 0000c350
; asm: movslq -50000(%rsi), %rdx
[-,%rdx] v165 = sload32.i64 v2-50000 ; bin: 48 63 96 ffff3cb0
; asm: movzwq 50000(%rcx), %rdi
[-,%rdi] v166 = uload16.i64 v1+50000 ; bin: 48 0f b7 b9 0000c350
; asm: movzwq -50000(%rsi), %rdx
[-,%rdx] v167 = uload16.i64 v2-50000 ; bin: 48 0f b7 96 ffff3cb0
; asm: movswq 50000(%rcx), %rdi
[-,%rdi] v168 = sload16.i64 v1+50000 ; bin: 48 0f bf b9 0000c350
; asm: movswq -50000(%rsi), %rdx
[-,%rdx] v169 = sload16.i64 v2-50000 ; bin: 48 0f bf 96 ffff3cb0
; asm: movzbq 50000(%rcx), %rdi
[-,%rdi] v170 = uload8.i64 v1+50000 ; bin: 48 0f b6 b9 0000c350
; asm: movzbq -50000(%rsi), %rdx
[-,%rdx] v171 = uload8.i64 v2-50000 ; bin: 48 0f b6 96 ffff3cb0
; asm: movsbq 50000(%rcx), %rdi
[-,%rdi] v172 = sload8.i64 v1+50000 ; bin: 48 0f be b9 0000c350
; asm: movsbq -50000(%rsi), %rdx
[-,%rdx] v173 = sload8.i64 v2-50000 ; bin: 48 0f be 96 ffff3cb0
; More arithmetic.
; asm: imulq %rsi, %rcx
[-,%rcx] v180 = imul v1, v2 ; bin: 48 0f af ce
; asm: imulq %r10, %rsi
[-,%rsi] v181 = imul v2, v3 ; bin: 49 0f af f2
; asm: imulq %rcx, %r10
[-,%r10] v182 = imul v3, v1 ; bin: 4c 0f af d1
[-,%rax] v190 = iconst.i64 1
[-,%rdx] v191 = iconst.i64 2
; asm: idivq %rcx
[-,%rax,%rdx] v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: 48 f7 f9
; asm: idivq %rsi
[-,%rax,%rdx] v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: 48 f7 fe
; asm: idivq %r10
[-,%rax,%rdx] v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: 49 f7 fa
; asm: divq %rcx
[-,%rax,%rdx] v198, v199 = x86_udivmodx v190, v191, v1 ; bin: 48 f7 f1
; asm: divq %rsi
[-,%rax,%rdx] v200, v201 = x86_udivmodx v190, v191, v2 ; bin: 48 f7 f6
; asm: divq %r10
[-,%rax,%rdx] v202, v203 = x86_udivmodx v190, v191, v3 ; bin: 49 f7 f2
; Bit-counting instructions.
; asm: popcntq %rsi, %rcx
[-,%rcx] v210 = popcnt v2 ; bin: f3 48 0f b8 ce
; asm: popcntq %r10, %rsi
[-,%rsi] v211 = popcnt v3 ; bin: f3 49 0f b8 f2
; asm: popcntq %rcx, %r10
[-,%r10] v212 = popcnt v1 ; bin: f3 4c 0f b8 d1
; asm: lzcntq %rsi, %rcx
[-,%rcx] v213 = clz v2 ; bin: f3 48 0f bd ce
; asm: lzcntq %r10, %rsi
[-,%rsi] v214 = clz v3 ; bin: f3 49 0f bd f2
; asm: lzcntq %rcx, %r10
[-,%r10] v215 = clz v1 ; bin: f3 4c 0f bd d1
; asm: tzcntq %rsi, %rcx
[-,%rcx] v216 = ctz v2 ; bin: f3 48 0f bc ce
; asm: tzcntq %r10, %rsi
[-,%rsi] v217 = ctz v3 ; bin: f3 49 0f bc f2
; asm: tzcntq %rcx, %r10
[-,%r10] v218 = ctz v1 ; bin: f3 4c 0f bc d1
; Integer comparisons.
; asm: cmpq %rsi, %rcx
; asm: sete %bl
[-,%rbx] v300 = icmp eq v1, v2 ; bin: 48 39 f1 0f 94 c3
; asm: cmpq %r10, %rsi
; asm: sete %dl
[-,%rdx] v301 = icmp eq v2, v3 ; bin: 4c 39 d6 0f 94 c2
; asm: cmpq %rsi, %rcx
; asm: setne %bl
[-,%rbx] v302 = icmp ne v1, v2 ; bin: 48 39 f1 0f 95 c3
; asm: cmpq %r10, %rsi
; asm: setne %dl
[-,%rdx] v303 = icmp ne v2, v3 ; bin: 4c 39 d6 0f 95 c2
; asm: cmpq %rsi, %rcx
; asm: setl %bl
[-,%rbx] v304 = icmp slt v1, v2 ; bin: 48 39 f1 0f 9c c3
; asm: cmpq %r10, %rsi
; asm: setl %dl
[-,%rdx] v305 = icmp slt v2, v3 ; bin: 4c 39 d6 0f 9c c2
; asm: cmpq %rsi, %rcx
; asm: setge %bl
[-,%rbx] v306 = icmp sge v1, v2 ; bin: 48 39 f1 0f 9d c3
; asm: cmpq %r10, %rsi
; asm: setge %dl
[-,%rdx] v307 = icmp sge v2, v3 ; bin: 4c 39 d6 0f 9d c2
; asm: cmpq %rsi, %rcx
; asm: setg %bl
[-,%rbx] v308 = icmp sgt v1, v2 ; bin: 48 39 f1 0f 9f c3
; asm: cmpq %r10, %rsi
; asm: setg %dl
[-,%rdx] v309 = icmp sgt v2, v3 ; bin: 4c 39 d6 0f 9f c2
; asm: cmpq %rsi, %rcx
; asm: setle %bl
[-,%rbx] v310 = icmp sle v1, v2 ; bin: 48 39 f1 0f 9e c3
; asm: cmpq %r10, %rsi
; asm: setle %dl
[-,%rdx] v311 = icmp sle v2, v3 ; bin: 4c 39 d6 0f 9e c2
; asm: cmpq %rsi, %rcx
; asm: setb %bl
[-,%rbx] v312 = icmp ult v1, v2 ; bin: 48 39 f1 0f 92 c3
; asm: cmpq %r10, %rsi
; asm: setb %dl
[-,%rdx] v313 = icmp ult v2, v3 ; bin: 4c 39 d6 0f 92 c2
; asm: cmpq %rsi, %rcx
; asm: setae %bl
[-,%rbx] v314 = icmp uge v1, v2 ; bin: 48 39 f1 0f 93 c3
; asm: cmpq %r10, %rsi
; asm: setae %dl
[-,%rdx] v315 = icmp uge v2, v3 ; bin: 4c 39 d6 0f 93 c2
; asm: cmpq %rsi, %rcx
; asm: seta %bl
[-,%rbx] v316 = icmp ugt v1, v2 ; bin: 48 39 f1 0f 97 c3
; asm: cmpq %r10, %rsi
; asm: seta %dl
[-,%rdx] v317 = icmp ugt v2, v3 ; bin: 4c 39 d6 0f 97 c2
; asm: cmpq %rsi, %rcx
; asm: setbe %bl
[-,%rbx] v318 = icmp ule v1, v2 ; bin: 48 39 f1 0f 96 c3
; asm: cmpq %r10, %rsi
; asm: setbe %dl
[-,%rdx] v319 = icmp ule v2, v3 ; bin: 4c 39 d6 0f 96 c2
; Bool-to-int conversions.
; asm: movzbq %bl, %rcx
[-,%rcx] v350 = bint.i64 v300 ; bin: 48 0f b6 cb
; asm: movzbq %dl, %rsi
[-,%rsi] v351 = bint.i64 v301 ; bin: 48 0f b6 f2
; asm: testq %rcx, %rcx
; asm: je ebb1
brz v1, ebb1 ; bin: 48 85 c9 74 1b
; asm: testq %rsi, %rsi
; asm: je ebb1
brz v2, ebb1 ; bin: 48 85 f6 74 16
; asm: testq %r10, %r10
; asm: je ebb1
brz v3, ebb1 ; bin: 4d 85 d2 74 11
; asm: testq %rcx, %rcx
; asm: jne ebb1
brnz v1, ebb1 ; bin: 48 85 c9 75 0c
; asm: testq %rsi, %rsi
; asm: jne ebb1
brnz v2, ebb1 ; bin: 48 85 f6 75 07
; asm: testq %r10, %r10
; asm: jne ebb1
brnz v3, ebb1 ; bin: 4d 85 d2 75 02
; asm: jmp ebb2
jump ebb2 ; bin: eb 01
; asm: ebb1:
ebb1:
return ; bin: c3
; asm: ebb2:
ebb2:
jump ebb1 ; bin: eb fd
}
; Tests for i32 instructions in 64-bit mode.
;
; Note that many i32 instructions can be encoded both with and without a REX
; prefix if they only use the low 8 registers. Here, we are testing the REX
; encodings which are chosen by default. Switching to non-REX encodings should
; be done by an instruction shrinking pass.
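; For example, the same "iadd v1, v2" in %ecx/%esi encodes as "01 f1" in
; filetests/isa/intel/binary32.cton, but as "40 01 f1" below: the 0x40 byte
; is an empty REX prefix that a shrinking pass could drop.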
function %I32() {
fn0 = function %foo()
sig0 = ()
ebb0:
; Integer Constants.
; asm: movl $0x01020304, %ecx
[-,%rcx] v1 = iconst.i32 0x0102_0304 ; bin: 40 b9 01020304
; asm: movl $0x11020304, %esi
[-,%rsi] v2 = iconst.i32 0x1102_0304 ; bin: 40 be 11020304
; asm: movl $0x21020304, %r10d
[-,%r10] v3 = iconst.i32 0x2102_0304 ; bin: 41 ba 21020304
; asm: movl $0xff001122, %r8d
[-,%r8] v4 = iconst.i32 0xff00_1122 ; bin: 41 b8 ff001122
; asm: movl $0x88001122, %r14d
[-,%r14] v5 = iconst.i32 0xffff_ffff_8800_1122 ; bin: 41 be 88001122
; Load/Store instructions.
; Register indirect addressing with no displacement.
; asm: movl (%rcx), %edi
[-,%rdi] v10 = load.i32 v1 ; bin: 40 8b 39
; asm: movl (%rsi), %edx
[-,%rdx] v11 = load.i32 v2 ; bin: 40 8b 16
; asm: movzwl (%rcx), %edi
[-,%rdi] v12 = uload16.i32 v1 ; bin: 40 0f b7 39
; asm: movzwl (%rsi), %edx
[-,%rdx] v13 = uload16.i32 v2 ; bin: 40 0f b7 16
; asm: movswl (%rcx), %edi
[-,%rdi] v14 = sload16.i32 v1 ; bin: 40 0f bf 39
; asm: movswl (%rsi), %edx
[-,%rdx] v15 = sload16.i32 v2 ; bin: 40 0f bf 16
; asm: movzbl (%rcx), %edi
[-,%rdi] v16 = uload8.i32 v1 ; bin: 40 0f b6 39
; asm: movzbl (%rsi), %edx
[-,%rdx] v17 = uload8.i32 v2 ; bin: 40 0f b6 16
; asm: movsbl (%rcx), %edi
[-,%rdi] v18 = sload8.i32 v1 ; bin: 40 0f be 39
; asm: movsbl (%rsi), %edx
[-,%rdx] v19 = sload8.i32 v2 ; bin: 40 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl 50(%rcx), %edi
[-,%rdi] v20 = load.i32 v1+50 ; bin: 40 8b 79 32
; asm: movl -50(%rsi), %edx
[-,%rdx] v21 = load.i32 v2-50 ; bin: 40 8b 56 ce
; asm: movzwl 50(%rcx), %edi
[-,%rdi] v22 = uload16.i32 v1+50 ; bin: 40 0f b7 79 32
; asm: movzwl -50(%rsi), %edx
[-,%rdx] v23 = uload16.i32 v2-50 ; bin: 40 0f b7 56 ce
; asm: movswl 50(%rcx), %edi
[-,%rdi] v24 = sload16.i32 v1+50 ; bin: 40 0f bf 79 32
; asm: movswl -50(%rsi), %edx
[-,%rdx] v25 = sload16.i32 v2-50 ; bin: 40 0f bf 56 ce
; asm: movzbl 50(%rcx), %edi
[-,%rdi] v26 = uload8.i32 v1+50 ; bin: 40 0f b6 79 32
; asm: movzbl -50(%rsi), %edx
[-,%rdx] v27 = uload8.i32 v2-50 ; bin: 40 0f b6 56 ce
; asm: movsbl 50(%rcx), %edi
[-,%rdi] v28 = sload8.i32 v1+50 ; bin: 40 0f be 79 32
; asm: movsbl -50(%rsi), %edx
[-,%rdx] v29 = sload8.i32 v2-50 ; bin: 40 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl 50000(%rcx), %edi
[-,%rdi] v30 = load.i32 v1+50000 ; bin: 40 8b b9 0000c350
; asm: movl -50000(%rsi), %edx
[-,%rdx] v31 = load.i32 v2-50000 ; bin: 40 8b 96 ffff3cb0
; asm: movzwl 50000(%rcx), %edi
[-,%rdi] v32 = uload16.i32 v1+50000 ; bin: 40 0f b7 b9 0000c350
; asm: movzwl -50000(%rsi), %edx
[-,%rdx] v33 = uload16.i32 v2-50000 ; bin: 40 0f b7 96 ffff3cb0
; asm: movswl 50000(%rcx), %edi
[-,%rdi] v34 = sload16.i32 v1+50000 ; bin: 40 0f bf b9 0000c350
; asm: movswl -50000(%rsi), %edx
[-,%rdx] v35 = sload16.i32 v2-50000 ; bin: 40 0f bf 96 ffff3cb0
; asm: movzbl 50000(%rcx), %edi
[-,%rdi] v36 = uload8.i32 v1+50000 ; bin: 40 0f b6 b9 0000c350
; asm: movzbl -50000(%rsi), %edx
[-,%rdx] v37 = uload8.i32 v2-50000 ; bin: 40 0f b6 96 ffff3cb0
; asm: movsbl 50000(%rcx), %edi
[-,%rdi] v38 = sload8.i32 v1+50000 ; bin: 40 0f be b9 0000c350
; asm: movsbl -50000(%rsi), %edx
[-,%rdx] v39 = sload8.i32 v2-50000 ; bin: 40 0f be 96 ffff3cb0
; Integer Register-Register Operations.
; asm: addl %esi, %ecx
[-,%rcx] v40 = iadd v1, v2 ; bin: 40 01 f1
; asm: addl %r10d, %esi
[-,%rsi] v41 = iadd v2, v3 ; bin: 44 01 d6
; asm: addl %ecx, %r10d
[-,%r10] v42 = iadd v3, v1 ; bin: 41 01 ca
; asm: subl %esi, %ecx
[-,%rcx] v50 = isub v1, v2 ; bin: 40 29 f1
; asm: subl %r10d, %esi
[-,%rsi] v51 = isub v2, v3 ; bin: 44 29 d6
; asm: subl %ecx, %r10d
[-,%r10] v52 = isub v3, v1 ; bin: 41 29 ca
; asm: andl %esi, %ecx
[-,%rcx] v60 = band v1, v2 ; bin: 40 21 f1
; asm: andl %r10d, %esi
[-,%rsi] v61 = band v2, v3 ; bin: 44 21 d6
; asm: andl %ecx, %r10d
[-,%r10] v62 = band v3, v1 ; bin: 41 21 ca
; asm: orl %esi, %ecx
[-,%rcx] v70 = bor v1, v2 ; bin: 40 09 f1
; asm: orl %r10d, %esi
[-,%rsi] v71 = bor v2, v3 ; bin: 44 09 d6
; asm: orl %ecx, %r10d
[-,%r10] v72 = bor v3, v1 ; bin: 41 09 ca
; asm: xorl %esi, %ecx
[-,%rcx] v80 = bxor v1, v2 ; bin: 40 31 f1
; asm: xorl %r10d, %esi
[-,%rsi] v81 = bxor v2, v3 ; bin: 44 31 d6
; asm: xorl %ecx, %r10d
[-,%r10] v82 = bxor v3, v1 ; bin: 41 31 ca
; asm: shll %cl, %esi
[-,%rsi] v90 = ishl v2, v1 ; bin: 40 d3 e6
; asm: shll %cl, %r10d
[-,%r10] v91 = ishl v3, v1 ; bin: 41 d3 e2
; asm: sarl %cl, %esi
[-,%rsi] v92 = sshr v2, v1 ; bin: 40 d3 fe
; asm: sarl %cl, %r10d
[-,%r10] v93 = sshr v3, v1 ; bin: 41 d3 fa
; asm: shrl %cl, %esi
[-,%rsi] v94 = ushr v2, v1 ; bin: 40 d3 ee
; asm: shrl %cl, %r10d
[-,%r10] v95 = ushr v3, v1 ; bin: 41 d3 ea
; asm: roll %cl, %esi
[-,%rsi] v96 = rotl v2, v1 ; bin: 40 d3 c6
; asm: roll %cl, %r10d
[-,%r10] v97 = rotl v3, v1 ; bin: 41 d3 c2
; asm: rorl %cl, %esi
[-,%rsi] v98 = rotr v2, v1 ; bin: 40 d3 ce
; asm: rorl %cl, %r10d
[-,%r10] v99 = rotr v3, v1 ; bin: 41 d3 ca
; Integer Register-Immediate Operations.
; These 64-bit ops all use a 32-bit immediate that is sign-extended to 64 bits.
; Some take 8-bit immediates that are sign-extended to 64 bits.
; asm: addl $-100000, %ecx
[-,%rcx] v100 = iadd_imm v1, -100000 ; bin: 40 81 c1 fffe7960
; asm: addl $100000, %esi
[-,%rsi] v101 = iadd_imm v2, 100000 ; bin: 40 81 c6 000186a0
; asm: addl $0x7fffffff, %r10d
[-,%r10] v102 = iadd_imm v3, 0x7fff_ffff ; bin: 41 81 c2 7fffffff
; asm: addl $100, %r8d
[-,%r8] v103 = iadd_imm v4, 100 ; bin: 41 83 c0 64
; asm: addl $-100, %r14d
[-,%r14] v104 = iadd_imm v5, -100 ; bin: 41 83 c6 9c
; asm: andl $-100000, %ecx
[-,%rcx] v110 = band_imm v1, -100000 ; bin: 40 81 e1 fffe7960
; asm: andl $100000, %esi
[-,%rsi] v111 = band_imm v2, 100000 ; bin: 40 81 e6 000186a0
; asm: andl $0x7fffffff, %r10d
[-,%r10] v112 = band_imm v3, 0x7fff_ffff ; bin: 41 81 e2 7fffffff
; asm: andl $100, %r8d
[-,%r8] v113 = band_imm v4, 100 ; bin: 41 83 e0 64
; asm: andl $-100, %r14d
[-,%r14] v114 = band_imm v5, -100 ; bin: 41 83 e6 9c
; asm: orl $-100000, %ecx
[-,%rcx] v120 = bor_imm v1, -100000 ; bin: 40 81 c9 fffe7960
; asm: orl $100000, %esi
[-,%rsi] v121 = bor_imm v2, 100000 ; bin: 40 81 ce 000186a0
; asm: orl $0x7fffffff, %r10d
[-,%r10] v122 = bor_imm v3, 0x7fff_ffff ; bin: 41 81 ca 7fffffff
; asm: orl $100, %r8d
[-,%r8] v123 = bor_imm v4, 100 ; bin: 41 83 c8 64
; asm: orl $-100, %r14d
[-,%r14] v124 = bor_imm v5, -100 ; bin: 41 83 ce 9c
; asm: ret
; asm: xorl $-100000, %ecx
[-,%rcx] v130 = bxor_imm v1, -100000 ; bin: 40 81 f1 fffe7960
; asm: xorl $100000, %esi
[-,%rsi] v131 = bxor_imm v2, 100000 ; bin: 40 81 f6 000186a0
; asm: xorl $0x7fffffff, %r10d
[-,%r10] v132 = bxor_imm v3, 0x7fff_ffff ; bin: 41 81 f2 7fffffff
; asm: xorl $100, %r8d
[-,%r8] v133 = bxor_imm v4, 100 ; bin: 41 83 f0 64
; asm: xorl $-100, %r14d
[-,%r14] v134 = bxor_imm v5, -100 ; bin: 41 83 f6 9c
; Register copies.
; asm: movl %esi, %ecx
[-,%rcx] v140 = copy v2 ; bin: 40 89 f1
; asm: movl %r10d, %esi
[-,%rsi] v141 = copy v3 ; bin: 44 89 d6
; asm: movl %ecx, %r10d
[-,%r10] v142 = copy v1 ; bin: 41 89 ca
; More arithmetic.
; asm: imull %esi, %ecx
[-,%rcx] v150 = imul v1, v2 ; bin: 40 0f af ce
; asm: imull %r10d, %esi
[-,%rsi] v151 = imul v2, v3 ; bin: 41 0f af f2
; asm: imull %ecx, %r10d
[-,%r10] v152 = imul v3, v1 ; bin: 44 0f af d1
[-,%rax] v160 = iconst.i32 1
[-,%rdx] v161 = iconst.i32 2
; asm: idivl %ecx
[-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: 40 f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: 40 f7 fe
; asm: idivl %r10d
[-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: 41 f7 fa
; asm: divl %ecx
[-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: 40 f7 f1
; asm: divl %esi
[-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: 40 f7 f6
; asm: divl %r10d
[-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: 41 f7 f2
; Bit-counting instructions.
; asm: popcntl %esi, %ecx
[-,%rcx] v200 = popcnt v2 ; bin: f3 40 0f b8 ce
; asm: popcntl %r10d, %esi
[-,%rsi] v201 = popcnt v3 ; bin: f3 41 0f b8 f2
; asm: popcntl %ecx, %r10d
[-,%r10] v202 = popcnt v1 ; bin: f3 44 0f b8 d1
; asm: lzcntl %esi, %ecx
[-,%rcx] v203 = clz v2 ; bin: f3 40 0f bd ce
; asm: lzcntl %r10d, %esi
[-,%rsi] v204 = clz v3 ; bin: f3 41 0f bd f2
; asm: lzcntl %ecx, %r10d
[-,%r10] v205 = clz v1 ; bin: f3 44 0f bd d1
; asm: tzcntl %esi, %ecx
[-,%rcx] v206 = ctz v2 ; bin: f3 40 0f bc ce
; asm: tzcntl %r10d, %esi
[-,%rsi] v207 = ctz v3 ; bin: f3 41 0f bc f2
; asm: tzcntl %ecx, %r10d
[-,%r10] v208 = ctz v1 ; bin: f3 44 0f bc d1
; Integer comparisons.
; asm: cmpl %esi, %ecx
; asm: sete %bl
[-,%rbx] v300 = icmp eq v1, v2 ; bin: 40 39 f1 0f 94 c3
; asm: cmpl %r10d, %esi
; asm: sete %dl
[-,%rdx] v301 = icmp eq v2, v3 ; bin: 44 39 d6 0f 94 c2
; asm: cmpl %esi, %ecx
; asm: setne %bl
[-,%rbx] v302 = icmp ne v1, v2 ; bin: 40 39 f1 0f 95 c3
; asm: cmpl %r10d, %esi
; asm: setne %dl
[-,%rdx] v303 = icmp ne v2, v3 ; bin: 44 39 d6 0f 95 c2
; asm: cmpl %esi, %ecx
; asm: setl %bl
[-,%rbx] v304 = icmp slt v1, v2 ; bin: 40 39 f1 0f 9c c3
; asm: cmpl %r10d, %esi
; asm: setl %dl
[-,%rdx] v305 = icmp slt v2, v3 ; bin: 44 39 d6 0f 9c c2
; asm: cmpl %esi, %ecx
; asm: setge %bl
[-,%rbx] v306 = icmp sge v1, v2 ; bin: 40 39 f1 0f 9d c3
; asm: cmpl %r10d, %esi
; asm: setge %dl
[-,%rdx] v307 = icmp sge v2, v3 ; bin: 44 39 d6 0f 9d c2
; asm: cmpl %esi, %ecx
; asm: setg %bl
[-,%rbx] v308 = icmp sgt v1, v2 ; bin: 40 39 f1 0f 9f c3
; asm: cmpl %r10d, %esi
; asm: setg %dl
[-,%rdx] v309 = icmp sgt v2, v3 ; bin: 44 39 d6 0f 9f c2
; asm: cmpl %esi, %ecx
; asm: setle %bl
[-,%rbx] v310 = icmp sle v1, v2 ; bin: 40 39 f1 0f 9e c3
; asm: cmpl %r10d, %esi
; asm: setle %dl
[-,%rdx] v311 = icmp sle v2, v3 ; bin: 44 39 d6 0f 9e c2
; asm: cmpl %esi, %ecx
; asm: setb %bl
[-,%rbx] v312 = icmp ult v1, v2 ; bin: 40 39 f1 0f 92 c3
; asm: cmpl %r10d, %esi
; asm: setb %dl
[-,%rdx] v313 = icmp ult v2, v3 ; bin: 44 39 d6 0f 92 c2
; asm: cmpl %esi, %ecx
; asm: setae %bl
[-,%rbx] v314 = icmp uge v1, v2 ; bin: 40 39 f1 0f 93 c3
; asm: cmpl %r10d, %esi
; asm: setae %dl
[-,%rdx] v315 = icmp uge v2, v3 ; bin: 44 39 d6 0f 93 c2
; asm: cmpl %esi, %ecx
; asm: seta %bl
[-,%rbx] v316 = icmp ugt v1, v2 ; bin: 40 39 f1 0f 97 c3
; asm: cmpl %r10d, %esi
; asm: seta %dl
[-,%rdx] v317 = icmp ugt v2, v3 ; bin: 44 39 d6 0f 97 c2
; asm: cmpl %esi, %ecx
; asm: setbe %bl
[-,%rbx] v318 = icmp ule v1, v2 ; bin: 40 39 f1 0f 96 c3
; asm: cmpl %r10d, %esi
; asm: setbe %dl
[-,%rdx] v319 = icmp ule v2, v3 ; bin: 44 39 d6 0f 96 c2
; Bool-to-int conversions.
; asm: movzbl %bl, %ecx
[-,%rcx] v350 = bint.i32 v300 ; bin: 40 0f b6 cb
; asm: movzbl %dl, %esi
[-,%rsi] v351 = bint.i32 v301 ; bin: 40 0f b6 f2
; asm: testl %ecx, %ecx
; asm: je ebb1x
brz v1, ebb1 ; bin: 40 85 c9 74 1b
; asm: testl %esi, %esi
; asm: je ebb1x
brz v2, ebb1 ; bin: 40 85 f6 74 16
; asm: testl %r10d, %r10d
; asm: je ebb1x
brz v3, ebb1 ; bin: 45 85 d2 74 11
; asm: testl %ecx, %ecx
; asm: jne ebb1x
brnz v1, ebb1 ; bin: 40 85 c9 75 0c
; asm: testl %esi, %esi
; asm: jne ebb1x
brnz v2, ebb1 ; bin: 40 85 f6 75 07
; asm: testl %r10d, %r10d
; asm: jne ebb1x
brnz v3, ebb1 ; bin: 45 85 d2 75 02
; asm: jmp ebb2x
jump ebb2 ; bin: eb 01
; asm: ebb1x:
ebb1:
return ; bin: c3
; asm: ebb2x:
ebb2:
jump ebb1 ; bin: eb fd
}
; Tests for i64/i32 conversion instructions.
function %I64_I32() {
ebb0:
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
[-,%rcx] v11 = ireduce.i32 v1 ; bin:
[-,%rsi] v12 = ireduce.i32 v2 ; bin:
[-,%r10] v13 = ireduce.i32 v3 ; bin:
; asm: movslq %ecx, %rsi
[-,%rsi] v20 = sextend.i64 v11 ; bin: 48 63 f1
; asm: movslq %esi, %r10
[-,%r10] v21 = sextend.i64 v12 ; bin: 4c 63 d6
; asm: movslq %r10d, %rcx
[-,%rcx] v22 = sextend.i64 v13 ; bin: 49 63 ca
; asm: movl %ecx, %esi
[-,%rsi] v30 = uextend.i64 v11 ; bin: 40 89 ce
; asm: movl %esi, %r10d
[-,%r10] v31 = uextend.i64 v12 ; bin: 41 89 f2
; asm: movl %r10d, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 44 89 d1
trap ; bin: 0f 0b
}


@@ -1,14 +0,0 @@
; Test the legalization of function signatures for RV32E.
test legalizer
isa riscv enable_e
; regex: V=v\d+
function %f() {
; Spilling into the stack args after %x15, since %x16 and up are not
; available in RV32E.
sig0 = (i64, i64, i64, i64) -> i64 native
; check: sig0 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [0], i32 [4]) -> i32 [%x10], i32 [%x11] native
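; A reading of the expansion above (a sketch of the conventions, not part of
; the original test): each i64 is split into two i32 halves; the first six
; halves land in %x10-%x15, the remaining two spill to incoming stack slots,
; with the bracketed [0] and [4] denoting byte offsets; the i64 return is
; likewise split across %x10/%x11.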
ebb0:
return
}


@@ -1,32 +0,0 @@
; Test the legalization of function signatures.
test legalizer
isa riscv
; regex: V=v\d+
function %f() {
sig0 = (i32) -> i32 native
; check: sig0 = (i32 [%x10]) -> i32 [%x10] native
sig1 = (i64) -> b1 native
; check: sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native
; The i64 argument must go in an even-odd register pair.
sig2 = (f32, i64) -> f64 native
; check: sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native
; Spilling into the stack args.
sig3 = (f64, f64, f64, f64, f64, f64, f64, i64) -> f64 native
; check: sig3 = (f64 [%f10], f64 [%f11], f64 [%f12], f64 [%f13], f64 [%f14], f64 [%f15], f64 [%f16], i32 [0], i32 [4]) -> f64 [%f10] native
; Splitting vectors.
sig4 = (i32x4) native
; check: sig4 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13]) native
; Splitting vectors, then splitting ints.
sig5 = (i64x4) native
; check: sig5 = (i32 [%x10], i32 [%x11], i32 [%x12], i32 [%x13], i32 [%x14], i32 [%x15], i32 [%x16], i32 [%x17]) native
ebb0:
return
}


@@ -1,145 +0,0 @@
; Binary emission of 32-bit code.
test binemit
isa riscv
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = function %foo()
sig0 = ()
ebb0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x21] v2 = iconst.i32 2
; Integer Register-Register Operations.
; add
[-,%x7] v10 = iadd v1, v2 ; bin: 015503b3
[-,%x16] v11 = iadd v2, v1 ; bin: 00aa8833
; sub
[-,%x7] v12 = isub v1, v2 ; bin: 415503b3
[-,%x16] v13 = isub v2, v1 ; bin: 40aa8833
; and
[-,%x7] v20 = band v1, v2 ; bin: 015573b3
[-,%x16] v21 = band v2, v1 ; bin: 00aaf833
; or
[-,%x7] v22 = bor v1, v2 ; bin: 015563b3
[-,%x16] v23 = bor v2, v1 ; bin: 00aae833
; xor
[-,%x7] v24 = bxor v1, v2 ; bin: 015543b3
[-,%x16] v25 = bxor v2, v1 ; bin: 00aac833
; sll
[-,%x7] v30 = ishl v1, v2 ; bin: 015513b3
[-,%x16] v31 = ishl v2, v1 ; bin: 00aa9833
; srl
[-,%x7] v32 = ushr v1, v2 ; bin: 015553b3
[-,%x16] v33 = ushr v2, v1 ; bin: 00aad833
; sra
[-,%x7] v34 = sshr v1, v2 ; bin: 415553b3
[-,%x16] v35 = sshr v2, v1 ; bin: 40aad833
; slt
[-,%x7] v42 = icmp slt v1, v2 ; bin: 015523b3
[-,%x16] v43 = icmp slt v2, v1 ; bin: 00aaa833
; sltu
[-,%x7] v44 = icmp ult v1, v2 ; bin: 015533b3
[-,%x16] v45 = icmp ult v2, v1 ; bin: 00aab833
; Integer Register-Immediate Instructions
; addi
[-,%x7] v100 = iadd_imm v1, 1000 ; bin: 3e850393
[-,%x16] v101 = iadd_imm v2, -905 ; bin: c77a8813
; andi
[-,%x7] v110 = band_imm v1, 1000 ; bin: 3e857393
[-,%x16] v111 = band_imm v2, -905 ; bin: c77af813
; ori
[-,%x7] v112 = bor_imm v1, 1000 ; bin: 3e856393
[-,%x16] v113 = bor_imm v2, -905 ; bin: c77ae813
; xori
[-,%x7] v114 = bxor_imm v1, 1000 ; bin: 3e854393
[-,%x16] v115 = bxor_imm v2, -905 ; bin: c77ac813
; slli
[-,%x7] v120 = ishl_imm v1, 31 ; bin: 01f51393
[-,%x16] v121 = ishl_imm v2, 8 ; bin: 008a9813
; srli
[-,%x7] v122 = ushr_imm v1, 31 ; bin: 01f55393
[-,%x16] v123 = ushr_imm v2, 8 ; bin: 008ad813
; srai
[-,%x7] v124 = sshr_imm v1, 31 ; bin: 41f55393
[-,%x16] v125 = sshr_imm v2, 8 ; bin: 408ad813
; slti
[-,%x7] v130 = icmp_imm slt v1, 1000 ; bin: 3e852393
[-,%x16] v131 = icmp_imm slt v2, -905 ; bin: c77aa813
; sltiu
[-,%x7] v132 = icmp_imm ult v1, 1000 ; bin: 3e853393
[-,%x16] v133 = icmp_imm ult v2, -905 ; bin: c77ab813
; lui
[-,%x7] v140 = iconst.i32 0x12345000 ; bin: 123453b7
[-,%x16] v141 = iconst.i32 0xffffffff_fedcb000 ; bin: fedcb837
; addi
[-,%x7] v142 = iconst.i32 1000 ; bin: 3e800393
[-,%x16] v143 = iconst.i32 -905 ; bin: c7700813
; Copies alias to iadd_imm.
[-,%x7] v150 = copy v1 ; bin: 00050393
[-,%x16] v151 = copy v2 ; bin: 000a8813
; Control Transfer Instructions
; jal %x1, fn0
call fn0() ; bin: Call(fn0) 000000ef
; jalr %x1, %x10
call_indirect sig0, v1() ; bin: 000500e7
call_indirect sig0, v2() ; bin: 000a80e7
brz v1, ebb3
brnz v1, ebb1
; jalr %x0, %x1, 0
return v9999 ; bin: 00008067
ebb1:
; beq 0x000
br_icmp eq v1, v2, ebb1 ; bin: 01550063
; bne 0xffc
br_icmp ne v1, v2, ebb1 ; bin: ff551ee3
; blt 0xff8
br_icmp slt v1, v2, ebb1 ; bin: ff554ce3
; bge 0xff4
br_icmp sge v1, v2, ebb1 ; bin: ff555ae3
; bltu 0xff0
br_icmp ult v1, v2, ebb1 ; bin: ff5568e3
; bgeu 0xfec
br_icmp uge v1, v2, ebb1 ; bin: ff5576e3
; Forward branches.
; beq 0x018
br_icmp eq v2, v1, ebb2 ; bin: 00aa8c63
; bne 0x014
br_icmp ne v2, v1, ebb2 ; bin: 00aa9a63
; blt 0x010
br_icmp slt v2, v1, ebb2 ; bin: 00aac863
; bge 0x00c
br_icmp sge v2, v1, ebb2 ; bin: 00aad663
; bltu 0x008
br_icmp ult v2, v1, ebb2 ; bin: 00aae463
; bgeu 0x004
br_icmp uge v2, v1, ebb2 ; bin: 00aaf263
fallthrough ebb2
ebb2:
; jal %x0, 0x00000
jump ebb2 ; bin: 0000006f
ebb3:
; beq x, %x0
brz v1, ebb3 ; bin: 00050063
; bne x, %x0
brnz v1, ebb3 ; bin: fe051ee3
; jal %x0, 0x1ffff4
jump ebb2 ; bin: ff5ff06f
}

View File

@@ -1,21 +0,0 @@
test legalizer
isa riscv supports_m=1
function %int32(i32, i32) {
ebb0(v1: i32, v2: i32):
v10 = iadd v1, v2
; check: [R#0c]
; sameln: $v10 = iadd
v11 = isub v1, v2
; check: [R#200c]
; sameln: $v11 = isub
v12 = imul v1, v2
; check: [R#10c]
; sameln: $v12 = imul
return
; check: [Iret#19]
; sameln: return
}

View File

@@ -1,38 +0,0 @@
; Test the legalization of i32 instructions that don't have RISC-V versions.
test legalizer
set is_64bit=0
isa riscv supports_m=1
set is_64bit=1
isa riscv supports_m=1
; regex: V=v\d+
function %carry_out(i32, i32) -> i32, b1 {
ebb0(v1: i32, v2: i32):
v3, v4 = iadd_cout v1, v2
return v3, v4
}
; check: $v3 = iadd $v1, $v2
; check: $v4 = icmp ult $v3, $v1
; check: return $v3, $v4
; Expanding illegal immediate constants.
; Note that at some point we'll probably expand the iconst as well.
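; RISC-V I-type instructions only take 12-bit signed immediates, so
; 1000000000 must be materialized into a register before the add.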
function %large_imm(i32) -> i32 {
ebb0(v0: i32):
v1 = iadd_imm v0, 1000000000
return v1
}
; check: $(cst=$V) = iconst.i32 0x3b9a_ca00
; check: $v1 = iadd $v0, $cst
; check: return $v1
function %bitclear(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = band_not v0, v1
; check: bnot
; check: band
return v2
}

View File

@@ -1,134 +0,0 @@
; Test legalizer's handling of ABI boundaries.
test legalizer
isa riscv
; regex: V=v\d+
; regex: SS=ss\d+
; regex: WS=\s+
function %int_split_args(i64) -> i64 {
ebb0(v0: i64):
; check: $ebb0($(v0l=$V): i32, $(v0h=$V): i32, $(link=$V): i32):
; check: $v0 = iconcat $v0l, $v0h
v1 = iadd_imm v0, 1
; check: $(v1l=$V), $(v1h=$V) = isplit $v1
; check: return $v1l, $v1h, $link
return v1
}
function %split_call_arg(i32) {
fn1 = function %foo(i64)
fn2 = function %foo(i32, i64)
ebb0(v0: i32):
v1 = uextend.i64 v0
call fn1(v1)
; check: $(v1l=$V), $(v1h=$V) = isplit $v1
; check: call $fn1($v1l, $v1h)
call fn2(v0, v1)
; check: call $fn2($v0, $V, $V)
return
}
function %split_ret_val() {
fn1 = function %foo() -> i64
ebb0:
v1 = call fn1()
; check: $ebb0($(link=$V): i32):
; nextln: $(v1l=$V), $(v1h=$V) = call $fn1()
; check: $v1 = iconcat $v1l, $v1h
jump ebb1(v1)
; check: jump $ebb1($v1)
ebb1(v10: i64):
jump ebb1(v10)
}
; First return value is fine, second one is expanded.
function %split_ret_val2() {
fn1 = function %foo() -> i32, i64
ebb0:
v1, v2 = call fn1()
; check: $ebb0($(link=$V): i32):
; nextln: $v1, $(v2l=$V), $(v2h=$V) = call $fn1()
; check: $v2 = iconcat $v2l, $v2h
jump ebb1(v1, v2)
; check: jump $ebb1($v1, $v2)
ebb1(v9: i32, v10: i64):
jump ebb1(v9, v10)
}
function %int_ext(i8, i8 sext, i8 uext) -> i8 uext {
ebb0(v1: i8, v2: i8, v3: i8):
; check: $ebb0($v1: i8, $(v2x=$V): i32, $(v3x=$V): i32, $(link=$V): i32):
; check: $v2 = ireduce.i8 $v2x
; check: $v3 = ireduce.i8 $v3x
; check: $(v1x=$V) = uextend.i32 $v1
; check: return $v1x, $link
return v1
}
; Function produces single return value, still need to copy.
function %ext_ret_val() {
fn1 = function %foo() -> i8 sext
ebb0:
v1 = call fn1()
; check: $ebb0($V: i32):
; nextln: $(rv=$V) = call $fn1()
; check: $v1 = ireduce.i8 $rv
jump ebb1(v1)
; check: jump $ebb1($v1)
ebb1(v10: i8):
jump ebb1(v10)
}
function %vector_split_args(i64x4) -> i64x4 {
ebb0(v0: i64x4):
; check: $ebb0($(v0al=$V): i32, $(v0ah=$V): i32, $(v0bl=$V): i32, $(v0bh=$V): i32, $(v0cl=$V): i32, $(v0ch=$V): i32, $(v0dl=$V): i32, $(v0dh=$V): i32, $(link=$V): i32):
; check: $(v0a=$V) = iconcat $v0al, $v0ah
; check: $(v0b=$V) = iconcat $v0bl, $v0bh
; check: $(v0ab=$V) = vconcat $v0a, $v0b
; check: $(v0c=$V) = iconcat $v0cl, $v0ch
; check: $(v0d=$V) = iconcat $v0dl, $v0dh
; check: $(v0cd=$V) = vconcat $v0c, $v0d
; check: $v0 = vconcat $v0ab, $v0cd
v1 = bxor v0, v0
; check: $(v1ab=$V), $(v1cd=$V) = vsplit $v1
; check: $(v1a=$V), $(v1b=$V) = vsplit $v1ab
; check: $(v1al=$V), $(v1ah=$V) = isplit $v1a
; check: $(v1bl=$V), $(v1bh=$V) = isplit $v1b
; check: $(v1c=$V), $(v1d=$V) = vsplit $v1cd
; check: $(v1cl=$V), $(v1ch=$V) = isplit $v1c
; check: $(v1dl=$V), $(v1dh=$V) = isplit $v1d
; check: return $v1al, $v1ah, $v1bl, $v1bh, $v1cl, $v1ch, $v1dl, $v1dh, $link
return v1
}
function %indirect(i32) {
sig1 = () native
ebb0(v0: i32):
call_indirect sig1, v0()
return
}
; The first argument to call_indirect doesn't get altered.
function %indirect_arg(i32, f32x2) {
sig1 = (f32x2) native
ebb0(v0: i32, v1: f32x2):
call_indirect sig1, v0(v1)
; check: call_indirect $sig1, $v0($V, $V)
return
}
; Call a function that takes arguments on the stack.
function %stack_args(i32) {
; check: $(ss0=$SS) = outgoing_arg 4
fn1 = function %foo(i64, i64, i64, i64, i32)
ebb0(v0: i32):
v1 = iconst.i64 1
call fn1(v1, v1, v1, v1, v0)
; check: [GPsp#48,$ss0]$WS $(v0s=$V) = spill $v0
; check: call $fn1($(=.*), $v0s)
return
}

View File

@@ -1,64 +0,0 @@
; Test the legalization of i64 arithmetic instructions.
test legalizer
isa riscv supports_m=1
; regex: V=v\d+
function %bitwise_and(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
v3 = band v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#ec
; sameln: $(v3l=$V) = band $v1l, $v2l
; check: [R#ec
; sameln: $(v3h=$V) = band $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %bitwise_or(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
v3 = bor v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#cc
; sameln: $(v3l=$V) = bor $v1l, $v2l
; check: [R#cc
; sameln: $(v3h=$V) = bor $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %bitwise_xor(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
v3 = bxor v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#8c
; sameln: $(v3l=$V) = bxor $v1l, $v2l
; check: [R#8c
; sameln: $(v3h=$V) = bxor $v1h, $v2h
; check: $v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link
function %arith_add(i64, i64) -> i64 {
; Legalizing iadd.i64 requires two steps:
; 1. Narrow to iadd_cout.i32, then
; 2. Expand iadd_cout.i32 since RISC-V has no carry flag.
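; With no carry flag, the carry out of the low word is recovered as
; (low sum < low operand) via icmp ult and then added to the high word.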
ebb0(v1: i64, v2: i64):
v3 = iadd v1, v2
return v3
}
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
; check: [R#0c
; sameln: $(v3l=$V) = iadd $v1l, $v2l
; check: $(c=$V) = icmp ult $v3l, $v1l
; check: [R#0c
; sameln: $(v3h1=$V) = iadd $v1h, $v2h
; check: $(c_int=$V) = bint.i32 $c
; check: [R#0c
; sameln: $(v3h=$V) = iadd $v3h1, $c_int
; check: $v3 = iconcat $v3l, $v3h
; check: return $v3l, $v3h, $link

View File

@@ -1,36 +0,0 @@
; Test the parser's support for encoding annotations.
test legalizer
isa riscv
function %parse_encoding(i32 [%x5]) -> i32 [%x10] {
; check: function %parse_encoding(i32 [%x5], i32 link [%x1]) -> i32 [%x10], i32 link [%x1] native {
sig0 = (i32 [%x10]) -> i32 [%x10] native
; check: sig0 = (i32 [%x10]) -> i32 [%x10] native
sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native
; check: sig1 = (i32 [%x10], i32 [%x11]) -> b1 [%x10] native
sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native
; check: sig2 = (f32 [%f10], i32 [%x12], i32 [%x13]) -> f64 [%f10] native
; Arguments on stack where not necessary
sig3 = (f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10] native
; check: sig3 = (f64 [%f10], i32 [0], i32 [4]) -> f64 [%f10] native
; Stack argument before register argument
sig4 = (f32 [72], i32 [%x10]) native
; check: sig4 = (f32 [72], i32 [%x10]) native
; Return value on stack
sig5 = () -> f32 [0] native
; check: sig5 = () -> f32 [0] native
; function + signature
fn15 = function %bar(i32 [%x10]) -> b1 [%x10] native
; check: sig6 = (i32 [%x10]) -> b1 [%x10] native
; nextln: fn0 = sig6 %bar
ebb0(v0: i32):
return v0
}

View File

@@ -1,15 +0,0 @@
; Test tracking of register moves.
test binemit
isa riscv
function %regmoves(i32 link [%x1]) -> i32 link [%x1] {
ebb0(v9999: i32):
[-,%x10] v1 = iconst.i32 1
[-,%x7] v2 = iadd_imm v1, 1000 ; bin: 3e850393
regmove v1, %x10 -> %x11 ; bin: 00050593
[-,%x7] v3 = iadd_imm v1, 1000 ; bin: 3e858393
regmove v1, %x11 -> %x10 ; bin: 00058513
[-,%x7] v4 = iadd_imm v1, 1000 ; bin: 3e850393
return v9999
}

View File

@@ -1,55 +0,0 @@
; Test the legalization of EBB arguments that are split.
test legalizer
isa riscv
; regex: V=v\d+
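; Each i64 EBB argument becomes a (low, high) pair of i32 arguments, and
; jump arguments are split to match, as the checks below show.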
function %simple(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump $ebb1($v1l, $v1h)
ebb1(v3: i64):
; check: $ebb1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
return v4
; check: return $v4l, $v4h, $link
}
function %multi(i64) -> i64 {
ebb1(v1: i64):
; check: $ebb1($(v1l=$V): i32, $(v1h=$V): i32, $(link=$V): i32):
jump ebb2(v1, v1)
; check: jump $ebb2($v1l, $v1l, $v1h, $v1h)
ebb2(v2: i64, v3: i64):
; check: $ebb2($(v2l=$V): i32, $(v3l=$V): i32, $(v2h=$V): i32, $(v3h=$V): i32):
jump ebb3(v2)
; check: jump $ebb3($v2l, $v2h)
ebb3(v4: i64):
; check: $ebb3($(v4l=$V): i32, $(v4h=$V): i32):
v5 = band v4, v3
; check: $(v5l=$V) = band $v4l, $v3l
; check: $(v5h=$V) = band $v4h, $v3h
return v5
; check: return $v5l, $v5h, $link
}
function %loop(i64, i64) -> i64 {
ebb0(v1: i64, v2: i64):
; check: $ebb0($(v1l=$V): i32, $(v1h=$V): i32, $(v2l=$V): i32, $(v2h=$V): i32, $(link=$V): i32):
jump ebb1(v1)
; check: jump $ebb1($v1l, $v1h)
ebb1(v3: i64):
; check: $ebb1($(v3l=$V): i32, $(v3h=$V): i32):
v4 = band v3, v2
; check: $(v4l=$V) = band $v3l, $v2l
; check: $(v4h=$V) = band $v3h, $v2h
jump ebb1(v4)
; check: jump $ebb1($v4l, $v4h)
}

View File

@@ -1,21 +0,0 @@
test verifier
isa riscv
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = function %foo()
ebb0(v9999: i32):
; iconst.i32 needs legalizing, so it should throw a re-encoding error.
[R#0,-] v1 = iconst.i32 0xf0f0f0f0f0 ; error: Instruction failed to re-encode
return v9999
}
function %RV32I(i32 link [%x1]) -> i32 link [%x1] {
fn0 = function %foo()
ebb0(v9999: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
[R#0,-] v3 = iadd v1, v2 ; error: Instruction re-encoding
return v9999
}

View File

@@ -1,31 +0,0 @@
test licm
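; LICM hoists the loop-invariant iconst/iadd chain into a new pre-header
; EBB that jumps into the loop.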
function %simple_loop(i32) -> i32 {
ebb1(v0: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
v3 = iadd v1, v2
brz v0, ebb2(v0)
v4 = isub v0, v1
jump ebb1(v4)
ebb2(v5: i32):
return v5
}
; sameln: function %simple_loop
; nextln: ebb2(v6: i32):
; nextln: v1 = iconst.i32 1
; nextln: v2 = iconst.i32 2
; nextln: v3 = iadd v1, v2
; nextln: jump ebb0(v6)
; nextln:
; nextln: ebb0(v0: i32):
; nextln: brz v0, ebb1(v0)
; nextln: v4 = isub v0, v1
; nextln: jump ebb0(v4)
; nextln:
; nextln: ebb1(v5: i32):
; nextln: return v5
; nextln: }

View File

@@ -1,81 +0,0 @@
test licm
function %complex(i32) -> i32 {
ebb0(v0: i32):
v1 = iconst.i32 1
v19 = iconst.i32 4
v2 = iadd v1, v0
brz v0, ebb1(v1)
jump ebb3(v2)
ebb1(v3: i32):
v4 = iconst.i32 2
v5 = iadd v3, v2
v6 = iadd v4, v0
jump ebb2(v6)
ebb2(v7: i32):
v8 = iadd v7, v3
v9 = iadd v0, v2
brz v0, ebb1(v7)
jump ebb5(v8)
ebb3(v10: i32):
v11 = iconst.i32 3
v12 = iadd v10, v11
v13 = iadd v2, v11
jump ebb4(v11)
ebb4(v14: i32):
v15 = iadd v12, v2
brz v0, ebb3(v14)
jump ebb5(v14)
ebb5(v16: i32):
v17 = iadd v16, v1
v18 = iadd v1, v19
brz v0, ebb0(v18)
return v17
}
; sameln: function %complex
; nextln: ebb6(v20: i32):
; nextln: v1 = iconst.i32 1
; nextln: v2 = iconst.i32 4
; nextln: v5 = iconst.i32 2
; nextln: v12 = iconst.i32 3
; nextln: v19 = iadd v1, v2
; nextln: jump ebb0(v20)
; nextln:
; nextln: ebb0(v0: i32):
; nextln: v3 = iadd.i32 v1, v0
; nextln: v7 = iadd.i32 v5, v0
; nextln: v10 = iadd v0, v3
; nextln: brz v0, ebb1(v1)
; nextln: v14 = iadd v3, v12
; nextln: jump ebb3(v3)
; nextln:
; nextln: ebb1(v4: i32):
; nextln: v6 = iadd v4, v3
; nextln: jump ebb2(v7)
; nextln:
; nextln: ebb2(v8: i32):
; nextln: v9 = iadd v8, v4
; nextln: brz.i32 v0, ebb1(v8)
; nextln: jump ebb5(v9)
; nextln:
; nextln: ebb3(v11: i32):
; nextln: v13 = iadd v11, v12
; nextln: jump ebb4(v12)
; nextln:
; nextln: ebb4(v15: i32):
; nextln: v16 = iadd.i32 v13, v3
; nextln: brz.i32 v0, ebb3(v15)
; nextln: jump ebb5(v15)
; nextln:
; nextln: ebb5(v17: i32):
; nextln: v18 = iadd v17, v1
; nextln: brz.i32 v0, ebb0(v19)
; nextln: return v18
; nextln: }

View File

@@ -1,46 +0,0 @@
test licm
function %multiple_blocks(i32) -> i32 {
ebb0(v0: i32):
jump ebb1(v0)
ebb1(v10: i32):
v11 = iconst.i32 1
v12 = iconst.i32 2
v13 = iadd v11, v12
brz v10, ebb2(v10)
v15 = isub v10, v11
brz v15, ebb3(v15)
v14 = isub v10, v11
jump ebb1(v14)
ebb2(v20: i32):
return v20
ebb3(v30: i32):
v31 = iadd v11, v13
jump ebb1(v30)
}
; sameln:function %multiple_blocks(i32) -> i32 {
; nextln: ebb0(v0: i32):
; nextln: v2 = iconst.i32 1
; nextln: v3 = iconst.i32 2
; nextln: v4 = iadd v2, v3
; nextln: v9 = iadd v2, v4
; nextln: jump ebb1(v0)
; nextln:
; nextln: ebb1(v1: i32):
; nextln: brz v1, ebb2(v1)
; nextln: v5 = isub v1, v2
; nextln: brz v5, ebb3(v5)
; nextln: v6 = isub v1, v2
; nextln: jump ebb1(v6)
; nextln:
; nextln: ebb2(v7: i32):
; nextln: return v7
; nextln:
; nextln: ebb3(v8: i32):
; nextln: jump ebb1(v8)
; nextln: }

View File

@@ -1,52 +0,0 @@
test licm
function %nested_loops(i32) -> i32 {
ebb0(v0: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
v3 = iadd v1, v2
v4 = isub v0, v1
jump ebb1(v4,v4)
ebb1(v10: i32,v11: i32):
brz v11, ebb2(v10)
v12 = iconst.i32 1
v15 = iadd v12, v4
v13 = isub v11, v12
jump ebb1(v10,v13)
ebb2(v20: i32):
brz v20, ebb3(v20)
jump ebb0(v20)
ebb3(v30: i32):
return v30
}
; sameln:function %nested_loops(i32) -> i32 {
; nextln: ebb4(v12: i32):
; nextln: v1 = iconst.i32 1
; nextln: v2 = iconst.i32 2
; nextln: v3 = iadd v1, v2
; nextln: v7 = iconst.i32 1
; nextln: jump ebb0(v12)
; nextln:
; nextln: ebb0(v0: i32):
; nextln: v4 = isub v0, v1
; nextln: v8 = iadd.i32 v7, v4
; nextln: jump ebb1(v4, v4)
; nextln:
; nextln: ebb1(v5: i32, v6: i32):
; nextln: brz v6, ebb2(v5)
; nextln: v9 = isub v6, v7
; nextln: jump ebb1(v5, v9)
; nextln:
; nextln: ebb2(v10: i32):
; nextln: brz v10, ebb3(v10)
; nextln: jump ebb0(v10)
; nextln:
; nextln: ebb3(v11: i32):
; nextln: return v11
; nextln: }

11
filetests/memory.wast Normal file
View File

@@ -0,0 +1,11 @@
(module
(memory 1)
(func $main (local i32)
(i32.store (i32.const 0) (i32.const 0x0))
(if (i32.load (i32.const 0))
(then (i32.store (i32.const 0) (i32.const 0xa)))
(else (i32.store (i32.const 0) (i32.const 0xb))))
)
(start $main)
(data (i32.const 0) "0000")
)

View File

@@ -1,113 +0,0 @@
; Parsing branches and jumps.
test cat
; Jumps with no arguments. The '()' empty argument list is optional.
function %minimal() {
ebb0:
jump ebb1
ebb1:
jump ebb0()
}
; sameln: function %minimal() native {
; nextln: ebb0:
; nextln: jump ebb1
; nextln:
; nextln: ebb1:
; nextln: jump ebb0
; nextln: }
; Jumps with 1 arg.
function %onearg(i32) {
ebb0(v90: i32):
jump ebb1(v90)
ebb1(v91: i32):
jump ebb0(v91)
}
; sameln: function %onearg(i32) native {
; nextln: ebb0($v90: i32):
; nextln: jump ebb1($v90)
; nextln:
; nextln: ebb1($v91: i32):
; nextln: jump ebb0($v91)
; nextln: }
; Jumps with 2 args.
function %twoargs(i32, f32) {
ebb0(v90: i32, v91: f32):
jump ebb1(v90, v91)
ebb1(v92: i32, v93: f32):
jump ebb0(v92, v93)
}
; sameln: function %twoargs(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: jump ebb1($v90, $v91)
; nextln:
; nextln: ebb1($v92: i32, $v93: f32):
; nextln: jump ebb0($v92, $v93)
; nextln: }
; Branches with no arguments. The '()' empty argument list is optional.
function %minimal(i32) {
ebb0(v90: i32):
brz v90, ebb1
ebb1:
brnz v90, ebb1()
}
; sameln: function %minimal(i32) native {
; nextln: ebb0($v90: i32):
; nextln: brz $v90, ebb1
; nextln:
; nextln: ebb1:
; nextln: brnz.i32 $v90, ebb1
; nextln: }
function %twoargs(i32, f32) {
ebb0(v90: i32, v91: f32):
brz v90, ebb1(v90, v91)
ebb1(v92: i32, v93: f32):
brnz v90, ebb0(v92, v93)
}
; sameln: function %twoargs(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: brz $v90, ebb1($v90, $v91)
; nextln:
; nextln: ebb1($v92: i32, $v93: f32):
; nextln: brnz.i32 $v90, ebb0($v92, $v93)
; nextln: }
function %jumptable(i32) {
jt200 = jump_table 0, 0
jt2 = jump_table 0, 0, ebb10, ebb40, ebb20, ebb30
ebb10(v3: i32):
br_table v3, jt2
trap
ebb20:
trap
ebb30:
trap
ebb40:
trap
}
; sameln: function %jumptable(i32) native {
; nextln: jt0 = jump_table 0
; nextln: jt1 = jump_table 0, 0, ebb0, ebb3, ebb1, ebb2
; nextln:
; nextln: ebb0($v3: i32):
; nextln: br_table $v3, jt1
; nextln: trap
; nextln:
; nextln: ebb1:
; nextln: trap
; nextln:
; nextln: ebb2:
; nextln: trap
; nextln:
; nextln: ebb3:
; nextln: trap
; nextln: }

View File

@@ -1,80 +0,0 @@
; Parser tests for call and return syntax.
test cat
function %mini() {
ebb1:
return
}
; sameln: function %mini() native {
; nextln: ebb0:
; nextln: return
; nextln: }
function %r1() -> i32, f32 spiderwasm {
ebb1:
v1 = iconst.i32 3
v2 = f32const 0.0
return v1, v2
}
; sameln: function %r1() -> i32, f32 spiderwasm {
; nextln: ebb0:
; nextln: $v1 = iconst.i32 3
; nextln: $v2 = f32const 0.0
; nextln: return $v1, $v2
; nextln: }
function %signatures() {
sig10 = ()
sig11 = (i32, f64) -> i32, b1 spiderwasm
fn5 = sig11 %foo
fn8 = function %bar(i32) -> b1
}
; sameln: function %signatures() native {
; nextln: $sig10 = () native
; nextln: $sig11 = (i32, f64) -> i32, b1 spiderwasm
; nextln: sig2 = (i32) -> b1 native
; nextln: $fn5 = $sig11 %foo
; nextln: $fn8 = sig2 %bar
; nextln: }
function %direct() {
fn0 = function %none()
fn1 = function %one() -> i32
fn2 = function %two() -> i32, f32
ebb0:
call fn0()
v1 = call fn1()
v2, v3 = call fn2()
return
}
; check: call $fn0()
; check: $v1 = call $fn1()
; check: $v2, $v3 = call $fn2()
; check: return
function %indirect(i64) {
sig0 = (i64)
sig1 = () -> i32
sig2 = () -> i32, f32
ebb0(v0: i64):
v1 = call_indirect sig1, v0()
call_indirect sig0, v1(v0)
v3, v4 = call_indirect sig2, v1()
return
}
; check: $v1 = call_indirect $sig1, $v0()
; check: call_indirect $sig0, $v1($v0)
; check: $v3, $v4 = call_indirect $sig2, $v1()
; check: return
; Special-purpose function arguments
function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret {
ebb0(v1: i32, v2: i32, v3: i32, v4: i32):
return v4, v2, v3, v1
}
; check: function %special1(i32 sret, i32 fp, i32 csr, i32 link) -> i32 link, i32 fp, i32 csr, i32 sret native {
; check: ebb0($v1: i32, $v2: i32, $v3: i32, $v4: i32):
; check: return $v4, $v2, $v3, $v1
; check: }

View File

@@ -1,24 +0,0 @@
test cat
isa riscv
; regex: WS=[ \t]*
function %foo(i32, i32) {
ebb1(v0: i32, v1: i32):
[-,-] v2 = iadd v0, v1
[-] trap
[R#1234, %x5, %x11] v6, v7 = iadd_cout v2, v0
[Rshamt#beef, %x25] v8 = ishl_imm v6, 2
v9 = iadd v8, v7
[Iret#5] return v0, v8
}
; sameln: function %foo(i32, i32) native {
; nextln: $ebb1($v0: i32, $v1: i32):
; nextln: [-,-]$WS $v2 = iadd $v0, $v1
; nextln: [-]$WS trap
; nextln: [R#1234,%x5,%x11]$WS $v6, $v7 = iadd_cout $v2, $v0
; nextln: [Rshamt#beef,%x25]$WS $v8 = ishl_imm $v6, 2
; nextln: [-,-]$WS $v9 = iadd $v8, $v7
; nextln: [Iret#05]$WS return $v0, $v8
; nextln: }

View File

@@ -1,5 +0,0 @@
test cat
; 'function' is not a keyword, and can be used as the name of a function too.
function %function() {}
; check: function %function() native

View File

@@ -1,37 +0,0 @@
; The .cton parser can't preserve the actual entity numbers in the input file
; since entities are numbered as they are created. For entities declared in the
; preamble, this is no problem, but for EBB and value references, mapping
; source numbers to real numbers can be a problem.
;
; It is possible to refer to instructions and EBBs that have not yet been
; defined in the lexical order, so the parser needs to rewrite these references
; after the fact.
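; For example, ebb100 in %use_value below prints as ebb0, while the checks
; use $-references to track the renumbered entities.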
test cat
; Check that defining numbers are rewritten.
function %defs() {
ebb100(v20: i32):
v1000 = iconst.i32x8 5
v9200 = f64const 0x4.0p0
trap
}
; sameln: function %defs() native {
; nextln: $ebb100($v20: i32):
; nextln: $v1000 = iconst.i32x8 5
; nextln: $v9200 = f64const 0x1.0000000000000p2
; nextln: trap
; nextln: }
; Using values.
function %use_value() {
ebb100(v20: i32):
v1000 = iadd_imm v20, 5
v200 = iadd v20, v1000
jump ebb100(v1000)
}
; sameln: function %use_value() native {
; nextln: ebb0($v20: i32):
; nextln: $v1000 = iadd_imm $v20, 5
; nextln: $v200 = iadd $v20, $v1000
; nextln: jump ebb0($v1000)
; nextln: }

View File

@@ -1,24 +0,0 @@
test cat
test verifier
function %add_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 {
ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
v10, v11 = iadd_cout v1, v4
; check: $v10, $v11 = iadd_cout $v1, $v4
v20, v21 = iadd_carry v2, v5, v11
; check: $v20, $v21 = iadd_carry $v2, $v5, $v11
v30 = iadd_cin v3, v6, v21
; check: $v30 = iadd_cin $v3, $v6, $v21
return v10, v20, v30
}
function %sub_i96(i32, i32, i32, i32, i32, i32) -> i32, i32, i32 {
ebb1(v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32):
v10, v11 = isub_bout v1, v4
; check: $v10, $v11 = isub_bout $v1, $v4
v20, v21 = isub_borrow v2, v5, v11
; check: $v20, $v21 = isub_borrow $v2, $v5, $v11
v30 = isub_bin v3, v6, v21
; check: $v30 = isub_bin $v3, $v6, $v21
return v10, v20, v30
}

View File

@@ -1,193 +0,0 @@
test cat
; The smallest possible function.
function %minimal() {
ebb0:
trap
}
; sameln: function %minimal() native {
; nextln: ebb0:
; nextln: trap
; nextln: }
; Create and use values.
; Polymorphic instructions with type suffix.
function %ivalues() {
ebb0:
v0 = iconst.i32 2
v1 = iconst.i8 6
v2 = ishl v0, v1
}
; sameln: function %ivalues() native {
; nextln: ebb0:
; nextln: $v0 = iconst.i32 2
; nextln: $v1 = iconst.i8 6
; nextln: $v2 = ishl $v0, $v1
; nextln: }
; Create and use values.
; Polymorphic instructions with type suffix.
function %bvalues() {
ebb0:
v0 = bconst.b32 true
v1 = bconst.b8 false
v2 = bextend.b32 v1
v3 = bxor v0, v2
}
; sameln: function %bvalues() native {
; nextln: ebb0:
; nextln: $v0 = bconst.b32 true
; nextln: $v1 = bconst.b8 false
; nextln: $v2 = bextend.b32 v1
; nextln: $v3 = bxor v0, v2
; nextln: }
; Polymorphic instruction controlled by the second operand.
function %select() {
ebb0(v90: i32, v91: i32, v92: b1):
v0 = select v92, v90, v91
}
; sameln: function %select() native {
; nextln: ebb0($v90: i32, $v91: i32, $v92: b1):
; nextln: $v0 = select $v92, $v90, $v91
; nextln: }
; Lane indexes.
function %lanes() {
ebb0:
v0 = iconst.i32x4 2
v1 = extractlane v0, 3
v2 = insertlane v0, 1, v1
}
; sameln: function %lanes() native {
; nextln: ebb0:
; nextln: $v0 = iconst.i32x4 2
; nextln: $v1 = extractlane $v0, 3
; nextln: $v2 = insertlane $v0, 1, $v1
; nextln: }
; Integer condition codes.
function %icmp(i32, i32) {
ebb0(v90: i32, v91: i32):
v0 = icmp eq v90, v91
v1 = icmp ult v90, v91
v2 = icmp_imm sge v90, -12
v3 = irsub_imm v91, 45
br_icmp eq v90, v91, ebb0(v91, v90)
}
; sameln: function %icmp(i32, i32) native {
; nextln: ebb0($v90: i32, $v91: i32):
; nextln: $v0 = icmp eq $v90, $v91
; nextln: $v1 = icmp ult $v90, $v91
; nextln: $v2 = icmp_imm sge $v90, -12
; nextln: $v3 = irsub_imm $v91, 45
; nextln: br_icmp eq $v90, $v91, ebb0($v91, $v90)
; nextln: }
; Floating condition codes.
function %fcmp(f32, f32) {
ebb0(v90: f32, v91: f32):
v0 = fcmp eq v90, v91
v1 = fcmp uno v90, v91
v2 = fcmp lt v90, v91
}
; sameln: function %fcmp(f32, f32) native {
; nextln: ebb0($v90: f32, $v91: f32):
; nextln: $v0 = fcmp eq $v90, $v91
; nextln: $v1 = fcmp uno $v90, $v91
; nextln: $v2 = fcmp lt $v90, $v91
; nextln: }
; The bitcast instruction has two type variables: the controlling type variable
; controls the output type, and the input type is a free variable.
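; For example, bitcast.i8x4 below reinterprets the 32 bits of an i32 as
; four i8 lanes without changing them.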
function %bitcast(i32, f32) {
ebb0(v90: i32, v91: f32):
v0 = bitcast.i8x4 v90
v1 = bitcast.i32 v91
}
; sameln: function %bitcast(i32, f32) native {
; nextln: ebb0($v90: i32, $v91: f32):
; nextln: $v0 = bitcast.i8x4 $v90
; nextln: $v1 = bitcast.i32 $v91
; nextln: }
; Stack slot references
function %stack() {
ss10 = spill_slot 8
ss2 = local 4
ss3 = incoming_arg 4, offset 8
ss4 = outgoing_arg 4
ebb0:
v1 = stack_load.i32 ss10
v2 = stack_load.i32 ss10+4
stack_store v1, ss10+2
stack_store v2, ss2
}
; sameln: function %stack() native {
; nextln: $ss10 = spill_slot 8
; nextln: $ss2 = local 4
; nextln: $ss3 = incoming_arg 4, offset 8
; nextln: $ss4 = outgoing_arg 4
; check: ebb0:
; nextln: $v1 = stack_load.i32 $ss10
; nextln: $v2 = stack_load.i32 $ss10+4
; nextln: stack_store $v1, $ss10+2
; nextln: stack_store $v2, $ss2
; Heap access instructions.
function %heap(i32) {
; TODO: heap0 = heap %foo
ebb0(v1: i32):
v2 = heap_load.f32 v1
v3 = heap_load.f32 v1+12
heap_store v3, v1
}
; sameln: function %heap(i32) native {
; nextln: ebb0($v1: i32):
; nextln: $v2 = heap_load.f32 $v1
; nextln: $v3 = heap_load.f32 $v1+12
; nextln: heap_store $v3, $v1
; Memory access instructions.
function %memory(i32) {
ebb0(v1: i32):
v2 = load.i64 v1
v3 = load.i64 aligned v1
v4 = load.i64 notrap v1
v5 = load.i64 notrap aligned v1
v6 = load.i64 aligned notrap v1
v7 = load.i64 v1-12
v8 = load.i64 notrap v1+0x1_0000
store v2, v1
store aligned v3, v1+12
store notrap aligned v3, v1-12
}
; sameln: function %memory(i32) native {
; nextln: ebb0($v1: i32):
; nextln: $v2 = load.i64 $v1
; nextln: $v3 = load.i64 aligned $v1
; nextln: $v4 = load.i64 notrap $v1
; nextln: $v5 = load.i64 notrap aligned $v1
; nextln: $v6 = load.i64 notrap aligned $v1
; nextln: $v7 = load.i64 $v1-12
; nextln: $v8 = load.i64 notrap $v1+0x0001_0000
; nextln: store $v2, $v1
; nextln: store aligned $v3, $v1+12
; nextln: store notrap aligned $v3, $v1-12
; Register diversions.
; This test file has no ISA, so we can only use register unit numbers.
function %diversion(i32) {
ebb0(v1: i32):
regmove v1, %10 -> %20
regmove v1, %20 -> %10
return
}
; sameln: function %diversion(i32) native {
; nextln: ebb0($v1: i32):
; nextln: regmove $v1, %10 -> %20
; nextln: regmove $v1, %20 -> %10
; nextln: return
; nextln: }

View File

@@ -1,80 +0,0 @@
test regalloc
; We can add more ISAs once they have defined encodings.
isa riscv
; regex: RX=%x\d+
function %add(i32, i32) {
ebb0(v1: i32, v2: i32):
v3 = iadd v1, v2
; check: [R#0c,%x5]
; sameln: iadd
return
}
; Function with a dead argument.
function %dead_arg(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
; check: return $v1
return v1
}
; Return a value from a different register.
function %move1(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
; check: regmove $v2, %x11 -> %x10
; nextln: return $v2
return v2
}
; Swap two registers.
function %swap(i32, i32) -> i32, i32 {
ebb0(v1: i32, v2: i32):
; not: regmove
; check: regmove $v2, %x11 -> $(tmp=$RX)
; nextln: regmove $v1, %x10 -> %x11
; nextln: regmove $v2, $tmp -> %x10
; nextln: return $v2, $v1
return v2, v1
}
; Return an EBB argument.
function %retebb(i32, i32) -> i32 {
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1)
jump ebb1(v2)
ebb1(v10: i32):
return v10
}
; Pass an EBB argument as a function argument.
function %callebb(i32, i32) -> i32 {
fn0 = function %foo(i32) -> i32
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1)
jump ebb1(v2)
ebb1(v10: i32):
v11 = call fn0(v10)
return v11
}
; Pass an EBB argument as a jump argument.
function %jumpebb(i32, i32) -> i32 {
fn0 = function %foo(i32) -> i32
ebb0(v1: i32, v2: i32):
brnz v1, ebb1(v1, v2)
jump ebb1(v2, v1)
ebb1(v10: i32, v11: i32):
jump ebb2(v10, v11)
ebb2(v20: i32, v21: i32):
return v21
}

View File

@@ -1,111 +0,0 @@
test regalloc
isa riscv
; Test the coalescer.
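; The coalescer inserts copies so that EBB arguments and the values passed
; to them can share a register, putting the function in conventional SSA
; form (CSSA).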
; regex: V=v\d+
; regex: WS=\s+
; This function is already CSSA, so no copies should be inserted.
function %cssa(i32) -> i32 {
ebb0(v0: i32):
; not: copy
; v0 is used by the branch and passed as an arg - that's no conflict.
brnz v0, ebb1(v0)
; v0 is live across the branch above. That's no conflict.
v1 = iadd_imm v0, 7
jump ebb1(v1)
ebb1(v10: i32):
v11 = iadd_imm v10, 7
return v11
}
function %trivial(i32) -> i32 {
ebb0(v0: i32):
; check: $(cp1=$V) = copy $v0
; nextln: brnz $v0, $ebb1($cp1)
brnz v0, ebb1(v0)
; not: copy
v1 = iadd_imm v0, 7
jump ebb1(v1)
ebb1(v10: i32):
; The use of v0 in the destination EBB causes a conflict.
v11 = iadd v10, v0
return v11
}
; A value is used as an SSA argument twice in the same branch.
function %dualuse(i32) -> i32 {
ebb0(v0: i32):
; check: $(cp1=$V) = copy $v0
; nextln: brnz $v0, $ebb1($v0, $cp1)
brnz v0, ebb1(v0, v0)
; not: copy
v1 = iadd_imm v0, 7
v2 = iadd_imm v1, 56
jump ebb1(v1, v2)
ebb1(v10: i32, v11: i32):
v12 = iadd v10, v11
return v12
}
; Interference away from the branch
; The interference can be broken with a copy at either branch.
function %interference(i32) -> i32 {
ebb0(v0: i32):
; not: copy
brnz v0, ebb1(v0)
v1 = iadd_imm v0, 7
; v1 and v0 interfere here:
v2 = iadd_imm v0, 8
; check: $(cp1=$V) = copy $v1
; not: copy
; check: jump $ebb1($cp1)
jump ebb1(v1)
ebb1(v10: i32):
; not: copy
v11 = iadd_imm v10, 7
return v11
}
; A loop where one induction variable is used as a backedge argument.
function %fibonacci(i32) -> i32 {
ebb0(v0: i32):
; not: copy
v1 = iconst.i32 1
v2 = iconst.i32 2
jump ebb1(v1, v2)
ebb1(v10: i32, v11: i32):
; v11 needs to be isolated because it interferes with v10.
; check: $ebb1($v10: i32, $(nv11a=$V): i32)
; check: $v11 = copy $nv11a
v12 = iadd v10, v11
v13 = icmp ult v12, v0
; check: $(nv11b=$V) = copy $v11
; not: copy
; check: brnz $v13, $ebb1($nv11b, $v12)
brnz v13, ebb1(v11, v12)
return v12
}
; Function arguments passed on the stack aren't allowed to be part of a virtual
; register, at least for now. This is because the other values in the virtual
; register would need to be spilled to the incoming_arg stack slot, which we treat
; as belonging to the caller.
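; Below, the ninth i32 argument arrives on the stack in ss0 and is filled
; into a register before use rather than joining a virtual register.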
function %stackarg(i32, i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss0 = incoming_arg 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32, v8: i32):
; check: fill v8
; not: v8
brnz v0, ebb1(v8)
jump ebb1(v7)
ebb1(v10: i32):
v11 = iadd_imm v10, 1
return v11
}

View File

@@ -1,82 +0,0 @@
test regalloc
isa intel
; regex: V=v\d+
; regex: REG=%r([abcd]x|[sd]i)
; Tied operands, both are killed at instruction.
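; Intel's two-operand instructions tie the destination register to the
; first input, so the allocator must reuse that register or insert a copy.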
function %tied_easy() -> i32 {
ebb0:
v0 = iconst.i32 12
v1 = iconst.i32 13
; not: copy
; check: isub
v2 = isub v0, v1
return v2
}
; Tied operand is live after instruction.
function %tied_alive() -> i32 {
ebb0:
v0 = iconst.i32 12
v1 = iconst.i32 13
; check: $(v0c=$V) = copy $v0
; check: $v2 = isub $v0c, $v1
v2 = isub v0, v1
; check: $v3 = iadd $v2, $v0
v3 = iadd v2, v0
return v3
}
; Fixed register constraint.
function %fixed_op() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
v2 = ishl v1, v0
return v2
}
; Fixed register constraint twice.
function %fixed_op_twice() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
v2 = ishl v1, v0
; check: regmove $v0, %rcx -> $REG
; check: regmove $v2, $REG -> %rcx
v3 = ishl v0, v2
return v3
}
; Tied use of a diverted register.
function %tied_divert() -> i32 {
ebb0:
; check: ,%rax]
; sameln: $v0 = iconst.i32 12
v0 = iconst.i32 12
v1 = iconst.i32 13
; The dynamic shift amount must be in %rcx
; check: regmove $v0, %rax -> %rcx
; check: $v2 = ishl $v1, $v0
v2 = ishl v1, v0
; Now v0 is globally allocated to %rax, but diverted to %rcx.
; Check that the tied def gets the diverted register.
v3 = isub v0, v2
; not: regmove
; check: ,%rcx]
; sameln: isub
; Move it into place for the return value.
; check: regmove $v3, %rcx -> %rax
return v3
}

View File

@@ -1,196 +0,0 @@
test regalloc
; Test the spiller on an ISA with few registers.
; RV32E has 16 registers, where:
; - %x0 is hardwired to zero.
; - %x1 is the return address.
; - %x2 is the stack pointer.
; - %x3 is the global pointer.
; - %x4 is the thread pointer.
; - %x10-%x15 are function arguments.
;
; regex: V=v\d+
; regex: WS=\s+
isa riscv enable_e
; In straight-line code, the first value defined is spilled.
; That is, in order:
; 1. The argument v1.
; 2. The link register.
; 3. The first computed value, v2
function %pyramid(i32) -> i32 {
; check: ss0 = spill_slot 4
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
; not: spill_slot
ebb0(v1: i32):
; check: $ebb0($(rv1=$V): i32, $(rlink=$V): i32)
; check: ,ss0]$WS $v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
; nextln: ,ss2]$WS $v2 = spill $r1v2
; not: spill
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
v5 = iadd_imm v4, 12
v6 = iadd_imm v5, 12
v7 = iadd_imm v6, 12
v8 = iadd_imm v7, 12
v9 = iadd_imm v8, 12
v10 = iadd_imm v9, 12
v11 = iadd_imm v10, 12
v12 = iadd_imm v11, 12
v13 = iadd_imm v12, 12
v14 = iadd_imm v13, 12
v33 = iadd v13, v14
; check: iadd $v13
v32 = iadd v33, v12
v31 = iadd v32, v11
v30 = iadd v31, v10
v29 = iadd v30, v9
v28 = iadd v29, v8
v27 = iadd v28, v7
v26 = iadd v27, v6
v25 = iadd v26, v5
v24 = iadd v25, v4
v23 = iadd v24, v3
v22 = iadd v23, v2
; check: $(r2v2=$V) = fill $v2
; check: $v22 = iadd $v23, $r2v2
v21 = iadd v22, v1
; check: $(r2v1=$V) = fill $v1
; check: $v21 = iadd $v22, $r2v1
; check: $(rlink2=$V) = fill $link
return v21
; check: return $v21, $rlink2
}
; All values live across a call must be spilled
function %across_call(i32) {
fn0 = function %foo(i32)
ebb0(v1: i32):
; check: $v1 = spill
call fn0(v1)
; check: call $fn0
call fn0(v1)
; check: fill $v1
; check: call $fn0
return
}
; The same value used for two function arguments.
function %doubleuse(i32) {
fn0 = function %xx(i32, i32)
ebb0(v0: i32):
; check: $(c=$V) = copy $v0
call fn0(v0, v0)
; check: call $fn0($v0, $c)
return
}
; The same value used as indirect callee and argument.
function %doubleuse_icall1(i32) {
sig0 = (i32) native
ebb0(v0: i32):
; not: copy
call_indirect sig0, v0(v0)
return
}
; The same value used as indirect callee and two arguments.
function %doubleuse_icall2(i32) {
sig0 = (i32, i32) native
ebb0(v0: i32):
; check: $(c=$V) = copy $v0
call_indirect sig0, v0(v0, v0)
; check: call_indirect $sig0, $v0($v0, $c)
return
}
; Two arguments on the stack.
function %stackargs(i32, i32, i32, i32, i32, i32, i32, i32) -> i32 {
; check: ss0 = incoming_arg 4
; check: ss1 = incoming_arg 4, offset 4
; not: incoming_arg
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32, v5: i32, v6: i32, v7: i32):
; unordered: fill $v6
; unordered: fill $v7
v10 = iadd v6, v7
return v10
}
; More EBB arguments than registers.
function %ebbargs(i32) -> i32 {
ebb0(v1: i32):
; check: $v1 = spill
v2 = iconst.i32 1
jump ebb1(v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2, v2)
ebb1(v10: i32, v11: i32, v12: i32, v13: i32, v14: i32, v15: i32, v16: i32, v17: i32, v18: i32, v19: i32, v20: i32, v21: i32):
v22 = iadd v10, v11
v23 = iadd v22, v12
v24 = iadd v23, v13
v25 = iadd v24, v14
v26 = iadd v25, v15
v27 = iadd v26, v16
v28 = iadd v27, v17
v29 = iadd v28, v18
v30 = iadd v29, v19
v31 = iadd v30, v20
v32 = iadd v31, v21
v33 = iadd v32, v1
return v33
}
; In straight-line code, the first value defined is spilled.
; That is, in order:
; 1. The argument v1.
; 2. The link register.
; 3. The first computed value, v2
function %use_spilled_value(i32) -> i32 {
; check: ss0 = spill_slot 4
; check: ss1 = spill_slot 4
; check: ss2 = spill_slot 4
ebb0(v1: i32):
; check: $ebb0($(rv1=$V): i32, $(rlink=$V): i32)
; check: ,ss0]$WS $v1 = spill $rv1
; nextln: ,ss1]$WS $(link=$V) = spill $rlink
; not: spill
v2 = iadd_imm v1, 12
; check: $(r1v2=$V) = iadd_imm
; nextln: ,ss2]$WS $v2 = spill $r1v2
v3 = iadd_imm v2, 12
v4 = iadd_imm v3, 12
v5 = iadd_imm v4, 12
v6 = iadd_imm v5, 12
v7 = iadd_imm v6, 12
v8 = iadd_imm v7, 12
v9 = iadd_imm v8, 12
v10 = iadd_imm v9, 12
v11 = iadd_imm v10, 12
v12 = iadd_imm v11, 12
v13 = iadd_imm v12, 12
v14 = iadd_imm v13, 12
; Here we have maximum register pressure, and v2 has been spilled.
; What happens if we use it?
v33 = iadd v2, v14
v32 = iadd v33, v12
v31 = iadd v32, v11
v30 = iadd v31, v10
v29 = iadd v30, v9
v28 = iadd v29, v8
v27 = iadd v28, v7
v26 = iadd v27, v6
v25 = iadd v26, v5
v24 = iadd v25, v4
v23 = iadd v24, v3
v22 = iadd v23, v2
v21 = iadd v22, v1
v20 = iadd v21, v13
v19 = iadd v20, v2
return v21
}

View File

@@ -1,41 +0,0 @@
test simple-gvn
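; GVN recognizes that v3 recomputes v2 and rewrites later uses of v3 to
; v2, as the checks confirm.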
function %simple_redundancy(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = iadd v0, v1
v3 = iadd v0, v1
v4 = imul v2, v3
; check: v4 = imul $v2, $v2
return v4
}
function %cascading_redundancy(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = iadd v0, v1
v3 = iadd v0, v1
v4 = imul v2, v3
v5 = imul v2, v2
v6 = iadd v4, v5
; check: v6 = iadd $v4, $v4
return v6
}
function %redundancies_on_some_paths(i32, i32, i32) -> i32 {
ebb0(v0: i32, v1: i32, v2: i32):
v3 = iadd v0, v1
brz v3, ebb1
v4 = iadd v0, v1
jump ebb2(v4)
; check: jump ebb2(v3)
ebb1:
v5 = iadd v0, v1
jump ebb2(v5)
; check: jump ebb2(v3)
ebb2(v6: i32):
v7 = iadd v0, v1
v8 = iadd v6, v7
; check: v8 = iadd v6, v3
return v8
}

View File

@@ -1,19 +0,0 @@
test verifier
function %test(i32) {
ebb0(v0: i32):
jump ebb1 ; error: terminator
return
ebb1:
jump ebb2
brz v0, ebb3
ebb2:
jump ebb3
ebb3:
return
}
function %test(i32) { ; Ok
ebb0(v0: i32):
return
}

View File

@@ -1,23 +0,0 @@
test verifier
function %test() -> i32 { ; Ok
ebb0:
v0 = iconst.i32 0
v1 = iconst.i32 0
jump ebb2
ebb2:
jump ebb4
ebb4:
jump ebb2
ebb3(v2: i32):
v4 = iadd.i32 v1, v2
jump ebb9(v4)
ebb9(v7: i32):
v9 = iadd.i32 v2, v7
return v9
}

View File

@@ -1,50 +0,0 @@
; Test basic code generation for control flow WebAssembly instructions.
test compile
set is_64bit=0
isa intel haswell
set is_64bit=1
isa intel haswell
function %br_if(i32) -> i32 {
ebb0(v0: i32):
v1 = iconst.i32 1
brz v0, ebb1(v1)
jump ebb2
ebb1(v2: i32):
return v2
ebb2:
jump ebb1(v0)
}
function %br_if_not(i32) -> i32 {
ebb0(v0: i32):
v1 = iconst.i32 1
brnz v0, ebb1(v0)
jump ebb2
ebb1(v2: i32):
return v2
ebb2:
jump ebb1(v0)
}
function %br_if_fallthrough(i32) -> i32 {
ebb0(v0: i32):
v1 = iconst.i32 1
brz v0, ebb1(v1)
; This jump gets converted to a fallthrough.
jump ebb1(v0)
ebb1(v2: i32):
return v2
}
function %undefined() {
ebb0:
trap
}

View File

@@ -1,94 +0,0 @@
; Test code generation for WebAssembly type conversion operators.
test compile
set is_64bit=1
isa intel haswell
function %i32_wrap_i64(i64) -> i32 {
ebb0(v0: i64):
v1 = ireduce.i32 v0
return v1
}
function %i64_extend_s_i32(i32) -> i64 {
ebb0(v0: i32):
v1 = sextend.i64 v0
return v1
}
function %i64_extend_u_i32(i32) -> i64 {
ebb0(v0: i32):
v1 = uextend.i64 v0
return v1
}
; function %i32_trunc_s_f32(f32) -> i32
; function %i32_trunc_u_f32(f32) -> i32
; function %i32_trunc_s_f64(f64) -> i32
; function %i32_trunc_u_f64(f64) -> i32
; function %i64_trunc_s_f32(f32) -> i64
; function %i64_trunc_u_f32(f32) -> i64
; function %i64_trunc_s_f64(f64) -> i64
; function %i64_trunc_u_f64(f64) -> i64
function %f32_trunc_f64(f64) -> f32 {
ebb0(v0: f64):
v1 = fdemote.f32 v0
return v1
}
function %f64_promote_f32(f32) -> f64 {
ebb0(v0: f32):
v1 = fpromote.f64 v0
return v1
}
function %f32_convert_s_i32(i32) -> f32 {
ebb0(v0: i32):
v1 = fcvt_from_sint.f32 v0
return v1
}
function %f64_convert_s_i32(i32) -> f64 {
ebb0(v0: i32):
v1 = fcvt_from_sint.f64 v0
return v1
}
function %f32_convert_s_i64(i64) -> f32 {
ebb0(v0: i64):
v1 = fcvt_from_sint.f32 v0
return v1
}
function %f64_convert_s_i64(i64) -> f64 {
ebb0(v0: i64):
v1 = fcvt_from_sint.f64 v0
return v1
}
; TODO: f*_convert_u_i* (these don't exist on Intel).
function %i32_reinterpret_f32(f32) -> i32 {
ebb0(v0: f32):
v1 = bitcast.i32 v0
return v1
}
function %f32_reinterpret_i32(i32) -> f32 {
ebb0(v0: i32):
v1 = bitcast.f32 v0
return v1
}
function %i64_reinterpret_f64(f64) -> i64 {
ebb0(v0: f64):
v1 = bitcast.i64 v0
return v1
}
function %f64_reinterpret_i64(i64) -> f64 {
ebb0(v0: i64):
v1 = bitcast.f64 v0
return v1
}

View File

@@ -1,52 +0,0 @@
; Test basic code generation for f32 arithmetic WebAssembly instructions.
test compile
set is_64bit=0
isa intel haswell
set is_64bit=1
isa intel haswell
; Constants.
; function %f32_const() -> f32
; Unary operations
; function %f32_abs(f32) -> f32
; function %f32_neg(f32) -> f32
; function %f32_sqrt(f32) -> f32
; function %f32_ceil(f32) -> f32
; function %f32_floor(f32) -> f32
; function %f32_trunc(f32) -> f32
; function %f32_nearest(f32) -> f32
; Binary Operations
function %f32_add(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fadd v0, v1
return v2
}
function %f32_sub(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fsub v0, v1
return v2
}
function %f32_mul(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fmul v0, v1
return v2
}
function %f32_div(f32, f32) -> f32 {
ebb0(v0: f32, v1: f32):
v2 = fdiv v0, v1
return v2
}
; function %f32_min(f32, f32) -> f32
; function %f32_max(f32, f32) -> f32
; function %f32_copysign(f32, f32) -> f32

View File

@@ -1,52 +0,0 @@
; Test basic code generation for f64 arithmetic WebAssembly instructions.
test compile
set is_64bit=0
isa intel haswell
set is_64bit=1
isa intel haswell
; Constants.
; function %f64_const() -> f64
; Unary operations
; function %f64_abs(f64) -> f64
; function %f64_neg(f64) -> f64
; function %f64_sqrt(f64) -> f64
; function %f64_ceil(f64) -> f64
; function %f64_floor(f64) -> f64
; function %f64_trunc(f64) -> f64
; function %f64_nearest(f64) -> f64
; Binary Operations
function %f64_add(f64, f64) -> f64 {
ebb0(v0: f64, v1: f64):
v2 = fadd v0, v1
return v2
}
function %f64_sub(f64, f64) -> f64 {
ebb0(v0: f64, v1: f64):
v2 = fsub v0, v1
return v2
}
function %f64_mul(f64, f64) -> f64 {
ebb0(v0: f64, v1: f64):
v2 = fmul v0, v1
return v2
}
function %f64_div(f64, f64) -> f64 {
ebb0(v0: f64, v1: f64):
v2 = fdiv v0, v1
return v2
}
; function %f64_min(f64, f64) -> f64
; function %f64_max(f64, f64) -> f64
; function %f64_copysign(f64, f64) -> f64

View File

@@ -1,128 +0,0 @@
; Test basic code generation for i32 arithmetic WebAssembly instructions.
test compile
set is_64bit=0
isa intel haswell
set is_64bit=1
isa intel haswell
; Constants.
function %i32_const() -> i32 {
ebb0:
v0 = iconst.i32 0x8765_4321
return v0
}
; Unary operations.
function %i32_clz(i32) -> i32 {
ebb0(v0: i32):
v1 = clz v0
return v1
}
function %i32_ctz(i32) -> i32 {
ebb0(v0: i32):
v1 = ctz v0
return v1
}
function %i32_popcnt(i32) -> i32 {
ebb0(v0: i32):
v1 = popcnt v0
return v1
}
; Binary operations.
function %i32_add(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = iadd v0, v1
return v2
}
function %i32_sub(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = isub v0, v1
return v2
}
function %i32_mul(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = imul v0, v1
return v2
}
function %i32_div_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = sdiv v0, v1
return v2
}
function %i32_div_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = udiv v0, v1
return v2
}
function %i32_rem_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = srem v0, v1
return v2
}
function %i32_rem_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = urem v0, v1
return v2
}
function %i32_and(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = band v0, v1
return v2
}
function %i32_or(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = bor v0, v1
return v2
}
function %i32_xor(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = bxor v0, v1
return v2
}
function %i32_shl(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = ishl v0, v1
return v2
}
function %i32_shr_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = sshr v0, v1
return v2
}
function %i32_shr_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = ushr v0, v1
return v2
}
function %i32_rotl(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = rotl v0, v1
return v2
}
function %i32_rotr(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = rotr v0, v1
return v2
}

View File

@@ -1,85 +0,0 @@
; Test code generation for WebAssembly i32 comparison operators.
test compile
set is_64bit=0
isa intel haswell
set is_64bit=1
isa intel haswell
function %i32_eqz(i32) -> i32 {
ebb0(v0: i32):
v1 = icmp_imm eq v0, 0
v2 = bint.i32 v1
return v2
}
function %i32_eq(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp eq v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_ne(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp ne v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_lt_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp slt v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_lt_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp ult v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_gt_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp sgt v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_gt_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp ugt v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_le_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp sle v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_le_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp ule v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_ge_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp sge v0, v1
v3 = bint.i32 v2
return v3
}
function %i32_ge_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = icmp uge v0, v1
v3 = bint.i32 v2
return v3
}

View File

@@ -1,125 +0,0 @@
; Test basic code generation for i64 arithmetic WebAssembly instructions.
test compile
set is_64bit=1
isa intel haswell
; Constants.
function %i64_const() -> i64 {
ebb0:
v0 = iconst.i64 0x8765_4321
return v0
}
; Unary operations.
function %i64_clz(i64) -> i64 {
ebb0(v0: i64):
v1 = clz v0
return v1
}
function %i64_ctz(i64) -> i64 {
ebb0(v0: i64):
v1 = ctz v0
return v1
}
function %i64_popcnt(i64) -> i64 {
ebb0(v0: i64):
v1 = popcnt v0
return v1
}
; Binary operations.
function %i64_add(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = iadd v0, v1
return v2
}
function %i64_sub(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = isub v0, v1
return v2
}
function %i64_mul(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = imul v0, v1
return v2
}
function %i32_div_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = sdiv v0, v1
return v2
}
function %i32_div_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = udiv v0, v1
return v2
}
function %i32_rem_s(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = srem v0, v1
return v2
}
function %i32_rem_u(i32, i32) -> i32 {
ebb0(v0: i32, v1: i32):
v2 = urem v0, v1
return v2
}
function %i64_and(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = band v0, v1
return v2
}
function %i64_or(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = bor v0, v1
return v2
}
function %i64_xor(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = bxor v0, v1
return v2
}
function %i64_shl(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = ishl v0, v1
return v2
}
function %i64_shr_s(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = sshr v0, v1
return v2
}
function %i64_shr_u(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = ushr v0, v1
return v2
}
function %i64_rotl(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = rotl v0, v1
return v2
}
function %i64_rotr(i64, i64) -> i64 {
ebb0(v0: i64, v1: i64):
v2 = rotr v0, v1
return v2
}

View File

@@ -1,82 +0,0 @@
; Test code generation for WebAssembly i64 comparison operators.
test compile
set is_64bit=1
isa intel haswell
function %i64_eqz(i64) -> i32 {
ebb0(v0: i64):
v1 = icmp_imm eq v0, 0
v2 = bint.i32 v1
return v2
}
function %i64_eq(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_ne(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp ne v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_lt_s(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp slt v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_lt_u(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp ult v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_gt_s(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp sgt v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_gt_u(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp ugt v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_le_s(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp sle v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_le_u(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp ule v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_ge_s(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp sge v0, v1
v3 = bint.i32 v2
return v3
}
function %i64_ge_u(i64, i64) -> i32 {
ebb0(v0: i64, v1: i64):
v2 = icmp uge v0, v1
v3 = bint.i32 v2
return v3
}