Files
wasmtime/cranelift/filetests/isa/intel/binary64.cton
Dan Gohman eab57c0a40 Use large-model addressing for calls when in non-PIC mode.
The main use for non-PIC code at present is JIT code, and JIT code can
live anywhere in memory and reference other symbols defined anywhere in
memory, so it needs to use the "large" code model.

func_addr and globalsym_addr instructions were already using `movabs`
to support arbitrary 64-bit addresses, so this just makes calls be
legalized to support arbitrary 64-bit addresses also.
2018-04-08 22:37:35 -07:00

1280 lines
51 KiB
Plaintext

; binary emission of x86-64 code.
test binemit
set is_64bit
set is_compressed
isa intel haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/intel/binary64.cton | llvm-mc -show-encoding -triple=x86_64
;
; Tests for i64 instructions.
function %I64() {
    ; Exercises binary emission of 64-bit integer instructions.
    ; Each instruction line carries its expected encoding in a `; bin:` comment;
    ; the preceding `; asm:` comment is the AT&T-syntax equivalent that can be
    ; cross-checked with llvm-mc (see the header of this file).
sig0 = ()
fn0 = function %foo()
gv0 = globalsym %some_gv
; Use incoming_arg stack slots because they won't be relocated by the frame
; layout.
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
    ; Integer Constants.
    ; asm: movq $0x01020304f1f2f3f4, %rcx
    [-,%rcx]            v1 = iconst.i64 0x0102_0304_f1f2_f3f4 ; bin: 48 b9 01020304f1f2f3f4
    ; asm: movq $0x11020304f1f2f3f4, %rsi
    [-,%rsi]            v2 = iconst.i64 0x1102_0304_f1f2_f3f4 ; bin: 48 be 11020304f1f2f3f4
    ; asm: movq $0x21020304f1f2f3f4, %r10
    [-,%r10]            v3 = iconst.i64 0x2102_0304_f1f2_f3f4 ; bin: 49 ba 21020304f1f2f3f4
    ; asm: movl $0xff001122, %r8d # 32-bit zero-extended constant.
    [-,%r8]             v4 = iconst.i64 0xff00_1122 ; bin: 41 b8 ff001122
    ; asm: movq $0xffffffff88001122, %r14 # 32-bit sign-extended constant.
    [-,%r14]            v5 = iconst.i64 0xffff_ffff_8800_1122 ; bin: 49 c7 c6 88001122
    ; bconst is emitted as a 32-bit move-immediate of 0/1, not movb.
    ; asm: movl $1, %ecx
    [-,%rcx]            v9007 = bconst.b1 true ; bin: b9 00000001
    ; asm: movl $1, %r10d
    [-,%r10]            v9008 = bconst.b1 true ; bin: 41 ba 00000001
    ; Integer Register-Register Operations.
    ; asm: addq %rsi, %rcx
    [-,%rcx]            v10 = iadd v1, v2 ; bin: 48 01 f1
    ; asm: addq %r10, %rsi
    [-,%rsi]            v11 = iadd v2, v3 ; bin: 4c 01 d6
    ; asm: addq %rcx, %r10
    [-,%r10]            v12 = iadd v3, v1 ; bin: 49 01 ca
    ; asm: subq %rsi, %rcx
    [-,%rcx]            v20 = isub v1, v2 ; bin: 48 29 f1
    ; asm: subq %r10, %rsi
    [-,%rsi]            v21 = isub v2, v3 ; bin: 4c 29 d6
    ; asm: subq %rcx, %r10
    [-,%r10]            v22 = isub v3, v1 ; bin: 49 29 ca
    ; asm: andq %rsi, %rcx
    [-,%rcx]            v30 = band v1, v2 ; bin: 48 21 f1
    ; asm: andq %r10, %rsi
    [-,%rsi]            v31 = band v2, v3 ; bin: 4c 21 d6
    ; asm: andq %rcx, %r10
    [-,%r10]            v32 = band v3, v1 ; bin: 49 21 ca
    ; asm: orq %rsi, %rcx
    [-,%rcx]            v40 = bor v1, v2 ; bin: 48 09 f1
    ; asm: orq %r10, %rsi
    [-,%rsi]            v41 = bor v2, v3 ; bin: 4c 09 d6
    ; asm: orq %rcx, %r10
    [-,%r10]            v42 = bor v3, v1 ; bin: 49 09 ca
    ; asm: xorq %rsi, %rcx
    [-,%rcx]            v50 = bxor v1, v2 ; bin: 48 31 f1
    ; asm: xorq %r10, %rsi
    [-,%rsi]            v51 = bxor v2, v3 ; bin: 4c 31 d6
    ; asm: xorq %rcx, %r10
    [-,%r10]            v52 = bxor v3, v1 ; bin: 49 31 ca
    ; Shift counts are implicitly taken from %cl (v1 is in %rcx).
    ; asm: shlq %cl, %rsi
    [-,%rsi]            v60 = ishl v2, v1 ; bin: 48 d3 e6
    ; asm: shlq %cl, %r10
    [-,%r10]            v61 = ishl v3, v1 ; bin: 49 d3 e2
    ; asm: sarq %cl, %rsi
    [-,%rsi]            v62 = sshr v2, v1 ; bin: 48 d3 fe
    ; asm: sarq %cl, %r10
    [-,%r10]            v63 = sshr v3, v1 ; bin: 49 d3 fa
    ; asm: shrq %cl, %rsi
    [-,%rsi]            v64 = ushr v2, v1 ; bin: 48 d3 ee
    ; asm: shrq %cl, %r10
    [-,%r10]            v65 = ushr v3, v1 ; bin: 49 d3 ea
    ; asm: rolq %cl, %rsi
    [-,%rsi]            v66 = rotl v2, v1 ; bin: 48 d3 c6
    ; asm: rolq %cl, %r10
    [-,%r10]            v67 = rotl v3, v1 ; bin: 49 d3 c2
    ; asm: rorq %cl, %rsi
    [-,%rsi]            v68 = rotr v2, v1 ; bin: 48 d3 ce
    ; asm: rorq %cl, %r10
    [-,%r10]            v69 = rotr v3, v1 ; bin: 49 d3 ca
    ; Integer Register-Immediate Operations.
    ; These 64-bit ops all use a 32-bit immediate that is sign-extended to 64 bits.
    ; Some take 8-bit immediates that are sign-extended to 64 bits.
    ; asm: addq $-100000, %rcx
    [-,%rcx]            v70 = iadd_imm v1, -100000 ; bin: 48 81 c1 fffe7960
    ; asm: addq $100000, %rsi
    [-,%rsi]            v71 = iadd_imm v2, 100000 ; bin: 48 81 c6 000186a0
    ; asm: addq $0x7fffffff, %r10
    [-,%r10]            v72 = iadd_imm v3, 0x7fff_ffff ; bin: 49 81 c2 7fffffff
    ; asm: addq $100, %r8
    [-,%r8]             v73 = iadd_imm v4, 100 ; bin: 49 83 c0 64
    ; asm: addq $-100, %r14
    [-,%r14]            v74 = iadd_imm v5, -100 ; bin: 49 83 c6 9c
    ; asm: andq $-100000, %rcx
    [-,%rcx]            v80 = band_imm v1, -100000 ; bin: 48 81 e1 fffe7960
    ; asm: andq $100000, %rsi
    [-,%rsi]            v81 = band_imm v2, 100000 ; bin: 48 81 e6 000186a0
    ; asm: andq $0x7fffffff, %r10
    [-,%r10]            v82 = band_imm v3, 0x7fff_ffff ; bin: 49 81 e2 7fffffff
    ; asm: andq $100, %r8
    [-,%r8]             v83 = band_imm v4, 100 ; bin: 49 83 e0 64
    ; asm: andq $-100, %r14
    [-,%r14]            v84 = band_imm v5, -100 ; bin: 49 83 e6 9c
    ; asm: orq $-100000, %rcx
    [-,%rcx]            v90 = bor_imm v1, -100000 ; bin: 48 81 c9 fffe7960
    ; asm: orq $100000, %rsi
    [-,%rsi]            v91 = bor_imm v2, 100000 ; bin: 48 81 ce 000186a0
    ; asm: orq $0x7fffffff, %r10
    [-,%r10]            v92 = bor_imm v3, 0x7fff_ffff ; bin: 49 81 ca 7fffffff
    ; asm: orq $100, %r8
    [-,%r8]             v93 = bor_imm v4, 100 ; bin: 49 83 c8 64
    ; asm: orq $-100, %r14
    [-,%r14]            v94 = bor_imm v5, -100 ; bin: 49 83 ce 9c
    ; asm: xorq $-100000, %rcx
    [-,%rcx]            v100 = bxor_imm v1, -100000 ; bin: 48 81 f1 fffe7960
    ; asm: xorq $100000, %rsi
    [-,%rsi]            v101 = bxor_imm v2, 100000 ; bin: 48 81 f6 000186a0
    ; asm: xorq $0x7fffffff, %r10
    [-,%r10]            v102 = bxor_imm v3, 0x7fff_ffff ; bin: 49 81 f2 7fffffff
    ; asm: xorq $100, %r8
    [-,%r8]             v103 = bxor_imm v4, 100 ; bin: 49 83 f0 64
    ; asm: xorq $-100, %r14
    [-,%r14]            v104 = bxor_imm v5, -100 ; bin: 49 83 f6 9c
    ; Register copies.
    ; asm: movq %rsi, %rcx
    [-,%rcx]            v110 = copy v2 ; bin: 48 89 f1
    ; asm: movq %r10, %rsi
    [-,%rsi]            v111 = copy v3 ; bin: 4c 89 d6
    ; asm: movq %rcx, %r10
    [-,%r10]            v112 = copy v1 ; bin: 49 89 ca
    ; Copy Special
    ; asm: movq %rsp, %rbp
    copy_special %rsp -> %rbp ; bin: 48 89 e5
    ; asm: movq %r10, %r11
    copy_special %r10 -> %r11 ; bin: 4d 89 d3
    ; asm: movq %rsp, %r11
    copy_special %rsp -> %r11 ; bin: 49 89 e3
    ; asm: movq %r10, %rsp
    copy_special %r10 -> %rsp ; bin: 4c 89 d4
    ; Load/Store instructions.
    ; The `heap_oob` token in the expected encodings is the trap code the
    ; emitter records for each memory access, ahead of the instruction bytes.
    ; Register indirect addressing with no displacement.
    ; asm: movq %rcx, (%r10)
    store v1, v3 ; bin: heap_oob 49 89 0a
    ; asm: movq %r10, (%rcx)
    store v3, v1 ; bin: heap_oob 4c 89 11
    ; asm: movl %ecx, (%r10)
    istore32 v1, v3 ; bin: heap_oob 41 89 0a
    ; asm: movl %r10d, (%rcx)
    istore32 v3, v1 ; bin: heap_oob 44 89 11
    ; asm: movw %cx, (%r10)
    istore16 v1, v3 ; bin: heap_oob 66 41 89 0a
    ; asm: movw %r10w, (%rcx)
    istore16 v3, v1 ; bin: heap_oob 66 44 89 11
    ; asm: movb %cl, (%r10)
    istore8 v1, v3 ; bin: heap_oob 41 88 0a
    ; asm: movb %r10b, (%rcx)
    istore8 v3, v1 ; bin: heap_oob 44 88 11
    ; asm: movq (%rcx), %r14
    [-,%r14]            v120 = load.i64 v1 ; bin: heap_oob 4c 8b 31
    ; asm: movq (%r10), %rdx
    [-,%rdx]            v121 = load.i64 v3 ; bin: heap_oob 49 8b 12
    ; asm: movl (%rcx), %r14d
    [-,%r14]            v122 = uload32.i64 v1 ; bin: heap_oob 44 8b 31
    ; asm: movl (%r10), %edx
    [-,%rdx]            v123 = uload32.i64 v3 ; bin: heap_oob 41 8b 12
    ; asm: movslq (%rcx), %r14
    [-,%r14]            v124 = sload32.i64 v1 ; bin: heap_oob 4c 63 31
    ; asm: movslq (%r10), %rdx
    [-,%rdx]            v125 = sload32.i64 v3 ; bin: heap_oob 49 63 12
    ; asm: movzwq (%rcx), %r14
    [-,%r14]            v126 = uload16.i64 v1 ; bin: heap_oob 4c 0f b7 31
    ; asm: movzwq (%r10), %rdx
    [-,%rdx]            v127 = uload16.i64 v3 ; bin: heap_oob 49 0f b7 12
    ; asm: movswq (%rcx), %r14
    [-,%r14]            v128 = sload16.i64 v1 ; bin: heap_oob 4c 0f bf 31
    ; asm: movswq (%r10), %rdx
    [-,%rdx]            v129 = sload16.i64 v3 ; bin: heap_oob 49 0f bf 12
    ; asm: movzbq (%rcx), %r14
    [-,%r14]            v130 = uload8.i64 v1 ; bin: heap_oob 4c 0f b6 31
    ; asm: movzbq (%r10), %rdx
    [-,%rdx]            v131 = uload8.i64 v3 ; bin: heap_oob 49 0f b6 12
    ; asm: movsbq (%rcx), %r14
    [-,%r14]            v132 = sload8.i64 v1 ; bin: heap_oob 4c 0f be 31
    ; asm: movsbq (%r10), %rdx
    [-,%rdx]            v133 = sload8.i64 v3 ; bin: heap_oob 49 0f be 12
    ; Register-indirect with 8-bit signed displacement.
    ; asm: movq %rcx, 100(%r10)
    store v1, v3+100 ; bin: heap_oob 49 89 4a 64
    ; asm: movq %r10, -100(%rcx)
    store v3, v1-100 ; bin: heap_oob 4c 89 51 9c
    ; asm: movl %ecx, 100(%r10)
    istore32 v1, v3+100 ; bin: heap_oob 41 89 4a 64
    ; asm: movl %r10d, -100(%rcx)
    istore32 v3, v1-100 ; bin: heap_oob 44 89 51 9c
    ; asm: movw %cx, 100(%r10)
    istore16 v1, v3+100 ; bin: heap_oob 66 41 89 4a 64
    ; asm: movw %r10w, -100(%rcx)
    istore16 v3, v1-100 ; bin: heap_oob 66 44 89 51 9c
    ; asm: movb %cl, 100(%r10)
    istore8 v1, v3+100 ; bin: heap_oob 41 88 4a 64
    ; asm: movb %r10b, 100(%rcx)
    istore8 v3, v1+100 ; bin: heap_oob 44 88 51 64
    ; asm: movq 50(%rcx), %r10
    [-,%r10]            v140 = load.i64 v1+50 ; bin: heap_oob 4c 8b 51 32
    ; asm: movq -50(%r10), %rdx
    [-,%rdx]            v141 = load.i64 v3-50 ; bin: heap_oob 49 8b 52 ce
    ; asm: movl 50(%rcx), %edi
    [-,%rdi]            v142 = uload32.i64 v1+50 ; bin: heap_oob 8b 79 32
    ; asm: movl -50(%rsi), %edx
    [-,%rdx]            v143 = uload32.i64 v2-50 ; bin: heap_oob 8b 56 ce
    ; asm: movslq 50(%rcx), %rdi
    [-,%rdi]            v144 = sload32.i64 v1+50 ; bin: heap_oob 48 63 79 32
    ; asm: movslq -50(%rsi), %rdx
    [-,%rdx]            v145 = sload32.i64 v2-50 ; bin: heap_oob 48 63 56 ce
    ; asm: movzwq 50(%rcx), %rdi
    [-,%rdi]            v146 = uload16.i64 v1+50 ; bin: heap_oob 48 0f b7 79 32
    ; asm: movzwq -50(%rsi), %rdx
    [-,%rdx]            v147 = uload16.i64 v2-50 ; bin: heap_oob 48 0f b7 56 ce
    ; asm: movswq 50(%rcx), %rdi
    [-,%rdi]            v148 = sload16.i64 v1+50 ; bin: heap_oob 48 0f bf 79 32
    ; asm: movswq -50(%rsi), %rdx
    [-,%rdx]            v149 = sload16.i64 v2-50 ; bin: heap_oob 48 0f bf 56 ce
    ; asm: movzbq 50(%rcx), %rdi
    [-,%rdi]            v150 = uload8.i64 v1+50 ; bin: heap_oob 48 0f b6 79 32
    ; asm: movzbq -50(%rsi), %rdx
    [-,%rdx]            v151 = uload8.i64 v2-50 ; bin: heap_oob 48 0f b6 56 ce
    ; asm: movsbq 50(%rcx), %rdi
    [-,%rdi]            v152 = sload8.i64 v1+50 ; bin: heap_oob 48 0f be 79 32
    ; asm: movsbq -50(%rsi), %rdx
    [-,%rdx]            v153 = sload8.i64 v2-50 ; bin: heap_oob 48 0f be 56 ce
    ; Register-indirect with 32-bit signed displacement.
    ; asm: movq %rcx, 10000(%r10)
    store v1, v3+10000 ; bin: heap_oob 49 89 8a 00002710
    ; asm: movq %r10, -10000(%rcx)
    store v3, v1-10000 ; bin: heap_oob 4c 89 91 ffffd8f0
    ; asm: movl %ecx, 10000(%rsi)
    istore32 v1, v2+10000 ; bin: heap_oob 89 8e 00002710
    ; asm: movl %esi, -10000(%rcx)
    istore32 v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
    ; asm: movw %cx, 10000(%rsi)
    istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
    ; asm: movw %si, -10000(%rcx)
    istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
    ; asm: movb %cl, 10000(%rsi)
    istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
    ; asm: movb %sil, 10000(%rcx)
    istore8 v2, v1+10000 ; bin: heap_oob 40 88 b1 00002710
    ; asm: movq 50000(%rcx), %r10
    [-,%r10]            v160 = load.i64 v1+50000 ; bin: heap_oob 4c 8b 91 0000c350
    ; asm: movq -50000(%r10), %rdx
    [-,%rdx]            v161 = load.i64 v3-50000 ; bin: heap_oob 49 8b 92 ffff3cb0
    ; asm: movl 50000(%rcx), %edi
    [-,%rdi]            v162 = uload32.i64 v1+50000 ; bin: heap_oob 8b b9 0000c350
    ; asm: movl -50000(%rsi), %edx
    [-,%rdx]            v163 = uload32.i64 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
    ; asm: movslq 50000(%rcx), %rdi
    [-,%rdi]            v164 = sload32.i64 v1+50000 ; bin: heap_oob 48 63 b9 0000c350
    ; asm: movslq -50000(%rsi), %rdx
    [-,%rdx]            v165 = sload32.i64 v2-50000 ; bin: heap_oob 48 63 96 ffff3cb0
    ; asm: movzwq 50000(%rcx), %rdi
    [-,%rdi]            v166 = uload16.i64 v1+50000 ; bin: heap_oob 48 0f b7 b9 0000c350
    ; asm: movzwq -50000(%rsi), %rdx
    [-,%rdx]            v167 = uload16.i64 v2-50000 ; bin: heap_oob 48 0f b7 96 ffff3cb0
    ; asm: movswq 50000(%rcx), %rdi
    [-,%rdi]            v168 = sload16.i64 v1+50000 ; bin: heap_oob 48 0f bf b9 0000c350
    ; asm: movswq -50000(%rsi), %rdx
    [-,%rdx]            v169 = sload16.i64 v2-50000 ; bin: heap_oob 48 0f bf 96 ffff3cb0
    ; asm: movzbq 50000(%rcx), %rdi
    [-,%rdi]            v170 = uload8.i64 v1+50000 ; bin: heap_oob 48 0f b6 b9 0000c350
    ; asm: movzbq -50000(%rsi), %rdx
    [-,%rdx]            v171 = uload8.i64 v2-50000 ; bin: heap_oob 48 0f b6 96 ffff3cb0
    ; asm: movsbq 50000(%rcx), %rdi
    [-,%rdi]            v172 = sload8.i64 v1+50000 ; bin: heap_oob 48 0f be b9 0000c350
    ; asm: movsbq -50000(%rsi), %rdx
    [-,%rdx]            v173 = sload8.i64 v2-50000 ; bin: heap_oob 48 0f be 96 ffff3cb0
    ; More arithmetic.
    ; asm: imulq %rsi, %rcx
    [-,%rcx]            v180 = imul v1, v2 ; bin: 48 0f af ce
    ; asm: imulq %r10, %rsi
    [-,%rsi]            v181 = imul v2, v3 ; bin: 49 0f af f2
    ; asm: imulq %rcx, %r10
    [-,%r10]            v182 = imul v3, v1 ; bin: 4c 0f af d1
    ; x86_{s,u}divmodx take the dividend in %rdx:%rax; int_divz is the
    ; recorded divide-by-zero trap code.
    [-,%rax]            v190 = iconst.i64 1
    [-,%rdx]            v191 = iconst.i64 2
    ; asm: idivq %rcx
    [-,%rax,%rdx]       v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: int_divz 48 f7 f9
    ; asm: idivq %rsi
    [-,%rax,%rdx]       v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: int_divz 48 f7 fe
    ; asm: idivq %r10
    [-,%rax,%rdx]       v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: int_divz 49 f7 fa
    ; asm: divq %rcx
    [-,%rax,%rdx]       v198, v199 = x86_udivmodx v190, v191, v1 ; bin: int_divz 48 f7 f1
    ; asm: divq %rsi
    [-,%rax,%rdx]       v200, v201 = x86_udivmodx v190, v191, v2 ; bin: int_divz 48 f7 f6
    ; asm: divq %r10
    [-,%rax,%rdx]       v202, v203 = x86_udivmodx v190, v191, v3 ; bin: int_divz 49 f7 f2
    ; double-length multiply instructions, 64 bit
    [-,%rax]            v1001 = iconst.i64 1
    [-,%r15]            v1002 = iconst.i64 2
    ; asm: mulq %r15
    [-,%rax,%rdx]       v1003, v1004 = x86_umulx v1001, v1002 ; bin: 49 f7 e7
    ; asm: imulq %r15
    [-,%rax,%rdx]       v1005, v1006 = x86_smulx v1001, v1002 ; bin: 49 f7 ef
    ; double-length multiply instructions, 32 bit
    [-,%rax]            v1011 = iconst.i32 1
    [-,%r15]            v1012 = iconst.i32 2
    [-,%rcx]            v1017 = iconst.i32 3
    ; asm: mull %r15d
    [-,%rax,%rdx]       v1013, v1014 = x86_umulx v1011, v1012 ; bin: 41 f7 e7
    ; asm: imull %r15d
    [-,%rax,%rdx]       v1015, v1016 = x86_smulx v1011, v1012 ; bin: 41 f7 ef
    ; asm: mull %ecx
    [-,%rax,%rdx]       v1018, v1019 = x86_umulx v1011, v1017 ; bin: f7 e1
    ; asm: imull %ecx
    [-,%rax,%rdx]       v1020, v1021 = x86_smulx v1011, v1017 ; bin: f7 e9
    ; Bit-counting instructions.
    ; asm: popcntq %rsi, %rcx
    [-,%rcx]            v210 = popcnt v2 ; bin: f3 48 0f b8 ce
    ; asm: popcntq %r10, %rsi
    [-,%rsi]            v211 = popcnt v3 ; bin: f3 49 0f b8 f2
    ; asm: popcntq %rcx, %r10
    [-,%r10]            v212 = popcnt v1 ; bin: f3 4c 0f b8 d1
    ; asm: lzcntq %rsi, %rcx
    [-,%rcx]            v213 = clz v2 ; bin: f3 48 0f bd ce
    ; asm: lzcntq %r10, %rsi
    [-,%rsi]            v214 = clz v3 ; bin: f3 49 0f bd f2
    ; asm: lzcntq %rcx, %r10
    [-,%r10]            v215 = clz v1 ; bin: f3 4c 0f bd d1
    ; asm: tzcntq %rsi, %rcx
    [-,%rcx]            v216 = ctz v2 ; bin: f3 48 0f bc ce
    ; asm: tzcntq %r10, %rsi
    [-,%rsi]            v217 = ctz v3 ; bin: f3 49 0f bc f2
    ; asm: tzcntq %rcx, %r10
    [-,%r10]            v218 = ctz v1 ; bin: f3 4c 0f bc d1
    ; Integer comparisons.
    ; icmp is a cmp followed by a setcc into an 8-bit register.
    ; asm: cmpq %rsi, %rcx
    ; asm: sete %bl
    [-,%rbx]            v300 = icmp eq v1, v2 ; bin: 48 39 f1 0f 94 c3
    ; asm: cmpq %r10, %rsi
    ; asm: sete %dl
    [-,%rdx]            v301 = icmp eq v2, v3 ; bin: 4c 39 d6 0f 94 c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setne %bl
    [-,%rbx]            v302 = icmp ne v1, v2 ; bin: 48 39 f1 0f 95 c3
    ; asm: cmpq %r10, %rsi
    ; asm: setne %dl
    [-,%rdx]            v303 = icmp ne v2, v3 ; bin: 4c 39 d6 0f 95 c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setl %bl
    [-,%rbx]            v304 = icmp slt v1, v2 ; bin: 48 39 f1 0f 9c c3
    ; asm: cmpq %r10, %rsi
    ; asm: setl %dl
    [-,%rdx]            v305 = icmp slt v2, v3 ; bin: 4c 39 d6 0f 9c c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setge %bl
    [-,%rbx]            v306 = icmp sge v1, v2 ; bin: 48 39 f1 0f 9d c3
    ; asm: cmpq %r10, %rsi
    ; asm: setge %dl
    [-,%rdx]            v307 = icmp sge v2, v3 ; bin: 4c 39 d6 0f 9d c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setg %bl
    [-,%rbx]            v308 = icmp sgt v1, v2 ; bin: 48 39 f1 0f 9f c3
    ; asm: cmpq %r10, %rsi
    ; asm: setg %dl
    [-,%rdx]            v309 = icmp sgt v2, v3 ; bin: 4c 39 d6 0f 9f c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setle %bl
    [-,%rbx]            v310 = icmp sle v1, v2 ; bin: 48 39 f1 0f 9e c3
    ; asm: cmpq %r10, %rsi
    ; asm: setle %dl
    [-,%rdx]            v311 = icmp sle v2, v3 ; bin: 4c 39 d6 0f 9e c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setb %bl
    [-,%rbx]            v312 = icmp ult v1, v2 ; bin: 48 39 f1 0f 92 c3
    ; asm: cmpq %r10, %rsi
    ; asm: setb %dl
    [-,%rdx]            v313 = icmp ult v2, v3 ; bin: 4c 39 d6 0f 92 c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setae %bl
    [-,%rbx]            v314 = icmp uge v1, v2 ; bin: 48 39 f1 0f 93 c3
    ; asm: cmpq %r10, %rsi
    ; asm: setae %dl
    [-,%rdx]            v315 = icmp uge v2, v3 ; bin: 4c 39 d6 0f 93 c2
    ; asm: cmpq %rsi, %rcx
    ; asm: seta %bl
    [-,%rbx]            v316 = icmp ugt v1, v2 ; bin: 48 39 f1 0f 97 c3
    ; asm: cmpq %r10, %rsi
    ; asm: seta %dl
    [-,%rdx]            v317 = icmp ugt v2, v3 ; bin: 4c 39 d6 0f 97 c2
    ; asm: cmpq %rsi, %rcx
    ; asm: setbe %bl
    [-,%rbx]            v318 = icmp ule v1, v2 ; bin: 48 39 f1 0f 96 c3
    ; asm: cmpq %r10, %rsi
    ; asm: setbe %dl
    [-,%rdx]            v319 = icmp ule v2, v3 ; bin: 4c 39 d6 0f 96 c2
    ; asm: cmpq $37, %rcx
    ; asm: setl %bl
    [-,%rbx]            v320 = icmp_imm slt v1, 37 ; bin: 48 83 f9 25 0f 9c c3
    ; asm: cmpq $100000, %rcx
    ; asm: setl %bl
    [-,%rbx]            v321 = icmp_imm slt v1, 100000 ; bin: 48 81 f9 000186a0 0f 9c c3
    ; Bool-to-int conversions.
    ; asm: movzbq %bl, %rcx
    [-,%rcx]            v350 = bint.i64 v300 ; bin: 0f b6 cb
    ; asm: movzbq %dl, %rsi
    [-,%rsi]            v351 = bint.i64 v301 ; bin: 0f b6 f2
    ; TODO: x86-64 can't encode a direct call to an arbitrary 64-bit address in
    ; a single instruction. When we add a concept of colocated definitions, this
    ; test can be re-enabled.
    ; disabled: asm: call foo
    ; disabled: call fn0() ; bin: e8 PCRel4(%foo-4) 00000000
    ; func_addr materializes the callee with movabs and an Abs8 relocation.
    ; asm: movabsq $0, %rcx
    [-,%rcx]            v400 = func_addr.i64 fn0 ; bin: 48 b9 Abs8(%foo) 0000000000000000
    ; asm: movabsq $0, %rsi
    [-,%rsi]            v401 = func_addr.i64 fn0 ; bin: 48 be Abs8(%foo) 0000000000000000
    ; asm: movabsq $0, %r10
    [-,%r10]            v402 = func_addr.i64 fn0 ; bin: 49 ba Abs8(%foo) 0000000000000000
    ; asm: call *%rcx
    call_indirect sig0, v400() ; bin: ff d1
    ; asm: call *%rsi
    call_indirect sig0, v401() ; bin: ff d6
    ; asm: call *%r10
    call_indirect sig0, v402() ; bin: 41 ff d2
    ; asm: movabsq $-1, %rcx
    [-,%rcx]            v450 = globalsym_addr.i64 gv0 ; bin: 48 b9 Abs8(%some_gv) 0000000000000000
    ; asm: movabsq $-1, %rsi
    [-,%rsi]            v451 = globalsym_addr.i64 gv0 ; bin: 48 be Abs8(%some_gv) 0000000000000000
    ; asm: movabsq $-1, %r10
    [-,%r10]            v452 = globalsym_addr.i64 gv0 ; bin: 49 ba Abs8(%some_gv) 0000000000000000
    ; Spill / Fill.
    ; ss1 is at offset -1024; the emitted %rsp displacement is 1032 (0x408).
    ; asm: movq %rcx, 1032(%rsp)
    [-,ss1]             v500 = spill v1 ; bin: 48 89 8c 24 00000408
    ; asm: movq %rsi, 1032(%rsp)
    [-,ss1]             v501 = spill v2 ; bin: 48 89 b4 24 00000408
    ; asm: movq %r10, 1032(%rsp)
    [-,ss1]             v502 = spill v3 ; bin: 4c 89 94 24 00000408
    ; asm: movq 1032(%rsp), %rcx
    [-,%rcx]            v510 = fill v500 ; bin: 48 8b 8c 24 00000408
    ; asm: movq 1032(%rsp), %rsi
    [-,%rsi]            v511 = fill v501 ; bin: 48 8b b4 24 00000408
    ; asm: movq 1032(%rsp), %r10
    [-,%r10]            v512 = fill v502 ; bin: 4c 8b 94 24 00000408
    ; asm: movq %rcx, 1032(%rsp)
    regspill v1, %rcx -> ss1 ; bin: 48 89 8c 24 00000408
    ; asm: movq 1032(%rsp), %rcx
    regfill v1, ss1 -> %rcx ; bin: 48 8b 8c 24 00000408
    ; Push and Pop
    ; asm: pushq %rcx
    x86_push v1 ; bin: 51
    ; asm: pushq %r10
    x86_push v3 ; bin: 41 52
    ; asm: popq %rcx
    [-,%rcx]            v513 = x86_pop.i64 ; bin: 59
    ; asm: popq %r10
    [-,%r10]            v514 = x86_pop.i64 ; bin: 41 5a
    ; Adjust Stack Pointer
    ; asm: addq $64, %rsp
    adjust_sp_imm 64 ; bin: 48 83 c4 40
    ; asm: addq $-64, %rsp
    adjust_sp_imm -64 ; bin: 48 83 c4 c0
    ; asm: addq $1024, %rsp
    adjust_sp_imm 1024 ; bin: 48 81 c4 00000400
    ; asm: addq $-1024, %rsp
    adjust_sp_imm -1024 ; bin: 48 81 c4 fffffc00
    ; asm: addq $2147483647, %rsp
    adjust_sp_imm 2147483647 ; bin: 48 81 c4 7fffffff
    ; asm: addq $-2147483648, %rsp
    adjust_sp_imm -2147483648 ; bin: 48 81 c4 80000000
    ; Shift immediates
    ; asm: shlq $12, %rsi
    [-,%rsi]            v515 = ishl_imm v2, 12 ; bin: 48 c1 e6 0c
    ; asm: shlq $13, %r8
    [-,%r8]             v516 = ishl_imm v4, 13 ; bin: 49 c1 e0 0d
    ; asm: sarq $32, %rsi
    [-,%rsi]            v517 = sshr_imm v2, 32 ; bin: 48 c1 fe 20
    ; asm: sarq $33, %r8
    [-,%r8]             v518 = sshr_imm v4, 33 ; bin: 49 c1 f8 21
    ; asm: shrq $62, %rsi
    [-,%rsi]            v519 = ushr_imm v2, 62 ; bin: 48 c1 ee 3e
    ; asm: shrq $63, %r8
    [-,%r8]             v520 = ushr_imm v4, 63 ; bin: 49 c1 e8 3f
    ; Conditional branches: test + short (rel8) jcc; offsets reach ebb1.
    ; asm: testq %rcx, %rcx
    ; asm: je ebb1
    brz v1, ebb1 ; bin: 48 85 c9 74 1b
    ; asm: testq %rsi, %rsi
    ; asm: je ebb1
    brz v2, ebb1 ; bin: 48 85 f6 74 16
    ; asm: testq %r10, %r10
    ; asm: je ebb1
    brz v3, ebb1 ; bin: 4d 85 d2 74 11
    ; asm: testq %rcx, %rcx
    ; asm: jne ebb1
    brnz v1, ebb1 ; bin: 48 85 c9 75 0c
    ; asm: testq %rsi, %rsi
    ; asm: jne ebb1
    brnz v2, ebb1 ; bin: 48 85 f6 75 07
    ; asm: testq %r10, %r10
    ; asm: jne ebb1
    brnz v3, ebb1 ; bin: 4d 85 d2 75 02
    ; asm: jmp ebb2
    jump ebb2 ; bin: eb 01
    ; asm: ebb1:
ebb1:
    return ; bin: c3
    ; asm: ebb2:
ebb2:
    jump ebb1 ; bin: eb fd
}
; CPU flag instructions.
function %cpu_flags_I64() {
    ; Exercises instructions that produce and consume the CPU flags register
    ; (%rflags): ifcmp/ifcmp_imm/ifcmp_sp, brif, trueif, and trapif.
ebb0:
    [-,%rcx]            v1 = iconst.i64 1
    [-,%r10]            v2 = iconst.i64 2
    jump ebb1
ebb1:
    ; asm: cmpq %r10, %rcx
    [-,%rflags]         v10 = ifcmp v1, v2 ; bin: 4c 39 d1
    ; asm: cmpq %rcx, %r10
    [-,%rflags]         v11 = ifcmp v2, v1 ; bin: 49 39 ca
    ; Flag-consuming branches: short (rel8) jcc back to ebb1.
    ; asm: je ebb1
    brif eq v11, ebb1 ; bin: 74 f8
    ; asm: jne ebb1
    brif ne v11, ebb1 ; bin: 75 f6
    ; asm: jl ebb1
    brif slt v11, ebb1 ; bin: 7c f4
    ; asm: jge ebb1
    brif sge v11, ebb1 ; bin: 7d f2
    ; asm: jg ebb1
    brif sgt v11, ebb1 ; bin: 7f f0
    ; asm: jle ebb1
    brif sle v11, ebb1 ; bin: 7e ee
    ; asm: jb ebb1
    brif ult v11, ebb1 ; bin: 72 ec
    ; asm: jae ebb1
    brif uge v11, ebb1 ; bin: 73 ea
    ; asm: ja ebb1
    brif ugt v11, ebb1 ; bin: 77 e8
    ; asm: jbe ebb1
    brif ule v11, ebb1 ; bin: 76 e6
    ; trueif materializes a flag condition as a bool via setcc.
    ; asm: sete %bl
    [-,%rbx]            v20 = trueif eq v11 ; bin: 0f 94 c3
    ; asm: setne %bl
    [-,%rbx]            v21 = trueif ne v11 ; bin: 0f 95 c3
    ; asm: setl %dl
    [-,%rdx]            v22 = trueif slt v11 ; bin: 0f 9c c2
    ; asm: setge %dl
    [-,%rdx]            v23 = trueif sge v11 ; bin: 0f 9d c2
    ; asm: setg %r10b
    [-,%r10]            v24 = trueif sgt v11 ; bin: 41 0f 9f c2
    ; asm: setle %r10b
    [-,%r10]            v25 = trueif sle v11 ; bin: 41 0f 9e c2
    ; asm: setb %r14b
    [-,%r14]            v26 = trueif ult v11 ; bin: 41 0f 92 c6
    ; asm: setae %r14b
    [-,%r14]            v27 = trueif uge v11 ; bin: 41 0f 93 c6
    ; asm: seta %r11b
    [-,%r11]            v28 = trueif ugt v11 ; bin: 41 0f 97 c3
    ; asm: setbe %r11b
    [-,%r11]            v29 = trueif ule v11 ; bin: 41 0f 96 c3
    ; The trapif instructions are encoded as macros: a conditional jump over a ud2.
    ; The jump uses the INVERSE condition; user0 is the recorded trap code.
    ; asm: jne .+4; ud2
    trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
    ; asm: je .+4; ud2
    trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
    ; asm: jnl .+4; ud2
    trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
    ; asm: jnge .+4; ud2
    trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
    ; asm: jng .+4; ud2
    trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
    ; asm: jnle .+4; ud2
    trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
    ; asm: jnb .+4; ud2
    trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
    ; asm: jnae .+4; ud2
    trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
    ; asm: jna .+4; ud2
    trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
    ; asm: jnbe .+4; ud2
    trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
    ; Stack check.
    ; asm: cmpq %rsp, %rcx
    [-,%rflags]         v40 = ifcmp_sp v1 ; bin: 48 39 e1
    ; asm: cmpq %rsp, %r10
    [-,%rflags]         v41 = ifcmp_sp v2 ; bin: 49 39 e2
    ; ifcmp_imm: 8-bit immediates use opcode 83, 32-bit immediates use 81.
    ; asm: cmpq $-100, %rcx
    [-,%rflags]         v522 = ifcmp_imm v1, -100 ; bin: 48 83 f9 9c
    ; asm: cmpq $100, %r10
    [-,%rflags]         v523 = ifcmp_imm v2, 100 ; bin: 49 83 fa 64
    ; asm: cmpq $-10000, %rcx
    [-,%rflags]         v524 = ifcmp_imm v1, -10000 ; bin: 48 81 f9 ffffd8f0
    ; asm: cmpq $10000, %r10
    [-,%rflags]         v525 = ifcmp_imm v2, 10000 ; bin: 49 81 fa 00002710
    return
}
; Test for the encoding of outgoing_arg stack slots.
function %outargs() {
    ; Checks that spills to outgoing_arg slots use the slot offset directly as
    ; the %rsp displacement (outgoing args sit at the bottom of the frame).
ss0 = incoming_arg 16, offset -16
ss1 = outgoing_arg 8, offset 8
ss2 = outgoing_arg 8, offset 0
ebb0:
    [-,%rcx]            v1 = iconst.i64 1
    ; asm: movq %rcx, 8(%rsp)
    [-,ss1]             v10 = spill v1 ; bin: 48 89 8c 24 00000008
    ; asm: movq %rcx, (%rsp)
    [-,ss2]             v11 = spill v1 ; bin: 48 89 8c 24 00000000
    return
}
; Tests for i32 instructions in 64-bit mode.
;
; Note that many i32 instructions can be encoded both with and without a REX
; prefix if they only use the low 8 registers. Here, we are testing the REX
; encodings which are chosen by default. Switching to non-REX encodings should
; be done by an instruction shrinking pass.
function %I32() {
sig0 = ()
fn0 = function %foo()
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
ebb0:
; Integer Constants.
; asm: movl $0x01020304, %ecx
[-,%rcx] v1 = iconst.i32 0x0102_0304 ; bin: b9 01020304
; asm: movl $0x11020304, %esi
[-,%rsi] v2 = iconst.i32 0x1102_0304 ; bin: be 11020304
; asm: movl $0x21020304, %r10d
[-,%r10] v3 = iconst.i32 0x2102_0304 ; bin: 41 ba 21020304
; asm: movl $0xff001122, %r8d
[-,%r8] v4 = iconst.i32 0xff00_1122 ; bin: 41 b8 ff001122
; asm: movl $0x88001122, %r14d
[-,%r14] v5 = iconst.i32 0xffff_ffff_8800_1122 ; bin: 41 be 88001122
; Load/Store instructions.
; Register indirect addressing with no displacement.
; asm: movl (%rcx), %edi
[-,%rdi] v10 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%rsi), %edx
[-,%rdx] v11 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%rcx), %edi
[-,%rdi] v12 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%rsi), %edx
[-,%rdx] v13 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%rcx), %edi
[-,%rdi] v14 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%rsi), %edx
[-,%rdx] v15 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%rcx), %edi
[-,%rdi] v16 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%rsi), %edx
[-,%rdx] v17 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%rcx), %edi
[-,%rdi] v18 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%rsi), %edx
[-,%rdx] v19 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl 50(%rcx), %edi
[-,%rdi] v20 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%rsi), %edx
[-,%rdx] v21 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%rcx), %edi
[-,%rdi] v22 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%rsi), %edx
[-,%rdx] v23 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%rcx), %edi
[-,%rdi] v24 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%rsi), %edx
[-,%rdx] v25 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%rcx), %edi
[-,%rdi] v26 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%rsi), %edx
[-,%rdx] v27 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%rcx), %edi
[-,%rdi] v28 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%rsi), %edx
[-,%rdx] v29 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl 50000(%rcx), %edi
[-,%rdi] v30 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%rsi), %edx
[-,%rdx] v31 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%rcx), %edi
[-,%rdi] v32 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%rsi), %edx
[-,%rdx] v33 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%rcx), %edi
[-,%rdi] v34 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%rsi), %edx
[-,%rdx] v35 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%rcx), %edi
[-,%rdi] v36 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%rsi), %edx
[-,%rdx] v37 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%rcx), %edi
[-,%rdi] v38 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%rsi), %edx
[-,%rdx] v39 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Integer Register-Register Operations.
; asm: addl %esi, %ecx
[-,%rcx] v40 = iadd v1, v2 ; bin: 01 f1
; asm: addl %r10d, %esi
[-,%rsi] v41 = iadd v2, v3 ; bin: 44 01 d6
; asm: addl %ecx, %r10d
[-,%r10] v42 = iadd v3, v1 ; bin: 41 01 ca
; asm: subl %esi, %ecx
[-,%rcx] v50 = isub v1, v2 ; bin: 29 f1
; asm: subl %r10d, %esi
[-,%rsi] v51 = isub v2, v3 ; bin: 44 29 d6
; asm: subl %ecx, %r10d
[-,%r10] v52 = isub v3, v1 ; bin: 41 29 ca
; asm: andl %esi, %ecx
[-,%rcx] v60 = band v1, v2 ; bin: 21 f1
; asm: andl %r10d, %esi
[-,%rsi] v61 = band v2, v3 ; bin: 44 21 d6
; asm: andl %ecx, %r10d
[-,%r10] v62 = band v3, v1 ; bin: 41 21 ca
; asm: orl %esi, %ecx
[-,%rcx] v70 = bor v1, v2 ; bin: 09 f1
; asm: orl %r10d, %esi
[-,%rsi] v71 = bor v2, v3 ; bin: 44 09 d6
; asm: orl %ecx, %r10d
[-,%r10] v72 = bor v3, v1 ; bin: 41 09 ca
; asm: xorl %esi, %ecx
[-,%rcx] v80 = bxor v1, v2 ; bin: 31 f1
; asm: xorl %r10d, %esi
[-,%rsi] v81 = bxor v2, v3 ; bin: 44 31 d6
; asm: xorl %ecx, %r10d
[-,%r10] v82 = bxor v3, v1 ; bin: 41 31 ca
; asm: shll %cl, %esi
[-,%rsi] v90 = ishl v2, v1 ; bin: d3 e6
; asm: shll %cl, %r10d
[-,%r10] v91 = ishl v3, v1 ; bin: 41 d3 e2
; asm: sarl %cl, %esi
[-,%rsi] v92 = sshr v2, v1 ; bin: d3 fe
; asm: sarl %cl, %r10d
[-,%r10] v93 = sshr v3, v1 ; bin: 41 d3 fa
; asm: shrl %cl, %esi
[-,%rsi] v94 = ushr v2, v1 ; bin: d3 ee
; asm: shrl %cl, %r10d
[-,%r10] v95 = ushr v3, v1 ; bin: 41 d3 ea
; asm: roll %cl, %esi
[-,%rsi] v96 = rotl v2, v1 ; bin: d3 c6
; asm: roll %cl, %r10d
[-,%r10] v97 = rotl v3, v1 ; bin: 41 d3 c2
; asm: rorl %cl, %esi
[-,%rsi] v98 = rotr v2, v1 ; bin: d3 ce
; asm: rorl %cl, %r10d
[-,%r10] v99 = rotr v3, v1 ; bin: 41 d3 ca
; Integer Register-Immediate Operations.
; These 64-bit ops all use a 32-bit immediate that is sign-extended to 64 bits.
; Some take 8-bit immediates that are sign-extended to 64 bits.
; asm: addl $-100000, %ecx
[-,%rcx] v100 = iadd_imm v1, -100000 ; bin: 81 c1 fffe7960
; asm: addl $100000, %esi
[-,%rsi] v101 = iadd_imm v2, 100000 ; bin: 81 c6 000186a0
; asm: addl $0x7fffffff, %r10d
[-,%r10] v102 = iadd_imm v3, 0x7fff_ffff ; bin: 41 81 c2 7fffffff
; asm: addl $100, %r8d
[-,%r8] v103 = iadd_imm v4, 100 ; bin: 41 83 c0 64
; asm: addl $-100, %r14d
[-,%r14] v104 = iadd_imm v5, -100 ; bin: 41 83 c6 9c
; asm: andl $-100000, %ecx
[-,%rcx] v110 = band_imm v1, -100000 ; bin: 81 e1 fffe7960
; asm: andl $100000, %esi
[-,%rsi] v111 = band_imm v2, 100000 ; bin: 81 e6 000186a0
; asm: andl $0x7fffffff, %r10d
[-,%r10] v112 = band_imm v3, 0x7fff_ffff ; bin: 41 81 e2 7fffffff
; asm: andl $100, %r8d
[-,%r8] v113 = band_imm v4, 100 ; bin: 41 83 e0 64
; asm: andl $-100, %r14d
[-,%r14] v114 = band_imm v5, -100 ; bin: 41 83 e6 9c
; asm: orl $-100000, %ecx
[-,%rcx] v120 = bor_imm v1, -100000 ; bin: 81 c9 fffe7960
; asm: orl $100000, %esi
[-,%rsi] v121 = bor_imm v2, 100000 ; bin: 81 ce 000186a0
; asm: orl $0x7fffffff, %r10d
[-,%r10] v122 = bor_imm v3, 0x7fff_ffff ; bin: 41 81 ca 7fffffff
; asm: orl $100, %r8d
[-,%r8] v123 = bor_imm v4, 100 ; bin: 41 83 c8 64
; asm: orl $-100, %r14d
[-,%r14] v124 = bor_imm v5, -100 ; bin: 41 83 ce 9c
; asm: ret
; asm: xorl $-100000, %ecx
[-,%rcx] v130 = bxor_imm v1, -100000 ; bin: 81 f1 fffe7960
; asm: xorl $100000, %esi
[-,%rsi] v131 = bxor_imm v2, 100000 ; bin: 81 f6 000186a0
; asm: xorl $0x7fffffff, %r10d
[-,%r10] v132 = bxor_imm v3, 0x7fff_ffff ; bin: 41 81 f2 7fffffff
; asm: xorl $100, %r8d
[-,%r8] v133 = bxor_imm v4, 100 ; bin: 41 83 f0 64
; asm: xorl $-100, %r14d
[-,%r14] v134 = bxor_imm v5, -100 ; bin: 41 83 f6 9c
; Register copies.
; asm: movl %esi, %ecx
[-,%rcx] v140 = copy v2 ; bin: 89 f1
; asm: movl %r10d, %esi
[-,%rsi] v141 = copy v3 ; bin: 44 89 d6
; asm: movl %ecx, %r10d
[-,%r10] v142 = copy v1 ; bin: 41 89 ca
; More arithmetic.
; asm: imull %esi, %ecx
[-,%rcx] v150 = imul v1, v2 ; bin: 0f af ce
; asm: imull %r10d, %esi
[-,%rsi] v151 = imul v2, v3 ; bin: 41 0f af f2
; asm: imull %ecx, %r10d
[-,%r10] v152 = imul v3, v1 ; bin: 44 0f af d1
[-,%rax] v160 = iconst.i32 1
[-,%rdx] v161 = iconst.i32 2
; asm: idivl %ecx
[-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: int_divz f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: int_divz f7 fe
; asm: idivl %r10d
[-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: int_divz 41 f7 fa
; asm: divl %ecx
[-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: int_divz f7 f1
; asm: divl %esi
[-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: int_divz f7 f6
; asm: divl %r10d
[-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: int_divz 41 f7 f2
; Bit-counting instructions.
; asm: popcntl %esi, %ecx
[-,%rcx] v200 = popcnt v2 ; bin: f3 0f b8 ce
; asm: popcntl %r10d, %esi
[-,%rsi] v201 = popcnt v3 ; bin: f3 41 0f b8 f2
; asm: popcntl %ecx, %r10d
[-,%r10] v202 = popcnt v1 ; bin: f3 44 0f b8 d1
; asm: lzcntl %esi, %ecx
[-,%rcx] v203 = clz v2 ; bin: f3 0f bd ce
; asm: lzcntl %r10d, %esi
[-,%rsi] v204 = clz v3 ; bin: f3 41 0f bd f2
; asm: lzcntl %ecx, %r10d
[-,%r10] v205 = clz v1 ; bin: f3 44 0f bd d1
; asm: tzcntl %esi, %ecx
[-,%rcx] v206 = ctz v2 ; bin: f3 0f bc ce
; asm: tzcntl %r10d, %esi
[-,%rsi] v207 = ctz v3 ; bin: f3 41 0f bc f2
; asm: tzcntl %ecx, %r10d
[-,%r10] v208 = ctz v1 ; bin: f3 44 0f bc d1
; Integer comparisons.
; asm: cmpl %esi, %ecx
; asm: sete %bl
[-,%rbx] v300 = icmp eq v1, v2 ; bin: 39 f1 0f 94 c3
; asm: cmpl %r10d, %esi
; asm: sete %dl
[-,%rdx] v301 = icmp eq v2, v3 ; bin: 44 39 d6 0f 94 c2
; asm: cmpl %esi, %ecx
; asm: setne %bl
[-,%rbx] v302 = icmp ne v1, v2 ; bin: 39 f1 0f 95 c3
; asm: cmpl %r10d, %esi
; asm: setne %dl
[-,%rdx] v303 = icmp ne v2, v3 ; bin: 44 39 d6 0f 95 c2
; asm: cmpl %esi, %ecx
; asm: setl %bl
[-,%rbx] v304 = icmp slt v1, v2 ; bin: 39 f1 0f 9c c3
; asm: cmpl %r10d, %esi
; asm: setl %dl
[-,%rdx] v305 = icmp slt v2, v3 ; bin: 44 39 d6 0f 9c c2
; asm: cmpl %esi, %ecx
; asm: setge %bl
[-,%rbx] v306 = icmp sge v1, v2 ; bin: 39 f1 0f 9d c3
; asm: cmpl %r10d, %esi
; asm: setge %dl
[-,%rdx] v307 = icmp sge v2, v3 ; bin: 44 39 d6 0f 9d c2
; asm: cmpl %esi, %ecx
; asm: setg %bl
[-,%rbx] v308 = icmp sgt v1, v2 ; bin: 39 f1 0f 9f c3
; asm: cmpl %r10d, %esi
; asm: setg %dl
[-,%rdx] v309 = icmp sgt v2, v3 ; bin: 44 39 d6 0f 9f c2
; asm: cmpl %esi, %ecx
; asm: setle %bl
[-,%rbx] v310 = icmp sle v1, v2 ; bin: 39 f1 0f 9e c3
; asm: cmpl %r10d, %esi
; asm: setle %dl
[-,%rdx] v311 = icmp sle v2, v3 ; bin: 44 39 d6 0f 9e c2
; asm: cmpl %esi, %ecx
; asm: setb %bl
[-,%rbx] v312 = icmp ult v1, v2 ; bin: 39 f1 0f 92 c3
; asm: cmpl %r10d, %esi
; asm: setb %dl
[-,%rdx] v313 = icmp ult v2, v3 ; bin: 44 39 d6 0f 92 c2
; asm: cmpl %esi, %ecx
; asm: setae %bl
[-,%rbx] v314 = icmp uge v1, v2 ; bin: 39 f1 0f 93 c3
; asm: cmpl %r10d, %esi
; asm: setae %dl
[-,%rdx] v315 = icmp uge v2, v3 ; bin: 44 39 d6 0f 93 c2
; asm: cmpl %esi, %ecx
; asm: seta %bl
[-,%rbx] v316 = icmp ugt v1, v2 ; bin: 39 f1 0f 97 c3
; asm: cmpl %r10d, %esi
; asm: seta %dl
[-,%rdx] v317 = icmp ugt v2, v3 ; bin: 44 39 d6 0f 97 c2
; asm: cmpl %esi, %ecx
; asm: setbe %bl
[-,%rbx] v318 = icmp ule v1, v2 ; bin: 39 f1 0f 96 c3
; asm: cmpl %r10d, %esi
; asm: setbe %dl
[-,%rdx] v319 = icmp ule v2, v3 ; bin: 44 39 d6 0f 96 c2
; asm: cmpl $37, %ecx
; asm: setl %bl
[-,%rbx] v320 = icmp_imm slt v1, 37 ; bin: 83 f9 25 0f 9c c3
; asm: cmpl $100000, %ecx
; asm: setl %bl
[-,%rbx] v321 = icmp_imm slt v1, 100000 ; bin: 81 f9 000186a0 0f 9c c3
; Bool-to-int conversions.
; asm: movzbl %bl, %ecx
[-,%rcx] v350 = bint.i32 v300 ; bin: 0f b6 cb
; asm: movzbl %dl, %esi
[-,%rsi] v351 = bint.i32 v301 ; bin: 0f b6 f2
; Spill / Fill.
; asm: movl %ecx, 1032(%rsp)
[-,ss1] v500 = spill v1 ; bin: 89 8c 24 00000408
; asm: movl %esi, 1032(%rsp)
[-,ss1] v501 = spill v2 ; bin: 89 b4 24 00000408
; asm: movl %r10d, 1032(%rsp)
[-,ss1] v502 = spill v3 ; bin: 44 89 94 24 00000408
; asm: movl 1032(%rsp), %ecx
[-,%rcx] v510 = fill v500 ; bin: 8b 8c 24 00000408
; asm: movl 1032(%rsp), %esi
[-,%rsi] v511 = fill v501 ; bin: 8b b4 24 00000408
; asm: movl 1032(%rsp), %r10d
[-,%r10] v512 = fill v502 ; bin: 44 8b 94 24 00000408
; asm: movl %ecx, 1032(%rsp)
regspill v1, %rcx -> ss1 ; bin: 89 8c 24 00000408
; asm: movl 1032(%rsp), %ecx
regfill v1, ss1 -> %rcx ; bin: 8b 8c 24 00000408
; asm: cmpl %esi, %ecx
[-,%rflags] v520 = ifcmp v1, v2 ; bin: 39 f1
; asm: cmpl %r10d, %esi
[-,%rflags] v521 = ifcmp v2, v3 ; bin: 44 39 d6
; asm: cmpl $-100, %ecx
[-,%rflags] v522 = ifcmp_imm v1, -100 ; bin: 83 f9 9c
; asm: cmpl $100, %r10d
[-,%rflags] v523 = ifcmp_imm v3, 100 ; bin: 41 83 fa 64
; asm: cmpl $-10000, %ecx
[-,%rflags] v524 = ifcmp_imm v1, -10000 ; bin: 81 f9 ffffd8f0
; asm: cmpl $10000, %r10d
[-,%rflags] v525 = ifcmp_imm v3, 10000 ; bin: 41 81 fa 00002710
; asm: shll $2, %esi
[-,%rsi] v526 = ishl_imm v2, 2 ; bin: c1 e6 02
; asm: shll $12, %r10d
[-,%r10] v527 = ishl_imm v3, 12 ; bin: 41 c1 e2 0c
; asm: sarl $5, %esi
[-,%rsi] v529 = sshr_imm v2, 5 ; bin: c1 fe 05
; asm: sarl $32, %r10d
[-,%r10] v530 = sshr_imm v3, 32 ; bin: 41 c1 fa 20
; asm: shrl $8, %esi
[-,%rsi] v532 = ushr_imm v2, 8 ; bin: c1 ee 08
; asm: shrl $31, %r10d
[-,%r10] v533 = ushr_imm v3, 31 ; bin: 41 c1 ea 1f
; asm: testl %ecx, %ecx
; asm: je ebb1x
brz v1, ebb1 ; bin: 85 c9 74 18
; asm: testl %esi, %esi
; asm: je ebb1x
brz v2, ebb1 ; bin: 85 f6 74 14
; asm: testl %r10d, %r10d
; asm: je ebb1x
brz v3, ebb1 ; bin: 45 85 d2 74 0f
; asm: testl %ecx, %ecx
; asm: jne ebb1x
brnz v1, ebb1 ; bin: 85 c9 75 0b
; asm: testl %esi, %esi
; asm: jne ebb1x
brnz v2, ebb1 ; bin: 85 f6 75 07
; asm: testl %r10d, %r10d
; asm: jne ebb1x
brnz v3, ebb1 ; bin: 45 85 d2 75 02
; asm: jmp ebb2x
jump ebb2 ; bin: eb 01
; asm: ebb1x:
ebb1:
return ; bin: c3
; asm: ebb2x:
ebb2:
jump ebb1 ; bin: eb fd
}
; Tests for i32/i8 conversion instructions.
function %I32_I8() {
ebb0:
; Materialize three i32 values in registers chosen to exercise the REX
; prefix cases: %rcx (no REX needed), %rsi (REX required to address the
; low byte %sil), and %r10 (extended register, REX.R/REX.B).
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%r10] v3 = iconst.i32 3
; ireduce emits no machine code: the i8 value is simply the low byte of
; the same register, so the expected encoding ("; bin:") is empty.
[-,%rcx] v11 = ireduce.i8 v1 ; bin:
[-,%rsi] v12 = ireduce.i8 v2 ; bin:
[-,%r10] v13 = ireduce.i8 v3 ; bin:
; i8 -> i32 sign extension (movsbl, opcode 0f be).
; asm: movsbl %cl, %esi
[-,%rsi] v20 = sextend.i32 v11 ; bin: 0f be f1
; asm: movsbl %sil, %r10d
[-,%r10] v21 = sextend.i32 v12 ; bin: 44 0f be d6
; asm: movsbl %r10b, %ecx
[-,%rcx] v22 = sextend.i32 v13 ; bin: 41 0f be ca
; i8 -> i32 zero extension (movzbl, opcode 0f b6).
; asm: movzbl %cl, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b6 f1
; asm: movzbl %sil, %r10d
[-,%r10] v31 = uextend.i32 v12 ; bin: 44 0f b6 d6
; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b6 ca
; Terminate the ebb; 0f 0b is ud2, tagged with the user0 trap code.
trap user0 ; bin: user0 0f 0b
}
; Tests for i32/i16 conversion instructions.
function %I32_I16() {
ebb0:
; Same register selection as the i8 tests: %rcx (no REX), %rsi and %r10
; (REX-prefixed operands).
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%r10] v3 = iconst.i32 3
; ireduce to i16 emits no bytes: the value is reinterpreted as the low
; 16 bits of the same register.
[-,%rcx] v11 = ireduce.i16 v1 ; bin:
[-,%rsi] v12 = ireduce.i16 v2 ; bin:
[-,%r10] v13 = ireduce.i16 v3 ; bin:
; i16 -> i32 sign extension (movswl, opcode 0f bf).
; asm: movswl %cx, %esi
[-,%rsi] v20 = sextend.i32 v11 ; bin: 0f bf f1
; asm: movswl %si, %r10d
[-,%r10] v21 = sextend.i32 v12 ; bin: 44 0f bf d6
; asm: movswl %r10w, %ecx
[-,%rcx] v22 = sextend.i32 v13 ; bin: 41 0f bf ca
; i16 -> i32 zero extension (movzwl, opcode 0f b7).
; asm: movzwl %cx, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b7 f1
; asm: movzwl %si, %r10d
[-,%r10] v31 = uextend.i32 v12 ; bin: 44 0f b7 d6
; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b7 ca
; Terminate the ebb; 0f 0b is ud2, tagged with the user0 trap code.
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i8 conversion instructions.
function %I64_I8() {
ebb0:
; i64 source values; register choice exercises the REX cases as in the
; 32-bit tests above.
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
; ireduce emits no bytes (register reinterpretation).
[-,%rcx] v11 = ireduce.i8 v1 ; bin:
[-,%rsi] v12 = ireduce.i8 v2 ; bin:
[-,%r10] v13 = ireduce.i8 v3 ; bin:
; i8 -> i64 sign extension needs REX.W (48/4c/49 prefixes) for the
; 64-bit destination.
; asm: movsbq %cl, %rsi
[-,%rsi] v20 = sextend.i64 v11 ; bin: 48 0f be f1
; asm: movsbq %sil, %r10
[-,%r10] v21 = sextend.i64 v12 ; bin: 4c 0f be d6
; asm: movsbq %r10b, %rcx
[-,%rcx] v22 = sextend.i64 v13 ; bin: 49 0f be ca
; i8 -> i64 zero extension uses the 32-bit movzbl form (no REX.W):
; writing a 32-bit register implicitly zeroes the upper 32 bits.
; asm: movzbl %cl, %esi
[-,%rsi] v30 = uextend.i64 v11 ; bin: 0f b6 f1
; asm: movzbl %sil, %r10d
[-,%r10] v31 = uextend.i64 v12 ; bin: 44 0f b6 d6
; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b6 ca
; Terminate the ebb; 0f 0b is ud2, tagged with the user0 trap code.
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i16 conversion instructions.
function %I64_I16() {
ebb0:
; i64 source values; register choice exercises the REX cases as in the
; 32-bit tests above.
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
; ireduce emits no bytes (register reinterpretation).
[-,%rcx] v11 = ireduce.i16 v1 ; bin:
[-,%rsi] v12 = ireduce.i16 v2 ; bin:
[-,%r10] v13 = ireduce.i16 v3 ; bin:
; i16 -> i64 sign extension needs REX.W (48/4c/49 prefixes) for the
; 64-bit destination.
; asm: movswq %cx, %rsi
[-,%rsi] v20 = sextend.i64 v11 ; bin: 48 0f bf f1
; asm: movswq %si, %r10
[-,%r10] v21 = sextend.i64 v12 ; bin: 4c 0f bf d6
; asm: movswq %r10w, %rcx
[-,%rcx] v22 = sextend.i64 v13 ; bin: 49 0f bf ca
; i16 -> i64 zero extension uses the 32-bit movzwl form (no REX.W):
; writing a 32-bit register implicitly zeroes the upper 32 bits.
; asm: movzwl %cx, %esi
[-,%rsi] v30 = uextend.i64 v11 ; bin: 0f b7 f1
; asm: movzwl %si, %r10d
[-,%r10] v31 = uextend.i64 v12 ; bin: 44 0f b7 d6
; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b7 ca
; Terminate the ebb; 0f 0b is ud2, tagged with the user0 trap code.
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i32 conversion instructions.
function %I64_I32() {
ebb0:
; i64 source values; register choice exercises the REX cases as in the
; other conversion tests.
[-,%rcx] v1 = iconst.i64 1
[-,%rsi] v2 = iconst.i64 2
[-,%r10] v3 = iconst.i64 3
; ireduce emits no bytes (register reinterpretation).
[-,%rcx] v11 = ireduce.i32 v1 ; bin:
[-,%rsi] v12 = ireduce.i32 v2 ; bin:
[-,%r10] v13 = ireduce.i32 v3 ; bin:
; i32 -> i64 sign extension (movslq, opcode 63, with REX.W).
; asm: movslq %ecx, %rsi
[-,%rsi] v20 = sextend.i64 v11 ; bin: 48 63 f1
; asm: movslq %esi, %r10
[-,%r10] v21 = sextend.i64 v12 ; bin: 4c 63 d6
; asm: movslq %r10d, %rcx
[-,%rcx] v22 = sextend.i64 v13 ; bin: 49 63 ca
; i32 -> i64 zero extension is just a 32-bit register move: writing a
; 32-bit register implicitly zeroes the upper 32 bits, so no REX.W and
; no movzx are needed.
; asm: movl %ecx, %esi
[-,%rsi] v30 = uextend.i64 v11 ; bin: 89 ce
; asm: movl %esi, %r10d
[-,%r10] v31 = uextend.i64 v12 ; bin: 41 89 f2
; asm: movl %r10d, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 44 89 d1
; Terminate the ebb; 0f 0b is ud2, tagged with the user0 trap code.
trap user0 ; bin: user0 0f 0b
}