[WIP] Add a Trap sink to code generation (#279)

* First draft of TrapSink implementation.

* Add trap sink calls to 'trapif' and 'trapff' recipes.

* Add SourceLoc to trap sink calls, and add trap sink calls to all loads and stores.

* Add IntegerDivisionByZero trap to div recipe.

* Only emit load/store traps if the 'notrap' flag is not set on the instruction.

* Update filetest machinery to add new trap sink functionality.

* Update filetests to include traps in output.

* Add a few more trap outputs to filetests.

* Add trap output to CLI tool.
Tyler McMullen
2018-03-28 22:48:03 -07:00
committed by Dan Gohman
parent d566faa8fb
commit 951ff11f85
12 changed files with 358 additions and 263 deletions
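For readers skimming the diff: the heart of this change is the new binemit::TrapSink hook. Every emitted trap site (explicit trap/trapif/trapff instructions, the division-by-zero check in the div recipe, and heap loads and stores without the 'notrap' flag) is reported with its code offset, source location, and trap code; the filetests below exercise this by expecting the trap code (heap_oob, int_divz, user0) immediately before the encoded bytes. A minimal collecting sink is sketched here as an illustration; only the trait and the trap method signature are taken from the PrintTraps implementation in the CLI diff at the bottom of this page, while the struct name, field name, and import path are hypothetical.

// Hypothetical sketch: collect trap locations instead of printing them.
// Assumes `binemit` and `ir` are in scope, e.g. `use cretonne::{binemit, ir};`
// (the exact crate path is an assumption).
struct CollectTraps {
    entries: Vec<(binemit::CodeOffset, ir::SourceLoc, ir::TrapCode)>,
}

impl binemit::TrapSink for CollectTraps {
    // Called once per trap site as the code is emitted.
    fn trap(&mut self, offset: binemit::CodeOffset, srcloc: ir::SourceLoc, code: ir::TrapCode) {
        // Record offset, source location, and trap code so an embedder can
        // later map a faulting PC back to the reason for the trap.
        self.entries.push((offset, srcloc, code));
    }
}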


@@ -148,30 +148,30 @@ ebb0:
; Load/Store
; asm: movss (%ecx), %xmm5
[-,%xmm5] v100 = load.f32 v0 ; bin: f3 0f 10 29
[-,%xmm5] v100 = load.f32 v0 ; bin: heap_oob f3 0f 10 29
; asm: movss (%esi), %xmm2
[-,%xmm2] v101 = load.f32 v1 ; bin: f3 0f 10 16
[-,%xmm2] v101 = load.f32 v1 ; bin: heap_oob f3 0f 10 16
; asm: movss 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f32 v0+50 ; bin: f3 0f 10 69 32
[-,%xmm5] v110 = load.f32 v0+50 ; bin: heap_oob f3 0f 10 69 32
; asm: movss -50(%esi), %xmm2
[-,%xmm2] v111 = load.f32 v1-50 ; bin: f3 0f 10 56 ce
[-,%xmm2] v111 = load.f32 v1-50 ; bin: heap_oob f3 0f 10 56 ce
; asm: movss 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f32 v0+10000 ; bin: f3 0f 10 a9 00002710
[-,%xmm5] v120 = load.f32 v0+10000 ; bin: heap_oob f3 0f 10 a9 00002710
; asm: movss -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f32 v1-10000 ; bin: f3 0f 10 96 ffffd8f0
[-,%xmm2] v121 = load.f32 v1-10000 ; bin: heap_oob f3 0f 10 96 ffffd8f0
; asm: movss %xmm5, (%ecx)
[-] store.f32 v100, v0 ; bin: f3 0f 11 29
[-] store.f32 v100, v0 ; bin: heap_oob f3 0f 11 29
; asm: movss %xmm2, (%esi)
[-] store.f32 v101, v1 ; bin: f3 0f 11 16
[-] store.f32 v101, v1 ; bin: heap_oob f3 0f 11 16
; asm: movss %xmm5, 50(%ecx)
[-] store.f32 v100, v0+50 ; bin: f3 0f 11 69 32
[-] store.f32 v100, v0+50 ; bin: heap_oob f3 0f 11 69 32
; asm: movss %xmm2, -50(%esi)
[-] store.f32 v101, v1-50 ; bin: f3 0f 11 56 ce
[-] store.f32 v101, v1-50 ; bin: heap_oob f3 0f 11 56 ce
; asm: movss %xmm5, 10000(%ecx)
[-] store.f32 v100, v0+10000 ; bin: f3 0f 11 a9 00002710
[-] store.f32 v100, v0+10000 ; bin: heap_oob f3 0f 11 a9 00002710
; asm: movss %xmm2, -10000(%esi)
[-] store.f32 v101, v1-10000 ; bin: f3 0f 11 96 ffffd8f0
[-] store.f32 v101, v1-10000 ; bin: heap_oob f3 0f 11 96 ffffd8f0
; Spill / Fill.
@@ -363,30 +363,30 @@ ebb0:
; Load/Store
; asm: movsd (%ecx), %xmm5
[-,%xmm5] v100 = load.f64 v0 ; bin: f2 0f 10 29
[-,%xmm5] v100 = load.f64 v0 ; bin: heap_oob f2 0f 10 29
; asm: movsd (%esi), %xmm2
[-,%xmm2] v101 = load.f64 v1 ; bin: f2 0f 10 16
[-,%xmm2] v101 = load.f64 v1 ; bin: heap_oob f2 0f 10 16
; asm: movsd 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f64 v0+50 ; bin: f2 0f 10 69 32
[-,%xmm5] v110 = load.f64 v0+50 ; bin: heap_oob f2 0f 10 69 32
; asm: movsd -50(%esi), %xmm2
[-,%xmm2] v111 = load.f64 v1-50 ; bin: f2 0f 10 56 ce
[-,%xmm2] v111 = load.f64 v1-50 ; bin: heap_oob f2 0f 10 56 ce
; asm: movsd 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f64 v0+10000 ; bin: f2 0f 10 a9 00002710
[-,%xmm5] v120 = load.f64 v0+10000 ; bin: heap_oob f2 0f 10 a9 00002710
; asm: movsd -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f64 v1-10000 ; bin: f2 0f 10 96 ffffd8f0
[-,%xmm2] v121 = load.f64 v1-10000 ; bin: heap_oob f2 0f 10 96 ffffd8f0
; asm: movsd %xmm5, (%ecx)
[-] store.f64 v100, v0 ; bin: f2 0f 11 29
[-] store.f64 v100, v0 ; bin: heap_oob f2 0f 11 29
; asm: movsd %xmm2, (%esi)
[-] store.f64 v101, v1 ; bin: f2 0f 11 16
[-] store.f64 v101, v1 ; bin: heap_oob f2 0f 11 16
; asm: movsd %xmm5, 50(%ecx)
[-] store.f64 v100, v0+50 ; bin: f2 0f 11 69 32
[-] store.f64 v100, v0+50 ; bin: heap_oob f2 0f 11 69 32
; asm: movsd %xmm2, -50(%esi)
[-] store.f64 v101, v1-50 ; bin: f2 0f 11 56 ce
[-] store.f64 v101, v1-50 ; bin: heap_oob f2 0f 11 56 ce
; asm: movsd %xmm5, 10000(%ecx)
[-] store.f64 v100, v0+10000 ; bin: f2 0f 11 a9 00002710
[-] store.f64 v100, v0+10000 ; bin: heap_oob f2 0f 11 a9 00002710
; asm: movsd %xmm2, -10000(%esi)
[-] store.f64 v101, v1-10000 ; bin: f2 0f 11 96 ffffd8f0
[-] store.f64 v101, v1-10000 ; bin: heap_oob f2 0f 11 96 ffffd8f0
; Spill / Fill.
@@ -471,21 +471,21 @@ ebb1:
brff ule v1, ebb1 ; bin: 76 f0
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 0f 0b
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 0f 0b
trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 0f 0b
trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 0f 0b
trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 0f 0b
trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 0f 0b
trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 0f 0b
trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 0f 0b
trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3


@@ -128,13 +128,13 @@ ebb0:
; asm: movl $2, %edx
[-,%rdx] v53 = iconst.i32 2 ; bin: ba 00000002
; asm: idivl %ecx
[-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: f7 f9
[-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: int_divz f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: f7 fe
[-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: int_divz f7 fe
; asm: divl %ecx
[-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: f7 f1
[-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: int_divz f7 f1
; asm: divl %esi
[-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: f7 f6
[-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: int_divz f7 f6
; Register copies.
@@ -155,105 +155,105 @@ ebb0:
; Register indirect addressing with no displacement.
; asm: movl %ecx, (%esi)
store v1, v2 ; bin: 89 0e
store v1, v2 ; bin: heap_oob 89 0e
; asm: movl %esi, (%ecx)
store v2, v1 ; bin: 89 31
store v2, v1 ; bin: heap_oob 89 31
; asm: movw %cx, (%esi)
istore16 v1, v2 ; bin: 66 89 0e
istore16 v1, v2 ; bin: heap_oob 66 89 0e
; asm: movw %si, (%ecx)
istore16 v2, v1 ; bin: 66 89 31
istore16 v2, v1 ; bin: heap_oob 66 89 31
; asm: movb %cl, (%esi)
istore8 v1, v2 ; bin: 88 0e
istore8 v1, v2 ; bin: heap_oob 88 0e
; Can't store %sil in 32-bit mode (needs REX prefix).
; asm: movl (%ecx), %edi
[-,%rdi] v100 = load.i32 v1 ; bin: 8b 39
[-,%rdi] v100 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%esi), %edx
[-,%rdx] v101 = load.i32 v2 ; bin: 8b 16
[-,%rdx] v101 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%ecx), %edi
[-,%rdi] v102 = uload16.i32 v1 ; bin: 0f b7 39
[-,%rdi] v102 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%esi), %edx
[-,%rdx] v103 = uload16.i32 v2 ; bin: 0f b7 16
[-,%rdx] v103 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%ecx), %edi
[-,%rdi] v104 = sload16.i32 v1 ; bin: 0f bf 39
[-,%rdi] v104 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%esi), %edx
[-,%rdx] v105 = sload16.i32 v2 ; bin: 0f bf 16
[-,%rdx] v105 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%ecx), %edi
[-,%rdi] v106 = uload8.i32 v1 ; bin: 0f b6 39
[-,%rdi] v106 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%esi), %edx
[-,%rdx] v107 = uload8.i32 v2 ; bin: 0f b6 16
[-,%rdx] v107 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%ecx), %edi
[-,%rdi] v108 = sload8.i32 v1 ; bin: 0f be 39
[-,%rdi] v108 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%esi), %edx
[-,%rdx] v109 = sload8.i32 v2 ; bin: 0f be 16
[-,%rdx] v109 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl %ecx, 100(%esi)
store v1, v2+100 ; bin: 89 4e 64
store v1, v2+100 ; bin: heap_oob 89 4e 64
; asm: movl %esi, -100(%ecx)
store v2, v1-100 ; bin: 89 71 9c
store v2, v1-100 ; bin: heap_oob 89 71 9c
; asm: movw %cx, 100(%esi)
istore16 v1, v2+100 ; bin: 66 89 4e 64
istore16 v1, v2+100 ; bin: heap_oob 66 89 4e 64
; asm: movw %si, -100(%ecx)
istore16 v2, v1-100 ; bin: 66 89 71 9c
istore16 v2, v1-100 ; bin: heap_oob 66 89 71 9c
; asm: movb %cl, 100(%esi)
istore8 v1, v2+100 ; bin: 88 4e 64
istore8 v1, v2+100 ; bin: heap_oob 88 4e 64
; asm: movl 50(%ecx), %edi
[-,%rdi] v110 = load.i32 v1+50 ; bin: 8b 79 32
[-,%rdi] v110 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%esi), %edx
[-,%rdx] v111 = load.i32 v2-50 ; bin: 8b 56 ce
[-,%rdx] v111 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%ecx), %edi
[-,%rdi] v112 = uload16.i32 v1+50 ; bin: 0f b7 79 32
[-,%rdi] v112 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%esi), %edx
[-,%rdx] v113 = uload16.i32 v2-50 ; bin: 0f b7 56 ce
[-,%rdx] v113 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%ecx), %edi
[-,%rdi] v114 = sload16.i32 v1+50 ; bin: 0f bf 79 32
[-,%rdi] v114 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%esi), %edx
[-,%rdx] v115 = sload16.i32 v2-50 ; bin: 0f bf 56 ce
[-,%rdx] v115 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%ecx), %edi
[-,%rdi] v116 = uload8.i32 v1+50 ; bin: 0f b6 79 32
[-,%rdi] v116 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%esi), %edx
[-,%rdx] v117 = uload8.i32 v2-50 ; bin: 0f b6 56 ce
[-,%rdx] v117 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%ecx), %edi
[-,%rdi] v118 = sload8.i32 v1+50 ; bin: 0f be 79 32
[-,%rdi] v118 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%esi), %edx
[-,%rdx] v119 = sload8.i32 v2-50 ; bin: 0f be 56 ce
[-,%rdx] v119 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl %ecx, 10000(%esi)
store v1, v2+10000 ; bin: 89 8e 00002710
store v1, v2+10000 ; bin: heap_oob 89 8e 00002710
; asm: movl %esi, -10000(%ecx)
store v2, v1-10000 ; bin: 89 b1 ffffd8f0
store v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
; asm: movw %cx, 10000(%esi)
istore16 v1, v2+10000 ; bin: 66 89 8e 00002710
istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
; asm: movw %si, -10000(%ecx)
istore16 v2, v1-10000 ; bin: 66 89 b1 ffffd8f0
istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%esi)
istore8 v1, v2+10000 ; bin: 88 8e 00002710
istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
; asm: movl 50000(%ecx), %edi
[-,%rdi] v120 = load.i32 v1+50000 ; bin: 8b b9 0000c350
[-,%rdi] v120 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%esi), %edx
[-,%rdx] v121 = load.i32 v2-50000 ; bin: 8b 96 ffff3cb0
[-,%rdx] v121 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%ecx), %edi
[-,%rdi] v122 = uload16.i32 v1+50000 ; bin: 0f b7 b9 0000c350
[-,%rdi] v122 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%esi), %edx
[-,%rdx] v123 = uload16.i32 v2-50000 ; bin: 0f b7 96 ffff3cb0
[-,%rdx] v123 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%ecx), %edi
[-,%rdi] v124 = sload16.i32 v1+50000 ; bin: 0f bf b9 0000c350
[-,%rdi] v124 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%esi), %edx
[-,%rdx] v125 = sload16.i32 v2-50000 ; bin: 0f bf 96 ffff3cb0
[-,%rdx] v125 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%ecx), %edi
[-,%rdi] v126 = uload8.i32 v1+50000 ; bin: 0f b6 b9 0000c350
[-,%rdi] v126 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%esi), %edx
[-,%rdx] v127 = uload8.i32 v2-50000 ; bin: 0f b6 96 ffff3cb0
[-,%rdx] v127 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%ecx), %edi
[-,%rdi] v128 = sload8.i32 v1+50000 ; bin: 0f be b9 0000c350
[-,%rdi] v128 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%esi), %edx
[-,%rdx] v129 = sload8.i32 v2-50000 ; bin: 0f be 96 ffff3cb0
[-,%rdx] v129 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Bit-counting instructions.
@@ -437,7 +437,7 @@ ebb1:
; asm: ebb2:
ebb2:
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Special branch encodings only for I32 mode.
@@ -524,25 +524,25 @@ ebb1:
; The trapif instructions are encoded as macros: a conditional jump over a ud2.
; asm: jne .+4; ud2
trapif eq v11, user0 ; bin: 75 02 0f 0b
trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
; asm: je .+4; ud2
trapif ne v11, user0 ; bin: 74 02 0f 0b
trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
; asm: jnl .+4; ud2
trapif slt v11, user0 ; bin: 7d 02 0f 0b
trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
; asm: jnge .+4; ud2
trapif sge v11, user0 ; bin: 7c 02 0f 0b
trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
; asm: jng .+4; ud2
trapif sgt v11, user0 ; bin: 7e 02 0f 0b
trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
; asm: jnle .+4; ud2
trapif sle v11, user0 ; bin: 7f 02 0f 0b
trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
; asm: jnb .+4; ud2
trapif ult v11, user0 ; bin: 73 02 0f 0b
trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
; asm: jnae .+4; ud2
trapif uge v11, user0 ; bin: 72 02 0f 0b
trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
; asm: jna .+4; ud2
trapif ugt v11, user0 ; bin: 76 02 0f 0b
trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapif ule v11, user0 ; bin: 77 02 0f 0b
trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
; Stack check.
; asm: cmpl %esp, %ecx
@@ -576,7 +576,7 @@ ebb0:
; asm: movzbl %cl, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b6 f1
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Tests for i32/i16 conversion instructions.
@@ -592,5 +592,5 @@ ebb0:
; asm: movzwl %cx, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b7 f1
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}


@@ -158,34 +158,34 @@ ebb0:
; Load/Store
; asm: movss (%r14), %xmm5
[-,%xmm5] v100 = load.f32 v3 ; bin: f3 41 0f 10 2e
[-,%xmm5] v100 = load.f32 v3 ; bin: heap_oob f3 41 0f 10 2e
; asm: movss (%rax), %xmm10
[-,%xmm10] v101 = load.f32 v2 ; bin: f3 44 0f 10 10
[-,%xmm10] v101 = load.f32 v2 ; bin: heap_oob f3 44 0f 10 10
; asm: movss 50(%r14), %xmm5
[-,%xmm5] v110 = load.f32 v3+50 ; bin: f3 41 0f 10 6e 32
[-,%xmm5] v110 = load.f32 v3+50 ; bin: heap_oob f3 41 0f 10 6e 32
; asm: movss -50(%rax), %xmm10
[-,%xmm10] v111 = load.f32 v2-50 ; bin: f3 44 0f 10 50 ce
[-,%xmm10] v111 = load.f32 v2-50 ; bin: heap_oob f3 44 0f 10 50 ce
; asm: movss 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f32 v3+10000 ; bin: f3 41 0f 10 ae 00002710
[-,%xmm5] v120 = load.f32 v3+10000 ; bin: heap_oob f3 41 0f 10 ae 00002710
; asm: movss -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f32 v2-10000 ; bin: f3 44 0f 10 90 ffffd8f0
[-,%xmm10] v121 = load.f32 v2-10000 ; bin: heap_oob f3 44 0f 10 90 ffffd8f0
; asm: movss %xmm5, (%r14)
[-] store.f32 v100, v3 ; bin: f3 41 0f 11 2e
[-] store.f32 v100, v3 ; bin: heap_oob f3 41 0f 11 2e
; asm: movss %xmm10, (%rax)
[-] store.f32 v101, v2 ; bin: f3 44 0f 11 10
[-] store.f32 v101, v2 ; bin: heap_oob f3 44 0f 11 10
; asm: movss %xmm5, (%r13)
[-] store.f32 v100, v4 ; bin: f3 41 0f 11 6d 00
[-] store.f32 v100, v4 ; bin: heap_oob f3 41 0f 11 6d 00
; asm: movss %xmm10, (%r13)
[-] store.f32 v101, v4 ; bin: f3 45 0f 11 55 00
[-] store.f32 v101, v4 ; bin: heap_oob f3 45 0f 11 55 00
; asm: movss %xmm5, 50(%r14)
[-] store.f32 v100, v3+50 ; bin: f3 41 0f 11 6e 32
[-] store.f32 v100, v3+50 ; bin: heap_oob f3 41 0f 11 6e 32
; asm: movss %xmm10, -50(%rax)
[-] store.f32 v101, v2-50 ; bin: f3 44 0f 11 50 ce
[-] store.f32 v101, v2-50 ; bin: heap_oob f3 44 0f 11 50 ce
; asm: movss %xmm5, 10000(%r14)
[-] store.f32 v100, v3+10000 ; bin: f3 41 0f 11 ae 00002710
[-] store.f32 v100, v3+10000 ; bin: heap_oob f3 41 0f 11 ae 00002710
; asm: movss %xmm10, -10000(%rax)
[-] store.f32 v101, v2-10000 ; bin: f3 44 0f 11 90 ffffd8f0
[-] store.f32 v101, v2-10000 ; bin: heap_oob f3 44 0f 11 90 ffffd8f0
; Spill / Fill.
@@ -393,34 +393,34 @@ ebb0:
; Load/Store
; asm: movsd (%r14), %xmm5
[-,%xmm5] v100 = load.f64 v3 ; bin: f2 41 0f 10 2e
[-,%xmm5] v100 = load.f64 v3 ; bin: heap_oob f2 41 0f 10 2e
; asm: movsd (%rax), %xmm10
[-,%xmm10] v101 = load.f64 v2 ; bin: f2 44 0f 10 10
[-,%xmm10] v101 = load.f64 v2 ; bin: heap_oob f2 44 0f 10 10
; asm: movsd 50(%r14), %xmm5
[-,%xmm5] v110 = load.f64 v3+50 ; bin: f2 41 0f 10 6e 32
[-,%xmm5] v110 = load.f64 v3+50 ; bin: heap_oob f2 41 0f 10 6e 32
; asm: movsd -50(%rax), %xmm10
[-,%xmm10] v111 = load.f64 v2-50 ; bin: f2 44 0f 10 50 ce
[-,%xmm10] v111 = load.f64 v2-50 ; bin: heap_oob f2 44 0f 10 50 ce
; asm: movsd 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f64 v3+10000 ; bin: f2 41 0f 10 ae 00002710
[-,%xmm5] v120 = load.f64 v3+10000 ; bin: heap_oob f2 41 0f 10 ae 00002710
; asm: movsd -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f64 v2-10000 ; bin: f2 44 0f 10 90 ffffd8f0
[-,%xmm10] v121 = load.f64 v2-10000 ; bin: heap_oob f2 44 0f 10 90 ffffd8f0
; asm: movsd %xmm5, (%r14)
[-] store.f64 v100, v3 ; bin: f2 41 0f 11 2e
[-] store.f64 v100, v3 ; bin: heap_oob f2 41 0f 11 2e
; asm: movsd %xmm10, (%rax)
[-] store.f64 v101, v2 ; bin: f2 44 0f 11 10
[-] store.f64 v101, v2 ; bin: heap_oob f2 44 0f 11 10
; asm: movsd %xmm5, (%r13)
[-] store.f64 v100, v4 ; bin: f2 41 0f 11 6d 00
[-] store.f64 v100, v4 ; bin: heap_oob f2 41 0f 11 6d 00
; asm: movsd %xmm10, (%r13)
[-] store.f64 v101, v4 ; bin: f2 45 0f 11 55 00
[-] store.f64 v101, v4 ; bin: heap_oob f2 45 0f 11 55 00
; asm: movsd %xmm5, 50(%r14)
[-] store.f64 v100, v3+50 ; bin: f2 41 0f 11 6e 32
[-] store.f64 v100, v3+50 ; bin: heap_oob f2 41 0f 11 6e 32
; asm: movsd %xmm10, -50(%rax)
[-] store.f64 v101, v2-50 ; bin: f2 44 0f 11 50 ce
[-] store.f64 v101, v2-50 ; bin: heap_oob f2 44 0f 11 50 ce
; asm: movsd %xmm5, 10000(%r14)
[-] store.f64 v100, v3+10000 ; bin: f2 41 0f 11 ae 00002710
[-] store.f64 v100, v3+10000 ; bin: heap_oob f2 41 0f 11 ae 00002710
; asm: movsd %xmm10, -10000(%rax)
[-] store.f64 v101, v2-10000 ; bin: f2 44 0f 11 90 ffffd8f0
[-] store.f64 v101, v2-10000 ; bin: heap_oob f2 44 0f 11 90 ffffd8f0
; Spill / Fill.
@@ -505,21 +505,21 @@ ebb1:
brff ule v1, ebb1 ; bin: 76 f0
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 0f 0b
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 0f 0b
trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 0f 0b
trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 0f 0b
trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 0f 0b
trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 0f 0b
trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 0f 0b
trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 0f 0b
trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3


@@ -175,146 +175,146 @@ ebb0:
; Register indirect addressing with no displacement.
; asm: movq %rcx, (%r10)
store v1, v3 ; bin: 49 89 0a
store v1, v3 ; bin: heap_oob 49 89 0a
; asm: movq %r10, (%rcx)
store v3, v1 ; bin: 4c 89 11
store v3, v1 ; bin: heap_oob 4c 89 11
; asm: movl %ecx, (%r10)
istore32 v1, v3 ; bin: 41 89 0a
istore32 v1, v3 ; bin: heap_oob 41 89 0a
; asm: movl %r10d, (%rcx)
istore32 v3, v1 ; bin: 44 89 11
istore32 v3, v1 ; bin: heap_oob 44 89 11
; asm: movw %cx, (%r10)
istore16 v1, v3 ; bin: 66 41 89 0a
istore16 v1, v3 ; bin: heap_oob 66 41 89 0a
; asm: movw %r10w, (%rcx)
istore16 v3, v1 ; bin: 66 44 89 11
istore16 v3, v1 ; bin: heap_oob 66 44 89 11
; asm: movb %cl, (%r10)
istore8 v1, v3 ; bin: 41 88 0a
istore8 v1, v3 ; bin: heap_oob 41 88 0a
; asm: movb %r10b, (%rcx)
istore8 v3, v1 ; bin: 44 88 11
istore8 v3, v1 ; bin: heap_oob 44 88 11
; asm: movq (%rcx), %r14
[-,%r14] v120 = load.i64 v1 ; bin: 4c 8b 31
[-,%r14] v120 = load.i64 v1 ; bin: heap_oob 4c 8b 31
; asm: movq (%r10), %rdx
[-,%rdx] v121 = load.i64 v3 ; bin: 49 8b 12
[-,%rdx] v121 = load.i64 v3 ; bin: heap_oob 49 8b 12
; asm: movl (%rcx), %r14d
[-,%r14] v122 = uload32.i64 v1 ; bin: 44 8b 31
[-,%r14] v122 = uload32.i64 v1 ; bin: heap_oob 44 8b 31
; asm: movl (%r10), %edx
[-,%rdx] v123 = uload32.i64 v3 ; bin: 41 8b 12
[-,%rdx] v123 = uload32.i64 v3 ; bin: heap_oob 41 8b 12
; asm: movslq (%rcx), %r14
[-,%r14] v124 = sload32.i64 v1 ; bin: 4c 63 31
[-,%r14] v124 = sload32.i64 v1 ; bin: heap_oob 4c 63 31
; asm: movslq (%r10), %rdx
[-,%rdx] v125 = sload32.i64 v3 ; bin: 49 63 12
[-,%rdx] v125 = sload32.i64 v3 ; bin: heap_oob 49 63 12
; asm: movzwq (%rcx), %r14
[-,%r14] v126 = uload16.i64 v1 ; bin: 4c 0f b7 31
[-,%r14] v126 = uload16.i64 v1 ; bin: heap_oob 4c 0f b7 31
; asm: movzwq (%r10), %rdx
[-,%rdx] v127 = uload16.i64 v3 ; bin: 49 0f b7 12
[-,%rdx] v127 = uload16.i64 v3 ; bin: heap_oob 49 0f b7 12
; asm: movswq (%rcx), %r14
[-,%r14] v128 = sload16.i64 v1 ; bin: 4c 0f bf 31
[-,%r14] v128 = sload16.i64 v1 ; bin: heap_oob 4c 0f bf 31
; asm: movswq (%r10), %rdx
[-,%rdx] v129 = sload16.i64 v3 ; bin: 49 0f bf 12
[-,%rdx] v129 = sload16.i64 v3 ; bin: heap_oob 49 0f bf 12
; asm: movzbq (%rcx), %r14
[-,%r14] v130 = uload8.i64 v1 ; bin: 4c 0f b6 31
[-,%r14] v130 = uload8.i64 v1 ; bin: heap_oob 4c 0f b6 31
; asm: movzbq (%r10), %rdx
[-,%rdx] v131 = uload8.i64 v3 ; bin: 49 0f b6 12
[-,%rdx] v131 = uload8.i64 v3 ; bin: heap_oob 49 0f b6 12
; asm: movsbq (%rcx), %r14
[-,%r14] v132 = sload8.i64 v1 ; bin: 4c 0f be 31
[-,%r14] v132 = sload8.i64 v1 ; bin: heap_oob 4c 0f be 31
; asm: movsbq (%r10), %rdx
[-,%rdx] v133 = sload8.i64 v3 ; bin: 49 0f be 12
[-,%rdx] v133 = sload8.i64 v3 ; bin: heap_oob 49 0f be 12
; Register-indirect with 8-bit signed displacement.
; asm: movq %rcx, 100(%r10)
store v1, v3+100 ; bin: 49 89 4a 64
store v1, v3+100 ; bin: heap_oob 49 89 4a 64
; asm: movq %r10, -100(%rcx)
store v3, v1-100 ; bin: 4c 89 51 9c
store v3, v1-100 ; bin: heap_oob 4c 89 51 9c
; asm: movl %ecx, 100(%r10)
istore32 v1, v3+100 ; bin: 41 89 4a 64
istore32 v1, v3+100 ; bin: heap_oob 41 89 4a 64
; asm: movl %r10d, -100(%rcx)
istore32 v3, v1-100 ; bin: 44 89 51 9c
istore32 v3, v1-100 ; bin: heap_oob 44 89 51 9c
; asm: movw %cx, 100(%r10)
istore16 v1, v3+100 ; bin: 66 41 89 4a 64
istore16 v1, v3+100 ; bin: heap_oob 66 41 89 4a 64
; asm: movw %r10w, -100(%rcx)
istore16 v3, v1-100 ; bin: 66 44 89 51 9c
istore16 v3, v1-100 ; bin: heap_oob 66 44 89 51 9c
; asm: movb %cl, 100(%r10)
istore8 v1, v3+100 ; bin: 41 88 4a 64
istore8 v1, v3+100 ; bin: heap_oob 41 88 4a 64
; asm: movb %r10b, 100(%rcx)
istore8 v3, v1+100 ; bin: 44 88 51 64
istore8 v3, v1+100 ; bin: heap_oob 44 88 51 64
; asm: movq 50(%rcx), %r10
[-,%r10] v140 = load.i64 v1+50 ; bin: 4c 8b 51 32
[-,%r10] v140 = load.i64 v1+50 ; bin: heap_oob 4c 8b 51 32
; asm: movq -50(%r10), %rdx
[-,%rdx] v141 = load.i64 v3-50 ; bin: 49 8b 52 ce
[-,%rdx] v141 = load.i64 v3-50 ; bin: heap_oob 49 8b 52 ce
; asm: movl 50(%rcx), %edi
[-,%rdi] v142 = uload32.i64 v1+50 ; bin: 8b 79 32
[-,%rdi] v142 = uload32.i64 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%rsi), %edx
[-,%rdx] v143 = uload32.i64 v2-50 ; bin: 8b 56 ce
[-,%rdx] v143 = uload32.i64 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movslq 50(%rcx), %rdi
[-,%rdi] v144 = sload32.i64 v1+50 ; bin: 48 63 79 32
[-,%rdi] v144 = sload32.i64 v1+50 ; bin: heap_oob 48 63 79 32
; asm: movslq -50(%rsi), %rdx
[-,%rdx] v145 = sload32.i64 v2-50 ; bin: 48 63 56 ce
[-,%rdx] v145 = sload32.i64 v2-50 ; bin: heap_oob 48 63 56 ce
; asm: movzwq 50(%rcx), %rdi
[-,%rdi] v146 = uload16.i64 v1+50 ; bin: 48 0f b7 79 32
[-,%rdi] v146 = uload16.i64 v1+50 ; bin: heap_oob 48 0f b7 79 32
; asm: movzwq -50(%rsi), %rdx
[-,%rdx] v147 = uload16.i64 v2-50 ; bin: 48 0f b7 56 ce
[-,%rdx] v147 = uload16.i64 v2-50 ; bin: heap_oob 48 0f b7 56 ce
; asm: movswq 50(%rcx), %rdi
[-,%rdi] v148 = sload16.i64 v1+50 ; bin: 48 0f bf 79 32
[-,%rdi] v148 = sload16.i64 v1+50 ; bin: heap_oob 48 0f bf 79 32
; asm: movswq -50(%rsi), %rdx
[-,%rdx] v149 = sload16.i64 v2-50 ; bin: 48 0f bf 56 ce
[-,%rdx] v149 = sload16.i64 v2-50 ; bin: heap_oob 48 0f bf 56 ce
; asm: movzbq 50(%rcx), %rdi
[-,%rdi] v150 = uload8.i64 v1+50 ; bin: 48 0f b6 79 32
[-,%rdi] v150 = uload8.i64 v1+50 ; bin: heap_oob 48 0f b6 79 32
; asm: movzbq -50(%rsi), %rdx
[-,%rdx] v151 = uload8.i64 v2-50 ; bin: 48 0f b6 56 ce
[-,%rdx] v151 = uload8.i64 v2-50 ; bin: heap_oob 48 0f b6 56 ce
; asm: movsbq 50(%rcx), %rdi
[-,%rdi] v152 = sload8.i64 v1+50 ; bin: 48 0f be 79 32
[-,%rdi] v152 = sload8.i64 v1+50 ; bin: heap_oob 48 0f be 79 32
; asm: movsbq -50(%rsi), %rdx
[-,%rdx] v153 = sload8.i64 v2-50 ; bin: 48 0f be 56 ce
[-,%rdx] v153 = sload8.i64 v2-50 ; bin: heap_oob 48 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movq %rcx, 10000(%r10)
store v1, v3+10000 ; bin: 49 89 8a 00002710
store v1, v3+10000 ; bin: heap_oob 49 89 8a 00002710
; asm: movq %r10, -10000(%rcx)
store v3, v1-10000 ; bin: 4c 89 91 ffffd8f0
store v3, v1-10000 ; bin: heap_oob 4c 89 91 ffffd8f0
; asm: movl %ecx, 10000(%rsi)
istore32 v1, v2+10000 ; bin: 89 8e 00002710
istore32 v1, v2+10000 ; bin: heap_oob 89 8e 00002710
; asm: movl %esi, -10000(%rcx)
istore32 v2, v1-10000 ; bin: 89 b1 ffffd8f0
istore32 v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
; asm: movw %cx, 10000(%rsi)
istore16 v1, v2+10000 ; bin: 66 89 8e 00002710
istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
; asm: movw %si, -10000(%rcx)
istore16 v2, v1-10000 ; bin: 66 89 b1 ffffd8f0
istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%rsi)
istore8 v1, v2+10000 ; bin: 88 8e 00002710
istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
; asm: movb %sil, 10000(%rcx)
istore8 v2, v1+10000 ; bin: 40 88 b1 00002710
istore8 v2, v1+10000 ; bin: heap_oob 40 88 b1 00002710
; asm: movq 50000(%rcx), %r10
[-,%r10] v160 = load.i64 v1+50000 ; bin: 4c 8b 91 0000c350
[-,%r10] v160 = load.i64 v1+50000 ; bin: heap_oob 4c 8b 91 0000c350
; asm: movq -50000(%r10), %rdx
[-,%rdx] v161 = load.i64 v3-50000 ; bin: 49 8b 92 ffff3cb0
[-,%rdx] v161 = load.i64 v3-50000 ; bin: heap_oob 49 8b 92 ffff3cb0
; asm: movl 50000(%rcx), %edi
[-,%rdi] v162 = uload32.i64 v1+50000 ; bin: 8b b9 0000c350
[-,%rdi] v162 = uload32.i64 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%rsi), %edx
[-,%rdx] v163 = uload32.i64 v2-50000 ; bin: 8b 96 ffff3cb0
[-,%rdx] v163 = uload32.i64 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movslq 50000(%rcx), %rdi
[-,%rdi] v164 = sload32.i64 v1+50000 ; bin: 48 63 b9 0000c350
[-,%rdi] v164 = sload32.i64 v1+50000 ; bin: heap_oob 48 63 b9 0000c350
; asm: movslq -50000(%rsi), %rdx
[-,%rdx] v165 = sload32.i64 v2-50000 ; bin: 48 63 96 ffff3cb0
[-,%rdx] v165 = sload32.i64 v2-50000 ; bin: heap_oob 48 63 96 ffff3cb0
; asm: movzwq 50000(%rcx), %rdi
[-,%rdi] v166 = uload16.i64 v1+50000 ; bin: 48 0f b7 b9 0000c350
[-,%rdi] v166 = uload16.i64 v1+50000 ; bin: heap_oob 48 0f b7 b9 0000c350
; asm: movzwq -50000(%rsi), %rdx
[-,%rdx] v167 = uload16.i64 v2-50000 ; bin: 48 0f b7 96 ffff3cb0
[-,%rdx] v167 = uload16.i64 v2-50000 ; bin: heap_oob 48 0f b7 96 ffff3cb0
; asm: movswq 50000(%rcx), %rdi
[-,%rdi] v168 = sload16.i64 v1+50000 ; bin: 48 0f bf b9 0000c350
[-,%rdi] v168 = sload16.i64 v1+50000 ; bin: heap_oob 48 0f bf b9 0000c350
; asm: movswq -50000(%rsi), %rdx
[-,%rdx] v169 = sload16.i64 v2-50000 ; bin: 48 0f bf 96 ffff3cb0
[-,%rdx] v169 = sload16.i64 v2-50000 ; bin: heap_oob 48 0f bf 96 ffff3cb0
; asm: movzbq 50000(%rcx), %rdi
[-,%rdi] v170 = uload8.i64 v1+50000 ; bin: 48 0f b6 b9 0000c350
[-,%rdi] v170 = uload8.i64 v1+50000 ; bin: heap_oob 48 0f b6 b9 0000c350
; asm: movzbq -50000(%rsi), %rdx
[-,%rdx] v171 = uload8.i64 v2-50000 ; bin: 48 0f b6 96 ffff3cb0
[-,%rdx] v171 = uload8.i64 v2-50000 ; bin: heap_oob 48 0f b6 96 ffff3cb0
; asm: movsbq 50000(%rcx), %rdi
[-,%rdi] v172 = sload8.i64 v1+50000 ; bin: 48 0f be b9 0000c350
[-,%rdi] v172 = sload8.i64 v1+50000 ; bin: heap_oob 48 0f be b9 0000c350
; asm: movsbq -50000(%rsi), %rdx
[-,%rdx] v173 = sload8.i64 v2-50000 ; bin: 48 0f be 96 ffff3cb0
[-,%rdx] v173 = sload8.i64 v2-50000 ; bin: heap_oob 48 0f be 96 ffff3cb0
; More arithmetic.
@@ -329,17 +329,17 @@ ebb0:
[-,%rax] v190 = iconst.i64 1
[-,%rdx] v191 = iconst.i64 2
; asm: idivq %rcx
[-,%rax,%rdx] v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: 48 f7 f9
[-,%rax,%rdx] v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: int_divz 48 f7 f9
; asm: idivq %rsi
[-,%rax,%rdx] v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: 48 f7 fe
[-,%rax,%rdx] v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: int_divz 48 f7 fe
; asm: idivq %r10
[-,%rax,%rdx] v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: 49 f7 fa
[-,%rax,%rdx] v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: int_divz 49 f7 fa
; asm: divq %rcx
[-,%rax,%rdx] v198, v199 = x86_udivmodx v190, v191, v1 ; bin: 48 f7 f1
[-,%rax,%rdx] v198, v199 = x86_udivmodx v190, v191, v1 ; bin: int_divz 48 f7 f1
; asm: divq %rsi
[-,%rax,%rdx] v200, v201 = x86_udivmodx v190, v191, v2 ; bin: 48 f7 f6
[-,%rax,%rdx] v200, v201 = x86_udivmodx v190, v191, v2 ; bin: int_divz 48 f7 f6
; asm: divq %r10
[-,%rax,%rdx] v202, v203 = x86_udivmodx v190, v191, v3 ; bin: 49 f7 f2
[-,%rax,%rdx] v202, v203 = x86_udivmodx v190, v191, v3 ; bin: int_divz 49 f7 f2
; double-length multiply instructions, 64 bit
[-,%rax] v1001 = iconst.i64 1
@@ -637,25 +637,25 @@ ebb1:
; The trapif instructions are encoded as macros: a conditional jump over a ud2.
; asm: jne .+4; ud2
trapif eq v11, user0 ; bin: 75 02 0f 0b
trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
; asm: je .+4; ud2
trapif ne v11, user0 ; bin: 74 02 0f 0b
trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
; asm: jnl .+4; ud2
trapif slt v11, user0 ; bin: 7d 02 0f 0b
trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
; asm: jnge .+4; ud2
trapif sge v11, user0 ; bin: 7c 02 0f 0b
trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
; asm: jng .+4; ud2
trapif sgt v11, user0 ; bin: 7e 02 0f 0b
trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
; asm: jnle .+4; ud2
trapif sle v11, user0 ; bin: 7f 02 0f 0b
trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
; asm: jnb .+4; ud2
trapif ult v11, user0 ; bin: 73 02 0f 0b
trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
; asm: jnae .+4; ud2
trapif uge v11, user0 ; bin: 72 02 0f 0b
trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
; asm: jna .+4; ud2
trapif ugt v11, user0 ; bin: 76 02 0f 0b
trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapif ule v11, user0 ; bin: 77 02 0f 0b
trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
; Stack check.
; asm: cmpq %rsp, %rcx
@@ -729,71 +729,71 @@ ebb0:
; Register indirect addressing with no displacement.
; asm: movl (%rcx), %edi
[-,%rdi] v10 = load.i32 v1 ; bin: 8b 39
[-,%rdi] v10 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%rsi), %edx
[-,%rdx] v11 = load.i32 v2 ; bin: 8b 16
[-,%rdx] v11 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%rcx), %edi
[-,%rdi] v12 = uload16.i32 v1 ; bin: 0f b7 39
[-,%rdi] v12 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%rsi), %edx
[-,%rdx] v13 = uload16.i32 v2 ; bin: 0f b7 16
[-,%rdx] v13 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%rcx), %edi
[-,%rdi] v14 = sload16.i32 v1 ; bin: 0f bf 39
[-,%rdi] v14 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%rsi), %edx
[-,%rdx] v15 = sload16.i32 v2 ; bin: 0f bf 16
[-,%rdx] v15 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%rcx), %edi
[-,%rdi] v16 = uload8.i32 v1 ; bin: 0f b6 39
[-,%rdi] v16 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%rsi), %edx
[-,%rdx] v17 = uload8.i32 v2 ; bin: 0f b6 16
[-,%rdx] v17 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%rcx), %edi
[-,%rdi] v18 = sload8.i32 v1 ; bin: 0f be 39
[-,%rdi] v18 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%rsi), %edx
[-,%rdx] v19 = sload8.i32 v2 ; bin: 0f be 16
[-,%rdx] v19 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl 50(%rcx), %edi
[-,%rdi] v20 = load.i32 v1+50 ; bin: 8b 79 32
[-,%rdi] v20 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%rsi), %edx
[-,%rdx] v21 = load.i32 v2-50 ; bin: 8b 56 ce
[-,%rdx] v21 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%rcx), %edi
[-,%rdi] v22 = uload16.i32 v1+50 ; bin: 0f b7 79 32
[-,%rdi] v22 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%rsi), %edx
[-,%rdx] v23 = uload16.i32 v2-50 ; bin: 0f b7 56 ce
[-,%rdx] v23 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%rcx), %edi
[-,%rdi] v24 = sload16.i32 v1+50 ; bin: 0f bf 79 32
[-,%rdi] v24 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%rsi), %edx
[-,%rdx] v25 = sload16.i32 v2-50 ; bin: 0f bf 56 ce
[-,%rdx] v25 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%rcx), %edi
[-,%rdi] v26 = uload8.i32 v1+50 ; bin: 0f b6 79 32
[-,%rdi] v26 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%rsi), %edx
[-,%rdx] v27 = uload8.i32 v2-50 ; bin: 0f b6 56 ce
[-,%rdx] v27 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%rcx), %edi
[-,%rdi] v28 = sload8.i32 v1+50 ; bin: 0f be 79 32
[-,%rdi] v28 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%rsi), %edx
[-,%rdx] v29 = sload8.i32 v2-50 ; bin: 0f be 56 ce
[-,%rdx] v29 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl 50000(%rcx), %edi
[-,%rdi] v30 = load.i32 v1+50000 ; bin: 8b b9 0000c350
[-,%rdi] v30 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%rsi), %edx
[-,%rdx] v31 = load.i32 v2-50000 ; bin: 8b 96 ffff3cb0
[-,%rdx] v31 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%rcx), %edi
[-,%rdi] v32 = uload16.i32 v1+50000 ; bin: 0f b7 b9 0000c350
[-,%rdi] v32 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%rsi), %edx
[-,%rdx] v33 = uload16.i32 v2-50000 ; bin: 0f b7 96 ffff3cb0
[-,%rdx] v33 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%rcx), %edi
[-,%rdi] v34 = sload16.i32 v1+50000 ; bin: 0f bf b9 0000c350
[-,%rdi] v34 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%rsi), %edx
[-,%rdx] v35 = sload16.i32 v2-50000 ; bin: 0f bf 96 ffff3cb0
[-,%rdx] v35 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%rcx), %edi
[-,%rdi] v36 = uload8.i32 v1+50000 ; bin: 0f b6 b9 0000c350
[-,%rdi] v36 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%rsi), %edx
[-,%rdx] v37 = uload8.i32 v2-50000 ; bin: 0f b6 96 ffff3cb0
[-,%rdx] v37 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%rcx), %edi
[-,%rdi] v38 = sload8.i32 v1+50000 ; bin: 0f be b9 0000c350
[-,%rdi] v38 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%rsi), %edx
[-,%rdx] v39 = sload8.i32 v2-50000 ; bin: 0f be 96 ffff3cb0
[-,%rdx] v39 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Integer Register-Register Operations.
@@ -924,17 +924,17 @@ ebb0:
[-,%rax] v160 = iconst.i32 1
[-,%rdx] v161 = iconst.i32 2
; asm: idivl %ecx
[-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: f7 f9
[-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: int_divz f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: f7 fe
[-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: int_divz f7 fe
; asm: idivl %r10d
[-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: 41 f7 fa
[-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: int_divz 41 f7 fa
; asm: divl %ecx
[-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: f7 f1
[-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: int_divz f7 f1
; asm: divl %esi
[-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: f7 f6
[-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: int_divz f7 f6
; asm: divl %r10d
[-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: 41 f7 f2
[-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: int_divz 41 f7 f2
; Bit-counting instructions.
@@ -1144,7 +1144,7 @@ ebb0:
; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b6 ca
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Tests for i32/i16 conversion instructions.
@@ -1172,7 +1172,7 @@ ebb0:
; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b7 ca
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i8 conversion instructions.
@@ -1200,7 +1200,7 @@ ebb0:
; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b6 ca
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i16 conversion instructions.
@@ -1228,7 +1228,7 @@ ebb0:
; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b7 ca
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}
; Tests for i64/i32 conversion instructions.
@@ -1256,5 +1256,5 @@ ebb0:
; asm: movl %r10d, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 44 89 d1
trap user0 ; bin: 0f 0b
trap user0 ; bin: user0 0f 0b
}


@@ -44,6 +44,18 @@ impl binemit::RelocSink for PrintRelocs {
    }
}

struct PrintTraps {
    flag_print: bool,
}

impl binemit::TrapSink for PrintTraps {
    fn trap(&mut self, offset: binemit::CodeOffset, _srcloc: ir::SourceLoc, code: ir::TrapCode) {
        if self.flag_print {
            println!("trap: {} at {}", code, offset);
        }
    }
}

pub fn run(
    files: Vec<String>,
    flag_print: bool,
@@ -94,8 +106,9 @@ fn handle_module(
    // Encode the result as machine code.
    let mut mem = Vec::new();
    let mut relocs = PrintRelocs { flag_print };
    let mut traps = PrintTraps { flag_print };
    mem.resize(size as usize, 0);
    context.emit_to_memory(mem.as_mut_ptr(), &mut relocs, &*isa);
    context.emit_to_memory(mem.as_mut_ptr(), &mut relocs, &mut traps, &*isa);
    if flag_print {
        print!(".byte ");
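
For comparison with the printing sink above, an embedder could hand emit_to_memory a collecting sink and post-process the recorded entries afterwards. The sketch below reuses the hypothetical CollectTraps type from the note near the top of this page, and assumes context, isa, size, and flag_print are set up as in handle_module; it illustrates the new API shape and is not part of this patch.

    let mut relocs = PrintRelocs { flag_print };
    let mut traps = CollectTraps { entries: Vec::new() };
    let mut mem = vec![0u8; size as usize];
    context.emit_to_memory(mem.as_mut_ptr(), &mut relocs, &mut traps, &*isa);
    // Each recorded entry pairs a code offset with the original source location
    // and trap code, which is enough to build a runtime trap table.
    for (offset, _srcloc, code) in &traps.entries {
        println!("trap: {} at {}", code, offset);
    }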