[WIP] Add a Trap sink to code generation (#279)

* First draft of TrapSink implementation.

* Add trap sink calls to 'trapif' and 'trapff' recipes.

* Add SourceLoc to trap sink calls, and add trap sink calls to all loads and stores.

* Add IntegerDivisionByZero trap to div recipe.

* Only emit load/store traps if 'notrap' flag is not set on the instruction.

* Update filetest machinery to add new trap sink functionality.

* Update filetests to include traps in output.

* Add a few more trap outputs to filetests.

* Add trap output to CLI tool.
Authored by Tyler McMullen on 2018-03-28 22:48:03 -07:00, committed by Dan Gohman.
Parent d566faa8fb, commit 951ff11f85.
12 changed files with 358 additions and 263 deletions
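The `TrapSink` trait introduced in the `binemit` changes below receives one callback per trap site as code is emitted. As a hedged sketch of a consumer — assuming the library is imported as the `cretonne` crate, as the CLI tool in this commit does — a sink that simply records every trap site might look like this (this type is hypothetical, not part of the commit):

extern crate cretonne;

use cretonne::binemit::{CodeOffset, TrapSink};
use cretonne::ir::{SourceLoc, TrapCode};

/// Hypothetical sink (not part of this commit) that records every trap site.
struct CollectTraps {
    sites: Vec<(CodeOffset, SourceLoc, TrapCode)>,
}

impl TrapSink for CollectTraps {
    fn trap(&mut self, offset: CodeOffset, srcloc: SourceLoc, code: TrapCode) {
        // One callback per trapping instruction; `offset` is relative to the
        // start of the emitted function.
        self.sites.push((offset, srcloc, code));
    }
}

An embedder could later use such a table to map a faulting code offset back to a trap code and source location.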


@@ -148,30 +148,30 @@ ebb0:
; Load/Store ; Load/Store
; asm: movss (%ecx), %xmm5 ; asm: movss (%ecx), %xmm5
[-,%xmm5] v100 = load.f32 v0 ; bin: f3 0f 10 29 [-,%xmm5] v100 = load.f32 v0 ; bin: heap_oob f3 0f 10 29
; asm: movss (%esi), %xmm2 ; asm: movss (%esi), %xmm2
[-,%xmm2] v101 = load.f32 v1 ; bin: f3 0f 10 16 [-,%xmm2] v101 = load.f32 v1 ; bin: heap_oob f3 0f 10 16
; asm: movss 50(%ecx), %xmm5 ; asm: movss 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f32 v0+50 ; bin: f3 0f 10 69 32 [-,%xmm5] v110 = load.f32 v0+50 ; bin: heap_oob f3 0f 10 69 32
; asm: movss -50(%esi), %xmm2 ; asm: movss -50(%esi), %xmm2
[-,%xmm2] v111 = load.f32 v1-50 ; bin: f3 0f 10 56 ce [-,%xmm2] v111 = load.f32 v1-50 ; bin: heap_oob f3 0f 10 56 ce
; asm: movss 10000(%ecx), %xmm5 ; asm: movss 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f32 v0+10000 ; bin: f3 0f 10 a9 00002710 [-,%xmm5] v120 = load.f32 v0+10000 ; bin: heap_oob f3 0f 10 a9 00002710
; asm: movss -10000(%esi), %xmm2 ; asm: movss -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f32 v1-10000 ; bin: f3 0f 10 96 ffffd8f0 [-,%xmm2] v121 = load.f32 v1-10000 ; bin: heap_oob f3 0f 10 96 ffffd8f0
; asm: movss %xmm5, (%ecx) ; asm: movss %xmm5, (%ecx)
[-] store.f32 v100, v0 ; bin: f3 0f 11 29 [-] store.f32 v100, v0 ; bin: heap_oob f3 0f 11 29
; asm: movss %xmm2, (%esi) ; asm: movss %xmm2, (%esi)
[-] store.f32 v101, v1 ; bin: f3 0f 11 16 [-] store.f32 v101, v1 ; bin: heap_oob f3 0f 11 16
; asm: movss %xmm5, 50(%ecx) ; asm: movss %xmm5, 50(%ecx)
[-] store.f32 v100, v0+50 ; bin: f3 0f 11 69 32 [-] store.f32 v100, v0+50 ; bin: heap_oob f3 0f 11 69 32
; asm: movss %xmm2, -50(%esi) ; asm: movss %xmm2, -50(%esi)
[-] store.f32 v101, v1-50 ; bin: f3 0f 11 56 ce [-] store.f32 v101, v1-50 ; bin: heap_oob f3 0f 11 56 ce
; asm: movss %xmm5, 10000(%ecx) ; asm: movss %xmm5, 10000(%ecx)
[-] store.f32 v100, v0+10000 ; bin: f3 0f 11 a9 00002710 [-] store.f32 v100, v0+10000 ; bin: heap_oob f3 0f 11 a9 00002710
; asm: movss %xmm2, -10000(%esi) ; asm: movss %xmm2, -10000(%esi)
[-] store.f32 v101, v1-10000 ; bin: f3 0f 11 96 ffffd8f0 [-] store.f32 v101, v1-10000 ; bin: heap_oob f3 0f 11 96 ffffd8f0
; Spill / Fill. ; Spill / Fill.
@@ -363,30 +363,30 @@ ebb0:
; Load/Store ; Load/Store
; asm: movsd (%ecx), %xmm5 ; asm: movsd (%ecx), %xmm5
[-,%xmm5] v100 = load.f64 v0 ; bin: f2 0f 10 29 [-,%xmm5] v100 = load.f64 v0 ; bin: heap_oob f2 0f 10 29
; asm: movsd (%esi), %xmm2 ; asm: movsd (%esi), %xmm2
[-,%xmm2] v101 = load.f64 v1 ; bin: f2 0f 10 16 [-,%xmm2] v101 = load.f64 v1 ; bin: heap_oob f2 0f 10 16
; asm: movsd 50(%ecx), %xmm5 ; asm: movsd 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f64 v0+50 ; bin: f2 0f 10 69 32 [-,%xmm5] v110 = load.f64 v0+50 ; bin: heap_oob f2 0f 10 69 32
; asm: movsd -50(%esi), %xmm2 ; asm: movsd -50(%esi), %xmm2
[-,%xmm2] v111 = load.f64 v1-50 ; bin: f2 0f 10 56 ce [-,%xmm2] v111 = load.f64 v1-50 ; bin: heap_oob f2 0f 10 56 ce
; asm: movsd 10000(%ecx), %xmm5 ; asm: movsd 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f64 v0+10000 ; bin: f2 0f 10 a9 00002710 [-,%xmm5] v120 = load.f64 v0+10000 ; bin: heap_oob f2 0f 10 a9 00002710
; asm: movsd -10000(%esi), %xmm2 ; asm: movsd -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f64 v1-10000 ; bin: f2 0f 10 96 ffffd8f0 [-,%xmm2] v121 = load.f64 v1-10000 ; bin: heap_oob f2 0f 10 96 ffffd8f0
; asm: movsd %xmm5, (%ecx) ; asm: movsd %xmm5, (%ecx)
[-] store.f64 v100, v0 ; bin: f2 0f 11 29 [-] store.f64 v100, v0 ; bin: heap_oob f2 0f 11 29
; asm: movsd %xmm2, (%esi) ; asm: movsd %xmm2, (%esi)
[-] store.f64 v101, v1 ; bin: f2 0f 11 16 [-] store.f64 v101, v1 ; bin: heap_oob f2 0f 11 16
; asm: movsd %xmm5, 50(%ecx) ; asm: movsd %xmm5, 50(%ecx)
[-] store.f64 v100, v0+50 ; bin: f2 0f 11 69 32 [-] store.f64 v100, v0+50 ; bin: heap_oob f2 0f 11 69 32
; asm: movsd %xmm2, -50(%esi) ; asm: movsd %xmm2, -50(%esi)
[-] store.f64 v101, v1-50 ; bin: f2 0f 11 56 ce [-] store.f64 v101, v1-50 ; bin: heap_oob f2 0f 11 56 ce
; asm: movsd %xmm5, 10000(%ecx) ; asm: movsd %xmm5, 10000(%ecx)
[-] store.f64 v100, v0+10000 ; bin: f2 0f 11 a9 00002710 [-] store.f64 v100, v0+10000 ; bin: heap_oob f2 0f 11 a9 00002710
; asm: movsd %xmm2, -10000(%esi) ; asm: movsd %xmm2, -10000(%esi)
[-] store.f64 v101, v1-10000 ; bin: f2 0f 11 96 ffffd8f0 [-] store.f64 v101, v1-10000 ; bin: heap_oob f2 0f 11 96 ffffd8f0
; Spill / Fill. ; Spill / Fill.
@@ -471,21 +471,21 @@ ebb1:
brff ule v1, ebb1 ; bin: 76 f0 brff ule v1, ebb1 ; bin: 76 f0
; asm: jp .+4; ud2 ; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 0f 0b trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2 ; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 0f 0b trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2 ; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 0f 0b trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2 ; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 0f 0b trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2 ; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 0f 0b trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2 ; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 0f 0b trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2 ; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 0f 0b trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2 ; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 0f 0b trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl ; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3 [-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3


@@ -128,13 +128,13 @@ ebb0:
; asm: movl $2, %edx ; asm: movl $2, %edx
[-,%rdx] v53 = iconst.i32 2 ; bin: ba 00000002 [-,%rdx] v53 = iconst.i32 2 ; bin: ba 00000002
; asm: idivl %ecx ; asm: idivl %ecx
[-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: f7 f9 [-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: int_divz f7 f9
; asm: idivl %esi ; asm: idivl %esi
[-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: f7 fe [-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: int_divz f7 fe
; asm: divl %ecx ; asm: divl %ecx
[-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: f7 f1 [-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: int_divz f7 f1
; asm: divl %esi ; asm: divl %esi
[-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: f7 f6 [-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: int_divz f7 f6
; Register copies. ; Register copies.
@@ -155,105 +155,105 @@ ebb0:
; Register indirect addressing with no displacement. ; Register indirect addressing with no displacement.
; asm: movl %ecx, (%esi) ; asm: movl %ecx, (%esi)
store v1, v2 ; bin: 89 0e store v1, v2 ; bin: heap_oob 89 0e
; asm: movl %esi, (%ecx) ; asm: movl %esi, (%ecx)
store v2, v1 ; bin: 89 31 store v2, v1 ; bin: heap_oob 89 31
; asm: movw %cx, (%esi) ; asm: movw %cx, (%esi)
istore16 v1, v2 ; bin: 66 89 0e istore16 v1, v2 ; bin: heap_oob 66 89 0e
; asm: movw %si, (%ecx) ; asm: movw %si, (%ecx)
istore16 v2, v1 ; bin: 66 89 31 istore16 v2, v1 ; bin: heap_oob 66 89 31
; asm: movb %cl, (%esi) ; asm: movb %cl, (%esi)
istore8 v1, v2 ; bin: 88 0e istore8 v1, v2 ; bin: heap_oob 88 0e
; Can't store %sil in 32-bit mode (needs REX prefix). ; Can't store %sil in 32-bit mode (needs REX prefix).
; asm: movl (%ecx), %edi ; asm: movl (%ecx), %edi
[-,%rdi] v100 = load.i32 v1 ; bin: 8b 39 [-,%rdi] v100 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%esi), %edx ; asm: movl (%esi), %edx
[-,%rdx] v101 = load.i32 v2 ; bin: 8b 16 [-,%rdx] v101 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%ecx), %edi ; asm: movzwl (%ecx), %edi
[-,%rdi] v102 = uload16.i32 v1 ; bin: 0f b7 39 [-,%rdi] v102 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%esi), %edx ; asm: movzwl (%esi), %edx
[-,%rdx] v103 = uload16.i32 v2 ; bin: 0f b7 16 [-,%rdx] v103 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%ecx), %edi ; asm: movswl (%ecx), %edi
[-,%rdi] v104 = sload16.i32 v1 ; bin: 0f bf 39 [-,%rdi] v104 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%esi), %edx ; asm: movswl (%esi), %edx
[-,%rdx] v105 = sload16.i32 v2 ; bin: 0f bf 16 [-,%rdx] v105 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%ecx), %edi ; asm: movzbl (%ecx), %edi
[-,%rdi] v106 = uload8.i32 v1 ; bin: 0f b6 39 [-,%rdi] v106 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%esi), %edx ; asm: movzbl (%esi), %edx
[-,%rdx] v107 = uload8.i32 v2 ; bin: 0f b6 16 [-,%rdx] v107 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%ecx), %edi ; asm: movsbl (%ecx), %edi
[-,%rdi] v108 = sload8.i32 v1 ; bin: 0f be 39 [-,%rdi] v108 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%esi), %edx ; asm: movsbl (%esi), %edx
[-,%rdx] v109 = sload8.i32 v2 ; bin: 0f be 16 [-,%rdx] v109 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement. ; Register-indirect with 8-bit signed displacement.
; asm: movl %ecx, 100(%esi) ; asm: movl %ecx, 100(%esi)
store v1, v2+100 ; bin: 89 4e 64 store v1, v2+100 ; bin: heap_oob 89 4e 64
; asm: movl %esi, -100(%ecx) ; asm: movl %esi, -100(%ecx)
store v2, v1-100 ; bin: 89 71 9c store v2, v1-100 ; bin: heap_oob 89 71 9c
; asm: movw %cx, 100(%esi) ; asm: movw %cx, 100(%esi)
istore16 v1, v2+100 ; bin: 66 89 4e 64 istore16 v1, v2+100 ; bin: heap_oob 66 89 4e 64
; asm: movw %si, -100(%ecx) ; asm: movw %si, -100(%ecx)
istore16 v2, v1-100 ; bin: 66 89 71 9c istore16 v2, v1-100 ; bin: heap_oob 66 89 71 9c
; asm: movb %cl, 100(%esi) ; asm: movb %cl, 100(%esi)
istore8 v1, v2+100 ; bin: 88 4e 64 istore8 v1, v2+100 ; bin: heap_oob 88 4e 64
; asm: movl 50(%ecx), %edi ; asm: movl 50(%ecx), %edi
[-,%rdi] v110 = load.i32 v1+50 ; bin: 8b 79 32 [-,%rdi] v110 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%esi), %edx ; asm: movl -50(%esi), %edx
[-,%rdx] v111 = load.i32 v2-50 ; bin: 8b 56 ce [-,%rdx] v111 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%ecx), %edi ; asm: movzwl 50(%ecx), %edi
[-,%rdi] v112 = uload16.i32 v1+50 ; bin: 0f b7 79 32 [-,%rdi] v112 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%esi), %edx ; asm: movzwl -50(%esi), %edx
[-,%rdx] v113 = uload16.i32 v2-50 ; bin: 0f b7 56 ce [-,%rdx] v113 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%ecx), %edi ; asm: movswl 50(%ecx), %edi
[-,%rdi] v114 = sload16.i32 v1+50 ; bin: 0f bf 79 32 [-,%rdi] v114 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%esi), %edx ; asm: movswl -50(%esi), %edx
[-,%rdx] v115 = sload16.i32 v2-50 ; bin: 0f bf 56 ce [-,%rdx] v115 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%ecx), %edi ; asm: movzbl 50(%ecx), %edi
[-,%rdi] v116 = uload8.i32 v1+50 ; bin: 0f b6 79 32 [-,%rdi] v116 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%esi), %edx ; asm: movzbl -50(%esi), %edx
[-,%rdx] v117 = uload8.i32 v2-50 ; bin: 0f b6 56 ce [-,%rdx] v117 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%ecx), %edi ; asm: movsbl 50(%ecx), %edi
[-,%rdi] v118 = sload8.i32 v1+50 ; bin: 0f be 79 32 [-,%rdi] v118 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%esi), %edx ; asm: movsbl -50(%esi), %edx
[-,%rdx] v119 = sload8.i32 v2-50 ; bin: 0f be 56 ce [-,%rdx] v119 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement. ; Register-indirect with 32-bit signed displacement.
; asm: movl %ecx, 10000(%esi) ; asm: movl %ecx, 10000(%esi)
store v1, v2+10000 ; bin: 89 8e 00002710 store v1, v2+10000 ; bin: heap_oob 89 8e 00002710
; asm: movl %esi, -10000(%ecx) ; asm: movl %esi, -10000(%ecx)
store v2, v1-10000 ; bin: 89 b1 ffffd8f0 store v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
; asm: movw %cx, 10000(%esi) ; asm: movw %cx, 10000(%esi)
istore16 v1, v2+10000 ; bin: 66 89 8e 00002710 istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
; asm: movw %si, -10000(%ecx) ; asm: movw %si, -10000(%ecx)
istore16 v2, v1-10000 ; bin: 66 89 b1 ffffd8f0 istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%esi) ; asm: movb %cl, 10000(%esi)
istore8 v1, v2+10000 ; bin: 88 8e 00002710 istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
; asm: movl 50000(%ecx), %edi ; asm: movl 50000(%ecx), %edi
[-,%rdi] v120 = load.i32 v1+50000 ; bin: 8b b9 0000c350 [-,%rdi] v120 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%esi), %edx ; asm: movl -50000(%esi), %edx
[-,%rdx] v121 = load.i32 v2-50000 ; bin: 8b 96 ffff3cb0 [-,%rdx] v121 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%ecx), %edi ; asm: movzwl 50000(%ecx), %edi
[-,%rdi] v122 = uload16.i32 v1+50000 ; bin: 0f b7 b9 0000c350 [-,%rdi] v122 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%esi), %edx ; asm: movzwl -50000(%esi), %edx
[-,%rdx] v123 = uload16.i32 v2-50000 ; bin: 0f b7 96 ffff3cb0 [-,%rdx] v123 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%ecx), %edi ; asm: movswl 50000(%ecx), %edi
[-,%rdi] v124 = sload16.i32 v1+50000 ; bin: 0f bf b9 0000c350 [-,%rdi] v124 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%esi), %edx ; asm: movswl -50000(%esi), %edx
[-,%rdx] v125 = sload16.i32 v2-50000 ; bin: 0f bf 96 ffff3cb0 [-,%rdx] v125 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%ecx), %edi ; asm: movzbl 50000(%ecx), %edi
[-,%rdi] v126 = uload8.i32 v1+50000 ; bin: 0f b6 b9 0000c350 [-,%rdi] v126 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%esi), %edx ; asm: movzbl -50000(%esi), %edx
[-,%rdx] v127 = uload8.i32 v2-50000 ; bin: 0f b6 96 ffff3cb0 [-,%rdx] v127 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%ecx), %edi ; asm: movsbl 50000(%ecx), %edi
[-,%rdi] v128 = sload8.i32 v1+50000 ; bin: 0f be b9 0000c350 [-,%rdi] v128 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%esi), %edx ; asm: movsbl -50000(%esi), %edx
[-,%rdx] v129 = sload8.i32 v2-50000 ; bin: 0f be 96 ffff3cb0 [-,%rdx] v129 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Bit-counting instructions. ; Bit-counting instructions.
@@ -437,7 +437,7 @@ ebb1:
; asm: ebb2: ; asm: ebb2:
ebb2: ebb2:
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Special branch encodings only for I32 mode. ; Special branch encodings only for I32 mode.
@@ -524,25 +524,25 @@ ebb1:
; The trapif instructions are encoded as macros: a conditional jump over a ud2. ; The trapif instructions are encoded as macros: a conditional jump over a ud2.
; asm: jne .+4; ud2 ; asm: jne .+4; ud2
trapif eq v11, user0 ; bin: 75 02 0f 0b trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
; asm: je .+4; ud2 ; asm: je .+4; ud2
trapif ne v11, user0 ; bin: 74 02 0f 0b trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
; asm: jnl .+4; ud2 ; asm: jnl .+4; ud2
trapif slt v11, user0 ; bin: 7d 02 0f 0b trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
; asm: jnge .+4; ud2 ; asm: jnge .+4; ud2
trapif sge v11, user0 ; bin: 7c 02 0f 0b trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
; asm: jng .+4; ud2 ; asm: jng .+4; ud2
trapif sgt v11, user0 ; bin: 7e 02 0f 0b trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
; asm: jnle .+4; ud2 ; asm: jnle .+4; ud2
trapif sle v11, user0 ; bin: 7f 02 0f 0b trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
; asm: jnb .+4; ud2 ; asm: jnb .+4; ud2
trapif ult v11, user0 ; bin: 73 02 0f 0b trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
; asm: jnae .+4; ud2 ; asm: jnae .+4; ud2
trapif uge v11, user0 ; bin: 72 02 0f 0b trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
; asm: jna .+4; ud2 ; asm: jna .+4; ud2
trapif ugt v11, user0 ; bin: 76 02 0f 0b trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
; asm: jnbe .+4; ud2 ; asm: jnbe .+4; ud2
trapif ule v11, user0 ; bin: 77 02 0f 0b trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
; Stack check. ; Stack check.
; asm: cmpl %esp, %ecx ; asm: cmpl %esp, %ecx
@@ -576,7 +576,7 @@ ebb0:
; asm: movzbl %cl, %esi ; asm: movzbl %cl, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b6 f1 [-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b6 f1
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Tests for i32/i16 conversion instructions. ; Tests for i32/i16 conversion instructions.
@@ -592,5 +592,5 @@ ebb0:
; asm: movzwl %cx, %esi ; asm: movzwl %cx, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b7 f1 [-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b7 f1
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }


@@ -158,34 +158,34 @@ ebb0:
; Load/Store ; Load/Store
; asm: movss (%r14), %xmm5 ; asm: movss (%r14), %xmm5
[-,%xmm5] v100 = load.f32 v3 ; bin: f3 41 0f 10 2e [-,%xmm5] v100 = load.f32 v3 ; bin: heap_oob f3 41 0f 10 2e
; asm: movss (%rax), %xmm10 ; asm: movss (%rax), %xmm10
[-,%xmm10] v101 = load.f32 v2 ; bin: f3 44 0f 10 10 [-,%xmm10] v101 = load.f32 v2 ; bin: heap_oob f3 44 0f 10 10
; asm: movss 50(%r14), %xmm5 ; asm: movss 50(%r14), %xmm5
[-,%xmm5] v110 = load.f32 v3+50 ; bin: f3 41 0f 10 6e 32 [-,%xmm5] v110 = load.f32 v3+50 ; bin: heap_oob f3 41 0f 10 6e 32
; asm: movss -50(%rax), %xmm10 ; asm: movss -50(%rax), %xmm10
[-,%xmm10] v111 = load.f32 v2-50 ; bin: f3 44 0f 10 50 ce [-,%xmm10] v111 = load.f32 v2-50 ; bin: heap_oob f3 44 0f 10 50 ce
; asm: movss 10000(%r14), %xmm5 ; asm: movss 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f32 v3+10000 ; bin: f3 41 0f 10 ae 00002710 [-,%xmm5] v120 = load.f32 v3+10000 ; bin: heap_oob f3 41 0f 10 ae 00002710
; asm: movss -10000(%rax), %xmm10 ; asm: movss -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f32 v2-10000 ; bin: f3 44 0f 10 90 ffffd8f0 [-,%xmm10] v121 = load.f32 v2-10000 ; bin: heap_oob f3 44 0f 10 90 ffffd8f0
; asm: movss %xmm5, (%r14) ; asm: movss %xmm5, (%r14)
[-] store.f32 v100, v3 ; bin: f3 41 0f 11 2e [-] store.f32 v100, v3 ; bin: heap_oob f3 41 0f 11 2e
; asm: movss %xmm10, (%rax) ; asm: movss %xmm10, (%rax)
[-] store.f32 v101, v2 ; bin: f3 44 0f 11 10 [-] store.f32 v101, v2 ; bin: heap_oob f3 44 0f 11 10
; asm: movss %xmm5, (%r13) ; asm: movss %xmm5, (%r13)
[-] store.f32 v100, v4 ; bin: f3 41 0f 11 6d 00 [-] store.f32 v100, v4 ; bin: heap_oob f3 41 0f 11 6d 00
; asm: movss %xmm10, (%r13) ; asm: movss %xmm10, (%r13)
[-] store.f32 v101, v4 ; bin: f3 45 0f 11 55 00 [-] store.f32 v101, v4 ; bin: heap_oob f3 45 0f 11 55 00
; asm: movss %xmm5, 50(%r14) ; asm: movss %xmm5, 50(%r14)
[-] store.f32 v100, v3+50 ; bin: f3 41 0f 11 6e 32 [-] store.f32 v100, v3+50 ; bin: heap_oob f3 41 0f 11 6e 32
; asm: movss %xmm10, -50(%rax) ; asm: movss %xmm10, -50(%rax)
[-] store.f32 v101, v2-50 ; bin: f3 44 0f 11 50 ce [-] store.f32 v101, v2-50 ; bin: heap_oob f3 44 0f 11 50 ce
; asm: movss %xmm5, 10000(%r14) ; asm: movss %xmm5, 10000(%r14)
[-] store.f32 v100, v3+10000 ; bin: f3 41 0f 11 ae 00002710 [-] store.f32 v100, v3+10000 ; bin: heap_oob f3 41 0f 11 ae 00002710
; asm: movss %xmm10, -10000(%rax) ; asm: movss %xmm10, -10000(%rax)
[-] store.f32 v101, v2-10000 ; bin: f3 44 0f 11 90 ffffd8f0 [-] store.f32 v101, v2-10000 ; bin: heap_oob f3 44 0f 11 90 ffffd8f0
; Spill / Fill. ; Spill / Fill.
@@ -393,34 +393,34 @@ ebb0:
; Load/Store ; Load/Store
; asm: movsd (%r14), %xmm5 ; asm: movsd (%r14), %xmm5
[-,%xmm5] v100 = load.f64 v3 ; bin: f2 41 0f 10 2e [-,%xmm5] v100 = load.f64 v3 ; bin: heap_oob f2 41 0f 10 2e
; asm: movsd (%rax), %xmm10 ; asm: movsd (%rax), %xmm10
[-,%xmm10] v101 = load.f64 v2 ; bin: f2 44 0f 10 10 [-,%xmm10] v101 = load.f64 v2 ; bin: heap_oob f2 44 0f 10 10
; asm: movsd 50(%r14), %xmm5 ; asm: movsd 50(%r14), %xmm5
[-,%xmm5] v110 = load.f64 v3+50 ; bin: f2 41 0f 10 6e 32 [-,%xmm5] v110 = load.f64 v3+50 ; bin: heap_oob f2 41 0f 10 6e 32
; asm: movsd -50(%rax), %xmm10 ; asm: movsd -50(%rax), %xmm10
[-,%xmm10] v111 = load.f64 v2-50 ; bin: f2 44 0f 10 50 ce [-,%xmm10] v111 = load.f64 v2-50 ; bin: heap_oob f2 44 0f 10 50 ce
; asm: movsd 10000(%r14), %xmm5 ; asm: movsd 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f64 v3+10000 ; bin: f2 41 0f 10 ae 00002710 [-,%xmm5] v120 = load.f64 v3+10000 ; bin: heap_oob f2 41 0f 10 ae 00002710
; asm: movsd -10000(%rax), %xmm10 ; asm: movsd -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f64 v2-10000 ; bin: f2 44 0f 10 90 ffffd8f0 [-,%xmm10] v121 = load.f64 v2-10000 ; bin: heap_oob f2 44 0f 10 90 ffffd8f0
; asm: movsd %xmm5, (%r14) ; asm: movsd %xmm5, (%r14)
[-] store.f64 v100, v3 ; bin: f2 41 0f 11 2e [-] store.f64 v100, v3 ; bin: heap_oob f2 41 0f 11 2e
; asm: movsd %xmm10, (%rax) ; asm: movsd %xmm10, (%rax)
[-] store.f64 v101, v2 ; bin: f2 44 0f 11 10 [-] store.f64 v101, v2 ; bin: heap_oob f2 44 0f 11 10
; asm: movsd %xmm5, (%r13) ; asm: movsd %xmm5, (%r13)
[-] store.f64 v100, v4 ; bin: f2 41 0f 11 6d 00 [-] store.f64 v100, v4 ; bin: heap_oob f2 41 0f 11 6d 00
; asm: movsd %xmm10, (%r13) ; asm: movsd %xmm10, (%r13)
[-] store.f64 v101, v4 ; bin: f2 45 0f 11 55 00 [-] store.f64 v101, v4 ; bin: heap_oob f2 45 0f 11 55 00
; asm: movsd %xmm5, 50(%r14) ; asm: movsd %xmm5, 50(%r14)
[-] store.f64 v100, v3+50 ; bin: f2 41 0f 11 6e 32 [-] store.f64 v100, v3+50 ; bin: heap_oob f2 41 0f 11 6e 32
; asm: movsd %xmm10, -50(%rax) ; asm: movsd %xmm10, -50(%rax)
[-] store.f64 v101, v2-50 ; bin: f2 44 0f 11 50 ce [-] store.f64 v101, v2-50 ; bin: heap_oob f2 44 0f 11 50 ce
; asm: movsd %xmm5, 10000(%r14) ; asm: movsd %xmm5, 10000(%r14)
[-] store.f64 v100, v3+10000 ; bin: f2 41 0f 11 ae 00002710 [-] store.f64 v100, v3+10000 ; bin: heap_oob f2 41 0f 11 ae 00002710
; asm: movsd %xmm10, -10000(%rax) ; asm: movsd %xmm10, -10000(%rax)
[-] store.f64 v101, v2-10000 ; bin: f2 44 0f 11 90 ffffd8f0 [-] store.f64 v101, v2-10000 ; bin: heap_oob f2 44 0f 11 90 ffffd8f0
; Spill / Fill. ; Spill / Fill.
@@ -505,21 +505,21 @@ ebb1:
brff ule v1, ebb1 ; bin: 76 f0 brff ule v1, ebb1 ; bin: 76 f0
; asm: jp .+4; ud2 ; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 0f 0b trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2 ; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 0f 0b trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2 ; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 0f 0b trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2 ; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 0f 0b trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2 ; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 0f 0b trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2 ; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 0f 0b trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2 ; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 0f 0b trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2 ; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 0f 0b trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl ; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3 [-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3


@@ -175,146 +175,146 @@ ebb0:
; Register indirect addressing with no displacement. ; Register indirect addressing with no displacement.
; asm: movq %rcx, (%r10) ; asm: movq %rcx, (%r10)
store v1, v3 ; bin: 49 89 0a store v1, v3 ; bin: heap_oob 49 89 0a
; asm: movq %r10, (%rcx) ; asm: movq %r10, (%rcx)
store v3, v1 ; bin: 4c 89 11 store v3, v1 ; bin: heap_oob 4c 89 11
; asm: movl %ecx, (%r10) ; asm: movl %ecx, (%r10)
istore32 v1, v3 ; bin: 41 89 0a istore32 v1, v3 ; bin: heap_oob 41 89 0a
; asm: movl %r10d, (%rcx) ; asm: movl %r10d, (%rcx)
istore32 v3, v1 ; bin: 44 89 11 istore32 v3, v1 ; bin: heap_oob 44 89 11
; asm: movw %cx, (%r10) ; asm: movw %cx, (%r10)
istore16 v1, v3 ; bin: 66 41 89 0a istore16 v1, v3 ; bin: heap_oob 66 41 89 0a
; asm: movw %r10w, (%rcx) ; asm: movw %r10w, (%rcx)
istore16 v3, v1 ; bin: 66 44 89 11 istore16 v3, v1 ; bin: heap_oob 66 44 89 11
; asm: movb %cl, (%r10) ; asm: movb %cl, (%r10)
istore8 v1, v3 ; bin: 41 88 0a istore8 v1, v3 ; bin: heap_oob 41 88 0a
; asm: movb %r10b, (%rcx) ; asm: movb %r10b, (%rcx)
istore8 v3, v1 ; bin: 44 88 11 istore8 v3, v1 ; bin: heap_oob 44 88 11
; asm: movq (%rcx), %r14 ; asm: movq (%rcx), %r14
[-,%r14] v120 = load.i64 v1 ; bin: 4c 8b 31 [-,%r14] v120 = load.i64 v1 ; bin: heap_oob 4c 8b 31
; asm: movq (%r10), %rdx ; asm: movq (%r10), %rdx
[-,%rdx] v121 = load.i64 v3 ; bin: 49 8b 12 [-,%rdx] v121 = load.i64 v3 ; bin: heap_oob 49 8b 12
; asm: movl (%rcx), %r14d ; asm: movl (%rcx), %r14d
[-,%r14] v122 = uload32.i64 v1 ; bin: 44 8b 31 [-,%r14] v122 = uload32.i64 v1 ; bin: heap_oob 44 8b 31
; asm: movl (%r10), %edx ; asm: movl (%r10), %edx
[-,%rdx] v123 = uload32.i64 v3 ; bin: 41 8b 12 [-,%rdx] v123 = uload32.i64 v3 ; bin: heap_oob 41 8b 12
; asm: movslq (%rcx), %r14 ; asm: movslq (%rcx), %r14
[-,%r14] v124 = sload32.i64 v1 ; bin: 4c 63 31 [-,%r14] v124 = sload32.i64 v1 ; bin: heap_oob 4c 63 31
; asm: movslq (%r10), %rdx ; asm: movslq (%r10), %rdx
[-,%rdx] v125 = sload32.i64 v3 ; bin: 49 63 12 [-,%rdx] v125 = sload32.i64 v3 ; bin: heap_oob 49 63 12
; asm: movzwq (%rcx), %r14 ; asm: movzwq (%rcx), %r14
[-,%r14] v126 = uload16.i64 v1 ; bin: 4c 0f b7 31 [-,%r14] v126 = uload16.i64 v1 ; bin: heap_oob 4c 0f b7 31
; asm: movzwq (%r10), %rdx ; asm: movzwq (%r10), %rdx
[-,%rdx] v127 = uload16.i64 v3 ; bin: 49 0f b7 12 [-,%rdx] v127 = uload16.i64 v3 ; bin: heap_oob 49 0f b7 12
; asm: movswq (%rcx), %r14 ; asm: movswq (%rcx), %r14
[-,%r14] v128 = sload16.i64 v1 ; bin: 4c 0f bf 31 [-,%r14] v128 = sload16.i64 v1 ; bin: heap_oob 4c 0f bf 31
; asm: movswq (%r10), %rdx ; asm: movswq (%r10), %rdx
[-,%rdx] v129 = sload16.i64 v3 ; bin: 49 0f bf 12 [-,%rdx] v129 = sload16.i64 v3 ; bin: heap_oob 49 0f bf 12
; asm: movzbq (%rcx), %r14 ; asm: movzbq (%rcx), %r14
[-,%r14] v130 = uload8.i64 v1 ; bin: 4c 0f b6 31 [-,%r14] v130 = uload8.i64 v1 ; bin: heap_oob 4c 0f b6 31
; asm: movzbq (%r10), %rdx ; asm: movzbq (%r10), %rdx
[-,%rdx] v131 = uload8.i64 v3 ; bin: 49 0f b6 12 [-,%rdx] v131 = uload8.i64 v3 ; bin: heap_oob 49 0f b6 12
; asm: movsbq (%rcx), %r14 ; asm: movsbq (%rcx), %r14
[-,%r14] v132 = sload8.i64 v1 ; bin: 4c 0f be 31 [-,%r14] v132 = sload8.i64 v1 ; bin: heap_oob 4c 0f be 31
; asm: movsbq (%r10), %rdx ; asm: movsbq (%r10), %rdx
[-,%rdx] v133 = sload8.i64 v3 ; bin: 49 0f be 12 [-,%rdx] v133 = sload8.i64 v3 ; bin: heap_oob 49 0f be 12
; Register-indirect with 8-bit signed displacement. ; Register-indirect with 8-bit signed displacement.
; asm: movq %rcx, 100(%r10) ; asm: movq %rcx, 100(%r10)
store v1, v3+100 ; bin: 49 89 4a 64 store v1, v3+100 ; bin: heap_oob 49 89 4a 64
; asm: movq %r10, -100(%rcx) ; asm: movq %r10, -100(%rcx)
store v3, v1-100 ; bin: 4c 89 51 9c store v3, v1-100 ; bin: heap_oob 4c 89 51 9c
; asm: movl %ecx, 100(%r10) ; asm: movl %ecx, 100(%r10)
istore32 v1, v3+100 ; bin: 41 89 4a 64 istore32 v1, v3+100 ; bin: heap_oob 41 89 4a 64
; asm: movl %r10d, -100(%rcx) ; asm: movl %r10d, -100(%rcx)
istore32 v3, v1-100 ; bin: 44 89 51 9c istore32 v3, v1-100 ; bin: heap_oob 44 89 51 9c
; asm: movw %cx, 100(%r10) ; asm: movw %cx, 100(%r10)
istore16 v1, v3+100 ; bin: 66 41 89 4a 64 istore16 v1, v3+100 ; bin: heap_oob 66 41 89 4a 64
; asm: movw %r10w, -100(%rcx) ; asm: movw %r10w, -100(%rcx)
istore16 v3, v1-100 ; bin: 66 44 89 51 9c istore16 v3, v1-100 ; bin: heap_oob 66 44 89 51 9c
; asm: movb %cl, 100(%r10) ; asm: movb %cl, 100(%r10)
istore8 v1, v3+100 ; bin: 41 88 4a 64 istore8 v1, v3+100 ; bin: heap_oob 41 88 4a 64
; asm: movb %r10b, 100(%rcx) ; asm: movb %r10b, 100(%rcx)
istore8 v3, v1+100 ; bin: 44 88 51 64 istore8 v3, v1+100 ; bin: heap_oob 44 88 51 64
; asm: movq 50(%rcx), %r10 ; asm: movq 50(%rcx), %r10
[-,%r10] v140 = load.i64 v1+50 ; bin: 4c 8b 51 32 [-,%r10] v140 = load.i64 v1+50 ; bin: heap_oob 4c 8b 51 32
; asm: movq -50(%r10), %rdx ; asm: movq -50(%r10), %rdx
[-,%rdx] v141 = load.i64 v3-50 ; bin: 49 8b 52 ce [-,%rdx] v141 = load.i64 v3-50 ; bin: heap_oob 49 8b 52 ce
; asm: movl 50(%rcx), %edi ; asm: movl 50(%rcx), %edi
[-,%rdi] v142 = uload32.i64 v1+50 ; bin: 8b 79 32 [-,%rdi] v142 = uload32.i64 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%rsi), %edx ; asm: movl -50(%rsi), %edx
[-,%rdx] v143 = uload32.i64 v2-50 ; bin: 8b 56 ce [-,%rdx] v143 = uload32.i64 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movslq 50(%rcx), %rdi ; asm: movslq 50(%rcx), %rdi
[-,%rdi] v144 = sload32.i64 v1+50 ; bin: 48 63 79 32 [-,%rdi] v144 = sload32.i64 v1+50 ; bin: heap_oob 48 63 79 32
; asm: movslq -50(%rsi), %rdx ; asm: movslq -50(%rsi), %rdx
[-,%rdx] v145 = sload32.i64 v2-50 ; bin: 48 63 56 ce [-,%rdx] v145 = sload32.i64 v2-50 ; bin: heap_oob 48 63 56 ce
; asm: movzwq 50(%rcx), %rdi ; asm: movzwq 50(%rcx), %rdi
[-,%rdi] v146 = uload16.i64 v1+50 ; bin: 48 0f b7 79 32 [-,%rdi] v146 = uload16.i64 v1+50 ; bin: heap_oob 48 0f b7 79 32
; asm: movzwq -50(%rsi), %rdx ; asm: movzwq -50(%rsi), %rdx
[-,%rdx] v147 = uload16.i64 v2-50 ; bin: 48 0f b7 56 ce [-,%rdx] v147 = uload16.i64 v2-50 ; bin: heap_oob 48 0f b7 56 ce
; asm: movswq 50(%rcx), %rdi ; asm: movswq 50(%rcx), %rdi
[-,%rdi] v148 = sload16.i64 v1+50 ; bin: 48 0f bf 79 32 [-,%rdi] v148 = sload16.i64 v1+50 ; bin: heap_oob 48 0f bf 79 32
; asm: movswq -50(%rsi), %rdx ; asm: movswq -50(%rsi), %rdx
[-,%rdx] v149 = sload16.i64 v2-50 ; bin: 48 0f bf 56 ce [-,%rdx] v149 = sload16.i64 v2-50 ; bin: heap_oob 48 0f bf 56 ce
; asm: movzbq 50(%rcx), %rdi ; asm: movzbq 50(%rcx), %rdi
[-,%rdi] v150 = uload8.i64 v1+50 ; bin: 48 0f b6 79 32 [-,%rdi] v150 = uload8.i64 v1+50 ; bin: heap_oob 48 0f b6 79 32
; asm: movzbq -50(%rsi), %rdx ; asm: movzbq -50(%rsi), %rdx
[-,%rdx] v151 = uload8.i64 v2-50 ; bin: 48 0f b6 56 ce [-,%rdx] v151 = uload8.i64 v2-50 ; bin: heap_oob 48 0f b6 56 ce
; asm: movsbq 50(%rcx), %rdi ; asm: movsbq 50(%rcx), %rdi
[-,%rdi] v152 = sload8.i64 v1+50 ; bin: 48 0f be 79 32 [-,%rdi] v152 = sload8.i64 v1+50 ; bin: heap_oob 48 0f be 79 32
; asm: movsbq -50(%rsi), %rdx ; asm: movsbq -50(%rsi), %rdx
[-,%rdx] v153 = sload8.i64 v2-50 ; bin: 48 0f be 56 ce [-,%rdx] v153 = sload8.i64 v2-50 ; bin: heap_oob 48 0f be 56 ce
; Register-indirect with 32-bit signed displacement. ; Register-indirect with 32-bit signed displacement.
; asm: movq %rcx, 10000(%r10) ; asm: movq %rcx, 10000(%r10)
store v1, v3+10000 ; bin: 49 89 8a 00002710 store v1, v3+10000 ; bin: heap_oob 49 89 8a 00002710
; asm: movq %r10, -10000(%rcx) ; asm: movq %r10, -10000(%rcx)
store v3, v1-10000 ; bin: 4c 89 91 ffffd8f0 store v3, v1-10000 ; bin: heap_oob 4c 89 91 ffffd8f0
; asm: movl %ecx, 10000(%rsi) ; asm: movl %ecx, 10000(%rsi)
istore32 v1, v2+10000 ; bin: 89 8e 00002710 istore32 v1, v2+10000 ; bin: heap_oob 89 8e 00002710
; asm: movl %esi, -10000(%rcx) ; asm: movl %esi, -10000(%rcx)
istore32 v2, v1-10000 ; bin: 89 b1 ffffd8f0 istore32 v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
; asm: movw %cx, 10000(%rsi) ; asm: movw %cx, 10000(%rsi)
istore16 v1, v2+10000 ; bin: 66 89 8e 00002710 istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
; asm: movw %si, -10000(%rcx) ; asm: movw %si, -10000(%rcx)
istore16 v2, v1-10000 ; bin: 66 89 b1 ffffd8f0 istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%rsi) ; asm: movb %cl, 10000(%rsi)
istore8 v1, v2+10000 ; bin: 88 8e 00002710 istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
; asm: movb %sil, 10000(%rcx) ; asm: movb %sil, 10000(%rcx)
istore8 v2, v1+10000 ; bin: 40 88 b1 00002710 istore8 v2, v1+10000 ; bin: heap_oob 40 88 b1 00002710
; asm: movq 50000(%rcx), %r10 ; asm: movq 50000(%rcx), %r10
[-,%r10] v160 = load.i64 v1+50000 ; bin: 4c 8b 91 0000c350 [-,%r10] v160 = load.i64 v1+50000 ; bin: heap_oob 4c 8b 91 0000c350
; asm: movq -50000(%r10), %rdx ; asm: movq -50000(%r10), %rdx
[-,%rdx] v161 = load.i64 v3-50000 ; bin: 49 8b 92 ffff3cb0 [-,%rdx] v161 = load.i64 v3-50000 ; bin: heap_oob 49 8b 92 ffff3cb0
; asm: movl 50000(%rcx), %edi ; asm: movl 50000(%rcx), %edi
[-,%rdi] v162 = uload32.i64 v1+50000 ; bin: 8b b9 0000c350 [-,%rdi] v162 = uload32.i64 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%rsi), %edx ; asm: movl -50000(%rsi), %edx
[-,%rdx] v163 = uload32.i64 v2-50000 ; bin: 8b 96 ffff3cb0 [-,%rdx] v163 = uload32.i64 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movslq 50000(%rcx), %rdi ; asm: movslq 50000(%rcx), %rdi
[-,%rdi] v164 = sload32.i64 v1+50000 ; bin: 48 63 b9 0000c350 [-,%rdi] v164 = sload32.i64 v1+50000 ; bin: heap_oob 48 63 b9 0000c350
; asm: movslq -50000(%rsi), %rdx ; asm: movslq -50000(%rsi), %rdx
[-,%rdx] v165 = sload32.i64 v2-50000 ; bin: 48 63 96 ffff3cb0 [-,%rdx] v165 = sload32.i64 v2-50000 ; bin: heap_oob 48 63 96 ffff3cb0
; asm: movzwq 50000(%rcx), %rdi ; asm: movzwq 50000(%rcx), %rdi
[-,%rdi] v166 = uload16.i64 v1+50000 ; bin: 48 0f b7 b9 0000c350 [-,%rdi] v166 = uload16.i64 v1+50000 ; bin: heap_oob 48 0f b7 b9 0000c350
; asm: movzwq -50000(%rsi), %rdx ; asm: movzwq -50000(%rsi), %rdx
[-,%rdx] v167 = uload16.i64 v2-50000 ; bin: 48 0f b7 96 ffff3cb0 [-,%rdx] v167 = uload16.i64 v2-50000 ; bin: heap_oob 48 0f b7 96 ffff3cb0
; asm: movswq 50000(%rcx), %rdi ; asm: movswq 50000(%rcx), %rdi
[-,%rdi] v168 = sload16.i64 v1+50000 ; bin: 48 0f bf b9 0000c350 [-,%rdi] v168 = sload16.i64 v1+50000 ; bin: heap_oob 48 0f bf b9 0000c350
; asm: movswq -50000(%rsi), %rdx ; asm: movswq -50000(%rsi), %rdx
[-,%rdx] v169 = sload16.i64 v2-50000 ; bin: 48 0f bf 96 ffff3cb0 [-,%rdx] v169 = sload16.i64 v2-50000 ; bin: heap_oob 48 0f bf 96 ffff3cb0
; asm: movzbq 50000(%rcx), %rdi ; asm: movzbq 50000(%rcx), %rdi
[-,%rdi] v170 = uload8.i64 v1+50000 ; bin: 48 0f b6 b9 0000c350 [-,%rdi] v170 = uload8.i64 v1+50000 ; bin: heap_oob 48 0f b6 b9 0000c350
; asm: movzbq -50000(%rsi), %rdx ; asm: movzbq -50000(%rsi), %rdx
[-,%rdx] v171 = uload8.i64 v2-50000 ; bin: 48 0f b6 96 ffff3cb0 [-,%rdx] v171 = uload8.i64 v2-50000 ; bin: heap_oob 48 0f b6 96 ffff3cb0
; asm: movsbq 50000(%rcx), %rdi ; asm: movsbq 50000(%rcx), %rdi
[-,%rdi] v172 = sload8.i64 v1+50000 ; bin: 48 0f be b9 0000c350 [-,%rdi] v172 = sload8.i64 v1+50000 ; bin: heap_oob 48 0f be b9 0000c350
; asm: movsbq -50000(%rsi), %rdx ; asm: movsbq -50000(%rsi), %rdx
[-,%rdx] v173 = sload8.i64 v2-50000 ; bin: 48 0f be 96 ffff3cb0 [-,%rdx] v173 = sload8.i64 v2-50000 ; bin: heap_oob 48 0f be 96 ffff3cb0
; More arithmetic. ; More arithmetic.
@@ -329,17 +329,17 @@ ebb0:
[-,%rax] v190 = iconst.i64 1 [-,%rax] v190 = iconst.i64 1
[-,%rdx] v191 = iconst.i64 2 [-,%rdx] v191 = iconst.i64 2
; asm: idivq %rcx ; asm: idivq %rcx
[-,%rax,%rdx] v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: 48 f7 f9 [-,%rax,%rdx] v192, v193 = x86_sdivmodx v190, v191, v1 ; bin: int_divz 48 f7 f9
; asm: idivq %rsi ; asm: idivq %rsi
[-,%rax,%rdx] v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: 48 f7 fe [-,%rax,%rdx] v194, v195 = x86_sdivmodx v190, v191, v2 ; bin: int_divz 48 f7 fe
; asm: idivq %r10 ; asm: idivq %r10
[-,%rax,%rdx] v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: 49 f7 fa [-,%rax,%rdx] v196, v197 = x86_sdivmodx v190, v191, v3 ; bin: int_divz 49 f7 fa
; asm: divq %rcx ; asm: divq %rcx
[-,%rax,%rdx] v198, v199 = x86_udivmodx v190, v191, v1 ; bin: 48 f7 f1 [-,%rax,%rdx] v198, v199 = x86_udivmodx v190, v191, v1 ; bin: int_divz 48 f7 f1
; asm: divq %rsi ; asm: divq %rsi
[-,%rax,%rdx] v200, v201 = x86_udivmodx v190, v191, v2 ; bin: 48 f7 f6 [-,%rax,%rdx] v200, v201 = x86_udivmodx v190, v191, v2 ; bin: int_divz 48 f7 f6
; asm: divq %r10 ; asm: divq %r10
[-,%rax,%rdx] v202, v203 = x86_udivmodx v190, v191, v3 ; bin: 49 f7 f2 [-,%rax,%rdx] v202, v203 = x86_udivmodx v190, v191, v3 ; bin: int_divz 49 f7 f2
; double-length multiply instructions, 64 bit ; double-length multiply instructions, 64 bit
[-,%rax] v1001 = iconst.i64 1 [-,%rax] v1001 = iconst.i64 1
@@ -637,25 +637,25 @@ ebb1:
; The trapif instructions are encoded as macros: a conditional jump over a ud2. ; The trapif instructions are encoded as macros: a conditional jump over a ud2.
; asm: jne .+4; ud2 ; asm: jne .+4; ud2
trapif eq v11, user0 ; bin: 75 02 0f 0b trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
; asm: je .+4; ud2 ; asm: je .+4; ud2
trapif ne v11, user0 ; bin: 74 02 0f 0b trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
; asm: jnl .+4; ud2 ; asm: jnl .+4; ud2
trapif slt v11, user0 ; bin: 7d 02 0f 0b trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
; asm: jnge .+4; ud2 ; asm: jnge .+4; ud2
trapif sge v11, user0 ; bin: 7c 02 0f 0b trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
; asm: jng .+4; ud2 ; asm: jng .+4; ud2
trapif sgt v11, user0 ; bin: 7e 02 0f 0b trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
; asm: jnle .+4; ud2 ; asm: jnle .+4; ud2
trapif sle v11, user0 ; bin: 7f 02 0f 0b trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
; asm: jnb .+4; ud2 ; asm: jnb .+4; ud2
trapif ult v11, user0 ; bin: 73 02 0f 0b trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
; asm: jnae .+4; ud2 ; asm: jnae .+4; ud2
trapif uge v11, user0 ; bin: 72 02 0f 0b trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
; asm: jna .+4; ud2 ; asm: jna .+4; ud2
trapif ugt v11, user0 ; bin: 76 02 0f 0b trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
; asm: jnbe .+4; ud2 ; asm: jnbe .+4; ud2
trapif ule v11, user0 ; bin: 77 02 0f 0b trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
; Stack check. ; Stack check.
; asm: cmpq %rsp, %rcx ; asm: cmpq %rsp, %rcx
@@ -729,71 +729,71 @@ ebb0:
; Register indirect addressing with no displacement. ; Register indirect addressing with no displacement.
; asm: movl (%rcx), %edi ; asm: movl (%rcx), %edi
[-,%rdi] v10 = load.i32 v1 ; bin: 8b 39 [-,%rdi] v10 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%rsi), %edx ; asm: movl (%rsi), %edx
[-,%rdx] v11 = load.i32 v2 ; bin: 8b 16 [-,%rdx] v11 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%rcx), %edi ; asm: movzwl (%rcx), %edi
[-,%rdi] v12 = uload16.i32 v1 ; bin: 0f b7 39 [-,%rdi] v12 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%rsi), %edx ; asm: movzwl (%rsi), %edx
[-,%rdx] v13 = uload16.i32 v2 ; bin: 0f b7 16 [-,%rdx] v13 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%rcx), %edi ; asm: movswl (%rcx), %edi
[-,%rdi] v14 = sload16.i32 v1 ; bin: 0f bf 39 [-,%rdi] v14 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%rsi), %edx ; asm: movswl (%rsi), %edx
[-,%rdx] v15 = sload16.i32 v2 ; bin: 0f bf 16 [-,%rdx] v15 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%rcx), %edi ; asm: movzbl (%rcx), %edi
[-,%rdi] v16 = uload8.i32 v1 ; bin: 0f b6 39 [-,%rdi] v16 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%rsi), %edx ; asm: movzbl (%rsi), %edx
[-,%rdx] v17 = uload8.i32 v2 ; bin: 0f b6 16 [-,%rdx] v17 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%rcx), %edi ; asm: movsbl (%rcx), %edi
[-,%rdi] v18 = sload8.i32 v1 ; bin: 0f be 39 [-,%rdi] v18 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%rsi), %edx ; asm: movsbl (%rsi), %edx
[-,%rdx] v19 = sload8.i32 v2 ; bin: 0f be 16 [-,%rdx] v19 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement. ; Register-indirect with 8-bit signed displacement.
; asm: movl 50(%rcx), %edi ; asm: movl 50(%rcx), %edi
[-,%rdi] v20 = load.i32 v1+50 ; bin: 8b 79 32 [-,%rdi] v20 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%rsi), %edx ; asm: movl -50(%rsi), %edx
[-,%rdx] v21 = load.i32 v2-50 ; bin: 8b 56 ce [-,%rdx] v21 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%rcx), %edi ; asm: movzwl 50(%rcx), %edi
[-,%rdi] v22 = uload16.i32 v1+50 ; bin: 0f b7 79 32 [-,%rdi] v22 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%rsi), %edx ; asm: movzwl -50(%rsi), %edx
[-,%rdx] v23 = uload16.i32 v2-50 ; bin: 0f b7 56 ce [-,%rdx] v23 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%rcx), %edi ; asm: movswl 50(%rcx), %edi
[-,%rdi] v24 = sload16.i32 v1+50 ; bin: 0f bf 79 32 [-,%rdi] v24 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%rsi), %edx ; asm: movswl -50(%rsi), %edx
[-,%rdx] v25 = sload16.i32 v2-50 ; bin: 0f bf 56 ce [-,%rdx] v25 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%rcx), %edi ; asm: movzbl 50(%rcx), %edi
[-,%rdi] v26 = uload8.i32 v1+50 ; bin: 0f b6 79 32 [-,%rdi] v26 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%rsi), %edx ; asm: movzbl -50(%rsi), %edx
[-,%rdx] v27 = uload8.i32 v2-50 ; bin: 0f b6 56 ce [-,%rdx] v27 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%rcx), %edi ; asm: movsbl 50(%rcx), %edi
[-,%rdi] v28 = sload8.i32 v1+50 ; bin: 0f be 79 32 [-,%rdi] v28 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%rsi), %edx ; asm: movsbl -50(%rsi), %edx
[-,%rdx] v29 = sload8.i32 v2-50 ; bin: 0f be 56 ce [-,%rdx] v29 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement. ; Register-indirect with 32-bit signed displacement.
; asm: movl 50000(%rcx), %edi ; asm: movl 50000(%rcx), %edi
[-,%rdi] v30 = load.i32 v1+50000 ; bin: 8b b9 0000c350 [-,%rdi] v30 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%rsi), %edx ; asm: movl -50000(%rsi), %edx
[-,%rdx] v31 = load.i32 v2-50000 ; bin: 8b 96 ffff3cb0 [-,%rdx] v31 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%rcx), %edi ; asm: movzwl 50000(%rcx), %edi
[-,%rdi] v32 = uload16.i32 v1+50000 ; bin: 0f b7 b9 0000c350 [-,%rdi] v32 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%rsi), %edx ; asm: movzwl -50000(%rsi), %edx
[-,%rdx] v33 = uload16.i32 v2-50000 ; bin: 0f b7 96 ffff3cb0 [-,%rdx] v33 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%rcx), %edi ; asm: movswl 50000(%rcx), %edi
[-,%rdi] v34 = sload16.i32 v1+50000 ; bin: 0f bf b9 0000c350 [-,%rdi] v34 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%rsi), %edx ; asm: movswl -50000(%rsi), %edx
[-,%rdx] v35 = sload16.i32 v2-50000 ; bin: 0f bf 96 ffff3cb0 [-,%rdx] v35 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%rcx), %edi ; asm: movzbl 50000(%rcx), %edi
[-,%rdi] v36 = uload8.i32 v1+50000 ; bin: 0f b6 b9 0000c350 [-,%rdi] v36 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%rsi), %edx ; asm: movzbl -50000(%rsi), %edx
[-,%rdx] v37 = uload8.i32 v2-50000 ; bin: 0f b6 96 ffff3cb0 [-,%rdx] v37 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%rcx), %edi ; asm: movsbl 50000(%rcx), %edi
[-,%rdi] v38 = sload8.i32 v1+50000 ; bin: 0f be b9 0000c350 [-,%rdi] v38 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%rsi), %edx ; asm: movsbl -50000(%rsi), %edx
[-,%rdx] v39 = sload8.i32 v2-50000 ; bin: 0f be 96 ffff3cb0 [-,%rdx] v39 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Integer Register-Register Operations. ; Integer Register-Register Operations.
@@ -924,17 +924,17 @@ ebb0:
[-,%rax] v160 = iconst.i32 1 [-,%rax] v160 = iconst.i32 1
[-,%rdx] v161 = iconst.i32 2 [-,%rdx] v161 = iconst.i32 2
; asm: idivl %ecx ; asm: idivl %ecx
[-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: f7 f9 [-,%rax,%rdx] v162, v163 = x86_sdivmodx v160, v161, v1 ; bin: int_divz f7 f9
; asm: idivl %esi ; asm: idivl %esi
[-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: f7 fe [-,%rax,%rdx] v164, v165 = x86_sdivmodx v160, v161, v2 ; bin: int_divz f7 fe
; asm: idivl %r10d ; asm: idivl %r10d
[-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: 41 f7 fa [-,%rax,%rdx] v166, v167 = x86_sdivmodx v160, v161, v3 ; bin: int_divz 41 f7 fa
; asm: divl %ecx ; asm: divl %ecx
[-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: f7 f1 [-,%rax,%rdx] v168, v169 = x86_udivmodx v160, v161, v1 ; bin: int_divz f7 f1
; asm: divl %esi ; asm: divl %esi
[-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: f7 f6 [-,%rax,%rdx] v170, v171 = x86_udivmodx v160, v161, v2 ; bin: int_divz f7 f6
; asm: divl %r10d ; asm: divl %r10d
[-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: 41 f7 f2 [-,%rax,%rdx] v172, v173 = x86_udivmodx v160, v161, v3 ; bin: int_divz 41 f7 f2
; Bit-counting instructions. ; Bit-counting instructions.
@@ -1144,7 +1144,7 @@ ebb0:
; asm: movzbl %r10b, %ecx ; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b6 ca [-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b6 ca
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Tests for i32/i16 conversion instructions. ; Tests for i32/i16 conversion instructions.
@@ -1172,7 +1172,7 @@ ebb0:
; asm: movzwl %r10w, %ecx ; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b7 ca [-,%rcx] v32 = uextend.i32 v13 ; bin: 41 0f b7 ca
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Tests for i64/i8 conversion instructions. ; Tests for i64/i8 conversion instructions.
@@ -1200,7 +1200,7 @@ ebb0:
; asm: movzbl %r10b, %ecx ; asm: movzbl %r10b, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b6 ca [-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b6 ca
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Tests for i64/i16 conversion instructions. ; Tests for i64/i16 conversion instructions.
@@ -1228,7 +1228,7 @@ ebb0:
; asm: movzwl %r10w, %ecx ; asm: movzwl %r10w, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b7 ca [-,%rcx] v32 = uextend.i64 v13 ; bin: 41 0f b7 ca
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }
; Tests for i64/i32 conversion instructions. ; Tests for i64/i32 conversion instructions.
@@ -1256,5 +1256,5 @@ ebb0:
; asm: movl %r10d, %ecx ; asm: movl %r10d, %ecx
[-,%rcx] v32 = uextend.i64 v13 ; bin: 44 89 d1 [-,%rcx] v32 = uextend.i64 v13 ; bin: 44 89 d1
trap user0 ; bin: 0f 0b trap user0 ; bin: user0 0f 0b
} }


@@ -44,6 +44,18 @@ impl binemit::RelocSink for PrintRelocs {
}
}
struct PrintTraps {
flag_print: bool,
}
impl binemit::TrapSink for PrintTraps {
fn trap(&mut self, offset: binemit::CodeOffset, _srcloc: ir::SourceLoc, code: ir::TrapCode) {
if self.flag_print {
println!("trap: {} at {}", code, offset);
}
}
}
pub fn run(
files: Vec<String>,
flag_print: bool,
@@ -94,8 +106,9 @@ fn handle_module(
// Encode the result as machine code.
let mut mem = Vec::new();
let mut relocs = PrintRelocs { flag_print };
let mut traps = PrintTraps { flag_print };
mem.resize(size as usize, 0);
context.emit_to_memory(mem.as_mut_ptr(), &mut relocs, &mut traps, &*isa);
if flag_print {
print!(".byte ");
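For reference, the `PrintTraps` sink above prints one line per recorded trap. A hedged illustration, written as if inside this same CLI source file where `PrintTraps` and the `ir` module are already in scope; the offsets are invented, `SourceLoc::default()` is assumed to mean "no source location", and in real use it is the code sink that makes these calls during emission:

fn demo_print_traps() {
    use cretonne::binemit::TrapSink;
    let mut traps = PrintTraps { flag_print: true };
    traps.trap(4, ir::SourceLoc::default(), ir::TrapCode::HeapOutOfBounds);
    traps.trap(11, ir::SourceLoc::default(), ir::TrapCode::IntegerDivisionByZero);
    // Prints:
    //   trap: heap_oob at 4
    //   trap: int_divz at 11
}

The `heap_oob` and `int_divz` tokens are the same `Display` names the updated filetests check for in their `; bin:` comments.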


@@ -278,7 +278,10 @@ null = EncRecipe('null', Unary, size=0, ins=GPR, outs=0, emit='')
# XX opcode, no ModR/M.
trap = TailRecipe(
'trap', Trap, size=0, ins=(), outs=(),
emit='''
sink.trap(code, func.srclocs[inst]);
PUT_OP(bits, BASE_REX, sink);
''')
# Macro: conditional jump over a ud2.
trapif = EncRecipe(
@@ -289,6 +292,7 @@ trapif = EncRecipe(
sink.put1(0x70 | (icc2opc(cond.inverse()) as u8));
sink.put1(2);
// ud2.
sink.trap(code, func.srclocs[inst]);
sink.put1(0x0f);
sink.put1(0x0b);
''')
@@ -302,6 +306,7 @@ trapff = EncRecipe(
sink.put1(0x70 | (fcc2opc(cond.inverse()) as u8));
sink.put1(2);
// ud2.
sink.trap(code, func.srclocs[inst]);
sink.put1(0x0f);
sink.put1(0x0b);
''')
@@ -450,6 +455,7 @@ div = TailRecipe(
'div', Ternary, size=1,
ins=(GPR.rax, GPR.rdx, GPR), outs=(GPR.rax, GPR.rdx),
emit='''
sink.trap(TrapCode::IntegerDivisionByZero, func.srclocs[inst]);
PUT_OP(bits, rex1(in_reg2), sink);
modrm_r_bits(in_reg2, bits, sink);
''')
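The placement matters: `sink.trap(...)` runs before any opcode bytes, so the recorded `CodeOffset` is the address of the divide instruction itself, which is where a hardware divide fault is reported. As a hedged, hand-written equivalent of what this recipe boils down to for the 32-bit `idivl %ecx` case in the filetests (the hypothetical helper below is not generated code, and the real recipe goes through `PUT_OP`'s prefix/REX handling):

use cretonne::binemit::CodeSink;
use cretonne::ir::{SourceLoc, TrapCode};

fn emit_sdivmodx_ecx<CS: CodeSink + ?Sized>(sink: &mut CS, srcloc: SourceLoc) {
    // Recorded first, so the trap's offset is the start of the idiv itself.
    sink.trap(TrapCode::IntegerDivisionByZero, srcloc);
    sink.put1(0xf7); // opcode
    sink.put1(0xf9); // ModR/M for idivl %ecx, matching "int_divz f7 f9" above
}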
@@ -678,6 +684,9 @@ st = TailRecipe(
instp=IsEqual(Store.offset, 0),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_rm(in_reg1, in_reg0, sink);
''')
@@ -690,6 +699,9 @@ st_abcd = TailRecipe(
when_prefixed=st,
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_rm(in_reg1, in_reg0, sink);
''')
@@ -700,6 +712,9 @@ fst = TailRecipe(
instp=IsEqual(Store.offset, 0),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_rm(in_reg1, in_reg0, sink);
''')
@@ -710,6 +725,9 @@ stDisp8 = TailRecipe(
instp=IsSignedInt(Store.offset, 8),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp8(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -721,6 +739,9 @@ stDisp8_abcd = TailRecipe(
when_prefixed=stDisp8,
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp8(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -731,6 +752,9 @@ fstDisp8 = TailRecipe(
instp=IsSignedInt(Store.offset, 8),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp8(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -742,6 +766,9 @@ stDisp32 = TailRecipe(
'stDisp32', Store, size=5, ins=(GPR, GPR_DEREF_SAFE), outs=(),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp32(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -752,6 +779,9 @@ stDisp32_abcd = TailRecipe(
when_prefixed=stDisp32,
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp32(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -761,6 +791,9 @@ fstDisp32 = TailRecipe(
'fstDisp32', Store, size=5, ins=(FPR, GPR_DEREF_SAFE), outs=(),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg1, in_reg0), sink);
modrm_disp32(in_reg1, in_reg0, sink);
let offset: i32 = offset.into();
@@ -827,6 +860,9 @@ ld = TailRecipe(
instp=IsEqual(Load.offset, 0),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_rm(in_reg0, out_reg0, sink);
''')
@@ -837,6 +873,9 @@ fld = TailRecipe(
instp=IsEqual(Load.offset, 0),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_rm(in_reg0, out_reg0, sink);
''')
@@ -847,6 +886,9 @@ ldDisp8 = TailRecipe(
instp=IsSignedInt(Load.offset, 8),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_disp8(in_reg0, out_reg0, sink);
let offset: i32 = offset.into();
@@ -859,6 +901,9 @@ fldDisp8 = TailRecipe(
instp=IsSignedInt(Load.offset, 8),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_disp8(in_reg0, out_reg0, sink);
let offset: i32 = offset.into();
@@ -871,6 +916,9 @@ ldDisp32 = TailRecipe(
instp=IsSignedInt(Load.offset, 32),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_disp32(in_reg0, out_reg0, sink);
let offset: i32 = offset.into();
@@ -883,6 +931,9 @@ fldDisp32 = TailRecipe(
instp=IsSignedInt(Load.offset, 32),
clobbers_flags=False,
emit='''
if !flags.notrap() {
sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]);
}
PUT_OP(bits, rex2(in_reg0, out_reg0), sink);
modrm_disp32(in_reg0, out_reg0, sink);
let offset: i32 = offset.into();


@@ -14,7 +14,7 @@
//! relocations to a `RelocSink` trait object. Relocations are less frequent than the
//! `CodeSink::put*` methods, so the performance impact of the virtual callbacks is less severe.
use ir::{ExternalName, JumpTable, TrapCode, SourceLoc};
use super::{CodeSink, CodeOffset, Reloc, Addend};
use std::ptr::write_unaligned;
@@ -33,15 +33,21 @@ pub struct MemoryCodeSink<'a> {
data: *mut u8,
offset: isize,
relocs: &'a mut RelocSink,
traps: &'a mut TrapSink,
}
impl<'a> MemoryCodeSink<'a> {
/// Create a new memory code sink that writes a function to the memory pointed to by `data`.
pub fn new<'sink>(
data: *mut u8,
relocs: &'sink mut RelocSink,
traps: &'sink mut TrapSink,
) -> MemoryCodeSink<'sink> {
MemoryCodeSink {
data,
offset: 0,
relocs,
traps,
}
}
}
@@ -58,6 +64,12 @@ pub trait RelocSink {
fn reloc_jt(&mut self, CodeOffset, Reloc, JumpTable);
}
/// A trait for receiving trap codes and offsets.
pub trait TrapSink {
/// Add trap information for a specific offset.
fn trap(&mut self, CodeOffset, SourceLoc, TrapCode);
}
impl<'a> CodeSink for MemoryCodeSink<'a> {
fn offset(&self) -> CodeOffset {
self.offset as CodeOffset
@@ -105,4 +117,9 @@ impl<'a> CodeSink for MemoryCodeSink<'a> {
let ofs = self.offset();
self.relocs.reloc_jt(ofs, rel, jt);
}
fn trap(&mut self, code: TrapCode, srcloc: SourceLoc) {
let ofs = self.offset();
self.traps.trap(ofs, srcloc, code);
}
}
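Callers with no interest in trap metadata only need the smallest possible implementation of the new trait. A hedged sketch, written as if inside this module where `TrapSink`, `CodeOffset`, `SourceLoc`, and `TrapCode` are already in scope; nothing like this type ships in this commit:

/// Hypothetical do-nothing sink for callers that want to ignore trap metadata.
struct NullTrapSink;

impl TrapSink for NullTrapSink {
    fn trap(&mut self, _offset: CodeOffset, _srcloc: SourceLoc, _code: TrapCode) {}
}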

View File

@@ -8,9 +8,9 @@ mod memorysink;
pub use regalloc::RegDiversions;
pub use self::relaxation::relax_branches;
pub use self::memorysink::{MemoryCodeSink, RelocSink, TrapSink};
use ir::{ExternalName, JumpTable, Function, Inst, TrapCode, SourceLoc};
use std::fmt;
/// Offset in bytes from the beginning of the function.
@@ -86,6 +86,9 @@ pub trait CodeSink {
/// Add a relocation referencing a jump table.
fn reloc_jt(&mut self, Reloc, JumpTable);
/// Add trap information for the current offset.
fn trap(&mut self, TrapCode, SourceLoc);
}
/// Report a bad encoding error. /// Report a bad encoding error.

View File

@@ -9,7 +9,7 @@
//! contexts concurrently. Typically, you would have one context per compilation thread and only a
//! single ISA instance.
use binemit::{CodeOffset, relax_branches, MemoryCodeSink, RelocSink, TrapSink};
use dominator_tree::DominatorTree;
use flowgraph::ControlFlowGraph;
use ir::Function;
@@ -111,9 +111,15 @@ impl Context {
/// code is returned by `compile` above.
///
/// The machine code is not relocated. Instead, any relocations are emitted into `relocs`.
pub fn emit_to_memory(
&self,
mem: *mut u8,
relocs: &mut RelocSink,
traps: &mut TrapSink,
isa: &TargetIsa,
) {
let _tt = timing::binemit();
isa.emit_function(&self.func, &mut MemoryCodeSink::new(mem, relocs, traps));
}
/// Run the verifier on the function.
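Putting the pieces together, callers of `Context::emit_to_memory` now pass two sinks. A hedged sketch of the new call shape, reusing the hypothetical `CollectTraps` from the earlier sketch; `ctx`, `isa`, `code_size`, and `MyRelocSink` are all assumed to exist, and only the sink plumbing is the point:

// `ctx: Context`, `isa: &TargetIsa`, `code_size: u32`, and `MyRelocSink` are
// hypothetical; `CollectTraps` is the sketch from the top of this page.
let mut relocs = MyRelocSink::new();
let mut traps = CollectTraps { sites: Vec::new() };
let mut code = vec![0u8; code_size as usize];
ctx.emit_to_memory(code.as_mut_ptr(), &mut relocs, &mut traps, isa);
// `traps.sites` now holds one (offset, srcloc, code) entry per trapping instruction.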

View File

@@ -1,7 +1,7 @@
//! Emitting binary Intel machine code.
use binemit::{CodeSink, Reloc, bad_encoding};
use ir::{Function, Inst, Ebb, InstructionData, Opcode, TrapCode};
use ir::condcodes::{CondCode, IntCC, FloatCC};
use isa::{RegUnit, StackRef, StackBase, StackBaseMask};
use regalloc::RegDiversions;

View File

@@ -102,6 +102,10 @@ impl binemit::CodeSink for TextSink {
fn reloc_jt(&mut self, reloc: binemit::Reloc, jt: ir::JumpTable) {
write!(self.text, "{}({}) ", reloc, jt).unwrap();
}
fn trap(&mut self, code: ir::TrapCode, _srcloc: ir::SourceLoc) {
write!(self.text, "{} ", code).unwrap();
}
}
impl SubTest for TestBinEmit {

View File

@@ -111,4 +111,5 @@ impl binemit::CodeSink for SizeSink {
) {
}
fn reloc_jt(&mut self, _reloc: binemit::Reloc, _jt: ir::JumpTable) {}
fn trap(&mut self, _code: ir::TrapCode, _srcloc: ir::SourceLoc) {}
} }