Fix up adjust_sp_imm instruction.

* Use imm64 rather than offset32
* Add predicate to enforce signed 32-bit limit to imm
* Remove AdjustSpImm format
* Add encoding tests for adjust_sp_imm
* Adjust use of adjust_sp_imm in Intel prologue_epilogue to match
This commit is contained in:
Tyler McMullen
2017-12-02 15:37:04 -08:00
committed by Jakob Stoklund Olesen
parent 1a11c351b5
commit ced39f5186
11 changed files with 46 additions and 27 deletions

View File

@@ -389,6 +389,17 @@ ebb0:
; asm: popl %ecx
[-,%rcx] v512 = x86_pop.i32 ; bin: 59
; Adjust Stack Pointer
; asm: addl $1024, %esp
adjust_sp_imm 1024 ; bin: 81 c4 00000400
; asm: addl $-1024, %esp
adjust_sp_imm -1024 ; bin: 81 c4 fffffc00
; asm: addl $2147483647, %esp
adjust_sp_imm 2147483647 ; bin: 81 c4 7fffffff
; asm: addl $-2147483648, %esp
adjust_sp_imm -2147483648 ; bin: 81 c4 80000000
; asm: testl %ecx, %ecx
; asm: je ebb1
brz v1, ebb1 ; bin: 85 c9 74 0e

View File

@@ -493,6 +493,16 @@ ebb0:
; asm: popq %r10
[-,%r10] v514 = x86_pop.i64 ; bin: 41 5a
; Adjust Stack Pointer
; asm: addq $1024, %rsp
adjust_sp_imm 1024 ; bin: 48 81 c4 00000400
; asm: addq $-1024, %rsp
adjust_sp_imm -1024 ; bin: 48 81 c4 fffffc00
; asm: addq $2147483647, %rsp
adjust_sp_imm 2147483647 ; bin: 48 81 c4 7fffffff
; asm: addq $-2147483648, %rsp
adjust_sp_imm -2147483648 ; bin: 48 81 c4 80000000
; asm: testq %rcx, %rcx
; asm: je ebb1
brz v1, ebb1 ; bin: 48 85 c9 74 1b

View File

@@ -8,18 +8,18 @@ function %foo(f64 [%xmm0], i64 fp [%rbp], i64 csr [%rbx], i64 csr [%r12]) -> i64
ss1 = incoming_arg 32, offset -32
ebb0(v0: f64 [%xmm0], v1: i64 [%rbp], v2: i64 [%rbx], v3: i64 [%r12]):
x86_push v1 ; bin: 48 55
x86_push v1 ; bin: 55
copy_special %rsp -> %rbp ; bin: 48 89 e5
x86_push v2 ; bin: 48 53
x86_push v3 ; bin: 49 54
x86_push v2 ; bin: 53
x86_push v3 ; bin: 41 54
adjust_sp_imm -168 ; bin: 48 81 c4 ffffff58
; ... function body ...
adjust_sp_imm +168 ; bin: 48 81 c4 000000a8
[-,%r12] v100 = x86_pop.i64 ; bin: 49 5c
[-,%rbx] v101 = x86_pop.i64 ; bin: 48 5b
[-,%rbp] v102 = x86_pop.i64 ; bin: 48 5d
adjust_sp_imm 168 ; bin: 48 81 c4 000000a8
[-,%r12] v100 = x86_pop.i64 ; bin: 41 5c
[-,%rbx] v101 = x86_pop.i64 ; bin: 5b
[-,%rbp] v102 = x86_pop.i64 ; bin: 5d
return v100, v101, v102
}