Consider this testcase:
target i686

function u0:0() -> i32 system_v {
    ss0 = explicit_slot 0

block0:
    v2 = stack_addr.i32 ss0
    return v2
}
Before this commit, in 32-bit mode the x86 backend would generate
incorrect code for stack addresses:
0: 55 push ebp
1: 89 e5 mov ebp, esp
3: 83 ec 08 sub esp, 8
6: 8d 44 24 00 lea eax, [esp]
a: 00 00 add byte ptr [eax], al
c: 00 83 c4 08 5d c3 add byte ptr [ebx - 0x3ca2f73c], al
This happened because the ModRM byte indicated a disp8 encoding while
the emitter actually wrote a disp32 displacement; the three surplus zero
bytes and the following epilogue bytes were then decoded as the spurious
add instructions above. After this commit, correct code is generated:
0: 55 push ebp
1: 89 e5 mov ebp, esp
3: 83 ec 08 sub esp, 8
6: 8d 84 24 00 00 00 00 lea eax, [esp]
d: 83 c4 08 add esp, 8
10: 5d pop ebp
11: c3 ret
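
For illustration only, here is a minimal Rust sketch, not Cranelift's actual
emitter code: the helper names (lea_esp_disp32, lea_esp_disp8_modrm_with_disp32)
are hypothetical, and the functions just show how the ModRM/SIB bytes for
`lea r32, [esp + disp]` are assembled, contrasting a proper disp32 encoding
with the buggy shape in which the ModRM byte promises a disp8 but four
displacement bytes follow. The byte values match the disassembly above.

    // A minimal sketch, not Cranelift's actual emitter.

    /// Correct encoding: mod=0b10 in the ModRM byte announces a disp32,
    /// and four displacement bytes follow the SIB byte.
    fn lea_esp_disp32(reg: u8, disp: i32) -> Vec<u8> {
        let mut out = vec![0x8d];                       // LEA opcode
        out.push(0b10_000_100 | ((reg & 0b111) << 3));  // ModRM: mod=10, reg, rm=100 (SIB)
        out.push(0x24);                                 // SIB: no index, base=esp
        out.extend_from_slice(&disp.to_le_bytes());     // disp32
        out
    }

    /// The buggy shape: mod=0b01 announces a disp8, but four displacement
    /// bytes are still emitted. A decoder consumes only the first one and
    /// treats the remaining zero bytes as the start of new instructions.
    fn lea_esp_disp8_modrm_with_disp32(reg: u8, disp: i32) -> Vec<u8> {
        let mut out = vec![0x8d];
        out.push(0b01_000_100 | ((reg & 0b111) << 3));  // ModRM claims disp8
        out.push(0x24);
        out.extend_from_slice(&disp.to_le_bytes());     // but disp32 is written
        out
    }

    fn main() {
        // eax is register number 0; compare with the disassembly above.
        assert_eq!(lea_esp_disp32(0, 0),
                   [0x8d, 0x84, 0x24, 0x00, 0x00, 0x00, 0x00]);
        assert_eq!(lea_esp_disp8_modrm_with_disp32(0, 0),
                   [0x8d, 0x44, 0x24, 0x00, 0x00, 0x00, 0x00]);
    }

The new filetest below, filetests/isa/x86/stack-addr32.clif, pins the
corrected disp32 encodings for a range of stack slot offsets.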
; binary emission of stack address instructions on i686.
test binemit
set opt_level=none
target i686 haswell

; The binary encodings can be verified with the command:
;
;   sed -ne 's/^ *; asm: *//p' filetests/isa/x86/stack-addr32.clif | llvm-mc -show-encoding -triple=i686
;

function %stack_addr() {
    ss0 = incoming_arg 8, offset 0
    ss1 = incoming_arg 1024, offset -1024
    ss2 = incoming_arg 1024, offset -2048
    ss3 = incoming_arg 8, offset -2056
    ss4 = explicit_slot 8, offset 0
    ss5 = explicit_slot 8, offset 1024

block0:
    [-,%rcx] v0 = stack_addr.i32 ss0         ; bin: 8d 8c 24 00000808
    [-,%rcx] v1 = stack_addr.i32 ss1         ; bin: 8d 8c 24 00000408
    [-,%rcx] v2 = stack_addr.i32 ss2         ; bin: 8d 8c 24 00000008
    [-,%rcx] v3 = stack_addr.i32 ss3         ; bin: 8d 8c 24 00000000
    [-,%rcx] v4 = stack_addr.i32 ss4         ; bin: 8d 8c 24 00000808
    [-,%rcx] v5 = stack_addr.i32 ss5         ; bin: 8d 8c 24 00000c08

    [-,%rcx] v20 = stack_addr.i32 ss4+1      ; bin: 8d 8c 24 00000809
    [-,%rcx] v21 = stack_addr.i32 ss4+2      ; bin: 8d 8c 24 0000080a
    [-,%rcx] v22 = stack_addr.i32 ss4+2048   ; bin: 8d 8c 24 00001008
    [-,%rcx] v23 = stack_addr.i32 ss4-4096   ; bin: 8d 8c 24 fffff808

    return
}