As a follow-up to #5780, disassemble the regions identified by `bb_starts`, falling back to disassembling the whole buffer. This ensures that instructions like `br_table`, which introduce a lot of inline constants, don't throw capstone off for the remainder of the function.

---------

Co-authored-by: Jamey Sharp <jamey@minilop.net>
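The expectations below exercise the per-region disassembly. As a rough sketch of the idea, assuming the `capstone` crate and a hypothetical `disassemble_regions` helper (only `bb_starts` is named by this change, and this is not the actual Wasmtime code): split the buffer at the known basic-block starts and hand each region to capstone independently, so inline data can only desynchronize the decoder until the next block boundary; with no known block starts, the whole buffer is treated as one region.

```rust
// Sketch only: `disassemble_regions` and its signature are hypothetical;
// the real change lives in Cranelift's test infrastructure.
use capstone::prelude::*;

/// Disassemble `buf` region by region, split at `bb_starts` (sorted byte
/// offsets of basic-block starts), falling back to one pass over the whole
/// buffer when no block starts are available.
fn disassemble_regions(buf: &[u8], bb_starts: &[usize]) -> Result<String, capstone::Error> {
    let cs = Capstone::new()
        .x86()
        .mode(arch::x86::ArchMode::Mode64)
        .syntax(arch::x86::ArchSyntax::Att)
        .build()?;

    // Fallback: no known block starts means a single region covering everything.
    let starts: Vec<usize> = if bb_starts.is_empty() {
        vec![0]
    } else {
        bb_starts.to_vec()
    };

    let mut out = String::new();
    for (i, &start) in starts.iter().enumerate() {
        // Each region ends where the next block begins (or at the buffer's
        // end), so a decode that runs into inline data (e.g. br_table
        // constants) cannot corrupt the output for the following blocks.
        let end = starts.get(i + 1).copied().unwrap_or(buf.len());
        for insn in cs.disasm_all(&buf[start..end], start as u64)?.iter() {
            out.push_str(&format!(
                "{:#x}: {} {}\n",
                insn.address(),
                insn.mnemonic().unwrap_or(""),
                insn.op_str().unwrap_or(""),
            ));
        }
    }
    Ok(out)
}
```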
test compile precise-output
target x86_64

function u0:0(i64 sarg(64)) -> i8 system_v {
block0(v0: i64):
    v1 = load.i8 v0
    return v1
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
; block0:
;   lea 16(%rbp), %rsi
;   movzbq 0(%rsi), %rax
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
; block1: ; offset 0x4
;   leaq 0x10(%rbp), %rsi
;   movzbq (%rsi), %rax ; trap: heap_oob
;   movq %rbp, %rsp
;   popq %rbp
;   retq

function u0:1(i64 sarg(64), i64) -> i8 system_v {
block0(v0: i64, v1: i64):
    v2 = load.i8 v1
    v3 = load.i8 v0
    v4 = iadd.i8 v2, v3
    return v4
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
; block0:
;   lea 16(%rbp), %rcx
;   movzbq 0(%rdi), %rax
;   movzbq 0(%rcx), %r9
;   addl %eax, %r9d, %eax
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
; block1: ; offset 0x4
;   leaq 0x10(%rbp), %rcx
;   movzbq (%rdi), %rax ; trap: heap_oob
;   movzbq (%rcx), %r9 ; trap: heap_oob
;   addl %r9d, %eax
;   movq %rbp, %rsp
;   popq %rbp
;   retq

function u0:2(i64) -> i8 system_v {
    fn1 = colocated u0:0(i64 sarg(64)) -> i8 system_v

block0(v0: i64):
    v1 = call fn1(v0)
    return v1
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
; block0:
;   movq %rdi, %rsi
;   subq %rsp, $64, %rsp
;   virtual_sp_offset_adjust 64
;   lea 0(%rsp), %rdi
;   movl $64, %edx
;   load_ext_name %Memcpy+0, %r11
;   call *%r11
;   call User(userextname0)
;   addq %rsp, $64, %rsp
;   virtual_sp_offset_adjust -64
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
; block1: ; offset 0x4
;   movq %rdi, %rsi
;   subq $0x40, %rsp
;   leaq (%rsp), %rdi
;   movl $0x40, %edx
;   movabsq $0, %r11 ; reloc_external Abs8 %Memcpy 0
;   callq *%r11
;   callq 0x26 ; reloc_external CallPCRel4 u0:0 -4
;   addq $0x40, %rsp
;   movq %rbp, %rsp
;   popq %rbp
;   retq

function u0:3(i64, i64) -> i8 system_v {
    fn1 = colocated u0:0(i64, i64 sarg(64)) -> i8 system_v

block0(v0: i64, v1: i64):
    v2 = call fn1(v0, v1)
    return v2
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
;   subq %rsp, $16, %rsp
;   movq %r13, 0(%rsp)
; block0:
;   movq %rdi, %r13
;   subq %rsp, $64, %rsp
;   virtual_sp_offset_adjust 64
;   lea 0(%rsp), %rdi
;   movl $64, %edx
;   load_ext_name %Memcpy+0, %rax
;   call *%rax
;   movq %r13, %rdi
;   call User(userextname0)
;   addq %rsp, $64, %rsp
;   virtual_sp_offset_adjust -64
;   movq 0(%rsp), %r13
;   addq %rsp, $16, %rsp
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
;   subq $0x10, %rsp
;   movq %r13, (%rsp)
; block1: ; offset 0xc
;   movq %rdi, %r13
;   subq $0x40, %rsp
;   leaq (%rsp), %rdi
;   movl $0x40, %edx
;   movabsq $0, %rax ; reloc_external Abs8 %Memcpy 0
;   callq *%rax
;   movq %r13, %rdi
;   callq 0x30 ; reloc_external CallPCRel4 u0:0 -4
;   addq $0x40, %rsp
;   movq (%rsp), %r13
;   addq $0x10, %rsp
;   movq %rbp, %rsp
;   popq %rbp
;   retq

function u0:4(i64 sarg(128), i64 sarg(64)) -> i8 system_v {
block0(v0: i64, v1: i64):
    v2 = load.i8 v0
    v3 = load.i8 v1
    v4 = iadd.i8 v2, v3
    return v4
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
; block0:
;   lea 16(%rbp), %rsi
;   lea 144(%rbp), %rcx
;   movzbq 0(%rsi), %rax
;   movzbq 0(%rcx), %r9
;   addl %eax, %r9d, %eax
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
; block1: ; offset 0x4
;   leaq 0x10(%rbp), %rsi
;   leaq 0x90(%rbp), %rcx
;   movzbq (%rsi), %rax ; trap: heap_oob
;   movzbq (%rcx), %r9 ; trap: heap_oob
;   addl %r9d, %eax
;   movq %rbp, %rsp
;   popq %rbp
;   retq

function u0:5(i64, i64, i64) -> i8 system_v {
    fn1 = colocated u0:0(i64, i64 sarg(128), i64 sarg(64)) -> i8 system_v

block0(v0: i64, v1: i64, v2: i64):
    v3 = call fn1(v0, v1, v2)
    return v3
}

; VCode:
;   pushq %rbp
;   movq %rsp, %rbp
;   subq %rsp, $16, %rsp
;   movq %r12, 0(%rsp)
;   movq %r14, 8(%rsp)
; block0:
;   movq %rdx, %r14
;   movq %rdi, %r12
;   subq %rsp, $192, %rsp
;   virtual_sp_offset_adjust 192
;   lea 0(%rsp), %rdi
;   movl $128, %edx
;   load_ext_name %Memcpy+0, %rax
;   call *%rax
;   lea 128(%rsp), %rdi
;   movl $64, %edx
;   load_ext_name %Memcpy+0, %r11
;   movq %r14, %rsi
;   call *%r11
;   movq %r12, %rdi
;   call User(userextname0)
;   addq %rsp, $192, %rsp
;   virtual_sp_offset_adjust -192
;   movq 0(%rsp), %r12
;   movq 8(%rsp), %r14
;   addq %rsp, $16, %rsp
;   movq %rbp, %rsp
;   popq %rbp
;   ret
;
; Disassembled:
; block0: ; offset 0x0
;   pushq %rbp
;   movq %rsp, %rbp
;   subq $0x10, %rsp
;   movq %r12, (%rsp)
;   movq %r14, 8(%rsp)
; block1: ; offset 0x11
;   movq %rdx, %r14
;   movq %rdi, %r12
;   subq $0xc0, %rsp
;   leaq (%rsp), %rdi
;   movl $0x80, %edx
;   movabsq $0, %rax ; reloc_external Abs8 %Memcpy 0
;   callq *%rax
;   leaq 0x80(%rsp), %rdi
;   movl $0x40, %edx
;   movabsq $0, %r11 ; reloc_external Abs8 %Memcpy 0
;   movq %r14, %rsi
;   callq *%r11
;   movq %r12, %rdi
;   callq 0x58 ; reloc_external CallPCRel4 u0:0 -4
;   addq $0xc0, %rsp
;   movq (%rsp), %r12
;   movq 8(%rsp), %r14
;   addq $0x10, %rsp
;   movq %rbp, %rsp
;   popq %rbp
;   retq