x64: Deduplicate fcmp emission logic (#6113)

* x64: Deduplicate fcmp emission logic

The `select`-of-`fcmp` lowering duplicated a good deal of `FloatCC`
lowering logic that was already done by `emit_fcmp`, so this commit
refactors these lowering rules to instead delegate to `emit_fcmp` and
then handle that result.

* Swap order of condition codes

This shouldn't affect the correctness of this operation, and it's a bit
more natural to write the lowering rule this way.

* Swap the order of comparison operands

No need to swap `a` and `b`; only `x` and `y` need swapping.

* Fix x64 printing of `XmmCmove`
This commit is contained in:
Alex Crichton
2023-03-29 11:24:25 -05:00
committed by GitHub
parent dcf0ea9ff3
commit afb417920d
6 changed files with 73 additions and 112 deletions

View File

@@ -60,10 +60,10 @@ block0(v0: f64, v1: i64):
; setz %al
; andl %edi, %eax, %edi
; movzbq %dil, %rax
; ucomisd %xmm0, %xmm9
; ucomisd %xmm9, %xmm0
; movdqa %xmm0, %xmm2
; mov z, sd; j%xmm2 $next; mov%xmm0 %xmm0, %xmm0; $next:
; mov np, sd; j%xmm2 $next; mov%xmm0 %xmm0, %xmm0; $next:
; movsd %xmm0, %xmm0; jnp $next; movsd %xmm2, %xmm0; $next:
; movsd %xmm0, %xmm0; jz $next; movsd %xmm2, %xmm0; $next:
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -79,11 +79,11 @@ block0(v0: f64, v1: i64):
; sete %al
; andl %eax, %edi
; movzbq %dil, %rax
; ucomisd %xmm0, %xmm9
; ucomisd %xmm9, %xmm0
; movdqa %xmm0, %xmm2
; je 0x2f
; jnp 0x2f
; movsd %xmm2, %xmm0
; jnp 0x39
; je 0x39
; movsd %xmm2, %xmm0
; movq %rbp, %rsp
; popq %rbp

View File

@@ -52,11 +52,11 @@ block0(v0: f32, v1: i128, v2: i128):
; block0:
; ucomiss %xmm0, %xmm0
; movq %rdi, %rax
; cmovnzq %rdx, %rax, %rax
; cmovpq %rdx, %rax, %rax
; cmovnzq %rdx, %rax, %rax
; movq %rsi, %rdx
; cmovnzq %rcx, %rdx, %rdx
; cmovpq %rcx, %rdx, %rdx
; cmovnzq %rcx, %rdx, %rdx
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -68,11 +68,11 @@ block0(v0: f32, v1: i128, v2: i128):
; block1: ; offset 0x4
; ucomiss %xmm0, %xmm0
; movq %rdi, %rax
; cmovneq %rdx, %rax
; cmovpq %rdx, %rax
; cmovneq %rdx, %rax
; movq %rsi, %rdx
; cmovneq %rcx, %rdx
; cmovpq %rcx, %rdx
; cmovneq %rcx, %rdx
; movq %rbp, %rsp
; popq %rbp
; retq

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
; Check that no intervening moves are inserted when lowering `select` (see
@@ -9,8 +9,35 @@ block0(v0: f32, v1: f32):
v3 = iconst.i32 1
v4 = iconst.i32 0
v5 = select v2, v3, v4
; check: ucomiss
; nextln: cmovnzl
; nextln: cmovpl
return v5
}
; VCode:
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl $1, %eax
; ucomiss %xmm1, %xmm0
; cmovpl const(0), %eax, %eax
; cmovnzl const(0), %eax, %eax
; movq %rbp, %rsp
; popq %rbp
; ret
;
; Disassembled:
; block0: ; offset 0x0
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movl $1, %eax
; ucomiss %xmm1, %xmm0
; cmovpl 0xd(%rip), %eax
; cmovnel 6(%rip), %eax
; movq %rbp, %rsp
; popq %rbp
; retq
; addb %al, (%rax)
; addb %al, (%rax)
; addb %al, (%rax)
; addb %al, (%rax)

View File

@@ -44,10 +44,10 @@ block0(v0: f32, v1: f32, v2: i64, v3: i64):
; pushq %rbp
; movq %rsp, %rbp
; block0:
; ucomiss %xmm0, %xmm1
; ucomiss %xmm1, %xmm0
; movq %rdi, %rax
; cmovnzq %rsi, %rax, %rax
; cmovpq %rsi, %rax, %rax
; cmovnzq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -57,10 +57,10 @@ block0(v0: f32, v1: f32, v2: i64, v3: i64):
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; ucomiss %xmm0, %xmm1
; ucomiss %xmm1, %xmm0
; movq %rdi, %rax
; cmovneq %rsi, %rax
; cmovpq %rsi, %rax
; cmovneq %rsi, %rax
; movq %rbp, %rsp
; popq %rbp
; retq