aarch64: Add support for the fmls instruction (#5895)

This commit adds lowerings to the AArch64 backend for the `fmls`
instruction, which is intended to be leveraged in the relaxed-simd
proposal for WebAssembly. This should hopefully allow for slightly more
efficient codegen for this operator, emitting a single `fmls` instead of
an `fmla` instruction plus a negation instruction.
Author: Alex Crichton
Date: 2023-03-01 23:45:58 -06:00 (committed by GitHub)
Parent: 52b4c48a1b
Commit: 9984e959cd
5 changed files with 173 additions and 2 deletions
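
As a concrete illustration (taken directly from the new filetest added at the end of this commit), a vector `fma` whose first operand is negated now lowers to a single `fmls` rather than an `fneg` followed by an `fmla`:

function %fma_neg_f32x4(f32x4, f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4, v2: f32x4):
    v3 = fneg v0
    v4 = fma v3, v1, v2
    return v4
}
; after two register moves this now compiles down to:
;   fmls v0.4s, v5.4s, v1.4s
;   ret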


@@ -1401,6 +1401,8 @@
   (Bsl)
   ;; Floating-point fused multiply-add vectors
   (Fmla)
+  ;; Floating-point fused multiply-subtract vectors
+  (Fmls)
 ))

 ;; A Vector miscellaneous operation with two registers.


@@ -2906,6 +2906,9 @@ impl MachInstEmit for Inst {
                 VecALUModOp::Fmla => {
                     (0b000_01110_00_1 | (size.enc_float_size() << 1), 0b110011)
                 }
+                VecALUModOp::Fmls => {
+                    (0b000_01110_10_1 | (size.enc_float_size() << 1), 0b110011)
+                }
             };
             sink.put4(enc_vec_rrr(top11 | q << 9, rm, bit15_10, rn, rd));
         }
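
For readers comparing the two match arms (an editorial aside, not part of the diff): the `fmls` arm differs from the `fmla` arm only in the `10` vs `00` field of the `top11` constant, a single bit that ends up at bit 23 of the final instruction word once the 11-bit `top11` value occupies bits 31..21. A minimal sanity check of that claim, with constant names invented for this sketch:

fn main() {
    // Opcode fragments copied from the two match arms above.
    let fmla_top11: u32 = 0b000_01110_00_1;
    let fmls_top11: u32 = 0b000_01110_10_1;
    // They differ in exactly one bit: bit 2 of top11, which lands at bit 23
    // of the 32-bit instruction word when top11 sits in bits 31..21.
    assert_eq!(fmla_top11 ^ fmls_top11, 1 << 2);
    println!("fmla and fmls differ only at instruction bit {}", 21 + 2);
}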


@@ -2363,6 +2363,7 @@ impl Inst {
             let (op, size) = match alu_op {
                 VecALUModOp::Bsl => ("bsl", VectorSize::Size8x16),
                 VecALUModOp::Fmla => ("fmla", size),
+                VecALUModOp::Fmls => ("fmls", size),
             };
             let rd = pretty_print_vreg_vector(rd.to_reg(), size, allocs);
             let ri = pretty_print_vreg_vector(ri, size, allocs);


@@ -404,7 +404,13 @@
 (rule (lower (has_type ty @ (multi_lane _ _) (fma x y z)))
       (vec_rrr_mod (VecALUModOp.Fmla) z x y (vector_size ty)))
-(rule 1 (lower (has_type (ty_scalar_float ty) (fma x y z)))
+(rule 1 (lower (has_type ty @ (multi_lane _ _) (fma (fneg x) y z)))
+      (vec_rrr_mod (VecALUModOp.Fmls) z x y (vector_size ty)))
+(rule 2 (lower (has_type ty @ (multi_lane _ _) (fma x (fneg y) z)))
+      (vec_rrr_mod (VecALUModOp.Fmls) z x y (vector_size ty)))
+(rule 3 (lower (has_type (ty_scalar_float ty) (fma x y z)))
       (fpu_rrrr (FPUOp3.MAdd) (scalar_size ty) x y z))

 ;;;; Rules for `fcopysign` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
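
A brief note on these lowering rules (editorial, not part of the diff): the vector `fmls` instruction computes `Vd = Vd - (Vn * Vm)`, so with the addend `z` tied to the destination register the result is `z - x*y`, which equals both `(-x)*y + z` and `x*(-y) + z`. That is why the two new rules, matching an `fneg` on either multiplicand, can strip the negation and hand the un-negated operands to the same `Fmls` lowering. The pre-existing scalar rule keeps the same lowering and is simply renumbered from priority 1 to priority 3.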


@@ -0,0 +1,159 @@
test compile precise-output
target aarch64

function %fma_f32(f32, f32, f32) -> f32 {
block0(v0: f32, v1: f32, v2: f32):
    v3 = fma v0, v1, v2
    return v3
}

; VCode:
; block0:
; fmadd s0, s0, s1, s2
; ret
;
; Disassembled:
; block0: ; offset 0x0
; fmadd s0, s0, s1, s2
; ret

function %fma_f64(f64, f64, f64) -> f64 {
block0(v0: f64, v1: f64, v2: f64):
    v3 = fma v0, v1, v2
    return v3
}

; VCode:
; block0:
; fmadd d0, d0, d1, d2
; ret
;
; Disassembled:
; block0: ; offset 0x0
; fmadd d0, d0, d1, d2
; ret

function %fma_f32x4(f32x4, f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4, v2: f32x4):
    v3 = fma v0, v1, v2
    return v3
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmla v0.4s, v0.4s, v5.4s, v1.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmla v0.4s, v5.4s, v1.4s
; ret

function %fma_f64x2(f64x2, f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2, v2: f64x2):
    v3 = fma v0, v1, v2
    return v3
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmla v0.2d, v0.2d, v5.2d, v1.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmla v0.2d, v5.2d, v1.2d
; ret

function %fma_neg_f32x4(f32x4, f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4, v2: f32x4):
    v3 = fneg v0
    v4 = fma v3, v1, v2
    return v4
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.4s, v0.4s, v5.4s, v1.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.4s, v5.4s, v1.4s
; ret

function %fma_neg_f64x2(f64x2, f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2, v2: f64x2):
    v3 = fneg v0
    v4 = fma v3, v1, v2
    return v4
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.2d, v0.2d, v5.2d, v1.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.2d, v5.2d, v1.2d
; ret

function %fma_neg_other_f32x4(f32x4, f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4, v2: f32x4):
    v3 = fneg v1
    v4 = fma v0, v3, v2
    return v4
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.4s, v0.4s, v5.4s, v1.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.4s, v5.4s, v1.4s
; ret

function %fma_neg_other_f64x2(f64x2, f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2, v2: f64x2):
    v3 = fneg v1
    v4 = fma v0, v3, v2
    return v4
}

; VCode:
; block0:
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.2d, v0.2d, v5.2d, v1.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
; mov v5.16b, v0.16b
; mov v0.16b, v2.16b
; fmls v0.2d, v5.2d, v1.2d
; ret