Avoid extra register movement when lowering the x86 extractlane of a float vector

This commit relies on the fact that on x86, float values are already stored in XMM registers. When extracting a lane, Cranelift was moving the float to a general-purpose register and then back to an XMM register; this change avoids the round trip by shuffling the selected lane into the lowest bits of the XMM register. It also assumes that the upper bits of that register can be left as-is (instead of being zeroed out).
Andrew Brown
2019-08-21 16:56:11 -07:00
parent f1363168a9
commit 00bedca274
7 changed files with 154 additions and 43 deletions
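
The generated machine code is not part of the diff, but the idea in the commit message can be illustrated with Rust's x86 intrinsics. The sketch below is an analogy only, not the code Cranelift emits: extract_via_gpr mirrors the old lowering (lane moved to a general-purpose register and back), while extract_via_shuffle mirrors the new one (the value never leaves the XMM register). It assumes a CPU with SSE4.1.

    use std::arch::x86_64::*;

    // Old-style lowering: the lane is moved into a general-purpose register
    // (EXTRACTPS), and turning the bits back into an f32 moves it back into
    // an XMM register (MOVD); this is the round trip the commit removes.
    #[target_feature(enable = "sse4.1")]
    unsafe fn extract_via_gpr(v: __m128) -> f32 {
        let bits = _mm_extract_ps::<2>(v); // XMM -> GPR
        f32::from_bits(bits as u32)        // GPR -> XMM
    }

    // New-style lowering: shuffle lane 2 into lane 0 and read it directly;
    // the value stays in an XMM register and the upper bits are left as-is.
    #[target_feature(enable = "sse2")]
    unsafe fn extract_via_shuffle(v: __m128) -> f32 {
        let shuffled = _mm_shuffle_ps::<{ _MM_SHUFFLE(2, 2, 2, 2) }>(v, v);
        _mm_cvtss_f32(shuffled) // lane 0; no general-purpose register involved
    }

    fn main() {
        // _mm_set_ps lists lanes high-to-low: lane 3 = 3.0, ..., lane 0 = 0.0.
        unsafe {
            let v = _mm_set_ps(3.0, 2.0, 1.0, 0.0);
            assert_eq!(extract_via_gpr(v), 2.0);
            assert_eq!(extract_via_shuffle(v), 2.0);
        }
    }

Three of the seven changed files are shown in the excerpt below.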

cranelift-codegen/meta/src/isa/x86/encodings.rs

@@ -356,7 +356,6 @@ pub(crate) fn define(
     let copy_to_ssa = shared.by_name("copy_to_ssa");
     let ctz = shared.by_name("ctz");
     let debugtrap = shared.by_name("debugtrap");
-    let extractlane = shared.by_name("extractlane");
     let f32const = shared.by_name("f32const");
     let f64const = shared.by_name("f64const");
     let fadd = shared.by_name("fadd");
@@ -460,6 +459,7 @@ pub(crate) fn define(
     let x86_fmax = x86.by_name("x86_fmax");
     let x86_fmin = x86.by_name("x86_fmin");
     let x86_pop = x86.by_name("x86_pop");
+    let x86_pextr = x86.by_name("x86_pextr");
     let x86_pshufd = x86.by_name("x86_pshufd");
     let x86_pshufb = x86.by_name("x86_pshufb");
     let x86_push = x86.by_name("x86_push");
@@ -1791,16 +1791,16 @@ pub(crate) fn define(
     }
     // SIMD extractlane
-    let mut extractlane_mapping: HashMap<u64, (Vec<u8>, Option<SettingPredicateNumber>)> =
+    let mut x86_pextr_mapping: HashMap<u64, (Vec<u8>, Option<SettingPredicateNumber>)> =
         HashMap::new();
-    extractlane_mapping.insert(8, (vec![0x66, 0x0f, 0x3a, 0x14], Some(use_sse41_simd))); // PEXTRB
-    extractlane_mapping.insert(16, (vec![0x66, 0x0f, 0xc5], None)); // PEXTRW from SSE2; SSE4.1 has a PEXTRW that can move to reg/m16 but the opcode is four bytes
-    extractlane_mapping.insert(32, (vec![0x66, 0x0f, 0x3a, 0x16], Some(use_sse41_simd))); // PEXTRD
-    extractlane_mapping.insert(64, (vec![0x66, 0x0f, 0x3a, 0x16], Some(use_sse41_simd))); // PEXTRQ, only x86_64
+    x86_pextr_mapping.insert(8, (vec![0x66, 0x0f, 0x3a, 0x14], Some(use_sse41))); // PEXTRB
+    x86_pextr_mapping.insert(16, (vec![0x66, 0x0f, 0xc5], None)); // PEXTRW from SSE2; SSE4.1 has a PEXTRW that can move to reg/m16 but the opcode is four bytes
+    x86_pextr_mapping.insert(32, (vec![0x66, 0x0f, 0x3a, 0x16], Some(use_sse41))); // PEXTRD
+    x86_pextr_mapping.insert(64, (vec![0x66, 0x0f, 0x3a, 0x16], Some(use_sse41))); // PEXTRQ, only x86_64
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
-        if let Some((opcode, isap)) = extractlane_mapping.get(&ty.lane_bits()) {
-            let instruction = extractlane.bind_vector_from_lane(ty, sse_vector_size);
+        if let Some((opcode, isap)) = x86_pextr_mapping.get(&ty.lane_bits()) {
+            let instruction = x86_pextr.bind_vector_from_lane(ty, sse_vector_size);
             let template = rec_r_ib_unsigned_gpr.opcodes(opcode.clone());
             if ty.lane_bits() < 64 {
                 e.enc_32_64_maybe_isap(instruction, template.nonrex(), isap.clone());

cranelift-codegen/meta/src/isa/x86/instructions.rs

@@ -291,5 +291,22 @@ pub(crate) fn define(
         .operands_out(vec![a]),
     );
 
+    let Idx = &operand_doc("Idx", uimm8, "Lane index");
+    let x = &operand("x", TxN);
+    let a = &operand("a", &TxN.lane_of());
+
+    ig.push(
+        Inst::new(
+            "x86_pextr",
+            r#"
+        Extract lane ``Idx`` from ``x``.
+
+        The lane index, ``Idx``, is an immediate value, not an SSA value. It
+        must indicate a valid lane index for the type of ``x``.
+        "#,
+        )
+        .operands_in(vec![x, Idx])
+        .operands_out(vec![a]),
+    );
     ig.build()
 }
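
Once this entry is processed, code generation exposes an x86_pextr method on the instruction builder, which the legalization in the next file can emit in place of the target-independent extractlane. A hedged fragment, where the `pos` cursor and the `vector` value are assumed names for illustration:

    // `pos`: a FuncCursor positioned at the instruction being replaced;
    // `vector`: an SSA value of a type such as i32x4.
    let scalar = pos.ins().x86_pextr(vector, 1); // lowers to PEXTRD with immediate 1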

cranelift-codegen/meta/src/isa/x86/legalize.rs

@@ -23,6 +23,7 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     let bor = insts.by_name("bor");
     let clz = insts.by_name("clz");
     let ctz = insts.by_name("ctz");
+    let extractlane = insts.by_name("extractlane");
     let f64const = insts.by_name("f64const");
     let fcmp = insts.by_name("fcmp");
     let fcvt_from_uint = insts.by_name("fcvt_from_uint");
@@ -379,5 +380,7 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     );
     }
 
+    narrow.custom_legalize(extractlane, "convert_extractlane");
+
     narrow.build_and_add_to(&mut shared.transform_groups);
 }
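
The body of convert_extractlane lives in one of the changed files not shown in this excerpt. For orientation, here is a rough sketch of what such a legalization can look like, assuming the cranelift-codegen cursor and builder idioms of this period; the exact signature, the InstructionData::ExtractLane shape, and the shuffle masks are illustrative, not copied from the commit:

    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::{self, types::*, InstBuilder};

    // Sketch only. Float lanes stay in XMM registers: shuffle the requested
    // lane down to lane 0 and reinterpret it as a scalar, instead of moving
    // through a general-purpose register. Integer lanes still use PEXTR*.
    fn convert_extractlane(inst: ir::Inst, func: &mut ir::Function) {
        let mut pos = FuncCursor::new(func).at_inst(inst);
        pos.use_srcloc(inst);

        if let ir::InstructionData::ExtractLane { arg, lane, .. } = pos.func.dfg[inst] {
            let ty = pos.func.dfg.value_type(arg);
            if ty.lane_type().is_float() {
                // Assumption stated in the commit message: the upper bits of
                // the XMM register may be left as-is rather than zeroed.
                let shuffled = match (ty, lane) {
                    (_, 0) => arg, // the value is already in lane 0
                    (F32X4, n) => {
                        // PSHUFD mask: field 0 selects lane `n`, so lane 0 of
                        // the result holds the requested lane; the remaining
                        // fields select lane 0, which does not matter here.
                        pos.ins().x86_pshufd(arg, n)
                    }
                    (F64X2, 1) => {
                        // Viewed as four 32-bit fields, select fields [3, 2],
                        // moving the upper 64 bits down to the lower 64 bits.
                        pos.ins().x86_pshufd(arg, 0b11_10_11_10)
                    }
                    _ => unreachable!("invalid lane for a float vector"),
                };
                // Reinterpret lane 0 as the scalar result type (f32 or f64).
                pos.func
                    .dfg
                    .replace(inst)
                    .raw_bitcast(ty.lane_type(), shuffled);
            } else {
                // Integer lanes lower to the PEXTRB/W/D/Q encodings above.
                pos.func.dfg.replace(inst).x86_pextr(arg, lane);
            }
        }
    }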