Align functions according to their ISA's requirements (#4826)

Add a function_alignment method to the TargetIsa trait, and use it to align functions when generating objects. Additionally, collect the maximum alignment required by a function's pc-relative constants and return that value from compilation. Use the larger of these two values when padding functions for alignment.

This fixes a bug on x86_64 where rip-relative loads into SSE registers could cause a segfault, as functions (and the constant pools embedded in them) weren't always guaranteed to be aligned to 16-byte addresses.

Fixes #4812
Trevor Elliott
2022-08-31 14:41:44 -07:00
committed by GitHub
parent f18a1f1488
commit dde2c5a3b6
13 changed files with 81 additions and 15 deletions
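
The change boils down to one rule: when a compiled function is placed in an object file or in JIT memory, pad it to the maximum of the ISA's function alignment, the symbol alignment, and the alignment required by the function's pc-relative constants. The sketch below illustrates that rule with stand-in types; FakeIsa, CompiledFunction, and placement_alignment are hypothetical names used only for illustration, not the actual cranelift API (the real logic lives in TargetIsa::function_alignment, EmitResult::alignment, and the Module implementations in the diffs below).

// Minimal sketch of the alignment rule introduced by this commit.
// The types here are stand-ins, not real cranelift types.
struct FakeIsa {
    function_alignment: u32, // e.g. 16 on x86_64, 32 on aarch64, 4 on s390x
    symbol_alignment: u64,   // alignment the module backend requires for symbols
}

struct CompiledFunction {
    body: Vec<u8>,
    constant_alignment: u32, // max alignment of pc-relative constants in the body
}

// The function body must be placed at an address satisfying all three
// requirements, i.e. at the maximum of the three alignments.
fn placement_alignment(isa: &FakeIsa, func: &CompiledFunction) -> u64 {
    (func.constant_alignment as u64)
        .max(isa.function_alignment as u64)
        .max(isa.symbol_alignment)
}

fn main() {
    let isa = FakeIsa { function_alignment: 16, symbol_alignment: 1 };
    let func = CompiledFunction { body: vec![0xc3], constant_alignment: 16 };
    // On x86_64 this yields 16, so rip-relative SSE loads from the function's
    // constant pool always read from 16-byte-aligned addresses.
    assert_eq!(placement_alignment(&isa, &func), 16);
    println!("{}-byte body aligned to {} bytes", func.body.len(), placement_alignment(&isa, &func));
}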

@@ -95,6 +95,7 @@ impl TargetIsa for AArch64Backend {
             dynamic_stackslot_offsets,
             bb_starts: emit_result.bb_offsets,
             bb_edges: emit_result.bb_edges,
+            alignment: emit_result.alignment,
         })
     }
@@ -179,6 +180,12 @@ impl TargetIsa for AArch64Backend {
     fn map_regalloc_reg_to_dwarf(&self, reg: Reg) -> Result<u16, systemv::RegisterMappingError> {
         inst::unwind::systemv::map_reg(reg).map(|reg| reg.0)
     }
+
+    fn function_alignment(&self) -> u32 {
+        // We use 32-byte alignment for performance reasons, but for correctness we would only need
+        // 4-byte alignment.
+        32
+    }
 }
 
 impl fmt::Display for AArch64Backend {

@@ -277,6 +277,9 @@ pub trait TargetIsa: fmt::Display + Send + Sync {
     /// will be "labeled" or might have calls between them, typically the number
     /// of defined functions in the object file.
     fn text_section_builder(&self, num_labeled_funcs: u32) -> Box<dyn TextSectionBuilder>;
+
+    /// The function alignment required by this ISA.
+    fn function_alignment(&self) -> u32;
 }
 
 /// Methods implemented for free for target ISA!

@@ -93,6 +93,7 @@ impl TargetIsa for S390xBackend {
             dynamic_stackslot_offsets,
             bb_starts: emit_result.bb_offsets,
             bb_edges: emit_result.bb_edges,
+            alignment: emit_result.alignment,
         })
     }
@@ -161,6 +162,10 @@ impl TargetIsa for S390xBackend {
     fn text_section_builder(&self, num_funcs: u32) -> Box<dyn TextSectionBuilder> {
         Box::new(MachTextSectionBuilder::<inst::Inst>::new(num_funcs))
     }
+
+    fn function_alignment(&self) -> u32 {
+        4
+    }
 }
 
 impl fmt::Display for S390xBackend {

@@ -88,6 +88,7 @@ impl TargetIsa for X64Backend {
             dynamic_stackslot_offsets,
             bb_starts: emit_result.bb_offsets,
             bb_edges: emit_result.bb_edges,
+            alignment: emit_result.alignment,
         })
     }
@@ -158,6 +159,12 @@ impl TargetIsa for X64Backend {
     fn text_section_builder(&self, num_funcs: u32) -> Box<dyn TextSectionBuilder> {
         Box::new(MachTextSectionBuilder::<inst::Inst>::new(num_funcs))
     }
+
+    /// Align functions on x86 to 16 bytes, ensuring that rip-relative loads to SSE registers are
+    /// always from aligned memory.
+    fn function_alignment(&self) -> u32 {
+        16
+    }
 }
 
 impl fmt::Display for X64Backend {

@@ -468,7 +468,11 @@ impl<I: VCodeInst> MachBuffer<I> {
     /// Align up to the given alignment.
     pub fn align_to(&mut self, align_to: CodeOffset) {
         trace!("MachBuffer: align to {}", align_to);
-        assert!(align_to.is_power_of_two());
+        assert!(
+            align_to.is_power_of_two(),
+            "{} is not a power of two",
+            align_to
+        );
         while self.cur_offset() & (align_to - 1) != 0 {
             self.put1(0);
         }
@@ -1620,7 +1624,7 @@ impl<I: VCodeInst> MachTextSectionBuilder<I> {
 }
 
 impl<I: VCodeInst> TextSectionBuilder for MachTextSectionBuilder<I> {
-    fn append(&mut self, named: bool, func: &[u8], align: Option<u32>) -> u64 {
+    fn append(&mut self, named: bool, func: &[u8], align: u32) -> u64 {
         // Conditionally emit an island if it's necessary to resolve jumps
         // between functions which are too far away.
         let size = func.len() as u32;
@@ -1628,7 +1632,7 @@ impl<I: VCodeInst> TextSectionBuilder for MachTextSectionBuilder<I> {
             self.buf.emit_island_maybe_forced(self.force_veneers, size);
         }
 
-        self.buf.align_to(align.unwrap_or(I::LabelUse::ALIGN));
+        self.buf.align_to(align);
         let pos = self.buf.cur_offset();
         if named {
             self.buf

@@ -300,6 +300,9 @@ pub struct CompiledCodeBase<T: CompilePhase> {
     /// This info is generated only if the `machine_code_cfg_info`
     /// flag is set.
     pub bb_edges: Vec<(CodeOffset, CodeOffset)>,
+    /// Minimum alignment for the function, derived from the use of any
+    /// pc-relative loads.
+    pub alignment: u32,
 }
 
 impl CompiledCodeStencil {
@@ -314,6 +317,7 @@ impl CompiledCodeStencil {
             dynamic_stackslot_offsets: self.dynamic_stackslot_offsets,
             bb_starts: self.bb_starts,
             bb_edges: self.bb_edges,
+            alignment: self.alignment,
         }
     }
 }
@@ -355,7 +359,7 @@ pub trait TextSectionBuilder {
     ///
     /// This function returns the offset at which the data was placed in the
     /// text section.
-    fn append(&mut self, labeled: bool, data: &[u8], align: Option<u32>) -> u64;
+    fn append(&mut self, labeled: bool, data: &[u8], align: u32) -> u64;
 
     /// Attempts to resolve a relocation for this function.
     ///

@@ -221,6 +221,9 @@ pub struct EmitResult<I: VCodeInst> {
     /// Stack frame size.
     pub frame_size: u32,
+
+    /// The alignment requirement for pc-relative loads.
+    pub alignment: u32,
 }
 
 /// A builder for a VCode function body.
@@ -1058,7 +1061,10 @@ impl<I: VCodeInst> VCode<I> {
         }
 
         // Emit the constants used by the function.
+        let mut alignment = 1;
         for (constant, data) in self.constants.iter() {
+            alignment = data.alignment().max(alignment);
             let label = buffer.get_label_for_constant(constant);
             buffer.defer_constant(label, data.alignment(), data.as_slice(), u32::max_value());
         }
@@ -1101,6 +1107,7 @@ impl<I: VCodeInst> VCode<I> {
             dynamic_stackslot_offsets: self.abi.dynamic_stackslot_offsets().clone(),
             value_labels_ranges,
             frame_size,
+            alignment,
         }
     }

@@ -21,7 +21,6 @@ use std::ptr::NonNull;
 use std::sync::atomic::{AtomicPtr, Ordering};
 use target_lexicon::PointerWidth;
 
-const EXECUTABLE_DATA_ALIGNMENT: u64 = 0x10;
 const WRITABLE_DATA_ALIGNMENT: u64 = 0x8;
 const READONLY_DATA_ALIGNMENT: u64 = 0x1;
@@ -234,7 +233,12 @@ impl JITModule {
         let plt_entry = self
             .memory
             .code
-            .allocate(std::mem::size_of::<[u8; 16]>(), EXECUTABLE_DATA_ALIGNMENT)
+            .allocate(
+                std::mem::size_of::<[u8; 16]>(),
+                self.isa
+                    .symbol_alignment()
+                    .max(self.isa.function_alignment() as u64),
+            )
             .unwrap()
             .cast::<[u8; 16]>();
         unsafe {
@@ -680,16 +684,20 @@ impl Module for JITModule {
         }
 
         // work around borrow-checker to allow reuse of ctx below
-        let _ = ctx.compile(self.isa())?;
+        let res = ctx.compile(self.isa())?;
+        let alignment = res.alignment as u64;
 
         let compiled_code = ctx.compiled_code().unwrap();
         let code_size = compiled_code.code_info().total_size;
         let size = code_size as usize;
 
+        let align = alignment
+            .max(self.isa.function_alignment() as u64)
+            .max(self.isa.symbol_alignment());
         let ptr = self
             .memory
             .code
-            .allocate(size, EXECUTABLE_DATA_ALIGNMENT)
+            .allocate(size, align)
             .expect("TODO: handle OOM etc.");
 
         {
@@ -745,6 +753,7 @@ impl Module for JITModule {
         &mut self,
         id: FuncId,
         func: &ir::Function,
+        alignment: u64,
         bytes: &[u8],
         relocs: &[MachReloc],
     ) -> ModuleResult<ModuleCompiledFunction> {
@@ -764,10 +773,13 @@ impl Module for JITModule {
         }
 
         let size = bytes.len();
+        let align = alignment
+            .max(self.isa.function_alignment() as u64)
+            .max(self.isa.symbol_alignment());
         let ptr = self
             .memory
             .code
-            .allocate(size, EXECUTABLE_DATA_ALIGNMENT)
+            .allocate(size, align)
             .expect("TODO: handle OOM etc.");
 
         unsafe {

@@ -640,6 +640,7 @@ pub trait Module {
         &mut self,
         func_id: FuncId,
         func: &ir::Function,
+        alignment: u64,
         bytes: &[u8],
         relocs: &[MachReloc],
     ) -> ModuleResult<ModuleCompiledFunction>;
@@ -736,10 +737,11 @@ impl<M: Module> Module for &mut M {
         &mut self,
         func_id: FuncId,
         func: &ir::Function,
+        alignment: u64,
         bytes: &[u8],
         relocs: &[MachReloc],
     ) -> ModuleResult<ModuleCompiledFunction> {
-        (**self).define_function_bytes(func_id, func, bytes, relocs)
+        (**self).define_function_bytes(func_id, func, alignment, bytes, relocs)
     }
 
     fn define_data(&mut self, data: DataId, data_ctx: &DataContext) -> ModuleResult<()> {

@@ -314,11 +314,13 @@ impl Module for ObjectModule {
         info!("defining function {}: {}", func_id, ctx.func.display());
 
         let mut code: Vec<u8> = Vec::new();
-        ctx.compile_and_emit(self.isa(), &mut code)?;
+        let res = ctx.compile_and_emit(self.isa(), &mut code)?;
+        let alignment = res.alignment as u64;
 
         self.define_function_bytes(
             func_id,
             &ctx.func,
+            alignment,
             &code,
             ctx.compiled_code().unwrap().buffer.relocs(),
         )
@@ -328,6 +330,7 @@ impl Module for ObjectModule {
         &mut self,
         func_id: FuncId,
         func: &ir::Function,
+        alignment: u64,
         bytes: &[u8],
         relocs: &[MachReloc],
     ) -> ModuleResult<ModuleCompiledFunction> {
@@ -348,7 +351,10 @@ impl Module for ObjectModule {
         }
         *defined = true;
 
-        let align = std::cmp::max(self.function_alignment, self.isa.symbol_alignment());
+        let align = self
+            .function_alignment
+            .max(self.isa.symbol_alignment())
+            .max(alignment);
         let (section, offset) = if self.per_function_section {
             let symbol_name = self.object.symbol(symbol).name.clone();
             let (section, offset) =

@@ -271,7 +271,8 @@ impl wasmtime_environ::Compiler for Compiler {
             &mut func_env,
         )?;
 
-        let (_, code_buf) = compile_maybe_cached(&mut context, isa, cache_ctx.as_mut())?;
+        let (code, code_buf) = compile_maybe_cached(&mut context, isa, cache_ctx.as_mut())?;
+        let alignment = code.alignment;
 
         let compiled_code = context.compiled_code().unwrap();
         let func_relocs = compiled_code
@@ -333,6 +334,7 @@ impl wasmtime_environ::Compiler for Compiler {
                 stack_maps,
                 start: 0,
                 length,
+                alignment,
             },
             address_map: address_transform,
         }))

@@ -95,7 +95,11 @@ impl<'a> ModuleTextBuilder<'a> {
         func: &'a CompiledFunction,
     ) -> (SymbolId, Range<u64>) {
         let body_len = func.body.len() as u64;
-        let off = self.text.append(labeled, &func.body, None);
+        let off = self.text.append(
+            labeled,
+            &func.body,
+            self.isa.function_alignment().max(func.info.alignment),
+        );
 
         let symbol_id = self.obj.add_symbol(Symbol {
             name,
@@ -198,7 +202,7 @@ impl<'a> ModuleTextBuilder<'a> {
         if padding == 0 {
             return;
         }
-        self.text.append(false, &vec![0; padding], Some(1));
+        self.text.append(false, &vec![0; padding], 1);
     }
 
     /// Indicates that the text section has been written completely and this

@@ -28,6 +28,9 @@ pub struct FunctionInfo {
     pub start: u64,
     /// The size of the compiled function, in bytes.
     pub length: u32,
+    /// The alignment requirements of this function, in bytes.
+    pub alignment: u32,
 }
 
 /// Information about a compiled trampoline which the host can call to enter