Support heaps with no offset-guard pages.

Also, say "offset-guard pages" rather than just "guard pages" to describe the
region of a heap which is never accessible and which exists to support
optimizations for heap accesses with offsets.

And, introduce a `Uimm64` immediate type, and make all heap fields use
`Uimm64` instead of `Imm64` since they really are unsigned.
This commit is contained in:
Dan Gohman
2018-11-29 04:53:30 -08:00
parent 93696a80bb
commit a20c852148
27 changed files with 302 additions and 172 deletions

View File

@@ -49,7 +49,7 @@ fn dynamic_addr(
bound_gv: ir::GlobalValue,
func: &mut ir::Function,
) {
let access_size = i64::from(access_size);
let access_size = u64::from(access_size);
let offset_ty = func.dfg.value_type(offset);
let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
let min_size = func.heaps[heap].min_size.into();
@@ -67,13 +67,13 @@ fn dynamic_addr(
} else if access_size <= min_size {
// We know that bound >= min_size, so here we can compare `offset > bound - access_size`
// without wrapping.
let adj_bound = pos.ins().iadd_imm(bound, -access_size);
let adj_bound = pos.ins().iadd_imm(bound, -(access_size as i64));
oob = pos
.ins()
.icmp(IntCC::UnsignedGreaterThan, offset, adj_bound);
} else {
// We need an overflow check for the adjusted offset.
let access_size_val = pos.ins().iconst(offset_ty, access_size);
let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
let (adj_offset, overflow) = pos.ins().iadd_cout(offset, access_size_val);
pos.ins().trapnz(overflow, ir::TrapCode::HeapOutOfBounds);
oob = pos
@@ -91,11 +91,11 @@ fn static_addr(
heap: ir::Heap,
offset: ir::Value,
access_size: u32,
bound: i64,
bound: u64,
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
) {
let access_size = i64::from(access_size);
let access_size = u64::from(access_size);
let offset_ty = func.dfg.value_type(offset);
let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
let mut pos = FuncCursor::new(func).at_inst(inst);
@@ -117,7 +117,7 @@ fn static_addr(
}
// Check `offset > limit` which is now known non-negative.
let limit = bound - access_size;
let limit = bound - u64::from(access_size);
// We may be able to omit the check entirely for 32-bit offsets if the heap bound is 4 GB or
// more.
@@ -126,10 +126,10 @@ fn static_addr(
// Prefer testing `offset >= limit - 1` when limit is odd because an even number is
// likely to be a convenient constant on ARM and other RISC architectures.
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit - 1)
.icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit as i64 - 1)
} else {
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThan, offset, limit)
.icmp_imm(IntCC::UnsignedGreaterThan, offset, limit as i64)
};
pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
}

View File

@@ -92,17 +92,15 @@ fn compute_addr(
let element_size = pos.func.tables[table].element_size;
let mut offset;
let element_size_i64: i64 = element_size.into();
debug_assert!(element_size_i64 >= 0);
let element_size_u64 = element_size_i64 as u64;
if element_size_u64 == 1 {
let element_size: u64 = element_size.into();
if element_size == 1 {
offset = index;
} else if element_size_u64.is_power_of_two() {
} else if element_size.is_power_of_two() {
offset = pos
.ins()
.ishl_imm(index, i64::from(element_size_u64.trailing_zeros()));
.ishl_imm(index, i64::from(element_size.trailing_zeros()));
} else {
offset = pos.ins().imul_imm(index, element_size);
offset = pos.ins().imul_imm(index, element_size as i64);
}
if element_offset == Offset32::new(0) {