Initial forward-edge CFI implementation (#3693)

* Initial forward-edge CFI implementation

Give the user the option to start all basic blocks that are targets
of indirect branches with the BTI instruction introduced by the
Branch Target Identification extension to the Arm instruction set
architecture.
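
For illustration, a minimal sketch of how an embedder opts in to this
behaviour through the new `use_bti` setting added below (standard
`cranelift-codegen` builder calls; the triple is a placeholder, error
handling is elided, and `finish` returning a `Result` is an assumption
about this era of the API):

```rust
use cranelift_codegen::isa;
use cranelift_codegen::settings::{self, Configurable};

// Look up the AArch64 backend and request BTI landing pads.
let mut isa_builder = isa::lookup_by_name("aarch64-unknown-linux-gnu").unwrap();
isa_builder.enable("use_bti").unwrap();
let isa = isa_builder
    .finish(settings::Flags::new(settings::builder()))
    .unwrap();
```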

Copyright (c) 2022, Arm Limited.

* Refactor `from_artifacts` to avoid second `make_executable` (#1)

This involves "parsing" twice, but since only the header of the ELF file
is parsed, the operation is inexpensive and fine to do twice.
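
A rough sketch of why the second parse is cheap (illustrative only, not
the actual `from_artifacts` code; it assumes the `object` crate, which
parses lazily and touches only the ELF header and section table):

```rust
use object::{File, Object, ObjectSection};

// `File::parse` validates the ELF header and locates the section table;
// section contents are not copied or relocated, so doing this a second
// time on the same artifact bytes is inexpensive.
fn text_size(artifact: &[u8]) -> object::Result<u64> {
    let file = File::parse(artifact)?;
    Ok(file.section_by_name(".text").map_or(0, |s| s.size()))
}
```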

* Address the code review feedback

Copyright (c) 2022, Arm Limited.

Co-authored-by: Alex Crichton <alex@alexcrichton.com>
Authored by Anton Kirilov on 2022-09-08 15:35:58 +01:00; committed by GitHub.
parent caad14826c
commit d8b290898c
32 changed files with 441 additions and 105 deletions


@@ -5,13 +5,13 @@ use crate::shared::Definitions as SharedDefinitions;
 fn define_settings(_shared: &SettingGroup) -> SettingGroup {
     let mut setting = SettingGroupBuilder::new("arm64");
-    let has_lse = setting.add_bool(
+    setting.add_bool(
         "has_lse",
         "Has Large System Extensions (FEAT_LSE) support.",
         "",
         false,
     );
 
     setting.add_bool(
         "has_pauth",
         "Has Pointer authentication (FEAT_PAuth) support; enables the use of \
@@ -44,8 +44,13 @@ fn define_settings(_shared: &SettingGroup) -> SettingGroup {
         "",
         false,
     );
+    setting.add_bool(
+        "use_bti",
+        "Use Branch Target Identification (FEAT_BTI) instructions.",
+        "",
+        false,
+    );
 
-    setting.add_predicate("use_lse", predicate!(has_lse));
-
     setting.build()
 }


@@ -237,7 +237,7 @@ impl<'a> AliasAnalysis<'a> {
             trace!("after inst{}: state is {:?}", inst.index(), state);
         }
 
-        visit_block_succs(self.func, block, |_inst, succ| {
+        visit_block_succs(self.func, block, |_inst, succ, _from_table| {
             let succ_first_inst = self
                 .func
                 .layout


@@ -129,8 +129,15 @@ pub fn has_memory_fence_semantics(op: Opcode) -> bool {
     }
 }
 
-/// Visit all successors of a block with a given visitor closure.
-pub(crate) fn visit_block_succs<F: FnMut(Inst, Block)>(f: &Function, block: Block, mut visit: F) {
+/// Visit all successors of a block with a given visitor closure. The closure
+/// arguments are the branch instruction that is used to reach the successor,
+/// the successor block itself, and a flag indicating whether the block is
+/// branched to via a table entry.
+pub(crate) fn visit_block_succs<F: FnMut(Inst, Block, bool)>(
+    f: &Function,
+    block: Block,
+    mut visit: F,
+) {
     for inst in f.layout.block_likely_branches(block) {
         if f.dfg[inst].opcode().is_branch() {
             visit_branch_targets(f, inst, &mut visit);
@@ -138,18 +145,20 @@ pub(crate) fn visit_block_succs<F: FnMut(Inst, Block)>(f: &Function, block: Bloc
     }
 }
 
-fn visit_branch_targets<F: FnMut(Inst, Block)>(f: &Function, inst: Inst, visit: &mut F) {
+fn visit_branch_targets<F: FnMut(Inst, Block, bool)>(f: &Function, inst: Inst, visit: &mut F) {
     match f.dfg[inst].analyze_branch(&f.dfg.value_lists) {
         BranchInfo::NotABranch => {}
         BranchInfo::SingleDest(dest, _) => {
-            visit(inst, dest);
+            visit(inst, dest, false);
         }
         BranchInfo::Table(table, maybe_dest) => {
            if let Some(dest) = maybe_dest {
-                visit(inst, dest);
+                // The default block is reached via a direct conditional branch,
+                // so it is not part of the table.
+                visit(inst, dest, false);
            }
            for &dest in f.jump_tables[table].as_slice() {
-                visit(inst, dest);
+                visit(inst, dest, true);
            }
        }
    }
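
A caller-side sketch of the new closure shape (hypothetical helper;
`visit_block_succs` is crate-private, so this only illustrates the
callback contract that the block-ordering change below relies on):

```rust
// Record every block that can be entered through a jump-table entry;
// these are the indirect-branch targets that will need BTI landing pads.
fn collect_table_targets(f: &Function, block: Block, out: &mut Vec<Block>) {
    visit_block_succs(f, block, |_inst, succ, from_table| {
        if from_table {
            out.push(succ);
        }
    });
}
```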


@@ -67,7 +67,11 @@ fn saved_reg_stack_size(
 /// point for the trait; it is never actually instantiated.
 pub struct AArch64MachineDeps;
 
-impl IsaFlags for aarch64_settings::Flags {}
+impl IsaFlags for aarch64_settings::Flags {
+    fn is_forward_edge_cfi_enabled(&self) -> bool {
+        self.use_bti()
+    }
+}
 
 impl ABIMachineSpec for AArch64MachineDeps {
     type I = Inst;
@@ -549,13 +553,21 @@ impl ABIMachineSpec for AArch64MachineDeps {
                     },
                 });
             }
-        } else if flags.unwind_info() && call_conv.extends_apple_aarch64() {
-            // The macOS unwinder seems to require this.
-            insts.push(Inst::Unwind {
-                inst: UnwindInst::Aarch64SetPointerAuth {
-                    return_addresses: false,
-                },
-            });
+        } else {
+            if isa_flags.use_bti() {
+                insts.push(Inst::Bti {
+                    targets: BranchTargetType::C,
+                });
+            }
+
+            if flags.unwind_info() && call_conv.extends_apple_aarch64() {
+                // The macOS unwinder seems to require this.
+                insts.push(Inst::Unwind {
+                    inst: UnwindInst::Aarch64SetPointerAuth {
+                        return_addresses: false,
+                    },
+                });
+            }
         }
 
         insts


@@ -880,6 +880,11 @@
   ;; supported.
   (Xpaclri)
 
+  ;; Branch target identification; equivalent to a no-op if Branch Target
+  ;; Identification (FEAT_BTI) is not supported.
+  (Bti
+    (targets BranchTargetType))
+
   ;; Marker, no-op in generated code: SP "virtual offset" is adjusted. This
   ;; controls how AMode::NominalSPOffset args are lowered.
   (VirtualSPOffsetAdj
@@ -1568,6 +1573,15 @@
   (B)
 ))
 
+;; Branch target types
+(type BranchTargetType
+  (enum
+    (None)
+    (C)
+    (J)
+    (JC)
+))
+
 ;; Extractors for target features ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 (decl pure sign_return_address_disabled () Unit)
 (extern constructor sign_return_address_disabled sign_return_address_disabled)


@@ -3332,6 +3332,16 @@ impl MachInstEmit for Inst {
                 sink.put4(0xd503233f | key << 6);
             }
             &Inst::Xpaclri => sink.put4(0xd50320ff),
+            &Inst::Bti { targets } => {
+                let targets = match targets {
+                    BranchTargetType::None => 0b00,
+                    BranchTargetType::C => 0b01,
+                    BranchTargetType::J => 0b10,
+                    BranchTargetType::JC => 0b11,
+                };
+                sink.put4(0xd503241f | targets << 6);
+            }
             &Inst::VirtualSPOffsetAdj { offset } => {
                 trace!(
                     "virtual sp offset adjusted by {} -> {}",

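A quick sanity check of the encoding above (BTI is a HINT-space
instruction whose two target bits sit at bits 7:6 of the word):

```rust
// "bti j" selects targets = 0b10.
let word: u32 = 0xd503241f | (0b10 << 6);
assert_eq!(word, 0xd503249f);
// Emitted little-endian, this is exactly the "9F2403D5" byte string
// expected by the new emit test below.
assert_eq!(word.to_le_bytes(), [0x9f, 0x24, 0x03, 0xd5]);
```
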

@@ -58,6 +58,13 @@ fn test_aarch64_binemit() {
     ));
     insns.push((Inst::Pacisp { key: APIKey::B }, "7F2303D5", "pacibsp"));
     insns.push((Inst::Xpaclri, "FF2003D5", "xpaclri"));
+    insns.push((
+        Inst::Bti {
+            targets: BranchTargetType::J,
+        },
+        "9F2403D5",
+        "bti j",
+    ));
     insns.push((Inst::Nop0, "", "nop-zero-len"));
     insns.push((Inst::Nop4, "1F2003D5", "nop"));
     insns.push((Inst::Csdb, "9F2203D5", "csdb"));


@@ -36,10 +36,10 @@ mod emit_tests;
 // Instructions (top level): definition
 
 pub use crate::isa::aarch64::lower::isle::generated_code::{
-    ALUOp, ALUOp3, AMode, APIKey, AtomicRMWLoopOp, AtomicRMWOp, BitOp, FPUOp1, FPUOp2, FPUOp3,
-    FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, MoveWideOp, VecALUModOp, VecALUOp,
-    VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp, VecRRNarrowOp, VecRRPairLongOp,
-    VecRRRLongModOp, VecRRRLongOp, VecShiftImmModOp, VecShiftImmOp,
+    ALUOp, ALUOp3, AMode, APIKey, AtomicRMWLoopOp, AtomicRMWOp, BitOp, BranchTargetType, FPUOp1,
+    FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp, IntToFpuOp, MInst as Inst, MoveWideOp, VecALUModOp,
+    VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp, VecRRNarrowOp,
+    VecRRPairLongOp, VecRRRLongModOp, VecRRRLongOp, VecShiftImmModOp, VecShiftImmOp,
 };
 
 /// A floating-point unit (FPU) operation with two args, a register and an immediate.
@@ -1072,6 +1072,7 @@ fn aarch64_get_operands<F: Fn(VReg) -> VReg>(inst: &Inst, collector: &mut Operan
             // Neither LR nor SP is an allocatable register, so there is no need
             // to do anything.
         }
+        &Inst::Bti { .. } => {}
         &Inst::VirtualSPOffsetAdj { .. } => {}
         &Inst::ElfTlsGetAddr { rd, .. } => {
@@ -1266,6 +1267,19 @@ impl MachInst for Inst {
     fn ref_type_regclass(_: &settings::Flags) -> RegClass {
         RegClass::Int
     }
+
+    fn gen_block_start(
+        is_indirect_branch_target: bool,
+        is_forward_edge_cfi_enabled: bool,
+    ) -> Option<Self> {
+        if is_indirect_branch_target && is_forward_edge_cfi_enabled {
+            Some(Inst::Bti {
+                targets: BranchTargetType::J,
+            })
+        } else {
+            None
+        }
+    }
 }
 
 //=============================================================================
@@ -2700,7 +2714,7 @@ impl Inst {
                     "csel {}, xzr, {}, hs ; ",
                     "csdb ; ",
                     "adr {}, pc+16 ; ",
-                    "ldrsw {}, [{}, {}, LSL 2] ; ",
+                    "ldrsw {}, [{}, {}, uxtw #2] ; ",
                     "add {}, {}, {} ; ",
                     "br {} ; ",
                     "jt_entries {:?}"
@@ -2812,6 +2826,16 @@ impl Inst {
                 "paci".to_string() + key + "sp"
             }
             &Inst::Xpaclri => "xpaclri".to_string(),
+            &Inst::Bti { targets } => {
+                let targets = match targets {
+                    BranchTargetType::None => "",
+                    BranchTargetType::C => " c",
+                    BranchTargetType::J => " j",
+                    BranchTargetType::JC => " jc",
+                };
+                "bti".to_string() + targets
+            }
             &Inst::VirtualSPOffsetAdj { offset } => {
                 state.virtual_sp_offset += offset;
                 format!("virtual_sp_offset_adjust {}", offset)


@@ -86,7 +86,7 @@ impl Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> {
     }
 
     fn use_lse(&mut self, _: Inst) -> Option<()> {
-        if self.isa_flags.use_lse() {
+        if self.isa_flags.has_lse() {
             Some(())
         } else {
             None


@@ -657,18 +657,20 @@ pub(crate) fn lower_branch(
             //   emit_island  // this forces an island at this point
             //                // if the jumptable would push us past
             //                // the deadline
-            //   subs idx, #jt_size
+            //   cmp idx, #jt_size
             //   b.hs default
+            //   csel vTmp2, xzr, idx, hs
+            //   csdb
             //   adr vTmp1, PC+16
-            //   ldr vTmp2, [vTmp1, idx, lsl #2]
-            //   add vTmp2, vTmp2, vTmp1
-            //   br vTmp2
+            //   ldr vTmp2, [vTmp1, vTmp2, uxtw #2]
+            //   add vTmp1, vTmp1, vTmp2
+            //   br vTmp1
             //   [jumptable offsets relative to JT base]
             let jt_size = targets.len() - 1;
             assert!(jt_size <= std::u32::MAX as usize);
 
             ctx.emit(Inst::EmitIsland {
-                needed_space: 4 * (6 + jt_size) as CodeOffset,
+                needed_space: 4 * (8 + jt_size) as CodeOffset,
             });
@@ -707,8 +709,10 @@ pub(crate) fn lower_branch(
             // Emit the compound instruction that does:
             //
             // b.hs default
+            // csel rB, xzr, rIndex, hs
+            // csdb
             // adr rA, jt
-            // ldrsw rB, [rA, rIndex, UXTW 2]
+            // ldrsw rB, [rA, rB, uxtw #2]
             // add rA, rA, rB
             // br rA
             // [jt entries]
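
The island reservation grows from `4 * (6 + jt_size)` to `4 * (8 + jt_size)`
because the compound sequence is two instructions longer once `csel` and
`csdb` are counted. For the three-entry tables in the filetests below this
works out to 4 * (8 + 3) = 44 bytes, matching the `emit_island 44`
expectations.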


@@ -115,6 +115,10 @@ impl TargetIsa for AArch64Backend {
         self.isa_flags.iter().collect()
     }
 
+    fn is_branch_protection_enabled(&self) -> bool {
+        self.isa_flags.use_bti()
+    }
+
     fn dynamic_vector_bytes(&self, _dyn_ty: Type) -> u32 {
         16
     }


@@ -226,6 +226,11 @@ pub trait TargetIsa: fmt::Display + Send + Sync {
     /// Get the ISA-dependent flag values that were used to make this trait object.
     fn isa_flags(&self) -> Vec<settings::Value>;
 
+    /// Get a flag indicating whether branch protection is enabled.
+    fn is_branch_protection_enabled(&self) -> bool {
+        false
+    }
+
     /// Get the ISA-dependent maximum vector register size, in bytes.
     fn dynamic_vector_bytes(&self, dynamic_ty: ir::Type) -> u32;


@@ -286,7 +286,12 @@ impl StackAMode {
 }
 
 /// Trait implemented by machine-specific backend to represent ISA flags.
-pub trait IsaFlags: Clone {}
+pub trait IsaFlags: Clone {
+    /// Get a flag indicating whether forward-edge CFI is enabled.
+    fn is_forward_edge_cfi_enabled(&self) -> bool {
+        false
+    }
+}
 
 /// Trait implemented by machine-specific backend to provide information about
 /// register assignments and to allow generating the specific instructions for
@@ -1256,6 +1261,10 @@ impl<M: ABIMachineSpec> Callee<M> {
         }
     }
 
+    pub fn is_forward_edge_cfi_enabled(&self) -> bool {
+        self.isa_flags.is_forward_edge_cfi_enabled()
+    }
+
     /// Get the calling convention implemented by this ABI object.
     pub fn call_conv(&self, sigs: &SigSet) -> isa::CallConv {
         sigs[self.sig].call_conv


@@ -106,6 +106,8 @@ pub struct BlockLoweringOrder {
     /// which is used by VCode emission to sink the blocks at the last
     /// moment (when we actually emit bytes into the MachBuffer).
     cold_blocks: FxHashSet<BlockIndex>,
+    /// Lowered blocks that are indirect branch targets.
+    indirect_branch_targets: FxHashSet<BlockIndex>,
 }
 
 /// The origin of a block in the lowered block-order: either an original CLIF
@@ -230,14 +232,20 @@ impl BlockLoweringOrder {
         // Cache the block successors to avoid re-examining branches below.
         let mut block_succs: SmallVec<[(Inst, usize, Block); 128]> = SmallVec::new();
         let mut block_succ_range = SecondaryMap::with_default((0, 0));
+        let mut indirect_branch_target_clif_blocks = FxHashSet::default();
+
         for block in f.layout.blocks() {
             let block_succ_start = block_succs.len();
             let mut succ_idx = 0;
-            visit_block_succs(f, block, |inst, succ| {
+            visit_block_succs(f, block, |inst, succ, from_table| {
                 block_out_count[block] += 1;
                 block_in_count[succ] += 1;
                 block_succs.push((inst, succ_idx, succ));
                 succ_idx += 1;
+
+                if from_table {
+                    indirect_branch_target_clif_blocks.insert(succ);
+                }
             });
             let block_succ_end = block_succs.len();
             block_succ_range[block] = (block_succ_start, block_succ_end);
@@ -432,6 +440,7 @@ impl BlockLoweringOrder {
         let mut cold_blocks = FxHashSet::default();
         let mut lowered_succ_ranges = vec![];
         let mut lb_to_bindex = FxHashMap::default();
+        let mut indirect_branch_targets = FxHashSet::default();
         for (block, succ_range) in rpo.into_iter() {
             let index = BlockIndex::new(lowered_order.len());
             lb_to_bindex.insert(block, index);
@@ -445,11 +454,19 @@ impl BlockLoweringOrder {
                     if f.layout.is_cold(block) {
                         cold_blocks.insert(index);
                     }
+
+                    if indirect_branch_target_clif_blocks.contains(&block) {
+                        indirect_branch_targets.insert(index);
+                    }
                 }
                 LoweredBlock::Edge { pred, succ, .. } => {
                     if f.layout.is_cold(pred) || f.layout.is_cold(succ) {
                         cold_blocks.insert(index);
                    }
+
+                    if indirect_branch_target_clif_blocks.contains(&succ) {
+                        indirect_branch_targets.insert(index);
+                    }
                }
            }
        }
@@ -474,6 +491,7 @@ impl BlockLoweringOrder {
             lowered_succ_ranges,
             orig_map,
             cold_blocks,
+            indirect_branch_targets,
         };
         trace!("BlockLoweringOrder: {:?}", result);
         result
@@ -494,6 +512,12 @@ impl BlockLoweringOrder {
     pub fn is_cold(&self, block: BlockIndex) -> bool {
         self.cold_blocks.contains(&block)
     }
+
+    /// Determine whether the given lowered block index is an indirect branch
+    /// target.
+    pub fn is_indirect_branch_target(&self, block: BlockIndex) -> bool {
+        self.indirect_branch_targets.contains(&block)
+    }
 }
 
 #[cfg(test)]


@@ -168,6 +168,16 @@ pub trait MachInst: Clone + Debug {
     /// Is this a safepoint?
     fn is_safepoint(&self) -> bool;
 
+    /// Generate an instruction that must appear at the beginning of a basic
+    /// block, if any. Note that the return value must not be subject to
+    /// register allocation.
+    fn gen_block_start(
+        _is_indirect_branch_target: bool,
+        _is_forward_edge_cfi_enabled: bool,
+    ) -> Option<Self> {
+        None
+    }
+
     /// A label-use kind: a type that describes the types of label references that
     /// can occur in an instruction.
     type LabelUse: MachInstLabelUse;


@@ -845,6 +845,8 @@ impl<I: VCodeInst> VCode<I> {
             ra_edits_per_block.push((end_edit_idx - start_edit_idx) as u32);
         }
 
+        let is_forward_edge_cfi_enabled = self.abi.is_forward_edge_cfi_enabled();
+
         for (block_order_idx, &block) in final_order.iter().enumerate() {
             trace!("emitting block {:?}", block);
             let new_offset = I::align_basic_block(buffer.cur_offset());
@@ -902,6 +904,13 @@ impl<I: VCodeInst> VCode<I> {
                 last_offset = Some(cur_offset);
             }
 
+            if let Some(block_start) = I::gen_block_start(
+                self.block_order.is_indirect_branch_target(block),
+                is_forward_edge_cfi_enabled,
+            ) {
+                do_emit(&block_start, &[], &mut disasm, &mut buffer, &mut state);
+            }
+
             for inst_or_edit in regalloc.block_insts_and_edits(&self, block) {
                 match inst_or_edit {
                     InstOrEdit::Inst(iix) => {


@@ -0,0 +1,111 @@
test compile precise-output
set unwind_info=false
target aarch64 use_bti

function %f1(i32) -> i32 {
    jt0 = jump_table [block1, block2, block3]

block0(v0: i32):
    br_table v0, block4, jt0

block1:
    v1 = iconst.i32 1
    jump block5(v1)

block2:
    v2 = iconst.i32 2
    jump block5(v2)

block3:
    v3 = iconst.i32 3
    jump block5(v3)

block4:
    v4 = iconst.i32 4
    jump block5(v4)

block5(v5: i32):
    v6 = iadd.i32 v0, v5
    return v6
}

; bti c
; block0:
;   emit_island 44
;   subs wzr, w0, #3
;   b.hs label1 ; csel x1, xzr, x0, hs ; csdb ; adr x15, pc+16 ; ldrsw x1, [x15, x1, uxtw #2] ; add x15, x15, x1 ; br x15 ; jt_entries [Label(MachLabel(3)), Label(MachLabel(5)), Label(MachLabel(7))]
; block1:
;   movz x5, #4
;   b label2
; block2:
;   b label9
; block3:
;   bti j
;   movz x5, #1
;   b label4
; block4:
;   b label9
; block5:
;   bti j
;   movz x5, #2
;   b label6
; block6:
;   b label9
; block7:
;   bti j
;   movz x5, #3
;   b label8
; block8:
;   b label9
; block9:
;   add w0, w0, w5
;   ret

function %f2(i64) -> i64 {
    jt0 = jump_table [block2]

block0(v0: i64):
    v1 = ireduce.i32 v0
    v2 = load.i64 notrap aligned table v0
    br_table v1, block1, jt0

block1:
    return v2

block2:
    v3 = iconst.i64 42
    v4 = iadd.i64 v2, v3
    return v4
}

; bti c
; block0:
;   ldr x6, [x0]
;   emit_island 36
;   subs wzr, w0, #1
;   b.hs label1 ; csel x8, xzr, x0, hs ; csdb ; adr x7, pc+16 ; ldrsw x8, [x7, x8, uxtw #2] ; add x7, x7, x8 ; br x7 ; jt_entries [Label(MachLabel(2))]
; block1:
;   mov x0, x6
;   ret
; block2:
;   bti j
;   mov x0, x6
;   add x0, x0, #42
;   ret

function %f3(i64) -> i64 {
    fn0 = %g(i64) -> i64

block0(v0: i64):
    v1 = call fn0(v0)
    return v1
}

; bti c
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
;   ldr x4, 8 ; b 12 ; data TestCase(%g) + 0
;   blr x4
;   ldp fp, lr, [sp], #16
;   ret
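
Note the pattern in the expected output: every function entry gets a
`bti c` landing pad (function entries are reached via `blr`), only the
lowered blocks that are actual jump-table targets get `bti j`, and blocks
reached exclusively by direct branches (such as the `br_table` default)
get no landing pad at all.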


@@ -30,9 +30,9 @@ block5(v5: i32):
 }
 
 ; block0:
-;   emit_island 36
+;   emit_island 44
 ;   subs wzr, w0, #3
-;   b.hs label1 ; csel x1, xzr, x0, hs ; csdb ; adr x15, pc+16 ; ldrsw x1, [x15, x1, LSL 2] ; add x15, x15, x1 ; br x15 ; jt_entries [Label(MachLabel(3)), Label(MachLabel(5)), Label(MachLabel(7))]
+;   b.hs label1 ; csel x1, xzr, x0, hs ; csdb ; adr x15, pc+16 ; ldrsw x1, [x15, x1, uxtw #2] ; add x15, x15, x1 ; br x15 ; jt_entries [Label(MachLabel(3)), Label(MachLabel(5)), Label(MachLabel(7))]
 ; block1:
 ;   movz x5, #4
 ;   b label2


@@ -1,6 +1,7 @@
 test interpret
 test run
 target aarch64
+target aarch64 use_bti
 target x86_64
 target s390x


@@ -1,6 +1,7 @@
 //! Defines `JITModule`.
 
-use crate::{compiled_blob::CompiledBlob, memory::Memory};
+use crate::{compiled_blob::CompiledBlob, memory::BranchProtection, memory::Memory};
 use cranelift_codegen::isa::TargetIsa;
 use cranelift_codegen::settings::Configurable;
 use cranelift_codegen::{self, ir, settings, MachReloc};
@@ -480,6 +480,12 @@ impl JITModule {
             );
         }
 
+        let branch_protection =
+            if cfg!(target_arch = "aarch64") && use_bti(&builder.isa.isa_flags()) {
+                BranchProtection::BTI
+            } else {
+                BranchProtection::None
+            };
         let mut module = Self {
             isa: builder.isa,
             hotswap_enabled: builder.hotswap_enabled,
@@ -487,9 +493,10 @@ impl JITModule {
             lookup_symbols: builder.lookup_symbols,
             libcall_names: builder.libcall_names,
             memory: MemoryHandle {
-                code: Memory::new(),
-                readonly: Memory::new(),
-                writable: Memory::new(),
+                code: Memory::new(branch_protection),
+                // Branch protection is not applicable to non-executable memory.
+                readonly: Memory::new(BranchProtection::None),
+                writable: Memory::new(BranchProtection::None),
             },
             declarations: ModuleDeclarations::default(),
             function_got_entries: SecondaryMap::new(),
@@ -959,3 +966,10 @@ fn lookup_with_dlsym(name: &str) -> Option<*const u8> {
         None
     }
 }
+
+fn use_bti(isa_flags: &Vec<settings::Value>) -> bool {
+    isa_flags
+        .iter()
+        .find(|&f| f.name == "use_bti")
+        .map_or(false, |f| f.as_bool().unwrap_or(false))
+}
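
For completeness, a sketch of the embedder path that ends up selecting
`BranchProtection::BTI` (standard `cranelift-jit`/`cranelift-native`
calls from this era; error handling is elided and exact signatures may
vary between versions):

```rust
use cranelift_codegen::settings::{self, Configurable};
use cranelift_jit::{JITBuilder, JITModule};

// On an AArch64 host with `use_bti` enabled, `JITModule::new` constructs
// its executable-memory manager with BranchProtection::BTI.
let mut isa_builder = cranelift_native::builder().unwrap();
let _ = isa_builder.enable("use_bti");
let isa = isa_builder
    .finish(settings::Flags::new(settings::builder()))
    .unwrap();
let builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
let module = JITModule::new(builder);
```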


@@ -104,6 +104,15 @@ impl Drop for PtrLen {
 
 // TODO: add a `Drop` impl for `cfg(target_os = "windows")`
 
+/// Type of branch protection to apply to executable memory.
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) enum BranchProtection {
+    /// No protection.
+    None,
+    /// Use the Branch Target Identification extension of the Arm architecture.
+    BTI,
+}
+
 /// JIT memory manager. This manages pages of suitably aligned and
 /// accessible memory. Memory will be leaked by default to have
 /// function pointers remain valid for the remainder of the
@@ -113,15 +122,17 @@ pub(crate) struct Memory {
     already_protected: usize,
     current: PtrLen,
     position: usize,
+    branch_protection: BranchProtection,
 }
 
 impl Memory {
-    pub(crate) fn new() -> Self {
+    pub(crate) fn new(branch_protection: BranchProtection) -> Self {
         Self {
             allocations: Vec::new(),
             already_protected: 0,
             current: PtrLen::new(),
             position: 0,
+            branch_protection,
         }
    }
@@ -157,14 +168,35 @@ impl Memory {
     pub(crate) fn set_readable_and_executable(&mut self) {
         self.finish_current();
 
+        let set_region_readable_and_executable = |ptr, len| {
+            if len != 0 {
+                if self.branch_protection == BranchProtection::BTI {
+                    #[cfg(all(target_arch = "aarch64", target_os = "linux"))]
+                    if std::arch::is_aarch64_feature_detected!("bti") {
+                        let prot = libc::PROT_EXEC | libc::PROT_READ | /* PROT_BTI */ 0x10;
+
+                        unsafe {
+                            if libc::mprotect(ptr as *mut libc::c_void, len, prot) < 0 {
+                                panic!("unable to make memory readable+executable");
+                            }
+                        }
+
+                        return;
+                    }
+                }
+
+                unsafe {
+                    region::protect(ptr, len, region::Protection::READ_EXECUTE)
+                        .expect("unable to make memory readable+executable");
+                }
+            }
+        };
+
         #[cfg(feature = "selinux-fix")]
         {
             for &PtrLen { ref map, ptr, len } in &self.allocations[self.already_protected..] {
-                if len != 0 && map.is_some() {
-                    unsafe {
-                        region::protect(ptr, len, region::Protection::READ_EXECUTE)
-                            .expect("unable to make memory readable+executable");
-                    }
+                if map.is_some() {
+                    set_region_readable_and_executable(ptr, len);
                 }
             }
         }
@@ -172,12 +204,7 @@ impl Memory {
         #[cfg(not(feature = "selinux-fix"))]
        {
            for &PtrLen { ptr, len } in &self.allocations[self.already_protected..] {
-                if len != 0 {
-                    unsafe {
-                        region::protect(ptr, len, region::Protection::READ_EXECUTE)
-                            .expect("unable to make memory readable+executable");
-                    }
-                }
+                set_region_readable_and_executable(ptr, len);
            }
        }
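
One note on the hard-coded `0x10` above: the `libc` crate does not expose
`PROT_BTI` at this point, so the value is written inline. It corresponds
to the Linux arm64 UAPI constant, i.e. the following (illustrative) named
form is equivalent:

```rust
// Linux arm64 UAPI (asm/mman.h): mark executable pages as guarded so
// that indirect branches must land on a BTI (or compatible) instruction.
const PROT_BTI: libc::c_int = 0x10;
let prot = libc::PROT_EXEC | libc::PROT_READ | PROT_BTI;
```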