Flush Icache on AArch64 Windows (#4997)

* cranelift: Add FlushInstructionCache for AArch64 on Windows

This was previously done in #3426 for Linux.

* wasmtime: Add FlushInstructionCache for AArch64 on Windows

This was previously done in #3426 for Linux.

* cranelift: Add MemoryUse flag to JIT Memory Manager

This allows us to keep the icache flushing code self-contained and not leak implementation details.

This also changes the windows icache flushing code to only flush pages that were previously unflushed.

* Add jit-icache-coherence crate

* cranelift: Use `jit-icache-coherence`

* wasmtime: Use `jit-icache-coherence`

* jit-icache-coherence: Make rustix feature additive

Mutually exclusive features cause issues.

* wasmtime: Remove rustix from wasmtime-jit

We now use it via jit-icache-coherence

* Rename wasmtime-jit-icache-coherency crate

* Use cfg-if in wasmtime-jit-icache-coherency crate

* Use inline instead of inline(always)

* Add unsafe marker to clear_cache

* Conditionally compile all rustix operations

membarrier does not exist on macOS

* Publish `wasmtime-jit-icache-coherence`

* Remove explicit windows check

This is implied by the target_os = "windows" above

* cranelift: Remove len != 0 check

This is redundant as it is done in non_protected_allocations_iter

* Comment cleanups

Thanks @akirilov-arm!

* Make clear_cache safe

* Rename pipeline_flush to pipeline_flush_mt

* Revert "Make clear_cache safe"

This reverts commit 21165d81c9030ed9b291a1021a367214d2942c90.

* More docs!

* Fix pipeline_flush reference on clear_cache

* Update more docs!

* Move pipeline flush after `mprotect` calls

Technically the `clear_cache` operation is a lie on AArch64, so move the pipeline flush after the `mprotect` calls so that it benefits from the implicit cache cleaning done by them.

* wasmtime: Remove rustix backend from icache crate

* wasmtime: Use libc for macos

* wasmtime: Flush icache on all architectures for Windows

* wasmtime: Add flags to membarrier call
This commit is contained in:
Afonso Bordado
2022-10-12 19:15:38 +01:00
committed by GitHub
parent 75cd888e23
commit 4639e85c4e
12 changed files with 334 additions and 87 deletions

View File

@@ -4,9 +4,11 @@ use memmap2::MmapMut;
#[cfg(not(any(feature = "selinux-fix", windows)))]
use std::alloc;
use std::convert::TryFrom;
use std::ffi::c_void;
use std::io;
use std::mem;
use std::ptr;
use wasmtime_jit_icache_coherence as icache_coherence;
/// A simple struct consisting of a pointer and length.
struct PtrLen {
@@ -161,6 +163,7 @@ impl Memory {
// TODO: Allocate more at a time.
self.current = PtrLen::with_size(size)?;
self.position = size;
Ok(self.current.ptr)
}
@@ -168,45 +171,45 @@ impl Memory {
pub(crate) fn set_readable_and_executable(&mut self) {
self.finish_current();
// Clear all the newly allocated code from cache if the processor requires it
//
// Do this before marking the memory as R+X, technically we should be able to do it after
// but there are some CPU's that have had errata about doing this with read only memory.
for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
unsafe {
icache_coherence::clear_cache(ptr as *const c_void, len)
.expect("Failed cache clear")
};
}
let set_region_readable_and_executable = |ptr, len| {
if len != 0 {
if self.branch_protection == BranchProtection::BTI {
#[cfg(all(target_arch = "aarch64", target_os = "linux"))]
if std::arch::is_aarch64_feature_detected!("bti") {
let prot = libc::PROT_EXEC | libc::PROT_READ | /* PROT_BTI */ 0x10;
if self.branch_protection == BranchProtection::BTI {
#[cfg(all(target_arch = "aarch64", target_os = "linux"))]
if std::arch::is_aarch64_feature_detected!("bti") {
let prot = libc::PROT_EXEC | libc::PROT_READ | /* PROT_BTI */ 0x10;
unsafe {
if libc::mprotect(ptr as *mut libc::c_void, len, prot) < 0 {
panic!("unable to make memory readable+executable");
}
unsafe {
if libc::mprotect(ptr as *mut libc::c_void, len, prot) < 0 {
panic!("unable to make memory readable+executable");
}
return;
}
}
unsafe {
region::protect(ptr, len, region::Protection::READ_EXECUTE)
.expect("unable to make memory readable+executable");
return;
}
}
unsafe {
region::protect(ptr, len, region::Protection::READ_EXECUTE)
.expect("unable to make memory readable+executable");
}
};
#[cfg(feature = "selinux-fix")]
{
for &PtrLen { ref map, ptr, len } in &self.allocations[self.already_protected..] {
if map.is_some() {
set_region_readable_and_executable(ptr, len);
}
}
for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
set_region_readable_and_executable(ptr, len);
}
#[cfg(not(feature = "selinux-fix"))]
{
for &PtrLen { ptr, len } in &self.allocations[self.already_protected..] {
set_region_readable_and_executable(ptr, len);
}
}
// Flush any in-flight instructions from the pipeline
icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
self.already_protected = self.allocations.len();
}
@@ -215,33 +218,27 @@ impl Memory {
pub(crate) fn set_readonly(&mut self) {
self.finish_current();
#[cfg(feature = "selinux-fix")]
{
for &PtrLen { ref map, ptr, len } in &self.allocations[self.already_protected..] {
if len != 0 && map.is_some() {
unsafe {
region::protect(ptr, len, region::Protection::READ)
.expect("unable to make memory readonly");
}
}
}
}
#[cfg(not(feature = "selinux-fix"))]
{
for &PtrLen { ptr, len } in &self.allocations[self.already_protected..] {
if len != 0 {
unsafe {
region::protect(ptr, len, region::Protection::READ)
.expect("unable to make memory readonly");
}
}
for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
unsafe {
region::protect(ptr, len, region::Protection::READ)
.expect("unable to make memory readonly");
}
}
self.already_protected = self.allocations.len();
}
/// Iterates over the allocations made since the last protection change
/// (everything at index `already_protected` and beyond), yielding only
/// regions that still need their page protections applied.
///
/// Zero-length allocations are always skipped. With the `selinux-fix`
/// feature enabled, allocations without a backing `map` are skipped as
/// well, mirroring the checks the callers used to perform inline.
fn non_protected_allocations_iter(&self) -> impl Iterator<Item = &PtrLen> {
let iter = self.allocations[self.already_protected..].iter();
// Feature-gated filters: each cfg arm returns its own filtered iterator.
#[cfg(feature = "selinux-fix")]
return iter.filter(|&PtrLen { ref map, len, .. }| len != 0 && map.is_some());
#[cfg(not(feature = "selinux-fix"))]
return iter.filter(|&PtrLen { len, .. }| *len != 0);
}
/// Frees all allocated memory regions that would be leaked otherwise.
/// Likely to invalidate existing function pointers, causing unsafety.
pub(crate) unsafe fn free_memory(&mut self) {