Use mmap'd *.cwasm as a source for memory initialization images (#3787)

* Skip memfd creation with precompiled modules

This commit updates the internal memfd support to not actually use a
memfd when a compiled module originally came from disk via the
`wasmtime::Module::deserialize_file` API. In that situation we already
have an open file descriptor, so there's no need to copy the module's
heap image to a new file descriptor.
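
For reference, the API in question looks roughly like this (a minimal
sketch assuming `wasmtime` and `anyhow` as dependencies; the file paths
are hypothetical):

```rust
use wasmtime::{Engine, Module};

fn main() -> anyhow::Result<()> {
    let engine = Engine::default();

    // Ahead-of-time compile a wasm binary into a `*.cwasm` artifact on disk.
    let wasm = std::fs::read("module.wasm")?;
    std::fs::write("module.cwasm", engine.precompile_module(&wasm)?)?;

    // `deserialize_file` mmaps the artifact rather than reading it into an
    // anonymous allocation, so the already-open file can also back the
    // copy-on-write heap-image mappings described above. The call is
    // `unsafe` because the artifact's contents are trusted as-is.
    let module = unsafe { Module::deserialize_file(&engine, "module.cwasm")? };
    drop(module);
    Ok(())
}
```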

To facilitate this new source of `mmap`, the previously memfd-specific
logic for creating a heap image is generalized into a new form of
`MemoryInitialization` which is attempted for all modules at compile
time. This means that the artifact serialized to disk contains the
memory image in its entirety, ready to use. Furthermore, the memory
image is carefully padded and aligned to the target system's page size:
notably, the data section in the final object file starts at a
page-aligned offset and its size is a multiple of the page size as
well.
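
As a rough illustration of that padding (not the crate's actual code),
offsets and sizes are rounded up to the page size like so:

```rust
/// Illustrative helper: round `len` up to the next multiple of `page_size`,
/// which must be a power of two.
fn round_up_to_page(len: usize, page_size: usize) -> usize {
    assert!(page_size.is_power_of_two());
    (len + page_size - 1) & !(page_size - 1)
}

fn main() {
    // Both the data section's offset within the object file and its total
    // size get this treatment so the image can later be mmap'd directly.
    assert_eq!(round_up_to_page(5000, 4096), 8192);
    assert_eq!(round_up_to_page(8192, 4096), 8192);
}
```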

This means that when a precompiled module is mapped from disk we can
reuse the underlying `File` to mmap all initial memory images. The
offset within the memory-mapped file consequently differs between the
memfd and non-memfd cases, but that's just another piece of state to
track in the memfd implementation.
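
That extra state might be pictured like this (the struct and field
names below are hypothetical, not the crate's actual definitions):

```rust
use std::fs::File;
use std::sync::Arc;

/// Hypothetical sketch of per-memory image state: the backing file is either
/// a dedicated memfd or the original `*.cwasm` file mapped from disk.
struct MemoryImage {
    /// Shared handle to the backing file; an `Arc` lets mappings outlive the
    /// module that created them.
    fd: Arc<File>,
    /// Offset of this memory's image within `fd`: zero for a memfd, but some
    /// page-aligned interior offset within a `*.cwasm` file.
    offset: u64,
    /// Length of the image, padded to a multiple of the page size.
    len: usize,
}
```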

In the limit this waters down the term "memfd" for this technique of
quickly initializing memory, because memfd is no longer used
unconditionally (only when a backing file isn't available). It does,
however, open up an avenue for porting this support to other OSes in
the future: while `memfd_create` is Linux-specific, both macOS and
Windows support mapping a file copy-on-write. That porting isn't done
in this PR and is left for a future refactoring.
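
The Linux-specific piece is only the creation of the anonymous file. A
sketch of that fallback path, using the raw `libc` crate for
illustration (the crate itself goes through `rustix`):

```rust
// Linux-only sketch: when no backing file exists (the module was compiled
// in-process rather than deserialized from disk), copy the heap image into
// an anonymous memfd so it can still be mapped copy-on-write.
#[cfg(target_os = "linux")]
fn heap_image_memfd(image: &[u8]) -> std::io::Result<std::fs::File> {
    use std::io::Write;
    use std::os::unix::io::FromRawFd;

    let name = std::ffi::CString::new("wasmtime-heap-image").unwrap();
    let fd = unsafe { libc::memfd_create(name.as_ptr(), libc::MFD_CLOEXEC) };
    if fd < 0 {
        return Err(std::io::Error::last_os_error());
    }
    let mut file = unsafe { std::fs::File::from_raw_fd(fd) };
    file.write_all(image)?;
    Ok(file)
}
```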

Closes #3758

* Enable "memfd" support on all unix systems

Cordon off the Linux-specific bits and enable the memfd support to
compile and run on platforms like macOS which have a Linux-like `mmap`.
This only works if a module is mapped from a precompiled module file on
disk, but that's better than not supporting it at all!
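
The portable part is the mapping itself. A sketch of the copy-on-write
`mmap` that works on Linux and macOS alike (again via raw `libc` for
illustration):

```rust
use std::fs::File;
use std::os::unix::io::AsRawFd;

/// Sketch: map `len` bytes of `file` at `offset` as a private, copy-on-write
/// region. Instance writes dirty private pages only; the file itself is
/// never modified, so this works for a read-only `*.cwasm` too.
unsafe fn map_image_cow(
    file: &File,
    offset: libc::off_t,
    len: usize,
) -> std::io::Result<*mut u8> {
    let ptr = libc::mmap(
        std::ptr::null_mut(),
        len,
        libc::PROT_READ | libc::PROT_WRITE,
        libc::MAP_PRIVATE,
        file.as_raw_fd(),
        offset, // must be page-aligned, hence the padding described earlier
    );
    if ptr == libc::MAP_FAILED {
        Err(std::io::Error::last_os_error())
    } else {
        Ok(ptr.cast())
    }
}
```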

* Fix Linux compile

* Use `Arc<File>` instead of `MmapVecFileBacking`

* Use a named struct instead of mysterious tuples

* Comment about unsafety in `Module::deserialize_file`

* Fix tests

* Fix uffd compile

* Always align data segments

No need for conditional alignment since the segments' sizes are all
page-aligned anyway.
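
A toy check of that invariant: if every segment's size is already a
multiple of the page size, back-to-back placement keeps every segment's
offset page-aligned too.

```rust
fn main() {
    let page = 4096;
    // Hypothetical padded segment sizes, all multiples of the page size.
    let segment_lens = [4096usize, 8192, 4096];
    let mut offset = 0;
    for len in segment_lens {
        assert_eq!(offset % page, 0, "segment starts on a page boundary");
        offset += len;
    }
}
```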

* Update comment in build.rs

* Use rustix, not `region`

* Fix some confusing logic/names around memory indexes

These functions all work with plain memory indexes (`MemoryIndex`), not
specifically *defined* memory indexes (`DefinedMemoryIndex`).
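
A toy model of the two index spaces (the real types are `MemoryIndex`
and `DefinedMemoryIndex` in `wasmtime_environ`; the code below is
illustrative only):

```rust
/// Toy model: memories are indexed across imports and definitions together,
/// while "defined" indexes count only the memories the module itself defines.
struct ModuleMemories {
    num_imported: u32,
}

impl ModuleMemories {
    /// Mirrors the idea behind `defined_memory_index`: an imported memory
    /// has no defined index.
    fn defined_index(&self, memory: u32) -> Option<u32> {
        memory.checked_sub(self.num_imported)
    }
}

fn main() {
    let m = ModuleMemories { num_imported: 1 };
    assert_eq!(m.defined_index(0), None); // the imported memory
    assert_eq!(m.defined_index(1), Some(0)); // first defined memory
}
```
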
commit c0c368d151 (parent 1cb08d4e67)
Author: Alex Crichton
Date:   2022-02-10 15:40:40 -06:00
Committed by: GitHub

18 changed files with 629 additions and 280 deletions

@@ -5,9 +5,9 @@
 use crate::code_memory::CodeMemory;
 use crate::debug::create_gdbjit_image;
-use crate::{MmapVec, ProfilingAgent};
-use anyhow::{anyhow, bail, Context, Result};
-use object::write::{Object, StandardSegment};
+use crate::ProfilingAgent;
+use anyhow::{anyhow, bail, Context, Error, Result};
+use object::write::{Object, StandardSegment, WritableBuffer};
 use object::{File, Object as _, ObjectSection, SectionKind};
 use serde::{Deserialize, Serialize};
 use std::convert::TryFrom;
@@ -23,7 +23,7 @@ use wasmtime_environ::{
 };
 use wasmtime_runtime::{
     CompiledModuleId, CompiledModuleIdAllocator, GdbJitImageRegistration, InstantiationError,
-    VMFunctionBody, VMTrampoline,
+    MmapVec, VMFunctionBody, VMTrampoline,
 };

 /// This is the name of the section in the final ELF image which contains
@@ -167,6 +167,7 @@ pub fn finish_compile(
         debuginfo,
         has_unparsed_debuginfo,
         data,
+        data_align,
         passive_data,
         ..
     } = translation;
@@ -179,8 +180,13 @@
         SectionKind::ReadOnlyData,
     );
     let mut total_data_len = 0;
-    for data in data.iter() {
-        obj.append_section_data(data_id, data, 1);
+    for data in data {
+        let offset = obj.append_section_data(data_id, &data, data_align.unwrap_or(1));
+        // All data segments are expected to be adjacent to one another, and
+        // with a higher alignment each data segment needs to be individually
+        // aligned to make this so, so assert that the offset each segment was
+        // placed at is directly adjacent to the previous one.
+        assert_eq!(offset as usize, total_data_len);
         total_data_len += data.len();
     }
     for data in passive_data.iter() {
@@ -266,7 +272,7 @@
     bincode::serialize_into(&mut bytes, &info)?;
     obj.append_section_data(info_id, &bytes, 1);
-    return Ok((MmapVec::from_obj(obj)?, info));
+    return Ok((mmap_vec_from_obj(obj)?, info));

     fn push_debug<'a, T>(obj: &mut Object, section: &T)
     where
@@ -285,6 +291,74 @@ pub fn finish_compile(
     }
 }

+/// Creates a new `MmapVec` from serializing the specified `obj`.
+///
+/// The returned `MmapVec` will contain the serialized version of `obj` and
+/// is sized appropriately to the exact size of the object serialized.
+pub fn mmap_vec_from_obj(obj: Object) -> Result<MmapVec> {
+    let mut result = ObjectMmap::default();
+    return match obj.emit(&mut result) {
+        Ok(()) => {
+            assert!(result.mmap.is_some(), "no reserve");
+            let mmap = result.mmap.expect("reserve not called");
+            assert_eq!(mmap.len(), result.len);
+            Ok(mmap)
+        }
+        Err(e) => match result.err.take() {
+            Some(original) => Err(original.context(e)),
+            None => Err(e.into()),
+        },
+    };
+
+    /// Helper struct to implement the `WritableBuffer` trait from the
+    /// `object` crate.
+    ///
+    /// This enables writing an object directly into an mmap'd memory so it's
+    /// immediately usable for execution after compilation. This
+    /// implementation relies on a call to `reserve` happening once up front
+    /// with all the needed data, and the mmap internally does not attempt to
+    /// grow afterwards.
+    #[derive(Default)]
+    struct ObjectMmap {
+        mmap: Option<MmapVec>,
+        len: usize,
+        err: Option<Error>,
+    }
+
+    impl WritableBuffer for ObjectMmap {
+        fn len(&self) -> usize {
+            self.len
+        }
+
+        fn reserve(&mut self, additional: usize) -> Result<(), ()> {
+            assert!(self.mmap.is_none(), "cannot reserve twice");
+            self.mmap = match MmapVec::with_capacity(additional) {
+                Ok(mmap) => Some(mmap),
+                Err(e) => {
+                    self.err = Some(e);
+                    return Err(());
+                }
+            };
+            Ok(())
+        }
+
+        fn resize(&mut self, new_len: usize) {
+            // Resizing always appends 0 bytes and since new mmaps start out
+            // as 0 bytes we don't actually need to do anything as part of
+            // this other than update our own length.
+            if new_len <= self.len {
+                return;
+            }
+            self.len = new_len;
+        }
+
+        fn write_bytes(&mut self, val: &[u8]) {
+            let mmap = self.mmap.as_mut().expect("write before reserve");
+            mmap[self.len..][..val.len()].copy_from_slice(val);
+            self.len += val.len();
+        }
+    }
+}
+
 /// This is intended to mirror the type tables in `wasmtime_environ`, except that
 /// it doesn't store the native signatures which are no longer needed past compilation.
 #[derive(Serialize, Deserialize)]