diff --git a/crates/fiber/src/unix.rs b/crates/fiber/src/unix.rs
index 143fea95215e..7b6534344ecd 100644
--- a/crates/fiber/src/unix.rs
+++ b/crates/fiber/src/unix.rs
@@ -38,19 +38,13 @@ use std::ops::Range;
 use std::ptr;
 
 pub enum FiberStack {
-    Mmap {
-        // The top of the stack; for stacks allocated by the fiber implementation itself,
-        // the base address of the allocation will be `top.sub(len.unwrap())`
-        top: *mut u8,
-        // The length of the stack
-        len: usize,
-    },
-    Manual {
+    Default {
         // The top of the stack; for stacks allocated by the fiber implementation itself,
         // the base address of the allocation will be `top.sub(len.unwrap())`
         top: *mut u8,
         // The length of the stack
         len: usize,
+        mmap: bool,
     },
     Custom(Box<dyn RuntimeFiberStack>),
 }
@@ -82,17 +76,19 @@ impl FiberStack {
             rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
         )?;
 
-        Ok(Self::Mmap {
+        Ok(Self::Default {
             top: mmap.cast::<u8>().add(mmap_len),
             len: mmap_len,
+            mmap: true,
         })
     }
 
     pub unsafe fn from_raw_parts(base: *mut u8, len: usize) -> io::Result<Self> {
-        Ok(Self::Manual {
+        Ok(Self::Default {
             top: base.add(len),
             len,
+            mmap: false,
         })
     }
@@ -102,23 +98,50 @@ impl FiberStack {
 
     pub fn top(&self) -> Option<*mut u8> {
         Some(match self {
-            FiberStack::Mmap { top, len: _ } => *top,
-            FiberStack::Manual { top, len: _ } => *top,
-            FiberStack::Custom(r) => r.top(),
+            FiberStack::Default {
+                top,
+                len: _,
+                mmap: _,
+            } => *top,
+            FiberStack::Custom(r) => {
+                let top = r.top();
+                let page_size = rustix::param::page_size();
+                assert!(
+                    top.align_offset(page_size) == 0,
+                    "expected fiber stack top ({}) to be page aligned ({})",
+                    top as usize,
+                    page_size
+                );
+                top
+            }
         })
     }
 
     pub fn range(&self) -> Option<Range<usize>> {
         Some(match self {
-            FiberStack::Mmap { top, len } => {
+            FiberStack::Default { top, len, mmap: _ } => {
                 let base = unsafe { top.sub(*len) as usize };
                 base..base + len
             }
-            FiberStack::Manual { top, len } => {
-                let base = unsafe { top.sub(*len) as usize };
-                base..base + len
+            FiberStack::Custom(s) => {
+                let range = s.range();
+                let page_size = rustix::param::page_size();
+                let start_ptr = range.start as *const u8;
+                assert!(
+                    start_ptr.align_offset(page_size) == 0,
+                    "expected fiber stack end ({}) to be page aligned ({})",
+                    range.start as usize,
+                    page_size
+                );
+                let end_ptr = range.end as *const u8;
+                assert!(
+                    end_ptr.align_offset(page_size) == 0,
+                    "expected fiber stack start ({}) to be page aligned ({})",
+                    range.end as usize,
+                    page_size
+                );
+                range
             }
-            FiberStack::Custom(r) => r.range(),
         })
     }
 }
@@ -126,7 +149,12 @@ impl FiberStack {
 impl Drop for FiberStack {
     fn drop(&mut self) {
         unsafe {
-            if let FiberStack::Mmap { top, len } = self {
+            if let FiberStack::Default {
+                top,
+                len,
+                mmap: true,
+            } = self
+            {
                 let ret = rustix::mm::munmap(top.sub(*len) as _, *len);
                 debug_assert!(ret.is_ok());
             }
diff --git a/crates/runtime/src/instance/allocator/on_demand.rs b/crates/runtime/src/instance/allocator/on_demand.rs
index 849b4769f918..eb8ca63cd800 100644
--- a/crates/runtime/src/instance/allocator/on_demand.rs
+++ b/crates/runtime/src/instance/allocator/on_demand.rs
@@ -33,22 +33,20 @@ pub struct OnDemandInstanceAllocator {
 
 impl OnDemandInstanceAllocator {
     /// Creates a new on-demand instance allocator.
-    #[cfg(feature = "async")]
-    pub fn new(
-        mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
-        stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
-        stack_size: usize,
-    ) -> Self {
+    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
         Self {
             mem_creator,
-            stack_creator,
+            #[cfg(feature = "async")]
+            stack_creator: None,
             #[cfg(feature = "async")]
             stack_size,
         }
     }
 
-    #[cfg(not(feature = "async"))]
-    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>) -> Self {
-        Self { mem_creator }
+
+    /// Set the stack creator.
+    #[cfg(feature = "async")]
+    pub fn set_stack_creator(&mut self, stack_creator: Arc<dyn RuntimeFiberStackCreator>) {
+        self.stack_creator = Some(stack_creator);
     }
 }
diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs
index 35e10cff6a3a..fee90cd8e1ea 100644
--- a/crates/wasmtime/src/config.rs
+++ b/crates/wasmtime/src/config.rs
@@ -1587,16 +1587,18 @@ impl Config {
 
     pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> {
         match &self.allocation_strategy {
-            #[cfg(feature = "async")]
-            InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
-                self.mem_creator.clone(),
-                self.stack_creator.clone(),
-                self.async_stack_size,
-            ))),
-            #[cfg(not(feature = "async"))]
-            InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
-                self.mem_creator.clone(),
-            ))),
+            InstanceAllocationStrategy::OnDemand => {
+                #[allow(unused_mut)]
+                let mut allocator = Box::new(OnDemandInstanceAllocator::new(
+                    self.mem_creator.clone(),
+                    self.async_stack_size,
+                ));
+                #[cfg(feature = "async")]
+                if let Some(stack_creator) = &self.stack_creator {
+                    allocator.set_stack_creator(stack_creator.clone());
+                }
+                Ok(allocator)
+            }
             #[cfg(feature = "pooling-allocator")]
             InstanceAllocationStrategy::Pooling(config) => {
                 #[cfg(feature = "async")]
diff --git a/crates/wasmtime/src/stack.rs b/crates/wasmtime/src/stack.rs
index 323660451666..fde7dd7568d2 100644
--- a/crates/wasmtime/src/stack.rs
+++ b/crates/wasmtime/src/stack.rs
@@ -8,7 +8,7 @@ use wasmtime_fiber::{RuntimeFiberStack, RuntimeFiberStackCreator};
 ///
 /// # Safety
 ///
-/// This trait is unsafe, as memory safety depends on a proper implemenation
+/// This trait is unsafe, as memory safety depends on a proper implementation
 /// of memory management. Stacks created by the StackCreator should always be
 /// treated as owned by a wasmtime instance, and any modification of them
 /// outside of wasmtime invoked routines is unsafe and may lead to corruption.
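[Editor's note] With the constructor made feature-independent above, a host stack creator is now attached in a separate step rather than passed to `new`. A minimal sketch of the embedder-facing flow, assuming the `Config::with_host_stack` method exercised by the test at the end of this patch (exact signatures not verified here; illustration only, not part of the diff):

    use std::sync::Arc;
    use wasmtime::{Config, Engine, StackCreator};

    // Builds an engine whose async fiber stacks come from a host-provided
    // `StackCreator` implementation (e.g. the `CustomStackCreator` in
    // tests/all/stack_creator.rs below).
    fn engine_with_host_stacks(stacks: Arc<dyn StackCreator>) -> anyhow::Result<Engine> {
        let mut config = Config::new();
        config
            .async_support(true) // host stacks only apply to async fiber stacks
            .with_host_stack(stacks); // plumbed to OnDemandInstanceAllocator::set_stack_creator
        Engine::new(&config)
    }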
diff --git a/crates/wasmtime/src/trampoline.rs b/crates/wasmtime/src/trampoline.rs
index 985807472492..2c28509c8f81 100644
--- a/crates/wasmtime/src/trampoline.rs
+++ b/crates/wasmtime/src/trampoline.rs
@@ -41,10 +41,7 @@ fn create_handle(
     let module = Arc::new(module);
     let runtime_info = &BareModuleInfo::maybe_imported_func(module, one_signature).into_traitobj();
 
-    #[cfg(feature = "async")]
-    let allocator = OnDemandInstanceAllocator::new(config.mem_creator.clone(), None, 0);
-    #[cfg(not(feature = "async"))]
-    let allocator = OnDemandInstanceAllocator::new(config.mem_creator.clone());
+    let allocator = OnDemandInstanceAllocator::new(config.mem_creator.clone(), 0);
     let handle = allocator.allocate_module(InstanceAllocationRequest {
         imports,
         host_state,
diff --git a/tests/all/stack_creator.rs b/tests/all/stack_creator.rs
index 46271e5a47f7..5b46d8942a2c 100644
--- a/tests/all/stack_creator.rs
+++ b/tests/all/stack_creator.rs
@@ -1,157 +1,155 @@
-#[cfg(all(not(target_os = "windows"), not(miri)))]
-mod not_for_windows {
-    use anyhow::{bail, Context};
-    use std::{
-        alloc::{GlobalAlloc, Layout, System},
-        ops::Range,
-        ptr::NonNull,
-        sync::Arc,
-    };
-    use wasmtime::*;
+#![cfg(all(not(target_os = "windows"), not(miri)))]
+use anyhow::{bail, Context};
+use std::{
+    alloc::{GlobalAlloc, Layout, System},
+    ops::Range,
+    ptr::NonNull,
+    sync::Arc,
+};
+use wasmtime::*;
 
-    fn align_up(v: usize, align: usize) -> usize {
-        return (v + (align - 1)) & (!(align - 1));
-    }
+fn align_up(v: usize, align: usize) -> usize {
+    return (v + (align - 1)) & (!(align - 1));
+}
 
-    struct CustomStack {
-        base: NonNull<u8>,
-        len: usize,
-    }
-    unsafe impl Send for CustomStack {}
-    unsafe impl Sync for CustomStack {}
-    impl CustomStack {
-        fn new(base: NonNull<u8>, len: usize) -> Self {
-            CustomStack { base, len }
-        }
-    }
-    unsafe impl StackMemory for CustomStack {
-        fn top(&self) -> *mut u8 {
-            unsafe { self.base.as_ptr().add(self.len) }
-        }
-        fn range(&self) -> Range<usize> {
-            let base = self.base.as_ptr() as usize;
-            base..base + self.len
-        }
-    }
+struct CustomStack {
+    base: NonNull<u8>,
+    len: usize,
+}
+unsafe impl Send for CustomStack {}
+unsafe impl Sync for CustomStack {}
+impl CustomStack {
+    fn new(base: NonNull<u8>, len: usize) -> Self {
+        CustomStack { base, len }
+    }
+}
+unsafe impl StackMemory for CustomStack {
+    fn top(&self) -> *mut u8 {
+        unsafe { self.base.as_ptr().add(self.len) }
+    }
+    fn range(&self) -> Range<usize> {
+        let base = self.base.as_ptr() as usize;
+        base..base + self.len
+    }
+}
 
-    // A creator that allocates stacks on the heap instead of mmap'ing.
-    struct CustomStackCreator {
-        memory: NonNull<u8>,
-        size: usize,
-        layout: Layout,
-    }
+// A creator that allocates stacks on the heap instead of mmap'ing.
+struct CustomStackCreator {
+    memory: NonNull<u8>,
+    size: usize,
+    layout: Layout,
+}
 
-    unsafe impl Send for CustomStackCreator {}
-    unsafe impl Sync for CustomStackCreator {}
-    impl CustomStackCreator {
-        fn new() -> Result<Self> {
-            // 1MB
-            const MINIMUM_STACK_SIZE: usize = 1 * 1_024 * 1_024;
-            let page_size = rustix::param::page_size();
-            let size = align_up(MINIMUM_STACK_SIZE, page_size);
-            // Add an extra page for the guard page
-            let layout = Layout::from_size_align(size + page_size, page_size)
-                .context("unable to compute stack layout")?;
-            let memory = unsafe {
-                let mem = System.alloc(layout);
-                let notnull = NonNull::new(mem);
-                if let Some(mem) = notnull {
-                    // Mark guard page as protected
-                    rustix::mm::mprotect(
-                        mem.as_ptr().cast(),
-                        page_size,
-                        rustix::mm::MprotectFlags::empty(),
-                    )?;
-                }
-                notnull
-            }
-            .context("unable to allocate stack memory")?;
-            Ok(CustomStackCreator {
-                memory,
-                size,
-                layout,
-            })
-        }
-        fn range(&self) -> Range<usize> {
-            let page_size = rustix::param::page_size();
-            let base = unsafe { self.memory.as_ptr().add(page_size) as usize };
-            base..base + self.size
-        }
-    }
-    impl Drop for CustomStackCreator {
-        fn drop(&mut self) {
-            let page_size = rustix::param::page_size();
-            unsafe {
-                // Unprotect the guard page as the allocator could reuse it.
-                rustix::mm::mprotect(
-                    self.memory.as_ptr().cast(),
-                    page_size,
-                    rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
-                )
-                .unwrap();
-                System.dealloc(self.memory.as_ptr(), self.layout);
-            }
-        }
-    }
+unsafe impl Send for CustomStackCreator {}
+unsafe impl Sync for CustomStackCreator {}
+impl CustomStackCreator {
+    fn new() -> Result<Self> {
+        // 1MB
+        const MINIMUM_STACK_SIZE: usize = 1 * 1_024 * 1_024;
+        let page_size = rustix::param::page_size();
+        let size = align_up(MINIMUM_STACK_SIZE, page_size);
+        // Add an extra page for the guard page
+        let layout = Layout::from_size_align(size + page_size, page_size)
+            .context("unable to compute stack layout")?;
+        let memory = unsafe {
+            let mem = System.alloc(layout);
+            let notnull = NonNull::new(mem);
+            if let Some(mem) = notnull {
+                // Mark guard page as protected
+                rustix::mm::mprotect(
+                    mem.as_ptr().cast(),
+                    page_size,
+                    rustix::mm::MprotectFlags::empty(),
+                )?;
+            }
+            notnull
+        }
+        .context("unable to allocate stack memory")?;
+        Ok(CustomStackCreator {
+            memory,
+            size,
+            layout,
+        })
+    }
+    fn range(&self) -> Range<usize> {
+        let page_size = rustix::param::page_size();
+        let base = unsafe { self.memory.as_ptr().add(page_size) as usize };
+        base..base + self.size
+    }
+}
+impl Drop for CustomStackCreator {
+    fn drop(&mut self) {
+        let page_size = rustix::param::page_size();
+        unsafe {
+            // Unprotect the guard page as the allocator could reuse it.
+            rustix::mm::mprotect(
+                self.memory.as_ptr().cast(),
+                page_size,
+                rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
+            )
+            .unwrap();
+            System.dealloc(self.memory.as_ptr(), self.layout);
+        }
+    }
+}
 
-    unsafe impl StackCreator for CustomStackCreator {
-        fn new_stack(&self, size: usize) -> Result<Box<dyn StackMemory>> {
-            if size != self.size {
-                bail!("must use the size we allocated for this stack memory creator");
-            }
-            let page_size = rustix::param::page_size();
-            // skip over the page size
-            let base_ptr = unsafe { self.memory.as_ptr().add(page_size) };
-            let base = NonNull::new(base_ptr).context("unable to compute stack base")?;
-            Ok(Box::new(CustomStack::new(base, self.size)))
-        }
-    }
+unsafe impl StackCreator for CustomStackCreator {
+    fn new_stack(&self, size: usize) -> Result<Box<dyn StackMemory>> {
+        if size != self.size {
+            bail!("must use the size we allocated for this stack memory creator");
+        }
+        let page_size = rustix::param::page_size();
+        // skip over the page size
+        let base_ptr = unsafe { self.memory.as_ptr().add(page_size) };
+        let base = NonNull::new(base_ptr).context("unable to compute stack base")?;
+        Ok(Box::new(CustomStack::new(base, self.size)))
+    }
+}
 
-    fn config() -> (Store<()>, Arc<CustomStackCreator>) {
-        let stack_creator = Arc::new(CustomStackCreator::new().unwrap());
-        let mut config = Config::new();
-        config
-            .async_support(true)
-            .max_wasm_stack(stack_creator.size / 2)
-            .async_stack_size(stack_creator.size)
-            .with_host_stack(stack_creator.clone());
-        (
-            Store::new(&Engine::new(&config).unwrap(), ()),
-            stack_creator,
-        )
-    }
+fn config() -> (Store<()>, Arc<CustomStackCreator>) {
+    let stack_creator = Arc::new(CustomStackCreator::new().unwrap());
+    let mut config = Config::new();
+    config
+        .async_support(true)
+        .max_wasm_stack(stack_creator.size / 2)
+        .async_stack_size(stack_creator.size)
+        .with_host_stack(stack_creator.clone());
+    (
+        Store::new(&Engine::new(&config).unwrap(), ()),
+        stack_creator,
+    )
+}
 
-    #[tokio::test]
-    async fn called_on_custom_heap_stack() -> Result<()> {
-        let (mut store, stack_creator) = config();
-        let module = Module::new(
-            store.engine(),
-            r#"
+#[tokio::test]
+async fn called_on_custom_heap_stack() -> Result<()> {
+    let (mut store, stack_creator) = config();
+    let module = Module::new(
+        store.engine(),
+        r#"
 (module
     (import "host" "callback" (func $callback (result i64)))
     (func $f (result i64) (call $callback))
     (export "f" (func $f))
 )
 "#,
-        )?;
+    )?;
 
-        let ty = FuncType::new([], [ValType::I64]);
-        let host_func = Func::new(&mut store, ty, move |_caller, _params, results| {
-            let foo = 42;
-            // output an address on the stack
-            results[0] = Val::I64((&foo as *const i32) as usize as i64);
-            Ok(())
-        });
-        let export = wasmtime::Extern::Func(host_func);
-        let instance = Instance::new_async(&mut store, &module, &[export]).await?;
-        let mut results = [Val::I64(0)];
-        instance
-            .get_func(&mut store, "f")
-            .context("missing function export")?
-            .call_async(&mut store, &[], &mut results)
-            .await?;
-        // Make sure the stack address we wrote was within our custom stack range
-        let stack_address = results[0].i64().unwrap() as usize;
-        assert!(stack_creator.range().contains(&stack_address));
-        Ok(())
-    }
-}
+    let ty = FuncType::new([], [ValType::I64]);
+    let host_func = Func::new(&mut store, ty, move |_caller, _params, results| {
+        let foo = 42;
+        // output an address on the stack
+        results[0] = Val::I64((&foo as *const i32) as usize as i64);
+        Ok(())
+    });
+    let export = wasmtime::Extern::Func(host_func);
+    let instance = Instance::new_async(&mut store, &module, &[export]).await?;
+    let mut results = [Val::I64(0)];
+    instance
+        .get_func(&mut store, "f")
+        .context("missing function export")?
+        .call_async(&mut store, &[], &mut results)
+        .await?;
+    // Make sure the stack address we wrote was within our custom stack range
+    let stack_address = results[0].i64().unwrap() as usize;
+    assert!(stack_creator.range().contains(&stack_address));
+    Ok(())
+}
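[Editor's note] One consequence of the fiber changes at the top of this patch is worth spelling out: `FiberStack::top` and `FiberStack::range` now assert that a custom stack's top pointer and both ends of its range are page-aligned. The test satisfies this by rounding the stack size up with `align_up` and starting the usable region one page past the page-aligned allocation (the guard page). A sketch of the invariant, assuming the `rustix` crate used throughout the patch (illustration only, not code from this diff):

    use std::ops::Range;

    // Mirrors the assertions added to FiberStack::top() and FiberStack::range()
    // in crates/fiber/src/unix.rs: any custom `StackMemory` must satisfy all
    // three checks, otherwise the runtime panics when the stack is used.
    fn check_custom_stack_alignment(top: *mut u8, range: Range<usize>) {
        let page_size = rustix::param::page_size();
        assert_eq!(top.align_offset(page_size), 0, "stack top must be page aligned");
        assert_eq!(range.start % page_size, 0, "stack range start must be page aligned");
        assert_eq!(range.end % page_size, 0, "stack range end must be page aligned");
    }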