diff --git a/Cargo.lock b/Cargo.lock index 574c97a2f20c..0f6c55fe22ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3484,6 +3484,7 @@ dependencies = [ "thiserror", "userfaultfd", "wasmtime-environ", + "wasmtime-fiber", "winapi", ] diff --git a/crates/fiber/src/arch/x86_64.S b/crates/fiber/src/arch/x86_64.S index 89e20b1aeee4..2ae01fc2900b 100644 --- a/crates/fiber/src/arch/x86_64.S +++ b/crates/fiber/src/arch/x86_64.S @@ -68,7 +68,7 @@ FUNCTION(wasmtime_fiber_init): // And then we specify the stack pointer resumption should begin at. Our // `wasmtime_fiber_switch` function consumes 6 registers plus a return - // pointer, and the top 16 bytes aree resereved, so that's: + // pointer, and the top 16 bytes are reserved, so that's: // // (6 + 1) * 16 + 16 = 0x48 lea -0x48(%rdi), %rax diff --git a/crates/fiber/src/lib.rs b/crates/fiber/src/lib.rs index 9a8d057f35c6..50431780e449 100644 --- a/crates/fiber/src/lib.rs +++ b/crates/fiber/src/lib.rs @@ -14,7 +14,38 @@ mod unix; #[cfg(unix)] use unix as imp; +/// Represents an execution stack to use for a fiber. +#[derive(Debug)] +pub struct FiberStack(imp::FiberStack); + +impl FiberStack { + /// Creates a new fiber stack of the given size. + pub fn new(size: usize) -> io::Result { + Ok(Self(imp::FiberStack::new(size)?)) + } + + /// Creates a new fiber stack with the given pointer to the top of the stack. + /// + /// # Safety + /// + /// This is unsafe because there is no validation of the given stack pointer. + /// + /// The caller must properly allocate the stack space with a guard page and + /// make the pages accessible for correct behavior. + pub unsafe fn from_top_ptr(top: *mut u8) -> io::Result { + Ok(Self(imp::FiberStack::from_top_ptr(top)?)) + } + + /// Gets the top of the stack. + /// + /// Returns `None` if the platform does not support getting the top of the stack. + pub fn top(&self) -> Option<*mut u8> { + self.0.top() + } +} + pub struct Fiber<'a, Resume, Yield, Return> { + stack: FiberStack, inner: imp::Fiber, done: Cell, _phantom: PhantomData<&'a (Resume, Yield, Return)>, @@ -34,42 +65,23 @@ enum RunResult { } impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> { - /// Creates a new fiber which will execute `func` on a new native stack of - /// size `stack_size`. + /// Creates a new fiber which will execute `func` on the given stack. /// /// This function returns a `Fiber` which, when resumed, will execute `func` /// to completion. When desired the `func` can suspend itself via /// `Fiber::suspend`. pub fn new( - stack_size: usize, + stack: FiberStack, func: impl FnOnce(Resume, &Suspend) -> Return + 'a, - ) -> io::Result> { - Ok(Fiber { - inner: imp::Fiber::new(stack_size, func)?, - done: Cell::new(false), - _phantom: PhantomData, - }) - } + ) -> Self { + let inner = imp::Fiber::new(&stack.0, func); - /// Creates a new fiber with existing stack space that will execute `func`. - /// - /// This function returns a `Fiber` which, when resumed, will execute `func` - /// to completion. When desired the `func` can suspend itself via - /// `Fiber::suspend`. - /// - /// # Safety - /// - /// The caller must properly allocate the stack space with a guard page and - /// make the pages accessible for correct behavior. - pub unsafe fn new_with_stack( - top_of_stack: *mut u8, - func: impl FnOnce(Resume, &Suspend) -> Return + 'a, - ) -> io::Result> { - Ok(Fiber { - inner: imp::Fiber::new_with_stack(top_of_stack, func)?, + Self { + stack, + inner, done: Cell::new(false), _phantom: PhantomData, - }) + } } /// Resumes execution of this fiber. 
@@ -90,7 +102,7 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> { pub fn resume(&self, val: Resume) -> Result { assert!(!self.done.replace(true), "cannot resume a finished fiber"); let result = Cell::new(RunResult::Resuming(val)); - self.inner.resume(&result); + self.inner.resume(&self.stack.0, &result); match result.into_inner() { RunResult::Resuming(_) | RunResult::Executing => unreachable!(), RunResult::Yield(y) => { @@ -106,6 +118,11 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> { pub fn done(&self) -> bool { self.done.get() } + + /// Gets the stack associated with this fiber. + pub fn stack(&self) -> &FiberStack { + &self.stack + } } impl Suspend { @@ -148,19 +165,20 @@ impl Drop for Fiber<'_, A, B, C> { #[cfg(test)] mod tests { - use super::Fiber; + use super::{Fiber, FiberStack}; use std::cell::Cell; use std::panic::{self, AssertUnwindSafe}; use std::rc::Rc; #[test] fn small_stacks() { - Fiber::<(), (), ()>::new(0, |_, _| {}) - .unwrap() + let stack = FiberStack::new(0).unwrap(); + Fiber::<(), (), ()>::new(stack, |_, _| {}) .resume(()) .unwrap(); - Fiber::<(), (), ()>::new(1, |_, _| {}) - .unwrap() + + let stack = FiberStack::new(1).unwrap(); + Fiber::<(), (), ()>::new(stack, |_, _| {}) .resume(()) .unwrap(); } @@ -169,10 +187,10 @@ mod tests { fn smoke() { let hit = Rc::new(Cell::new(false)); let hit2 = hit.clone(); - let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |_, _| { + let stack = FiberStack::new(1024 * 1024).unwrap(); + let fiber = Fiber::<(), (), ()>::new(stack, move |_, _| { hit2.set(true); - }) - .unwrap(); + }); assert!(!hit.get()); fiber.resume(()).unwrap(); assert!(hit.get()); @@ -182,12 +200,12 @@ mod tests { fn suspend_and_resume() { let hit = Rc::new(Cell::new(false)); let hit2 = hit.clone(); - let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |_, s| { + let stack = FiberStack::new(1024 * 1024).unwrap(); + let fiber = Fiber::<(), (), ()>::new(stack, move |_, s| { s.suspend(()); hit2.set(true); s.suspend(()); - }) - .unwrap(); + }); assert!(!hit.get()); assert!(fiber.resume(()).is_err()); assert!(!hit.get()); @@ -219,14 +237,14 @@ mod tests { } fn run_test() { - let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |(), s| { + let stack = FiberStack::new(1024 * 1024).unwrap(); + let fiber = Fiber::<(), (), ()>::new(stack, move |(), s| { assert_contains_host(); s.suspend(()); assert_contains_host(); s.suspend(()); assert_contains_host(); - }) - .unwrap(); + }); assert!(fiber.resume(()).is_err()); assert!(fiber.resume(()).is_err()); assert!(fiber.resume(()).is_ok()); @@ -239,11 +257,11 @@ mod tests { fn panics_propagated() { let a = Rc::new(Cell::new(false)); let b = SetOnDrop(a.clone()); - let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |(), _s| { + let stack = FiberStack::new(1024 * 1024).unwrap(); + let fiber = Fiber::<(), (), ()>::new(stack, move |(), _s| { drop(&b); panic!(); - }) - .unwrap(); + }); assert!(panic::catch_unwind(AssertUnwindSafe(|| fiber.resume(()))).is_err()); assert!(a.get()); @@ -258,12 +276,12 @@ mod tests { #[test] fn suspend_and_resume_values() { - let fiber = Fiber::new(1024 * 1024, move |first, s| { + let stack = FiberStack::new(1024 * 1024).unwrap(); + let fiber = Fiber::new(stack, move |first, s| { assert_eq!(first, 2.0); assert_eq!(s.suspend(4), 3.0); "hello".to_string() - }) - .unwrap(); + }); assert_eq!(fiber.resume(2.0), Err(4)); assert_eq!(fiber.resume(3.0), Ok("hello".to_string())); } diff --git a/crates/fiber/src/unix.rs b/crates/fiber/src/unix.rs index 
0cc57ca319ce..82cc31e40cf0 100644 --- a/crates/fiber/src/unix.rs +++ b/crates/fiber/src/unix.rs @@ -34,17 +34,81 @@ use std::cell::Cell; use std::io; use std::ptr; -pub struct Fiber { +#[derive(Debug)] +pub struct FiberStack { // The top of the stack; for stacks allocated by the fiber implementation itself, - // the base address of the allocation will be `top_of_stack.sub(alloc_len.unwrap())` - top_of_stack: *mut u8, - alloc_len: Option, + // the base address of the allocation will be `top.sub(len.unwrap())` + top: *mut u8, + // The length of the stack; `None` when the stack was not created by this implementation. + len: Option, } -pub struct Suspend { - top_of_stack: *mut u8, +impl FiberStack { + pub fn new(size: usize) -> io::Result { + unsafe { + // Round up our stack size request to the nearest multiple of the + // page size. + let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize; + let size = if size == 0 { + page_size + } else { + (size + (page_size - 1)) & (!(page_size - 1)) + }; + + // Add in one page for a guard page and then ask for some memory. + let mmap_len = size + page_size; + let mmap = libc::mmap( + ptr::null_mut(), + mmap_len, + libc::PROT_NONE, + libc::MAP_ANON | libc::MAP_PRIVATE, + -1, + 0, + ); + if mmap == libc::MAP_FAILED { + return Err(io::Error::last_os_error()); + } + + if libc::mprotect( + mmap.cast::().add(page_size).cast(), + size, + libc::PROT_READ | libc::PROT_WRITE, + ) != 0 + { + return Err(io::Error::last_os_error()); + } + + Ok(Self { + top: mmap.cast::().add(mmap_len), + len: Some(mmap_len), + }) + } + } + + pub unsafe fn from_top_ptr(top: *mut u8) -> io::Result { + Ok(Self { top, len: None }) + } + + pub fn top(&self) -> Option<*mut u8> { + Some(self.top) + } +} + +impl Drop for FiberStack { + fn drop(&mut self) { + unsafe { + if let Some(len) = self.len { + let ret = libc::munmap(self.top.sub(len) as _, len); + debug_assert!(ret == 0); + } + } + } } +pub struct Fiber; + +pub struct Suspend(*mut u8); + extern "C" { fn wasmtime_fiber_init( top_of_stack: *mut u8, @@ -59,97 +123,35 @@ where F: FnOnce(A, &super::Suspend) -> C, { unsafe { - let inner = Suspend { top_of_stack }; + let inner = Suspend(top_of_stack); let initial = inner.take_resume::(); super::Suspend::::execute(inner, initial, Box::from_raw(arg0.cast::())) } } impl Fiber { - pub fn new(stack_size: usize, func: F) -> io::Result - where - F: FnOnce(A, &super::Suspend) -> C, - { - let fiber = Self::alloc_with_stack(stack_size)?; - fiber.init(func); - Ok(fiber) - } - - pub fn new_with_stack(top_of_stack: *mut u8, func: F) -> io::Result - where - F: FnOnce(A, &super::Suspend) -> C, - { - let fiber = Self { - top_of_stack, - alloc_len: None, - }; - - fiber.init(func); - - Ok(fiber) - } - - fn init(&self, func: F) + pub fn new(stack: &FiberStack, func: F) -> Self where F: FnOnce(A, &super::Suspend) -> C, { unsafe { let data = Box::into_raw(Box::new(func)).cast(); - wasmtime_fiber_init(self.top_of_stack, fiber_start::, data); + wasmtime_fiber_init(stack.top, fiber_start::, data); } - } - fn alloc_with_stack(stack_size: usize) -> io::Result { - unsafe { - // Round up our stack size request to the nearest multiple of the - // page size. - let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize; - let stack_size = if stack_size == 0 { - page_size - } else { - (stack_size + (page_size - 1)) & (!(page_size - 1)) - }; - - // Add in one page for a guard page and then ask for some memory. 
- let mmap_len = stack_size + page_size; - let mmap = libc::mmap( - ptr::null_mut(), - mmap_len, - libc::PROT_NONE, - libc::MAP_ANON | libc::MAP_PRIVATE, - -1, - 0, - ); - if mmap == libc::MAP_FAILED { - return Err(io::Error::last_os_error()); - } - let ret = Self { - top_of_stack: mmap.cast::().add(mmap_len), - alloc_len: Some(mmap_len), - }; - let res = libc::mprotect( - mmap.cast::().add(page_size).cast(), - stack_size, - libc::PROT_READ | libc::PROT_WRITE, - ); - if res != 0 { - Err(io::Error::last_os_error()) - } else { - Ok(ret) - } - } + Self } - pub(crate) fn resume(&self, result: &Cell>) { + pub(crate) fn resume(&self, stack: &FiberStack, result: &Cell>) { unsafe { // Store where our result is going at the very tip-top of the // stack, otherwise known as our reserved slot for this information. // // In the diagram above this is updating address 0xAff8 - let addr = self.top_of_stack.cast::().offset(-1); + let addr = stack.top.cast::().offset(-1); addr.write(result as *const _ as usize); - wasmtime_fiber_switch(self.top_of_stack); + wasmtime_fiber_switch(stack.top); // null this out to help catch use-after-free addr.write(0); @@ -157,23 +159,12 @@ impl Fiber { } } -impl Drop for Fiber { - fn drop(&mut self) { - unsafe { - if let Some(alloc_len) = self.alloc_len { - let ret = libc::munmap(self.top_of_stack.sub(alloc_len) as _, alloc_len); - debug_assert!(ret == 0); - } - } - } -} - impl Suspend { pub(crate) fn switch(&self, result: RunResult) -> A { unsafe { // Calculate 0xAff8 and then write to it (*self.result_location::()).set(result); - wasmtime_fiber_switch(self.top_of_stack); + wasmtime_fiber_switch(self.0); self.take_resume::() } } @@ -186,8 +177,8 @@ impl Suspend { } unsafe fn result_location(&self) -> *const Cell> { - let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read(); + let ret = self.0.cast::<*const u8>().offset(-1).read(); assert!(!ret.is_null()); - return ret.cast(); + ret.cast() } } diff --git a/crates/fiber/src/windows.rs b/crates/fiber/src/windows.rs index b2d657eb8848..ef921345cb44 100644 --- a/crates/fiber/src/windows.rs +++ b/crates/fiber/src/windows.rs @@ -7,6 +7,24 @@ use winapi::shared::winerror::ERROR_NOT_SUPPORTED; use winapi::um::fibersapi::*; use winapi::um::winbase::*; +// The fiber's stack is empty on Windows as we use the native implementation. 
+#[derive(Debug)] +pub struct FiberStack; + +impl FiberStack { + pub fn new(size: usize) -> io::Result { + Ok(Self) + } + + pub unsafe fn from_top_ptr(top: *mut u8) -> io::Result { + Err(io::Error::from_raw_os_error(ERROR_NOT_SUPPORTED as i32)) + } + + pub fn top(&self) -> Option<*mut u8> { + None + } +} + pub struct Fiber { fiber: LPVOID, state: Box, @@ -41,16 +59,17 @@ where } impl Fiber { - pub fn new(stack_size: usize, func: F) -> io::Result - where - F: FnOnce(A, &super::Suspend) -> C, - { + pub fn new( + _stack: &FiberStack, + func: impl FnOnce(Resume, &Suspend) -> Return + 'a, + ) -> Self { unsafe { let state = Box::new(StartState { initial_closure: Cell::new(Box::into_raw(Box::new(func)).cast()), parent: Cell::new(ptr::null_mut()), result_location: Cell::new(ptr::null()), }); + let fiber = CreateFiberEx( 0, stack_size, @@ -58,23 +77,17 @@ impl Fiber { Some(fiber_start::), &*state as *const StartState as *mut _, ); + if fiber.is_null() { drop(Box::from_raw(state.initial_closure.get().cast::())); - Err(io::Error::last_os_error()) - } else { - Ok(Self { fiber, state }) + return Err(io::Error::last_os_error()); } - } - } - pub fn new_with_stack(_top_of_stack: *mut u8, _func: F) -> io::Result - where - F: FnOnce(A, &super::Suspend) -> C, - { - Err(io::Error::from_raw_os_error(ERROR_NOT_SUPPORTED as i32)) + Ok(Self { fiber, state }) + } } - pub(crate) fn resume(&self, result: &Cell>) { + pub(crate) fn resume(&self, _stack: &FiberStack, result: &Cell>) { unsafe { let is_fiber = IsThreadAFiber() != 0; let parent_fiber = if is_fiber { diff --git a/crates/runtime/Cargo.toml b/crates/runtime/Cargo.toml index 715cbafa0f56..8a7f6737d7aa 100644 --- a/crates/runtime/Cargo.toml +++ b/crates/runtime/Cargo.toml @@ -13,6 +13,7 @@ edition = "2018" [dependencies] wasmtime-environ = { path = "../environ", version = "0.25.0" } +wasmtime-fiber = { path = "../fiber", version = "0.25.0", optional = true } region = "2.1.0" libc = { version = "0.2.82", default-features = false } log = "0.4.8" @@ -45,5 +46,7 @@ maintenance = { status = "actively-developed" } [features] default = [] +async = ["wasmtime-fiber"] + # Enables support for userfaultfd in the pooling allocator when building on Linux uffd = ["userfaultfd"] diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index 84e96d7aadd3..b8f2b3e3d873 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -87,13 +87,14 @@ pub enum InstantiationError { } /// An error while creating a fiber stack. +#[cfg(feature = "async")] #[derive(Error, Debug)] pub enum FiberStackError { /// Insufficient resources available for the request. #[error("Insufficient resources: {0}")] Resource(anyhow::Error), - /// An error for when the allocator doesn't support custom fiber stacks. - #[error("Custom fiber stacks are not supported by the allocator")] + /// An error for when the allocator doesn't support fiber stacks. + #[error("fiber stacks are not supported by the allocator")] NotSupported, /// A limit on how many fibers are supported has been reached. #[error("Limit of {0} concurrent fibers has been reached")] @@ -152,20 +153,16 @@ pub unsafe trait InstanceAllocator: Send + Sync { unsafe fn deallocate(&self, handle: &InstanceHandle); /// Allocates a fiber stack for calling async functions on. - /// - /// Returns the top of the fiber stack if successfully allocated. 
- fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError>; + #[cfg(feature = "async")] + fn allocate_fiber_stack(&self) -> Result; - /// Deallocates a fiber stack that was previously allocated. + /// Deallocates a fiber stack that was previously allocated with `allocate_fiber_stack`. /// /// # Safety /// - /// This function is unsafe because there are no guarantees that the given stack - /// is no longer in use. - /// - /// Additionally, passing a stack pointer that was not returned from `allocate_fiber_stack` - /// will lead to undefined behavior. - unsafe fn deallocate_fiber_stack(&self, stack: *mut u8); + /// The provided stack is required to have been allocated with `allocate_fiber_stack`. + #[cfg(feature = "async")] + unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack); } fn get_table_init_start( @@ -539,12 +536,21 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) { #[derive(Clone)] pub struct OnDemandInstanceAllocator { mem_creator: Option>, + #[cfg(feature = "async")] + stack_size: usize, } impl OnDemandInstanceAllocator { /// Creates a new on-demand instance allocator. - pub fn new(mem_creator: Option>) -> Self { - Self { mem_creator } + pub fn new( + mem_creator: Option>, + #[cfg(feature = "async")] stack_size: usize, + ) -> Self { + Self { + mem_creator, + #[cfg(feature = "async")] + stack_size, + } } fn create_tables(module: &Module) -> PrimaryMap { @@ -576,6 +582,16 @@ impl OnDemandInstanceAllocator { } } +impl Default for OnDemandInstanceAllocator { + fn default() -> Self { + Self { + mem_creator: None, + #[cfg(feature = "async")] + stack_size: 0, + } + } +} + unsafe impl InstanceAllocator for OnDemandInstanceAllocator { unsafe fn allocate( &self, @@ -627,13 +643,18 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator { alloc::dealloc(handle.instance.cast(), layout); } - fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> { - // The on-demand allocator does not support allocating fiber stacks - Err(FiberStackError::NotSupported) + #[cfg(feature = "async")] + fn allocate_fiber_stack(&self) -> Result { + if self.stack_size == 0 { + return Err(FiberStackError::NotSupported); + } + + wasmtime_fiber::FiberStack::new(self.stack_size) + .map_err(|e| FiberStackError::Resource(e.into())) } - unsafe fn deallocate_fiber_stack(&self, _stack: *mut u8) { - // This should never be called as `allocate_fiber_stack` never returns success - unreachable!() + #[cfg(feature = "async")] + unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) { + // The on-demand allocator has no further bookkeeping for fiber stacks } } diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs index 833538c6042e..3331d31c60a5 100644 --- a/crates/runtime/src/instance/allocator/pooling.rs +++ b/crates/runtime/src/instance/allocator/pooling.rs @@ -8,8 +8,8 @@ //! when modules can be constrained based on configurable limits. use super::{ - initialize_instance, initialize_vmcontext, FiberStackError, InstanceAllocationRequest, - InstanceAllocator, InstanceHandle, InstantiationError, + initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator, + InstanceHandle, InstantiationError, }; use crate::{instance::Instance, Memory, Mmap, Table, VMContext}; use anyhow::{anyhow, bail, Context, Result}; @@ -41,10 +41,13 @@ cfg_if::cfg_if! 
{ } } -use imp::{ - commit_memory_pages, commit_stack_pages, commit_table_pages, decommit_memory_pages, - decommit_stack_pages, decommit_table_pages, -}; +use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages}; + +#[cfg(all(feature = "async", unix))] +use imp::{commit_stack_pages, decommit_stack_pages}; + +#[cfg(feature = "async")] +use super::FiberStackError; fn round_up_to_pow2(n: usize, to: usize) -> usize { debug_assert!(to > 0); @@ -705,6 +708,7 @@ impl TablePool { /// /// The top of the stack (starting stack pointer) is returned when a stack is allocated /// from the pool. +#[cfg(feature = "async")] #[derive(Debug)] struct StackPool { mapping: Mmap, @@ -714,13 +718,14 @@ struct StackPool { free_list: Mutex>, } +#[cfg(feature = "async")] impl StackPool { fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result { let page_size = region::page::size(); // On Windows, don't allocate any fiber stacks as native fibers are always used // Add a page to the stack size for the guard page when using fiber stacks - let stack_size = if cfg!(windows) || stack_size == 0 { + let stack_size = if stack_size == 0 { 0 } else { round_up_to_pow2(stack_size, page_size) @@ -758,8 +763,10 @@ impl StackPool { }) } - fn allocate(&self, strategy: PoolingAllocationStrategy) -> Result<*mut u8, FiberStackError> { - // Stacks are not supported if nothing was allocated + fn allocate( + &self, + strategy: PoolingAllocationStrategy, + ) -> Result { if self.stack_size == 0 { return Err(FiberStackError::NotSupported); } @@ -787,32 +794,28 @@ impl StackPool { commit_stack_pages(bottom_of_stack, size_without_guard) .map_err(FiberStackError::Resource)?; - // The top of the stack should be returned - Ok(bottom_of_stack.add(size_without_guard)) + wasmtime_fiber::FiberStack::from_top_ptr(bottom_of_stack.add(size_without_guard)) + .map_err(|e| FiberStackError::Resource(e.into())) } } - fn deallocate(&self, top_of_stack: *mut u8) { - debug_assert!(!top_of_stack.is_null()); + fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) { + // Remove the guard page from the size + let stack_size = self.stack_size - self.page_size; + let bottom_of_stack = unsafe { stack.top().unwrap().sub(stack_size) }; - unsafe { - // Remove the guard page from the size - let stack_size = self.stack_size - self.page_size; - let bottom_of_stack = top_of_stack.sub(stack_size); - - let base = self.mapping.as_ptr() as usize; - let start_of_stack = (bottom_of_stack as usize) - self.page_size; + let base = self.mapping.as_ptr() as usize; + let start_of_stack = (bottom_of_stack as usize) - self.page_size; - debug_assert!(start_of_stack >= base && start_of_stack < (base + self.mapping.len())); - debug_assert!((start_of_stack - base) % self.stack_size == 0); + debug_assert!(start_of_stack >= base && start_of_stack < (base + self.mapping.len())); + debug_assert!((start_of_stack - base) % self.stack_size == 0); - let index = (start_of_stack - base) / self.stack_size; - debug_assert!(index < self.max_instances); + let index = (start_of_stack - base) / self.stack_size; + debug_assert!(index < self.max_instances); - decommit_stack_pages(bottom_of_stack, stack_size).unwrap(); + decommit_stack_pages(bottom_of_stack, stack_size).unwrap(); - self.free_list.lock().unwrap().push(index); - } + self.free_list.lock().unwrap().push(index); } } @@ -828,7 +831,10 @@ pub struct PoolingInstanceAllocator { instance_limits: InstanceLimits, // This is manually drop so that the pools unmap their memory before the page fault 
handler drops. instances: mem::ManuallyDrop, + #[cfg(all(feature = "async", unix))] stacks: StackPool, + #[cfg(all(feature = "async", windows))] + stack_size: usize, #[cfg(all(feature = "uffd", target_os = "linux"))] _fault_handler: imp::PageFaultHandler, } @@ -839,7 +845,7 @@ impl PoolingInstanceAllocator { strategy: PoolingAllocationStrategy, module_limits: ModuleLimits, mut instance_limits: InstanceLimits, - stack_size: usize, + #[cfg(feature = "async")] stack_size: usize, ) -> Result { if instance_limits.count == 0 { bail!("the instance count limit cannot be zero"); @@ -857,7 +863,6 @@ impl PoolingInstanceAllocator { min(instance_limits.memory_reservation_size, 0x200000000); let instances = InstancePool::new(&module_limits, &instance_limits)?; - let stacks = StackPool::new(&instance_limits, stack_size)?; #[cfg(all(feature = "uffd", target_os = "linux"))] let _fault_handler = imp::PageFaultHandler::new(&instances)?; @@ -867,7 +872,10 @@ impl PoolingInstanceAllocator { module_limits, instance_limits, instances: mem::ManuallyDrop::new(instances), - stacks, + #[cfg(all(feature = "async", unix))] + stacks: StackPool::new(&instance_limits, stack_size)?, + #[cfg(all(feature = "async", windows))] + stack_size, #[cfg(all(feature = "uffd", target_os = "linux"))] _fault_handler, }) @@ -956,13 +964,31 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator { self.instances.deallocate(handle); } - fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> { + #[cfg(all(feature = "async", unix))] + fn allocate_fiber_stack(&self) -> Result { self.stacks.allocate(self.strategy) } - unsafe fn deallocate_fiber_stack(&self, stack: *mut u8) { + #[cfg(all(feature = "async", unix))] + unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack) { self.stacks.deallocate(stack); } + + #[cfg(all(feature = "async", windows))] + fn allocate_fiber_stack(&self) -> Result { + if self.stack_size == 0 { + return Err(FiberStackError::NotSupported); + } + + // On windows, we don't use a stack pool as we use the native fiber implementation + wasmtime_fiber::FiberStack::new(self.stack_size) + .map_err(|e| FiberStackError::Resource(e.into())) + } + + #[cfg(all(feature = "async", windows))] + unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack) { + // A no-op as we don't own the fiber stack on Windows + } } #[cfg(test)] @@ -1470,7 +1496,7 @@ mod test { Ok(()) } - #[cfg(all(unix, target_pointer_width = "64"))] + #[cfg(all(unix, target_pointer_width = "64", feature = "async"))] #[test] fn test_stack_pool() -> Result<()> { let pool = StackPool::new( @@ -1497,7 +1523,10 @@ mod test { let stack = pool .allocate(PoolingAllocationStrategy::NextAvailable) .expect("allocation should succeed"); - assert_eq!(((stack as usize - base) / pool.stack_size) - 1, i); + assert_eq!( + ((stack.top().unwrap() as usize - base) / pool.stack_size) - 1, + i + ); stacks.push(stack); } @@ -1512,7 +1541,7 @@ mod test { }; for stack in stacks { - pool.deallocate(stack); + pool.deallocate(&stack); } assert_eq!( @@ -1611,13 +1640,13 @@ mod test { for _ in 0..10 { let stack = allocator.allocate_fiber_stack()?; - // The stack pointer is at the top, so decerement it first - let addr = stack.sub(1); + // The stack pointer is at the top, so decrement it first + let addr = stack.top().unwrap().sub(1); assert_eq!(*addr, 0); *addr = 1; - allocator.deallocate_fiber_stack(stack); + allocator.deallocate_fiber_stack(&stack); } } diff --git a/crates/runtime/src/instance/allocator/pooling/linux.rs 
b/crates/runtime/src/instance/allocator/pooling/linux.rs index 324200efe42b..db8e6ff9bd24 100644 --- a/crates/runtime/src/instance/allocator/pooling/linux.rs +++ b/crates/runtime/src/instance/allocator/pooling/linux.rs @@ -48,11 +48,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } +#[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE Ok(()) } +#[cfg(feature = "async")] pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } diff --git a/crates/runtime/src/instance/allocator/pooling/uffd.rs b/crates/runtime/src/instance/allocator/pooling/uffd.rs index ebe5effbc348..190a333ba156 100644 --- a/crates/runtime/src/instance/allocator/pooling/uffd.rs +++ b/crates/runtime/src/instance/allocator/pooling/uffd.rs @@ -79,11 +79,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len) } +#[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE Ok(()) } +#[cfg(feature = "async")] pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len) } diff --git a/crates/runtime/src/instance/allocator/pooling/unix.rs b/crates/runtime/src/instance/allocator/pooling/unix.rs index d172f411ebbf..6a18b6b9a017 100644 --- a/crates/runtime/src/instance/allocator/pooling/unix.rs +++ b/crates/runtime/src/instance/allocator/pooling/unix.rs @@ -58,11 +58,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } +#[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE Ok(()) } +#[cfg(feature = "async")] pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } diff --git a/crates/runtime/src/instance/allocator/pooling/windows.rs b/crates/runtime/src/instance/allocator/pooling/windows.rs index 286cd459fe29..c12db0fc638e 100644 --- a/crates/runtime/src/instance/allocator/pooling/windows.rs +++ b/crates/runtime/src/instance/allocator/pooling/windows.rs @@ -45,11 +45,3 @@ pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> { pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len) } - -pub fn commit_stack_pages(addr: *mut u8, len: usize) -> Result<()> { - commit(addr, len) -} - -pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> { - decommit(addr, len) -} diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index 1e6cfc8cd2e6..20f28ffd9761 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -38,7 +38,7 @@ pub use crate::export::*; pub use crate::externref::*; pub use crate::imports::Imports; pub use crate::instance::{ - FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits, + InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits, InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator, PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance, }; diff --git a/crates/runtime/src/traphandlers/unix.rs b/crates/runtime/src/traphandlers/unix.rs index a97976f9caa2..e3b12f7a15d8 100644 --- a/crates/runtime/src/traphandlers/unix.rs +++ b/crates/runtime/src/traphandlers/unix.rs @@ -86,8 +86,7 @@ unsafe extern "C" fn trap_handler( // handling, 
and reset our trap handling flag. Then we figure // out what to do based on the result of the trap handling. let pc = get_pc(context); - let jmp_buf = - info.jmp_buf_if_trap(pc, |handler| handler(signum, siginfo, context)); + let jmp_buf = info.jmp_buf_if_trap(pc, |handler| handler(signum, siginfo, context)); // Figure out what to do based on the result of this handling of // the trap. Note that our sentinel value of 1 means that the diff --git a/crates/wasmtime/Cargo.toml b/crates/wasmtime/Cargo.toml index c99f37001e3a..d30718da6489 100644 --- a/crates/wasmtime/Cargo.toml +++ b/crates/wasmtime/Cargo.toml @@ -72,7 +72,7 @@ experimental_x64 = ["wasmtime-jit/experimental_x64"] # Enables support for "async stores" as well as defining host functions as # `async fn` and calling functions asynchronously. -async = ["wasmtime-fiber"] +async = ["wasmtime-fiber", "wasmtime-runtime/async"] # Enables userfaultfd support in the runtime's pooling allocator when building on Linux uffd = ["wasmtime-runtime/uffd"] diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index da85f832b8cd..af5b48d797cd 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -1332,25 +1332,20 @@ impl Config { match self.allocation_strategy { InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new( self.mem_creator.clone(), + #[cfg(feature = "async")] + self.async_stack_size, ))), InstanceAllocationStrategy::Pooling { strategy, module_limits, instance_limits, - } => { + } => Ok(Box::new(PoolingInstanceAllocator::new( + strategy.into(), + module_limits.into(), + instance_limits.into(), #[cfg(feature = "async")] - let stack_size = self.async_stack_size; - - #[cfg(not(feature = "async"))] - let stack_size = 0; - - Ok(Box::new(PoolingInstanceAllocator::new( - strategy.into(), - module_limits.into(), - instance_limits.into(), - stack_size, - )?)) - } + self.async_stack_size, + )?)), } } } diff --git a/crates/wasmtime/src/func.rs b/crates/wasmtime/src/func.rs index f99548fee57e..af33ed2fe313 100644 --- a/crates/wasmtime/src/func.rs +++ b/crates/wasmtime/src/func.rs @@ -106,7 +106,7 @@ impl HostFunc { impl Drop for HostFunc { fn drop(&mut self) { // Host functions are always allocated with the default (on-demand) allocator - unsafe { OnDemandInstanceAllocator::new(None).deallocate(&self.instance) } + unsafe { OnDemandInstanceAllocator::default().deallocate(&self.instance) } } } diff --git a/crates/wasmtime/src/store.rs b/crates/wasmtime/src/store.rs index e90b394d26c9..1bb1467b5e22 100644 --- a/crates/wasmtime/src/store.rs +++ b/crates/wasmtime/src/store.rs @@ -219,7 +219,7 @@ impl Store { pub fn get(&self) -> Option<&T> { let values = self.inner.context_values.borrow(); - // Safety: a context value cannot be removed once added and therefore the addres is + // Safety: a context value cannot be removed once added and therefore the address is // stable for the life of the store values .get(&TypeId::of::()) @@ -740,9 +740,15 @@ impl Store { debug_assert!(self.async_support()); debug_assert!(config.async_stack_size > 0); - type SuspendType = wasmtime_fiber::Suspend, (), Result<(), Trap>>; + let stack = self + .inner + .engine + .allocator() + .allocate_fiber_stack() + .map_err(|e| Trap::from(anyhow::Error::from(e)))?; + let mut slot = None; - let func = |keep_going, suspend: &SuspendType| { + let fiber = wasmtime_fiber::Fiber::new(stack, |keep_going, suspend| { // First check and see if we were interrupted/dropped, and only // continue if we haven't been. 
keep_going?; @@ -760,46 +766,18 @@ impl Store { slot = Some(func()); Ok(()) - }; - - let (fiber, stack) = match self.inner.engine.allocator().allocate_fiber_stack() { - Ok(stack) => { - // Use the returned stack and deallocate it when finished - ( - unsafe { - wasmtime_fiber::Fiber::new_with_stack(stack, func) - .map_err(|e| Trap::from(anyhow::Error::from(e)))? - }, - stack, - ) - } - Err(wasmtime_runtime::FiberStackError::NotSupported) => { - // The allocator doesn't support custom fiber stacks for the current platform - // Request that the fiber itself allocate the stack - ( - wasmtime_fiber::Fiber::new(config.async_stack_size, func) - .map_err(|e| Trap::from(anyhow::Error::from(e)))?, - std::ptr::null_mut(), - ) - } - Err(e) => return Err(Trap::from(anyhow::Error::from(e))), - }; + }); // Once we have the fiber representing our synchronous computation, we // wrap that in a custom future implementation which does the // translation from the future protocol to our fiber API. - FiberFuture { - fiber, - store: self, - stack, - } - .await?; + FiberFuture { fiber, store: self }.await?; + return Ok(slot.unwrap()); struct FiberFuture<'a> { fiber: wasmtime_fiber::Fiber<'a, Result<(), Trap>, (), Result<(), Trap>>, store: &'a Store, - stack: *mut u8, } impl Future for FiberFuture<'_> { @@ -807,7 +785,7 @@ impl Store { fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { // We need to carry over this `cx` into our fiber's runtime - // for when it trys to poll sub-futures that are created. Doing + // for when it tries to poll sub-futures that are created. Doing // this must be done unsafely, however, since `cx` is only alive // for this one singular function call. Here we do a `transmute` // to extend the lifetime of `Context` so it can be stored in @@ -864,13 +842,12 @@ impl Store { // callers that they shouldn't be doing that. debug_assert!(result.is_ok()); } - if !self.stack.is_null() { - unsafe { - self.store - .engine() - .allocator() - .deallocate_fiber_stack(self.stack) - }; + + unsafe { + self.store + .engine() + .allocator() + .deallocate_fiber_stack(self.fiber.stack()); } } } @@ -999,7 +976,7 @@ impl fmt::Debug for Store { impl Drop for StoreInner { fn drop(&mut self) { let allocator = self.engine.allocator(); - let ondemand = OnDemandInstanceAllocator::new(self.engine.config().mem_creator.clone()); + let ondemand = OnDemandInstanceAllocator::default(); for instance in self.instances.borrow().iter() { unsafe { if instance.ondemand { diff --git a/crates/wasmtime/src/trampoline.rs b/crates/wasmtime/src/trampoline.rs index 0e417e17b282..49b2b32e1456 100644 --- a/crates/wasmtime/src/trampoline.rs +++ b/crates/wasmtime/src/trampoline.rs @@ -62,22 +62,27 @@ fn create_handle( imports.functions = func_imports; unsafe { + let config = store.engine().config(); // Use the on-demand allocator when creating handles associated with host objects // The configured instance allocator should only be used when creating module instances // as we don't want host objects to count towards instance limits. 
- let handle = OnDemandInstanceAllocator::new(store.engine().config().mem_creator.clone()) - .allocate(InstanceAllocationRequest { - module: Arc::new(module), - finished_functions: &finished_functions, - imports, - lookup_shared_signature: &|_| shared_signature_id.unwrap(), - host_state, - interrupts: store.interrupts(), - externref_activations_table: store.externref_activations_table() - as *const VMExternRefActivationsTable - as *mut _, - stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _, - })?; + let handle = OnDemandInstanceAllocator::new( + config.mem_creator.clone(), + #[cfg(feature = "async")] + config.async_stack_size, + ) + .allocate(InstanceAllocationRequest { + module: Arc::new(module), + finished_functions: &finished_functions, + imports, + lookup_shared_signature: &|_| shared_signature_id.unwrap(), + host_state, + interrupts: store.interrupts(), + externref_activations_table: store.externref_activations_table() + as *const VMExternRefActivationsTable + as *mut _, + stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _, + })?; Ok(store.add_instance(handle, true)) } diff --git a/crates/wasmtime/src/trampoline/func.rs b/crates/wasmtime/src/trampoline/func.rs index c0e33dd2b062..7b5eb70e6459 100644 --- a/crates/wasmtime/src/trampoline/func.rs +++ b/crates/wasmtime/src/trampoline/func.rs @@ -276,7 +276,7 @@ pub fn create_function( unsafe { Ok(( - OnDemandInstanceAllocator::new(None).allocate(InstanceAllocationRequest { + OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest { module: Arc::new(module), finished_functions: &finished_functions, imports: Imports::default(), @@ -308,7 +308,7 @@ pub unsafe fn create_raw_function( finished_functions.push(func); Ok( - OnDemandInstanceAllocator::new(None).allocate(InstanceAllocationRequest { + OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest { module: Arc::new(module), finished_functions: &finished_functions, imports: Imports::default(),