diff --git a/cranelift/wasm/src/code_translator.rs b/cranelift/wasm/src/code_translator.rs index 445c6ae64ea1..756d834888f5 100644 --- a/cranelift/wasm/src/code_translator.rs +++ b/cranelift/wasm/src/code_translator.rs @@ -612,7 +612,7 @@ pub fn translate_operator( bitcast_arguments(args, &types, builder); let call = environ.translate_call_indirect( - builder.cursor(), + builder, TableIndex::from_u32(*table_index), table, TypeIndex::from_u32(*index), diff --git a/cranelift/wasm/src/environ/dummy.rs b/cranelift/wasm/src/environ/dummy.rs index 7b3af0c8969f..3583d74c67cb 100644 --- a/cranelift/wasm/src/environ/dummy.rs +++ b/cranelift/wasm/src/environ/dummy.rs @@ -404,7 +404,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ fn translate_call_indirect( &mut self, - mut pos: FuncCursor, + builder: &mut FunctionBuilder, _table_index: TableIndex, _table: ir::Table, _sig_index: TypeIndex, @@ -413,7 +413,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ call_args: &[ir::Value], ) -> WasmResult { // Pass the current function's vmctx parameter on to the callee. - let vmctx = pos + let vmctx = builder .func .special_param(ir::ArgumentPurpose::VMContext) .expect("Missing vmctx parameter"); @@ -423,22 +423,22 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ // TODO: Generate bounds checking code. let ptr = self.pointer_type(); let callee_offset = if ptr == I32 { - pos.ins().imul_imm(callee, 4) + builder.ins().imul_imm(callee, 4) } else { - let ext = pos.ins().uextend(I64, callee); - pos.ins().imul_imm(ext, 4) + let ext = builder.ins().uextend(I64, callee); + builder.ins().imul_imm(ext, 4) }; let mflags = ir::MemFlags::trusted(); - let func_ptr = pos.ins().load(ptr, mflags, callee_offset, 0); + let func_ptr = builder.ins().load(ptr, mflags, callee_offset, 0); // Build a value list for the indirect call instruction containing the callee, call_args, // and the vmctx parameter. 
let mut args = ir::ValueList::default(); - args.push(func_ptr, &mut pos.func.dfg.value_lists); - args.extend(call_args.iter().cloned(), &mut pos.func.dfg.value_lists); - args.push(vmctx, &mut pos.func.dfg.value_lists); + args.push(func_ptr, &mut builder.func.dfg.value_lists); + args.extend(call_args.iter().cloned(), &mut builder.func.dfg.value_lists); + args.push(vmctx, &mut builder.func.dfg.value_lists); - Ok(pos + Ok(builder .ins() .CallIndirect(ir::Opcode::CallIndirect, INVALID, sig_ref, args) .0) diff --git a/cranelift/wasm/src/environ/spec.rs b/cranelift/wasm/src/environ/spec.rs index 460f4ecf738e..2500b5b5b4ee 100644 --- a/cranelift/wasm/src/environ/spec.rs +++ b/cranelift/wasm/src/environ/spec.rs @@ -219,7 +219,7 @@ pub trait FuncEnvironment: TargetEnvironment { #[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))] fn translate_call_indirect( &mut self, - pos: FuncCursor, + builder: &mut FunctionBuilder, table_index: TableIndex, table: ir::Table, sig_index: TypeIndex, diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 9c1f4afa4310..fbd7008527c0 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -750,6 +750,56 @@ impl<'module_environment> FuncEnvironment<'module_environment> { pos.ins().uextend(I64, val) } } + + fn get_or_init_funcref_table_elem( + &mut self, + builder: &mut FunctionBuilder, + table_index: TableIndex, + table: ir::Table, + index: ir::Value, + ) -> ir::Value { + let pointer_type = self.pointer_type(); + + // To support lazy initialization of table + // contents, we check for a null entry here, and + // if null, we take a slow-path that invokes a + // libcall. + let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0); + let value = builder + .ins() + .load(pointer_type, ir::MemFlags::trusted(), table_entry_addr, 0); + // Mask off the "initialized bit". 
See REF_INIT_BIT in + // crates/runtime/src/table.rs for more details. + let value_masked = builder.ins().band_imm(value, !1); + + let null_block = builder.create_block(); + let continuation_block = builder.create_block(); + let result_param = builder.append_block_param(continuation_block, pointer_type); + builder.set_cold_block(null_block); + + builder.ins().brz(value, null_block, &[]); + builder.ins().jump(continuation_block, &[value_masked]); + builder.seal_block(null_block); + + builder.switch_to_block(null_block); + let table_index = builder.ins().iconst(I32, table_index.index() as i64); + let builtin_idx = BuiltinFunctionIndex::table_get_lazy_init_funcref(); + let builtin_sig = self + .builtin_function_signatures + .table_get_lazy_init_funcref(builder.func); + let (vmctx, builtin_addr) = + self.translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx); + let call_inst = + builder + .ins() + .call_indirect(builtin_sig, builtin_addr, &[vmctx, table_index, index]); + let returned_entry = builder.func.dfg.inst_results(call_inst)[0]; + builder.ins().jump(continuation_block, &[returned_entry]); + builder.seal_block(continuation_block); + + builder.switch_to_block(continuation_block); + result_param + } } impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> { @@ -886,13 +936,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m match plan.table.wasm_ty { WasmType::FuncRef => match plan.style { TableStyle::CallerChecksSignature => { - let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0); - Ok(builder.ins().load( - pointer_type, - ir::MemFlags::trusted(), - table_entry_addr, - 0, - )) + Ok(self.get_or_init_funcref_table_elem(builder, table_index, table, index)) } }, WasmType::ExternRef => { @@ -1033,9 +1077,16 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m WasmType::FuncRef => match plan.style { 
TableStyle::CallerChecksSignature => { let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0); - builder - .ins() - .store(ir::MemFlags::trusted(), value, table_entry_addr, 0); + // Set the "initialized bit". See doc-comment on + // `REF_INIT_BIT` in crates/runtime/src/table.rs + // for details. + let value_with_init_bit = builder.ins().bor_imm(value, 1); + builder.ins().store( + ir::MemFlags::trusted(), + value_with_init_bit, + table_entry_addr, + 0, + ); Ok(()) } }, @@ -1253,10 +1304,16 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m mut pos: cranelift_codegen::cursor::FuncCursor<'_>, func_index: FuncIndex, ) -> WasmResult { - let vmctx = self.vmctx(&mut pos.func); - let vmctx = pos.ins().global_value(self.pointer_type(), vmctx); - let offset = self.offsets.vmctx_anyfunc(func_index); - Ok(pos.ins().iadd_imm(vmctx, i64::from(offset))) + let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64); + let builtin_index = BuiltinFunctionIndex::ref_func(); + let builtin_sig = self.builtin_function_signatures.ref_func(&mut pos.func); + let (vmctx, builtin_addr) = + self.translate_load_builtin_function_address(&mut pos, builtin_index); + + let call_inst = pos + .ins() + .call_indirect(builtin_sig, builtin_addr, &[vmctx, func_index]); + Ok(pos.func.dfg.first_result(call_inst)) } fn translate_custom_global_get( @@ -1459,7 +1516,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m fn translate_call_indirect( &mut self, - mut pos: FuncCursor<'_>, + builder: &mut FunctionBuilder, table_index: TableIndex, table: ir::Table, ty_index: TypeIndex, @@ -1469,21 +1526,17 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ) -> WasmResult { let pointer_type = self.pointer_type(); - let table_entry_addr = pos.ins().table_addr(pointer_type, table, callee, 0); - - // Dereference the table entry to get the pointer to the - // `VMCallerCheckedAnyfunc`. 
- let anyfunc_ptr = - pos.ins() - .load(pointer_type, ir::MemFlags::trusted(), table_entry_addr, 0); + // Get the anyfunc pointer (the funcref) from the table. + let anyfunc_ptr = self.get_or_init_funcref_table_elem(builder, table_index, table, callee); // Check for whether the table element is null, and trap if so. - pos.ins() + builder + .ins() .trapz(anyfunc_ptr, ir::TrapCode::IndirectCallToNull); // Dereference anyfunc pointer to get the function address. let mem_flags = ir::MemFlags::trusted(); - let func_addr = pos.ins().load( + let func_addr = builder.ins().load( pointer_type, mem_flags, anyfunc_ptr, @@ -1495,19 +1548,19 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m TableStyle::CallerChecksSignature => { let sig_id_size = self.offsets.size_of_vmshared_signature_index(); let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap(); - let vmctx = self.vmctx(pos.func); - let base = pos.ins().global_value(pointer_type, vmctx); + let vmctx = self.vmctx(builder.func); + let base = builder.ins().global_value(pointer_type, vmctx); let offset = i32::try_from(self.offsets.vmctx_vmshared_signature_id(ty_index)).unwrap(); // Load the caller ID. let mut mem_flags = ir::MemFlags::trusted(); mem_flags.set_readonly(); - let caller_sig_id = pos.ins().load(sig_id_type, mem_flags, base, offset); + let caller_sig_id = builder.ins().load(sig_id_type, mem_flags, base, offset); // Load the callee ID. let mem_flags = ir::MemFlags::trusted(); - let callee_sig_id = pos.ins().load( + let callee_sig_id = builder.ins().load( sig_id_type, mem_flags, anyfunc_ptr, @@ -1515,16 +1568,21 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ); // Check that they match. 
- let cmp = pos.ins().icmp(IntCC::Equal, callee_sig_id, caller_sig_id); - pos.ins().trapz(cmp, ir::TrapCode::BadSignature); + let cmp = builder + .ins() + .icmp(IntCC::Equal, callee_sig_id, caller_sig_id); + builder.ins().trapz(cmp, ir::TrapCode::BadSignature); } } let mut real_call_args = Vec::with_capacity(call_args.len() + 2); - let caller_vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap(); + let caller_vmctx = builder + .func + .special_param(ArgumentPurpose::VMContext) + .unwrap(); // First append the callee vmctx address. - let vmctx = pos.ins().load( + let vmctx = builder.ins().load( pointer_type, mem_flags, anyfunc_ptr, @@ -1536,7 +1594,9 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m // Then append the regular call arguments. real_call_args.extend_from_slice(call_args); - Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args)) + Ok(builder + .ins() + .call_indirect(sig_ref, func_addr, &real_call_args)) } fn translate_call( diff --git a/crates/environ/src/builtin.rs b/crates/environ/src/builtin.rs index e660ffc2d7cd..cb3e795826a2 100644 --- a/crates/environ/src/builtin.rs +++ b/crates/environ/src/builtin.rs @@ -18,8 +18,12 @@ macro_rules! foreach_builtin_function { memory_fill(vmctx, i32, i64, i32, i64) -> (); /// Returns an index for wasm's `memory.init` instruction. memory_init(vmctx, i32, i32, i64, i32, i32) -> (); + /// Returns a value for wasm's `ref.func` instruction. + ref_func(vmctx, i32) -> (pointer); /// Returns an index for wasm's `data.drop` instruction. data_drop(vmctx, i32) -> (); + /// Returns a table entry after lazily initializing it. + table_get_lazy_init_funcref(vmctx, i32, i32) -> (pointer); /// Returns an index for Wasm's `table.grow` instruction for `funcref`s. table_grow_funcref(vmctx, i32, i32, pointer) -> (i32); /// Returns an index for Wasm's `table.grow` instruction for `externref`s. 
diff --git a/crates/jit/src/instantiate.rs b/crates/jit/src/instantiate.rs index 1256198aa206..b79a0eff9455 100644 --- a/crates/jit/src/instantiate.rs +++ b/crates/jit/src/instantiate.rs @@ -245,7 +245,7 @@ pub struct CompiledModule { address_map_data: Range, trap_data: Range, module: Arc, - funcs: PrimaryMap, + funcs: Arc>, trampolines: Vec, meta: Metadata, code: Range, @@ -304,7 +304,7 @@ impl CompiledModule { let mut ret = Self { module: Arc::new(info.module), - funcs: info.funcs, + funcs: Arc::new(info.funcs), trampolines: info.trampolines, wasm_data: subslice_range(section(ELF_WASM_DATA)?, code.mmap), address_map_data: code @@ -387,7 +387,7 @@ impl CompiledModule { } /// Returns the `FunctionInfo` map for all defined functions. - pub fn functions(&self) -> &PrimaryMap { + pub fn functions(&self) -> &Arc> { &self.funcs } diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs index 27012d4bd027..3c90018651fa 100644 --- a/crates/runtime/src/instance.rs +++ b/crates/runtime/src/instance.rs @@ -57,6 +57,9 @@ pub(crate) struct Instance { /// The unique ID for the `Module` this `Instance` was instantiated from. unique_id: Option, + /// The instantiation info needed for deferred initialization. + info: Arc, + /// Offsets in the `vmctx` region, precomputed from the `module` above. offsets: VMOffsets, @@ -107,11 +110,13 @@ impl Instance { wasm_data: &'static [u8], memories: PrimaryMap, tables: PrimaryMap, + info: Arc, host_state: Box, ) -> Instance { Instance { module: module.clone(), unique_id, + info, offsets: VMOffsets::new(HostPtr, &module), memories, tables, @@ -276,7 +281,7 @@ impl Instance { } /// Lookup an export with the given export declaration. 
- pub fn lookup_by_declaration(&self, export: &EntityIndex) -> Export { + pub fn lookup_by_declaration(&mut self, export: &EntityIndex) -> Export { match export { EntityIndex::Function(index) => { let anyfunc = self.get_caller_checked_anyfunc(*index).unwrap(); @@ -462,6 +467,33 @@ impl Instance { Layout::from_size_align(size, align).unwrap() } + /// Construct a new VMCallerCheckedAnyfunc for the given function + /// (imported or defined in this module) and store into the given + /// location. Used during lazy initialization the first time the + /// VMCallerCheckedAnyfunc in the VMContext is referenced. + fn construct_anyfunc(&mut self, index: FuncIndex, into: *mut VMCallerCheckedAnyfunc) { + let sig = self.module.functions[index]; + let type_index = self.info.shared_signatures.lookup(sig); + + let (func_ptr, vmctx) = if let Some(def_index) = self.module.defined_func_index(index) { + ( + (self.info.image_base + self.info.functions[def_index].start as usize) as *mut _, + self.vmctx_ptr(), + ) + } else { + let import = self.imported_function(index); + (import.body.as_ptr(), import.vmctx) + }; + + // Safety: we have a `&mut self`, so we have exclusive access + // to this Instance. + unsafe { + (*into).vmctx = vmctx; + (*into).type_index = type_index; + (*into).func_ptr = func_ptr; + } + } + /// Get a `&VMCallerCheckedAnyfunc` for the given `FuncIndex`. /// /// Returns `None` if the index is the reserved index value. @@ -469,14 +501,25 @@ impl Instance { /// The returned reference is a stable reference that won't be moved and can /// be passed into JIT code. pub(crate) fn get_caller_checked_anyfunc( - &self, + &mut self, index: FuncIndex, - ) -> Option<&VMCallerCheckedAnyfunc> { + ) -> Option<*mut VMCallerCheckedAnyfunc> { if index == FuncIndex::reserved_value() { return None; } - unsafe { Some(&*self.vmctx_plus_offset(self.offsets.vmctx_anyfunc(index))) } + // Safety: we have a `&mut self`, so we have exclusive access + // to this Instance. 
+ unsafe { + let anyfunc: *mut VMCallerCheckedAnyfunc = + self.vmctx_plus_offset::(self.offsets.vmctx_anyfunc(index)); + // Check the func_ptr field to see if we need to initialize. + if (*anyfunc).func_ptr.is_null() { + self.construct_anyfunc(index, anyfunc); + } + + Some(anyfunc) + } } unsafe fn anyfunc_base(&self) -> *mut VMCallerCheckedAnyfunc { @@ -542,6 +585,7 @@ impl Instance { ptr::null_mut() } else { debug_assert!(idx.as_u32() < self.offsets.num_defined_functions); + self.get_caller_checked_anyfunc(*idx); // Force lazy init base.add(usize::try_from(idx.as_u32()).unwrap()) } }), @@ -703,11 +747,21 @@ impl Instance { // dropping a non-passive segment is a no-op (not a trap). } + /// Get a table and the owning instance regardless of whether it + /// is locally-defined or an imported, foreign table. + pub(crate) fn get_table_and_instance( + &mut self, + table_index: TableIndex, + ) -> (*mut Table, &mut Instance) { + let (idx, instance) = self.get_defined_table_index_and_instance(table_index); + let table = ptr::addr_of_mut!(instance.tables[idx]); + (table, instance) + } + /// Get a table by index regardless of whether it is locally-defined or an /// imported, foreign table. pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table { - let (idx, instance) = self.get_defined_table_index_and_instance(table_index); - ptr::addr_of_mut!(instance.tables[idx]) + self.get_table_and_instance(table_index).0 } /// Get a locally-defined table. @@ -732,6 +786,12 @@ impl Instance { } } + /// Does a given table have lazy-init data (hence can skip initialization)? 
+ pub(crate) fn table_has_lazy_init(&mut self, index: TableIndex) -> bool { + let table = self.get_table(index); + unsafe { (*table).lazy_data().is_some() } + } + fn drop_globals(&mut self) { for (idx, global) in self.module.globals.iter() { let idx = match self.module.defined_global_index(idx) { @@ -787,6 +847,13 @@ impl InstanceHandle { } } + #[inline] + pub(crate) unsafe fn from_instance(instance: &Instance) -> Self { + Self { + instance: instance as *const Instance as *mut Instance, + } + } + /// Return a reference to the vmctx used by compiled wasm code. pub fn vmctx(&self) -> &VMContext { self.instance().vmctx() @@ -804,8 +871,8 @@ impl InstanceHandle { } /// Lookup an export with the given export declaration. - pub fn lookup_by_declaration(&self, export: &EntityIndex) -> Export { - self.instance().lookup_by_declaration(export) + pub fn lookup_by_declaration(&mut self, export: &EntityIndex) -> Export { + self.instance_mut().lookup_by_declaration(export) } /// Return an iterator over the exports of this instance. 
diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index 4b9b61397a34..7cdac1e887b6 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -1,7 +1,7 @@ use crate::imports::Imports; use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator}; use crate::memory::{DefaultMemoryCreator, Memory}; -use crate::table::Table; +use crate::table::{Table, TablesLazyData}; use crate::traphandlers::Trap; use crate::vmcontext::{ VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMGlobalDefinition, VMSharedSignatureIndex, @@ -12,7 +12,7 @@ use anyhow::Result; use std::alloc; use std::any::Any; use std::convert::TryFrom; -use std::ptr::{self, NonNull}; +use std::ptr; use std::slice; use std::sync::Arc; use thiserror::Error; @@ -30,30 +30,43 @@ pub use self::pooling::{ InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator, }; +/// Information needed for the instance allocation request and +/// afterward as well (for lazy initialization). Will be held alive by +/// the instance. +#[derive(Default)] +pub struct InstanceAllocationInfo { + /// The base address of where JIT functions are located. + pub image_base: usize, + + /// Descriptors about each compiled function, such as the offset from + /// `image_base`. + pub functions: Arc>, + + /// Translation from `SignatureIndex` to `VMSharedSignatureIndex` + pub shared_signatures: SharedSignatures, +} + /// Represents a request for a new runtime instance. pub struct InstanceAllocationRequest<'a> { /// The module being instantiated. pub module: Arc, + /// The info needed for both the allocation request and deferred + /// initialization. + pub info: Arc, + /// The unique ID of the module being allocated within this engine. pub unique_id: Option, - /// The base address of where JIT functions are located. - pub image_base: usize, - /// If using MemFD-based memories, the backing MemFDs. 
pub memfds: Option>, - /// Descriptors about each compiled function, such as the offset from - /// `image_base`. - pub functions: &'a PrimaryMap, + /// Table funcref lazy-init data, if any. + pub table_lazy_data: Arc, /// The imports to use for the instantiation. pub imports: Imports<'a>, - /// Translation from `SignatureIndex` to `VMSharedSignatureIndex` - pub shared_signatures: SharedSignatures<'a>, - /// The host state to associate with the instance. pub host_state: Box, @@ -218,17 +231,18 @@ pub unsafe trait InstanceAllocator: Send + Sync { unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack); } -pub enum SharedSignatures<'a> { +/// Shared signatures. +pub enum SharedSignatures { /// Used for instantiating user-defined modules - Table(&'a PrimaryMap), + Table(PrimaryMap), /// Used for instance creation that has only a single function Always(VMSharedSignatureIndex), /// Used for instance creation that has no functions None, } -impl SharedSignatures<'_> { - fn lookup(&self, index: SignatureIndex) -> VMSharedSignatureIndex { +impl SharedSignatures { + pub(crate) fn lookup(&self, index: SignatureIndex) -> VMSharedSignatureIndex { match self { SharedSignatures::Table(table) => table[index], SharedSignatures::Always(index) => *index, @@ -237,14 +251,20 @@ impl SharedSignatures<'_> { } } -impl<'a> From for SharedSignatures<'a> { - fn from(val: VMSharedSignatureIndex) -> SharedSignatures<'a> { +impl std::default::Default for SharedSignatures { + fn default() -> Self { + SharedSignatures::None + } +} + +impl From for SharedSignatures { + fn from(val: VMSharedSignatureIndex) -> SharedSignatures { SharedSignatures::Always(val) } } -impl<'a> From> for SharedSignatures<'a> { - fn from(val: Option) -> SharedSignatures<'a> { +impl From> for SharedSignatures { + fn from(val: Option) -> SharedSignatures { match val { Some(idx) => SharedSignatures::Always(idx), None => SharedSignatures::None, @@ -252,9 +272,9 @@ impl<'a> From> for SharedSignatures<'a> { } 
} -impl<'a> From<&'a PrimaryMap> for SharedSignatures<'a> { - fn from(val: &'a PrimaryMap) -> SharedSignatures<'a> { - SharedSignatures::Table(val) +impl From<&PrimaryMap> for SharedSignatures { + fn from(val: &PrimaryMap) -> SharedSignatures { + SharedSignatures::Table(val.clone()) } } @@ -309,6 +329,12 @@ fn check_table_init_bounds( fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> { for init in &module.table_initializers { + // Check whether the table has lazy data. If so, we can skip + // all initializers. + if instance.table_has_lazy_init(init.table_index) { + continue; + } + instance .table_init_segment( init.table_index, @@ -486,6 +512,13 @@ fn initialize_instance( Ok(()) } +/// Initialize the VMContext data associated with an Instance. +/// +/// Precondition: the VMContext memory must be zeroed. We omit writes +/// here to fields that should be initialized to zero. This allows the +/// caller to use an efficient means of zeroing memory (such as using +/// anonymous-mmap'd zero pages) if available, rather than falling +/// back onto a memset (or the manual equivalent) here. 
unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationRequest) { if let Some(store) = req.store.as_raw() { *instance.interrupts() = (*store).vminterrupts(); @@ -500,7 +533,7 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_signature_ids_begin()); for sig in module.types.values() { *ptr = match sig { - ModuleType::Function(sig) => req.shared_signatures.lookup(*sig), + ModuleType::Function(sig) => req.info.shared_signatures.lookup(*sig), _ => VMSharedSignatureIndex::new(u32::max_value()), }; ptr = ptr.add(1); @@ -536,32 +569,9 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR req.imports.globals.len(), ); - // Initialize the functions - let mut base = instance.anyfunc_base(); - for (index, sig) in instance.module.functions.iter() { - let type_index = req.shared_signatures.lookup(*sig); - - let (func_ptr, vmctx) = if let Some(def_index) = instance.module.defined_func_index(index) { - ( - NonNull::new((req.image_base + req.functions[def_index].start as usize) as *mut _) - .unwrap(), - instance.vmctx_ptr(), - ) - } else { - let import = instance.imported_function(index); - (import.body, import.vmctx) - }; - - ptr::write( - base, - VMCallerCheckedAnyfunc { - func_ptr, - type_index, - vmctx, - }, - ); - base = base.add(1); - } + // N.B.: no need to initialize the anyfuncs array; it is zeroed + // already (by the precondition for this function) and the lazy + // init will fill in entries as we take pointers to them. 
// Initialize the defined tables let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_tables_begin()); @@ -584,8 +594,8 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR initialize_vmcontext_globals(instance); } -unsafe fn initialize_vmcontext_globals(instance: &Instance) { - let module = &instance.module; +unsafe fn initialize_vmcontext_globals(instance: &mut Instance) { + let module = instance.module.clone(); let num_imports = module.num_imported_globals; for (index, global) in module.globals.iter().skip(num_imports) { let def_index = module.defined_global_index(index).unwrap(); @@ -651,17 +661,29 @@ impl OnDemandInstanceAllocator { fn create_tables( module: &Module, store: &mut StorePtr, + table_lazy_data: &Arc, ) -> Result, InstantiationError> { let num_imports = module.num_imported_tables; let mut tables: PrimaryMap = PrimaryMap::with_capacity(module.table_plans.len() - num_imports); - for table in &module.table_plans.values().as_slice()[num_imports..] 
{ + for (table_index, table) in module.table_plans.iter().skip(num_imports) { + let defined_table_index = module + .defined_table_index(table_index) + .expect("Skipped imported tables"); tables.push( - Table::new_dynamic(table, unsafe { - store - .get() - .expect("if module has table plans, store is not empty") - }) + Table::new_dynamic( + table, + unsafe { + store + .get() + .expect("if module has table plans, store is not empty") + }, + table_lazy_data + .tables + .get(defined_table_index) + .cloned() + .unwrap_or(None), + ) .map_err(InstantiationError::Resource)?, ); } @@ -724,17 +746,18 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator { mut req: InstanceAllocationRequest, ) -> Result { let memories = self.create_memories(&req.module, &mut req.store, &req.memfds)?; - let tables = Self::create_tables(&req.module, &mut req.store)?; let host_state = std::mem::replace(&mut req.host_state, Box::new(())); - let mut handle = { + let (mut handle, vmctx_data) = { + let tables = Self::create_tables(&req.module, &mut req.store, &req.table_lazy_data)?; let instance = Instance::create_raw( &req.module, req.unique_id, &*req.wasm_data, memories, tables, + req.info.clone(), host_state, ); let layout = instance.alloc_layout(); @@ -742,12 +765,30 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator { if instance_ptr.is_null() { alloc::handle_alloc_error(layout); } - ptr::write(instance_ptr, instance); - InstanceHandle { + let handle = InstanceHandle { instance: instance_ptr, - } + }; + + ptr::write(instance_ptr, instance); + + let vmctx_data = { + let vmctx_ptr = (*instance_ptr).vmctx_ptr() as *mut u8; + let vmctx_len = (*instance_ptr).offsets.size_of_vmctx() as usize; + std::slice::from_raw_parts_mut(vmctx_ptr, vmctx_len) + }; + + (handle, vmctx_data) }; + // Zero the VMContext memory first; `initialize_vmcontext()` + // requires this. 
+ vmctx_data.fill(0); + // Drop the &mut slice over the VMContext data before writing + // to it via `initialize_vmcontext()` below; we should not let + // its lifetime extend over the function call that initializes + // the VMContext. + drop(vmctx_data); + initialize_vmcontext(handle.instance_mut(), req); Ok(handle) diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs index ae860588badd..f50db5217df2 100644 --- a/crates/runtime/src/instance/allocator/pooling.rs +++ b/crates/runtime/src/instance/allocator/pooling.rs @@ -11,6 +11,7 @@ use super::{ initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstantiationError, }; +use crate::table::TablesLazyData; use crate::MemFdSlot; use crate::{instance::Instance, Memory, Mmap, ModuleMemFds, Table}; use anyhow::{anyhow, bail, Context, Result}; @@ -44,7 +45,10 @@ cfg_if::cfg_if! { } } -use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages}; +use imp::{ + commit_instance_pages, commit_memory_pages, commit_table_pages, decommit_instance_pages, + decommit_memory_pages, decommit_table_pages, +}; #[cfg(all(feature = "async", unix))] use imp::{commit_stack_pages, decommit_stack_pages}; @@ -357,15 +361,19 @@ impl InstancePool { &*req.wasm_data, PrimaryMap::default(), PrimaryMap::default(), + req.info.clone(), host_state, ); + // Ensure the instance memory is present. + let instance = self.instance(index); + commit_instance_pages(instance as *mut Instance as *mut u8, self.instance_size) + .map_err(|e| InstantiationError::Resource(e.into()))?; + // Instances are uninitialized memory at first; we need to // write an empty but initialized `Instance` struct into the // chosen slot before we do anything else with it. (This is // paired with a `drop_in_place` in deallocate below.) 
- let instance = self.instance(index); - std::ptr::write(instance as _, instance_data); // set_instance_memories and _tables will need the store before we can completely @@ -386,8 +394,15 @@ impl InstancePool { instance, self.tables.get(index).map(|x| x as *mut usize), self.tables.max_elements, + &req.table_lazy_data, )?; + // Note that `initialize_vmcontext` requires the VMContext + // (which comes just after the Instance) to be zeroed. This is + // ensured by using {decommit,commit}_instance_pages, which + // will use either madvise(MADV_DONTNEED) (on Linux) or + // mmap-of-fresh-anonymous-memory to efficiently zero the + // instance state. initialize_vmcontext(instance, req); Ok(InstanceHandle { @@ -499,6 +514,14 @@ impl InstancePool { // touched again until we write a fresh Instance in-place with // std::ptr::write in allocate() above. + // We now decommit the Instance/VMContext storage: this does a + // fast clear back to zeroed memory by unmapping any page(s) + // that were mapped. On the next touch (when this slot is + // reused), freshly zeroed pages will be demand-paged in by + // the kernel. 
+ decommit_instance_pages(instance as *mut Instance as *mut u8, self.instance_size) + .expect("Could not decommit instance; fatal error"); + self.index_allocator.lock().unwrap().free(SlotId(index)); } @@ -575,12 +598,16 @@ impl InstancePool { instance: &mut Instance, mut tables: impl Iterator, max_elements: u32, + table_lazy_data: &Arc, ) -> Result<(), InstantiationError> { let module = instance.module.as_ref(); debug_assert!(instance.tables.is_empty()); - for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() { + for (table_index, plan) in module.table_plans.iter().skip(module.num_imported_tables) { + let defined_table_index = module + .defined_table_index(table_index) + .expect("skipped imported tables"); let base = tables.next().unwrap(); commit_table_pages( @@ -591,8 +618,17 @@ impl InstancePool { let table = unsafe { std::slice::from_raw_parts_mut(base, max_elements as usize) }; instance.tables.push( - Table::new_static(plan, table, unsafe { &mut *instance.store() }) - .map_err(InstantiationError::Resource)?, + Table::new_static( + plan, + table, + unsafe { &mut *instance.store() }, + table_lazy_data + .tables + .get(defined_table_index) + .cloned() + .unwrap_or(None), + ) + .map_err(InstantiationError::Resource)?, ); } @@ -1130,6 +1166,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator { #[cfg(test)] mod test { use super::*; + use crate::InstanceAllocationInfo; use crate::{Imports, StorePtr, VMSharedSignatureIndex}; use wasmtime_environ::{ EntityRef, Global, GlobalInit, Memory, MemoryPlan, ModuleType, SignatureIndex, Table, @@ -1462,7 +1499,7 @@ mod test { let mut handles = Vec::new(); let module = Arc::new(Module::default()); - let functions = &PrimaryMap::new(); + let functions = Arc::new(PrimaryMap::new()); for _ in (0..3).rev() { handles.push( @@ -1470,19 +1507,22 @@ mod test { .allocate(InstanceAllocationRequest { module: module.clone(), unique_id: None, - image_base: 0, - functions, imports: Imports { 
functions: &[], tables: &[], memories: &[], globals: &[], }, - shared_signatures: VMSharedSignatureIndex::default().into(), host_state: Box::new(()), store: StorePtr::empty(), wasm_data: &[], memfds: None, + table_lazy_data: Default::default(), + info: Arc::new(InstanceAllocationInfo { + image_base: 0, + functions: functions.clone(), + shared_signatures: VMSharedSignatureIndex::default().into(), + }), }) .expect("allocation should succeed"), ); @@ -1496,19 +1536,22 @@ mod test { match instances.allocate(InstanceAllocationRequest { module: module.clone(), unique_id: None, - functions, - image_base: 0, imports: Imports { functions: &[], tables: &[], memories: &[], globals: &[], }, - shared_signatures: VMSharedSignatureIndex::default().into(), host_state: Box::new(()), store: StorePtr::empty(), wasm_data: &[], memfds: None, + table_lazy_data: Default::default(), + info: Arc::new(InstanceAllocationInfo { + functions: functions.clone(), + image_base: 0, + shared_signatures: VMSharedSignatureIndex::default().into(), + }), }) { Err(InstantiationError::Limit(3)) => {} _ => panic!("unexpected error"), diff --git a/crates/runtime/src/instance/allocator/pooling/linux.rs b/crates/runtime/src/instance/allocator/pooling/linux.rs index dec5c128cbba..657bf127b12a 100644 --- a/crates/runtime/src/instance/allocator/pooling/linux.rs +++ b/crates/runtime/src/instance/allocator/pooling/linux.rs @@ -44,6 +44,15 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } +pub fn commit_instance_pages(_addr: *mut u8, _len: usize) -> Result<()> { + // A no-op as instance pages remain READ|WRITE + Ok(()) +} + +pub fn decommit_instance_pages(addr: *mut u8, len: usize) -> Result<()> { + decommit(addr, len, false) +} + #[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE diff --git a/crates/runtime/src/instance/allocator/pooling/uffd.rs 
b/crates/runtime/src/instance/allocator/pooling/uffd.rs index be16ca2db1ec..58578fa42c0b 100644 --- a/crates/runtime/src/instance/allocator/pooling/uffd.rs +++ b/crates/runtime/src/instance/allocator/pooling/uffd.rs @@ -75,6 +75,15 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len) } +pub fn commit_instance_pages(_addr: *mut u8, _len: usize) -> Result<()> { + // A no-op as instance pages remain READ|WRITE + Ok(()) +} + +pub fn decommit_instance_pages(addr: *mut u8, len: usize) -> Result<()> { + decommit(addr, len) +} + #[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE @@ -435,7 +444,7 @@ impl Drop for PageFaultHandler { mod test { use super::*; use crate::{ - Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits, + Imports, InstanceAllocationInfo, InstanceAllocationRequest, InstanceLimits, ModuleLimits, PoolingAllocationStrategy, Store, StorePtr, VMSharedSignatureIndex, }; use std::sync::atomic::AtomicU64; @@ -572,7 +581,7 @@ mod test { let mut handles = Vec::new(); let module = Arc::new(module); - let functions = &PrimaryMap::new(); + let functions = Arc::new(PrimaryMap::new()); // Allocate the maximum number of instances with the maximum number of memories for _ in 0..instances.max_instances { @@ -582,18 +591,20 @@ mod test { module: module.clone(), memfds: None, unique_id: None, - image_base: 0, - functions, imports: Imports { functions: &[], tables: &[], memories: &[], globals: &[], }, - shared_signatures: VMSharedSignatureIndex::default().into(), host_state: Box::new(()), store: StorePtr::new(&mut mock_store), wasm_data: &[], + info: Arc::new(InstanceAllocationInfo { + image_base: 0, + functions: functions.clone(), + shared_signatures: VMSharedSignatureIndex::default().into(), + }), }) .expect("instance should allocate"), ); diff --git a/crates/runtime/src/instance/allocator/pooling/unix.rs
b/crates/runtime/src/instance/allocator/pooling/unix.rs index 59e43ecd49b3..2965553218ba 100644 --- a/crates/runtime/src/instance/allocator/pooling/unix.rs +++ b/crates/runtime/src/instance/allocator/pooling/unix.rs @@ -51,6 +51,15 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len, false) } +pub fn commit_instance_pages(_addr: *mut u8, _len: usize) -> Result<()> { + // A no-op as instance pages remain READ|WRITE + Ok(()) +} + +pub fn decommit_instance_pages(addr: *mut u8, len: usize) -> Result<()> { + decommit(addr, len, false) +} + #[cfg(feature = "async")] pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> { // A no-op as stack pages remain READ|WRITE diff --git a/crates/runtime/src/instance/allocator/pooling/windows.rs b/crates/runtime/src/instance/allocator/pooling/windows.rs index c12db0fc638e..68375b5c13b0 100644 --- a/crates/runtime/src/instance/allocator/pooling/windows.rs +++ b/crates/runtime/src/instance/allocator/pooling/windows.rs @@ -45,3 +45,11 @@ pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> { pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> { decommit(addr, len) } + +pub fn commit_instance_pages(addr: *mut u8, len: usize) -> Result<()> { + commit(addr, len) +} + +pub fn decommit_instance_pages(addr: *mut u8, len: usize) -> Result<()> { + decommit(addr, len) +} diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index 9f41e36156b9..b377c18896d0 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -43,8 +43,8 @@ pub use crate::export::*; pub use crate::externref::*; pub use crate::imports::Imports; pub use crate::instance::{ - InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstantiationError, LinkError, - OnDemandInstanceAllocator, StorePtr, + InstanceAllocationInfo, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, + InstantiationError, LinkError, OnDemandInstanceAllocator, SharedSignatures,
StorePtr, }; #[cfg(feature = "pooling-allocator")] pub use crate::instance::{ @@ -53,7 +53,7 @@ pub use crate::instance::{ pub use crate::jit_int::GdbJitImageRegistration; pub use crate::memory::{DefaultMemoryCreator, Memory, RuntimeLinearMemory, RuntimeMemoryCreator}; pub use crate::mmap::Mmap; -pub use crate::table::{Table, TableElement}; +pub use crate::table::{Table, TableElement, TablesLazyData}; pub use crate::traphandlers::{ catch_traps, init_traps, raise_lib_trap, raise_user_trap, resume_panic, tls_eager_initialize, SignalHandler, TlsRestore, Trap, diff --git a/crates/runtime/src/libcalls.rs b/crates/runtime/src/libcalls.rs index cbb31f03e6d4..2b5d17c6ff1d 100644 --- a/crates/runtime/src/libcalls.rs +++ b/crates/runtime/src/libcalls.rs @@ -58,13 +58,16 @@ use crate::externref::VMExternRef; use crate::instance::Instance; -use crate::table::{Table, TableElementType}; +use crate::table::{Table, TableElementType, REF_MASK}; use crate::traphandlers::{raise_lib_trap, resume_panic, Trap}; use crate::vmcontext::{VMCallerCheckedAnyfunc, VMContext}; +use crate::InstanceHandle; use backtrace::Backtrace; use std::mem; use std::ptr::{self, NonNull}; -use wasmtime_environ::{DataIndex, ElemIndex, GlobalIndex, MemoryIndex, TableIndex, TrapCode}; +use wasmtime_environ::{ + DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TrapCode, +}; const TOINT_32: f32 = 1.0 / f32::EPSILON; const TOINT_64: f64 = 1.0 / f64::EPSILON; @@ -294,7 +297,14 @@ pub unsafe extern "C" fn table_copy( let instance = (*vmctx).instance_mut(); let dst_table = instance.get_table(dst_table_index); let src_table = instance.get_table(src_table_index); - Table::copy(dst_table, src_table, dst, src, len) + Table::copy( + dst_table, + src_table, + InstanceHandle::from_instance(instance), + dst, + src, + len, + ) }; if let Err(trap) = result { raise_lib_trap(trap); @@ -386,6 +396,15 @@ pub unsafe extern "C" fn memory_init( } } +/// Implementation of `ref.func`. 
+pub unsafe extern "C" fn ref_func(vmctx: *mut VMContext, func_index: u32) -> *mut u8 { + let instance = (*vmctx).instance_mut(); + let anyfunc = instance + .get_caller_checked_anyfunc(FuncIndex::from_u32(func_index)) + .unwrap(); + anyfunc as *mut _ +} + /// Implementation of `data.drop`. pub unsafe extern "C" fn data_drop(vmctx: *mut VMContext, data_index: u32) { let data_index = DataIndex::from_u32(data_index); @@ -393,6 +412,23 @@ pub unsafe extern "C" fn data_drop(vmctx: *mut VMContext, data_index: u32) { instance.data_drop(data_index) } +/// Returns a table entry after lazily initializing it. +pub unsafe extern "C" fn table_get_lazy_init_funcref( + vmctx: *mut VMContext, + table_index: u32, + index: u32, +) -> *mut u8 { + let instance = (*vmctx).instance_mut(); + let table_index = TableIndex::from_u32(table_index); + let (table, table_instance) = instance.get_table_and_instance(table_index); + let table_instance = InstanceHandle::from_instance(table_instance); + let elem = (*table) + .get_or_init(index, table_instance) + .expect("table access already bounds-checked"); + + (elem.into_raw() & REF_MASK) as *mut _ +} + /// Drop a `VMExternRef`. pub unsafe extern "C" fn drop_externref(externref: *mut u8) { let externref = externref as *mut crate::externref::VMExternData; diff --git a/crates/runtime/src/table.rs b/crates/runtime/src/table.rs index 53f5f2dd3779..ec79bacb7341 100644 --- a/crates/runtime/src/table.rs +++ b/crates/runtime/src/table.rs @@ -3,12 +3,15 @@ //! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories. 
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition}; -use crate::{Store, Trap, VMExternRef}; +use crate::{InstanceHandle, Store, Trap, VMExternRef}; use anyhow::{bail, format_err, Error, Result}; use std::convert::{TryFrom, TryInto}; use std::ops::Range; use std::ptr; -use wasmtime_environ::{TablePlan, TrapCode, WasmType}; +use std::sync::Arc; +use wasmtime_environ::{ + DefinedTableIndex, FuncIndex, Module, PrimaryMap, TablePlan, TrapCode, WasmType, +}; /// An element going into or coming out of a table. /// @@ -32,6 +35,41 @@ pub enum TableElementType { unsafe impl Send for TableElement where VMExternRef: Send {} unsafe impl Sync for TableElement where VMExternRef: Sync {} +/// The mask we apply to all refs loaded from tables. +/// +/// This allows us to use the LSB as an "initialized flag" (see below) +/// to distinguish from an uninitialized element in a +/// lazily-initialized funcref table. +pub(crate) const REF_MASK: usize = !1; + +/// An "initialized bit" in a table. +/// +/// We lazily initialize tables of funcrefs, and this mechanism +/// requires us to interpret zero as "uninitialized", triggering a +/// slowpath on table read to possibly initialize the element. (This +/// has to be *zero* because that is the only value we can cheaply +/// initialize, e.g. with newly mmap'd memory.) +/// +/// However, the user can also store a null reference into a table. We +/// have to interpret this as "actually null", and not "lazily +/// initialize to the original funcref that this slot had". +/// +/// To do so, we rewrite nulls into the "initialized null" value. Note +/// that this should *only exist inside the table*: whenever we load a +/// value out of a table, we immediately mask off the low bit that +/// contains the initialized-null flag. Conversely, when we store into +/// a table, we have to translate a true null into an "initialized +/// null". 
+/// We can generalize a bit in order to simplify the table-set logic: we +/// can set the LSB of *all* explicitly stored values to 1 in order to +/// note that they are indeed explicitly stored. We then mask off this +/// bit every time we load. +/// +/// Note that we take care to set this bit and mask it off when +/// accessing tables directly in fastpaths in generated code as well +const REF_INIT_BIT: usize = 1; + impl TableElement { /// Consumes the given raw pointer into a table element. /// @@ -42,7 +80,7 @@ impl TableElement { /// This should only be used if the raw pointer is no longer in use. unsafe fn from_raw(ty: TableElementType, ptr: usize) -> Self { match ty { - TableElementType::Func => Self::FuncRef(ptr as _), + TableElementType::Func => Self::FuncRef((ptr & REF_MASK) as _), TableElementType::Extern => Self::ExternRef(if ptr == 0 { None } else { @@ -57,8 +95,9 @@ impl TableElement { /// /// This is unsafe as it will clone any externref, incrementing the reference count. unsafe fn clone_from_raw(ty: TableElementType, ptr: usize) -> Self { + let ptr = ptr & REF_MASK; match ty { - TableElementType::Func => Self::FuncRef(ptr as _), + TableElementType::Func => Self::FuncRef((ptr & REF_MASK) as _), TableElementType::Extern => Self::ExternRef(if ptr == 0 { None } else { @@ -75,9 +114,9 @@ impl TableElement { /// the reference count. /// /// Use `from_raw` to properly drop any table elements stored as raw pointers. - unsafe fn into_raw(self) -> usize { + pub(crate) unsafe fn into_raw(self) -> usize { match self { - Self::FuncRef(e) => e as _, + Self::FuncRef(e) => (e as usize) | REF_INIT_BIT, Self::ExternRef(e) => e.map_or(0, |e| e.into_raw() as usize), } } @@ -113,6 +152,9 @@ pub enum Table { size: u32, /// The type of this table. ty: TableElementType, + /// The original function indices used for lazy + /// initialization, if any.
+ lazy_data: Option>, }, /// A "dynamic" table where table storage space is dynamically allocated via /// `malloc` (aka Rust's `Vec`). @@ -124,9 +166,88 @@ pub enum Table { ty: TableElementType, /// Maximum size that `elements` can grow to. maximum: Option, + /// The original function indices used for lazy + /// initialization, if any. + lazy_data: Option>, }, } +/// Initialization information for a funcref table, allowing lazy +/// computation of each funcref. This array simply contains the +/// FuncIndex (or None) for each element, and can be computed once for +/// the module. +pub struct TableLazyData { + elements: Vec>, +} + +/// Lazy initialization data for all tables in a module. +#[derive(Default)] +pub struct TablesLazyData { + pub(crate) tables: PrimaryMap>>, +} + +impl TablesLazyData { + /// Create a funcref init table for each defined table, if + /// possible, to allow for lazy table initialization. This happens + /// once when a module is loaded and its per-table data is then + /// shared by every instance of the module. 
+ pub fn new(module: &Module) -> Arc { + let num_defined_tables = module.table_plans.len() - module.num_imported_tables; + let mut tables = PrimaryMap::with_capacity(num_defined_tables); + let mut no_static_init = PrimaryMap::with_capacity(num_defined_tables); + for _ in 0..num_defined_tables { + tables.push(None); + no_static_init.push(false); + } + + for init in &module.table_initializers { + let defined_index = match module.defined_table_index(init.table_index) { + Some(i) => i, + None => continue, + }; + + if no_static_init[defined_index] { + continue; + } + if init.base.is_some() { + tables[defined_index] = None; + no_static_init[defined_index] = true; + continue; + } + + let base = usize::try_from(init.offset).unwrap(); + let top = base.checked_add(init.elements.len()).unwrap(); + let table_init_size = module.table_plans[init.table_index].table.minimum as usize; + if top > table_init_size { + tables[defined_index] = None; + no_static_init[defined_index] = true; + continue; + } + + let data = + tables[defined_index].get_or_insert_with(|| TableLazyData { elements: vec![] }); + + if top > data.elements.len() { + data.elements.resize(top, None); + } + + for (dest, src) in data.elements[base..top] + .iter_mut() + .zip(init.elements.iter()) + { + *dest = Some(*src); + } + } + + let mut tables_ret = PrimaryMap::with_capacity(tables.len()); + for (_, table) in tables.into_iter() { + tables_ret.push(table.map(|table| Arc::new(table))); + } + + Arc::new(Self { tables: tables_ret }) + } +} + fn wasm_to_table_type(ty: WasmType) -> Result { match ty { WasmType::FuncRef => Ok(TableElementType::Func), @@ -137,7 +258,11 @@ fn wasm_to_table_type(ty: WasmType) -> Result { impl Table { /// Create a new dynamic (movable) table instance for the specified table plan. 
- pub fn new_dynamic(plan: &TablePlan, store: &mut dyn Store) -> Result { + pub fn new_dynamic( + plan: &TablePlan, + store: &mut dyn Store, + lazy_data: Option>, + ) -> Result { Self::limit_new(plan, store)?; let elements = vec![0; plan.table.minimum as usize]; let ty = wasm_to_table_type(plan.table.wasm_ty)?; @@ -147,6 +272,7 @@ impl Table { elements, ty, maximum, + lazy_data, }) } @@ -155,6 +281,7 @@ impl Table { plan: &TablePlan, data: &'static mut [usize], store: &mut dyn Store, + lazy_data: Option>, ) -> Result { Self::limit_new(plan, store)?; let size = plan.table.minimum; @@ -164,7 +291,12 @@ impl Table { _ => data, }; - Ok(Table::Static { data, size, ty }) + Ok(Table::Static { + data, + size, + ty, + lazy_data, + }) } fn limit_new(plan: &TablePlan, store: &mut dyn Store) -> Result<()> { @@ -328,10 +460,46 @@ impl Table { Ok(Some(old_size)) } + /// Returns the lazy-init FuncIndex table, if any. + pub(crate) fn lazy_data(&self) -> Option<&Arc> { + match self { + Table::Static { lazy_data, .. } | Table::Dynamic { lazy_data, .. } => { + lazy_data.as_ref() + } + } + } + /// Get reference to the specified element. /// /// Returns `None` if the index is out of bounds. - pub fn get(&self, index: u32) -> Option { + pub fn get_or_init( + &mut self, + index: u32, + mut instance: InstanceHandle, + ) -> Option { + let is_func = self.element_type() == TableElementType::Func; + let e = self.elements_mut().get_mut(index as usize)?; + + unsafe { + if is_func && *e == 0 { + // Initialize with an "initialized null"; this will be + // overwritten if we have a non-null func to fill in. + *e = REF_INIT_BIT; + + // Lazy initialization path. 
+ if let Some(lazy) = self.lazy_data() { + if let Some(&Some(func)) = lazy.elements.get(index as usize) { + if let Some(anyfunc) = + instance.instance_mut().get_caller_checked_anyfunc(func) + { + let e = self.elements_mut().get_mut(index as usize).unwrap(); + *e = TableElement::FuncRef(anyfunc).into_raw(); + } + } + } + } + } + self.elements() .get(index as usize) .map(|p| unsafe { TableElement::clone_from_raw(self.element_type(), *p) }) @@ -363,6 +531,7 @@ impl Table { pub unsafe fn copy( dst_table: *mut Self, src_table: *mut Self, + src_instance: InstanceHandle, dst_index: u32, src_index: u32, len: u32, @@ -387,6 +556,11 @@ impl Table { let src_range = src_index as usize..src_index as usize + len as usize; let dst_range = dst_index as usize..dst_index as usize + len as usize; + // Force lazy initialization of all source elements. + for i in src_range.clone() { + let _ = (*src_table).get_or_init(i as u32, src_instance.clone()); + } + // Check if the tables are the same as we cannot mutably borrow and also borrow the same `RefCell` if ptr::eq(dst_table, src_table) { (*dst_table).copy_elements_within(dst_range, src_range); @@ -522,6 +696,7 @@ impl Default for Table { data: &mut [], size: 0, ty: TableElementType::Func, + lazy_data: None, } } } diff --git a/crates/runtime/src/vmcontext.rs b/crates/runtime/src/vmcontext.rs index f60ce4723cce..3c0c7a152d24 100644 --- a/crates/runtime/src/vmcontext.rs +++ b/crates/runtime/src/vmcontext.rs @@ -548,11 +548,11 @@ impl Default for VMSharedSignatureIndex { /// The VM caller-checked "anyfunc" record, for caller-side signature checking. /// It consists of the actual function pointer and a signature id to be checked /// by the caller. -#[derive(Debug, Clone)] +#[derive(Debug)] #[repr(C)] pub struct VMCallerCheckedAnyfunc { /// Function body. - pub func_ptr: NonNull, + pub func_ptr: *mut VMFunctionBody, /// Function signature id. pub type_index: VMSharedSignatureIndex, /// Function `VMContext`. 
diff --git a/crates/wasmtime/src/externals.rs b/crates/wasmtime/src/externals.rs index c94703849759..f4ee10b7811d 100644 --- a/crates/wasmtime/src/externals.rs +++ b/crates/wasmtime/src/externals.rs @@ -477,7 +477,7 @@ impl Table { let init = init.into_table_element(store, ty.element())?; unsafe { let table = Table::from_wasmtime_table(wasmtime_export, store); - (*table.wasmtime_table(store)) + (*table.wasmtime_table(store).1) .fill(0, init, ty.minimum()) .map_err(Trap::from_runtime)?; @@ -497,12 +497,13 @@ impl Table { TableType::from_wasmtime_table(ty) } - fn wasmtime_table(&self, store: &mut StoreOpaque) -> *mut runtime::Table { + fn wasmtime_table(&self, store: &mut StoreOpaque) -> (InstanceHandle, *mut runtime::Table) { unsafe { let export = &store[self.0]; let mut handle = InstanceHandle::from_vmctx(export.vmctx); let idx = handle.table_index(&*export.definition); - handle.get_defined_table(idx) + let table = handle.get_defined_table(idx); + (handle, table) } } @@ -515,9 +516,9 @@ impl Table { /// Panics if `store` does not own this table. pub fn get(&self, mut store: impl AsContextMut, index: u32) -> Option { let store = store.as_context_mut().0; - let table = self.wasmtime_table(store); + let (instance, table) = self.wasmtime_table(store); unsafe { - match (*table).get(index)? { + match (*table).get_or_init(index, instance)? 
{ runtime::TableElement::FuncRef(f) => { let func = Func::from_caller_checked_anyfunc(store, f); Some(Val::FuncRef(func)) @@ -545,7 +546,7 @@ impl Table { let store = store.as_context_mut().0; let ty = self.ty(&store).element().clone(); let val = val.into_table_element(store, ty)?; - let table = self.wasmtime_table(store); + let (_, table) = self.wasmtime_table(store); unsafe { (*table) .set(index, val) @@ -591,7 +592,7 @@ impl Table { let store = store.as_context_mut().0; let ty = self.ty(&store).element().clone(); let init = init.into_table_element(store, ty)?; - let table = self.wasmtime_table(store); + let (_, table) = self.wasmtime_table(store); unsafe { match (*table).grow(delta, init, store)? { Some(size) => { @@ -656,10 +657,10 @@ impl Table { bail!("tables do not have the same element type"); } - let dst = dst_table.wasmtime_table(store); - let src = src_table.wasmtime_table(store); + let (_, dst) = dst_table.wasmtime_table(store); + let (src_instance, src) = src_table.wasmtime_table(store); unsafe { - runtime::Table::copy(dst, src, dst_index, src_index, len) + runtime::Table::copy(dst, src, src_instance, dst_index, src_index, len) .map_err(Trap::from_runtime)?; } Ok(()) @@ -686,7 +687,7 @@ impl Table { let ty = self.ty(&store).element().clone(); let val = val.into_table_element(store, ty)?; - let table = self.wasmtime_table(store); + let (_, table) = self.wasmtime_table(store); unsafe { (*table).fill(dst, val, len).map_err(Trap::from_runtime)?; } diff --git a/crates/wasmtime/src/func.rs b/crates/wasmtime/src/func.rs index 0bc9d0777518..a2b63ef37f84 100644 --- a/crates/wasmtime/src/func.rs +++ b/crates/wasmtime/src/func.rs @@ -791,7 +791,7 @@ impl Func { trampoline( (*anyfunc.as_ptr()).vmctx, callee, - (*anyfunc.as_ptr()).func_ptr.as_ptr(), + (*anyfunc.as_ptr()).func_ptr, params_and_returns, ) }) @@ -974,7 +974,7 @@ impl Func { unsafe { let f = self.caller_checked_anyfunc(store); VMFunctionImport { - body: f.as_ref().func_ptr, + body: 
NonNull::new(f.as_ref().func_ptr).unwrap(), vmctx: f.as_ref().vmctx, } } @@ -2060,7 +2060,7 @@ impl HostFunc { /// Requires that this function's signature is already registered within /// `Engine`. This happens automatically during the above two constructors. - fn _new(engine: &Engine, instance: InstanceHandle, trampoline: VMTrampoline) -> Self { + fn _new(engine: &Engine, mut instance: InstanceHandle, trampoline: VMTrampoline) -> Self { let idx = EntityIndex::Function(FuncIndex::from_u32(0)); let export = match instance.lookup_by_declaration(&idx) { wasmtime_runtime::Export::Function(f) => f, diff --git a/crates/wasmtime/src/func/typed.rs b/crates/wasmtime/src/func/typed.rs index 4ddced224d1e..620188309236 100644 --- a/crates/wasmtime/src/func/typed.rs +++ b/crates/wasmtime/src/func/typed.rs @@ -160,12 +160,8 @@ where let result = invoke_wasm_and_catch_traps(store, |callee| { let (anyfunc, ret, params, returned) = &mut captures; let anyfunc = anyfunc.as_ref(); - let result = Params::invoke::( - anyfunc.func_ptr.as_ptr(), - anyfunc.vmctx, - callee, - *params, - ); + let result = + Params::invoke::(anyfunc.func_ptr, anyfunc.vmctx, callee, *params); ptr::write(ret.as_mut_ptr(), result); *returned = true }); diff --git a/crates/wasmtime/src/instance.rs b/crates/wasmtime/src/instance.rs index 99687621026a..49d8ef8cb8a0 100644 --- a/crates/wasmtime/src/instance.rs +++ b/crates/wasmtime/src/instance.rs @@ -328,13 +328,16 @@ impl Instance { // Instantiated instances will lazily fill in exports, so we process // all that lazy logic here. InstanceData::Instantiated { id, exports, .. 
} => { - let instance = store.instance(*id); - let (i, _, index) = instance.module().exports.get_full(name)?; + let id = *id; + let instance = store.instance(id); + let (i, _, &index) = instance.module().exports.get_full(name)?; if let Some(export) = &exports[i] { return Some(export.clone()); } + + let instance = store.instance_mut(id); // reborrow the &mut InstanceHandle let item = unsafe { - Extern::from_wasmtime_export(instance.lookup_by_declaration(index), store) + Extern::from_wasmtime_export(instance.lookup_by_declaration(&index), store) }; let exports = match &mut store[self.0] { InstanceData::Instantiated { exports, .. } => exports, @@ -701,6 +704,7 @@ impl<'a> Instantiator<'a> { // this instance, so we determine what the ID is and then assert // it's the same later when we do actually insert it. let instance_to_be = store.store_data().next_id::(); + let mut instance_handle = store .engine() @@ -709,10 +713,9 @@ .allocate(InstanceAllocationRequest { module: compiled_module.module().clone(), unique_id: Some(compiled_module.unique_id()), memfds: self.cur.module.memfds().clone(), - image_base: compiled_module.code().as_ptr() as usize, - functions: compiled_module.functions(), + table_lazy_data: self.cur.module.table_lazy_data().clone(), + info: self.cur.module.alloc_info().clone(), imports: self.cur.build(), - shared_signatures: self.cur.module.signatures().as_module_map().into(), host_state: Box::new(Instance(instance_to_be)), store: StorePtr::new(store.traitobj()), wasm_data: compiled_module.wasm_data(), @@ -818,7 +821,7 @@ impl<'a> Instantiator<'a> { }; // If a start function is present, invoke it. Make sure we use all the // trap-handling configuration in `store` as well.
- let instance = store.0.instance(id); + let instance = store.0.instance_mut(id); let f = match instance.lookup_by_declaration(&EntityIndex::Function(start)) { wasmtime_runtime::Export::Function(f) => f, _ => unreachable!(), // valid modules shouldn't hit this @@ -829,9 +832,7 @@ impl<'a> Instantiator<'a> { mem::transmute::< *const VMFunctionBody, unsafe extern "C" fn(*mut VMContext, *mut VMContext), - >(f.anyfunc.as_ref().func_ptr.as_ptr())( - f.anyfunc.as_ref().vmctx, vmctx - ) + >(f.anyfunc.as_ref().func_ptr)(f.anyfunc.as_ref().vmctx, vmctx) })?; } Ok(()) diff --git a/crates/wasmtime/src/module.rs b/crates/wasmtime/src/module.rs index 09c2d3f485c7..a1044ce87c6d 100644 --- a/crates/wasmtime/src/module.rs +++ b/crates/wasmtime/src/module.rs @@ -12,6 +12,7 @@ use wasmparser::{Parser, ValidPayload, Validator}; use wasmtime_environ::{ModuleEnvironment, ModuleIndex, PrimaryMap}; use wasmtime_jit::{CompiledModule, CompiledModuleInfo, MmapVec, TypeTables}; use wasmtime_runtime::ModuleMemFds; +use wasmtime_runtime::{InstanceAllocationInfo, TablesLazyData}; mod registry; mod serialization; @@ -108,8 +109,15 @@ struct ModuleInner { types: Arc, /// Registered shared signature for the module. signatures: Arc, + /// a set of memfd images for memories, if any. memfds: Option>, + + /// Table lazy init data. + table_lazy_data: Arc, + + /// InstanceAllocationInfo shared by all instantiated modules, used for lazy initialization. 
+ alloc_info: Arc, } impl Module { @@ -496,6 +504,17 @@ impl Module { module.into_module(engine) } + fn create_alloc_info( + module: &CompiledModule, + sigs: &SignatureCollection, + ) -> Arc { + Arc::new(InstanceAllocationInfo { + image_base: module.code().as_ptr() as usize, + functions: module.functions().clone(), + shared_signatures: sigs.as_module_map().into(), + }) + } + fn from_parts( engine: &Engine, mut modules: Vec>, @@ -516,6 +535,8 @@ impl Module { let module = modules.remove(main_module); + let alloc_info = Module::create_alloc_info(&module, &signatures); + let module_upvars = module_upvars .iter() .map(|m| { @@ -532,6 +553,7 @@ impl Module { .collect::>>()?; let memfds = ModuleMemFds::new(module.module(), module.wasm_data())?; + let table_lazy_data = TablesLazyData::new(module.module()); return Ok(Self { inner: Arc::new(ModuleInner { @@ -542,6 +564,8 @@ impl Module { module_upvars, signatures, memfds, + table_lazy_data, + alloc_info, }), }); @@ -556,12 +580,15 @@ impl Module { ) -> Result { let module = artifacts[module_index].clone(); let memfds = ModuleMemFds::new(module.module(), module.wasm_data())?; + let table_lazy_data = TablesLazyData::new(module.module()); + let alloc_info = Module::create_alloc_info(&artifacts[module_index], signatures); Ok(Module { inner: Arc::new(ModuleInner { engine: engine.clone(), types: types.clone(), module, memfds, + table_lazy_data, artifact_upvars: artifact_upvars .iter() .map(|i| artifacts[*i].clone()) @@ -581,6 +608,7 @@ impl Module { }) .collect::>>()?, signatures: signatures.clone(), + alloc_info, }), }) } @@ -683,12 +711,18 @@ impl Module { ) -> Result { let module = self.inner.artifact_upvars[artifact_index].clone(); let memfds = ModuleMemFds::new(module.module(), module.wasm_data())?; + let table_lazy_data = TablesLazyData::new(module.module()); + let alloc_info = Module::create_alloc_info( + &self.inner.artifact_upvars[artifact_index], + &self.inner.signatures, + ); Ok(Module { inner: Arc::new(ModuleInner { 
types: self.inner.types.clone(), engine: self.inner.engine.clone(), module, memfds, + table_lazy_data, artifact_upvars: artifact_upvars .iter() .map(|i| self.inner.artifact_upvars[*i].clone()) @@ -703,6 +737,7 @@ impl Module { }) .collect(), signatures: self.inner.signatures.clone(), + alloc_info, }), }) } @@ -727,6 +762,14 @@ impl Module { &self.inner.memfds } + pub(crate) fn table_lazy_data(&self) -> &Arc { + &self.inner.table_lazy_data + } + + pub(crate) fn alloc_info(&self) -> &Arc { + &self.inner.alloc_info + } + /// Looks up the module upvar value at the `index` specified. /// /// Note that this panics if `index` is out of bounds since this should diff --git a/crates/wasmtime/src/module/registry.rs b/crates/wasmtime/src/module/registry.rs index 60bbbfffd783..a1a97e740622 100644 --- a/crates/wasmtime/src/module/registry.rs +++ b/crates/wasmtime/src/module/registry.rs @@ -95,7 +95,7 @@ impl ModuleRegistry { /// Looks up a trampoline from an anyfunc. pub fn lookup_trampoline(&self, anyfunc: &VMCallerCheckedAnyfunc) -> Option { - let module = self.module(anyfunc.func_ptr.as_ptr() as usize)?; + let module = self.module(anyfunc.func_ptr as usize)?; module.signatures.trampoline(anyfunc.type_index) } } diff --git a/crates/wasmtime/src/store.rs b/crates/wasmtime/src/store.rs index e6b4c8709abe..03dbcfb860c6 100644 --- a/crates/wasmtime/src/store.rs +++ b/crates/wasmtime/src/store.rs @@ -92,9 +92,10 @@ use std::sync::atomic::AtomicU64; use std::sync::Arc; use std::task::{Context, Poll}; use wasmtime_runtime::{ - InstanceAllocationRequest, InstanceAllocator, InstanceHandle, ModuleInfo, - OnDemandInstanceAllocator, SignalHandler, StorePtr, VMCallerCheckedAnyfunc, VMContext, - VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex, VMTrampoline, + InstanceAllocationInfo, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, + ModuleInfo, OnDemandInstanceAllocator, SignalHandler, StorePtr, VMCallerCheckedAnyfunc, + VMContext, VMExternRef, 
VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex, + VMTrampoline, }; mod context; @@ -409,7 +410,6 @@ impl Store { /// tables created to 10,000. This can be overridden with the /// [`Store::limiter`] configuration method. pub fn new(engine: &Engine, data: T) -> Self { - let functions = &Default::default(); // Wasmtime uses the callee argument to host functions to learn about // the original pointer to the `Store` itself, allowing it to // reconstruct a `StoreContextMut`. When we initially call a `Func`, @@ -418,18 +418,22 @@ impl Store { // part of `Func::call` to guarantee that the `callee: *mut VMContext` // is never null. let default_callee = unsafe { + let info = Arc::new(InstanceAllocationInfo { + image_base: 0, + functions: Default::default(), + shared_signatures: None.into(), + }); OnDemandInstanceAllocator::default() .allocate(InstanceAllocationRequest { host_state: Box::new(()), - image_base: 0, - functions, - shared_signatures: None.into(), imports: Default::default(), module: Arc::new(wasmtime_environ::Module::default()), unique_id: None, memfds: None, + table_lazy_data: Default::default(), store: StorePtr::empty(), wasm_data: &[], + info, }) .expect("failed to allocate default callee") }; diff --git a/crates/wasmtime/src/trampoline.rs b/crates/wasmtime/src/trampoline.rs index 02e0b51c8130..9e3d0c8ee125 100644 --- a/crates/wasmtime/src/trampoline.rs +++ b/crates/wasmtime/src/trampoline.rs @@ -18,8 +18,8 @@ use std::any::Any; use std::sync::Arc; use wasmtime_environ::{EntityIndex, GlobalIndex, MemoryIndex, Module, TableIndex}; use wasmtime_runtime::{ - Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, StorePtr, - VMFunctionImport, VMSharedSignatureIndex, + Imports, InstanceAllocationInfo, InstanceAllocationRequest, InstanceAllocator, + OnDemandInstanceAllocator, StorePtr, VMFunctionImport, VMSharedSignatureIndex, }; fn create_handle( @@ -31,7 +31,7 @@ fn create_handle( ) -> Result { let mut imports = 
Imports::default(); imports.functions = func_imports; - let functions = &Default::default(); + let functions = Default::default(); unsafe { let config = store.engine().config(); @@ -43,13 +43,16 @@ fn create_handle( module: Arc::new(module), unique_id: None, memfds: None, - functions, - image_base: 0, + table_lazy_data: Default::default(), imports, - shared_signatures: shared_signature_id.into(), host_state, store: StorePtr::new(store.traitobj()), wasm_data: &[], + info: Arc::new(InstanceAllocationInfo { + functions, + image_base: 0, + shared_signatures: shared_signature_id.into(), + }), }, )?; @@ -64,7 +67,7 @@ pub fn generate_global_export( ) -> Result { let instance = create_global(store, gt, val)?; let idx = EntityIndex::Global(GlobalIndex::from_u32(0)); - match store.instance(instance).lookup_by_declaration(&idx) { + match store.instance_mut(instance).lookup_by_declaration(&idx) { wasmtime_runtime::Export::Global(g) => Ok(g), _ => unreachable!(), } @@ -76,7 +79,7 @@ pub fn generate_memory_export( ) -> Result { let instance = create_memory(store, m)?; let idx = EntityIndex::Memory(MemoryIndex::from_u32(0)); - match store.instance(instance).lookup_by_declaration(&idx) { + match store.instance_mut(instance).lookup_by_declaration(&idx) { wasmtime_runtime::Export::Memory(m) => Ok(m), _ => unreachable!(), } @@ -88,7 +91,7 @@ pub fn generate_table_export( ) -> Result { let instance = create_table(store, t)?; let idx = EntityIndex::Table(TableIndex::from_u32(0)); - match store.instance(instance).lookup_by_declaration(&idx) { + match store.instance_mut(instance).lookup_by_declaration(&idx) { wasmtime_runtime::Export::Table(t) => Ok(t), _ => unreachable!(), } diff --git a/crates/wasmtime/src/trampoline/func.rs b/crates/wasmtime/src/trampoline/func.rs index 77d5f26d188d..b47fbbb16141 100644 --- a/crates/wasmtime/src/trampoline/func.rs +++ b/crates/wasmtime/src/trampoline/func.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use wasmtime_environ::{EntityIndex, Module, ModuleType, 
PrimaryMap, SignatureIndex}; use wasmtime_jit::{CodeMemory, MmapVec, ProfilingAgent}; use wasmtime_runtime::{ - Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, + Imports, InstanceAllocationInfo, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, OnDemandInstanceAllocator, StorePtr, VMContext, VMFunctionBody, VMSharedSignatureIndex, VMTrampoline, }; @@ -158,18 +158,22 @@ pub unsafe fn create_raw_function( .exports .insert(String::new(), EntityIndex::Function(func_id)); + let info = Arc::new(InstanceAllocationInfo { + functions: Arc::new(functions), + image_base: (*func).as_ptr() as usize, + shared_signatures: sig.into(), + }); Ok( OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest { module: Arc::new(module), unique_id: None, memfds: None, - functions: &functions, - image_base: (*func).as_ptr() as usize, + table_lazy_data: Default::default(), imports: Imports::default(), - shared_signatures: sig.into(), host_state, store: StorePtr::empty(), wasm_data: &[], + info, })?, ) } diff --git a/crates/wasmtime/src/trampoline/global.rs b/crates/wasmtime/src/trampoline/global.rs index 3feb599b26ee..62645a072b77 100644 --- a/crates/wasmtime/src/trampoline/global.rs +++ b/crates/wasmtime/src/trampoline/global.rs @@ -1,3 +1,5 @@ +use std::ptr::NonNull; + use crate::store::{InstanceId, StoreOpaque}; use crate::trampoline::create_handle; use crate::{GlobalType, Mutability, Val}; @@ -51,7 +53,7 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu }); func_imports.push(VMFunctionImport { - body: f.func_ptr, + body: NonNull::new(f.func_ptr).unwrap(), vmctx: f.vmctx, }); @@ -73,7 +75,7 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu )?; if let Some(x) = externref_init { - let instance = store.instance(id); + let instance = store.instance_mut(id); match instance.lookup_by_declaration(&EntityIndex::Global(global_id)) { wasmtime_runtime::Export::Global(g) => unsafe { 
*(*g.definition).as_externref_mut() = Some(x.inner);