From 1e71470c5c90685978f58e70f60a20ab627abc60 Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Wed, 2 Nov 2022 09:02:59 -0700
Subject: [PATCH] Refactor configuration for the pooling allocator

This commit changes the APIs in the `wasmtime` crate for configuring
the pooling allocator. I plan on adding a few more configuration
options in the near future, and the current structure was feeling
unwieldy for those additions.

The previous `struct`-based API has been replaced with a builder-style
API similar in shape to `Config`. This makes it easier to add more
configuration options in the future by adding methods rather than
fields, which could break existing initializations. An illustrative
sketch of the new builder-style usage is included below, after the
benchmark updates.
---
 benches/instantiation.rs                      |  12 +-
 benches/thread_eager_init.rs                  |  11 +-
 crates/cli-flags/src/lib.rs                   |   8 +-
 crates/fuzzing/src/generators.rs              |   4 +-
 crates/fuzzing/src/generators/config.rs       |  66 ++--
 .../instance_allocation_strategy.rs           |  43 +--
 .../fuzzing/src/generators/instance_limits.rs |  49 ---
 .../fuzzing/src/generators/pooling_config.rs  |  81 +++++
 crates/runtime/src/cow_disabled.rs            |   2 +-
 crates/runtime/src/instance/allocator.rs      |   5 +-
 .../runtime/src/instance/allocator/pooling.rs | 286 +++++++-----------
 crates/runtime/src/lib.rs                     |   5 +-
 crates/wasmtime/src/config.rs                 | 247 +++++++++++----
 fuzz/fuzz_targets/instantiate-many.rs         |   4 +-
 tests/all/async_functions.rs                  |  29 +-
 tests/all/instance.rs                         |  10 +-
 tests/all/limits.rs                           |  33 +-
 tests/all/memory.rs                           |  11 +-
 tests/all/pooling_allocator.rs                | 175 ++++-------
 tests/all/wast.rs                             |  19 +-
 20 files changed, 532 insertions(+), 568 deletions(-)
 delete mode 100644 crates/fuzzing/src/generators/instance_limits.rs
 create mode 100644 crates/fuzzing/src/generators/pooling_config.rs

diff --git a/benches/instantiation.rs b/benches/instantiation.rs
index 356dbeb5a2e4..ae272c1fcbc1 100644
--- a/benches/instantiation.rs
+++ b/benches/instantiation.rs
@@ -202,13 +202,11 @@ fn bench_instantiation(c: &mut Criterion) {
 fn strategies() -> impl Iterator<Item = InstanceAllocationStrategy> {
     [
         InstanceAllocationStrategy::OnDemand,
-        InstanceAllocationStrategy::Pooling {
-            strategy: Default::default(),
-            instance_limits: InstanceLimits {
-                memory_pages: 10_000,
-                ..Default::default()
-            },
-        },
+        InstanceAllocationStrategy::Pooling({
+            let mut config = PoolingAllocationConfig::default();
+            config.instance_memory_pages(10_000);
+            config
+        }),
     ]
     .into_iter()
 }
diff --git a/benches/thread_eager_init.rs b/benches/thread_eager_init.rs
index dbd5617a6f58..9f6e4c5b9e53 100644
--- a/benches/thread_eager_init.rs
+++ b/benches/thread_eager_init.rs
@@ -91,15 +91,10 @@ fn test_setup() -> (Engine, Module) {
     // We only expect to create one Instance at a time, with a single memory.
     let pool_count = 10;
 
+    let mut pool = PoolingAllocationConfig::default();
+    pool.instance_count(pool_count).instance_memory_pages(1);
     let mut config = Config::new();
-    config.allocation_strategy(InstanceAllocationStrategy::Pooling {
-        strategy: PoolingAllocationStrategy::NextAvailable,
-        instance_limits: InstanceLimits {
-            count: pool_count,
-            memory_pages: 1,
-            ..Default::default()
-        },
-    });
+    config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
     let engine = Engine::new(&config).unwrap();
 
     // The module has a memory (shouldn't matter) and a single function which is a no-op.
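For illustration, here is a minimal sketch of the new builder-style usage. It only uses items and methods that appear in the diffs in this patch (`PoolingAllocationConfig`, `instance_count`, `instance_memory_pages`, `instance_table_elements`, `allocation_strategy`) and assumes the `pooling-allocator` feature is enabled; the concrete limit values are arbitrary and not part of the change itself:

    use wasmtime::{Config, Engine, InstanceAllocationStrategy, PoolingAllocationConfig};

    fn main() {
        // Previously this was expressed as
        // `InstanceAllocationStrategy::Pooling { strategy, instance_limits }`;
        // now a `PoolingAllocationConfig` is built up through chained setter methods.
        let mut pool = PoolingAllocationConfig::default();
        pool.instance_count(10)
            .instance_memory_pages(1)
            .instance_table_elements(10);

        // The finished pooling configuration is handed to the engine-level `Config`.
        let mut config = Config::new();
        config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
        let _engine = Engine::new(&config).unwrap();
    }

The remaining knobs from the old `InstanceLimits`/`PoolingAllocationStrategy` pair map onto methods on the same builder, such as `strategy(...)` and `instance_size(...)`.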
diff --git a/crates/cli-flags/src/lib.rs b/crates/cli-flags/src/lib.rs index e4066edb9d02..4b9ccae60a74 100644 --- a/crates/cli-flags/src/lib.rs +++ b/crates/cli-flags/src/lib.rs @@ -21,8 +21,6 @@ use clap::Parser; use std::collections::HashMap; use std::path::PathBuf; use wasmtime::{Config, ProfilingStrategy}; -#[cfg(feature = "pooling-allocator")] -use wasmtime::{InstanceLimits, PoolingAllocationStrategy}; pub const SUPPORTED_WASM_FEATURES: &[(&str, &str)] = &[ ("all", "enables all supported WebAssembly features"), @@ -332,11 +330,7 @@ impl CommonOptions { #[cfg(feature = "pooling-allocator")] { if self.pooling_allocator { - let instance_limits = InstanceLimits::default(); - config.allocation_strategy(wasmtime::InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits, - }); + config.allocation_strategy(wasmtime::InstanceAllocationStrategy::pooling()); } } diff --git a/crates/fuzzing/src/generators.rs b/crates/fuzzing/src/generators.rs index c0b93e1242f2..3da2e240401a 100644 --- a/crates/fuzzing/src/generators.rs +++ b/crates/fuzzing/src/generators.rs @@ -13,9 +13,9 @@ mod codegen_settings; pub mod component_types; mod config; mod instance_allocation_strategy; -mod instance_limits; mod memory; mod module; +mod pooling_config; mod single_inst_module; mod spec_test; mod stacks; @@ -25,9 +25,9 @@ mod value; pub use codegen_settings::CodegenSettings; pub use config::{Config, WasmtimeConfig}; pub use instance_allocation_strategy::InstanceAllocationStrategy; -pub use instance_limits::InstanceLimits; pub use memory::{MemoryConfig, NormalMemoryConfig, UnalignedMemory, UnalignedMemoryCreator}; pub use module::ModuleConfig; +pub use pooling_config::PoolingAllocationConfig; pub use single_inst_module::SingleInstModule; pub use spec_test::SpecTest; pub use stacks::Stacks; diff --git a/crates/fuzzing/src/generators/config.rs b/crates/fuzzing/src/generators/config.rs index 1da2616d4a06..861809da8f06 100644 --- a/crates/fuzzing/src/generators/config.rs +++ b/crates/fuzzing/src/generators/config.rs @@ -72,23 +72,20 @@ impl Config { config.canonicalize_nans = true; // If using the pooling allocator, update the instance limits too - if let InstanceAllocationStrategy::Pooling { - instance_limits: limits, - .. - } = &mut self.wasmtime.strategy - { + if let InstanceAllocationStrategy::Pooling(pooling) = &mut self.wasmtime.strategy { // One single-page memory - limits.memories = config.max_memories as u32; - limits.memory_pages = 10; + pooling.instance_memories = config.max_memories as u32; + pooling.instance_memory_pages = 10; - limits.tables = config.max_tables as u32; - limits.table_elements = 1_000; + pooling.instance_tables = config.max_tables as u32; + pooling.instance_table_elements = 1_000; - limits.size = 1_000_000; + pooling.instance_size = 1_000_000; match &mut self.wasmtime.memory_config { MemoryConfig::Normal(config) => { - config.static_memory_maximum_size = Some(limits.memory_pages * 0x10000); + config.static_memory_maximum_size = + Some(pooling.instance_memory_pages * 0x10000); } MemoryConfig::CustomUnaligned => unreachable!(), // Arbitrary impl for `Config` should have prevented this } @@ -122,25 +119,22 @@ impl Config { config.max_memories = 1; config.max_tables = 5; - if let InstanceAllocationStrategy::Pooling { - instance_limits: limits, - .. 
- } = &mut self.wasmtime.strategy - { + if let InstanceAllocationStrategy::Pooling(pooling) = &mut self.wasmtime.strategy { // Configure the lower bound of a number of limits to what's // required to actually run the spec tests. Fuzz-generated inputs // may have limits less than these thresholds which would cause the // spec tests to fail which isn't particularly interesting. - limits.memories = 1; - limits.tables = limits.memories.max(5); - limits.table_elements = limits.memories.max(1_000); - limits.memory_pages = limits.memory_pages.max(900); - limits.count = limits.count.max(500); - limits.size = limits.size.max(64 * 1024); + pooling.instance_memories = 1; + pooling.instance_tables = pooling.instance_tables.max(5); + pooling.instance_table_elements = pooling.instance_table_elements.max(1_000); + pooling.instance_memory_pages = pooling.instance_memory_pages.max(900); + pooling.instance_count = pooling.instance_count.max(500); + pooling.instance_size = pooling.instance_size.max(64 * 1024); match &mut self.wasmtime.memory_config { MemoryConfig::Normal(config) => { - config.static_memory_maximum_size = Some(limits.memory_pages * 0x10000); + config.static_memory_maximum_size = + Some(pooling.instance_memory_pages * 0x10000); } MemoryConfig::CustomUnaligned => unreachable!(), // Arbitrary impl for `Config` should have prevented this } @@ -173,8 +167,7 @@ impl Config { self.wasmtime.memory_guaranteed_dense_image_size, )) .allocation_strategy(self.wasmtime.strategy.to_wasmtime()) - .generate_address_map(self.wasmtime.generate_address_map) - .async_stack_zeroing(self.wasmtime.async_stack_zeroing); + .generate_address_map(self.wasmtime.generate_address_map); self.wasmtime.codegen.configure(&mut cfg); @@ -319,11 +312,7 @@ impl<'a> Arbitrary<'a> for Config { // If using the pooling allocator, constrain the memory and module configurations // to the module limits. - if let InstanceAllocationStrategy::Pooling { - instance_limits: limits, - .. - } = &mut config.wasmtime.strategy - { + if let InstanceAllocationStrategy::Pooling(pooling) = &mut config.wasmtime.strategy { let cfg = &mut config.module_config.config; // If the pooling allocator is used, do not allow shared memory to // be created. FIXME: see @@ -333,19 +322,21 @@ impl<'a> Arbitrary<'a> for Config { // Force the use of a normal memory config when using the pooling allocator and // limit the static memory maximum to be the same as the pooling allocator's memory // page limit. - if cfg.max_memory_pages < limits.memory_pages { - limits.memory_pages = cfg.max_memory_pages; + if cfg.max_memory_pages < pooling.instance_memory_pages { + pooling.instance_memory_pages = cfg.max_memory_pages; } else { - cfg.max_memory_pages = limits.memory_pages; + cfg.max_memory_pages = pooling.instance_memory_pages; } config.wasmtime.memory_config = match config.wasmtime.memory_config { MemoryConfig::Normal(mut config) => { - config.static_memory_maximum_size = Some(limits.memory_pages * 0x10000); + config.static_memory_maximum_size = + Some(pooling.instance_memory_pages * 0x10000); MemoryConfig::Normal(config) } MemoryConfig::CustomUnaligned => { let mut config: NormalMemoryConfig = u.arbitrary()?; - config.static_memory_maximum_size = Some(limits.memory_pages * 0x10000); + config.static_memory_maximum_size = + Some(pooling.instance_memory_pages * 0x10000); MemoryConfig::Normal(config) } }; @@ -357,8 +348,8 @@ impl<'a> Arbitrary<'a> for Config { // Force this pooling allocator to always be able to accommodate the // module that may be generated. 
- limits.memories = cfg.max_memories as u32; - limits.tables = cfg.max_tables as u32; + pooling.instance_memories = cfg.max_memories as u32; + pooling.instance_tables = cfg.max_tables as u32; } Ok(config) @@ -387,7 +378,6 @@ pub struct WasmtimeConfig { padding_between_functions: Option, generate_address_map: bool, native_unwind_info: bool, - async_stack_zeroing: bool, } impl WasmtimeConfig { diff --git a/crates/fuzzing/src/generators/instance_allocation_strategy.rs b/crates/fuzzing/src/generators/instance_allocation_strategy.rs index f5aabeb58ecc..e23ce9164230 100644 --- a/crates/fuzzing/src/generators/instance_allocation_strategy.rs +++ b/crates/fuzzing/src/generators/instance_allocation_strategy.rs @@ -1,19 +1,13 @@ +use super::PoolingAllocationConfig; use arbitrary::Arbitrary; -use super::InstanceLimits; - /// Configuration for `wasmtime::InstanceAllocationStrategy`. #[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Hash)] pub enum InstanceAllocationStrategy { /// Use the on-demand instance allocation strategy. OnDemand, /// Use the pooling instance allocation strategy. - Pooling { - /// The pooling strategy to use. - strategy: PoolingAllocationStrategy, - /// The instance limits. - instance_limits: InstanceLimits, - }, + Pooling(PoolingAllocationConfig), } impl InstanceAllocationStrategy { @@ -21,37 +15,8 @@ impl InstanceAllocationStrategy { pub fn to_wasmtime(&self) -> wasmtime::InstanceAllocationStrategy { match self { InstanceAllocationStrategy::OnDemand => wasmtime::InstanceAllocationStrategy::OnDemand, - InstanceAllocationStrategy::Pooling { - strategy, - instance_limits, - } => wasmtime::InstanceAllocationStrategy::Pooling { - strategy: strategy.to_wasmtime(), - instance_limits: instance_limits.to_wasmtime(), - }, - } - } -} - -/// Configuration for `wasmtime::PoolingAllocationStrategy`. -#[derive(Arbitrary, Clone, Debug, PartialEq, Eq, Hash)] -pub enum PoolingAllocationStrategy { - /// Use next available instance slot. - NextAvailable, - /// Use random instance slot. - Random, - /// Use an affinity-based strategy. - ReuseAffinity, -} - -impl PoolingAllocationStrategy { - fn to_wasmtime(&self) -> wasmtime::PoolingAllocationStrategy { - match self { - PoolingAllocationStrategy::NextAvailable => { - wasmtime::PoolingAllocationStrategy::NextAvailable - } - PoolingAllocationStrategy::Random => wasmtime::PoolingAllocationStrategy::Random, - PoolingAllocationStrategy::ReuseAffinity => { - wasmtime::PoolingAllocationStrategy::ReuseAffinity + InstanceAllocationStrategy::Pooling(pooling) => { + wasmtime::InstanceAllocationStrategy::Pooling(pooling.to_wasmtime()) } } } diff --git a/crates/fuzzing/src/generators/instance_limits.rs b/crates/fuzzing/src/generators/instance_limits.rs deleted file mode 100644 index 7176d2ba650b..000000000000 --- a/crates/fuzzing/src/generators/instance_limits.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Generate instance limits for the pooling allocation strategy. - -use arbitrary::{Arbitrary, Unstructured}; - -/// Configuration for `wasmtime::PoolingAllocationStrategy`. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -#[allow(missing_docs)] -pub struct InstanceLimits { - pub count: u32, - pub memories: u32, - pub tables: u32, - pub memory_pages: u64, - pub table_elements: u32, - pub size: usize, -} - -impl InstanceLimits { - /// Convert the generated limits to Wasmtime limits. 
-    pub fn to_wasmtime(&self) -> wasmtime::InstanceLimits {
-        wasmtime::InstanceLimits {
-            count: self.count,
-            memories: self.memories,
-            tables: self.tables,
-            memory_pages: self.memory_pages,
-            table_elements: self.table_elements,
-            size: self.size,
-        }
-    }
-}
-
-impl<'a> Arbitrary<'a> for InstanceLimits {
-    fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
-        const MAX_COUNT: u32 = 100;
-        const MAX_TABLES: u32 = 10;
-        const MAX_MEMORIES: u32 = 10;
-        const MAX_ELEMENTS: u32 = 1000;
-        const MAX_MEMORY_PAGES: u64 = 160; // 10 MiB
-        const MAX_SIZE: usize = 1 << 20; // 1 MiB
-
-        Ok(Self {
-            tables: u.int_in_range(0..=MAX_TABLES)?,
-            memories: u.int_in_range(0..=MAX_MEMORIES)?,
-            table_elements: u.int_in_range(0..=MAX_ELEMENTS)?,
-            memory_pages: u.int_in_range(0..=MAX_MEMORY_PAGES)?,
-            count: u.int_in_range(1..=MAX_COUNT)?,
-            size: u.int_in_range(0..=MAX_SIZE)?,
-        })
-    }
-}
diff --git a/crates/fuzzing/src/generators/pooling_config.rs b/crates/fuzzing/src/generators/pooling_config.rs
new file mode 100644
index 000000000000..f6a49a5e7b4b
--- /dev/null
+++ b/crates/fuzzing/src/generators/pooling_config.rs
@@ -0,0 +1,81 @@
+//! Generate instance limits for the pooling allocation strategy.
+
+use arbitrary::{Arbitrary, Unstructured};
+
+/// Configuration for `wasmtime::PoolingAllocationConfig`.
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+#[allow(missing_docs)]
+pub struct PoolingAllocationConfig {
+    pub strategy: PoolingAllocationStrategy,
+    pub instance_count: u32,
+    pub instance_memories: u32,
+    pub instance_tables: u32,
+    pub instance_memory_pages: u64,
+    pub instance_table_elements: u32,
+    pub instance_size: usize,
+    pub async_stack_zeroing: bool,
+}
+
+impl PoolingAllocationConfig {
+    /// Convert the generated limits to Wasmtime limits.
+    pub fn to_wasmtime(&self) -> wasmtime::PoolingAllocationConfig {
+        let mut cfg = wasmtime::PoolingAllocationConfig::default();
+
+        cfg.strategy(self.strategy.to_wasmtime())
+            .instance_count(self.instance_count)
+            .instance_memories(self.instance_memories)
+            .instance_tables(self.instance_tables)
+            .instance_memory_pages(self.instance_memory_pages)
+            .instance_table_elements(self.instance_table_elements)
+            .instance_size(self.instance_size)
+            .async_stack_zeroing(self.async_stack_zeroing);
+        cfg
+    }
+}
+
+impl<'a> Arbitrary<'a> for PoolingAllocationConfig {
+    fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
+        const MAX_COUNT: u32 = 100;
+        const MAX_TABLES: u32 = 10;
+        const MAX_MEMORIES: u32 = 10;
+        const MAX_ELEMENTS: u32 = 1000;
+        const MAX_MEMORY_PAGES: u64 = 160; // 10 MiB
+        const MAX_SIZE: usize = 1 << 20; // 1 MiB
+
+        Ok(Self {
+            strategy: u.arbitrary()?,
+            instance_tables: u.int_in_range(0..=MAX_TABLES)?,
+            instance_memories: u.int_in_range(0..=MAX_MEMORIES)?,
+            instance_table_elements: u.int_in_range(0..=MAX_ELEMENTS)?,
+            instance_memory_pages: u.int_in_range(0..=MAX_MEMORY_PAGES)?,
+            instance_count: u.int_in_range(1..=MAX_COUNT)?,
+            instance_size: u.int_in_range(0..=MAX_SIZE)?,
+            async_stack_zeroing: u.arbitrary()?,
+        })
+    }
+}
+
+/// Configuration for `wasmtime::PoolingAllocationStrategy`.
+#[derive(Arbitrary, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum PoolingAllocationStrategy {
+    /// Use next available instance slot.
+    NextAvailable,
+    /// Use random instance slot.
+    Random,
+    /// Use an affinity-based strategy.
+ ReuseAffinity, +} + +impl PoolingAllocationStrategy { + fn to_wasmtime(&self) -> wasmtime::PoolingAllocationStrategy { + match self { + PoolingAllocationStrategy::NextAvailable => { + wasmtime::PoolingAllocationStrategy::NextAvailable + } + PoolingAllocationStrategy::Random => wasmtime::PoolingAllocationStrategy::Random, + PoolingAllocationStrategy::ReuseAffinity => { + wasmtime::PoolingAllocationStrategy::ReuseAffinity + } + } + } +} diff --git a/crates/runtime/src/cow_disabled.rs b/crates/runtime/src/cow_disabled.rs index be06a9f4a1c6..63a92bd0ce07 100644 --- a/crates/runtime/src/cow_disabled.rs +++ b/crates/runtime/src/cow_disabled.rs @@ -57,7 +57,7 @@ impl MemoryImageSlot { unreachable!(); } - pub(crate) fn clear_and_remain_ready(&mut self) -> Result<()> { + pub(crate) fn clear_and_remain_ready(&mut self, _keep_resident: usize) -> Result<()> { unreachable!(); } diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index b52e8aa836a7..ebf4f80b0d65 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -21,7 +21,10 @@ use wasmtime_environ::{ mod pooling; #[cfg(feature = "pooling-allocator")] -pub use self::pooling::{InstanceLimits, PoolingAllocationStrategy, PoolingInstanceAllocator}; +pub use self::pooling::{ + InstanceLimits, PoolingAllocationStrategy, PoolingInstanceAllocator, + PoolingInstanceAllocatorConfig, +}; /// Represents a request for a new runtime instance. pub struct InstanceAllocationRequest<'a> { diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs index 97e2eea93d1d..424708d5b048 100644 --- a/crates/runtime/src/instance/allocator/pooling.rs +++ b/crates/runtime/src/instance/allocator/pooling.rs @@ -50,114 +50,34 @@ fn round_up_to_pow2(n: usize, to: usize) -> usize { (n + to - 1) & !(to - 1) } -/// Represents the limits placed on instances by the pooling instance allocator. +/// Instance-related limit configuration for pooling. +/// +/// More docs on this can be found at `wasmtime::PoolingAllocationConfig`. #[derive(Debug, Copy, Clone)] pub struct InstanceLimits { - /// The maximum number of concurrent instances supported (default is 1000). - /// - /// This value has a direct impact on the amount of memory allocated by the pooling - /// instance allocator. - /// - /// The pooling instance allocator allocates three memory pools with sizes depending on this value: - /// - /// * An instance pool, where each entry in the pool can store the runtime representation - /// of an instance, including a maximal `VMContext` structure. - /// - /// * A memory pool, where each entry in the pool contains the reserved address space for each - /// linear memory supported by an instance. - /// - /// * A table pool, where each entry in the pool contains the space needed for each WebAssembly table - /// supported by an instance (see `table_elements` to control the size of each table). - /// - /// Additionally, this value will also control the maximum number of execution stacks allowed for - /// asynchronous execution (one per instance), when enabled. - /// - /// The memory pool will reserve a large quantity of host process address space to elide the bounds - /// checks required for correct WebAssembly memory semantics. Even for 64-bit address spaces, the - /// address space is limited when dealing with a large number of supported instances. - /// - /// For example, on Linux x86_64, the userland address space limit is 128 TiB. 
That might seem like a lot, - /// but each linear memory will *reserve* 6 GiB of space by default. Multiply that by the number of linear - /// memories each instance supports and then by the number of supported instances and it becomes apparent - /// that address space can be exhausted depending on the number of supported instances. + /// Maximum instances to support pub count: u32, - /// The maximum size, in bytes, allocated for an instance and its - /// `VMContext`. - /// - /// This amount of space is pre-allocated for `count` number of instances - /// and is used to store the runtime `wasmtime_runtime::Instance` structure - /// along with its adjacent `VMContext` structure. The `Instance` type has a - /// static size but `VMContext` is dynamically sized depending on the module - /// being instantiated. This size limit loosely correlates to the size of - /// the wasm module, taking into account factors such as: - /// - /// * number of functions - /// * number of globals - /// * number of memories - /// * number of tables - /// * number of function types - /// - /// If the allocated size per instance is too small then instantiation of a - /// module will fail at runtime with an error indicating how many bytes were - /// needed. This amount of bytes are committed to memory per-instance when - /// a pooling allocator is created. - /// - /// The default value for this is 1MB. + /// Maximum size of instance VMContext pub size: usize, - /// The maximum number of defined tables for a module (default is 1). - /// - /// This value controls the capacity of the `VMTableDefinition` table in each instance's - /// `VMContext` structure. - /// - /// The allocated size of the table will be `tables * sizeof(VMTableDefinition)` for each - /// instance regardless of how many tables are defined by an instance's module. + /// Maximum number of tables per instance pub tables: u32, - /// The maximum table elements for any table defined in a module (default is 10000). - /// - /// If a table's minimum element limit is greater than this value, the module will - /// fail to instantiate. - /// - /// If a table's maximum element limit is unbounded or greater than this value, - /// the maximum will be `table_elements` for the purpose of any `table.grow` instruction. - /// - /// This value is used to reserve the maximum space for each supported table; table elements - /// are pointer-sized in the Wasmtime runtime. Therefore, the space reserved for each instance - /// is `tables * table_elements * sizeof::<*const ()>`. + /// Maximum number of table elements per table pub table_elements: u32, - /// The maximum number of defined linear memories for a module (default is 1). - /// - /// This value controls the capacity of the `VMMemoryDefinition` table in each instance's - /// `VMContext` structure. - /// - /// The allocated size of the table will be `memories * sizeof(VMMemoryDefinition)` for each - /// instance regardless of how many memories are defined by an instance's module. + /// Maximum number of linear memories per instance pub memories: u32, - /// The maximum number of pages for any linear memory defined in a module (default is 160). - /// - /// The default of 160 means at most 10 MiB of host memory may be committed for each instance. - /// - /// If a memory's minimum page limit is greater than this value, the module will - /// fail to instantiate. 
- /// - /// If a memory's maximum page limit is unbounded or greater than this value, - /// the maximum will be `memory_pages` for the purpose of any `memory.grow` instruction. - /// - /// This value is used to control the maximum accessible space for each linear memory of an instance. - /// - /// The reservation size of each linear memory is controlled by the - /// `static_memory_maximum_size` setting and this value cannot - /// exceed the configured static memory maximum size. + /// Maximum number of wasm pages for each linear memory. pub memory_pages: u64, } impl Default for InstanceLimits { fn default() -> Self { - // See doc comments for `wasmtime::InstanceLimits` for these default values + // See doc comments for `wasmtime::PoolingAllocationConfig` for these + // default values Self { count: 1000, size: 1 << 20, // 1 MB @@ -209,16 +129,12 @@ struct InstancePool { } impl InstancePool { - fn new( - strategy: PoolingAllocationStrategy, - instance_limits: &InstanceLimits, - tunables: &Tunables, - ) -> Result { + fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result { let page_size = crate::page_size(); - let instance_size = round_up_to_pow2(instance_limits.size, mem::align_of::()); + let instance_size = round_up_to_pow2(config.limits.size, mem::align_of::()); - let max_instances = instance_limits.count as usize; + let max_instances = config.limits.count as usize; let allocation_size = round_up_to_pow2( instance_size @@ -234,9 +150,12 @@ impl InstancePool { mapping, instance_size, max_instances, - index_allocator: Mutex::new(PoolingAllocationState::new(strategy, max_instances)), - memories: MemoryPool::new(instance_limits, tunables)?, - tables: TablePool::new(instance_limits)?, + index_allocator: Mutex::new(PoolingAllocationState::new( + config.strategy, + max_instances, + )), + memories: MemoryPool::new(&config.limits, tunables)?, + tables: TablePool::new(&config.limits)?, }; Ok(pool) @@ -892,25 +811,21 @@ struct StackPool { #[cfg(all(feature = "async", unix))] impl StackPool { - fn new( - instance_limits: &InstanceLimits, - stack_size: usize, - async_stack_zeroing: bool, - ) -> Result { + fn new(config: &PoolingInstanceAllocatorConfig) -> Result { use rustix::mm::{mprotect, MprotectFlags}; let page_size = crate::page_size(); // Add a page to the stack size for the guard page when using fiber stacks - let stack_size = if stack_size == 0 { + let stack_size = if config.stack_size == 0 { 0 } else { - round_up_to_pow2(stack_size, page_size) + round_up_to_pow2(config.stack_size, page_size) .checked_add(page_size) .ok_or_else(|| anyhow!("stack size exceeds addressable memory"))? }; - let max_instances = instance_limits.count as usize; + let max_instances = config.limits.count as usize; let allocation_size = stack_size .checked_mul(max_instances) @@ -936,7 +851,7 @@ impl StackPool { stack_size, max_instances, page_size, - async_stack_zeroing, + async_stack_zeroing: config.async_stack_zeroing, // We always use a `NextAvailable` strategy for stack // allocation. We don't want or need an affinity policy // here: stacks do not benefit from being allocated to the @@ -1011,6 +926,33 @@ impl StackPool { } } +/// Configuration options for the pooling instance allocator supplied at +/// construction. +#[derive(Copy, Clone, Debug)] +pub struct PoolingInstanceAllocatorConfig { + /// Allocation strategy to use for slot indexes in the pooling instance + /// allocator. 
+ pub strategy: PoolingAllocationStrategy, + /// The size, in bytes, of async stacks to allocate (not including the guard + /// page). + pub stack_size: usize, + /// The limits to apply to instances allocated within this allocator. + pub limits: InstanceLimits, + /// Whether or not async stacks are zeroed after use. + pub async_stack_zeroing: bool, +} + +impl Default for PoolingInstanceAllocatorConfig { + fn default() -> PoolingInstanceAllocatorConfig { + PoolingInstanceAllocatorConfig { + strategy: Default::default(), + stack_size: 2 << 20, + limits: InstanceLimits::default(), + async_stack_zeroing: false, + } + } +} + /// Implements the pooling instance allocator. /// /// This allocator internally maintains pools of instances, memories, tables, and stacks. @@ -1027,28 +969,19 @@ pub struct PoolingInstanceAllocator { impl PoolingInstanceAllocator { /// Creates a new pooling instance allocator with the given strategy and limits. - pub fn new( - strategy: PoolingAllocationStrategy, - instance_limits: InstanceLimits, - stack_size: usize, - tunables: &Tunables, - async_stack_zeroing: bool, - ) -> Result { - if instance_limits.count == 0 { + pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result { + if config.limits.count == 0 { bail!("the instance count limit cannot be zero"); } - let instances = InstancePool::new(strategy, &instance_limits, tunables)?; - - drop(stack_size); // suppress unused warnings w/o async feature - drop(async_stack_zeroing); // suppress unused warnings w/o async feature + let instances = InstancePool::new(config, tunables)?; Ok(Self { instances: instances, #[cfg(all(feature = "async", unix))] - stacks: StackPool::new(&instance_limits, stack_size, async_stack_zeroing)?, + stacks: StackPool::new(config)?, #[cfg(all(feature = "async", windows))] - stack_size, + stack_size: config.stack_size, }) } } @@ -1173,7 +1106,9 @@ mod test { #[cfg(target_pointer_width = "64")] #[test] fn test_instance_pool() -> Result<()> { - let instance_limits = InstanceLimits { + let mut config = PoolingInstanceAllocatorConfig::default(); + config.strategy = PoolingAllocationStrategy::NextAvailable; + config.limits = InstanceLimits { count: 3, tables: 1, memories: 1, @@ -1184,8 +1119,7 @@ mod test { }; let instances = InstancePool::new( - PoolingAllocationStrategy::NextAvailable, - &instance_limits, + &config, &Tunables { static_memory_bound: 1, ..Tunables::default() @@ -1336,14 +1270,16 @@ mod test { #[cfg(all(unix, target_pointer_width = "64", feature = "async"))] #[test] fn test_stack_pool() -> Result<()> { - let pool = StackPool::new( - &InstanceLimits { + let config = PoolingInstanceAllocatorConfig { + limits: InstanceLimits { count: 10, ..Default::default() }, - 1, - true, - )?; + stack_size: 1, + async_stack_zeroing: true, + ..PoolingInstanceAllocatorConfig::default() + }; + let pool = StackPool::new(&config)?; let native_page_size = crate::page_size(); assert_eq!(pool.stack_size, 2 * native_page_size); @@ -1410,39 +1346,38 @@ mod test { #[test] fn test_pooling_allocator_with_zero_instance_count() { + let config = PoolingInstanceAllocatorConfig { + limits: InstanceLimits { + count: 0, + ..Default::default() + }, + ..PoolingInstanceAllocatorConfig::default() + }; assert_eq!( - PoolingInstanceAllocator::new( - PoolingAllocationStrategy::Random, - InstanceLimits { - count: 0, - ..Default::default() - }, - 4096, - &Tunables::default(), - true, - ) - .map_err(|e| e.to_string()) - .expect_err("expected a failure constructing instance allocator"), + 
PoolingInstanceAllocator::new(&config, &Tunables::default(),) + .map_err(|e| e.to_string()) + .expect_err("expected a failure constructing instance allocator"), "the instance count limit cannot be zero" ); } #[test] fn test_pooling_allocator_with_memory_pages_exceeded() { + let config = PoolingInstanceAllocatorConfig { + limits: InstanceLimits { + count: 1, + memory_pages: 0x10001, + ..Default::default() + }, + ..PoolingInstanceAllocatorConfig::default() + }; assert_eq!( PoolingInstanceAllocator::new( - PoolingAllocationStrategy::Random, - InstanceLimits { - count: 1, - memory_pages: 0x10001, - ..Default::default() - }, - 4096, + &config, &Tunables { static_memory_bound: 1, ..Tunables::default() }, - true, ) .map_err(|e| e.to_string()) .expect_err("expected a failure constructing instance allocator"), @@ -1452,21 +1387,22 @@ mod test { #[test] fn test_pooling_allocator_with_reservation_size_exceeded() { + let config = PoolingInstanceAllocatorConfig { + limits: InstanceLimits { + count: 1, + memory_pages: 2, + ..Default::default() + }, + ..PoolingInstanceAllocatorConfig::default() + }; assert_eq!( PoolingInstanceAllocator::new( - PoolingAllocationStrategy::Random, - InstanceLimits { - count: 1, - memory_pages: 2, - ..Default::default() - }, - 4096, + &config, &Tunables { static_memory_bound: 1, static_memory_offset_guard_size: 0, ..Tunables::default() }, - true ) .map_err(|e| e.to_string()) .expect_err("expected a failure constructing instance allocator"), @@ -1477,9 +1413,9 @@ mod test { #[cfg(all(unix, target_pointer_width = "64", feature = "async"))] #[test] fn test_stack_zeroed() -> Result<()> { - let allocator = PoolingInstanceAllocator::new( - PoolingAllocationStrategy::NextAvailable, - InstanceLimits { + let config = PoolingInstanceAllocatorConfig { + strategy: PoolingAllocationStrategy::NextAvailable, + limits: InstanceLimits { count: 1, table_elements: 0, memory_pages: 0, @@ -1487,10 +1423,11 @@ mod test { memories: 0, ..Default::default() }, - 128, - &Tunables::default(), - true, - )?; + stack_size: 128, + async_stack_zeroing: true, + ..PoolingInstanceAllocatorConfig::default() + }; + let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default())?; unsafe { for _ in 0..255 { @@ -1512,9 +1449,9 @@ mod test { #[cfg(all(unix, target_pointer_width = "64", feature = "async"))] #[test] fn test_stack_unzeroed() -> Result<()> { - let allocator = PoolingInstanceAllocator::new( - PoolingAllocationStrategy::NextAvailable, - InstanceLimits { + let config = PoolingInstanceAllocatorConfig { + strategy: PoolingAllocationStrategy::NextAvailable, + limits: InstanceLimits { count: 1, table_elements: 0, memory_pages: 0, @@ -1522,10 +1459,11 @@ mod test { memories: 0, ..Default::default() }, - 128, - &Tunables::default(), - false, - )?; + stack_size: 128, + async_stack_zeroing: false, + ..PoolingInstanceAllocatorConfig::default() + }; + let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default())?; unsafe { for i in 0..255 { diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index 87e485dfdce4..53d1720ef05e 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -57,7 +57,10 @@ pub use crate::instance::{ InstantiationError, LinkError, OnDemandInstanceAllocator, StorePtr, }; #[cfg(feature = "pooling-allocator")] -pub use crate::instance::{InstanceLimits, PoolingAllocationStrategy, PoolingInstanceAllocator}; +pub use crate::instance::{ + InstanceLimits, PoolingAllocationStrategy, PoolingInstanceAllocator, + PoolingInstanceAllocatorConfig, 
+}; pub use crate::memory::{ DefaultMemoryCreator, Memory, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, }; diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index b29fe2322683..836f942c9665 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -16,9 +16,6 @@ use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemo pub use wasmtime_environ::CacheStore; -#[cfg(feature = "pooling-allocator")] -pub use wasmtime_runtime::{InstanceLimits, PoolingAllocationStrategy}; - /// Represents the module instance allocation strategy to use. #[derive(Clone)] pub enum InstanceAllocationStrategy { @@ -35,22 +32,14 @@ pub enum InstanceAllocationStrategy { /// from the pool. Resources are returned to the pool when the `Store` referencing the instance /// is dropped. #[cfg(feature = "pooling-allocator")] - Pooling { - /// The allocation strategy to use. - strategy: PoolingAllocationStrategy, - /// The instance limits to use. - instance_limits: InstanceLimits, - }, + Pooling(PoolingAllocationConfig), } impl InstanceAllocationStrategy { /// The default pooling instance allocation strategy. #[cfg(feature = "pooling-allocator")] pub fn pooling() -> Self { - Self::Pooling { - strategy: PoolingAllocationStrategy::default(), - instance_limits: InstanceLimits::default(), - } + Self::Pooling(Default::default()) } } @@ -109,7 +98,6 @@ pub struct Config { pub(crate) memory_init_cow: bool, pub(crate) memory_guaranteed_dense_image_size: u64, pub(crate) force_memory_init_memfd: bool, - pub(crate) async_stack_zeroing: bool, } /// User-provided configuration for the compiler. @@ -197,7 +185,6 @@ impl Config { memory_init_cow: true, memory_guaranteed_dense_image_size: 16 << 20, force_memory_init_memfd: false, - async_stack_zeroing: false, }; #[cfg(compiler)] { @@ -343,35 +330,6 @@ impl Config { self } - /// Configures whether or not stacks used for async futures are reset to - /// zero after usage. - /// - /// When the [`async_support`](Config::async_support) method is enabled for - /// Wasmtime and the [`call_async`] variant - /// of calling WebAssembly is used then Wasmtime will create a separate - /// runtime execution stack for each future produced by [`call_async`]. - /// When using the pooling instance allocator - /// ([`InstanceAllocationStrategy::Pooling`]) this allocation will happen - /// from a pool of stacks and additionally deallocation will simply release - /// the stack back to the pool. During the deallocation process Wasmtime - /// won't by default reset the contents of the stack back to zero. - /// - /// When this option is enabled it can be seen as a defense-in-depth - /// mechanism to reset a stack back to zero. This is not required for - /// correctness and can be a costly operation in highly concurrent - /// environments due to modifications of the virtual address space requiring - /// process-wide synchronization. - /// - /// This option defaults to `false`. - /// - /// [`call_async`]: crate::TypedFunc::call_async - #[cfg(feature = "async")] - #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))] - pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self { - self.async_stack_zeroing = enable; - self - } - /// Configures whether DWARF debug information will be emitted during /// compilation. 
     ///
@@ -1488,22 +1446,20 @@ impl Config {
         #[cfg(not(feature = "async"))]
         let stack_size = 0;
 
-        match self.allocation_strategy {
+        match &self.allocation_strategy {
             InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
                 self.mem_creator.clone(),
                 stack_size,
             ))),
             #[cfg(feature = "pooling-allocator")]
-            InstanceAllocationStrategy::Pooling {
-                strategy,
-                instance_limits,
-            } => Ok(Box::new(wasmtime_runtime::PoolingInstanceAllocator::new(
-                strategy,
-                instance_limits,
-                stack_size,
-                &self.tunables,
-                self.async_stack_zeroing,
-            )?)),
+            InstanceAllocationStrategy::Pooling(config) => {
+                let mut config = config.config;
+                config.stack_size = stack_size;
+                Ok(Box::new(wasmtime_runtime::PoolingInstanceAllocator::new(
+                    &config,
+                    &self.tunables,
+                )?))
+            }
         }
     }
 
@@ -1707,3 +1663,184 @@ pub enum WasmBacktraceDetails {
     /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
     Environment,
 }
+
+/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
+/// change the behavior of the pooling instance allocator.
+///
+/// This structure exposes a builder-like interface and is primarily consumed
+/// through [`Config::allocation_strategy`].
+///
+/// Validation of these options is deferred until the engine is being built,
+/// thus a problematic configuration may cause
+/// [`Engine::new`](crate::Engine::new) to fail.
+#[cfg(feature = "pooling-allocator")]
+#[derive(Debug, Clone, Default)]
+pub struct PoolingAllocationConfig {
+    config: wasmtime_runtime::PoolingInstanceAllocatorConfig,
+}
+
+#[cfg(feature = "pooling-allocator")]
+pub use wasmtime_runtime::PoolingAllocationStrategy;
+
+#[cfg(feature = "pooling-allocator")]
+impl PoolingAllocationConfig {
+    /// Configures the method by which slots in the pooling allocator are
+    /// allocated to instances.
+    ///
+    /// This defaults to [`PoolingAllocationStrategy::ReuseAffinity`] when the
+    /// `memory-init-cow` feature of Wasmtime is enabled, which is enabled by
+    /// default. Otherwise it defaults to
+    /// [`PoolingAllocationStrategy::NextAvailable`].
+    pub fn strategy(&mut self, strategy: PoolingAllocationStrategy) -> &mut Self {
+        self.config.strategy = strategy;
+        self
+    }
+
+    /// Configures whether or not stacks used for async futures are reset to
+    /// zero after usage.
+    ///
+    /// When the [`async_support`](Config::async_support) method is enabled for
+    /// Wasmtime and the [`call_async`] variant
+    /// of calling WebAssembly is used then Wasmtime will create a separate
+    /// runtime execution stack for each future produced by [`call_async`].
+    /// When using the pooling instance allocator
+    /// ([`InstanceAllocationStrategy::Pooling`]) this allocation will happen
+    /// from a pool of stacks and additionally deallocation will simply release
+    /// the stack back to the pool. During the deallocation process Wasmtime
+    /// won't by default reset the contents of the stack back to zero.
+    ///
+    /// When this option is enabled it can be seen as a defense-in-depth
+    /// mechanism to reset a stack back to zero. This is not required for
+    /// correctness and can be a costly operation in highly concurrent
+    /// environments due to modifications of the virtual address space requiring
+    /// process-wide synchronization.
+    ///
+    /// This option defaults to `false`.
+ /// + /// [`call_async`]: crate::TypedFunc::call_async + #[cfg(feature = "async")] + #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))] + pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self { + self.config.async_stack_zeroing = enable; + self + } + + /// The maximum number of concurrent instances supported (default is 1000). + /// + /// This value has a direct impact on the amount of memory allocated by the pooling + /// instance allocator. + /// + /// The pooling instance allocator allocates three memory pools with sizes depending on this value: + /// + /// * An instance pool, where each entry in the pool can store the runtime representation + /// of an instance, including a maximal `VMContext` structure. + /// + /// * A memory pool, where each entry in the pool contains the reserved address space for each + /// linear memory supported by an instance. + /// + /// * A table pool, where each entry in the pool contains the space needed for each WebAssembly table + /// supported by an instance (see `table_elements` to control the size of each table). + /// + /// Additionally, this value will also control the maximum number of execution stacks allowed for + /// asynchronous execution (one per instance), when enabled. + /// + /// The memory pool will reserve a large quantity of host process address space to elide the bounds + /// checks required for correct WebAssembly memory semantics. Even for 64-bit address spaces, the + /// address space is limited when dealing with a large number of supported instances. + /// + /// For example, on Linux x86_64, the userland address space limit is 128 TiB. That might seem like a lot, + /// but each linear memory will *reserve* 6 GiB of space by default. Multiply that by the number of linear + /// memories each instance supports and then by the number of supported instances and it becomes apparent + /// that address space can be exhausted depending on the number of supported instances. + pub fn instance_count(&mut self, count: u32) -> &mut Self { + self.config.limits.count = count; + self + } + + /// The maximum size, in bytes, allocated for an instance and its + /// `VMContext`. + /// + /// This amount of space is pre-allocated for `count` number of instances + /// and is used to store the runtime `wasmtime_runtime::Instance` structure + /// along with its adjacent `VMContext` structure. The `Instance` type has a + /// static size but `VMContext` is dynamically sized depending on the module + /// being instantiated. This size limit loosely correlates to the size of + /// the wasm module, taking into account factors such as: + /// + /// * number of functions + /// * number of globals + /// * number of memories + /// * number of tables + /// * number of function types + /// + /// If the allocated size per instance is too small then instantiation of a + /// module will fail at runtime with an error indicating how many bytes were + /// needed. This amount of bytes are committed to memory per-instance when + /// a pooling allocator is created. + /// + /// The default value for this is 1MB. + pub fn instance_size(&mut self, size: usize) -> &mut Self { + self.config.limits.size = size; + self + } + + /// The maximum number of defined tables for a module (default is 1). + /// + /// This value controls the capacity of the `VMTableDefinition` table in each instance's + /// `VMContext` structure. 
+ /// + /// The allocated size of the table will be `tables * sizeof(VMTableDefinition)` for each + /// instance regardless of how many tables are defined by an instance's module. + pub fn instance_tables(&mut self, tables: u32) -> &mut Self { + self.config.limits.tables = tables; + self + } + + /// The maximum table elements for any table defined in a module (default is 10000). + /// + /// If a table's minimum element limit is greater than this value, the module will + /// fail to instantiate. + /// + /// If a table's maximum element limit is unbounded or greater than this value, + /// the maximum will be `table_elements` for the purpose of any `table.grow` instruction. + /// + /// This value is used to reserve the maximum space for each supported table; table elements + /// are pointer-sized in the Wasmtime runtime. Therefore, the space reserved for each instance + /// is `tables * table_elements * sizeof::<*const ()>`. + pub fn instance_table_elements(&mut self, elements: u32) -> &mut Self { + self.config.limits.table_elements = elements; + self + } + + /// The maximum number of defined linear memories for a module (default is 1). + /// + /// This value controls the capacity of the `VMMemoryDefinition` table in each instance's + /// `VMContext` structure. + /// + /// The allocated size of the table will be `memories * sizeof(VMMemoryDefinition)` for each + /// instance regardless of how many memories are defined by an instance's module. + pub fn instance_memories(&mut self, memories: u32) -> &mut Self { + self.config.limits.memories = memories; + self + } + + /// The maximum number of pages for any linear memory defined in a module (default is 160). + /// + /// The default of 160 means at most 10 MiB of host memory may be committed for each instance. + /// + /// If a memory's minimum page limit is greater than this value, the module will + /// fail to instantiate. + /// + /// If a memory's maximum page limit is unbounded or greater than this value, + /// the maximum will be `memory_pages` for the purpose of any `memory.grow` instruction. + /// + /// This value is used to control the maximum accessible space for each linear memory of an instance. + /// + /// The reservation size of each linear memory is controlled by the + /// `static_memory_maximum_size` setting and this value cannot + /// exceed the configured static memory maximum size. + pub fn instance_memory_pages(&mut self, pages: u64) -> &mut Self { + self.config.limits.memory_pages = pages; + self + } +} diff --git a/fuzz/fuzz_targets/instantiate-many.rs b/fuzz/fuzz_targets/instantiate-many.rs index ad3cd1d6a0d8..9245071cad02 100644 --- a/fuzz/fuzz_targets/instantiate-many.rs +++ b/fuzz/fuzz_targets/instantiate-many.rs @@ -41,9 +41,7 @@ fn execute_one(data: &[u8]) -> Result<()> { let max_instances = match &config.wasmtime.strategy { generators::InstanceAllocationStrategy::OnDemand => u.int_in_range(1..=100)?, - generators::InstanceAllocationStrategy::Pooling { - instance_limits, .. 
- } => instance_limits.count, + generators::InstanceAllocationStrategy::Pooling(config) => config.instance_count, }; // Front-load with instantiation commands diff --git a/tests/all/async_functions.rs b/tests/all/async_functions.rs index ee57764fa019..d2b0d03cbfdf 100644 --- a/tests/all/async_functions.rs +++ b/tests/all/async_functions.rs @@ -354,17 +354,13 @@ async fn fuel_eventually_finishes() { #[tokio::test] async fn async_with_pooling_stacks() { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(0); let mut config = Config::new(); config.async_support(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: 0, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -383,17 +379,14 @@ async fn async_with_pooling_stacks() { #[tokio::test] async fn async_host_func_with_pooling_stacks() -> Result<()> { + let mut pooling = PoolingAllocationConfig::default(); + pooling + .instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(0); let mut config = Config::new(); config.async_support(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: 0, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pooling)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); diff --git a/tests/all/instance.rs b/tests/all/instance.rs index f51514c97b22..e37dd95a9320 100644 --- a/tests/all/instance.rs +++ b/tests/all/instance.rs @@ -40,14 +40,10 @@ fn linear_memory_limits() -> Result<()> { return Ok(()); } test(&Engine::default())?; + let mut pool = PoolingAllocationConfig::default(); + pool.instance_memory_pages(65536); test(&Engine::new(Config::new().allocation_strategy( - InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - memory_pages: 65536, - ..Default::default() - }, - }, + InstanceAllocationStrategy::Pooling(pool), ))?)?; return Ok(()); diff --git a/tests/all/limits.rs b/tests/all/limits.rs index 49224ef857a3..d1b013cb14dd 100644 --- a/tests/all/limits.rs +++ b/tests/all/limits.rs @@ -350,16 +350,11 @@ fn test_initial_table_limits_exceeded() -> Result<()> { #[test] fn test_pooling_allocator_initial_limits_exceeded() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1).instance_memories(2); let mut config = Config::new(); config.wasm_multi_memory(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memories: 2, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; let module = Module::new( @@ -721,15 +716,10 @@ fn custom_limiter_detect_grow_failure() -> Result<()> { if std::env::var("WASMTIME_TEST_NO_HOG_MEMORY").is_ok() { return Ok(()); } + let mut pool = PoolingAllocationConfig::default(); + 
pool.instance_memory_pages(10).instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - memory_pages: 10, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config).unwrap(); let linker = Linker::new(&engine); @@ -829,16 +819,11 @@ async fn custom_limiter_async_detect_grow_failure() -> Result<()> { if std::env::var("WASMTIME_TEST_NO_HOG_MEMORY").is_ok() { return Ok(()); } + let mut pool = PoolingAllocationConfig::default(); + pool.instance_memory_pages(10).instance_table_elements(10); let mut config = Config::new(); config.async_support(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - memory_pages: 10, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config).unwrap(); let linker = Linker::new(&engine); diff --git a/tests/all/memory.rs b/tests/all/memory.rs index 50b5e4a3471f..2005479f0cbc 100644 --- a/tests/all/memory.rs +++ b/tests/all/memory.rs @@ -186,19 +186,14 @@ fn guards_present() -> Result<()> { fn guards_present_pooling() -> Result<()> { const GUARD_SIZE: u64 = 65536; + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(2).instance_memory_pages(10); let mut config = Config::new(); config.static_memory_maximum_size(1 << 20); config.dynamic_memory_guard_size(GUARD_SIZE); config.static_memory_guard_size(GUARD_SIZE); config.guard_before_linear_memory(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::default(), - instance_limits: InstanceLimits { - count: 2, - memory_pages: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; let mut store = Store::new(&engine, ()); diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index 9b389cfd1c6c..bd3344c3b749 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -4,16 +4,12 @@ use wasmtime::*; #[test] fn successful_instantiation() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -30,16 +26,12 @@ fn successful_instantiation() -> Result<()> { #[test] fn memory_limit() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(3) + .instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 3, - table_elements: 10, - 
..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(65536); config.static_memory_maximum_size(3 * 65536); @@ -109,16 +101,12 @@ fn memory_limit() -> Result<()> { #[test] fn memory_init() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(2) + .instance_table_elements(0); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 2, - table_elements: 0, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; @@ -142,16 +130,12 @@ fn memory_init() -> Result<()> { #[test] fn memory_guard_page_trap() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(2) + .instance_table_elements(0); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 2, - table_elements: 0, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; @@ -210,16 +194,12 @@ fn memory_zeroed() -> Result<()> { return Ok(()); } + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(0); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: 0, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -253,16 +233,12 @@ fn memory_zeroed() -> Result<()> { #[test] fn table_limit() -> Result<()> { const TABLE_ELEMENTS: u32 = 10; + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(TABLE_ELEMENTS); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: TABLE_ELEMENTS, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -340,16 +316,12 @@ fn table_limit() -> Result<()> { #[test] fn table_init() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(0) + .instance_table_elements(6); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 0, - table_elements: 6, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; @@ -390,16 +362,12 @@ fn 
table_zeroed() -> Result<()> { return Ok(()); } + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memory_pages(1) + .instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memory_pages: 1, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -434,16 +402,12 @@ fn table_zeroed() -> Result<()> { #[test] fn instantiation_limit() -> Result<()> { const INSTANCE_LIMIT: u32 = 10; + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(INSTANCE_LIMIT) + .instance_memory_pages(1) + .instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: INSTANCE_LIMIT, - memory_pages: 1, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.dynamic_memory_guard_size(0); config.static_memory_guard_size(0); config.static_memory_maximum_size(65536); @@ -484,16 +448,12 @@ fn instantiation_limit() -> Result<()> { #[test] fn preserve_data_segments() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(2) + .instance_memory_pages(1) + .instance_table_elements(10); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 2, - memory_pages: 1, - table_elements: 10, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; let m = Module::new( &engine, @@ -536,16 +496,12 @@ fn multi_memory_with_imported_memories() -> Result<()> { // This test checks that the base address for the defined memory is correct for the instance // despite the presence of an imported memory. 
+ let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1) + .instance_memories(2) + .instance_memory_pages(1); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - memories: 2, - memory_pages: 1, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); config.wasm_multi_memory(true); let engine = Engine::new(&config)?; @@ -581,15 +537,11 @@ fn drop_externref_global_during_module_init() -> Result<()> { } } + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(1); let mut config = Config::new(); config.wasm_reference_types(true); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 1, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; @@ -630,15 +582,10 @@ fn drop_externref_global_during_module_init() -> Result<()> { #[test] #[cfg(target_pointer_width = "64")] fn instance_too_large() -> Result<()> { + let mut pool = PoolingAllocationConfig::default(); + pool.instance_size(16).instance_count(1); let mut config = Config::new(); - config.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - size: 16, - count: 1, - ..Default::default() - }, - }); + config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); let engine = Engine::new(&config)?; let expected = "\ diff --git a/tests/all/wast.rs b/tests/all/wast.rs index 280c68779648..27ea94b91a89 100644 --- a/tests/all/wast.rs +++ b/tests/all/wast.rs @@ -2,8 +2,7 @@ use once_cell::sync::Lazy; use std::path::Path; use std::sync::{Condvar, Mutex}; use wasmtime::{ - Config, Engine, InstanceAllocationStrategy, InstanceLimits, PoolingAllocationStrategy, Store, - Strategy, + Config, Engine, InstanceAllocationStrategy, PoolingAllocationConfig, Store, Strategy, }; use wasmtime_wast::WastContext; @@ -79,16 +78,12 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()> // However, these limits may become insufficient in the future as the wast tests change. // If a wast test fails because of a limit being "exceeded" or if memory/table // fails to grow, the values here will need to be adjusted. - cfg.allocation_strategy(InstanceAllocationStrategy::Pooling { - strategy: PoolingAllocationStrategy::NextAvailable, - instance_limits: InstanceLimits { - count: 450, - memories: 2, - tables: 4, - memory_pages: 805, - ..Default::default() - }, - }); + let mut pool = PoolingAllocationConfig::default(); + pool.instance_count(450) + .instance_memories(2) + .instance_tables(4) + .instance_memory_pages(805); + cfg.allocation_strategy(InstanceAllocationStrategy::Pooling(pool)); Some(lock_pooling()) } else { None