Skip to content

Commit

Permalink
Address review feedback
Browse files Browse the repository at this point in the history
  • Loading branch information
fitzgen committed Aug 16, 2023
1 parent bb64a53 commit c4aa70e
Show file tree
Hide file tree
Showing 8 changed files with 120 additions and 86 deletions.
4 changes: 2 additions & 2 deletions crates/fuzzing/src/generators/pooling_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,11 @@ impl PoolingAllocationConfig {
cfg.memory_pages(self.memory_pages);
cfg.table_elements(self.table_elements);

cfg.component_instance_size(self.component_instance_size);
cfg.max_component_instance_size(self.component_instance_size);
cfg.max_memories_per_component(self.max_memories_per_component);
cfg.max_tables_per_component(self.max_tables_per_component);

cfg.core_instance_size(self.core_instance_size);
cfg.max_core_instance_size(self.core_instance_size);
cfg.max_memories_per_module(self.max_memories_per_module);
cfg.max_tables_per_module(self.max_tables_per_module);

Expand Down
43 changes: 30 additions & 13 deletions crates/runtime/src/instance/allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,19 @@ pub unsafe trait InstanceAllocatorImpl {
/// Not all instance allocators will have limits for the maximum number of
/// concurrent component instances that can be live at the same time, and
/// these allocators may implement this method with a no-op.
//
// Note: It would be nice to have an associated type that on construction
// does the increment and on drop does the decrement but there are two
// problems with this:
//
// 1. This trait's implementations are always used as trait objects, and
// associated types are not object safe.
//
// 2. We would want a parameterized `Drop` implementation so that we could
// pass in the `InstanceAllocatorImpl` on drop, but this doesn't exist in
// Rust. Therefore, we would be forced to add reference counting and
// stuff like that to keep a handle on the instance allocator from this
// theoretical type. That's a bummer.
fn increment_component_instance_count(&self) -> Result<()>;

/// The dual of `increment_component_instance_count`.
Expand Down Expand Up @@ -308,23 +321,23 @@ pub trait InstanceAllocator: InstanceAllocatorImpl {
let num_defined_tables = module.table_plans.len() - module.num_imported_tables;
let mut tables = PrimaryMap::with_capacity(num_defined_tables);

let result = self
.allocate_memories(&mut request, &mut memories)
.and_then(|()| self.allocate_tables(&mut request, &mut tables));
if let Err(e) = result {
self.deallocate_memories(&mut memories);
self.deallocate_tables(&mut tables);
self.decrement_core_instance_count();
return Err(e);
}

unsafe {
Ok(Instance::new(
match (|| {
self.allocate_memories(&mut request, &mut memories)?;
self.allocate_tables(&mut request, &mut tables)?;
Ok(())
})() {
Ok(_) => Ok(Instance::new(
request,
memories,
tables,
&module.memory_plans,
))
)),
Err(e) => {
self.deallocate_memories(&mut memories);
self.deallocate_tables(&mut tables);
self.decrement_core_instance_count();
Err(e)
}
}
}

Expand Down Expand Up @@ -392,6 +405,10 @@ pub trait InstanceAllocator: InstanceAllocatorImpl {
memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
) {
for (memory_index, (allocation_index, memory)) in mem::take(memories) {
// Because deallocating memory is infallible, we don't need to worry
// about leaking subsequent memories if the first memory failed to
// deallocate. If deallocating memory ever becomes fallible, we will
// need to be careful here!
self.deallocate_memory(memory_index, allocation_index, memory);
}
}
Expand Down
68 changes: 35 additions & 33 deletions crates/runtime/src/instance/allocator/pooling/index_allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ impl SimpleIndexAllocator {
}
}

/// A particular defined memory within a particular module.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct MemoryInModule(pub CompiledModuleId, pub DefinedMemoryIndex);

/// An index allocator that has configurable affinity between slots and modules
/// so that slots are often reused for the same module again.
#[derive(Debug)]
Expand Down Expand Up @@ -86,7 +90,7 @@ struct Inner {
///
/// The `List` here is appended to during deallocation and removal happens
/// from the tail during allocation.
module_affine: HashMap<(CompiledModuleId, DefinedMemoryIndex), List>,
module_affine: HashMap<MemoryInModule, List>,
}

/// A helper "linked list" data structure which is based on indices.
Expand All @@ -107,7 +111,7 @@ struct Link {
#[derive(Clone, Debug)]
enum SlotState {
/// This slot is currently in use and is affine to the specified module's memory.
Used(Option<(CompiledModuleId, DefinedMemoryIndex)>),
Used(Option<MemoryInModule>),

/// This slot is not currently used, and has never been used.
UnusedCold,
Expand All @@ -131,7 +135,7 @@ impl SlotState {
#[derive(Default, Copy, Clone, Debug)]
struct Unused {
/// Which module this slot was historically affine to, if any.
affinity: Option<(CompiledModuleId, DefinedMemoryIndex)>,
affinity: Option<MemoryInModule>,

/// Metadata about the linked list for all slots affine to `affinity`.
affine_list_link: Link,
Expand Down Expand Up @@ -162,10 +166,7 @@ impl ModuleAffinityIndexAllocator {
/// affinity request if the allocation strategy supports it.
///
/// Returns `None` if no more slots are available.
pub fn alloc(
&self,
for_memory: Option<(CompiledModuleId, DefinedMemoryIndex)>,
) -> Option<SlotId> {
pub fn alloc(&self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
self._alloc(for_memory, AllocMode::AnySlot)
}

Expand All @@ -182,16 +183,12 @@ impl ModuleAffinityIndexAllocator {
memory_index: DefinedMemoryIndex,
) -> Option<SlotId> {
self._alloc(
Some((module_id, memory_index)),
Some(MemoryInModule(module_id, memory_index)),
AllocMode::ForceAffineAndClear,
)
}

fn _alloc(
&self,
for_memory: Option<(CompiledModuleId, DefinedMemoryIndex)>,
mode: AllocMode,
) -> Option<SlotId> {
fn _alloc(&self, for_memory: Option<MemoryInModule>, mode: AllocMode) -> Option<SlotId> {
let mut inner = self.0.lock().unwrap();
let inner = &mut *inner;

Expand Down Expand Up @@ -302,9 +299,7 @@ impl ModuleAffinityIndexAllocator {
/// For testing only, get the list of all modules with at least
/// one slot with affinity for that module.
#[cfg(test)]
pub(crate) fn testing_module_affinity_list(
&self,
) -> Vec<(CompiledModuleId, DefinedMemoryIndex)> {
pub(crate) fn testing_module_affinity_list(&self) -> Vec<MemoryInModule> {
let inner = self.0.lock().unwrap();
inner.module_affine.keys().copied().collect()
}
Expand All @@ -313,10 +308,7 @@ impl ModuleAffinityIndexAllocator {
impl Inner {
/// Attempts to allocate a slot already affine to `id`, returning `None` if
/// `id` is `None` or if there are no affine slots.
fn pick_affine(
&mut self,
for_memory: Option<(CompiledModuleId, DefinedMemoryIndex)>,
) -> Option<SlotId> {
fn pick_affine(&mut self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
// Note that the `tail` of the affine list is chosen here as it's the
// most recently used, which for affine allocations is what we want --
// maximizing temporal reuse.
Expand Down Expand Up @@ -477,8 +469,8 @@ mod test {
#[test]
fn test_affinity_allocation_strategy() {
let id_alloc = CompiledModuleIdAllocator::new();
let id1 = (id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = (id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id1 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let state = ModuleAffinityIndexAllocator::new(100, 100);

let index1 = state.alloc(Some(id1)).unwrap();
Expand Down Expand Up @@ -536,8 +528,8 @@ mod test {
for max_unused_warm_slots in [0, 1, 2] {
let state = ModuleAffinityIndexAllocator::new(100, max_unused_warm_slots);

let index1 = state.alloc(Some((id, memory_index))).unwrap();
let index2 = state.alloc(Some((id, memory_index))).unwrap();
let index1 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
let index2 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
state.free(index2);
state.free(index1);
assert!(state
Expand All @@ -559,9 +551,10 @@ mod test {
let mut rng = rand::thread_rng();

let id_alloc = CompiledModuleIdAllocator::new();
let ids = std::iter::repeat_with(|| (id_alloc.alloc(), DefinedMemoryIndex::new(0)))
.take(10)
.collect::<Vec<_>>();
let ids =
std::iter::repeat_with(|| MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0)))
.take(10)
.collect::<Vec<_>>();
let state = ModuleAffinityIndexAllocator::new(1000, 1000);
let mut allocated: Vec<SlotId> = vec![];
let mut last_id = vec![None; 1000];
Expand Down Expand Up @@ -603,9 +596,9 @@ mod test {
#[test]
fn test_affinity_threshold() {
let id_alloc = CompiledModuleIdAllocator::new();
let id1 = (id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = (id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id3 = (id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id1 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id2 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let id3 = MemoryInModule(id_alloc.alloc(), DefinedMemoryIndex::new(0));
let state = ModuleAffinityIndexAllocator::new(10, 2);

// Set some slot affinities
Expand Down Expand Up @@ -640,19 +633,28 @@ mod test {

// LRU is 1, so that should be picked
assert_eq!(
state.alloc(Some((id_alloc.alloc(), DefinedMemoryIndex::new(0)))),
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(1))
);

// Pick another LRU entry, this time 2
assert_eq!(
state.alloc(Some((id_alloc.alloc(), DefinedMemoryIndex::new(0)))),
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(2))
);

// This should preserve slot `0` and pick up something new
assert_eq!(
state.alloc(Some((id_alloc.alloc(), DefinedMemoryIndex::new(0)))),
state.alloc(Some(MemoryInModule(
id_alloc.alloc(),
DefinedMemoryIndex::new(0)
))),
Some(SlotId(3))
);

Expand Down
28 changes: 19 additions & 9 deletions crates/runtime/src/instance/allocator/pooling/memory_pool.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use super::{
index_allocator::{ModuleAffinityIndexAllocator, SlotId},
index_allocator::{MemoryInModule, ModuleAffinityIndexAllocator, SlotId},
MemoryAllocationIndex,
};
use crate::{
Expand All @@ -17,9 +17,6 @@ use wasmtime_environ::{
///
/// A linear memory is divided into accessible pages and guard pages.
///
/// Each instance index into the pool returns an iterator over the base
/// addresses of the instance's linear memories.
///
/// A diagram for this struct's fields is:
///
/// ```ignore
Expand All @@ -35,7 +32,7 @@ use wasmtime_environ::{
/// +-----------+--------+---+-----------+ +--------+---+-----------+
/// | |<------------------+---------------------------------->
/// \ | \
/// mapping | `max_instances * max_memories` memories
/// mapping | `max_total_memories` memories
/// /
/// initial_memory_offset
/// ```
Expand Down Expand Up @@ -65,6 +62,8 @@ pub struct MemoryPool {
max_total_memories: usize,
// The maximum number of memories that a single core module instance may
// use.
//
// NB: this is needed for validation but does not affect the pool's size.
memories_per_instance: usize,
// How much linear memory, in bytes, to keep resident after resetting for
// use with the next instance. This much memory will be `memset` to zero
Expand Down Expand Up @@ -114,9 +113,8 @@ impl MemoryPool {
0
};

// The entire allocation here is the size of each memory times the
// max memories per instance times the number of instances allowed in
// this pool, plus guard regions.
// The entire allocation here is the size of each memory (with guard
// regions) times the total number of memories in the pool.
//
// Note, though, that guard regions are required to be after each linear
// memory. If the `guard_before_linear_memory` setting is specified,
Expand Down Expand Up @@ -214,7 +212,7 @@ impl MemoryPool {
request
.runtime_info
.unique_id()
.map(|id| (id, memory_index)),
.map(|id| MemoryInModule(id, memory_index)),
)
.map(|slot| MemoryAllocationIndex(u32::try_from(slot.index()).unwrap()))
.ok_or_else(|| {
Expand Down Expand Up @@ -302,6 +300,18 @@ impl MemoryPool {
// allocated further (the module is being dropped) so this shouldn't hit
// any sort of infinite loop since this should be the final operation
// working with `module`.
//
// TODO: We are given a module id, but key affinity by pair of module id
// and defined memory index. We are missing any defined memory index or
// count of how many memories the module defines here. Therefore, we
// probe up to the maximum number of memories per instance. This is fine
// because that maximum is generally relatively small. If this method
// somehow ever gets hot because of unnecessary probing, we should
// either pass in the actual number of defined memories for the given
// module to this method, or keep a side table of all slots that are
// associated with a module (not just module and memory). The latter
// would require care to make sure that its maintenance wouldn't be too
// expensive for normal allocation/free operations.
for i in 0..self.memories_per_instance {
use wasmtime_environ::EntityRef;
let memory_index = DefinedMemoryIndex::new(i);
Expand Down
16 changes: 8 additions & 8 deletions crates/wasmtime/src/component/component.rs
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,14 @@ impl Component {
None => bincode::deserialize(code_memory.wasmtime_info())?,
};

// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;

// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
Expand All @@ -258,14 +266,6 @@ impl Component {
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));

// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;

// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
Expand Down
Loading

0 comments on commit c4aa70e

Please sign in to comment.