partly illustrate idea: aggregate madvise by doing it lazy
ihciah committed Aug 10, 2022
1 parent 63c2d1e commit 071f5d9
Showing 5 changed files with 164 additions and 3 deletions.
21 changes: 21 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions crates/runtime/Cargo.toml
@@ -25,6 +25,8 @@ rand = "0.8.3"
anyhow = "1.0.38"
memfd = { version = "0.6.1", optional = true }
paste = "1.0.3"
priority-queue = "1.2.3"
slab = "0.4.7"
encoding_rs = { version = "0.8.31", optional = true }

[target.'cfg(target_os = "macos")'.dependencies]
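The two new dependencies carry the range bookkeeping: `slab` hands out stable integer keys for stored ranges, and `priority-queue` orders those keys by a size hint so the largest dirty range can be popped first. A minimal standalone sketch of that pattern (illustration only, not the allocator code itself):

```rust
use priority_queue::PriorityQueue;
use slab::Slab;

fn main() {
    // Slab assigns each stored range a stable usize key.
    let mut ranges: Slab<(usize, usize)> = Slab::new();
    let small = ranges.insert((10, 11)); // 2 slots
    let large = ranges.insert((20, 27)); // 8 slots

    // PriorityQueue is a max-heap over those keys, ordered by size.
    let mut by_size: PriorityQueue<usize, usize> = PriorityQueue::new();
    by_size.push(small, 2);
    by_size.push(large, 8);

    // Popping yields the biggest range first.
    let (key, size) = by_size.pop().unwrap();
    assert_eq!(size, 8);
    assert_eq!(ranges.remove(key), (20, 27));
}
```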
6 changes: 5 additions & 1 deletion crates/runtime/src/instance/allocator/pooling.rs
@@ -26,6 +26,8 @@ use wasmtime_environ::{
mod index_allocator;
use index_allocator::{PoolingAllocationState, SlotId};

mod lazy_pool;

cfg_if::cfg_if! {
if #[cfg(windows)] {
mod windows;
@@ -180,6 +182,8 @@ pub enum PoolingAllocationStrategy {
/// the same module, potentially enabling faster instantiation by
/// reusing e.g. memory mappings.
ReuseAffinity,
/// Keep freed slots dirty and decommit them lazily, in batches
/// (one aggregated madvise per contiguous range) when they are reused.
LazyClean,
}

impl Default for PoolingAllocationStrategy {
@@ -937,7 +941,7 @@ impl StackPool {
// same compiled module with the same image (they always
// start zeroed just the same for everyone).
index_allocator: Mutex::new(PoolingAllocationState::new(
- PoolingAllocationStrategy::NextAvailable,
+ PoolingAllocationStrategy::LazyClean,
max_instances,
)),
})
11 changes: 9 additions & 2 deletions crates/runtime/src/instance/allocator/pooling/index_allocator.rs
@@ -1,13 +1,13 @@
//! Index/slot allocator policies for the pooling allocator.

- use super::PoolingAllocationStrategy;
+ use super::{lazy_pool::LazyPool, PoolingAllocationStrategy};
use crate::CompiledModuleId;
use rand::Rng;
use std::collections::HashMap;

/// A slot index. The job of this allocator is to hand out these
/// indices.
- #[derive(Clone, Copy, Debug, PartialEq, Eq)]
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct SlotId(pub usize);
impl SlotId {
/// The index of this slot.
@@ -75,6 +75,7 @@ pub(crate) enum PoolingAllocationState {
/// indices are kept up-to-date to allow fast removal.
slot_state: Vec<SlotState>,
},
LazyClean(LazyPool),
}

#[derive(Clone, Debug)]
@@ -264,6 +265,9 @@ impl PoolingAllocationState {
})
.collect(),
},
PoolingAllocationStrategy::LazyClean => {
PoolingAllocationState::LazyClean(LazyPool::new(ids, max_instances))
}
}
}

@@ -273,6 +277,7 @@ impl PoolingAllocationState {
&PoolingAllocationState::NextAvailable(ref free_list)
| &PoolingAllocationState::Random(ref free_list) => free_list.is_empty(),
&PoolingAllocationState::ReuseAffinity { ref free_list, .. } => free_list.is_empty(),
&PoolingAllocationState::LazyClean(ref lazy) => lazy.is_empty(),
}
}

@@ -348,6 +353,7 @@
slot_id
}
}
&mut PoolingAllocationState::LazyClean(ref mut lazy) => lazy.alloc(),
}
}

@@ -382,6 +388,7 @@
SlotState::Free(FreeSlotState::NoAffinity { free_list_index });
}
}
&mut PoolingAllocationState::LazyClean(ref mut lazy) => lazy.free(index),
}
}

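The dispatch itself is mechanical: every entry point gains one arm that forwards to `LazyPool`. A hypothetical in-crate test of the round trip — the `alloc(Option<CompiledModuleId>)` signature is assumed here, mirroring how the affinity strategy is driven:

```rust
#[cfg(test)]
mod lazy_clean_dispatch {
    use super::{PoolingAllocationState, PoolingAllocationStrategy};

    #[test]
    fn round_trip() {
        // An 8-slot pool driven by the new lazy-clean strategy.
        let mut state =
            PoolingAllocationState::new(PoolingAllocationStrategy::LazyClean, 8);
        assert!(!state.is_empty());

        // LazyClean ignores module affinity, so no module id is passed
        // (assumed signature: alloc(Option<CompiledModuleId>) -> SlotId).
        let slot = state.alloc(None);
        state.free(slot);
        assert!(!state.is_empty());
    }
}
```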
127 changes: 127 additions & 0 deletions crates/runtime/src/instance/allocator/pooling/lazy_pool.rs
@@ -0,0 +1,127 @@
use std::collections::HashMap;

use priority_queue::PriorityQueue;
use slab::Slab;

use super::index_allocator::SlotId;

type Range = (SlotId, SlotId);

/// LazyPool maintains dirty ranges and clean slots.
/// To reduce madvise cost, we merge contiguous dirty slots and
/// issue the madvise in one batch per range.
#[derive(Clone, Debug)]
pub(crate) struct LazyPool {
max_instances: usize,
// slab id -> range
dirty_ranges_slab: Slab<Range>,
// begin -> slab id
dirty_begin_mapping: HashMap<SlotId, usize>,
// end -> slab id
dirty_end_mapping: HashMap<SlotId, usize>,
// slab ids prioritized by a (lazily refreshed) length hint
dirty_ranges: PriorityQueue<usize, usize>,
clean: Vec<SlotId>,
// TODO: more fields for calling decommit, e.g. how to calculate the
// pointer and length.
}

impl LazyPool {
/// Create a LazyPool with the given clean slots.
pub(crate) fn new(ids: Vec<SlotId>, max_instances: usize) -> Self {
Self {
max_instances,
dirty_ranges_slab: Slab::with_capacity(max_instances),
dirty_begin_mapping: HashMap::new(),
dirty_end_mapping: HashMap::new(),
dirty_ranges: PriorityQueue::new(),
clean: ids,
}
}

/// Check if the LazyPool is empty.
pub(crate) fn is_empty(&self) -> bool {
self.clean.is_empty() && self.dirty_ranges.is_empty()
}

/// Allocate a clean slot id. The caller must ensure the pool is not empty.
pub(crate) fn alloc(&mut self) -> SlotId {
debug_assert!(!self.is_empty());

// try to alloc from clean directly
if let Some(id) = self.clean.pop() {
return id;
}
// get largest range
let (slab_id, _) = self.dirty_ranges.pop().unwrap();
let (left, right) = self.dirty_ranges_slab.remove(slab_id);
self.dirty_begin_mapping.remove(&left);
self.dirty_end_mapping.remove(&right);

// clean the whole range with one madvise call (batched decommit;
// see the sketch after this file)
// todo!();

// hand out the first slot and put the rest into the clean list
let ret = left;
for id in left.0 + 1..=right.0 {
self.clean.push(SlotId(id));
}
ret
}

/// Free a slot id.
pub(crate) fn free(&mut self, index: SlotId) {
let (mut slab_left, mut slab_right) = (None, None);
// check prev and next
if index.0 > 0 {
let prev = SlotId(index.0 - 1);
slab_left = self.dirty_end_mapping.remove(&prev);
}

if index.0 + 1 < self.max_instances {
let next = SlotId(index.0 + 1);
slab_right = self.dirty_begin_mapping.remove(&next);
}

match (slab_left, slab_right) {
(None, None) => {
// unable to merge
let slab_id = self.dirty_ranges_slab.insert((index, index));
self.dirty_begin_mapping.insert(index, slab_id);
self.dirty_end_mapping.insert(index, slab_id);
self.dirty_ranges.push(slab_id, 1);
}
(Some(slab_id), None) => {
// merge with left
self.dirty_end_mapping.insert(index, slab_id);
let range = self.dirty_ranges_slab.get_mut(slab_id).unwrap();
range.1 = index;
let size = range.1 .0 - range.0 .0;
// refresh the priority hint only every 16 size steps to
// amortize queue updates
if size & 0b1111 == 0 {
    self.dirty_ranges.change_priority(&slab_id, size);
}
}
(None, Some(slab_id)) => {
// merge with right
self.dirty_begin_mapping.insert(index, slab_id);
let range = self.dirty_ranges_slab.get_mut(slab_id).unwrap();
range.0 = index;
let size = range.1 .0 - range.0 .0;
// same amortized priority refresh as above
if size & 0b1111 == 0 {
    self.dirty_ranges.change_priority(&slab_id, size);
}
}
(Some(left_slab_id), Some(right_slab_id)) => {
    // merge with left and right
    let right_range = self.dirty_ranges_slab.remove(right_slab_id);
    let range = self.dirty_ranges_slab.get_mut(left_slab_id).unwrap();
    range.1 = right_range.1;
    // re-point the absorbed range's end at the surviving slab entry
    self.dirty_end_mapping.insert(right_range.1, left_slab_id);
    let size = range.1 .0 - range.0 .0;
    // amortized priority refresh
    if size & 0b1111 == 0 {
        self.dirty_ranges.change_priority(&left_slab_id, size);
    }
    self.dirty_ranges.remove(&right_slab_id);
}
}
}
}
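The commented-out `todo!()` in `alloc` marks where the batched decommit would go: one `madvise(MADV_DONTNEED)` spanning the whole merged range instead of one call per slot. A hypothetical helper, assuming the pool also carries a `slot_base` pointer and a fixed `slot_size` (exactly the fields the struct's TODO says are still missing):

```rust
/// Decommit a merged dirty range [left, right] with a single madvise call.
///
/// `slot_base` and `slot_size` are assumed inputs; the real pool would
/// need to store them to compute the pointer and length.
#[cfg(unix)]
unsafe fn decommit_range(slot_base: *mut u8, slot_size: usize, left: usize, right: usize) {
    let ptr = slot_base.add(left * slot_size);
    let len = (right - left + 1) * slot_size;
    // One syscall for the contiguous range instead of one per slot.
    let rc = libc::madvise(ptr as *mut libc::c_void, len, libc::MADV_DONTNEED);
    assert_eq!(rc, 0, "madvise failed: {}", std::io::Error::last_os_error());
}
```

With those fields in place, `alloc` would call `decommit_range(slot_base, slot_size, left.0, right.0)` right before pushing the freed slots onto `clean`.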

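And to see the merging end to end, a hypothetical unit test for `LazyPool`: freeing slots 3, 5, then 4 bridges the two single-slot ranges into one (3, 5) range, and the next `alloc` drains it.

```rust
#[cfg(test)]
mod tests {
    use super::{LazyPool, SlotId};

    #[test]
    fn frees_coalesce_into_one_range() {
        // Start with no clean slots so every free lands in the dirty set.
        let mut pool = LazyPool::new(vec![], 8);
        assert!(pool.is_empty());

        pool.free(SlotId(3));
        pool.free(SlotId(5));
        pool.free(SlotId(4)); // bridges (3,3) and (5,5) into (3,5)

        // alloc pops the merged range, returns its first slot, and moves
        // the remaining slots onto the clean list.
        assert_eq!(pool.alloc(), SlotId(3));
        assert_eq!(pool.alloc(), SlotId(5));
        assert_eq!(pool.alloc(), SlotId(4));
        assert!(pool.is_empty());
    }
}
```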