Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

mpk: limit the number of protection keys #7364

Merged
merged 4 commits
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions crates/runtime/src/instance/allocator/pooling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,8 @@ pub struct PoolingInstanceAllocatorConfig {
pub table_keep_resident: usize,
/// Whether to enable memory protection keys.
pub memory_protection_keys: MpkEnabled,
/// How many memory protection keys to allocate.
pub max_memory_protection_keys: usize,
}

impl Default for PoolingInstanceAllocatorConfig {
Expand All @@ -232,6 +234,7 @@ impl Default for PoolingInstanceAllocatorConfig {
linear_memory_keep_resident: 0,
table_keep_resident: 0,
memory_protection_keys: MpkEnabled::Disable,
max_memory_protection_keys: 16,
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,14 +156,14 @@ impl MemoryPool {
let pkeys = match config.memory_protection_keys {
MpkEnabled::Auto => {
if mpk::is_supported() {
mpk::keys()
mpk::keys(config.max_memory_protection_keys)
} else {
&[]
}
}
MpkEnabled::Enable => {
if mpk::is_supported() {
mpk::keys()
mpk::keys(config.max_memory_protection_keys)
} else {
bail!("mpk is disabled on this system")
}
Expand Down
2 changes: 1 addition & 1 deletion crates/runtime/src/mpk/disabled.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use anyhow::Result;
/// Report whether memory protection keys (MPK) are available.
///
/// This is the fallback stub compiled when MPK support is disabled, so it
/// always returns `false`.
pub fn is_supported() -> bool {
    false
}
pub fn keys() -> &'static [ProtectionKey] {
pub fn keys(_: usize) -> &'static [ProtectionKey] {
&[]
}
/// No-op stub: with MPK compiled out there are no keys to mark as allowed.
pub fn allow(_: ProtectionMask) {}
Expand Down
39 changes: 22 additions & 17 deletions crates/runtime/src/mpk/enabled.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,34 +9,39 @@ pub fn is_supported() -> bool {
cfg!(target_os = "linux") && cfg!(target_arch = "x86_64") && pkru::has_cpuid_bit_set()
}

/// Allocate all protection keys available to this process.
/// Allocate up to `max` protection keys.
///
/// This asks the kernel for all available keys (we expect 1-15; 0 is
/// kernel-reserved) in a thread-safe way. This avoids interference when
/// This asks the kernel for all available keys up to `max` in a thread-safe way
/// (we can expect 1-15; 0 is kernel-reserved). This avoids interference when
/// multiple threads try to allocate keys at the same time (e.g., during
/// testing). It also ensures that a single copy of the keys are reserved for
/// the lifetime of the process.
/// testing). It also ensures that a single copy of the keys is reserved for the
/// lifetime of the process. Because of this, `max` is only a hint to
/// allocation: it only is effective on the first invocation of this function.
///
/// TODO: this is not the best-possible design. This creates global state that
/// would prevent any other code in the process from using protection keys; the
/// `KEYS` are never deallocated from the system with `pkey_dealloc`.
pub fn keys() -> &'static [ProtectionKey] {
pub fn keys(max: usize) -> &'static [ProtectionKey] {
abrown marked this conversation as resolved.
Show resolved Hide resolved
let keys = KEYS.get_or_init(|| {
let mut allocated = vec![];
if is_supported() {
while let Ok(key_id) = sys::pkey_alloc(0, 0) {
debug_assert!(key_id < 16);
// UNSAFETY: here we unsafely assume that the system-allocated
// pkey will exist forever.
allocated.push(ProtectionKey {
id: key_id,
stripe: allocated.len().try_into().unwrap(),
});
while allocated.len() < max {
if let Ok(key_id) = sys::pkey_alloc(0, 0) {
debug_assert!(key_id < 16);
// UNSAFETY: here we unsafely assume that the
// system-allocated pkey will exist forever.
allocated.push(ProtectionKey {
id: key_id,
stripe: allocated.len().try_into().unwrap(),
});
} else {
break;
}
}
}
allocated
});
&keys
&keys[..keys.len().min(max)]
}
static KEYS: OnceLock<Vec<ProtectionKey>> = OnceLock::new();

Expand Down Expand Up @@ -152,14 +157,14 @@ mod tests {
#[test]
fn check_initialized_keys() {
    // When MPK is available, asking for up to the architectural maximum of
    // 15 user keys should yield at least one allocated key.
    if is_supported() {
        assert!(!keys(15).is_empty())
    }
}

#[test]
fn check_invalid_mark() {
skip_if_mpk_unavailable!();
let pkey = keys()[0];
let pkey = keys(15)[0];
let unaligned_region = unsafe {
let addr = 1 as *mut u8; // this is not page-aligned!
let len = 1;
Expand Down
17 changes: 17 additions & 0 deletions crates/wasmtime/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2386,6 +2386,23 @@ impl PoolingAllocationConfig {
self
}

/// Cap the number of memory protection keys (MPK) Wasmtime may allocate.
///
/// This only has an effect when
/// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
/// or `auto`. Setting this higher than the hardware and OS limits
/// (typically 15) has no additional effect.
///
/// Protection keys are a process-wide resource: when several Wasmtime
/// engines run in one process they all share the same set of allocated
/// keys. This setting limits how many keys are allocated initially and
/// thus how many are available to all other engines.
pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
    self.config.max_memory_protection_keys = max;
    self
}

/// Check if memory protection keys (MPK) are available on the current host.
///
/// This is a convenience method for determining MPK availability using the
Expand Down
12 changes: 7 additions & 5 deletions tests/all/wast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,11 +123,13 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
}

// The limits here are crafted such that the wast tests should pass.
// However, these limits may become insufficient in the future as the wast tests change.
// If a wast test fails because of a limit being "exceeded" or if memory/table
// fails to grow, the values here will need to be adjusted.
// However, these limits may become insufficient in the future as the
// wast tests change. If a wast test fails because of a limit being
// "exceeded" or if memory/table fails to grow, the values here will
// need to be adjusted.
let mut pool = PoolingAllocationConfig::default();
pool.total_memories(450)
pool.total_memories(450 * 2)
.max_memory_protection_keys(2)
.memory_pages(805)
.max_memories_per_module(if multi_memory { 9 } else { 1 })
.max_tables_per_module(4);
Expand Down Expand Up @@ -180,7 +182,7 @@ fn feature_found_src(bytes: &[u8], name: &str) -> bool {
// specified maximum we can put a cap on the virtual address space reservations
// made.
fn lock_pooling() -> impl Drop {
const MAX_CONCURRENT_POOLING: u32 = 8;
const MAX_CONCURRENT_POOLING: u32 = 4;
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure about this?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This makes some sense to me in that you doubled the number of memories which would halve the limit of concurrent tests because right now each test creates its own engine.


static ACTIVE: Lazy<MyState> = Lazy::new(MyState::default);

Expand Down