Change stack walking to stop at a precise fp (#9420)
* Change stack walking to stop at a precise fp

Prior to this commit, entry trampolines into wasm recorded their stack
pointer at the time of the call into wasm, and that stack pointer was
later used to halt the stack-walking process. The problem is that under
the `tail` ABI a callee may temporarily update its caller's stack
pointer, so the stack pointer recorded when the trampoline called wasm
may differ from the callee's idea of the stack pointer when a backtrace
happens.

To handle this condition, the frame pointer, rather than the stack
pointer, is now recorded when wasm is invoked. The frame pointer is a
trusted value since it's managed by Cranelift itself, and it also
enables the stop condition for frame walking to be a precise "it must
be exactly this value" check.
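As a rough illustration (this is a hedged sketch, not code from this
commit or Wasmtime's actual unwinder), walking a frame-pointer chain
with such a precise stop condition might look like the following; the
helper name and the "return address one word above the saved frame
pointer" layout are assumptions that hold on x86_64 and aarch64:

```rust
/// Sketch: walk a frame-pointer chain, reporting each return address,
/// and stop exactly when `fp` equals the frame pointer recorded by the
/// entry trampoline. Names and offsets here are assumptions.
unsafe fn walk_frames(mut fp: usize, trampoline_fp: usize, mut visit: impl FnMut(usize)) {
    while fp != trampoline_fp {
        // On x86_64/aarch64 the return address is saved one word above
        // the saved frame pointer of the next older frame.
        let pc = *(fp as *const usize).add(1);
        visit(pc);
        // The next older frame pointer lives at offset 0 from `fp`.
        let next_older_fp = *(fp as *const usize);
        // Frame pointers grow strictly towards older frames, so an
        // exact-equality stop condition cannot be skipped over.
        debug_assert!(next_older_fp > fp, "frame-pointer chain must be monotonic");
        fp = next_older_fp;
    }
}
```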

Put together, this commit fixes an issue where the first few frames of
a stack trace could be lost when `return_call` is used. The frame
pointer chain is now always walked precisely and in its entirety, even
as `return_call` instructions execute between functions with differing
numbers of parameters and results.
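For a concrete picture of the scenario being fixed, here is a hedged
sketch using Wasmtime's public API (not part of this commit; the module
and names are made up) that captures a host-side backtrace while the
current wasm frame was entered via `return_call`:

```rust
use wasmtime::*;

fn main() -> Result<()> {
    let mut config = Config::new();
    config.wasm_tail_call(true);
    let engine = Engine::new(&config)?;
    // `run` enters `$leaf` via `return_call`, so `run`'s own frame is
    // gone by the time the host captures a backtrace.
    let module = Module::new(
        &engine,
        r#"(module
            (import "host" "check" (func $check))
            (func $leaf call $check)
            (func (export "run") return_call $leaf))"#,
    )?;
    let mut linker = Linker::new(&engine);
    linker.func_wrap("host", "check", |caller: Caller<'_, ()>| {
        // Before this fix, frames entered via `return_call` could be
        // missing here; afterwards `$leaf` is reliably reported.
        let trace = WasmBacktrace::capture(&caller);
        assert!(!trace.frames().is_empty());
    })?;
    let mut store = Store::new(&engine, ());
    let run = linker
        .instantiate(&mut store, &module)?
        .get_typed_func::<(), ()>(&mut store, "run")?;
    run.call(&mut store, ())?;
    Ok(())
}
```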

* Add tail-calls, params, and results to stacks fuzzer

This commit extends the preexisting `stacks` fuzzer with a few new
features:

* `return_call` instructions are now generated (see the sketch after this list)
* functions may have both params and results, to exercise the logic
  around stack adjustments and how that might affect a stack trace
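As context for the generator changes below, here is a minimal,
self-contained sketch of emitting a `return_call` with `wasm-encoder`,
mirroring the API style used in the diff; this helper is hypothetical
and not part of the commit:

```rust
use wasm_encoder::{
    CodeSection, Function, FunctionSection, Instruction, Module, TypeSection, ValType,
};

/// Build a module with two functions of type (i32) -> i32 where the
/// second tail-calls the first via `return_call`.
fn build_return_call_module() -> Vec<u8> {
    let mut types = TypeSection::new();
    types.ty().function([ValType::I32], [ValType::I32]); // type 0

    let mut funcs = FunctionSection::new();
    funcs.function(0); // func 0: identity
    funcs.function(0); // func 1: tail-calls func 0

    let mut code = CodeSection::new();

    let mut identity = Function::new([]);
    identity.instruction(&Instruction::LocalGet(0));
    identity.instruction(&Instruction::End);
    code.function(&identity);

    // `return_call` replaces the caller's frame, so the caller never
    // shows up in a backtrace taken inside the callee.
    let mut caller = Function::new([]);
    caller.instruction(&Instruction::LocalGet(0));
    caller.instruction(&Instruction::ReturnCall(0));
    caller.instruction(&Instruction::End);
    code.function(&caller);

    let mut module = Module::new();
    module.section(&types);
    module.section(&funcs);
    module.section(&code);
    module.finish()
}
```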
alexcrichton authored Oct 9, 2024
1 parent 151c0f7 commit de469e2
Showing 14 changed files with 236 additions and 175 deletions.
10 changes: 5 additions & 5 deletions crates/cranelift/src/compiler.rs
@@ -280,7 +280,7 @@ impl wasmtime_environ::Compiler for Compiler {
debug_assert_vmctx_kind(isa, &mut builder, vmctx, wasmtime_environ::VMCONTEXT_MAGIC);
let offsets = VMOffsets::new(isa.pointer_bytes(), &translation.module);
let vm_runtime_limits_offset = offsets.ptr.vmctx_runtime_limits();
- save_last_wasm_entry_sp(
+ save_last_wasm_entry_fp(
&mut builder,
pointer_type,
&offsets.ptr,
@@ -969,7 +969,7 @@ fn debug_assert_vmctx_kind(
}
}

- fn save_last_wasm_entry_sp(
+ fn save_last_wasm_entry_fp(
builder: &mut FunctionBuilder,
pointer_type: ir::Type,
ptr_size: &impl PtrSize,
@@ -985,12 +985,12 @@ fn save_last_wasm_entry_sp(
);

// Then store our current stack pointer into the appropriate slot.
- let sp = builder.ins().get_stack_pointer(pointer_type);
+ let fp = builder.ins().get_frame_pointer(pointer_type);
builder.ins().store(
MemFlags::trusted(),
- sp,
+ fp,
limits,
- ptr_size.vmruntime_limits_last_wasm_entry_sp(),
+ ptr_size.vmruntime_limits_last_wasm_entry_fp(),
);
}

4 changes: 2 additions & 2 deletions crates/environ/src/vmoffsets.rs
@@ -172,8 +172,8 @@ pub trait PtrSize {
self.vmruntime_limits_last_wasm_exit_fp() + self.size()
}

- /// Return the offset of the `last_wasm_entry_sp` field of `VMRuntimeLimits`.
- fn vmruntime_limits_last_wasm_entry_sp(&self) -> u8 {
+ /// Return the offset of the `last_wasm_entry_fp` field of `VMRuntimeLimits`.
+ fn vmruntime_limits_last_wasm_entry_fp(&self) -> u8 {
self.vmruntime_limits_last_wasm_exit_pc() + self.size()
}

136 changes: 98 additions & 38 deletions crates/fuzzing/src/generators/stacks.rs
@@ -8,10 +8,11 @@
use std::mem;

use arbitrary::{Arbitrary, Result, Unstructured};
- use wasm_encoder::Instruction;
+ use wasm_encoder::{Instruction, ValType};

- const MAX_FUNCS: usize = 20;
+ const MAX_FUNCS: u32 = 20;
const MAX_OPS: usize = 1_000;
+ const MAX_PARAMS: usize = 10;

/// Generate a Wasm module that keeps track of its current call stack, to
/// compare to the host.
@@ -24,13 +25,16 @@ pub struct Stacks {
#[derive(Debug, Default)]
struct Function {
ops: Vec<Op>,
+ params: usize,
+ results: usize,
}

- #[derive(Arbitrary, Debug, Clone, Copy)]
+ #[derive(Debug, Clone, Copy)]
enum Op {
CheckStackInHost,
Call(u32),
CallThroughHost(u32),
+ ReturnCall(u32),
}

impl<'a> Arbitrary<'a> for Stacks {
@@ -44,35 +48,44 @@ impl<'a> Arbitrary<'a> for Stacks {

impl Stacks {
fn arbitrary_funcs(u: &mut Unstructured) -> Result<Vec<Function>> {
- let mut funcs = vec![Function::default()];
-
- // The indices of functions within `funcs` that we still need to
- // generate.
- let mut work_list = vec![0];
+ // Generate a list of functions first with a number of parameters and
+ // results. Bodies are generated afterwards.
+ let nfuncs = u.int_in_range(1..=MAX_FUNCS)?;
+ let mut funcs = (0..nfuncs)
+     .map(|_| {
+         Ok(Function {
+             ops: Vec::new(), // generated later
+             params: u.int_in_range(0..=MAX_PARAMS)?,
+             results: u.int_in_range(0..=MAX_PARAMS)?,
+         })
+     })
+     .collect::<Result<Vec<_>>>()?;
+ let mut funcs_by_result = vec![Vec::new(); MAX_PARAMS + 1];
+ for (i, func) in funcs.iter().enumerate() {
+     funcs_by_result[func.results].push(i as u32);
+ }

- while let Some(f) = work_list.pop() {
-     let mut ops = Vec::with_capacity(u.arbitrary_len::<Op>()?.min(MAX_OPS));
-     for _ in 0..ops.capacity() {
-         ops.push(u.arbitrary()?);
-     }
-     for op in &mut ops {
-         match op {
-             Op::CallThroughHost(idx) | Op::Call(idx) => {
-                 if u.is_empty() || funcs.len() >= MAX_FUNCS || u.ratio(4, 5)? {
-                     // Call an existing function.
-                     *idx = *idx % u32::try_from(funcs.len()).unwrap();
-                 } else {
-                     // Call a new function...
-                     *idx = u32::try_from(funcs.len()).unwrap();
-                     // ...which means we also need to eventually define it.
-                     work_list.push(funcs.len());
-                     funcs.push(Function::default());
-                 }
-             }
-             Op::CheckStackInHost => {}
-         }
-     }
-     funcs[f].ops = ops;
+ // Fill in each function body with various instructions/operations now
+ // that the set of functions is known.
+ for f in funcs.iter_mut() {
+     let funcs_with_same_results = &funcs_by_result[f.results];
+     for _ in 0..u.arbitrary_len::<usize>()?.min(MAX_OPS) {
+         let op = match u.int_in_range(0..=3)? {
+             0 => Op::CheckStackInHost,
+             1 => Op::Call(u.int_in_range(0..=nfuncs - 1)?),
+             2 => Op::CallThroughHost(u.int_in_range(0..=nfuncs - 1)?),
+             // This only works if the target function has the same
+             // number of results, so choose from a different set here.
+             3 => Op::ReturnCall(*u.choose(funcs_with_same_results)?),
+             _ => unreachable!(),
+         };
+         f.ops.push(op);
+         // once a `return_call` has been generated there's no need to
+         // generate any more instructions, so fall through to below.
+         if let Some(Op::ReturnCall(_)) = f.ops.last() {
+             break;
+         }
+     }
}

Ok(funcs)
@@ -120,22 +133,30 @@ impl Stacks {
vec![wasm_encoder::ValType::I32, wasm_encoder::ValType::I32],
);

- let null_type = types.len();
- types.ty().function(vec![], vec![]);

let call_func_type = types.len();
types
.ty()
.function(vec![wasm_encoder::ValType::FUNCREF], vec![]);

+ let check_stack_type = types.len();
+ types.ty().function(vec![], vec![]);

+ let func_types_start = types.len();
+ for func in self.funcs.iter() {
+     types.ty().function(
+         vec![ValType::I32; func.params],
+         vec![ValType::I32; func.results],
+     );
+ }

section(&mut module, types);

let mut imports = wasm_encoder::ImportSection::new();
let check_stack_func = 0;
imports.import(
"host",
"check_stack",
- wasm_encoder::EntityType::Function(null_type),
+ wasm_encoder::EntityType::Function(check_stack_type),
);
let call_func_func = 1;
imports.import(
Expand All @@ -147,8 +168,8 @@ impl Stacks {
section(&mut module, imports);

let mut funcs = wasm_encoder::FunctionSection::new();
- for _ in &self.funcs {
-     funcs.function(null_type);
+ for (i, _) in self.funcs.iter().enumerate() {
+     funcs.function(func_types_start + (i as u32));
}
let run_func = funcs.len() + num_imported_funcs;
funcs.function(run_type);
@@ -246,6 +267,25 @@
.instruction(&Instruction::GlobalSet(stack_len_global));
};

+ let push_params = |body: &mut wasm_encoder::Function, func: u32| {
+     let func = &self.funcs[func as usize];
+     for _ in 0..func.params {
+         body.instruction(&Instruction::I32Const(0));
+     }
+ };
+ let pop_results = |body: &mut wasm_encoder::Function, func: u32| {
+     let func = &self.funcs[func as usize];
+     for _ in 0..func.results {
+         body.instruction(&Instruction::Drop);
+     }
+ };
+ let push_results = |body: &mut wasm_encoder::Function, func: u32| {
+     let func = &self.funcs[func as usize];
+     for _ in 0..func.results {
+         body.instruction(&Instruction::I32Const(0));
+     }
+ };

let mut code = wasm_encoder::CodeSection::new();
for (func_index, func) in self.funcs.iter().enumerate() {
let mut body = wasm_encoder::Function::new(vec![]);
@@ -256,19 +296,35 @@ impl Stacks {
);
check_fuel(&mut body);

+ let mut check_fuel_and_pop_at_end = true;

// Perform our specified operations.
for op in &func.ops {
+ assert!(check_fuel_and_pop_at_end);
match op {
Op::CheckStackInHost => {
body.instruction(&Instruction::Call(check_stack_func));
}
Op::Call(f) => {
+ push_params(&mut body, *f);
body.instruction(&Instruction::Call(f + num_imported_funcs));
+ pop_results(&mut body, *f);
}
Op::CallThroughHost(f) => {
body.instruction(&Instruction::RefFunc(f + num_imported_funcs))
.instruction(&Instruction::Call(call_func_func));
}

+ // For a `return_call` preemptively check fuel to possibly
+ // trap and then pop our function from the in-wasm managed
+ // stack. After that execute the `return_call` itself.
+ Op::ReturnCall(f) => {
+     push_params(&mut body, *f);
+     check_fuel(&mut body);
+     pop_func_from_stack(&mut body);
+     check_fuel_and_pop_at_end = false;
+     body.instruction(&Instruction::ReturnCall(f + num_imported_funcs));
+ }
}
}

@@ -278,9 +334,11 @@
// function, but then we returned back to Wasm and then trapped
// while `last_wasm_exit_sp` et al are still initialized from that
// previous host call.
- check_fuel(&mut body);
-
- pop_func_from_stack(&mut body);
+ if check_fuel_and_pop_at_end {
+     check_fuel(&mut body);
+     pop_func_from_stack(&mut body);
+     push_results(&mut body, func_index as u32);
+ }

function(&mut code, body);
}
@@ -307,7 +365,9 @@
check_fuel(&mut run_body);

// Call the first locally defined function.
+ push_params(&mut run_body, 0);
run_body.instruction(&Instruction::Call(num_imported_funcs));
+ pop_results(&mut run_body, 0);

check_fuel(&mut run_body);
pop_func_from_stack(&mut run_body);
5 changes: 4 additions & 1 deletion crates/fuzzing/src/oracles/stacks.rs
@@ -40,7 +40,10 @@ pub fn check_stacks(stacks: Stacks) -> usize {
"call_func",
|mut caller: Caller<'_, ()>, f: Option<Func>| {
let f = f.unwrap();
- f.call(&mut caller, &[], &mut [])?;
+ let ty = f.ty(&caller);
+ let params = vec![Val::I32(0); ty.params().len()];
+ let mut results = vec![Val::I32(0); ty.results().len()];
+ f.call(&mut caller, &params, &mut results)?;
Ok(())
},
)
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/aarch64.rs
@@ -50,14 +50,6 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
// And the current frame pointer points to the next older frame pointer.
pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;

- pub fn reached_entry_sp(fp: usize, entry_sp: usize) -> bool {
-     fp >= entry_sp
- }
-
- pub fn assert_entry_sp_is_aligned(sp: usize) {
-     assert_eq!(sp % 16, 0, "stack should always be aligned to 16");
- }

pub fn assert_fp_is_aligned(_fp: usize) {
// From AAPCS64, section 6.2.3 The Frame Pointer[0]:
//
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/mod.rs
@@ -40,14 +40,6 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {

pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = imp::NEXT_OLDER_FP_FROM_FP_OFFSET;

- pub fn reached_entry_sp(fp: usize, entry_sp: usize) -> bool {
-     imp::reached_entry_sp(fp, entry_sp)
- }
-
- pub fn assert_entry_sp_is_aligned(sp: usize) {
-     imp::assert_entry_sp_is_aligned(sp)
- }

pub fn assert_fp_is_aligned(fp: usize) {
imp::assert_fp_is_aligned(fp)
}
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/riscv64.rs
@@ -21,14 +21,6 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
// And the current frame pointer points to the next older frame pointer.
pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;

- pub fn reached_entry_sp(fp: usize, entry_sp: usize) -> bool {
-     fp >= entry_sp
- }
-
- pub fn assert_entry_sp_is_aligned(sp: usize) {
-     assert_eq!(sp % 16, 0, "stack should always be aligned to 16");
- }

pub fn assert_fp_is_aligned(fp: usize) {
assert_eq!(fp % 16, 0, "stack should always be aligned to 16");
}
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/s390x.rs
@@ -17,14 +17,6 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
// by the current "FP".
pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;

- pub fn reached_entry_sp(fp: usize, entry_sp: usize) -> bool {
-     fp > entry_sp
- }
-
- pub fn assert_entry_sp_is_aligned(sp: usize) {
-     assert_eq!(sp % 8, 0, "stack should always be aligned to 8");
- }

pub fn assert_fp_is_aligned(fp: usize) {
assert_eq!(fp % 8, 0, "stack should always be aligned to 8");
}
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/unsupported.rs
@@ -31,14 +31,6 @@ pub unsafe fn get_next_older_pc_from_fp(_fp: usize) -> usize {

pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;

- pub fn reached_entry_sp(_fp: usize, _entry_sp: usize) -> bool {
-     panic!()
- }
-
- pub fn assert_entry_sp_is_aligned(_sp: usize) {
-     panic!()
- }

pub fn assert_fp_is_aligned(_fp: usize) {
panic!()
}
8 changes: 0 additions & 8 deletions crates/wasmtime/src/runtime/vm/arch/x86_64.rs
@@ -23,14 +23,6 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
// And the current frame pointer points to the next older frame pointer.
pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;

- pub fn reached_entry_sp(fp: usize, entry_sp: usize) -> bool {
-     fp >= entry_sp
- }
-
- pub fn assert_entry_sp_is_aligned(sp: usize) {
-     assert_eq!(sp % 16, 0, "stack should always be aligned to 16");
- }

pub fn assert_fp_is_aligned(fp: usize) {
assert_eq!(fp % 16, 0, "stack should always be aligned to 16");
}