From d69ba34ee30004e30a00928da000a50d2797f297 Mon Sep 17 00:00:00 2001
From: Trevor Elliott
Date: Thu, 21 Mar 2024 13:59:43 -0400
Subject: [PATCH] winch: Switch to using cranelift for all trampolines (#8109)

* Switch winch over to using cranelift for all trampolines
* Fix unused code warnings

prtest:full
---
 crates/winch/src/compiler.rs          |  59 +--
 winch/codegen/src/abi/mod.rs          |  59 +--
 winch/codegen/src/isa/aarch64/mod.rs  |  10 +-
 winch/codegen/src/isa/aarch64/regs.rs |   1 +
 winch/codegen/src/isa/mod.rs          |  12 +-
 winch/codegen/src/isa/x64/mod.rs      |  32 --
 winch/codegen/src/lib.rs              |   2 -
 winch/codegen/src/trampoline.rs       | 626 --------------------------
 8 files changed, 15 insertions(+), 786 deletions(-)
 delete mode 100644 winch/codegen/src/trampoline.rs

diff --git a/crates/winch/src/compiler.rs b/crates/winch/src/compiler.rs
index a9d52de34708..dd0629218186 100644
--- a/crates/winch/src/compiler.rs
+++ b/crates/winch/src/compiler.rs
@@ -11,7 +11,7 @@ use wasmtime_environ::{
     ModuleTranslation, ModuleTypesBuilder, PrimaryMap, TrapEncodingBuilder, VMOffsets,
     WasmFunctionInfo,
 };
-use winch_codegen::{BuiltinFunctions, TargetIsa, TrampolineKind};
+use winch_codegen::{BuiltinFunctions, TargetIsa};
 
 /// Function compilation context.
 /// This struct holds information that can be shared globally across
@@ -25,11 +25,6 @@ struct CompilationContext {
 
 pub(crate) struct Compiler {
     isa: Box<dyn TargetIsa>,
-
-    /// The trampoline compiler is only used for the component model currently, but will soon be
-    /// used for all winch trampolines. For now, mark it as unused to handle the situation where
-    /// the component-model feature is disabled.
-    #[allow(unused)]
     trampolines: Box<dyn wasmtime_environ::Compiler>,
     contexts: Mutex<Vec<CompilationContext>>,
 }
@@ -150,22 +145,8 @@ impl wasmtime_environ::Compiler for Compiler {
         types: &ModuleTypesBuilder,
         index: DefinedFuncIndex,
     ) -> Result<Box<dyn Any + Send>, CompileError> {
-        let func_index = translation.module.func_index(index);
-        let sig = translation.module.functions[func_index].signature;
-        let ty = &types[sig];
-        let buffer = self
-            .isa
-            .compile_trampoline(&ty, TrampolineKind::ArrayToWasm(func_index))
-            .map_err(|e| CompileError::Codegen(format!("{:?}", e)))?;
-
-        let mut compiled_function =
-            CompiledFunction::new(buffer, CompiledFuncEnv {}, self.isa.function_alignment());
-
-        if self.isa.flags().unwind_info() {
-            self.emit_unwind_info(&mut compiled_function)?;
-        }
-
-        Ok(Box::new(compiled_function))
+        self.trampolines
+            .compile_array_to_wasm_trampoline(translation, types, index)
     }
 
     fn compile_native_to_wasm_trampoline(
@@ -174,42 +155,16 @@ impl wasmtime_environ::Compiler for Compiler {
         types: &ModuleTypesBuilder,
         index: DefinedFuncIndex,
     ) -> Result<Box<dyn Any + Send>, CompileError> {
-        let func_index = translation.module.func_index(index);
-        let sig = translation.module.functions[func_index].signature;
-        let ty = &types[sig];
-
-        let buffer = self
-            .isa
-            .compile_trampoline(ty, TrampolineKind::NativeToWasm(func_index))
-            .map_err(|e| CompileError::Codegen(format!("{:?}", e)))?;
-
-        let mut compiled_function =
-            CompiledFunction::new(buffer, CompiledFuncEnv {}, self.isa.function_alignment());
-
-        if self.isa.flags().unwind_info() {
-            self.emit_unwind_info(&mut compiled_function)?;
-        }
-
-        Ok(Box::new(compiled_function))
+        self.trampolines
+            .compile_native_to_wasm_trampoline(translation, types, index)
     }
 
     fn compile_wasm_to_native_trampoline(
         &self,
         wasm_func_ty: &wasmtime_environ::WasmFuncType,
     ) -> Result<Box<dyn Any + Send>, CompileError> {
-        let buffer = self
-            .isa
-            .compile_trampoline(wasm_func_ty, TrampolineKind::WasmToNative)
-            .map_err(|e| CompileError::Codegen(format!("{:?}", e)))?;
-
-        let mut compiled_function =
-            CompiledFunction::new(buffer, CompiledFuncEnv {}, self.isa.function_alignment());
-
-        if self.isa.flags().unwind_info() {
-            self.emit_unwind_info(&mut compiled_function)?;
-        }
-
-        Ok(Box::new(compiled_function))
+        self.trampolines
+            .compile_wasm_to_native_trampoline(wasm_func_ty)
     }
 
     fn append_code(
diff --git a/winch/codegen/src/abi/mod.rs b/winch/codegen/src/abi/mod.rs
index 2676d870adcb..2c3055d7a549 100644
--- a/winch/codegen/src/abi/mod.rs
+++ b/winch/codegen/src/abi/mod.rs
@@ -88,60 +88,6 @@ pub(crate) fn vmctx_types<A: ABI>() -> [WasmValType; 2] {
     [A::ptr_type(), A::ptr_type()]
 }
 
-/// Returns an [ABISig] for the array calling convention.
-/// The signature looks like:
-/// ```ignore
-/// unsafe extern "C" fn(
-///     callee_vmctx: *mut VMOpaqueContext,
-///     caller_vmctx: *mut VMOpaqueContext,
-///     values_ptr: *mut ValRaw,
-///     values_len: usize,
-/// )
-/// ```
-pub(crate) fn array_sig<A: ABI>(call_conv: &CallingConvention) -> ABISig {
-    let params = [A::ptr_type(), A::ptr_type(), A::ptr_type(), A::ptr_type()];
-    A::sig_from(&params, &[], call_conv)
-}
-
-/// Returns an [ABISig] that follows a variation of the system's
-/// calling convention.
-/// The main difference between the flavor of the returned signature
-/// and the vanilla signature is how multiple values are returned.
-/// Multiple returns are handled following Wasmtime's expectations:
-/// * A single value is returned via a register according to the calling
-///   convention.
-/// * More than one value is returned via a return pointer.
-/// These variations look like:
-///
-/// Single return value.
-///
-/// ```ignore
-/// unsafe extern "C" fn(
-///     callee_vmctx: *mut VMOpaqueContext,
-///     caller_vmctx: *mut VMOpaqueContext,
-///     // rest of parameters
-/// ) -> // single result
-/// ```
-///
-/// Multiple return values.
-///
-/// ```ignore
-/// unsafe extern "C" fn(
-///     callee_vmctx: *mut VMOpaqueContext,
-///     caller_vmctx: *mut VMOpaqueContext,
-///     // rest of parameters
-///     retptr: *mut (), // 2+ results
-/// ) -> // first result
-/// ```
-pub(crate) fn native_sig<A: ABI>(ty: &WasmFuncType, call_conv: &CallingConvention) -> ABISig {
-    // 6 is used semi-arbitrarily here, we can modify as we see fit.
-    let mut params: SmallVec<[WasmValType; 6]> = SmallVec::new();
-    params.extend_from_slice(&vmctx_types::<A>());
-    params.extend_from_slice(ty.params());
-
-    A::sig_from(&params, ty.returns(), call_conv)
-}
-
 /// Trait implemented by a specific ISA and used to provide
 /// information about alignment, parameter passing, usage of
 /// specific registers, etc.
@@ -156,6 +102,7 @@ pub(crate) trait ABI {
     fn arg_base_offset() -> u8;
 
     /// The offset to the return address, relative to the frame pointer.
+    #[allow(unused)]
     fn ret_addr_offset() -> u8;
 
     /// Construct the ABI-specific signature from a WebAssembly
@@ -204,9 +151,11 @@ pub(crate) trait ABI {
     }
 
     /// Returns the frame pointer register.
+    #[allow(unused)]
     fn fp_reg() -> Reg;
 
     /// Returns the stack pointer register.
+    #[allow(unused)]
     fn sp_reg() -> Reg;
 
     /// Returns the pinned register used to hold
@@ -215,6 +164,7 @@ pub(crate) trait ABI {
 
     /// Returns the callee-saved registers for the given
     /// calling convention.
+    #[allow(unused)]
     fn callee_saved_regs(call_conv: &CallingConvention) -> SmallVec<[(Reg, OperandSize); 18]>;
 
     /// The size, in bytes, of each stack slot used for stack parameter passing.
@@ -609,6 +559,7 @@ impl ABIParams {
     }
 
     /// Get the [`ABIOperand`] param in the nth position.
+    #[allow(unused)]
     pub fn get(&self, n: usize) -> Option<&ABIOperand> {
         self.operands.inner.get(n)
     }
diff --git a/winch/codegen/src/isa/aarch64/mod.rs b/winch/codegen/src/isa/aarch64/mod.rs
index 52ef3e125004..e17d34dede4a 100644
--- a/winch/codegen/src/isa/aarch64/mod.rs
+++ b/winch/codegen/src/isa/aarch64/mod.rs
@@ -8,7 +8,7 @@ use crate::{
     regalloc::RegAlloc,
     regset::RegBitSet,
     stack::Stack,
-    BuiltinFunctions, TrampolineKind,
+    BuiltinFunctions,
 };
 use anyhow::Result;
 use cranelift_codegen::settings::{self, Flags};
@@ -136,14 +136,6 @@ impl TargetIsa for Aarch64 {
         32
     }
 
-    fn compile_trampoline(
-        &self,
-        _ty: &WasmFuncType,
-        _kind: TrampolineKind,
-    ) -> Result<MachBufferFinalized<Final>> {
-        todo!()
-    }
-
     fn emit_unwind_info(
         &self,
         _result: &MachBufferFinalized<Final>,
diff --git a/winch/codegen/src/isa/aarch64/regs.rs b/winch/codegen/src/isa/aarch64/regs.rs
index ec540b43fe10..a8d89615a102 100644
--- a/winch/codegen/src/isa/aarch64/regs.rs
+++ b/winch/codegen/src/isa/aarch64/regs.rs
@@ -154,6 +154,7 @@ pub(crate) const ALL_GPR: u32 = u32::MAX & !NON_ALLOCATABLE_GPR;
 /// This function will return the set of registers that need to be saved
 /// according to the system ABI and that are known not to be saved during the
 /// prologue emission.
+#[allow(unused)]
 pub(crate) fn callee_saved() -> SmallVec<[(Reg, OperandSize); 18]> {
     use OperandSize::*;
     let regs: SmallVec<[_; 18]> = smallvec![
diff --git a/winch/codegen/src/isa/mod.rs b/winch/codegen/src/isa/mod.rs
index 2579840d9069..a3bafe9ddbb3 100644
--- a/winch/codegen/src/isa/mod.rs
+++ b/winch/codegen/src/isa/mod.rs
@@ -1,4 +1,4 @@
-use crate::{BuiltinFunctions, TrampolineKind};
+use crate::BuiltinFunctions;
 use anyhow::{anyhow, Result};
 use core::fmt::Formatter;
 use cranelift_codegen::isa::unwind::{UnwindInfo, UnwindInfoKind};
@@ -203,16 +203,6 @@ pub trait TargetIsa: Send + Sync {
     /// See `cranelift_codegen::isa::TargetIsa::function_alignment`.
     fn function_alignment(&self) -> u32;
 
-    /// Compile a trampoline kind.
-    ///
-    /// This function internally dispatches to the right trampoline to emit
-    /// depending on the `kind` parameter.
-    fn compile_trampoline(
-        &self,
-        ty: &WasmFuncType,
-        kind: TrampolineKind,
-    ) -> Result<MachBufferFinalized<Final>>;
-
     /// Returns the pointer width of the ISA in bytes.
     fn pointer_bytes(&self) -> u8 {
         let width = self.triple().pointer_width().unwrap();
diff --git a/winch/codegen/src/isa/x64/mod.rs b/winch/codegen/src/isa/x64/mod.rs
index 20578b0bdf50..9c193c5116a1 100644
--- a/winch/codegen/src/isa/x64/mod.rs
+++ b/winch/codegen/src/isa/x64/mod.rs
@@ -8,7 +8,6 @@ use crate::isa::x64::masm::MacroAssembler as X64Masm;
 use crate::masm::MacroAssembler;
 use crate::regalloc::RegAlloc;
 use crate::stack::Stack;
-use crate::trampoline::{Trampoline, TrampolineKind};
 use crate::{
     isa::{Builder, TargetIsa},
     regset::RegBitSet,
@@ -149,37 +148,6 @@ impl TargetIsa for X64 {
         16
     }
 
-    fn compile_trampoline(
-        &self,
-        ty: &WasmFuncType,
-        kind: TrampolineKind,
-    ) -> Result<MachBufferFinalized<Final>> {
-        use TrampolineKind::*;
-
-        let mut masm = X64Masm::new(
-            self.pointer_bytes(),
-            self.shared_flags.clone(),
-            self.isa_flags.clone(),
-        );
-        let call_conv = self.wasmtime_call_conv();
-
-        let trampoline = Trampoline::new(
-            &mut masm,
-            regs::scratch(),
-            regs::argv(),
-            &call_conv,
-            self.pointer_bytes(),
-        );
-
-        match kind {
-            ArrayToWasm(idx) => trampoline.emit_array_to_wasm(ty, idx)?,
-            NativeToWasm(idx) => trampoline.emit_native_to_wasm(ty, idx)?,
-            WasmToNative => trampoline.emit_wasm_to_native(ty)?,
-        }
-
-        Ok(masm.finalize())
-    }
-
     fn emit_unwind_info(
         &self,
         buffer: &MachBufferFinalized<Final>,
diff --git a/winch/codegen/src/lib.rs b/winch/codegen/src/lib.rs
index 12656ff4a265..30158a082a91 100644
--- a/winch/codegen/src/lib.rs
+++ b/winch/codegen/src/lib.rs
@@ -16,6 +16,4 @@ mod masm;
 mod regalloc;
 mod regset;
 mod stack;
-mod trampoline;
-pub use trampoline::TrampolineKind;
 mod visitor;
diff --git a/winch/codegen/src/trampoline.rs b/winch/codegen/src/trampoline.rs
deleted file mode 100644
index d6994ed24e83..000000000000
--- a/winch/codegen/src/trampoline.rs
+++ /dev/null
@@ -1,626 +0,0 @@
-//! Trampoline implementation for Winch.
-//!
-//! This module contains all the necessary pieces to emit the various
-//! trampolines required by Wasmtime to call JIT code.
-//
-// TODO
-//
-// * Remove the usage of hardcoded operand sizes (`OperandSize::S64`) when
-//   loading/storing the VM context pointer. The real value of the operand size
-//   and VM context type should be derived from the ABI's pointer size. This is
-//   going to be relevant once 32-bit architectures are supported.
-use crate::{
-    abi::{array_sig, native_sig, wasm_sig, ABIOperand, ABIParams, ABISig, RetArea, ABI},
-    codegen::ptr_type_from_ptr_size,
-    isa::CallingConvention,
-    masm::{CalleeKind, MacroAssembler, OperandSize, RegImm, SPOffset, MAX_CONTEXT_ARGS},
-    reg::Reg,
-};
-use anyhow::{anyhow, Result};
-use smallvec::SmallVec;
-use std::mem;
-use wasmtime_environ::{FuncIndex, PtrSize, WasmFuncType, WasmValType};
-
-/// The supported trampoline kinds.
-/// See
-/// for more details.
-pub enum TrampolineKind {
-    /// Calling from native to Wasm, using the array calling convention.
-    ArrayToWasm(FuncIndex),
-    /// Calling from native to Wasm.
-    NativeToWasm(FuncIndex),
-    /// Calling from Wasm to native.
-    WasmToNative,
-}
-
-/// The max value size of an element in the array calling convention.
-const VALUE_SIZE: usize = mem::size_of::<u128>();
-
-/// The main trampoline abstraction.
-pub(crate) struct Trampoline<'a, M>
-where
-    M: MacroAssembler,
-{
-    /// The macro assembler.
-    masm: &'a mut M,
-    /// The main scratch register for the current architecture. It is
-    /// not allocatable for the callee.
-    scratch_reg: Reg,
-    /// A second scratch register. This will be allocatable for the
-    /// callee, so it can only be used after the callee-saved
-    /// registers are on the stack.
-    alloc_scratch_reg: Reg,
-    /// Registers to be saved as part of the trampoline's prologue
-    /// and to be restored as part of the trampoline's epilogue.
-    callee_saved_regs: SmallVec<[(Reg, OperandSize); 18]>,
-    /// The calling convention used by the trampoline,
-    /// which is the Wasmtime variant of the system ABI's
-    /// calling convention.
-    call_conv: &'a CallingConvention,
-    /// The pointer size of the current ISA.
-    pointer_size: M::Ptr,
-    /// WasmType representation of the pointer size.
-    pointer_type: WasmValType,
-}
-
-impl<'a, M> Trampoline<'a, M>
-where
-    M: MacroAssembler,
-{
-    /// Create a new trampoline.
-    pub fn new(
-        masm: &'a mut M,
-        scratch_reg: Reg,
-        alloc_scratch_reg: Reg,
-        call_conv: &'a CallingConvention,
-        pointer_size: M::Ptr,
-    ) -> Self {
-        let size = pointer_size.size();
-        Self {
-            masm,
-            scratch_reg,
-            alloc_scratch_reg,
-            callee_saved_regs: <M::ABI as ABI>::callee_saved_regs(call_conv),
-            call_conv,
-            pointer_size,
-            pointer_type: ptr_type_from_ptr_size(size),
-        }
-    }
-
-    /// Emit an array-to-wasm trampoline.
-    pub fn emit_array_to_wasm(mut self, ty: &WasmFuncType, callee_index: FuncIndex) -> Result<()> {
-        let array_sig = array_sig::<M::ABI>(&self.call_conv);
-        let wasm_sig: ABISig = wasm_sig::<M::ABI>(&ty);
-
-        let val_ptr = array_sig
-            .params
-            .get(2)
-            .map(|operand| RegImm::reg(operand.unwrap_reg()))
-            .ok_or_else(|| anyhow!("Expected value pointer to be in a register"))?;
-
-        // Assign the callee and caller VMContext arguments.
-        let (vmctx, caller_vmctx) = Self::callee_and_caller_vmctx(&array_sig.params)?;
-        let (dst_callee_vmctx, dst_caller_vmctx) = Self::callee_and_caller_vmctx(&wasm_sig.params)?;
-
-        self.masm.prologue(caller_vmctx, &self.callee_saved_regs);
-
-        self.masm
-            .mov(vmctx.into(), dst_callee_vmctx, self.pointer_type.into());
-        self.masm.mov(
-            caller_vmctx.into(),
-            dst_caller_vmctx,
-            self.pointer_type.into(),
-        );
-
-        let ret_area = self.make_ret_area(&wasm_sig);
-        let vmctx_runtime_limits_addr = self.vmctx_runtime_limits_addr(vmctx);
-        let (offsets, spill_size) = self.spill(&array_sig.params()[2..]);
-
-        // Call the function that was passed into the trampoline.
-        let allocated_stack = self.masm.call(wasm_sig.params_stack_size(), |masm| {
-            // Save the SP when entering Wasm.
-            // TODO: Once Winch supports comparison operators,
-            // check that the caller VM context is what we expect.
-            // See [`wasmtime_environ::MAGIC`].
-            Self::save_last_wasm_entry_sp(
-                masm,
-                vmctx_runtime_limits_addr,
-                self.scratch_reg,
-                &self.pointer_size,
-            );
-
-            // Move the values register to the scratch
-            // register for argument assignment.
-            masm.mov(val_ptr, self.scratch_reg.into(), OperandSize::S64);
-            Self::load_values_from_array(
-                masm,
-                &wasm_sig,
-                ret_area.as_ref(),
-                self.scratch_reg,
-                self.alloc_scratch_reg,
-            );
-            CalleeKind::Direct(callee_index.as_u32())
-        });
-
-        self.masm.free_stack(allocated_stack);
-
-        // Move the val ptr back into the scratch register so we can
-        // load the return values.
-        let val_ptr_offset = offsets[0];
-        self.masm
-            .load_ptr(self.masm.address_from_sp(val_ptr_offset), self.scratch_reg);
-
-        self.store_results_to_array(&wasm_sig, ret_area.as_ref());
-
-        if wasm_sig.has_stack_results() {
-            self.masm.free_stack(wasm_sig.results.size());
-        }
-
-        self.masm.free_stack(spill_size);
-        self.masm.epilogue(&self.callee_saved_regs);
-        Ok(())
-    }
-
-    /// Stores the results into the values array used by the array calling
-    /// convention.
-    fn store_results_to_array(&mut self, sig: &ABISig, ret_area: Option<&RetArea>) {
-        for (i, operand) in sig.results().iter().enumerate() {
-            let value_offset = (i * VALUE_SIZE) as u32;
-            match operand {
-                ABIOperand::Reg { ty, reg, .. } => self.masm.store(
-                    (*reg).into(),
-                    self.masm.address_at_reg(self.scratch_reg, value_offset),
-                    (*ty).into(),
-                ),
-                ABIOperand::Stack { ty, offset, .. } => {
-                    let addr = match ret_area.unwrap() {
-                        RetArea::SP(sp_offset) => {
-                            let elem_offs = SPOffset::from_u32(sp_offset.as_u32() - offset);
-                            self.masm.address_from_sp(elem_offs)
-                        }
-                        _ => unreachable!(),
-                    };
-                    let size: OperandSize = (*ty).into();
-                    self.masm.load(addr, self.alloc_scratch_reg, size);
-                    self.masm.store(
-                        self.alloc_scratch_reg.into(),
-                        self.masm.address_at_reg(self.scratch_reg, value_offset),
-                        (*ty).into(),
-                    );
-                }
-            }
-        }
-    }
-
-    /// Emit a native-to-wasm trampoline.
-    pub fn emit_native_to_wasm(mut self, ty: &WasmFuncType, callee_index: FuncIndex) -> Result<()> {
-        let native_sig = native_sig::<M::ABI>(&ty, &self.call_conv);
-        let wasm_sig = wasm_sig::<M::ABI>(&ty);
-        let (vmctx, caller_vmctx) = Self::callee_and_caller_vmctx(&native_sig.params)?;
-
-        self.masm.prologue(caller_vmctx, &self.callee_saved_regs);
-
-        let vmctx_runtime_limits_addr = self.vmctx_runtime_limits_addr(vmctx);
-        let ret_area = self.make_ret_area(&wasm_sig);
-        let (offsets, spill_size) = self.spill(native_sig.params());
-
-        let reserved_stack = self.masm.call(wasm_sig.params_stack_size(), |masm| {
-            // Save the SP when entering Wasm.
-            // TODO: Once Winch supports comparison operators,
-            // check that the caller VM context is what we expect.
-            // See [`wasmtime_environ::MAGIC`].
-            Self::save_last_wasm_entry_sp(
-                masm,
-                vmctx_runtime_limits_addr,
-                self.scratch_reg,
-                &self.pointer_size,
-            );
-            Self::assign_args(
-                masm,
-                &wasm_sig.params_without_retptr(),
-                &native_sig.params_without_retptr(),
-                &offsets,
-                self.scratch_reg,
-            );
-            Self::load_retptr(masm, ret_area.as_ref(), &wasm_sig);
-            CalleeKind::Direct(callee_index.as_u32())
-        });
-
-        self.masm.free_stack(reserved_stack);
-        self.forward_results(&wasm_sig, &native_sig, ret_area.as_ref(), offsets.last());
-        if wasm_sig.has_stack_results() {
-            self.masm.free_stack(wasm_sig.results.size());
-        }
-
-        self.masm.free_stack(spill_size);
-        self.masm.epilogue(&self.callee_saved_regs);
-
-        Ok(())
-    }
-
-    /// Creates the return area in the caller's frame.
-    fn make_ret_area(&mut self, sig: &ABISig) -> Option<RetArea> {
-        sig.has_stack_results().then(|| {
-            self.masm.reserve_stack(sig.results.size());
-            let offs = self.masm.sp_offset();
-            RetArea::sp(offs)
-        })
-    }
-
-    /// Loads the return area pointer into its [ABIOperand] destination.
-    fn load_retptr(masm: &mut M, ret_area: Option<&RetArea>, callee: &ABISig) {
-        if let Some(area) = ret_area {
-            match (area, callee.params.unwrap_results_area_operand()) {
-                (RetArea::SP(sp_offset), ABIOperand::Reg { ty, reg, .. }) => {
-                    let addr = masm.address_from_sp(*sp_offset);
-                    masm.load_addr(addr, *reg, (*ty).into());
-                }
-                (RetArea::SP(sp_offset), ABIOperand::Stack { ty, offset, .. }) => {
-                    let retptr = masm.address_from_sp(*sp_offset);
-                    let scratch = <M::ABI as ABI>::scratch_reg();
-                    masm.load_addr(retptr, scratch, (*ty).into());
-                    let retptr_slot = masm.address_from_sp(SPOffset::from_u32(*offset));
-                    masm.store(scratch.into(), retptr_slot, (*ty).into());
-                }
-                _ => unreachable!(),
-            }
-        }
-    }
-
-    /// Forwards results from callee to caller; it loads results from the
-    /// callee's return area and stores them into the caller's return area.
-    fn forward_results(
-        &mut self,
-        callee_sig: &ABISig,
-        caller_sig: &ABISig,
-        callee_ret_area: Option<&RetArea>,
-        caller_retptr_offset: Option<&SPOffset>,
-    ) {
-        // Spill any result registers used by the callee to avoid
-        // use-assign issues when forwarding the results.
-        let results_spill = self.spill(callee_sig.results());
-        let mut spill_offsets_iter = results_spill.0.iter();
-
-        let caller_retptr = caller_sig.has_stack_results().then(|| {
-            let fp = <M::ABI as ABI>::fp_reg();
-            let arg_base: u32 = <M::ABI as ABI>::arg_base_offset().into();
-            match caller_sig.params.unwrap_results_area_operand() {
-                ABIOperand::Reg { ty, .. } => {
-                    let addr = self.masm.address_from_sp(*caller_retptr_offset.unwrap());
-                    let size: OperandSize = (*ty).into();
-                    self.masm.load(addr, self.scratch_reg, size);
-                    self.scratch_reg
-                }
-                ABIOperand::Stack { ty, offset, .. } => {
-                    let size: OperandSize = (*ty).into();
-                    let addr = self.masm.address_at_reg(fp, arg_base + offset);
-                    self.masm.load(addr, self.scratch_reg, size);
-                    self.scratch_reg
-                }
-            }
-        });
-
-        for (callee_operand, caller_operand) in
-            callee_sig.results().iter().zip(caller_sig.results())
-        {
-            match (callee_operand, caller_operand) {
-                (ABIOperand::Reg { ty, .. }, ABIOperand::Stack { offset, .. }) => {
-                    let reg_offset = spill_offsets_iter.next().unwrap();
-                    let size: OperandSize = (*ty).into();
-                    self.masm.load(
-                        self.masm.address_from_sp(*reg_offset),
-                        self.alloc_scratch_reg,
-                        size,
-                    );
-                    self.masm.store(
-                        self.alloc_scratch_reg.into(),
-                        self.masm.address_at_reg(caller_retptr.unwrap(), *offset),
-                        (*ty).into(),
-                    );
-                }
-                (
-                    ABIOperand::Stack { ty, offset, .. },
-                    ABIOperand::Stack {
-                        offset: caller_offset,
-                        ..
-                    },
-                ) => {
-                    let addr = {
-                        let base = callee_ret_area.unwrap().unwrap_sp();
-                        let slot_offset = base.as_u32() - *offset;
-                        self.masm.address_from_sp(SPOffset::from_u32(slot_offset))
-                    };
-                    let size: OperandSize = (*ty).into();
-
-                    self.masm.load(addr, self.alloc_scratch_reg, size);
-                    self.masm.store(
-                        self.alloc_scratch_reg.into(),
-                        self.masm
-                            .address_at_reg(caller_retptr.unwrap(), *caller_offset),
-                        (*ty).into(),
-                    );
-                }
-                (ABIOperand::Stack { ty, offset, .. }, ABIOperand::Reg { reg, .. }) => {
-                    let addr = {
-                        let base = callee_ret_area.unwrap().unwrap_sp();
-                        let slot_offset = base.as_u32() - *offset;
-                        self.masm.address_from_sp(SPOffset::from_u32(slot_offset))
-                    };
-
-                    self.masm.load(addr, *reg, (*ty).into());
-                }
-                (ABIOperand::Reg { ty, .. }, ABIOperand::Reg { reg: dst, .. }) => {
-                    let spill_offset = spill_offsets_iter.next().unwrap();
-                    self.masm
-                        .load(self.masm.address_from_sp(*spill_offset), *dst, (*ty).into());
-                }
-            }
-        }
-        self.masm.free_stack(results_spill.1);
-    }
-
-    /// Emit a wasm-to-native trampoline.
-    pub fn emit_wasm_to_native(mut self, ty: &WasmFuncType) -> Result<()> {
-        let wasm_sig = wasm_sig::<M::ABI>(&ty);
-        let native_sig = native_sig::<M::ABI>(ty, &self.call_conv);
-
-        let (vmctx, caller_vmctx) = Self::callee_and_caller_vmctx(&wasm_sig.params).unwrap();
-        let vmctx_runtime_limits_addr = self.vmctx_runtime_limits_addr(caller_vmctx);
-
-        self.masm.prologue(caller_vmctx, &[]);
-
-        // Save the FP and return address when exiting Wasm.
-        // TODO: Once Winch supports comparison operators,
-        // check that the caller VM context is what we expect.
-        // See [`wasmtime_environ::MAGIC`].
-        Self::save_last_wasm_exit_fp_and_pc(
-            self.masm,
-            vmctx_runtime_limits_addr,
-            self.scratch_reg,
-            self.alloc_scratch_reg,
-            &self.pointer_size,
-        );
-
-        let ret_area = self.make_ret_area(&native_sig);
-        let (offsets, spill_size) = self.spill(wasm_sig.params());
-
-        let reserved_stack = self.masm.call(native_sig.params_stack_size(), |masm| {
-            // Move the VM context into one of the scratch registers.
-            masm.mov(
-                vmctx.into(),
-                self.alloc_scratch_reg.into(),
-                OperandSize::S64,
-            );
-
-            Self::assign_args(
-                masm,
-                &native_sig.params_without_retptr(),
-                &wasm_sig.params_without_retptr(),
-                &offsets,
-                self.scratch_reg,
-            );
-
-            Self::load_retptr(masm, ret_area.as_ref(), &native_sig);
-
-            let body_offset = self.pointer_size.vmnative_call_host_func_context_func_ref()
-                + self.pointer_size.vm_func_ref_native_call();
-            let callee_addr = masm.address_at_reg(self.alloc_scratch_reg, body_offset.into());
-            masm.load_ptr(callee_addr, self.scratch_reg);
-
-            CalleeKind::Indirect(self.scratch_reg)
-        });
-
-        self.masm.free_stack(reserved_stack);
-        self.forward_results(&native_sig, &wasm_sig, ret_area.as_ref(), offsets.last());
-
-        if native_sig.has_stack_results() {
-            self.masm.free_stack(native_sig.results.size());
-        }
-
-        self.masm.free_stack(spill_size);
-        self.masm.epilogue(&[]);
-
-        Ok(())
-    }
-
-    /// Perform argument assignment, translating between
-    /// caller and callee calling conventions.
-    fn assign_args(
-        masm: &mut M,
-        callee_params: &[ABIOperand],
-        caller_params: &[ABIOperand],
-        caller_stack_offsets: &[SPOffset],
-        scratch: Reg,
-    ) {
-        assert!(callee_params.len() == caller_params.len());
-        let arg_base_offset: u32 = <M::ABI as ABI>::arg_base_offset().into();
-        let fp = <M::ABI as ABI>::fp_reg();
-        let mut offset_index = 0;
-
-        callee_params
-            .iter()
-            .zip(caller_params)
-            .for_each(
-                |(callee_param, caller_param)| match (callee_param, caller_param) {
-                    (ABIOperand::Reg { ty, reg: dst, .. }, ABIOperand::Reg { .. }) => {
-                        let offset = caller_stack_offsets[offset_index];
-                        let addr = masm.address_from_sp(offset);
-                        masm.load(addr, *dst, (*ty).into());
-                        offset_index += 1;
-                    }
-
-                    (ABIOperand::Stack { ty, offset, .. }, ABIOperand::Reg { .. }) => {
-                        let spill_offset = caller_stack_offsets[offset_index];
-                        let addr = masm.address_from_sp(spill_offset);
-                        let size: OperandSize = (*ty).into();
-                        masm.load(addr, scratch, size);
-
-                        let arg_addr = masm.address_at_sp(SPOffset::from_u32(*offset));
-                        masm.store(scratch.into(), arg_addr, (*ty).into());
-                        offset_index += 1;
-                    }
-
-                    (ABIOperand::Reg { ty, reg: dst, .. }, ABIOperand::Stack { offset, .. }) => {
-                        let addr = masm.address_at_reg(fp, arg_base_offset + offset);
-                        masm.load(addr, *dst, (*ty).into());
-                    }
-
-                    (
-                        ABIOperand::Stack {
-                            ty,
-                            offset: callee_offset,
-                            ..
-                        },
-                        ABIOperand::Stack {
-                            offset: caller_offset,
-                            ..
- }, - ) => { - let addr = masm.address_at_reg(fp, arg_base_offset + caller_offset); - masm.load(addr, scratch, (*ty).into()); - - let arg_addr = masm.address_at_sp(SPOffset::from_u32(*callee_offset)); - masm.store(scratch.into(), arg_addr, (*ty).into()); - } - }, - ); - } - - /// Returns the register pair containing the callee and caller VM context pointers. - fn callee_and_caller_vmctx(params: &ABIParams) -> Result<(Reg, Reg)> { - let vmctx = params - .get(0) - .map(|operand| operand.unwrap_reg()) - .expect("Callee VMContext to be in a register"); - let caller_vmctx = params - .get(1) - .map(|operand| operand.unwrap_reg()) - .expect("Caller VMContext to be in a register"); - Ok((vmctx, caller_vmctx)) - } - - /// Returns the address of the VM context runtime limits - /// field. - fn vmctx_runtime_limits_addr(&mut self, vmctx: Reg) -> M::Address { - self.masm - .address_at_reg(vmctx, self.pointer_size.vmcontext_runtime_limits().into()) - } - - /// Performs a spill of the given operands. - fn spill(&mut self, operands: &[ABIOperand]) -> (SmallVec<[SPOffset; 6]>, u32) { - let mut offsets = SmallVec::new(); - let mut spill_size = 0; - operands.iter().for_each(|param| { - if let Some(reg) = param.get_reg() { - let slot = self.masm.push(reg, param.ty().into()); - offsets.push(slot.offset); - spill_size += slot.size; - } - }); - - (offsets, spill_size) - } - - /// Loads and assigns values from the value array used in the array - /// calling convention. - fn load_values_from_array( - masm: &mut M, - callee_sig: &ABISig, - ret_area: Option<&RetArea>, - values_reg: Reg, - scratch: Reg, - ) { - callee_sig - .params_without_retptr() - .iter() - .skip(MAX_CONTEXT_ARGS) - .enumerate() - .for_each(|(i, param)| { - let value_offset = (i * VALUE_SIZE) as u32; - - match param { - ABIOperand::Reg { reg, ty, .. } => masm.load( - masm.address_at_reg(values_reg, value_offset), - *reg, - (*ty).into(), - ), - ABIOperand::Stack { offset, ty, .. } => { - masm.load( - masm.address_at_reg(values_reg, value_offset), - scratch, - (*ty).into(), - ); - masm.store( - scratch.into(), - masm.address_at_sp(SPOffset::from_u32(*offset)), - (*ty).into(), - ); - } - } - }); - - // Assign the retpr param. - if let Some(offs) = ret_area { - let results_area_operand = callee_sig.params.unwrap_results_area_operand(); - let addr = match offs { - RetArea::SP(sp_offset) => masm.address_from_sp(*sp_offset), - _ => unreachable!(), - }; - match results_area_operand { - ABIOperand::Reg { ty, reg, .. } => { - masm.load_addr(addr, (*reg).into(), (*ty).into()); - } - ABIOperand::Stack { ty, offset, .. 
-                    masm.load_addr(addr, scratch, (*ty).into());
-                    masm.store(
-                        scratch.into(),
-                        masm.address_at_sp(SPOffset::from_u32(*offset)),
-                        (*ty).into(),
-                    );
-                }
-            }
-        }
-    }
-
-    fn save_last_wasm_entry_sp(
-        masm: &mut M,
-        vm_runtime_limits_addr: M::Address,
-        scratch: Reg,
-        ptr: &impl PtrSize,
-    ) {
-        let sp = <M::ABI as ABI>::sp_reg();
-        masm.load_ptr(vm_runtime_limits_addr, scratch);
-        let addr = masm.address_at_reg(scratch, ptr.vmruntime_limits_last_wasm_entry_sp().into());
-        masm.store(sp.into(), addr, OperandSize::S64);
-    }
-
-    fn save_last_wasm_exit_fp_and_pc(
-        masm: &mut M,
-        vm_runtime_limits_addr: M::Address,
-        scratch: Reg,
-        alloc_scratch: Reg,
-        ptr: &impl PtrSize,
-    ) {
-        masm.load_ptr(vm_runtime_limits_addr, alloc_scratch);
-        let last_wasm_exit_fp_addr = masm.address_at_reg(
-            alloc_scratch,
-            ptr.vmruntime_limits_last_wasm_exit_fp().into(),
-        );
-        let last_wasm_exit_pc_addr = masm.address_at_reg(
-            alloc_scratch,
-            ptr.vmruntime_limits_last_wasm_exit_pc().into(),
-        );
-
-        // Handle the frame pointer.
-        let fp = <M::ABI as ABI>::fp_reg();
-        let fp_addr = masm.address_at_reg(fp, 0);
-        masm.load_ptr(fp_addr, scratch);
-        masm.store(scratch.into(), last_wasm_exit_fp_addr, OperandSize::S64);
-
-        // Handle the return address.
-        let ret_addr_offset = <M::ABI as ABI>::ret_addr_offset();
-        let ret_addr = masm.address_at_reg(fp, ret_addr_offset.into());
-        masm.load_ptr(ret_addr, scratch);
-        masm.store(scratch.into(), last_wasm_exit_pc_addr, OperandSize::S64);
-    }
-}
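
Note on the shape of the change: after this patch, Winch performs no trampoline codegen of its own. Each `compile_*_trampoline` hook in crates/winch/src/compiler.rs simply forwards to the Cranelift-backed `wasmtime_environ::Compiler` held in the `trampolines` field, and Winch's `TargetIsa::compile_trampoline`, `TrampolineKind`, and trampoline.rs are deleted. A minimal, self-contained Rust sketch of that delegation pattern follows; the `TrampolineCompiler` trait and every type below are illustrative stand-ins, not the real `wasmtime_environ` API:

    // Stand-in for wasmtime_environ::Compiler, reduced to one trampoline hook.
    trait TrampolineCompiler {
        fn compile_array_to_wasm_trampoline(&self, func_index: u32) -> Vec<u8>;
    }

    // Stand-in for the Cranelift-backed compiler that actually emits code.
    struct CraneliftTrampolines;

    impl TrampolineCompiler for CraneliftTrampolines {
        fn compile_array_to_wasm_trampoline(&self, func_index: u32) -> Vec<u8> {
            // The real compiler emits machine code here; elided in this sketch.
            vec![func_index as u8]
        }
    }

    // Winch-style compiler: owns a second compiler used only for trampolines,
    // mirroring the `trampolines` field in the diff above.
    struct WinchStyleCompiler {
        trampolines: Box<dyn TrampolineCompiler>,
    }

    impl WinchStyleCompiler {
        fn compile_array_to_wasm_trampoline(&self, func_index: u32) -> Vec<u8> {
            // No local trampoline codegen: forward the request, exactly as the
            // new trait-method bodies in compiler.rs do.
            self.trampolines.compile_array_to_wasm_trampoline(func_index)
        }
    }

    fn main() {
        let compiler = WinchStyleCompiler {
            trampolines: Box::new(CraneliftTrampolines),
        };
        assert_eq!(compiler.compile_array_to_wasm_trampoline(7), vec![7]);
    }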