diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c6212b4854f5..233a29617214 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -552,6 +552,13 @@ jobs:
       with:
         toolchain: wasmtime-ci-pinned-nightly
 
+      # Check that `pulley-interpreter` compiles with tail calls enabled. Don't
+      # actually run the tests with tail calls enabled, because they are not yet
+      # implemented in rustc and cause an ICE.
+      - run: cargo check -p pulley-interpreter
+        env:
+          RUSTFLAGS: "--cfg pulley_tail_calls"
+
       # Ensure that fuzzers still build.
       #
       # Install the OCaml packages necessary for fuzz targets that use the
diff --git a/Cargo.toml b/Cargo.toml
index 486ff69d4e9a..8fa098ae04c4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -176,6 +176,9 @@ unused_import_braces = 'warn'
 unused-lifetimes = 'warn'
 unused-macro-rules = 'warn'
 
+# Don't warn about the unknown cfg condition in `#[cfg(pulley_tail_calls)]`
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(pulley_tail_calls)'] }
+
 [workspace.lints.clippy]
 # The default set of lints in Clippy is viewed as "too noisy" right now so
 # they're all turned off by default. Selective lints are then enabled below as
diff --git a/pulley/Cargo.toml b/pulley/Cargo.toml
index 9d95056fda15..bc27619e72af 100644
--- a/pulley/Cargo.toml
+++ b/pulley/Cargo.toml
@@ -27,7 +27,7 @@ arbitrary = ["dep:arbitrary", "arbitrary/derive", "std", "cranelift-bitset/arbit
 encode = []
 decode = []
 disas = ["decode"]
-interp = ["decode"]
+interp = ["decode", "encode"]
 
 [package.metadata.docs.rs]
 all-features = true
diff --git a/pulley/src/decode.rs b/pulley/src/decode.rs
index 1a0c8b202a62..bcd57017283d 100644
--- a/pulley/src/decode.rs
+++ b/pulley/src/decode.rs
@@ -260,7 +260,8 @@ impl BytecodeStream for UnsafeBytecodeStream {
 
 /// Anything that can be decoded from a bytecode stream, e.g. opcodes,
 /// immediates, registers, etc...
-trait Decode: Sized {
+pub trait Decode: Sized {
+    /// Decode this type from the given bytecode stream.
     fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
     where
         T: BytecodeStream;
@@ -377,6 +378,32 @@ impl Decode for PcRelOffset {
     }
 }
 
+impl Decode for Opcode {
+    fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
+    where
+        T: BytecodeStream,
+    {
+        let byte = u8::decode(bytecode)?;
+        match Opcode::new(byte) {
+            Some(v) => Ok(v),
+            None => Err(bytecode.invalid_opcode(byte)),
+        }
+    }
+}
+
+impl Decode for ExtendedOpcode {
+    fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
+    where
+        T: BytecodeStream,
+    {
+        let word = u16::decode(bytecode)?;
+        match ExtendedOpcode::new(word) {
+            Some(v) => Ok(v),
+            None => Err(bytecode.invalid_extended_opcode(word)),
+        }
+    }
+}
+
 impl Decode for BinaryOperands {
     fn decode<T>(bytecode: &mut T) -> Result<Self, T::Error>
     where
@@ -655,3 +682,53 @@ macro_rules! define_extended_decoder {
     };
 }
 for_each_extended_op!(define_extended_decoder);
+
+/// Unwrap a `Result<T, Uninhabited>`.
+/// Always succeeds, since `Uninhabited` is uninhabited.
+pub fn unwrap_uninhabited<T>(res: Result<T, Uninhabited>) -> T {
+    match res {
+        Ok(ok) => ok,
+
+        // Nightly rust warns that this pattern is unreachable, but stable rust
+        // doesn't.
+        #[allow(unreachable_patterns)]
+        Err(err) => match err {},
+    }
+}
+
+/// Functions for decoding the operands of an instruction, assuming the opcode
+/// has already been decoded.
+pub mod operands {
+    use super::*;
+
+    macro_rules! define_operands_decoder {
+        (
+            $(
+                $( #[$attr:meta] )*
+                $snake_name:ident = $name:ident $( {
+                    $(
+                        $( #[$field_attr:meta] )*
+                        $field:ident : $field_ty:ty
+                    ),*
+                } )? ;
+            )*
+        ) => {
+            $(
+                #[allow(unused_variables)]
+                #[allow(missing_docs)]
+                pub fn $snake_name<T: BytecodeStream>(pc: &mut T) -> Result<($($($field_ty,)*)?), T::Error> {
+                    Ok((($($((<$field_ty>::decode(pc))?,)*)?)))
+                }
+            )*
+        };
+    }
+
+    for_each_op!(define_operands_decoder);
+
+    #[allow(missing_docs)]
+    pub fn extended<T: BytecodeStream>(pc: &mut T) -> Result<(ExtendedOpcode,), T::Error> {
+        Ok((ExtendedOpcode::decode(pc)?,))
+    }
+
+    for_each_extended_op!(define_operands_decoder);
+}
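A note on `unwrap_uninhabited`, since the interpreter loop below leans on it for every operand decode: when the bytecode stream's error type is uninhabited (which is what lets `interp_loop` call it with `UnsafeBytecodeStream`), the `Err` arm can never be reached, and matching on the error with an empty `match` makes the unwrap compile away entirely. A minimal sketch of the trick, using a stand-in `Never` enum rather than Pulley's actual `Uninhabited` type:

```rust
/// Stand-in for Pulley's `Uninhabited` error type; an enum with no variants
/// has no values, so no `Result` holding it can ever be `Err`.
enum Never {}

fn unwrap_never<T>(res: Result<T, Never>) -> T {
    match res {
        Ok(ok) => ok,
        // The empty match proves to the compiler that this arm cannot run.
        #[allow(unreachable_patterns)]
        Err(err) => match err {},
    }
}

fn main() {
    let decoded: Result<u32, Never> = Ok(42);
    // No runtime check is needed: there is no error value to branch on.
    assert_eq!(unwrap_never(decoded), 42);
}
```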
diff --git a/pulley/src/interp.rs b/pulley/src/interp.rs
index 24b3e2a59f5c..49e3a1bb3899 100644
--- a/pulley/src/interp.rs
+++ b/pulley/src/interp.rs
@@ -4,19 +4,22 @@ use crate::decode::*;
 use crate::imms::*;
 use crate::regs::*;
 use crate::ExtendedOpcode;
+use crate::Opcode;
 use alloc::string::ToString;
 use alloc::{vec, vec::Vec};
 use core::fmt;
 use core::mem;
+use core::ops::ControlFlow;
 use core::ops::{Index, IndexMut};
 use core::ptr::{self, NonNull};
 use sptr::Strict;
 
+mod interp_loop;
+
 const DEFAULT_STACK_SIZE: usize = 1 << 20; // 1 MiB
 
 /// A virtual machine for interpreting Pulley bytecode.
 pub struct Vm {
-    decoder: Decoder,
     state: MachineState,
 }
 
@@ -35,7 +38,6 @@ impl Vm {
     /// Create a new virtual machine with the given stack.
     pub fn with_stack(stack: Vec<u8>) -> Self {
         Self {
-            decoder: Decoder::new(),
            state: MachineState::with_stack(stack),
         }
     }
@@ -120,29 +122,11 @@ impl Vm {
     }
 
     unsafe fn run(&mut self, pc: NonNull<u8>) -> Result<(), NonNull<u8>> {
-        let mut visitor = InterpreterVisitor {
-            state: &mut self.state,
-            pc: UnsafeBytecodeStream::new(pc),
-        };
-
-        loop {
-            let continuation = self.decoder.decode_one(&mut visitor).unwrap();
-
-            // Really wish we had `feature(explicit_tail_calls)`...
-            match continuation {
-                Continuation::Continue => {
-                    continue;
-                }
-
-                // Out-of-line slow paths marked `cold` and `inline(never)` to
-                // improve codegen.
-                Continuation::Trap => {
-                    let pc = visitor.pc.as_ptr();
-                    return self.trap(pc);
-                }
-                Continuation::ReturnToHost => return self.return_to_host(),
-                Continuation::HostCall => return self.host_call(),
-            }
+        let mut bytecode = UnsafeBytecodeStream::new(pc);
+        match interp_loop::interpreter_loop(self, &mut bytecode) {
+            Done::ReturnToHost => self.return_to_host(),
+            Done::Trap(pc) => self.trap(pc),
+            Done::HostCall => self.host_call(),
         }
     }
 
@@ -271,6 +255,14 @@ impl From for Val {
 #[derive(Copy, Clone)]
 pub struct XRegVal(XRegUnion);
 
+impl PartialEq for XRegVal {
+    fn eq(&self, other: &Self) -> bool {
+        self.get_u64() == other.get_u64()
+    }
+}
+
+impl Eq for XRegVal {}
+
 impl fmt::Debug for XRegVal {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("XRegVal")
@@ -304,6 +296,9 @@ impl Default for XRegVal {
 
 #[allow(missing_docs)]
 impl XRegVal {
+    /// Sentinel return address that signals the end of the call stack.
+    pub const HOST_RETURN_ADDR: Self = Self(XRegUnion { i64: -1 });
+
     pub fn new_i32(x: i32) -> Self {
         let mut val = XRegVal::default();
         val.set_i32(x);
@@ -566,8 +561,8 @@ impl MachineState {
         let sp = sp.as_mut_ptr();
         let sp = unsafe { sp.add(len) };
         state[XReg::sp] = XRegVal::new_ptr(sp);
-        state[XReg::fp] = XRegVal::new_i64(-1);
-        state[XReg::lr] = XRegVal::new_i64(-1);
+        state[XReg::fp] = XRegVal::HOST_RETURN_ADDR;
+        state[XReg::lr] = XRegVal::HOST_RETURN_ADDR;
 
         state
     }
@@ -588,588 +583,16 @@ impl MachineState {
     }
 }
 
-enum Continuation {
-    Continue,
+/// The reason the interpreter loop terminated.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum Done {
+    /// A `ret` instruction was executed and the call stack was empty. This is
+    /// how the loop normally ends.
ReturnToHost, - Trap, + + /// A `trap` instruction was executed at the given PC. + Trap(NonNull), #[allow(dead_code)] HostCall, } - -struct InterpreterVisitor<'a> { - state: &'a mut MachineState, - pc: UnsafeBytecodeStream, -} - -impl InterpreterVisitor<'_> { - #[inline(always)] - fn pc_rel_jump(&mut self, offset: PcRelOffset, inst_size: isize) -> Continuation { - let offset = isize::try_from(i32::from(offset)).unwrap(); - self.pc = unsafe { self.pc.offset(offset - inst_size) }; - Continuation::Continue - } -} - -#[doc(hidden)] -impl OpVisitor for InterpreterVisitor<'_> { - type BytecodeStream = UnsafeBytecodeStream; - - fn bytecode(&mut self) -> &mut Self::BytecodeStream { - &mut self.pc - } - - type Return = Continuation; - - fn ret(&mut self) -> Self::Return { - if self.state[XReg::lr].get_u64() == u64::MAX { - Continuation::ReturnToHost - } else { - let return_addr = self.state[XReg::lr].get_ptr(); - self.pc = unsafe { UnsafeBytecodeStream::new(NonNull::new_unchecked(return_addr)) }; - // log::trace!("returning to {return_addr:#p}"); - Continuation::Continue - } - } - - fn call(&mut self, offset: PcRelOffset) -> Self::Return { - let return_addr = self.pc.as_ptr(); - self.state[XReg::lr].set_ptr(return_addr.as_ptr()); - self.pc_rel_jump(offset, 5) - } - - fn jump(&mut self, offset: PcRelOffset) -> Self::Return { - self.pc_rel_jump(offset, 5) - } - - fn br_if(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return { - let cond = self.state[cond].get_u64(); - if cond != 0 { - self.pc_rel_jump(offset, 6) - } else { - Continuation::Continue - } - } - - fn br_if_not(&mut self, cond: XReg, offset: PcRelOffset) -> Self::Return { - let cond = self.state[cond].get_u64(); - if cond == 0 { - self.pc_rel_jump(offset, 6) - } else { - Continuation::Continue - } - } - - fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u32(); - let b = self.state[b].get_u32(); - if a == b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xneq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u32(); - let b = self.state[b].get_u32(); - if a != b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xslt32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_i32(); - let b = self.state[b].get_i32(); - if a < b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xslteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_i32(); - let b = self.state[b].get_i32(); - if a <= b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xult32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u32(); - let b = self.state[b].get_u32(); - if a < b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xulteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u32(); - let b = self.state[b].get_u32(); - if a <= b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u64(); - let b = self.state[b].get_u64(); - if a == b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xneq64(&mut self, a: XReg, b: 
XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u64(); - let b = self.state[b].get_u64(); - if a != b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xslt64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_i64(); - let b = self.state[b].get_i64(); - if a < b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xslteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_i64(); - let b = self.state[b].get_i64(); - if a <= b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xult64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u64(); - let b = self.state[b].get_u64(); - if a < b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn br_if_xulteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset) -> Self::Return { - let a = self.state[a].get_u64(); - let b = self.state[b].get_u64(); - if a <= b { - self.pc_rel_jump(offset, 7) - } else { - Continuation::Continue - } - } - - fn xmov(&mut self, dst: XReg, src: XReg) -> Self::Return { - let val = self.state[src]; - self.state[dst] = val; - Continuation::Continue - } - - fn fmov(&mut self, dst: FReg, src: FReg) -> Self::Return { - let val = self.state[src]; - self.state[dst] = val; - Continuation::Continue - } - - fn vmov(&mut self, dst: VReg, src: VReg) -> Self::Return { - let val = self.state[src]; - self.state[dst] = val; - Continuation::Continue - } - - fn xconst8(&mut self, dst: XReg, imm: i8) -> Self::Return { - self.state[dst].set_i64(i64::from(imm)); - Continuation::Continue - } - - fn xconst16(&mut self, dst: XReg, imm: i16) -> Self::Return { - self.state[dst].set_i64(i64::from(imm)); - Continuation::Continue - } - - fn xconst32(&mut self, dst: XReg, imm: i32) -> Self::Return { - self.state[dst].set_i64(i64::from(imm)); - Continuation::Continue - } - - fn xconst64(&mut self, dst: XReg, imm: i64) -> Self::Return { - self.state[dst].set_i64(imm); - Continuation::Continue - } - - fn xadd32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u32(); - let b = self.state[operands.src2].get_u32(); - self.state[operands.dst].set_u32(a.wrapping_add(b)); - Continuation::Continue - } - - fn xadd64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u64(); - let b = self.state[operands.src2].get_u64(); - self.state[operands.dst].set_u64(a.wrapping_add(b)); - Continuation::Continue - } - - fn xeq64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u64(); - let b = self.state[operands.src2].get_u64(); - self.state[operands.dst].set_u64(u64::from(a == b)); - Continuation::Continue - } - - fn xneq64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u64(); - let b = self.state[operands.src2].get_u64(); - self.state[operands.dst].set_u64(u64::from(a != b)); - Continuation::Continue - } - - fn xslt64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_i64(); - let b = self.state[operands.src2].get_i64(); - self.state[operands.dst].set_u64(u64::from(a < b)); - Continuation::Continue - } - - fn xslteq64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_i64(); - let b = 
self.state[operands.src2].get_i64(); - self.state[operands.dst].set_u64(u64::from(a <= b)); - Continuation::Continue - } - - fn xult64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u64(); - let b = self.state[operands.src2].get_u64(); - self.state[operands.dst].set_u64(u64::from(a < b)); - Continuation::Continue - } - - fn xulteq64(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u64(); - let b = self.state[operands.src2].get_u64(); - self.state[operands.dst].set_u64(u64::from(a <= b)); - Continuation::Continue - } - - fn xeq32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u32(); - let b = self.state[operands.src2].get_u32(); - self.state[operands.dst].set_u64(u64::from(a == b)); - Continuation::Continue - } - - fn xneq32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u32(); - let b = self.state[operands.src2].get_u32(); - self.state[operands.dst].set_u64(u64::from(a != b)); - Continuation::Continue - } - - fn xslt32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_i32(); - let b = self.state[operands.src2].get_i32(); - self.state[operands.dst].set_u64(u64::from(a < b)); - Continuation::Continue - } - - fn xslteq32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_i32(); - let b = self.state[operands.src2].get_i32(); - self.state[operands.dst].set_u64(u64::from(a <= b)); - Continuation::Continue - } - - fn xult32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u32(); - let b = self.state[operands.src2].get_u32(); - self.state[operands.dst].set_u64(u64::from(a < b)); - Continuation::Continue - } - - fn xulteq32(&mut self, operands: BinaryOperands) -> Self::Return { - let a = self.state[operands.src1].get_u32(); - let b = self.state[operands.src2].get_u32(); - self.state[operands.dst].set_u64(u64::from(a <= b)); - Continuation::Continue - } - - fn load32_u(&mut self, dst: XReg, ptr: XReg) -> Self::Return { - let ptr = self.state[ptr].get_ptr::(); - let val = unsafe { ptr::read_unaligned(ptr) }; - self.state[dst].set_u64(u64::from(val)); - Continuation::Continue - } - - fn load32_s(&mut self, dst: XReg, ptr: XReg) -> Self::Return { - let ptr = self.state[ptr].get_ptr::(); - let val = unsafe { ptr::read_unaligned(ptr) }; - self.state[dst].set_i64(i64::from(val)); - Continuation::Continue - } - - fn load64(&mut self, dst: XReg, ptr: XReg) -> Self::Return { - let ptr = self.state[ptr].get_ptr::(); - let val = unsafe { ptr::read_unaligned(ptr) }; - self.state[dst].set_u64(val); - Continuation::Continue - } - - fn load32_u_offset8(&mut self, dst: XReg, ptr: XReg, offset: i8) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset.into()) - .read_unaligned() - }; - self.state[dst].set_u64(u64::from(val)); - Continuation::Continue - } - - fn load32_s_offset8(&mut self, dst: XReg, ptr: XReg, offset: i8) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset.into()) - .read_unaligned() - }; - self.state[dst].set_i64(i64::from(val)); - Continuation::Continue - } - - fn load32_u_offset64(&mut self, dst: XReg, ptr: XReg, offset: i64) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset as isize) - .read_unaligned() - }; - self.state[dst].set_u64(u64::from(val)); - 
Continuation::Continue - } - - fn load32_s_offset64(&mut self, dst: XReg, ptr: XReg, offset: i64) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset as isize) - .read_unaligned() - }; - self.state[dst].set_i64(i64::from(val)); - Continuation::Continue - } - - fn load64_offset8(&mut self, dst: XReg, ptr: XReg, offset: i8) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset.into()) - .read_unaligned() - }; - self.state[dst].set_u64(val); - Continuation::Continue - } - - fn load64_offset64(&mut self, dst: XReg, ptr: XReg, offset: i64) -> Self::Return { - let val = unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset as isize) - .read_unaligned() - }; - self.state[dst].set_u64(val); - Continuation::Continue - } - - fn store32(&mut self, ptr: XReg, src: XReg) -> Self::Return { - let ptr = self.state[ptr].get_ptr::(); - let val = self.state[src].get_u32(); - unsafe { - ptr::write_unaligned(ptr, val); - } - Continuation::Continue - } - - fn store64(&mut self, ptr: XReg, src: XReg) -> Self::Return { - let ptr = self.state[ptr].get_ptr::(); - let val = self.state[src].get_u64(); - unsafe { - ptr::write_unaligned(ptr, val); - } - Continuation::Continue - } - - fn store32_offset8(&mut self, ptr: XReg, offset: i8, src: XReg) -> Self::Return { - let val = self.state[src].get_u32(); - unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset.into()) - .write_unaligned(val); - } - Continuation::Continue - } - - fn store64_offset8(&mut self, ptr: XReg, offset: i8, src: XReg) -> Self::Return { - let val = self.state[src].get_u64(); - unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset.into()) - .write_unaligned(val); - } - Continuation::Continue - } - - fn store32_offset64(&mut self, ptr: XReg, offset: i64, src: XReg) -> Self::Return { - let val = self.state[src].get_u32(); - unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset as isize) - .write_unaligned(val); - } - Continuation::Continue - } - - fn store64_offset64(&mut self, ptr: XReg, offset: i64, src: XReg) -> Self::Return { - let val = self.state[src].get_u64(); - unsafe { - self.state[ptr] - .get_ptr::() - .byte_offset(offset as isize) - .write_unaligned(val); - } - Continuation::Continue - } - - fn xpush32(&mut self, src: XReg) -> Self::Return { - self.state.push(self.state[src].get_u32()); - Continuation::Continue - } - - fn xpush32_many(&mut self, srcs: RegSet) -> Self::Return { - for src in srcs { - self.xpush32(src); - } - Continuation::Continue - } - - fn xpush64(&mut self, src: XReg) -> Self::Return { - self.state.push(self.state[src].get_u64()); - Continuation::Continue - } - - fn xpush64_many(&mut self, srcs: RegSet) -> Self::Return { - for src in srcs { - self.xpush64(src); - } - Continuation::Continue - } - - fn xpop32(&mut self, dst: XReg) -> Self::Return { - let val = self.state.pop(); - self.state[dst].set_u32(val); - Continuation::Continue - } - - fn xpop32_many(&mut self, dsts: RegSet) -> Self::Return { - for dst in dsts.into_iter().rev() { - self.xpop32(dst); - } - Continuation::Continue - } - - fn xpop64(&mut self, dst: XReg) -> Self::Return { - let val = self.state.pop(); - self.state[dst].set_u64(val); - Continuation::Continue - } - - fn xpop64_many(&mut self, dsts: RegSet) -> Self::Return { - for dst in dsts.into_iter().rev() { - self.xpop64(dst); - } - Continuation::Continue - } - - /// `push lr; push fp; fp = sp` - fn push_frame(&mut self) -> Self::Return { - 
self.state.push(self.state[XReg::lr].get_ptr::());
-        self.state.push(self.state[XReg::fp].get_ptr::());
-        self.state[XReg::fp] = self.state[XReg::sp];
-        Continuation::Continue
-    }
-
-    /// `sp = fp; pop fp; pop lr`
-    fn pop_frame(&mut self) -> Self::Return {
-        self.state[XReg::sp] = self.state[XReg::fp];
-        let fp = self.state.pop();
-        let lr = self.state.pop();
-        self.state[XReg::fp].set_ptr::(fp);
-        self.state[XReg::lr].set_ptr::(lr);
-        Continuation::Continue
-    }
-
-    fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg) -> Self::Return {
-        let val = self.state[src].get_f32();
-        self.state[dst].set_u64(u32::from_ne_bytes(val.to_ne_bytes()).into());
-        Continuation::Continue
-    }
-
-    fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg) -> Self::Return {
-        let val = self.state[src].get_f64();
-        self.state[dst].set_u64(u64::from_ne_bytes(val.to_ne_bytes()));
-        Continuation::Continue
-    }
-
-    fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg) -> Self::Return {
-        let val = self.state[src].get_u32();
-        self.state[dst].set_f32(f32::from_ne_bytes(val.to_ne_bytes()));
-        Continuation::Continue
-    }
-
-    fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg) -> Self::Return {
-        let val = self.state[src].get_u64();
-        self.state[dst].set_f64(f64::from_ne_bytes(val.to_ne_bytes()));
-        Continuation::Continue
-    }
-}
-
-impl ExtendedOpVisitor for InterpreterVisitor<'_> {
-    fn nop(&mut self) -> Self::Return {
-        Continuation::Continue
-    }
-
-    fn trap(&mut self) -> Self::Return {
-        Continuation::Trap
-    }
-
-    fn get_sp(&mut self, dst: XReg) -> Self::Return {
-        let sp = self.state[XReg::sp].get_u64();
-        self.state[dst].set_u64(sp);
-        Continuation::Continue
-    }
-}
diff --git a/pulley/src/interp/interp_loop.rs b/pulley/src/interp/interp_loop.rs
new file mode 100644
index 000000000000..c17cba3cac56
--- /dev/null
+++ b/pulley/src/interp/interp_loop.rs
@@ -0,0 +1,669 @@
+use super::*;
+
+#[derive(Copy, Clone)]
+pub struct OpcodeHandler {
+    /// The type of non-tail-recursive opcode handlers: return
+    /// `ControlFlow::Continue` with the next handler to call, or
+    /// `ControlFlow::Break` with the reason to stop.
+    #[cfg(not(pulley_tail_calls))]
+    fun: fn(&mut MachineState, &mut UnsafeBytecodeStream) -> ControlFlow<Done, OpcodeHandler>,
+
+    /// The type of tail-recursive opcode handlers: instead of returning
+    /// `ControlFlow::Continue`, they tail call the next handler directly, so
+    /// the `Continue` case disappears and the return type is just `Done`.
+    #[cfg(pulley_tail_calls)]
+    fun: fn(&mut MachineState, &mut UnsafeBytecodeStream) -> Done,
+}
+
+#[cfg(not(pulley_tail_calls))]
+pub fn interpreter_loop(vm: &mut Vm, bytecode: &mut UnsafeBytecodeStream) -> Done {
+    let opcode = Opcode::decode(bytecode).unwrap();
+    let mut handler = OPCODE_HANDLER_TABLE[opcode as usize];
+
+    // As tight as we can get the interpreter loop without tail calls: while
+    // the handlers keep returning the next handler to call, call it.
+    loop {
+        match (handler.fun)(&mut vm.state, bytecode) {
+            ControlFlow::Continue(next_handler) => handler = next_handler,
+            ControlFlow::Break(done) => return done,
+        }
+    }
+}
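Aside for readers skimming the patch: the stable dispatch above is easy to miss inside the macro-heavy code below, so here is a self-contained toy version of it. Everything in this sketch (the machine, the two opcodes, all names) is invented for illustration; only the shape mirrors the real code. It also shows why `OpcodeHandler` is a struct rather than a type alias: a bare `type Handler = fn(...) -> ControlFlow<u64, Handler>` would be a recursive alias, which rustc rejects, while a struct breaks the cycle.

```rust
use std::ops::ControlFlow;

/// A toy accumulator machine: opcode 0 increments, opcode 1 halts.
struct Toy {
    pc: usize,
    code: Vec<u8>,
    acc: u64,
}

/// Wrapping the fn pointer in a struct breaks the type-level recursion that a
/// plain `type` alias could not express (just like `OpcodeHandler` above).
#[derive(Copy, Clone)]
struct Handler {
    fun: fn(&mut Toy) -> ControlFlow<u64, Handler>,
}

static TABLE: [Handler; 2] = [Handler { fun: incr }, Handler { fun: halt }];

/// Decode the next opcode and look up its handler.
fn fetch(toy: &mut Toy) -> Handler {
    let op = usize::from(toy.code[toy.pc]);
    toy.pc += 1;
    TABLE[op]
}

fn incr(toy: &mut Toy) -> ControlFlow<u64, Handler> {
    toy.acc += 1;
    // Hand the next handler back to the driver loop instead of calling it
    // ourselves, keeping every handler's stack frame shallow.
    ControlFlow::Continue(fetch(toy))
}

fn halt(toy: &mut Toy) -> ControlFlow<u64, Handler> {
    ControlFlow::Break(toy.acc)
}

fn main() {
    let mut toy = Toy { pc: 0, code: vec![0, 0, 0, 1], acc: 0 };
    let mut handler = fetch(&mut toy);
    // The same shape as `interpreter_loop` above: while handlers keep
    // returning the next handler, keep calling it.
    let result = loop {
        match (handler.fun)(&mut toy) {
            ControlFlow::Continue(next) => handler = next,
            ControlFlow::Break(done) => break done,
        }
    };
    assert_eq!(result, 3);
}
```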
+
+/// The extra indirection through a macro is necessary to avoid a compiler
+/// error when compiling without `#![feature(explicit_tail_calls)]` enabled
+/// (via `--cfg pulley_tail_calls`).
+///
+/// It seems rustc first parses the function, encounters `become` and emits an
+/// error about using an unstable keyword on a stable compiler, then applies
+/// `#[cfg(...)]` after parsing to disable the function.
+///
+/// Macro bodies are just bags of tokens; a macro body is not parsed until the
+/// macro is expanded, and this macro is only expanded when `pulley_tail_calls`
+/// is enabled.
+#[cfg_attr(not(pulley_tail_calls), allow(unused_macros))]
+macro_rules! tail_call {
+    ($e:expr) => {
+        become $e
+    };
+}
+
+#[cfg(pulley_tail_calls)]
+pub fn interpreter_loop(vm: &mut Vm, bytecode: &mut UnsafeBytecodeStream) -> Done {
+    let opcode = Opcode::decode(bytecode).unwrap();
+    let handler = OPCODE_HANDLER_TABLE[opcode as usize];
+
+    // The ideal interpreter loop: a bunch of opcode handlers tail calling
+    // each other!
+    tail_call!((handler.fun)(&mut vm.state, bytecode));
+}
+
+/// Wrap the business logic of each handler with the boilerplate of decoding
+/// operands and dispatching to the next handler/exiting the loop.
+macro_rules! define_opcode_handlers {
+    (
+        $(
+            fn $name:ident (
+                $state:ident : &mut MachineState,
+                $bytecode:ident : &mut UnsafeBytecodeStream$(,)?
+                $($field:ident : $field_ty:ty),*
+            ) $body:block
+        )*
+    ) => {
+        $(
+            #[cfg(not(pulley_tail_calls))]
+            pub fn $name($state: &mut MachineState, $bytecode: &mut UnsafeBytecodeStream) -> ControlFlow<Done, OpcodeHandler> {
+                let ($($field,)*) = crate::decode::unwrap_uninhabited(crate::decode::operands::$name($bytecode));
+                match $body {
+                    ControlFlow::Continue(()) => {
+                        // Decode the next handler and return it so that
+                        // `interpreter_loop` can call it.
+                        let next_opcode = Opcode::decode($bytecode).unwrap();
+                        let next_handler = OPCODE_HANDLER_TABLE[next_opcode as usize];
+                        ControlFlow::Continue(next_handler)
+                    }
+                    ControlFlow::Break(done) => ControlFlow::Break(done),
+                }
+            }
+
+            #[cfg(pulley_tail_calls)]
+            pub fn $name($state: &mut MachineState, $bytecode: &mut UnsafeBytecodeStream) -> Done {
+                let ($($field,)*) = crate::decode::unwrap_uninhabited(crate::decode::operands::$name($bytecode));
+                match $body {
+                    ControlFlow::Continue(()) => {
+                        // Decode the next opcode and tail call its handler
+                        // directly; there is no driver loop to return to.
+                        let next_opcode = Opcode::decode($bytecode).unwrap();
+                        let next_handler = OPCODE_HANDLER_TABLE[next_opcode as usize];
+                        tail_call!((next_handler.fun)($state, $bytecode));
+                    }
+                    ControlFlow::Break(done) => done,
+                }
+            }
+        )*
+    };
+}
+
+/// Define the table of opcode handlers.
+macro_rules! opcode_handler_table_entry {
+    (
+        $(
+            $( #[$attr:meta] )*
+            $snake_name:ident = $name:ident $( {
+                $(
+                    $( #[$field_attr:meta] )*
+                    $field:ident : $field_ty:ty
+                ),*
+            } )? ;
+        )*
+    ) => {[ $(OpcodeHandler { fun: $snake_name },)* OpcodeHandler { fun: extended }]};
+}
+
+/// Add one to account for `ExtendedOp`.
+const NUM_OPCODES: usize = Opcode::MAX as usize + 1;
+static OPCODE_HANDLER_TABLE: [OpcodeHandler; NUM_OPCODES] =
+    for_each_op!(opcode_handler_table_entry);
+
+#[inline]
+fn pc_rel_jump(pc: &mut UnsafeBytecodeStream, offset: PcRelOffset, inst_size: isize) {
+    let offset = isize::try_from(i32::from(offset)).unwrap();
+    *pc = unsafe { pc.offset(offset - inst_size) };
+}
+
+define_opcode_handlers!
{ + fn ret(state: &mut MachineState, pc: &mut UnsafeBytecodeStream) { + if state[XReg::lr] == XRegVal::HOST_RETURN_ADDR { + ControlFlow::Break(Done::ReturnToHost) + } else { + let return_addr = state[XReg::lr].get_ptr(); + *pc = unsafe { UnsafeBytecodeStream::new(NonNull::new_unchecked(return_addr)) }; + ControlFlow::Continue(()) + } + } + + fn call(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, offset: PcRelOffset) { + let return_addr = pc.as_ptr(); + state[XReg::lr].set_ptr(return_addr.as_ptr()); + pc_rel_jump(pc, offset, 5); + ControlFlow::Continue(()) + } + + fn jump(_state: &mut MachineState, pc: &mut UnsafeBytecodeStream, offset: PcRelOffset) { + pc_rel_jump(pc, offset, 5); + ControlFlow::Continue(()) + } + + fn br_if(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, cond: XReg, offset: PcRelOffset) { + let cond = state[cond].get_u64(); + if cond != 0 { + pc_rel_jump(pc, offset, 6) + } + ControlFlow::Continue(()) + } + + fn br_if_not(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, cond: XReg, offset: PcRelOffset) { + let cond = state[cond].get_u64(); + if cond == 0 { + pc_rel_jump(pc, offset, 6) + } + ControlFlow::Continue(()) + } + + fn br_if_xeq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u32(); + let b = state[b].get_u32(); + if a == b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xneq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u32(); + let b = state[b].get_u32(); + if a != b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xslt32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_i32(); + let b = state[b].get_i32(); + if a < b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xslteq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_i32(); + let b = state[b].get_i32(); + if a <= b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xult32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u32(); + let b = state[b].get_u32(); + if a < b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xulteq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u32(); + let b = state[b].get_u32(); + if a <= b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xeq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u64(); + let b = state[b].get_u64(); + if a == b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xneq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u64(); + let b = state[b].get_u64(); + if a != b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xslt64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_i64(); + let b = state[b].get_i64(); + if a < b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xslteq64(state: &mut MachineState, pc: &mut 
UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_i64(); + let b = state[b].get_i64(); + if a <= b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xult64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u64(); + let b = state[b].get_u64(); + if a < b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn br_if_xulteq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, a: XReg, b: XReg, offset: PcRelOffset) { + let a = state[a].get_u64(); + let b = state[b].get_u64(); + if a <= b { + pc_rel_jump(pc, offset, 7) + } + ControlFlow::Continue(()) + } + + fn xmov(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, src: XReg) { + let val = state[src]; + state[dst] = val; + ControlFlow::Continue(()) + } + + fn fmov(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: FReg, src: FReg) { + let val = state[src]; + state[dst] = val; + ControlFlow::Continue(()) + } + + fn vmov(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: VReg, src: VReg) { + let val = state[src]; + state[dst] = val; + ControlFlow::Continue(()) + } + + fn xconst8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, imm: i8) { + state[dst].set_i64(i64::from(imm)); + ControlFlow::Continue(()) + } + + fn xconst16(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, imm: i16) { + state[dst].set_i64(i64::from(imm)); + ControlFlow::Continue(()) + } + + fn xconst32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, imm: i32) { + state[dst].set_i64(i64::from(imm)); + ControlFlow::Continue(()) + } + + fn xconst64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, imm: i64) { + state[dst].set_i64(imm); + ControlFlow::Continue(()) + } + + fn xadd32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u32(); + let b = state[operands.src2].get_u32(); + state[operands.dst].set_u32(a.wrapping_add(b)); + ControlFlow::Continue(()) + } + + fn xadd64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u64(); + let b = state[operands.src2].get_u64(); + state[operands.dst].set_u64(a.wrapping_add(b)); + ControlFlow::Continue(()) + } + + fn xeq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u64(); + let b = state[operands.src2].get_u64(); + state[operands.dst].set_u64(u64::from(a == b)); + ControlFlow::Continue(()) + } + + fn xneq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u64(); + let b = state[operands.src2].get_u64(); + state[operands.dst].set_u64(u64::from(a != b)); + ControlFlow::Continue(()) + } + + fn xslt64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_i64(); + let b = state[operands.src2].get_i64(); + state[operands.dst].set_u64(u64::from(a < b)); + ControlFlow::Continue(()) + } + + fn xslteq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_i64(); + let b = state[operands.src2].get_i64(); + state[operands.dst].set_u64(u64::from(a <= b)); + ControlFlow::Continue(()) + } + + fn xult64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: 
BinaryOperands) { + let a = state[operands.src1].get_u64(); + let b = state[operands.src2].get_u64(); + state[operands.dst].set_u64(u64::from(a < b)); + ControlFlow::Continue(()) + } + + fn xulteq64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u64(); + let b = state[operands.src2].get_u64(); + state[operands.dst].set_u64(u64::from(a <= b)); + ControlFlow::Continue(()) + } + + fn xeq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u32(); + let b = state[operands.src2].get_u32(); + state[operands.dst].set_u64(u64::from(a == b)); + ControlFlow::Continue(()) + } + + fn xneq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u32(); + let b = state[operands.src2].get_u32(); + state[operands.dst].set_u64(u64::from(a != b)); + ControlFlow::Continue(()) + } + + fn xslt32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_i32(); + let b = state[operands.src2].get_i32(); + state[operands.dst].set_u64(u64::from(a < b)); + ControlFlow::Continue(()) + } + + fn xslteq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_i32(); + let b = state[operands.src2].get_i32(); + state[operands.dst].set_u64(u64::from(a <= b)); + ControlFlow::Continue(()) + } + + fn xult32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u32(); + let b = state[operands.src2].get_u32(); + state[operands.dst].set_u64(u64::from(a < b)); + ControlFlow::Continue(()) + } + + fn xulteq32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, operands: BinaryOperands) { + let a = state[operands.src1].get_u32(); + let b = state[operands.src2].get_u32(); + state[operands.dst].set_u64(u64::from(a <= b)); + ControlFlow::Continue(()) + } + + fn load32_u(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg) { + let ptr = state[ptr].get_ptr::(); + let val = unsafe { ptr::read_unaligned(ptr) }; + state[dst].set_u64(u64::from(val)); + ControlFlow::Continue(()) + } + + fn load32_s(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg) { + let ptr = state[ptr].get_ptr::(); + let val = unsafe { ptr::read_unaligned(ptr) }; + state[dst].set_i64(i64::from(val)); + ControlFlow::Continue(()) + } + + fn load64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg) { + let ptr = state[ptr].get_ptr::(); + let val = unsafe { ptr::read_unaligned(ptr) }; + state[dst].set_u64(val); + ControlFlow::Continue(()) + } + + fn load32_u_offset8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i8) { + let val = unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset.into()) + .read_unaligned() + }; + state[dst].set_u64(u64::from(val)); + ControlFlow::Continue(()) + } + + fn load32_s_offset8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i8) { + let val = unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset.into()) + .read_unaligned() + }; + state[dst].set_i64(i64::from(val)); + ControlFlow::Continue(()) + } + + fn load32_u_offset64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i64) { + let val = unsafe { + state[ptr] + .get_ptr::() + 
.byte_offset(offset as isize) + .read_unaligned() + }; + state[dst].set_u64(u64::from(val)); + ControlFlow::Continue(()) + } + + fn load32_s_offset64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i64) { + let val = unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset as isize) + .read_unaligned() + }; + state[dst].set_i64(i64::from(val)); + ControlFlow::Continue(()) + } + + fn load64_offset8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i8) { + let val = unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset.into()) + .read_unaligned() + }; + state[dst].set_u64(val); + ControlFlow::Continue(()) + } + + fn load64_offset64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, ptr: XReg, offset: i64) { + let val = unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset as isize) + .read_unaligned() + }; + state[dst].set_u64(val); + ControlFlow::Continue(()) + } + + fn store32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, src: XReg) { + let ptr = state[ptr].get_ptr::(); + let val = state[src].get_u32(); + unsafe { + ptr::write_unaligned(ptr, val); + } + ControlFlow::Continue(()) + } + + fn store64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, src: XReg) { + let ptr = state[ptr].get_ptr::(); + let val = state[src].get_u64(); + unsafe { + ptr::write_unaligned(ptr, val); + } + ControlFlow::Continue(()) + } + + fn store32_offset8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, offset: i8, src: XReg) { + let val = state[src].get_u32(); + unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset.into()) + .write_unaligned(val); + } + ControlFlow::Continue(()) + } + + fn store64_offset8(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, offset: i8, src: XReg) { + let val = state[src].get_u64(); + unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset.into()) + .write_unaligned(val); + } + ControlFlow::Continue(()) + } + + fn store32_offset64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, offset: i64, src: XReg) { + let val = state[src].get_u32(); + unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset as isize) + .write_unaligned(val); + } + ControlFlow::Continue(()) + } + + fn store64_offset64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, ptr: XReg, offset: i64, src: XReg) { + let val = state[src].get_u64(); + unsafe { + state[ptr] + .get_ptr::() + .byte_offset(offset as isize) + .write_unaligned(val); + } + ControlFlow::Continue(()) + } + + fn xpush32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, src: XReg) { + state.push(state[src].get_u32()); + ControlFlow::Continue(()) + } + + fn xpush32_many(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, srcs: RegSet) { + for src in srcs { + state.push(state[src].get_u32()); + } + ControlFlow::Continue(()) + } + + fn xpush64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, src: XReg) { + state.push(state[src].get_u64()); + ControlFlow::Continue(()) + } + + fn xpush64_many(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, srcs: RegSet) { + for src in srcs { + state.push(state[src].get_u64()); + } + ControlFlow::Continue(()) + } + + fn xpop32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg) { + let val = state.pop(); + state[dst].set_u32(val); + ControlFlow::Continue(()) + } + + fn xpop32_many(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dsts: RegSet) { + for dst in 
dsts.into_iter().rev() { + let val = state.pop(); + state[dst].set_u32(val); + } + ControlFlow::Continue(()) + } + + fn xpop64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg) { + let val = state.pop(); + state[dst].set_u64(val); + ControlFlow::Continue(()) + } + + fn xpop64_many(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dsts: RegSet) { + for dst in dsts.into_iter().rev() { + let val = state.pop(); + state[dst].set_u64(val); + } + ControlFlow::Continue(()) + } + + fn push_frame(state: &mut MachineState, pc: &mut UnsafeBytecodeStream) { + state.push(state[XReg::lr].get_ptr::()); + state.push(state[XReg::fp].get_ptr::()); + state[XReg::fp] = state[XReg::sp]; + ControlFlow::Continue(()) + } + + fn pop_frame(state: &mut MachineState, pc: &mut UnsafeBytecodeStream) { + state[XReg::sp] = state[XReg::fp]; + let fp = state.pop(); + let lr = state.pop(); + state[XReg::fp].set_ptr::(fp); + state[XReg::lr].set_ptr::(lr); + ControlFlow::Continue(()) + } + + fn bitcast_int_from_float_32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, src: FReg) { + let val = state[src].get_f32(); + state[dst].set_u64(u32::from_ne_bytes(val.to_ne_bytes()).into()); + ControlFlow::Continue(()) + } + + fn bitcast_int_from_float_64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: XReg, src: FReg) { + let val = state[src].get_f64(); + state[dst].set_u64(u64::from_ne_bytes(val.to_ne_bytes())); + ControlFlow::Continue(()) + } + + fn bitcast_float_from_int_32(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: FReg, src: XReg) { + let val = state[src].get_u32(); + state[dst].set_f32(f32::from_ne_bytes(val.to_ne_bytes())); + ControlFlow::Continue(()) + } + + fn bitcast_float_from_int_64(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, dst: FReg, src: XReg) { + let val = state[src].get_u64(); + state[dst].set_f64(f64::from_ne_bytes(val.to_ne_bytes())); + ControlFlow::Continue(()) + } + + fn extended(state: &mut MachineState, pc: &mut UnsafeBytecodeStream, opcode: ExtendedOpcode) { + match opcode { + ExtendedOpcode::Nop => ControlFlow::Continue(()), + ExtendedOpcode::Trap => ControlFlow::Break(Done::Trap(pc.as_ptr())), + ExtendedOpcode::GetSp => { + let (dst,) = crate::decode::unwrap_uninhabited(crate::decode::operands::get_sp(pc)); + let sp = state[XReg::sp].get_u64(); + state[dst].set_u64(sp); + ControlFlow::Continue(()) + } + } + } +} diff --git a/pulley/src/lib.rs b/pulley/src/lib.rs index f792b83df727..b80a5646ec9b 100644 --- a/pulley/src/lib.rs +++ b/pulley/src/lib.rs @@ -1,6 +1,8 @@ //! The pulley bytecode for fast interpreters. #![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![cfg_attr(pulley_tail_calls, feature(explicit_tail_calls))] +#![cfg_attr(pulley_tail_calls, allow(incomplete_features, unstable_features))] #![deny(missing_docs)] #![no_std]
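One closing note for anyone trying this out: `pulley_tail_calls` is a raw `--cfg` flag passed through `RUSTFLAGS` (as in the CI step at the top of this patch), not a Cargo feature, which is why the workspace `Cargo.toml` has to teach the `unexpected_cfgs` lint about it. A minimal sketch of the same gating pattern, with an invented function name, that compiles on stable either way:

```rust
// Two bodies for one function, selected at compile time by the custom cfg.
// Build with `RUSTFLAGS="--cfg pulley_tail_calls" cargo run` to flip the
// switch; `dispatch_strategy` is an invented name for illustration.
#[cfg(not(pulley_tail_calls))]
fn dispatch_strategy() -> &'static str {
    "stable: each handler returns the next handler to a driver loop"
}

#[cfg(pulley_tail_calls)]
fn dispatch_strategy() -> &'static str {
    "nightly: handlers tail call each other directly"
}

fn main() {
    println!("{}", dispatch_strategy());
}
```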