Add integer binary instructions to Winch (#6538)
* Add integer binary instructions to Winch

* Use handle_invalid_operand_combination and load_constant
jeffcharles authored Jun 7, 2023
1 parent 7229ba9 commit f5fafba
Showing 67 changed files with 1,862 additions and 16 deletions.
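
Note on the new ShiftKind type: the shift and rotate support below is threaded through a shared ShiftKind enum defined in winch/codegen/src/masm.rs (changed by this commit, but not among the files excerpted here). Its variants can be read off the From<ShiftKind> conversion added to the x64 assembler; the sketch below is only for orientation, and its visibility and doc comments are assumptions.

// Sketch of the ShiftKind enum; inferred from the conversion in
// winch/codegen/src/isa/x64/asm.rs further down, not copied from the commit.
pub(crate) enum ShiftKind {
    /// Shift left (i32.shl / i64.shl).
    Shl,
    /// Signed (arithmetic) shift right (i32.shr_s / i64.shr_s).
    ShrS,
    /// Unsigned (logical) shift right (i32.shr_u / i64.shr_u).
    ShrU,
    /// Rotate left (i32.rotl / i64.rotl).
    Rotl,
    /// Rotate right (i32.rotr / i64.rotr).
    Rotr,
}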
16 changes: 16 additions & 0 deletions fuzz/fuzz_targets/differential.rs
@@ -328,6 +328,22 @@ fn winch_supports_module(module: &[u8]) -> bool {
| I64GeU { .. }
| I32Eqz { .. }
| I64Eqz { .. }
| I32And { .. }
| I64And { .. }
| I32Or { .. }
| I64Or { .. }
| I32Xor { .. }
| I64Xor { .. }
| I32Shl { .. }
| I64Shl { .. }
| I32ShrS { .. }
| I64ShrS { .. }
| I32ShrU { .. }
| I64ShrU { .. }
| I32Rotl { .. }
| I64Rotl { .. }
| I32Rotr { .. }
| I64Rotr { .. }
| LocalGet { .. }
| LocalSet { .. }
| Call { .. }
21 changes: 20 additions & 1 deletion winch/codegen/src/isa/aarch64/masm.rs
@@ -8,7 +8,10 @@ use crate::{
abi::{self, local::LocalSlot},
codegen::CodeGenContext,
isa::reg::Reg,
masm::{CalleeKind, CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind},
masm::{
CalleeKind, CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind,
ShiftKind,
},
};
use cranelift_codegen::{settings, Final, MachBufferFinalized};

@@ -179,6 +182,22 @@ impl Masm for MacroAssembler {
self.asm.mul(rhs.into(), lhs.into(), dst.into(), size);
}

fn and(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
todo!()
}

fn or(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
todo!()
}

fn xor(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
todo!()
}

fn shift(&mut self, _context: &mut CodeGenContext, _kind: ShiftKind, _size: OperandSize) {
todo!()
}

fn div(&mut self, _context: &mut CodeGenContext, _kind: DivKind, _size: OperandSize) {
todo!()
}
176 changes: 163 additions & 13 deletions winch/codegen/src/isa/x64/asm.rs
@@ -2,7 +2,7 @@

use crate::{
isa::reg::Reg,
masm::{CalleeKind, CmpKind, DivKind, OperandSize, RemKind},
masm::{CalleeKind, CmpKind, DivKind, OperandSize, RemKind, ShiftKind},
};
use cranelift_codegen::{
entity::EntityRef,
@@ -11,7 +11,8 @@ use cranelift_codegen::{
isa::x64::{
args::{
self, AluRmiROpcode, Amode, CmpOpcode, DivSignedness, ExtMode, FromWritableReg, Gpr,
GprMem, GprMemImm, RegMem, RegMemImm, SyntheticAmode, WritableGpr, CC,
GprMem, GprMemImm, Imm8Gpr, Imm8Reg, RegMem, RegMemImm,
ShiftKind as CraneliftShiftKind, SyntheticAmode, WritableGpr, CC,
},
settings as x64_settings, CallInfo, EmitInfo, EmitState, Inst,
},
@@ -59,6 +60,12 @@ impl From<Reg> for GprMemImm {
}
}

impl From<Reg> for Imm8Gpr {
fn from(value: Reg) -> Self {
Imm8Gpr::new(Imm8Reg::Reg { reg: value.into() }).expect("valid Imm8Gpr")
}
}

impl From<OperandSize> for args::OperandSize {
fn from(size: OperandSize) -> Self {
match size {
@@ -94,6 +101,18 @@ impl From<CmpKind> for CC {
}
}

impl From<ShiftKind> for CraneliftShiftKind {
fn from(value: ShiftKind) -> Self {
match value {
ShiftKind::Shl => CraneliftShiftKind::ShiftLeft,
ShiftKind::ShrS => CraneliftShiftKind::ShiftRightArithmetic,
ShiftKind::ShrU => CraneliftShiftKind::ShiftRightLogical,
ShiftKind::Rotl => CraneliftShiftKind::RotateLeft,
ShiftKind::Rotr => CraneliftShiftKind::RotateRight,
}
}
}

/// Low level assembler implementation for x64.
pub(crate) struct Assembler {
/// The machine instruction buffer.
@@ -292,6 +311,148 @@ impl Assembler {
}
}

/// Logical and instruction variants.
pub fn and(&mut self, src: Operand, dst: Operand, size: OperandSize) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => {
if let Ok(val) = i32::try_from(*imm) {
self.and_ir(val, *dst, size);
} else {
let scratch = regs::scratch();
self.load_constant(imm, scratch, size);
self.and_rr(scratch, *dst, size);
}
}
(Operand::Reg(src), Operand::Reg(dst)) => self.and_rr(*src, *dst, size),
_ => Self::handle_invalid_operand_combination(src, dst),
}
}

fn and_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::And,
src1: dst.into(),
src2: src.into(),
dst: dst.into(),
});
}

fn and_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) {
let imm = RegMemImm::imm(imm as u32);

self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::And,
src1: dst.into(),
src2: GprMemImm::new(imm).expect("valid immediate"),
dst: dst.into(),
});
}

/// Logical or instruction variants.
pub fn or(&mut self, src: Operand, dst: Operand, size: OperandSize) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => {
if let Ok(val) = i32::try_from(*imm) {
self.or_ir(val, *dst, size);
} else {
let scratch = regs::scratch();
self.load_constant(imm, scratch, size);
self.or_rr(scratch, *dst, size);
}
}
(Operand::Reg(src), Operand::Reg(dst)) => self.or_rr(*src, *dst, size),
_ => Self::handle_invalid_operand_combination(src, dst),
}
}

fn or_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Or,
src1: dst.into(),
src2: src.into(),
dst: dst.into(),
});
}

fn or_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) {
let imm = RegMemImm::imm(imm as u32);

self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Or,
src1: dst.into(),
src2: GprMemImm::new(imm).expect("valid immediate"),
dst: dst.into(),
});
}

/// Logical exclusive or instruction variants.
pub fn xor(&mut self, src: Operand, dst: Operand, size: OperandSize) {
match &(src, dst) {
(Operand::Imm(imm), Operand::Reg(dst)) => {
if let Ok(val) = i32::try_from(*imm) {
self.xor_ir(val, *dst, size);
} else {
let scratch = regs::scratch();
self.load_constant(imm, scratch, size);
self.xor_rr(scratch, *dst, size);
}
}
(Operand::Reg(src), Operand::Reg(dst)) => self.xor_rr(*src, *dst, size),
_ => Self::handle_invalid_operand_combination(src, dst),
}
}

/// Logical exclusive or with registers.
pub fn xor_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Xor,
src1: dst.into(),
src2: src.into(),
dst: dst.into(),
});
}

fn xor_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) {
let imm = RegMemImm::imm(imm as u32);

self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Xor,
src1: dst.into(),
src2: GprMemImm::new(imm).expect("valid immediate"),
dst: dst.into(),
});
}

/// Shift with register and register.
pub fn shift_rr(&mut self, src: Reg, dst: Reg, kind: ShiftKind, size: OperandSize) {
self.emit(Inst::ShiftR {
size: size.into(),
kind: kind.into(),
src: dst.into(),
num_bits: src.into(),
dst: dst.into(),
});
}

/// Shift with immediate and register.
pub fn shift_ir(&mut self, imm: u8, dst: Reg, kind: ShiftKind, size: OperandSize) {
let imm = imm.into();

self.emit(Inst::ShiftR {
size: size.into(),
kind: kind.into(),
src: dst.into(),
num_bits: Imm8Gpr::new(imm).expect("valid immediate"),
dst: dst.into(),
});
}

/// Signed/unsigned division.
///
/// Emits a sequence of instructions to ensure the correctness of
Expand Down Expand Up @@ -469,17 +630,6 @@ impl Assembler {
});
}

/// Logical exclusive or with registers.
pub fn xor_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) {
self.emit(Inst::AluRmiR {
size: size.into(),
op: AluRmiROpcode::Xor,
src1: dst.into(),
src2: src.into(),
dst: dst.into(),
});
}

/// Compare two operands and set status register flags.
pub fn cmp(&mut self, src: Operand, dst: Operand, size: OperandSize) {
match &(src, dst) {
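
The helpers above all use x64's destructive two-operand form, which is why AluRmiR and ShiftR are constructed with the destination register doubling as the first source: each call computes dst = dst <op> src. As a reference for the shift and rotate behaviour in particular, here is a small stand-alone Rust sketch (illustrative only, not Winch code) of what the 32-bit variants compute; x64 masks the count to the operand width, which lines up with wasm's modulo-width shift semantics.

// Reference semantics for the 32-bit shift/rotate helpers; these free
// functions do not appear in the commit.
fn shl32(dst: u32, count: u32) -> u32 {
    // x64 `shl r32, cl` only uses the low 5 bits of the count.
    dst << (count & 31)
}

fn shr_s32(dst: i32, count: u32) -> i32 {
    // Arithmetic right shift preserves the sign bit.
    dst >> (count & 31)
}

fn rotr32(dst: u32, count: u32) -> u32 {
    // Rotation is inherently modulo the bit width.
    dst.rotate_right(count)
}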
78 changes: 77 additions & 1 deletion winch/codegen/src/isa/x64/masm.rs
@@ -4,7 +4,9 @@ use super::{
asm::{Assembler, Operand},
regs::{self, rbp, rsp},
};
use crate::masm::{CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind};
use crate::masm::{
CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind, ShiftKind,
};
use crate::{
abi::{self, align_to, calculate_frame_adjustment, LocalSlot},
codegen::CodeGenContext,
@@ -193,6 +195,80 @@ impl Masm for MacroAssembler {
self.asm.mul(src, dst, size);
}

fn and(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
let (src, dst): (Operand, Operand) = if dst == lhs {
(rhs.into(), dst.into())
} else {
panic!(
"the destination and first source argument must be the same, dst={:?}, lhs={:?}",
dst, lhs
);
};

self.asm.and(src, dst, size);
}

fn or(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
let (src, dst): (Operand, Operand) = if dst == lhs {
(rhs.into(), dst.into())
} else {
panic!(
"the destination and first source argument must be the same, dst={:?}, lhs={:?}",
dst, lhs
);
};

self.asm.or(src, dst, size);
}

fn xor(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
let (src, dst): (Operand, Operand) = if dst == lhs {
(rhs.into(), dst.into())
} else {
panic!(
"the destination and first source argument must be the same, dst={:?}, lhs={:?}",
dst, lhs
);
};

self.asm.xor(src, dst, size);
}

fn shift(&mut self, context: &mut CodeGenContext, kind: ShiftKind, size: OperandSize) {
let top = context.stack.peek().expect("value at stack top");

if size == OperandSize::S32 && top.is_i32_const() {
let val = context
.stack
.pop_i32_const()
.expect("i32 const value at stack top");
let reg = context.pop_to_reg(self, None, size);

self.asm.shift_ir(val as u8, reg, kind, size);

context.stack.push(Val::reg(reg));
} else if size == OperandSize::S64 && top.is_i64_const() {
let val = context
.stack
.pop_i64_const()
.expect("i64 const value at stack top");
let reg = context.pop_to_reg(self, None, size);

self.asm.shift_ir(val as u8, reg, kind, size);

context.stack.push(Val::reg(reg));
} else {
// Number of bits to shift must be in the CL register.
let src = context.pop_to_reg(self, Some(regs::rcx()), size);
let dst = context.pop_to_reg(self, None, size);

self.asm.shift_rr(src.into(), dst.into(), kind, size);

context.regalloc.free_gpr(src);
context.stack.push(Val::reg(dst));
}
}

fn div(&mut self, context: &mut CodeGenContext, kind: DivKind, size: OperandSize) {
// Allocate rdx:rax.
let rdx = context.gpr(regs::rdx(), self);
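
Not shown in this excerpt is the operator-visitor side of the commit (67 files changed in total), which presumably forwards the new wasm operators to the MacroAssembler hooks defined above. A minimal sketch of that dispatch for the shift case, assuming wasmparser-style visit_* method names; the CodeGen type, its masm/context fields, and the method names are assumptions rather than lines from the diff.

// Hypothetical visitor glue; the real implementation may route these through a
// shared helper or macro instead of hand-written methods.
impl<'a, M: Masm> CodeGen<'a, M> {
    fn visit_i32_shl(&mut self) {
        self.masm.shift(&mut self.context, ShiftKind::Shl, OperandSize::S32);
    }

    fn visit_i64_rotr(&mut self) {
        self.masm.shift(&mut self.context, ShiftKind::Rotr, OperandSize::S64);
    }
}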