Update mono to support Unsafe.BitCast
tannergooding committed Jun 28, 2024
1 parent 98e7570 commit c8b6e0a
Showing 6 changed files with 330 additions and 11 deletions.
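For orientation: Unsafe.BitCast<TFrom, TTo> (and the BitConverter *Bits helpers touched below) reinterpret the raw bits of a value as another type of the same size, with no numeric conversion. The new interpreter opcodes and JIT moves in this commit implement that reinterpretation. A minimal stand-alone sketch of the semantics in plain C (not mono code; memcpy is the strict-aliasing-safe way to express a bit cast, and compilers typically lower it to a single move):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int32_t bitcast_i4_r4 (float value)    /* float -> int32, same bits */
{
    int32_t result;
    memcpy (&result, &value, sizeof (result));
    return result;
}

static float bitcast_r4_i4 (int32_t value)    /* int32 -> float, same bits */
{
    float result;
    memcpy (&result, &value, sizeof (result));
    return result;
}

int main (void)
{
    printf ("0x%08X\n", (unsigned) bitcast_i4_r4 (1.0f));  /* 0x3F800000, the IEEE 754 encoding of 1.0f */
    printf ("%f\n", bitcast_r4_i4 (0x3F800000));            /* prints 1.000000 */
    return 0;
}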
5 changes: 5 additions & 0 deletions src/mono/browser/runtime/jiterpreter-tables.ts
@@ -45,6 +45,11 @@ export const unopTable: { [opcode: number]: OpRec3 | undefined } = {
[MintOpcode.MINT_NEG_R4]: [WasmOpcode.f32_neg, WasmOpcode.f32_load, WasmOpcode.f32_store],
[MintOpcode.MINT_NEG_R8]: [WasmOpcode.f64_neg, WasmOpcode.f64_load, WasmOpcode.f64_store],

[MintOpcode.MINT_BITCAST_I4_R4]: [WasmOpcode.i32_reinterpret_f32, WasmOpcode.f32_load, WasmOpcode.i32_store],
[MintOpcode.MINT_BITCAST_I8_R8]: [WasmOpcode.i64_reinterpret_f64, WasmOpcode.f64_load, WasmOpcode.i64_store],
[MintOpcode.MINT_BITCAST_R4_I4]: [WasmOpcode.f32_reinterpret_i32, WasmOpcode.i32_load, WasmOpcode.f32_store],
[MintOpcode.MINT_BITCAST_R8_I8]: [WasmOpcode.f64_reinterpret_i64, WasmOpcode.i64_load, WasmOpcode.f64_store],

[MintOpcode.MINT_CONV_R4_I4]: [WasmOpcode.f32_convert_s_i32, WasmOpcode.i32_load, WasmOpcode.f32_store],
[MintOpcode.MINT_CONV_R8_I4]: [WasmOpcode.f64_convert_s_i32, WasmOpcode.i32_load, WasmOpcode.f64_store],
[MintOpcode.MINT_CONV_R_UN_I4]: [WasmOpcode.f64_convert_u_i32, WasmOpcode.i32_load, WasmOpcode.f64_store],
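Each unopTable row above is an OpRec3 triple: the Wasm operator to apply, the load opcode for the source interpreter local, and the store opcode for the result, so a MINT_BITCAST_I4_R4 trace becomes f32.load / i32.reinterpret_f32 / i32.store. A hedged C analogue of that three-step sequence (the locals pointer and byte offsets are illustrative, not the jiterpreter's actual codegen):

#include <stdint.h>
#include <string.h>

static void bitcast_i4_r4_step_by_step (uint8_t *locals, uint32_t dst, uint32_t src)
{
    float f;
    memcpy (&f, locals + src, sizeof (f));         /* f32.load the source local   */
    int32_t bits;
    memcpy (&bits, &f, sizeof (bits));             /* i32.reinterpret_f32         */
    memcpy (locals + dst, &bits, sizeof (bits));   /* i32.store to the dest local */
}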
16 changes: 16 additions & 0 deletions src/mono/mono/mini/interp/interp.c
@@ -5556,6 +5556,22 @@ MINT_IN_CASE(MINT_BRTRUE_I8_SP) ZEROP_SP(gint64, !=); MINT_IN_BREAK;
LOCAL_VAR (ip [1], gint64) = ~ LOCAL_VAR (ip [2], gint64);
ip += 3;
MINT_IN_BREAK;
MINT_IN_CASE(MINT_BITCAST_I4_R4)
memcpy (&LOCAL_VAR (ip [1], gint32), &LOCAL_VAR (ip [2], float), sizeof (float));
ip += 3;
MINT_IN_BREAK;
MINT_IN_CASE(MINT_BITCAST_I8_R8)
memcpy (&LOCAL_VAR (ip [1], gint64), &LOCAL_VAR (ip [2], double), sizeof (double));
ip += 3;
MINT_IN_BREAK;
MINT_IN_CASE(MINT_BITCAST_R4_I4)
memcpy (&LOCAL_VAR (ip [1], float), &LOCAL_VAR (ip [2], gint32), sizeof (gint32));
ip += 3;
MINT_IN_BREAK;
MINT_IN_CASE(MINT_BITCAST_R8_I8)
memcpy (&LOCAL_VAR (ip [1], double), &LOCAL_VAR (ip [2], gint64), sizeof (gint64));
ip += 3;
MINT_IN_BREAK;
MINT_IN_CASE(MINT_CONV_I1_I4)
// FIXME read casted var directly and remove redundant conv opcodes
LOCAL_VAR (ip [1], gint32) = (gint8)LOCAL_VAR (ip [2], gint32);
5 changes: 5 additions & 0 deletions src/mono/mono/mini/interp/mintops.def
@@ -548,6 +548,11 @@ OPDEF(MINT_NEG_R8, "neg.r8", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_NOT_I4, "not.i4", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_NOT_I8, "not.i8", 3, 1, 1, MintOpNoArgs)

OPDEF(MINT_BITCAST_I4_R4, "bitcast.i4.r4", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_BITCAST_I8_R8, "bitcast.i8.r8", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_BITCAST_R4_I4, "bitcast.r4.i4", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_BITCAST_R8_I8, "bitcast.r8.i8", 3, 1, 1, MintOpNoArgs)

OPDEF(MINT_CONV_R_UN_I4, "conv.r.un.i4", 3, 1, 1, MintOpNoArgs)
OPDEF(MINT_CONV_R_UN_I8, "conv.r.un.i8", 3, 1, 1, MintOpNoArgs)

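The OPDEF fields above appear to be: opcode symbol, dump name, instruction length in code units, number of destination vars, number of source vars, and operand kind — so each bitcast opcode is three code units (opcode, dreg, sreg) with no immediate data, just like the neighbouring NOT and CONV opcodes. The .def file is consumed X-macro style by the interpreter headers; a minimal sketch of that pattern (the real consumers are mintops.h and friends, and the enum name here is made up for illustration):

/* X-macro sketch: each OPDEF line in mintops.def expands to one enum member. */
#define OPDEF(opcode, name, len, ndregs, nsregs, optype) opcode,
typedef enum {
#include "mintops.def"
    MINT_SKETCH_LASTOP
} MintOpcodeSketch;
#undef OPDEF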
127 changes: 127 additions & 0 deletions src/mono/mono/mini/interp/transform.c
@@ -2138,6 +2138,16 @@ interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClas
return TRUE;
}
}
} else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "BitConverter"))) {
if (!strcmp (tm, "DoubleToInt64Bits") || !strcmp (tm, "DoubleToUInt64Bits")) {
*op = MINT_BITCAST_I8_R8;
} else if (!strcmp (tm, "Int32BitsToSingle") || !strcmp (tm, "UInt32BitsToSingle")) {
*op = MINT_BITCAST_R4_I4;
} else if (!strcmp (tm, "Int64BitsToDouble") || !strcmp (tm, "UInt64BitsToDouble")) {
*op = MINT_BITCAST_R8_I8;
} else if (!strcmp (tm, "SingleToInt32Bits") || !strcmp (tm, "SingleToUInt32Bits")) {
*op = MINT_BITCAST_I4_R4;
}
} else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "Unsafe")) {
if (!strcmp (tm, "AddByteOffset"))
#if SIZEOF_VOID_P == 4
@@ -2155,6 +2165,123 @@ interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClas
return TRUE;
} else if (!strcmp (tm, "AreSame")) {
*op = MINT_CEQ_P;
} else if (!strcmp (tm, "BitCast")) {
MonoGenericContext *ctx = mono_method_get_context (target_method);
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 2);
g_assert (csignature->param_count == 1);

// We explicitly do not handle gsharedvt as it is meant as a slow fallback strategy;
// instead we fall back to the managed implementation, which will do the right thing

MonoType *tfrom = ctx->method_inst->type_argv [0];
if (mini_is_gsharedvt_variable_type (tfrom)) {
return FALSE;
}

MonoType *tto = ctx->method_inst->type_argv [1];
if (mini_is_gsharedvt_variable_type (tto)) {
return FALSE;
}

// The underlying API always throws for reference type inputs, so we
// fall back to the managed implementation to let that handling occur

MonoTypeEnum tfrom_type = tfrom->type;
if (MONO_TYPE_IS_REFERENCE (tfrom)) {
return FALSE;
}

MonoTypeEnum tto_type = tto->type;
if (MONO_TYPE_IS_REFERENCE (tto)) {
return FALSE;
}

// We also always throw for Nullable<T> inputs, so fall back to the
// managed implementation here as well.

MonoClass *tfrom_klass = mono_class_from_mono_type_internal (tfrom);
if (mono_class_is_nullable (tfrom_klass)) {
return FALSE;
}

MonoClass *tto_klass = mono_class_from_mono_type_internal (tto);
if (mono_class_is_nullable (tto_klass)) {
return FALSE;
}

// The same applies when the type sizes do not match, as this will always throw,
// so it's not an expected case and we can fall back to the managed implementation

int tfrom_align, tto_align;
gint32 size = mono_type_size (tfrom, &tfrom_align);

if (size != mono_type_size (tto, &tto_align)) {
return FALSE;
}
g_assert (size < G_MAXUINT16);

// We have several different move opcodes to handle the data depending on the
// source and target types, so detect and optimize the most common ones falling
// back to what is effectively `ReadUnaligned<TTo>(ref As<TFrom, byte>(ref source))`
// for anything that can't be special-cased as a potentially zero-cost move.

bool tfrom_is_primitive_or_enum = false;
if (m_class_is_primitive(tfrom_klass)) {
tfrom_is_primitive_or_enum = true;
} else if (m_class_is_enumtype(tfrom_klass)) {
tfrom_is_primitive_or_enum = true;
tfrom_type = mono_class_enum_basetype_internal(tfrom_klass)->type;
}

bool tto_is_primitive_or_enum = false;
if (m_class_is_primitive(tto_klass)) {
tto_is_primitive_or_enum = true;
} else if (m_class_is_enumtype(tto_klass)) {
tto_is_primitive_or_enum = true;
tto_type = mono_class_enum_basetype_internal(tto_klass)->type;
}

if (tfrom_is_primitive_or_enum && tto_is_primitive_or_enum) {
if (size == 1) {
// FIXME: This doesn't work
//
// *op = MINT_MOV_1;
} else if (size == 2) {
// FIXME: This doesn't work
//
// *op = MINT_MOV_2;
} else if (size == 4) {
if ((tfrom_type == MONO_TYPE_R4) && ((tto_type == MONO_TYPE_I4) || (tto_type == MONO_TYPE_U4))) {
*op = MINT_BITCAST_I4_R4;
} else if ((tto_type == MONO_TYPE_R4) && ((tfrom_type == MONO_TYPE_I4) || (tfrom_type == MONO_TYPE_U4))) {
*op = MINT_BITCAST_R4_I4;
} else {
*op = MINT_MOV_4;
}
} else if (size == 8) {
if ((tfrom_type == MONO_TYPE_R8) && ((tto_type == MONO_TYPE_I8) || (tto_type == MONO_TYPE_U8))) {
*op = MINT_BITCAST_I8_R8;
} else if ((tto_type == MONO_TYPE_R8) && ((tfrom_type == MONO_TYPE_I8) || (tfrom_type == MONO_TYPE_U8))) {
*op = MINT_BITCAST_R8_I8;
} else {
*op = MINT_MOV_8;
}
}
}

if (*op == -1) {
// FIXME: This isn't quite right
//
// interp_add_ins (td, MINT_MOV_VT);
// interp_ins_set_sreg (td->last_ins, td->sp [-1].var);
// push_type_vt (td, tto_klass, size);
// interp_ins_set_dreg (td->last_ins, td->sp [-1].var);
// td->last_ins->data [0] = GINT32_TO_UINT16 (size);
// td->ip++;
// return TRUE;
}
} else if (!strcmp (tm, "ByteOffset")) {
#if SIZEOF_VOID_P == 4
interp_add_ins (td, MINT_SUB_I4);
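For the primitive/enum-to-primitive/enum path, the opcode selection added to interp_handle_intrinsics above reduces to a small decision table on the operand size and which side is a float; sizes 1 and 2 and the non-primitive struct path are still FIXMEs and fall back to the managed implementation. A stand-alone restatement of that dispatch (plain C, opcode names returned as strings purely for illustration; not mono code):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the size/type dispatch above for Unsafe.BitCast; "managed fallback"
   stands for the cases the transform currently leaves to the C# implementation. */
static const char *
select_bitcast_opcode (int size, bool from_is_float, bool to_is_float)
{
    if (size == 4) {
        if (from_is_float && !to_is_float) return "MINT_BITCAST_I4_R4";
        if (to_is_float && !from_is_float) return "MINT_BITCAST_R4_I4";
        return "MINT_MOV_4";
    }
    if (size == 8) {
        if (from_is_float && !to_is_float) return "MINT_BITCAST_I8_R8";
        if (to_is_float && !from_is_float) return "MINT_BITCAST_R8_I8";
        return "MINT_MOV_8";
    }
    return "managed fallback";  /* sizes 1 and 2 are still FIXME in the diff */
}

int main (void)
{
    printf ("%s\n", select_bitcast_opcode (4, true,  false));  /* float -> int:    MINT_BITCAST_I4_R4 */
    printf ("%s\n", select_bitcast_opcode (8, false, true));   /* long  -> double: MINT_BITCAST_R8_I8 */
    printf ("%s\n", select_bitcast_opcode (4, false, false));  /* int   -> uint:   MINT_MOV_4 */
    return 0;
}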
177 changes: 177 additions & 0 deletions src/mono/mono/mini/intrinsics.c
@@ -427,6 +427,40 @@ emit_span_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature
return NULL;
}

static MonoInst*
emit_bitconverter_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;

if (!strcmp (cmethod->name, "DoubleToInt64Bits") || !strcmp (cmethod->name, "DoubleToUInt64Bits")) {
g_assert (fsig->param_count == 1);
int dreg = mono_alloc_dreg (cfg, STACK_I8);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE_F_TO_I8, dreg, args [0]->dreg);
ins->type = STACK_I8;
return ins;
} else if (!strcmp (cmethod->name, "Int32BitsToSingle") || !strcmp (cmethod->name, "UInt32BitsToSingle")) {
g_assert (fsig->param_count == 1);
int dreg = mono_alloc_dreg (cfg, STACK_R4);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE_I4_TO_F, dreg, args [0]->dreg);
ins->type = STACK_R4;
return ins;
} else if (!strcmp (cmethod->name, "Int64BitsToDouble") || !strcmp (cmethod->name, "UInt64BitsToDouble")) {
g_assert (fsig->param_count == 1);
int dreg = mono_alloc_dreg (cfg, STACK_R8);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE_I8_TO_F, dreg, args [0]->dreg);
ins->type = STACK_R8;
return ins;
} else if (!strcmp (cmethod->name, "SingleToInt32Bits") || !strcmp (cmethod->name, "SingleToUInt32Bits")) {
g_assert (fsig->param_count == 1);
int dreg = mono_alloc_dreg (cfg, STACK_I4);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE_F_TO_I4, dreg, args [0]->dreg);
ins->type = STACK_I4;
return ins;
}

return NULL;
}
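Note that each signed/unsigned pair above (for example DoubleToInt64Bits and DoubleToUInt64Bits) maps to the same move opcode: the reinterpretation yields the same 64 bits either way, and only how the caller later interprets them differs. A small stand-alone check of that claim in plain C (not mono code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
    double d = -0.0;              /* sign bit set, everything else zero */
    int64_t  as_i64;
    uint64_t as_u64;
    memcpy (&as_i64, &d, sizeof (d));
    memcpy (&as_u64, &d, sizeof (d));
    /* both lines print 8000000000000000 */
    printf ("%016llX\n%016llX\n", (unsigned long long) as_i64, (unsigned long long) as_u64);
    return 0;
}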

static MonoInst*
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
@@ -488,6 +522,145 @@ emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignatu
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "BitCast")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 2);
g_assert (fsig->param_count == 1);

// We explicitly do not handle gsharedvt as it is meant as a slow fallback strategy;
// instead we fall back to the managed implementation, which will do the right thing

MonoType *tfrom = ctx->method_inst->type_argv [0];
if (mini_is_gsharedvt_variable_type (tfrom)) {
return NULL;
}

MonoType *tto = ctx->method_inst->type_argv [1];
if (mini_is_gsharedvt_variable_type (tto)) {
return NULL;
}

// The underlying API always throws for reference type inputs, so we
// fall back to the managed implementation to let that handling occur

MonoTypeEnum tfrom_type = tfrom->type;
if (MONO_TYPE_IS_REFERENCE (tfrom)) {
return NULL;
}

MonoTypeEnum tto_type = tto->type;
if (MONO_TYPE_IS_REFERENCE (tto)) {
return NULL;
}

// We also always throw for Nullable<T> inputs, so fall back to the
// managed implementation here as well.

MonoClass *tfrom_klass = mono_class_from_mono_type_internal (tfrom);
if (mono_class_is_nullable (tfrom_klass)) {
return NULL;
}

MonoClass *tto_klass = mono_class_from_mono_type_internal (tto);
if (mono_class_is_nullable (tto_klass)) {
return NULL;
}

// The same applies when the type sizes do not match, as this will always throw,
// so it's not an expected case and we can fall back to the managed implementation

int tfrom_align, tto_align;
gint32 size = mono_type_size (tfrom, &tfrom_align);

if (size != mono_type_size (tto, &tto_align)) {
return NULL;
}
g_assert (size < G_MAXUINT16);

// We have several different move opcodes to handle the data depending on the
// source and target types, so detect and optimize the most common ones falling
// back to what is effectively `ReadUnaligned<TTo>(ref As<TFrom, byte>(ref source))`
// for anything that can't be special-cased as a potentially zero-cost move.

guint32 opcode = OP_LDADDR;
MonoStackType tto_stack = STACK_OBJ;

bool tfrom_is_primitive_or_enum = false;
if (m_class_is_primitive(tfrom_klass)) {
tfrom_is_primitive_or_enum = true;
} else if (m_class_is_enumtype(tfrom_klass)) {
tfrom_is_primitive_or_enum = true;
tfrom_type = mono_class_enum_basetype_internal(tfrom_klass)->type;
}

bool tto_is_primitive_or_enum = false;
if (m_class_is_primitive(tto_klass)) {
tto_is_primitive_or_enum = true;
} else if (m_class_is_enumtype(tto_klass)) {
tto_is_primitive_or_enum = true;
tto_type = mono_class_enum_basetype_internal(tto_klass)->type;
}

if (tfrom_is_primitive_or_enum && tto_is_primitive_or_enum) {
if (size == 1) {
// FIXME: This doesn't work
//
// opcode = OP_MOVE;
// tto_stack = STACK_I4;
} else if (size == 2) {
// FIXME: This doesn't work
//
// opcode = OP_MOVE;
// tto_stack = STACK_I4;
} else if (size == 4) {
if ((tfrom_type == MONO_TYPE_R4) && ((tto_type == MONO_TYPE_I4) || (tto_type == MONO_TYPE_U4))) {
opcode = OP_MOVE_F_TO_I4;
tto_stack = STACK_I4;
} else if ((tto_type == MONO_TYPE_R4) && ((tfrom_type == MONO_TYPE_I4) || (tfrom_type == MONO_TYPE_U4))) {
opcode = OP_MOVE_I4_TO_F;
tto_stack = STACK_R4;
} else {
opcode = OP_MOVE;
tto_stack = STACK_I4;
}
} else if (size == 8) {
if ((tfrom_type == MONO_TYPE_R8) && ((tto_type == MONO_TYPE_I8) || (tto_type == MONO_TYPE_U8))) {
opcode = OP_MOVE_F_TO_I8;
tto_stack = STACK_I8;
} else if ((tto_type == MONO_TYPE_R8) && ((tfrom_type == MONO_TYPE_I8) || (tfrom_type == MONO_TYPE_U8))) {
opcode = OP_MOVE_I8_TO_F;
tto_stack = STACK_R8;
} else {
opcode = OP_MOVE;
tto_stack = STACK_I8;
}
}
} else if (mini_class_is_simd (cfg, tfrom_klass) && mini_class_is_simd (cfg, tto_klass)) {
opcode = OP_XMOVE;
tto_stack = STACK_VTYPE;
}

if (opcode == OP_LDADDR) {
// FIXME: This isn't quite right
//
// MonoInst *addr;
// EMIT_NEW_VARLOADA_VREG (cfg, addr, args [0]->dreg, tfrom);
// addr->klass = tfrom_klass;
//
// // We don't need to call mini_get_underlying_type on tto
// // since we have skipped handling for gsharedvt further up
// assert(MONO_TYPE_ISSTRUCT (tto));
//
// return mini_emit_memory_load (cfg, tto, addr, 0, MONO_INST_UNALIGNED);
return NULL;
}

int dreg = mono_alloc_dreg (cfg, tto_stack);
EMIT_NEW_UNALU (cfg, ins, opcode, dreg, args [0]->dreg);
ins->type = tto_stack;
ins->klass = tto_klass;
return ins;
} else if (!strcmp (cmethod->name, "IsAddressLessThan")) {
g_assert (ctx);
g_assert (ctx->method_inst);
@@ -2087,6 +2260,10 @@ mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSign
!strcmp (cmethod_klass_name_space, "System") &&
(!strcmp (cmethod_klass_name, "Span`1") || !strcmp (cmethod_klass_name, "ReadOnlySpan`1"))) {
return emit_span_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
!strcmp (cmethod_klass_name, "BitConverter")) {
return emit_bitconverter_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") &&
!strcmp (cmethod_klass_name, "Unsafe")) {