diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 96d593d8b695f..756b4601cff8f 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -921,14 +921,22 @@ bool BasicBlock::isEmpty()
 {
     if (!IsLIR())
     {
-        return (this->FirstNonPhiDef() == nullptr);
+        for (Statement* stmt : Statements())
+        {
+            if (!stmt->GetRootNode()->OperIs(GT_PHI, GT_NOP))
+            {
+                return false;
+            }
+        }
     }
-
-    for (GenTree* node : LIR::AsRange(this).NonPhiNodes())
+    else
     {
-        if (node->OperGet() != GT_IL_OFFSET)
+        for (GenTree* node : LIR::AsRange(this).NonPhiNodes())
         {
-            return false;
+            if (node->OperGet() != GT_IL_OFFSET)
+            {
+                return false;
+            }
         }
     }
 
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index bcb03bc56e5fe..32c51ab3b6173 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -787,11 +787,11 @@ Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkD
     {
         // If we're assigning to a ref typed local that has one definition,
         // we may be able to sharpen the type for the local.
-        GenTree* lhs = tree->gtGetOp1()->gtEffectiveVal();
+        GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal();
 
-        if ((lhs->OperGet() == GT_LCL_VAR) && (lhs->TypeGet() == TYP_REF))
+        if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF))
         {
-            const unsigned lclNum = lhs->AsLclVarCommon()->GetLclNum();
+            const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum();
             LclVarDsc* lcl = comp->lvaGetDesc(lclNum);
 
             if (lcl->lvSingleDef)
@@ -807,6 +807,20 @@ Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkD
                 }
             }
         }
+
+        // If we created a self-assignment (say because we are sharing return spill temps)
+        // we can remove it.
+        //
+        GenTree* const lhs = tree->gtGetOp1();
+        GenTree* const rhs = tree->gtGetOp2();
+        if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs))
+        {
+            comp->gtUpdateNodeSideEffects(tree);
+            assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG);
+            JITDUMP("... removing self-assignment\n");
+            DISPTREE(tree);
+            tree->gtBashToNOP();
+        }
     }
     else if (tree->OperGet() == GT_JTRUE)
     {
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 1a1224d096d62..c1f6d5d1c8ff3 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -14603,14 +14603,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                         // we're able to handle shared returns.
                         if (impInlineInfo->iciCall->IsImplicitTailCall())
                         {
-                            JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
+                            JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
                         }
 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
                     }
                     else
                     {
-                        JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
+                        JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
                     }
                 }
@@ -20625,6 +20625,17 @@ void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call,
     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
     call->gtInlineCandidateInfo = inlineCandidateInfo;
 
+    // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate
+    // is also a tail call candidate, it can use the same return spill temp.
+    //
+    if (compIsForInlining() && call->CanTailCall() &&
+        (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM))
+    {
+        inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp;
+        JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call),
+                inlineCandidateInfo->preexistingSpillTemp);
+    }
+
     // Mark the call node as inline candidate.
     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
 
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index 8699d898cf368..538ae2b42479c 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -644,8 +644,44 @@ class IndirectCallTransformer
 
             if (origCall->TypeGet() != TYP_VOID)
             {
-                returnTemp = compiler->lvaGrabTemp(false DEBUGARG("guarded devirt return temp"));
-                JITDUMP("Reworking call(s) to return value via a new temp V%02u\n", returnTemp);
+                // If there's a spill temp already associated with this inline candidate,
+                // use that instead of allocating a new temp.
+                //
+                returnTemp = inlineInfo->preexistingSpillTemp;
+
+                if (returnTemp != BAD_VAR_NUM)
+                {
+                    JITDUMP("Reworking call(s) to return value via an existing return temp V%02u\n", returnTemp);
+
+                    // We will be introducing multiple defs for this temp, so make sure
+                    // it is no longer marked as single def.
+                    //
+                    // Otherwise, we could make an incorrect type deduction. Say the
+                    // original call site returns a B, but after we devirtualize along the
+                    // GDV happy path we see that method returns a D. We can't then assume that
+                    // the return temp is type D, because we don't know what type the fallback
+                    // path returns. So we have to stick with the current type for B as the
+                    // return type.
+                    //
+                    // Note local vars always live in the root method's symbol table. So we
+                    // need to use the root compiler for lookup here.
+                    //
+                    LclVarDsc* const returnTempLcl = compiler->impInlineRoot()->lvaGetDesc(returnTemp);
+
+                    if (returnTempLcl->lvSingleDef == 1)
+                    {
+                        // In this case it's ok if we already updated the type assuming single def,
+                        // we just don't want any further updates.
+                        //
+                        JITDUMP("Return temp V%02u is no longer a single def temp\n", returnTemp);
+                        returnTempLcl->lvSingleDef = 0;
+                    }
+                }
+                else
+                {
+                    returnTemp = compiler->lvaGrabTemp(false DEBUGARG("guarded devirt return temp"));
+                    JITDUMP("Reworking call(s) to return value via a new temp V%02u\n", returnTemp);
+                }
 
                 if (varTypeIsStruct(origCall))
                 {
@@ -720,11 +756,12 @@ class IndirectCallTransformer
             assert(!call->IsVirtual());
 
             // Re-establish this call as an inline candidate.
-            GenTree* oldRetExpr = inlineInfo->retExpr;
-            // Todo -- pass this back from impdevirt...?
-            inlineInfo->clsHandle = compiler->info.compCompHnd->getMethodClass(methodHnd);
-            inlineInfo->exactContextHnd = context;
-            call->gtInlineCandidateInfo = inlineInfo;
+            //
+            GenTree* oldRetExpr = inlineInfo->retExpr;
+            inlineInfo->clsHandle = compiler->info.compCompHnd->getMethodClass(methodHnd);
+            inlineInfo->exactContextHnd = context;
+            inlineInfo->preexistingSpillTemp = returnTemp;
+            call->gtInlineCandidateInfo = inlineInfo;
 
             // Add the call.
             compiler->fgNewStmtAtEnd(thenBlock, call);
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 8fbce8a586e79..ed47522a61c89 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -7733,7 +7733,31 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
             //
             if (nextBlock->bbJumpKind != BBJ_RETURN)
             {
-                BasicBlock* const nextNextBlock = nextBlock->GetUniqueSucc();
+                BasicBlock* nextNextBlock = nextBlock->GetUniqueSucc();
+
+                // Check if we have a sequence of GT_ASG blocks where the same variable is assigned
+                // to temp locals over and over.
+                if (nextNextBlock->bbJumpKind != BBJ_RETURN)
+                {
+                    // Make sure the block has a single statement
+                    assert(nextBlock->firstStmt() == nextBlock->lastStmt());
+                    // And the root node is "ASG(LCL_VAR, LCL_VAR)"
+                    GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
+                    assert(asgNode->OperIs(GT_ASG));
+
+                    unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
+
+                    while (nextNextBlock->bbJumpKind != BBJ_RETURN)
+                    {
+                        assert(nextNextBlock->firstStmt() == nextNextBlock->lastStmt());
+                        asgNode = nextNextBlock->firstStmt()->GetRootNode();
+                        assert(asgNode->OperIs(GT_ASG));
+                        assert(lcl == asgNode->gtGetOp2()->AsLclVarCommon()->GetLclNum());
+                        lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
+                        nextNextBlock = nextNextBlock->GetUniqueSucc();
+                    }
+                }
+
                 assert(nextNextBlock->bbJumpKind == BBJ_RETURN);
 
                 if (nextNextBlock->hasProfileWeight())
diff --git a/src/tests/JIT/opt/Devirtualization/GitHub_50492.cs b/src/tests/JIT/opt/Devirtualization/GitHub_50492.cs
new file mode 100644
index 0000000000000..cdcb0bf584458
--- /dev/null
+++ b/src/tests/JIT/opt/Devirtualization/GitHub_50492.cs
@@ -0,0 +1,42 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Runtime.CompilerServices;
+using System.Threading;
+
+class Base
+{
+    public virtual int Test(Base b1, Base b2, bool p) => 0;
+
+    public virtual int Foo() => 1;
+}
+
+class ClassA : Base
+{
+    public override int Test(Base b1, Base b2, bool p) => p ? b2.Foo() : 42;
+}
+
+class ClassB : ClassA
+{
+    public override int Test(Base b1, Base b2, bool p) => b1.Test(b1, b2, p);
+}
+
+class Program
+{
+    public static int Main()
+    {
+        for (int i = 0; i < 100; i++)
+        {
+            // Make sure it doesn't assert, see https://github.com/dotnet/runtime/issues/50492
+            Test(new ClassB(), new ClassA(), new Base(), true);
+            Thread.Sleep(15);
+        }
+        return 100;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static int Test(Base b0, Base b1, Base b2, bool p)
+    {
+        return b0.Test(b1, b2, p);
+    }
+}
diff --git a/src/tests/JIT/opt/Devirtualization/GitHub_50492.csproj b/src/tests/JIT/opt/Devirtualization/GitHub_50492.csproj
new file mode 100644
index 0000000000000..9788c6cc864ef
--- /dev/null
+++ b/src/tests/JIT/opt/Devirtualization/GitHub_50492.csproj
@@ -0,0 +1,11 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+  </PropertyGroup>
+  <PropertyGroup>
+    <Optimize>True</Optimize>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="$(MSBuildProjectName).cs" />
+  </ItemGroup>
+</Project>