diff --git a/src/coreclr/jit/jitstd/list.h b/src/coreclr/jit/jitstd/list.h index 734158147610a..f00c159645255 100644 --- a/src/coreclr/jit/jitstd/list.h +++ b/src/coreclr/jit/jitstd/list.h @@ -59,7 +59,6 @@ class list bool operator==(const const_iterator& it) const; bool operator!=(const const_iterator& it) const; const T& operator*() const; - const T* operator&() const; const T* operator->() const; operator const T*() const; @@ -85,7 +84,6 @@ class list bool operator==(const iterator& it); bool operator!=(const iterator& it); T& operator*(); - T* operator&(); T* operator->(); operator T*(); @@ -115,7 +113,6 @@ class list bool operator==(const const_reverse_iterator& it) const; bool operator!=(const const_reverse_iterator& it) const; const T& operator*() const; - const T* operator&() const; const T* operator->() const; operator const T*() const; @@ -142,7 +139,6 @@ class list bool operator==(const reverse_iterator& it); bool operator!=(const reverse_iterator& it); T& operator*(); - T* operator&(); T* operator->(); operator T*(); friend class list::const_reverse_iterator; @@ -970,12 +966,6 @@ T& list::iterator::operator*() return m_pNode->m_value; } -template -T* list::iterator::operator&() -{ - return &(m_pNode->m_value); -} - template T* list::iterator::operator->() { @@ -1062,12 +1052,6 @@ const T& list::const_iterator::operator*() const return m_pNode->m_value; } -template -const T* list::const_iterator::operator&() const -{ - return &(m_pNode->m_value); -} - template const T* list::const_iterator::operator->() const { @@ -1147,12 +1131,6 @@ T& list::reverse_iterator::operator*() return m_pNode->m_value; } -template -T* list::reverse_iterator::operator&() -{ - return &(m_pNode->m_value); -} - template T* list::reverse_iterator::operator->() { @@ -1236,12 +1214,6 @@ const T& list::const_reverse_iterator::operator*() const return m_pNode->m_value; } -template -const T* list::const_reverse_iterator::operator&() const -{ - return &(m_pNode->m_value); -} - template 
const T* list::const_reverse_iterator::operator->() const { diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 841b5bf0e796b..a479af572b644 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -2172,11 +2172,9 @@ void LinearScan::checkLastUses(BasicBlock* block) VARSET_TP computedLive(VarSetOps::MakeCopy(compiler, block->bbLiveOut)); - bool foundDiff = false; - RefPositionReverseIterator reverseIterator = refPositions.rbegin(); - RefPosition* currentRefPosition; - for (currentRefPosition = &reverseIterator; currentRefPosition->refType != RefTypeBB; - reverseIterator++, currentRefPosition = &reverseIterator) + bool foundDiff = false; + RefPositionReverseIterator currentRefPosition = refPositions.rbegin(); + for (; currentRefPosition->refType != RefTypeBB; currentRefPosition++) { // We should never see ParamDefs or ZeroInits within a basic block. assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit); @@ -2235,7 +2233,7 @@ void LinearScan::checkLastUses(BasicBlock* block) } } - assert(reverseIterator != refPositions.rend()); + assert(currentRefPosition != refPositions.rend()); } VARSET_TP liveInNotComputedLive(VarSetOps::Diff(compiler, block->bbLiveIn, computedLive)); @@ -4616,10 +4614,9 @@ void LinearScan::allocateRegisters() bool handledBlockEnd = false; - for (RefPosition& refPositionIterator : refPositions) + for (RefPosition& currentRefPosition : refPositions) { - RefPosition* currentRefPosition = &refPositionIterator; - RefPosition* nextRefPosition = currentRefPosition->nextRefPosition; + RefPosition* nextRefPosition = currentRefPosition.nextRefPosition; // TODO: Can we combine this with the freeing of registers below? 
It might // mess with the dump, since this was previously being done before the call below @@ -4634,7 +4631,7 @@ void LinearScan::allocateRegisters() clearSpillCost(regRecord->regNum, regRecord->registerType); makeRegisterInactive(regRecord); } - if (currentRefPosition->nodeLocation > prevLocation) + if (currentRefPosition.nodeLocation > prevLocation) { makeRegsAvailable(regsToMakeInactive); // TODO: Clean this up. We need to make the delayRegs inactive as well, but don't want @@ -4660,9 +4657,9 @@ void LinearScan::allocateRegisters() Interval* currentInterval = nullptr; Referenceable* currentReferent = nullptr; - RefType refType = currentRefPosition->refType; + RefType refType = currentRefPosition.refType; - currentReferent = currentRefPosition->referent; + currentReferent = currentRefPosition.referent; if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->IsPhysRegRef() && !lastAllocatedRefPosition->getInterval()->isInternal && @@ -4688,7 +4685,7 @@ void LinearScan::allocateRegisters() // for all the uses, internal registers & all but the last def, and // another for the final def (if any). - LsraLocation currentLocation = currentRefPosition->nodeLocation; + LsraLocation currentLocation = currentRefPosition.nodeLocation; // Free at a new location. if (currentLocation > prevLocation) @@ -4827,7 +4824,7 @@ void LinearScan::allocateRegisters() if (currentReferent != nullptr) { previousRefPosition = currentReferent->recentRefPosition; - currentReferent->recentRefPosition = currentRefPosition; + currentReferent->recentRefPosition = &currentRefPosition; } else { @@ -4835,7 +4832,7 @@ void LinearScan::allocateRegisters() } #ifdef DEBUG - activeRefPosition = currentRefPosition; + activeRefPosition = &currentRefPosition; // For the purposes of register resolution, we handle the DummyDefs before // the block boundary - so the RefTypeBB is after all the DummyDefs. @@ -4848,7 +4845,7 @@ void LinearScan::allocateRegisters() // dump it now.
if ((refType == RefTypeBB) && handledBlockEnd) { - dumpNewBlock(currentBlock, currentRefPosition->nodeLocation); + dumpNewBlock(currentBlock, currentRefPosition.nodeLocation); } #endif // DEBUG @@ -4860,7 +4857,7 @@ void LinearScan::allocateRegisters() regsInUseThisLocation = RBM_NONE; regsInUseNextLocation = RBM_NONE; handledBlockEnd = true; - curBBStartLocation = currentRefPosition->nodeLocation; + curBBStartLocation = currentRefPosition.nodeLocation; if (currentBlock == nullptr) { currentBlock = startBlockSequence(); @@ -4882,16 +4879,16 @@ void LinearScan::allocateRegisters() if (refType == RefTypeKillGCRefs) { - spillGCRefs(currentRefPosition); + spillGCRefs(&currentRefPosition); continue; } - if (currentRefPosition->isPhysRegRef) + if (currentRefPosition.isPhysRegRef) { - RegRecord* regRecord = currentRefPosition->getReg(); + RegRecord* regRecord = currentRefPosition.getReg(); Interval* assignedInterval = regRecord->assignedInterval; - updateNextFixedRef(regRecord, currentRefPosition->nextRefPosition); + updateNextFixedRef(regRecord, currentRefPosition.nextRefPosition); // If this is a FixedReg, disassociate any inactive constant interval from this register. // Otherwise, do nothing.
@@ -4914,8 +4911,8 @@ void LinearScan::allocateRegisters() } #endif // TARGET_ARM } - regsInUseThisLocation |= currentRefPosition->registerAssignment; - INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition->assignedReg())); + regsInUseThisLocation |= currentRefPosition.registerAssignment; + INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FIXED_REG, nullptr, currentRefPosition.assignedReg())); continue; } if (refType == RefTypeKill) @@ -4939,7 +4936,7 @@ void LinearScan::allocateRegisters() if (refType == RefTypeExpUse) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_EXP_USE)); - currentInterval = currentRefPosition->getInterval(); + currentInterval = currentRefPosition.getInterval(); if (currentInterval->physReg != REG_NA) { updateNextIntervalRef(currentInterval->physReg, currentInterval); @@ -4949,8 +4946,8 @@ void LinearScan::allocateRegisters() regNumber assignedRegister = REG_NA; - assert(currentRefPosition->isIntervalRef()); - currentInterval = currentRefPosition->getInterval(); + assert(currentRefPosition.isIntervalRef()); + currentInterval = currentRefPosition.getInterval(); assert(currentInterval != nullptr); assignedRegister = currentInterval->physReg; @@ -4966,7 +4963,7 @@ void LinearScan::allocateRegisters() // of any flow they won't have been marked during dataflow. Otherwise, if we allocate a // register we won't unassign it. 
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ZERO_REF, currentInterval)); - currentRefPosition->lastUse = true; + currentRefPosition.lastUse = true; } LclVarDsc* varDsc = currentInterval->getLocalVar(compiler); assert(varDsc != nullptr); @@ -4977,7 +4974,7 @@ void LinearScan::allocateRegisters() allocate = false; } else if (refType == RefTypeParamDef && (varDsc->lvRefCntWtd() <= BB_UNITY_WEIGHT) && - (!currentRefPosition->lastUse || (currentInterval->physReg == REG_STK))) + (!currentRefPosition.lastUse || (currentInterval->physReg == REG_STK))) { // If this is a low ref-count parameter, and either it is used (def is not the last use) or it's // passed on the stack, don't allocate a register. @@ -5061,7 +5058,7 @@ void LinearScan::allocateRegisters() } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval)); } - currentRefPosition->registerAssignment = RBM_NONE; + currentRefPosition.registerAssignment = RBM_NONE; continue; } #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE @@ -5071,14 +5068,14 @@ void LinearScan::allocateRegisters() { if (assignedRegister != REG_NA) { - unassignPhysReg(getRegisterRecord(assignedRegister), currentRefPosition); + unassignPhysReg(getRegisterRecord(assignedRegister), &currentRefPosition); } else if (!didDump) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval)); didDump = true; } - currentRefPosition->registerAssignment = RBM_NONE; + currentRefPosition.registerAssignment = RBM_NONE; continue; } @@ -5098,7 +5095,7 @@ void LinearScan::allocateRegisters() // kill would lead to spill of source but not the putarg_reg if it were treated // as special.
if (srcInterval->isActive && - genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment && + genRegMask(srcInterval->physReg) == currentRefPosition.registerAssignment && currentInterval->getNextRefLocation() == nextFixedRef[srcInterval->physReg]) { assert(physRegRecord->regNum == srcInterval->physReg); @@ -5126,14 +5123,14 @@ void LinearScan::allocateRegisters() if (currentInterval->isSpecialPutArg) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval, - currentRefPosition->assignedReg())); + currentRefPosition.assignedReg())); continue; } } if (assignedRegister == REG_NA && RefTypeIsUse(refType)) { - currentRefPosition->reload = true; + currentRefPosition.reload = true; INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, currentInterval, assignedRegister)); } @@ -5169,10 +5166,10 @@ void LinearScan::allocateRegisters() if (previousRefPosition != nullptr) { - assert(previousRefPosition->nextRefPosition == currentRefPosition); + assert(previousRefPosition->nextRefPosition == &currentRefPosition); assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment || - currentRefPosition->outOfOrder || previousRefPosition->copyReg || - previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef); + currentRefPosition.outOfOrder || previousRefPosition->copyReg || + previousRefPosition->refType == RefTypeExpUse || currentRefPosition.refType == RefTypeDummyDef); } else if (assignedRegister != REG_NA) { @@ -5221,9 +5218,9 @@ void LinearScan::allocateRegisters() if (keepAssignment == false) { - RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg); - currentRefPosition->registerAssignment = allRegs(currentInterval->registerType); - currentRefPosition->isFixedRegRef = false; + RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg); + currentRefPosition.registerAssignment = allRegs(currentInterval->registerType); + 
currentRefPosition.isFixedRegRef = false; unassignPhysRegNoSpill(physRegRecord); // If the preferences are currently set to just this register, reset them to allRegs @@ -5233,7 +5230,7 @@ void LinearScan::allocateRegisters() if (currentInterval->registerPreferences == assignedRegBit) { - currentInterval->registerPreferences = currentRefPosition->registerAssignment; + currentInterval->registerPreferences = currentRefPosition.registerAssignment; } else { @@ -5249,10 +5246,10 @@ void LinearScan::allocateRegisters() if (assignedRegister != REG_NA) { RegRecord* physRegRecord = getRegisterRecord(assignedRegister); - assert((assignedRegBit == currentRefPosition->registerAssignment) || + assert((assignedRegBit == currentRefPosition.registerAssignment) || (physRegRecord->assignedInterval == currentInterval) || !isRegInUse(assignedRegister, currentInterval->registerType)); - if (conflictingFixedRegReference(assignedRegister, currentRefPosition)) + if (conflictingFixedRegReference(assignedRegister, &currentRefPosition)) { // We may have already reassigned the register to the conflicting reference. // If not, we need to unassign this interval.
@@ -5261,15 +5258,15 @@ void LinearScan::allocateRegisters() unassignPhysRegNoSpill(physRegRecord); clearConstantReg(assignedRegister, currentInterval->registerType); } - currentRefPosition->moveReg = true; - assignedRegister = REG_NA; - currentRefPosition->registerAssignment &= ~assignedRegBit; + currentRefPosition.moveReg = true; + assignedRegister = REG_NA; + currentRefPosition.registerAssignment &= ~assignedRegBit; setIntervalAsSplit(currentInterval); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister)); } - else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0) + else if ((genRegMask(assignedRegister) & currentRefPosition.registerAssignment) != 0) { - currentRefPosition->registerAssignment = assignedRegBit; + currentRefPosition.registerAssignment = assignedRegBit; if (!currentInterval->isActive) { // If we've got an exposed use at the top of a block, the @@ -5282,7 +5279,7 @@ void LinearScan::allocateRegisters() } else { - currentRefPosition->reload = true; + currentRefPosition.reload = true; } } INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister)); @@ -5290,16 +5287,16 @@ void LinearScan::allocateRegisters() else { // It's already in a register, but not one we need. 
- if (!RefTypeIsDef(currentRefPosition->refType)) + if (!RefTypeIsDef(currentRefPosition.refType)) { - regNumber copyReg = assignCopyReg(currentRefPosition); - lastAllocatedRefPosition = currentRefPosition; + regNumber copyReg = assignCopyReg(&currentRefPosition); + lastAllocatedRefPosition = &currentRefPosition; regMaskTP copyRegMask = getRegMask(copyReg, currentInterval->registerType); regMaskTP assignedRegMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= copyRegMask | assignedRegMask; - if (currentRefPosition->lastUse) + if (currentRefPosition.lastUse) { - if (currentRefPosition->delayRegFree) + if (currentRefPosition.delayRegFree) { INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval, assignedRegister)); @@ -5315,7 +5312,7 @@ void LinearScan::allocateRegisters() else { copyRegsToFree |= copyRegMask; - if (currentRefPosition->delayRegFree) + if (currentRefPosition.delayRegFree) { regsInUseNextLocation |= copyRegMask | assignedRegMask; } @@ -5327,8 +5324,8 @@ void LinearScan::allocateRegisters() // If we ever actually move lclVar intervals instead of copying, this will need to change. if (!currentInterval->isLocalVar) { - currentRefPosition->moveReg = true; - currentRefPosition->copyReg = false; + currentRefPosition.moveReg = true; + currentRefPosition.copyReg = false; } clearNextIntervalRef(copyReg, currentInterval->registerType); clearSpillCost(copyReg, currentInterval->registerType); @@ -5352,10 +5349,10 @@ void LinearScan::allocateRegisters() if (assignedRegister == REG_NA) { - if (currentRefPosition->RegOptional()) + if (currentRefPosition.RegOptional()) { // We can avoid allocating a register if it is a last use requiring a reload.
- if (currentRefPosition->lastUse && currentRefPosition->reload) + if (currentRefPosition.lastUse && currentRefPosition.reload) { allocate = false; } @@ -5371,9 +5368,9 @@ void LinearScan::allocateRegisters() #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH) // We can also avoid allocating a register (in fact we don't want to) if we have // an UpperVectorRestore on xarch where the value is on the stack. - if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA)) + if ((currentRefPosition.refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA)) { - assert(currentRefPosition->regOptional); + assert(currentRefPosition.regOptional); allocate = false; } #endif @@ -5393,24 +5390,24 @@ void LinearScan::allocateRegisters() // Allocate a register, if we must, or if it is profitable to do so. // If we have a fixed reg requirement, and the interval is inactive in another register, // unassign that register. - if (currentRefPosition->isFixedRegRef && !currentInterval->isActive && + if (currentRefPosition.isFixedRegRef && !currentInterval->isActive && (currentInterval->assignedReg != nullptr) && (currentInterval->assignedReg->assignedInterval == currentInterval) && - (genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition->registerAssignment)) + (genRegMask(currentInterval->assignedReg->regNum) != currentRefPosition.registerAssignment)) { unassignPhysReg(currentInterval->assignedReg, nullptr); } - assignedRegister = allocateReg(currentInterval, currentRefPosition DEBUG_ARG(&registerScore)); + assignedRegister = allocateReg(currentInterval, &currentRefPosition DEBUG_ARG(&registerScore)); } // If no register was found, this RefPosition must not require a register.
if (assignedRegister == REG_NA) { - assert(currentRefPosition->RegOptional()); + assert(currentRefPosition.RegOptional()); INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval)); - currentRefPosition->registerAssignment = RBM_NONE; - currentRefPosition->reload = false; - currentInterval->isActive = false; + currentRefPosition.registerAssignment = RBM_NONE; + currentRefPosition.reload = false; + currentInterval->isActive = false; setIntervalAsSpilled(currentInterval); } #ifdef DEBUG @@ -5418,8 +5415,8 @@ void LinearScan::allocateRegisters() { if (VERBOSE) { - if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) && - currentRefPosition->treeNode->IsReuseRegVal()) + if (currentInterval->isConstant && (currentRefPosition.treeNode != nullptr) && + currentRefPosition.treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, currentInterval, assignedRegister, currentBlock, registerScore); @@ -5442,7 +5439,7 @@ void LinearScan::allocateRegisters() // it should have been marked for reload above. 
if (assignedRegister != REG_NA && RefTypeIsUse(refType) && !isInRegister) { - assert(currentRefPosition->reload); + assert(currentRefPosition.reload); } } @@ -5452,11 +5449,11 @@ void LinearScan::allocateRegisters() assignedRegBit = genRegMask(assignedRegister); regMaskTP regMask = getRegMask(assignedRegister, currentInterval->registerType); regsInUseThisLocation |= regMask; - if (currentRefPosition->delayRegFree) + if (currentRefPosition.delayRegFree) { regsInUseNextLocation |= regMask; } - currentRefPosition->registerAssignment = assignedRegBit; + currentRefPosition.registerAssignment = assignedRegBit; currentInterval->physReg = assignedRegister; regsToFree &= ~regMask; // we'll set it again later if it's dead @@ -5472,29 +5469,29 @@ void LinearScan::allocateRegisters() { if (currentInterval->isWriteThru) { - if (currentRefPosition->refType == RefTypeDef) + if (currentRefPosition.refType == RefTypeDef) { - currentRefPosition->writeThru = true; + currentRefPosition.writeThru = true; } - if (!currentRefPosition->lastUse) + if (!currentRefPosition.lastUse) { - if (currentRefPosition->spillAfter) + if (currentRefPosition.spillAfter) { unassign = true; } } } - if (currentRefPosition->lastUse || currentRefPosition->nextRefPosition == nullptr) + if (currentRefPosition.lastUse || currentRefPosition.nextRefPosition == nullptr) { - assert(currentRefPosition->isIntervalRef()); + assert(currentRefPosition.isIntervalRef()); // If this isn't a final use, we'll mark the register as available, but keep the association. 
- if ((refType != RefTypeExpUse) && (currentRefPosition->nextRefPosition == nullptr)) + if ((refType != RefTypeExpUse) && (currentRefPosition.nextRefPosition == nullptr)) { unassign = true; } else { - if (currentRefPosition->delayRegFree) + if (currentRefPosition.delayRegFree) { delayRegsToMakeInactive |= regMask; } @@ -5517,7 +5514,7 @@ void LinearScan::allocateRegisters() if (unassign) { - if (currentRefPosition->delayRegFree) + if (currentRefPosition.delayRegFree) { delayRegsToFree |= regMask; @@ -5537,7 +5534,7 @@ void LinearScan::allocateRegisters() updateSpillCost(assignedRegister, currentInterval); } } - lastAllocatedRefPosition = currentRefPosition; + lastAllocatedRefPosition = &currentRefPosition; } #ifdef JIT32_GCENCODER @@ -6685,15 +6682,13 @@ void LinearScan::resolveRegisters() } // handle incoming arguments and special temps - RefPositionIterator refPosIterator = refPositions.begin(); - RefPosition* currentRefPosition = refPosIterator != refPositions.end() ? &refPosIterator : nullptr; + RefPositionIterator currentRefPosition = refPositions.begin(); if (enregisterLocalVars) { VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum]; - for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; if (currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit) { break; } @@ -6719,8 +6714,8 @@ void LinearScan::resolveRegisters() } else { - assert(refPosIterator == refPositions.end() || - (refPosIterator->refType != RefTypeParamDef && refPosIterator->refType != RefTypeZeroInit)); + assert(currentRefPosition == refPositions.end() || + (currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit)); } // write back assignments @@ -6740,9 +6735,8 @@ void LinearScan::resolveRegisters() } // Handle the DummyDefs, updating the incoming var location.
- for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; if (currentRefPosition->refType != RefTypeDummyDef) { break; @@ -6767,14 +6761,13 @@ void LinearScan::resolveRegisters() } // The next RefPosition should be for the block. Move past it. - assert(refPosIterator != refPositions.end()); + assert(currentRefPosition != refPositions.end()); assert(currentRefPosition->refType == RefTypeBB); - ++refPosIterator; + ++currentRefPosition; // Handle the RefPositions for the block - for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; if (currentRefPosition->refType == RefTypeBB || currentRefPosition->refType == RefTypeDummyDef) { break; @@ -9468,8 +9461,7 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) // currentRefPosition is not used for LSRA_DUMP_PRE // We keep separate iterators for defs, so that we can print them // on the lhs of the dump - RefPositionIterator refPosIterator = refPositions.begin(); - RefPosition* currentRefPosition = &refPosIterator; + RefPositionIterator currentRefPosition = refPositions.begin(); switch (mode) { @@ -9490,9 +9482,8 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) if (mode != LSRA_DUMP_PRE) { printf("Incoming Parameters: "); - for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; if (currentRefPosition->refType == RefTypeBB) { break; @@ -9537,20 +9528,15 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) { bool printedBlockHeader = false; // We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks - for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != 
refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; - if (currentRefPosition->refType != RefTypeExpUse && currentRefPosition->refType != RefTypeDummyDef && - !(currentRefPosition->refType == RefTypeBB && !printedBlockHeader)) - { - break; - } - Interval* interval = nullptr; if (currentRefPosition->isIntervalRef()) { interval = currentRefPosition->getInterval(); } + + bool continueLoop = true; switch (currentRefPosition->refType) { case RefTypeExpUse: @@ -9564,14 +9550,24 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum); break; case RefTypeBB: + if (printedBlockHeader) + { + continueLoop = false; + break; + } block->dspBlockHeader(compiler); printedBlockHeader = true; printf("=====\n"); break; default: - printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); + continueLoop = false; break; } + if (!continueLoop) + { + // Avoid loop iteration that will update currentRefPosition + break; + } } } else @@ -9624,12 +9620,9 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) // and combining the fixed regs with their associated def or use bool killPrinted = false; RefPosition* lastFixedRegRefPos = nullptr; - for (; refPosIterator != refPositions.end(); ++refPosIterator) + for (; currentRefPosition != refPositions.end(); ++currentRefPosition) { - currentRefPosition = &refPosIterator; - if (!(currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg || - currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) || - !(currentRefPosition->nodeLocation == tree->gtSeqNum || + if (!(currentRefPosition->nodeLocation == tree->gtSeqNum || currentRefPosition->nodeLocation == tree->gtSeqNum + 1)) { break; @@ -9640,6 +9633,8 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) { interval = currentRefPosition->getInterval(); } + + bool continueLoop = true; switch 
(currentRefPosition->refType) { case RefTypeUse: @@ -9712,9 +9707,14 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode) lastFixedRegRefPos = currentRefPosition; break; default: - printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum); + continueLoop = false; break; } + if (!continueLoop) + { + // Avoid loop iteration that will update currentRefPosition + break; + } } } printf("\n"); @@ -10411,46 +10411,45 @@ void LinearScan::verifyFinalAllocation() BasicBlock* currentBlock = nullptr; GenTree* firstBlockEndResolutionNode = nullptr; LsraLocation currentLocation = MinLocation; - for (RefPosition& refPosition : refPositions) + for (RefPosition& currentRefPosition : refPositions) { - RefPosition* currentRefPosition = &refPosition; - Interval* interval = nullptr; - RegRecord* regRecord = nullptr; - regNumber regNum = REG_NA; - activeRefPosition = currentRefPosition; + Interval* interval = nullptr; + RegRecord* regRecord = nullptr; + regNumber regNum = REG_NA; + activeRefPosition = &currentRefPosition; - if (currentRefPosition->refType != RefTypeBB) + if (currentRefPosition.refType != RefTypeBB) { - if (currentRefPosition->IsPhysRegRef()) + if (currentRefPosition.IsPhysRegRef()) { - regRecord = currentRefPosition->getReg(); - regRecord->recentRefPosition = currentRefPosition; + regRecord = currentRefPosition.getReg(); + regRecord->recentRefPosition = &currentRefPosition; regNum = regRecord->regNum; } - else if (currentRefPosition->isIntervalRef()) + else if (currentRefPosition.isIntervalRef()) { - interval = currentRefPosition->getInterval(); - interval->recentRefPosition = currentRefPosition; - if (currentRefPosition->registerAssignment != RBM_NONE) + interval = currentRefPosition.getInterval(); + interval->recentRefPosition = &currentRefPosition; + if (currentRefPosition.registerAssignment != RBM_NONE) { - if (!genMaxOneBit(currentRefPosition->registerAssignment)) + if (!genMaxOneBit(currentRefPosition.registerAssignment)) { - assert(currentRefPosition->refType
== RefTypeExpUse || - currentRefPosition->refType == RefTypeDummyDef); + assert(currentRefPosition.refType == RefTypeExpUse || + currentRefPosition.refType == RefTypeDummyDef); } else { - regNum = currentRefPosition->assignedReg(); + regNum = currentRefPosition.assignedReg(); regRecord = getRegisterRecord(regNum); } } } } - LsraLocation newLocation = currentRefPosition->nodeLocation; + LsraLocation newLocation = currentRefPosition.nodeLocation; currentLocation = newLocation; - switch (currentRefPosition->refType) + switch (currentRefPosition.refType) { case RefTypeBB: { @@ -10534,7 +10533,7 @@ void LinearScan::verifyFinalAllocation() if (VERBOSE) { - dumpRefPositionShort(currentRefPosition, currentBlock); + dumpRefPositionShort(&currentRefPosition, currentBlock); dumpRegRecords(); } @@ -10599,7 +10598,7 @@ void LinearScan::verifyFinalAllocation() dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum); break; } - if (currentRefPosition->reload) + if (currentRefPosition.reload) { interval->isActive = true; assert(regNum != REG_NA); @@ -10614,7 +10613,7 @@ void LinearScan::verifyFinalAllocation() if (interval->physReg != REG_NA) { // then unassign it if no new register was assigned to the RefTypeDef - if (RefTypeIsDef(currentRefPosition->refType)) + if (RefTypeIsDef(currentRefPosition.refType)) { assert(interval->assignedReg != nullptr); if (interval->assignedReg->assignedInterval == interval) @@ -10628,14 +10627,14 @@ void LinearScan::verifyFinalAllocation() dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval); } - else if (RefTypeIsDef(currentRefPosition->refType)) + else if (RefTypeIsDef(currentRefPosition.refType)) { interval->isActive = true; if (VERBOSE) { - if (interval->isConstant && (currentRefPosition->treeNode != nullptr) && - currentRefPosition->treeNode->IsReuseRegVal()) + if (interval->isConstant && (currentRefPosition.treeNode != nullptr) && + currentRefPosition.treeNode->IsReuseRegVal()) { dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, 
nullptr, regRecord->regNum, currentBlock); } @@ -10645,11 +10644,11 @@ void LinearScan::verifyFinalAllocation() } } } - else if (currentRefPosition->copyReg) + else if (currentRefPosition.copyReg) { dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock); } - else if (currentRefPosition->moveReg) + else if (currentRefPosition.moveReg) { assert(interval->assignedReg != nullptr); interval->assignedReg->assignedInterval = nullptr; @@ -10666,13 +10665,13 @@ void LinearScan::verifyFinalAllocation() { dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock); } - if (currentRefPosition->lastUse || (currentRefPosition->spillAfter && !currentRefPosition->writeThru)) + if (currentRefPosition.lastUse || (currentRefPosition.spillAfter && !currentRefPosition.writeThru)) { interval->isActive = false; } if (regNum != REG_NA) { - if (currentRefPosition->spillAfter) + if (currentRefPosition.spillAfter) { if (VERBOSE) { @@ -10680,14 +10679,14 @@ void LinearScan::verifyFinalAllocation() // is the homeReg of the interval not the reg currently assigned // to refPos. regNumber spillReg = regNum; - if (currentRefPosition->copyReg) + if (currentRefPosition.copyReg) { assert(interval != nullptr); spillReg = interval->physReg; } dumpRegRecords(); dumpEmptyRefPosition(); - if (currentRefPosition->writeThru) + if (currentRefPosition.writeThru) { printf("WThru %-4s ", getRegName(spillReg)); } @@ -10697,13 +10696,13 @@ void LinearScan::verifyFinalAllocation() } } } - else if (currentRefPosition->copyReg) + else if (currentRefPosition.copyReg) { regRecord->assignedInterval = interval; } else { - if (RefTypeIsDef(currentRefPosition->refType)) + if (RefTypeIsDef(currentRefPosition.refType)) { // Interval was assigned to a different register. // Clear the assigned interval of current register. 
@@ -10723,7 +10722,7 @@ void LinearScan::verifyFinalAllocation() // However, we will assert that, at resolution time, no registers contain GC refs. { DBEXEC(VERBOSE, printf(" ")); - regMaskTP candidateRegs = currentRefPosition->registerAssignment; + regMaskTP candidateRegs = currentRefPosition.registerAssignment; while (candidateRegs != RBM_NONE) { regMaskTP nextRegBit = genFindLowestBit(candidateRegs); @@ -10739,32 +10738,32 @@ void LinearScan::verifyFinalAllocation() case RefTypeExpUse: case RefTypeDummyDef: // Do nothing; these will be handled by the RefTypeBB. - DBEXEC(VERBOSE, dumpRefPositionShort(currentRefPosition, currentBlock)); + DBEXEC(VERBOSE, dumpRefPositionShort(&currentRefPosition, currentBlock)); DBEXEC(VERBOSE, printf(" ")); break; case RefTypeInvalid: - // for these 'currentRefPosition->refType' values, No action to take + // for these 'currentRefPosition.refType' values, No action to take break; } - if (currentRefPosition->refType != RefTypeBB) + if (currentRefPosition.refType != RefTypeBB) { DBEXEC(VERBOSE, dumpRegRecords()); if (interval != nullptr) { - if (currentRefPosition->copyReg) + if (currentRefPosition.copyReg) { assert(interval->physReg != regNum); regRecord->assignedInterval = nullptr; assert(interval->assignedReg != nullptr); regRecord = interval->assignedReg; } - if (currentRefPosition->spillAfter || currentRefPosition->lastUse) + if (currentRefPosition.spillAfter || currentRefPosition.lastUse) { - assert(!currentRefPosition->spillAfter || currentRefPosition->IsActualRef()); + assert(!currentRefPosition.spillAfter || currentRefPosition.IsActualRef()); - if (RefTypeIsDef(currentRefPosition->refType)) + if (RefTypeIsDef(currentRefPosition.refType)) { // If an interval got assigned to a different register (while the different // register got spilled), then clear the assigned interval of current register.
@@ -10783,12 +10782,12 @@ void LinearScan::verifyFinalAllocation() regRecord->assignedInterval = nullptr; } #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE - else if (interval->isUpperVector && !currentRefPosition->RegOptional()) + else if (interval->isUpperVector && !currentRefPosition.RegOptional()) { // These only require a register if they are not RegOptional, and their lclVar // interval is living in a register and not already partially spilled. - if ((currentRefPosition->refType == RefTypeUpperVectorSave) || - (currentRefPosition->refType == RefTypeUpperVectorRestore)) + if ((currentRefPosition.refType == RefTypeUpperVectorSave) || + (currentRefPosition.refType == RefTypeUpperVectorRestore)) { Interval* lclVarInterval = interval->relatedInterval; assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled); @@ -10797,7 +10796,7 @@ void LinearScan::verifyFinalAllocation() #endif else { - assert(currentRefPosition->RegOptional()); + assert(currentRefPosition.RegOptional()); } } }