From b720be6a50f4e1b3280d2b029ee38dda14577525 Mon Sep 17 00:00:00 2001 From: Manman Ren Date: Tue, 11 Sep 2012 22:23:19 +0000 Subject: Release build: guard dump functions with "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)" No functional change. Update r163339. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@163653 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 17f9d9e..6dbe1c5 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -156,7 +156,7 @@ void LiveIntervals::printInstrs(raw_ostream &OS) const { MF->print(OS, Indexes); } -#ifndef NDEBUG +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LiveIntervals::dumpInstrs() const { printInstrs(dbgs()); } -- cgit v1.1 From 6d742cc61eb9c933a36f0bc96631fae95c6d3d2e Mon Sep 17 00:00:00 2001 From: Lang Hames Date: Wed, 12 Sep 2012 06:56:16 +0000 Subject: Make findLastUseBefore handle reg-unit liveness. findLastUseBefore was previous considering virtreg liveness only, leading to incorrect live intervals for reg units when instrs with physreg operands were moved up. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@163685 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 37 ++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 6dbe1c5..f7d601e 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1196,14 +1196,35 @@ private: // Return the last use of reg between NewIdx and OldIdx. SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) { SlotIndex LastUse = NewIdx; - for (MachineRegisterInfo::use_nodbg_iterator - UI = MRI.use_nodbg_begin(Reg), - UE = MRI.use_nodbg_end(); - UI != UE; UI.skipInstruction()) { - const MachineInstr* MI = &*UI; - SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI); - if (InstSlot > LastUse && InstSlot < OldIdx) - LastUse = InstSlot; + + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + for (MachineRegisterInfo::use_nodbg_iterator + UI = MRI.use_nodbg_begin(Reg), + UE = MRI.use_nodbg_end(); + UI != UE; UI.skipInstruction()) { + const MachineInstr* MI = &*UI; + SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI); + if (InstSlot > LastUse && InstSlot < OldIdx) + LastUse = InstSlot; + } + } else { + MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx); + MachineBasicBlock::iterator MII(MI); + ++MII; + MachineBasicBlock* MBB = MI->getParent(); + for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){ + for (MachineInstr::mop_iterator MOI = MII->operands_begin(), + MOE = MII->operands_end(); + MOI != MOE; ++MOI) { + const MachineOperand& mop = *MOI; + if (!mop.isReg() || mop.getReg() == 0 || + TargetRegisterInfo::isVirtualRegister(mop.getReg())) + continue; + + if (TRI.hasRegUnit(mop.getReg(), Reg)) + LastUse = LIS.getInstructionIndex(MII); + } + } } return LastUse; } -- cgit v1.1 From 87f7864c6d81ae134335b8271ac12c937c81dffc Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Mon, 17 Sep 2012 23:03:25 +0000 Subject: Merge into undefined lanes under -new-coalescer. 
Add LIS::pruneValue() and extendToIndices(). These two functions are used by the register coalescer when merging two live ranges requires more than a trivial value mapping as supported by LiveInterval::join(). The pruneValue() function can remove the part of a value number that is going to conflict in join(). Afterwards, extendToIndices can restore the live range, using any new dominating value numbers and updating the SSA form. Use this complex value mapping to support merging a register into a vector lane that has a conflicting value, but the clobbered lane is undef. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164074 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index f7d601e..6c26b79 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -731,6 +731,73 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li, return CanSeparate; } +void LiveIntervals::extendToIndices(LiveInterval *LI, + ArrayRef<SlotIndex> Indices) { + assert(LRCalc && "LRCalc not initialized."); + LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator()); + for (unsigned i = 0, e = Indices.size(); i != e; ++i) + LRCalc->extend(LI, Indices[i]); +} + +void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill, + SmallVectorImpl<SlotIndex> *EndPoints) { + LiveRangeQuery LRQ(*LI, Kill); + assert (!LRQ.valueDefined() && "Can't prune value at the defining instr"); + + // Also can't prune a value that isn't there. + VNInfo *VNI = LRQ.valueOut(); + if (!VNI) + return; + + MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill); + SlotIndex MBBStart, MBBEnd; + tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB); + + // If VNI isn't live out from KillMBB, the value is trivially pruned. + if (LRQ.endPoint() < MBBEnd) { + LI->removeRange(Kill, LRQ.endPoint()); + if (EndPoints) EndPoints->push_back(LRQ.endPoint()); + return; + } + + // VNI is live out of KillMBB. + LI->removeRange(Kill, MBBEnd); + if (EndPoints) EndPoints->push_back(MBBEnd); + + // Find all blocks that are reachable from MBB without leaving VNI's live + // range. + for (df_iterator<MachineBasicBlock*> + I = df_begin(KillMBB), E = df_end(KillMBB); I != E;) { + MachineBasicBlock *MBB = *I; + // KillMBB itself was already handled. + if (MBB == KillMBB) { + ++I; + continue; + } + + // Check if VNI is live in to MBB. + tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB); + LiveRangeQuery LRQ(*LI, MBBStart); + if (LRQ.valueIn() != VNI) { + // This block isn't part of the VNI live range. Prune the search. + I.skipChildren(); + continue; + } + + // Prune the search if VNI is killed in MBB. + if (LRQ.endPoint() < MBBEnd) { + LI->removeRange(MBBStart, LRQ.endPoint()); + if (EndPoints) EndPoints->push_back(LRQ.endPoint()); + I.skipChildren(); + continue; + } + + // VNI is live through MBB. + LI->removeRange(MBBStart, MBBEnd); + if (EndPoints) EndPoints->push_back(MBBEnd); + ++I; + } +} //===----------------------------------------------------------------------===// // Register allocator hooks. -- cgit v1.1 From 2df8ac84ae8317b6a96f19bbc984d2bd02cff087 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Thu, 20 Sep 2012 23:08:39 +0000 Subject: Extend -new-coalescer SSA update to handle mapped values as well. The old-fashioned many-to-one value mapping doesn't always work when merging vector lanes.
A value can map to multiple different values, and it can even be necessary to insert new PHIs. When a value number is defined by a copy from a value number that required SSa update, include the live range of the copied value number in the SSA update as well. It is not necessarily a copy of the original value number any longer. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164329 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 3 --- 1 file changed, 3 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 6c26b79..dca3058 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -742,9 +742,6 @@ void LiveIntervals::extendToIndices(LiveInterval *LI, void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill, SmallVectorImpl *EndPoints) { LiveRangeQuery LRQ(*LI, Kill); - assert (!LRQ.valueDefined() && "Can't prune value at the defining instr"); - - // Also can't prune a value that isn't there. VNInfo *VNI = LRQ.valueOut(); if (!VNI) return; -- cgit v1.1 From d4919a988d1e2d62b5c108bbb98a1633edee76db Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Tue, 2 Oct 2012 22:08:36 +0000 Subject: Handle reserved registers more accurately in handleMove(). Reserved register live ranges look like a set of dead defs - any uses of reserved registers are ignored. Instead of skipping the updating of reserved register operands entirely, just ignore the use operands and treat the def operands normally. No test case, handleMove() is not commonly used yet. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165060 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index dca3058..141f8ed 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1164,18 +1164,17 @@ private: unsigned Reg = MO.getReg(); - // TODO: Currently we're skipping uses that are reserved or have no - // interval, but we're not updating their kills. This should be - // fixed. - if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)) - continue; + // Don't track uses of reserved registers - they're not accurate. + // Reserved register live ranges look like a set of dead defs. + bool Resv = + TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg); // Collect ranges for register units. These live ranges are computed on // demand, so just skip any that haven't been computed yet. if (TargetRegisterInfo::isPhysicalRegister(Reg)) { for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) - collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx); + collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx, Resv); } else { // Collect ranges for individual virtual registers. 
collectRanges(MO, &LIS.getInterval(Reg), @@ -1186,8 +1185,8 @@ private: void collectRanges(const MachineOperand &MO, LiveInterval *LI, RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting, - SlotIndex OldIdx) { - if (MO.readsReg()) { + SlotIndex OldIdx, bool IgnoreReads = false) { + if (!IgnoreReads && MO.readsReg()) { LiveRange* LR = LI->getLiveRangeContaining(OldIdx); if (LR != 0) Entering.insert(std::make_pair(LI, LR)); -- cgit v1.1 From ad5e969ba945c46aef58ae26b77f80cd99c8f3fb Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Fri, 12 Oct 2012 21:31:57 +0000 Subject: Use a transposed algorithm for handleMove(). Completely update one interval at a time instead of collecting live range fragments to be updated. This avoids building data structures, except for a single SmallPtrSet of updated intervals. Also share code between handleMove() and handleMoveIntoBundle(). Add support for moving dead defs across other live values in the interval. The MI scheduler can do that. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165824 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 640 ++++++++++++----------------------- 1 file changed, 213 insertions(+), 427 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 141f8ed..2ea9056 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1007,246 +1007,240 @@ private: LiveIntervals& LIS; const MachineRegisterInfo& MRI; const TargetRegisterInfo& TRI; + SlotIndex OldIdx; SlotIndex NewIdx; - - typedef std::pair IntRangePair; - typedef DenseSet RangeSet; - - struct RegRanges { - LiveRange* Use; - LiveRange* EC; - LiveRange* Dead; - LiveRange* Def; - RegRanges() : Use(0), EC(0), Dead(0), Def(0) {} - }; - typedef DenseMap BundleRanges; + SmallPtrSet Updated; public: HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI, - const TargetRegisterInfo& TRI, SlotIndex NewIdx) - : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {} - - // Update intervals for all operands of MI from OldIdx to NewIdx. - // This assumes that MI used to be at OldIdx, and now resides at - // NewIdx. - void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) { - assert(NewIdx != OldIdx && "No-op move? That's a bit strange."); - - // Collect the operands. - RangeSet Entering, Internal, Exiting; - bool hasRegMaskOp = false; - collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); - - // To keep the LiveRanges valid within an interval, move the ranges closest - // to the destination first. This prevents ranges from overlapping, to that - // APIs like removeRange still work. - if (NewIdx < OldIdx) { - moveAllEnteringFrom(OldIdx, Entering); - moveAllInternalFrom(OldIdx, Internal); - moveAllExitingFrom(OldIdx, Exiting); - } - else { - moveAllExitingFrom(OldIdx, Exiting); - moveAllInternalFrom(OldIdx, Internal); - moveAllEnteringFrom(OldIdx, Entering); - } - - if (hasRegMaskOp) - updateRegMaskSlots(OldIdx); - -#ifndef NDEBUG - LIValidator validator; - validator = std::for_each(Entering.begin(), Entering.end(), validator); - validator = std::for_each(Internal.begin(), Internal.end(), validator); - validator = std::for_each(Exiting.begin(), Exiting.end(), validator); - assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness."); -#endif - - } - - // Update intervals for all operands of MI to refer to BundleStart's - // SlotIndex. 
- void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) { - if (MI == BundleStart) - return; // Bundling instr with itself - nothing to do. - - SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI); - assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI && - "SlotIndex <-> Instruction mapping broken for MI"); - - // Collect all ranges already in the bundle. - MachineBasicBlock::instr_iterator BII(BundleStart); - RangeSet Entering, Internal, Exiting; - bool hasRegMaskOp = false; - collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) { - if (&*BII == MI) + const TargetRegisterInfo& TRI, + SlotIndex OldIdx, SlotIndex NewIdx) + : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx) {} + + /// Update all live ranges touched by MI, assuming a move from OldIdx to + /// NewIdx. + void updateAllRanges(MachineInstr *MI) { + DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI); + bool hasRegMask = false; + for (MIOperands MO(MI); MO.isValid(); ++MO) { + if (MO->isRegMask()) + hasRegMask = true; + if (!MO->isReg()) continue; - collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - } - - BundleRanges BR = createBundleRanges(Entering, Internal, Exiting); - - Entering.clear(); - Internal.clear(); - Exiting.clear(); - collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - - DEBUG(dbgs() << "Entering: " << Entering.size() << "\n"); - DEBUG(dbgs() << "Internal: " << Internal.size() << "\n"); - DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n"); - - moveAllEnteringFromInto(OldIdx, Entering, BR); - moveAllInternalFromInto(OldIdx, Internal, BR); - moveAllExitingFromInto(OldIdx, Exiting, BR); + // Aggressively clear all kill flags. + // They are reinserted by VirtRegRewriter. + if (MO->isUse()) + MO->setIsKill(false); + unsigned Reg = MO->getReg(); + if (!Reg) + continue; + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + updateRange(LIS.getInterval(Reg)); + continue; + } -#ifndef NDEBUG - LIValidator validator; - validator = std::for_each(Entering.begin(), Entering.end(), validator); - validator = std::for_each(Internal.begin(), Internal.end(), validator); - validator = std::for_each(Exiting.begin(), Exiting.end(), validator); - assert(validator.rangesOk() && "moveAllOperandsInto broke liveness."); -#endif + // For physregs, only update the regunits that actually have a + // precomputed live range. + for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) + if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) + updateRange(*LI); + } + if (hasRegMask) + updateRegMaskSlots(); } private: + /// Update a single live range, assuming an instruction has been moved from + /// OldIdx to NewIdx. + void updateRange(LiveInterval &LI) { + if (!Updated.insert(&LI)) + return; + DEBUG({ + dbgs() << " "; + if (TargetRegisterInfo::isVirtualRegister(LI.reg)) + dbgs() << PrintReg(LI.reg); + else + dbgs() << PrintRegUnit(LI.reg, &TRI); + dbgs() << ":\t" << LI << '\n'; + }); + if (SlotIndex::isEarlierInstr(OldIdx, NewIdx)) + handleMoveDown(LI); + else + handleMoveUp(LI); + DEBUG(dbgs() << " -->\t" << LI << '\n'); + LI.verify(); + } + + /// Update LI to reflect an instruction has been moved downwards from OldIdx + /// to NewIdx. 
+ /// + /// 1. Live def at OldIdx: + /// Move def to NewIdx, assert endpoint after NewIdx. + /// + /// 2. Live def at OldIdx, killed at NewIdx: + /// Change to dead def at NewIdx. + /// (Happens when bundling def+kill together). + /// + /// 3. Dead def at OldIdx: + /// Move def to NewIdx, possibly across another live value. + /// + /// 4. Def at OldIdx AND at NewIdx: + /// Remove live range [OldIdx;NewIdx) and value defined at OldIdx. + /// (Happens when bundling multiple defs together). + /// + /// 5. Value read at OldIdx, killed before NewIdx: + /// Extend kill to NewIdx. + /// + void handleMoveDown(LiveInterval &LI) { + // First look for a kill at OldIdx. + LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex()); + LiveInterval::iterator E = LI.end(); + // Is LI even live at OldIdx? + if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start)) + return; -#ifndef NDEBUG - class LIValidator { - private: - DenseSet Checked, Bogus; - public: - void operator()(const IntRangePair& P) { - const LiveInterval* LI = P.first; - if (Checked.count(LI)) + // Handle a live-in value. + if (!SlotIndex::isSameInstr(I->start, OldIdx)) { + bool isKill = SlotIndex::isSameInstr(OldIdx, I->end); + // If the live-in value already extends to NewIdx, there is nothing to do. + if (!SlotIndex::isEarlierInstr(I->end, NewIdx)) return; - Checked.insert(LI); - if (LI->empty()) + // Aggressively remove all kill flags from the old kill point. + // Kill flags shouldn't be used while live intervals exist, they will be + // reinserted by VirtRegRewriter. + if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end)) + for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO) + if (MO->isReg() && MO->isUse()) + MO->setIsKill(false); + // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by + // overlapping ranges. Case 5 above. + I->end = NewIdx.getRegSlot(I->end.isEarlyClobber()); + // If this was a kill, there may also be a def. Otherwise we're done. + if (!isKill) return; - SlotIndex LastEnd = LI->begin()->start; - for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end(); - LRI != LRE; ++LRI) { - const LiveRange& LR = *LRI; - if (LastEnd > LR.start || LR.start >= LR.end) - Bogus.insert(LI); - LastEnd = LR.end; - } - } - - bool rangesOk() const { - return Bogus.empty(); - } - }; -#endif - - // Collect IntRangePairs for all operands of MI that may need fixing. - // Treat's MI's index as OldIdx (regardless of what it is in SlotIndexes' - // maps). - void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal, - RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) { - hasRegMaskOp = false; - for (MachineInstr::mop_iterator MOI = MI->operands_begin(), - MOE = MI->operands_end(); - MOI != MOE; ++MOI) { - const MachineOperand& MO = *MOI; - - if (MO.isRegMask()) { - hasRegMaskOp = true; - continue; - } - - if (!MO.isReg() || MO.getReg() == 0) - continue; - - unsigned Reg = MO.getReg(); - - // Don't track uses of reserved registers - they're not accurate. - // Reserved register live ranges look like a set of dead defs. - bool Resv = - TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg); - - // Collect ranges for register units. These live ranges are computed on - // demand, so just skip any that haven't been computed yet. 
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) { - for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) - if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) - collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx, Resv); - } else { - // Collect ranges for individual virtual registers. - collectRanges(MO, &LIS.getInterval(Reg), - Entering, Internal, Exiting, OldIdx); - } + ++I; } - } - void collectRanges(const MachineOperand &MO, LiveInterval *LI, - RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting, - SlotIndex OldIdx, bool IgnoreReads = false) { - if (!IgnoreReads && MO.readsReg()) { - LiveRange* LR = LI->getLiveRangeContaining(OldIdx); - if (LR != 0) - Entering.insert(std::make_pair(LI, LR)); + // Check for a def at OldIdx. + if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start)) + return; + // We have a def at OldIdx. + VNInfo *DefVNI = I->valno; + assert(DefVNI->def == I->start && "Inconsistent def"); + DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber()); + // If the defined value extends beyond NewIdx, just move the def down. + // This is case 1 above. + if (SlotIndex::isEarlierInstr(NewIdx, I->end)) { + I->start = DefVNI->def; + return; } - if (MO.isDef()) { - LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot()); - assert(LR != 0 && "No live range for def?"); - if (LR->end > OldIdx.getDeadSlot()) - Exiting.insert(std::make_pair(LI, LR)); - else - Internal.insert(std::make_pair(LI, LR)); + // The remaining possibilities are now: + // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx). + // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot(). + // In either case, it is possible that there is an existing def at NewIdx. + assert((I->end == OldIdx.getDeadSlot() || + SlotIndex::isSameInstr(I->end, NewIdx)) && + "Cannot move def below kill"); + LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot()); + if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) { + // There is an existing def at NewIdx, case 4 above. The def at OldIdx is + // coalesced into that value. + assert(NewI->valno != DefVNI && "Multiple defs of value?"); + LI.removeValNo(DefVNI); + return; } + // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx. + // If the def at OldIdx was dead, we allow it to be moved across other LI + // values. The new range should be placed immediately before NewI, move any + // intermediate ranges up. + assert(NewI != I && "Inconsistent iterators"); + std::copy(llvm::next(I), NewI, I); + *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } - BundleRanges createBundleRanges(RangeSet& Entering, - RangeSet& Internal, - RangeSet& Exiting) { - BundleRanges BR; + /// Update LI to reflect an instruction has been moved upwards from OldIdx + /// to NewIdx. + /// + /// 1. Live def at OldIdx: + /// Hoist def to NewIdx. + /// + /// 2. Dead def at OldIdx: + /// Hoist def+end to NewIdx, possibly move across other values. + /// + /// 3. Dead def at OldIdx AND existing def at NewIdx: + /// Remove value defined at OldIdx, coalescing it with existing value. + /// + /// 4. Live def at OldIdx AND existing def at NewIdx: + /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx. + /// (Happens when bundling multiple defs together). + /// + /// 5. Value killed at OldIdx: + /// Hoist kill to NewIdx, then scan for last kill between NewIdx and + /// OldIdx. + /// + void handleMoveUp(LiveInterval &LI) { + // First look for a kill at OldIdx. 
+ LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex()); + LiveInterval::iterator E = LI.end(); + // Is LI even live at OldIdx? + if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start)) + return; - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) { - LiveInterval* LI = EI->first; - LiveRange* LR = EI->second; - BR[LI->reg].Use = LR; + // Handle a live-in value. + if (!SlotIndex::isSameInstr(I->start, OldIdx)) { + // If the live-in value isn't killed here, there is nothing to do. + if (!SlotIndex::isSameInstr(OldIdx, I->end)) + return; + // Adjust I->end to end at NewIdx. If we are hoisting a kill above + // another use, we need to search for that use. Case 5 above. + I->end = NewIdx.getRegSlot(I->end.isEarlyClobber()); + ++I; + // If OldIdx also defines a value, there couldn't have been another use. + if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) { + // No def, search for the new kill. + // This can never be an early clobber kill since there is no def. + llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot(); + return; + } } - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) { - LiveInterval* LI = II->first; - LiveRange* LR = II->second; - if (LR->end.isDead()) { - BR[LI->reg].Dead = LR; - } else { - BR[LI->reg].EC = LR; + // Now deal with the def at OldIdx. + assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?"); + VNInfo *DefVNI = I->valno; + assert(DefVNI->def == I->start && "Inconsistent def"); + DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber()); + + // Check for an existing def at NewIdx. + LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot()); + if (SlotIndex::isSameInstr(NewI->start, NewIdx)) { + assert(NewI->valno != DefVNI && "Same value defined more than once?"); + // There is an existing def at NewIdx. + if (I->end.isDead()) { + // Case 3: Remove the dead def at OldIdx. + LI.removeValNo(DefVNI); + return; } + // Case 4: Replace def at NewIdx with live def at OldIdx. + I->start = DefVNI->def; + LI.removeValNo(NewI->valno); + return; } - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) { - LiveInterval* LI = EI->first; - LiveRange* LR = EI->second; - BR[LI->reg].Def = LR; + // There is no existing def at NewIdx. Hoist DefVNI. + if (!I->end.isDead()) { + // Leave the end point of a live def. + I->start = DefVNI->def; + return; } - return BR; + // DefVNI is a dead def. It may have been moved across other values in LI, + // so move I up to NewI. Slide [NewI;I) down one position. + std::copy_backward(NewI, I, llvm::next(I)); + *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } - void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) { - MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx); - if (!OldKillMI->killsRegister(reg)) - return; // Bail out if we don't have kill flags on the old register. - MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx); - assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill."); - assert(!NewKillMI->killsRegister(reg) && - "New kill instr is already a kill."); - OldKillMI->clearRegisterKills(reg, &TRI); - NewKillMI->addRegisterKilled(reg, &TRI); - } - - void updateRegMaskSlots(SlotIndex OldIdx) { + void updateRegMaskSlots() { SmallVectorImpl::iterator RI = std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(), OldIdx); @@ -1257,7 +1251,7 @@ private: } // Return the last use of reg between NewIdx and OldIdx. 
- SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) { + SlotIndex findLastUseBefore(unsigned Reg) { SlotIndex LastUse = NewIdx; if (TargetRegisterInfo::isVirtualRegister(Reg)) { @@ -1291,233 +1285,25 @@ private: } return LastUse; } - - void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - bool LiveThrough = LR->end > OldIdx.getRegSlot(); - if (LiveThrough) - return; - SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); - if (LastUse != NewIdx) - moveKillFlags(LI->reg, NewIdx, LastUse); - LR->end = LastUse.getRegSlot(LR->end.isEarlyClobber()); - } - - void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - // Extend the LiveRange if NewIdx is past the end. - if (NewIdx > LR->end) { - // Move kill flags if OldIdx was not originally the end - // (otherwise LR->end points to an invalid slot). - if (LR->end.getRegSlot() != OldIdx.getRegSlot()) { - assert(LR->end > OldIdx && "LiveRange does not cover original slot"); - moveKillFlags(LI->reg, LR->end, NewIdx); - } - LR->end = NewIdx.getRegSlot(LR->end.isEarlyClobber()); - } - } - - void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) { - bool GoingUp = NewIdx < OldIdx; - - if (GoingUp) { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringUpFrom(OldIdx, *EI); - } else { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringDownFrom(OldIdx, *EI); - } - } - - void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && - LR->end <= OldIdx.getDeadSlot() && - "Range should be internal to OldIdx."); - LiveRange Tmp(*LR); - Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber()); - Tmp.valno->def = Tmp.start; - Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot(); - LI->removeRange(*LR); - LI->addRange(Tmp); - } - - void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) { - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) - moveInternalFrom(OldIdx, *II); - } - - void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveRange* LR = P.second; - assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && - "Range should start in OldIdx."); - assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx."); - SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber()); - LR->start = NewStart; - LR->valno->def = NewStart; - } - - void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) { - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) - moveExitingFrom(OldIdx, *EI); - } - - void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - bool LiveThrough = LR->end > OldIdx.getRegSlot(); - if (LiveThrough) { - assert((LR->start < NewIdx || BR[LI->reg].Def == LR) && - "Def in bundle should be def range."); - assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && - "If bundle has use for this reg it should be LR."); - BR[LI->reg].Use = LR; - return; - } - - SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); - moveKillFlags(LI->reg, OldIdx, LastUse); - - if (LR->start < NewIdx) { - // Becoming a new entering range. 
- assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 && - "Bundle shouldn't be re-defining reg mid-range."); - assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && - "Bundle shouldn't have different use range for same reg."); - LR->end = LastUse.getRegSlot(); - BR[LI->reg].Use = LR; - } else { - // Becoming a new Dead-def. - assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) && - "Live range starting at unexpected slot."); - assert(BR[LI->reg].Def == LR && "Reg should have def range."); - assert(BR[LI->reg].Dead == 0 && - "Can't have def and dead def of same reg in a bundle."); - LR->end = LastUse.getDeadSlot(); - BR[LI->reg].Dead = BR[LI->reg].Def; - BR[LI->reg].Def = 0; - } - } - - void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - if (NewIdx > LR->end) { - // Range extended to bundle. Add to bundle uses. - // Note: Currently adds kill flags to bundle start. - assert(BR[LI->reg].Use == 0 && - "Bundle already has use range for reg."); - moveKillFlags(LI->reg, LR->end, NewIdx); - LR->end = NewIdx.getRegSlot(); - BR[LI->reg].Use = LR; - } else { - assert(BR[LI->reg].Use != 0 && - "Bundle should already have a use range for reg."); - } - } - - void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering, - BundleRanges& BR) { - bool GoingUp = NewIdx < OldIdx; - - if (GoingUp) { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringUpFromInto(OldIdx, *EI, BR); - } else { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringDownFromInto(OldIdx, *EI, BR); - } - } - - void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - // TODO: Sane rules for moving ranges into bundles. - } - - void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal, - BundleRanges& BR) { - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) - moveInternalFromInto(OldIdx, *II, BR); - } - - void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - - assert(LR->start.isRegister() && - "Don't know how to merge exiting ECs into bundles yet."); - - if (LR->end > NewIdx.getDeadSlot()) { - // This range is becoming an exiting range on the bundle. - // If there was an old dead-def of this reg, delete it. - if (BR[LI->reg].Dead != 0) { - LI->removeRange(*BR[LI->reg].Dead); - BR[LI->reg].Dead = 0; - } - assert(BR[LI->reg].Def == 0 && - "Can't have two defs for the same variable exiting a bundle."); - LR->start = NewIdx.getRegSlot(); - LR->valno->def = LR->start; - BR[LI->reg].Def = LR; - } else { - // This range is becoming internal to the bundle. - assert(LR->end == NewIdx.getRegSlot() && - "Can't bundle def whose kill is before the bundle"); - if (BR[LI->reg].Dead || BR[LI->reg].Def) { - // Already have a def for this. Just delete range. - LI->removeRange(*LR); - } else { - // Make range dead, record. - LR->end = NewIdx.getDeadSlot(); - BR[LI->reg].Dead = LR; - assert(BR[LI->reg].Use == LR && - "Range becoming dead should currently be use."); - } - // In both cases the range is no longer a use on the bundle. 
- BR[LI->reg].Use = 0; - } - } - - void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting, - BundleRanges& BR) { - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) - moveExitingFromInto(OldIdx, *EI, BR); - } - }; void LiveIntervals::handleMove(MachineInstr* MI) { + assert(!MI->isBundled() && "Can't handle bundled instructions yet."); SlotIndex OldIndex = Indexes->getInstructionIndex(MI); Indexes->removeMachineInstrFromMaps(MI); - SlotIndex NewIndex = MI->isInsideBundle() ? - Indexes->getInstructionIndex(MI) : - Indexes->insertMachineInstrInMaps(MI); + SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI); assert(getMBBStartIdx(MI->getParent()) <= OldIndex && OldIndex < getMBBEndIdx(MI->getParent()) && "Cannot handle moves across basic block boundaries."); - assert(!MI->isBundled() && "Can't handle bundled instructions yet."); - HMEditor HME(*this, *MRI, *TRI, NewIndex); - HME.moveAllRangesFrom(MI, OldIndex); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex); + HME.updateAllRanges(MI); } void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart) { + SlotIndex OldIndex = Indexes->getInstructionIndex(MI); SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart); - HMEditor HME(*this, *MRI, *TRI, NewIndex); - HME.moveAllRangesInto(MI, BundleStart); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex); + HME.updateAllRanges(MI); } -- cgit v1.1 From af8969076083396f06431971cce867ea11fb968c Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Sat, 13 Oct 2012 16:15:31 +0000 Subject: Allow for loops in LiveIntervals::pruneValue(). It is possible that the live range of the value being pruned loops back into the kill MBB where the search started. When that happens, make sure that the beginning of KillMBB is also pruned. Instead of starting a DFS at KillMBB and skipping the root of the search, start a DFS at each KillMBB successor, and allow the search to loop back to KillMBB. This fixes PR14078. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165872 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 61 +++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 29 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 2ea9056..8daac46 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -761,38 +761,41 @@ void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill, LI->removeRange(Kill, MBBEnd); if (EndPoints) EndPoints->push_back(MBBEnd); - // Find all blocks that are reachable from MBB without leaving VNI's live - // range. - for (df_iterator - I = df_begin(KillMBB), E = df_end(KillMBB); I != E;) { - MachineBasicBlock *MBB = *I; - // KillMBB itself was already handled. - if (MBB == KillMBB) { - ++I; - continue; - } + // Find all blocks that are reachable from KillMBB without leaving VNI's live + // range. It is possible that KillMBB itself is reachable, so start a DFS + // from each successor. + typedef SmallPtrSet VisitedTy; + VisitedTy Visited; + for (MachineBasicBlock::succ_iterator + SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end(); + SuccI != SuccE; ++SuccI) { + for (df_ext_iterator + I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited); + I != E;) { + MachineBasicBlock *MBB = *I; + + // Check if VNI is live in to MBB. 
+ tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB); + LiveRangeQuery LRQ(*LI, MBBStart); + if (LRQ.valueIn() != VNI) { + // This block isn't part of the VNI live range. Prune the search. + I.skipChildren(); + continue; + } - // Check if VNI is live in to MBB. - tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB); - LiveRangeQuery LRQ(*LI, MBBStart); - if (LRQ.valueIn() != VNI) { - // This block isn't part of the VNI live range. Prune the search. - I.skipChildren(); - continue; - } + // Prune the search if VNI is killed in MBB. + if (LRQ.endPoint() < MBBEnd) { + LI->removeRange(MBBStart, LRQ.endPoint()); + if (EndPoints) EndPoints->push_back(LRQ.endPoint()); + I.skipChildren(); + continue; + } - // Prune the search if VNI is killed in MBB. - if (LRQ.endPoint() < MBBEnd) { - LI->removeRange(MBBStart, LRQ.endPoint()); - if (EndPoints) EndPoints->push_back(LRQ.endPoint()); - I.skipChildren(); - continue; + // VNI is live through MBB. + LI->removeRange(MBBStart, MBBEnd); + if (EndPoints) EndPoints->push_back(MBBEnd); + ++I; } - - // VNI is live through MBB. - LI->removeRange(MBBStart, MBBEnd); - if (EndPoints) EndPoints->push_back(MBBEnd); - ++I; } } -- cgit v1.1 From 790047620a8f31cee1841c06c9e5e7688166ad93 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Mon, 15 Oct 2012 22:14:34 +0000 Subject: Remove LIS::isAllocatable() and isReserved() helpers. All callers can simply use the corresponding MRI functions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165985 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 8daac46..15fc69d 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -110,8 +110,6 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { DomTree = &getAnalysis(); if (!LRCalc) LRCalc = new LiveRangeCalc(); - AllocatableRegs = TRI->getAllocatableSet(fn); - ReservedRegs = TRI->getReservedRegs(fn); // Allocate space for all virtual registers. VirtRegIntervals.resize(MRI->getNumVirtRegs()); @@ -542,11 +540,11 @@ void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) { // Ignore uses of reserved registers. We only track defs of those. for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) { unsigned Root = *Roots; - if (!isReserved(Root) && !MRI->reg_empty(Root)) + if (!MRI->isReserved(Root) && !MRI->reg_empty(Root)) LRCalc->extendToUses(LI, Root); for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) { unsigned Reg = *Supers; - if (!isReserved(Reg) && !MRI->reg_empty(Reg)) + if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg)) LRCalc->extendToUses(LI, Reg); } } -- cgit v1.1 From 27c28cef11da5373d9a146060f9c10a3c6ab58e3 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 16 Oct 2012 00:22:51 +0000 Subject: misched: Added handleMove support for updating all kill flags, not just for allocatable regs. This is a medium term workaround until we have a more robust solution in the form of a register liveness utility for postRA passes. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@166001 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 15fc69d..65bc4af 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1011,12 +1011,24 @@ private: SlotIndex OldIdx; SlotIndex NewIdx; SmallPtrSet Updated; + bool UpdateFlags; public: HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI, const TargetRegisterInfo& TRI, - SlotIndex OldIdx, SlotIndex NewIdx) - : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx) {} + SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags) + : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx), + UpdateFlags(UpdateFlags) {} + + // FIXME: UpdateFlags is a workaround that creates live intervals for all + // physregs, even those that aren't needed for regalloc, in order to update + // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill + // flags, and postRA passes will use a live register utility instead. + LiveInterval *getRegUnitLI(unsigned Unit) { + if (UpdateFlags) + return &LIS.getRegUnit(Unit); + return LIS.getCachedRegUnit(Unit); + } /// Update all live ranges touched by MI, assuming a move from OldIdx to /// NewIdx. @@ -1044,7 +1056,7 @@ public: // For physregs, only update the regunits that actually have a // precomputed live range. for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) - if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) + if (LiveInterval *LI = getRegUnitLI(*Units)) updateRange(*LI); } if (hasRegMask) @@ -1288,7 +1300,7 @@ private: } }; -void LiveIntervals::handleMove(MachineInstr* MI) { +void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) { assert(!MI->isBundled() && "Can't handle bundled instructions yet."); SlotIndex OldIndex = Indexes->getInstructionIndex(MI); Indexes->removeMachineInstrFromMaps(MI); @@ -1297,14 +1309,15 @@ void LiveIntervals::handleMove(MachineInstr* MI) { OldIndex < getMBBEndIdx(MI->getParent()) && "Cannot handle moves across basic block boundaries."); - HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); HME.updateAllRanges(MI); } void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, - MachineInstr* BundleStart) { + MachineInstr* BundleStart, + bool UpdateFlags) { SlotIndex OldIndex = Indexes->getInstructionIndex(MI); SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart); - HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); HME.updateAllRanges(MI); } -- cgit v1.1 From 722c9a7925d1a66569513a1894fdd230962fa3f9 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Fri, 9 Nov 2012 19:18:49 +0000 Subject: Fix assertions in updateRegMaskSlots(). The RegMaskSlots contains 'r' slots while NewIdx and OldIdx are 'B' slots. This broke the checks in the assertions. This fixes PR14302. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167625 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 65bc4af..4e75d89 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -146,6 +146,11 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const { OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n'; } + OS << "RegMasks:"; + for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i) + OS << ' ' << RegMaskSlots[i]; + OS << '\n'; + printInstrs(OS); } @@ -1257,10 +1262,15 @@ private: SmallVectorImpl::iterator RI = std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(), OldIdx); - assert(*RI == OldIdx && "No RegMask at OldIdx."); - *RI = NewIdx; - assert(*prior(RI) < *RI && *RI < *next(RI) && - "RegSlots out of order. Did you move one call across another?"); + assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() && + "No RegMask at OldIdx."); + *RI = NewIdx.getRegSlot(); + assert((RI == LIS.RegMaskSlots.begin() || + SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) && + "Cannot move regmask instruction above another call"); + assert((llvm::next(RI) == LIS.RegMaskSlots.end() || + SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) && + "Cannot move regmask instruction below another call"); } // Return the last use of reg between NewIdx and OldIdx. -- cgit v1.1 From 1ead68d769f27f6d68d4aaeffe4199fa2cacbc95 Mon Sep 17 00:00:00 2001 From: Jakob Stoklund Olesen Date: Wed, 28 Nov 2012 19:13:06 +0000 Subject: Make the LiveRegMatrix analysis available to targets. No functional change, just moved header files. Targets can inject custom passes between register allocation and rewriting. This makes it possible to tweak the register allocation before rewriting, using the full global interference checking available from LiveRegMatrix. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@168806 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 4e75d89..c2610dc 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -24,6 +24,7 @@ #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/VirtRegMap.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" @@ -34,7 +35,6 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "LiveRangeCalc.h" -#include "VirtRegMap.h" #include #include #include -- cgit v1.1 From 30fe61aa35ff12cdcbf5f94d8e3ab9a2d964654e Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sat, 1 Dec 2012 01:22:41 +0000 Subject: misched: Fix LiveInterval update to better handle DebugVal. Assertion failed: (itr != mi2iMap.end() && "Instruction not found in maps.") rdar://12777252. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169070 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index c2610dc..404cf76 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1292,7 +1292,11 @@ private: MachineBasicBlock::iterator MII(MI); ++MII; MachineBasicBlock* MBB = MI->getParent(); - for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){ + for (; MII != MBB->end(); ++MII){ + if (MII->isDebugValue()) + continue; + if (LIS.getInstructionIndex(MII) < OldIdx) + break; for (MachineInstr::mop_iterator MOI = MII->operands_begin(), MOE = MII->operands_end(); MOI != MOE; ++MOI) { -- cgit v1.1 From d04a8d4b33ff316ca4cf961e06c9e312eff8e64f Mon Sep 17 00:00:00 2001 From: Chandler Carruth Date: Mon, 3 Dec 2012 16:50:05 +0000 Subject: Use the new script to sort the includes of every file under lib. Sooooo many of these had incorrect or strange main module includes. I have manually inspected all of these, and fixed the main module include to be the nearest plausible thing I could find. If you own or care about any of these source files, I encourage you to take some time and check that these edits were sensible. I can't have broken anything (I strictly added headers, and reordered them, never removed), but they may not be the headers you'd really like to identify as containing the API being implemented. Many forward declarations and missing includes were added to a header files to allow them to parse cleanly when included first. The main module rule does in fact have its merits. =] git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169131 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 404cf76..c414ded 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -17,7 +17,9 @@ #define DEBUG_TYPE "regalloc" #include "llvm/CodeGen/LiveIntervalAnalysis.h" -#include "llvm/Value.h" +#include "LiveRangeCalc.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/LiveVariables.h" #include "llvm/CodeGen/MachineDominators.h" @@ -25,19 +27,17 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/VirtRegMap.h" -#include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/ADT/DenseSet.h" -#include "llvm/ADT/STLExtras.h" -#include "LiveRangeCalc.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Value.h" #include -#include #include +#include using namespace llvm; // Switch to the new experimental algorithm for computing live intervals. 
-- cgit v1.1 From 0b8c9a80f20772c3793201ab5b251d3520b9cea3 Mon Sep 17 00:00:00 2001 From: Chandler Carruth Date: Wed, 2 Jan 2013 11:36:10 +0000 Subject: Move all of the header files which are involved in modelling the LLVM IR into their new header subdirectory: include/llvm/IR. This matches the directory structure of lib, and begins to correct a long standing point of file layout clutter in LLVM. There are still more header files to move here, but I wanted to handle them in separate commits to make tracking what files make sense at each layer easier. The only really questionable files here are the target intrinsic tablegen files. But that's a battle I'd rather not fight today. I've updated both CMake and Makefile build systems (I think, and my tests think, but I may have missed something). I've also re-sorted the includes throughout the project. I'll be committing updates to Clang, DragonEgg, and Polly momentarily. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@171366 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/LiveIntervalAnalysis.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/CodeGen/LiveIntervalAnalysis.cpp') diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index c414ded..4198457 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -27,6 +27,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/VirtRegMap.h" +#include "llvm/IR/Value.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -34,7 +35,6 @@ #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Value.h" #include <algorithm> #include <cmath> #include <limits> -- cgit v1.1
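The r164074 message above describes the intended calling pattern for the two new functions: prune the part of a value that would conflict in join(), then grow the live range back once the conflict is resolved. The sketch below illustrates that pattern only; it is not part of any commit, the helper name pruneAndRestore and its surrounding context are hypothetical, and only the two LiveIntervals calls and their parameter types come from the patch above.

// Minimal sketch of the pruneValue()/extendToIndices() pattern, assuming the
// 2012-era in-tree API added in r164074. Not a definitive implementation.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/SlotIndexes.h"

using namespace llvm;

// Hypothetical helper for a coalescer-like client.
static void pruneAndRestore(LiveIntervals &LIS, LiveInterval &LI,
                            SlotIndex Kill) {
  // End points of the segments removed by pruneValue(); they record where the
  // live range must later be re-extended.
  SmallVector<SlotIndex, 8> EndPoints;

  // Step 1: remove the part of LI's value that is live at Kill and would
  // otherwise conflict in join().
  LIS.pruneValue(&LI, Kill, &EndPoints);

  // ... the caller resolves the conflict here, e.g. by merging value numbers ...

  // Step 2: extend LI out to the recorded indices again. Liveness is
  // recomputed from whatever value numbers now dominate those points,
  // inserting new PHI-defs where needed, which restores SSA form as the
  // commit message describes.
  LIS.extendToIndices(&LI, EndPoints);
}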