-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h  |  25
-rw-r--r--  include/llvm/CodeGen/LiveVariables.h         |  41
-rw-r--r--  lib/CodeGen/LiveIntervalAnalysis.cpp         | 355
-rw-r--r--  lib/CodeGen/LiveVariables.cpp                |  49
-rw-r--r--  lib/CodeGen/RegAllocLinearScan.cpp           |  18
-rw-r--r--  lib/CodeGen/SimpleRegisterCoalescing.cpp     |   3
-rw-r--r--  lib/CodeGen/VirtRegMap.cpp                   | 198
-rw-r--r--  lib/CodeGen/VirtRegMap.h                     | 104
8 files changed, 631 insertions(+), 162 deletions(-)
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 16dff55..e4582f7 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -32,6 +32,7 @@
namespace llvm {
class LiveVariables;
+ class LoopInfo;
class MRegisterInfo;
class SSARegMap;
class TargetInstrInfo;
@@ -104,8 +105,8 @@ namespace llvm {
return getBaseIndex(index) + InstrSlots::STORE;
}
- static float getSpillWeight(const MachineOperand &mop, unsigned loopDepth) {
- return (mop.isUse()+mop.isDef()) * powf(10.0F, (float)loopDepth);
+ static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
+ return (isDef + isUse) * powf(10.0F, (float)loopDepth);
}
typedef Reg2IntervalMap::iterator iterator;
@@ -229,7 +230,8 @@ namespace llvm {
/// addIntervalsForSpills - Create new intervals for spilled defs / uses of
/// the given interval.
std::vector<LiveInterval*>
- addIntervalsForSpills(const LiveInterval& i, VirtRegMap& vrm);
+ addIntervalsForSpills(const LiveInterval& i,
+ const LoopInfo *loopInfo, VirtRegMap& vrm);
private:
/// computeIntervals - Compute live intervals.
@@ -275,21 +277,32 @@ namespace llvm {
MachineInstr *DefMI, unsigned index, unsigned i,
bool isSS, int slot, unsigned reg);
+ bool anyKillInMBBAfterIdx(const LiveInterval &li,
+ MachineBasicBlock *MBB, unsigned Idx,
+ const VNInfo *VNI = NULL) const;
+
+ bool intervalIsInOneMBB(const LiveInterval &li) const;
+
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
- void rewriteInstructionForSpills(const LiveInterval &li,
+ void rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
unsigned id, unsigned index, unsigned end, MachineInstr *MI,
MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, SSARegMap *RegMap, const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
+ unsigned &NewVReg, bool &HasDef, bool &HasUse, const LoopInfo *loopInfo,
+ std::vector<unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs);
- void rewriteInstructionsForSpills(const LiveInterval &li,
+ void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
LiveInterval::Ranges::const_iterator &I,
MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, SSARegMap *RegMap, const TargetRegisterClass* rc,
- SmallVector<int, 4> &ReMatIds,
+ SmallVector<int, 4> &ReMatIds, const LoopInfo *loopInfo,
+ BitVector &SpillMBBs,
+ std::vector<std::pair<int, unsigned> > &SpillIdxes,
+ std::vector<unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs);
static LiveInterval createInterval(unsigned Reg);
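The key change in this header is that getSpillWeight no longer needs a MachineOperand, and addIntervalsForSpills now takes LoopInfo so weights can reflect loop depth. A minimal standalone sketch of the weight formula itself (plain C++, not LLVM code; names are illustrative):

```cpp
#include <cmath>
#include <cstdio>

// Mirrors the formula above: a def and/or use at loop depth d
// contributes (isDef + isUse) * 10^d to the interval's spill weight.
static float spillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  return (isDef + isUse) * powf(10.0F, (float)loopDepth);
}

int main() {
  std::printf("%g\n", spillWeight(false, true, 0)); // plain use: 1
  std::printf("%g\n", spillWeight(true, true, 2));  // def+use in a doubly nested loop: 200
}
```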
diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h
index 8938330..b9d5784 100644
--- a/include/llvm/CodeGen/LiveVariables.h
+++ b/include/llvm/CodeGen/LiveVariables.h
@@ -154,20 +154,6 @@ private: // Intermediate data structures
SmallVector<unsigned, 4> *PHIVarInfo;
- /// addRegisterKilled - We have determined MI kills a register. Look for the
- /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
- /// add a implicit operand if it's not found. Returns true if the operand
- /// exists / is added.
- bool addRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
- bool AddIfNotFound = false);
-
- /// addRegisterDead - We have determined MI defined a register without a use.
- /// Look for the operand that defines it and mark it as IsDead. If
- /// AddIfNotFound is true, add a implicit operand if it's not found. Returns
- /// true if the operand exists / is added.
- bool addRegisterDead(unsigned IncomingReg, MachineInstr *MI,
- bool AddIfNotFound = false);
-
void addRegisterKills(unsigned Reg, MachineInstr *MI,
SmallSet<unsigned, 4> &SubKills);
@@ -210,15 +196,28 @@ public:
/// the records for NewMI.
void instructionChanged(MachineInstr *OldMI, MachineInstr *NewMI);
+ /// transferKillDeadInfo - Similar to instructionChanged except it does not
+ /// update live variables internal data structures.
+ static void transferKillDeadInfo(MachineInstr *OldMI, MachineInstr *NewMI,
+ const MRegisterInfo *RegInfo);
+
+ /// addRegisterKilled - We have determined MI kills a register. Look for the
+ /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
+ /// add an implicit operand if it's not found. Returns true if the operand
+ /// exists / is added.
+ static bool addRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
+ const MRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
/// addVirtualRegisterKilled - Add information about the fact that the
/// specified register is killed after being used by the specified
/// instruction. If AddIfNotFound is true, add an implicit operand if it's
/// not found.
void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
bool AddIfNotFound = false) {
- if (addRegisterKilled(IncomingReg, MI, AddIfNotFound))
+ if (addRegisterKilled(IncomingReg, MI, RegInfo, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(MI);
- }
+ }
/// removeVirtualRegisterKilled - Remove the specified virtual
/// register from the live variable information. Returns true if the
@@ -248,12 +247,20 @@ public:
/// instruction.
void removeVirtualRegistersKilled(MachineInstr *MI);
+ /// addRegisterDead - We have determined MI defined a register without a use.
+ /// Look for the operand that defines it and mark it as IsDead. If
+ /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
+ /// true if the operand exists / is added.
+ static bool addRegisterDead(unsigned IncomingReg, MachineInstr *MI,
+ const MRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
/// addVirtualRegisterDead - Add information about the fact that the specified
/// register is dead after being used by the specified instruction. If
/// AddIfNotFound is true, add an implicit operand if it's not found.
void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI,
bool AddIfNotFound = false) {
- if (addRegisterDead(IncomingReg, MI, AddIfNotFound))
+ if (addRegisterDead(IncomingReg, MI, RegInfo, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(MI);
}
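addRegisterKilled and addRegisterDead become static and take the MRegisterInfo explicitly, so code that has no LiveVariables instance (such as the new transferKillDeadInfo) can still maintain kill/dead flags. A simplified standalone sketch of the scan-and-mark pattern these helpers implement (toy operand type; sub-register handling omitted):

```cpp
#include <vector>

struct Operand {
  unsigned reg;
  bool isUse, isKill;
};

// Find the operand that uses Reg and mark it as a kill; if none exists and
// AddIfNotFound is set, append an implicit kill operand instead.
bool addRegisterKilled(unsigned Reg, std::vector<Operand> &MI,
                       bool AddIfNotFound = false) {
  for (Operand &MO : MI)
    if (MO.isUse && MO.reg == Reg) {
      MO.isKill = true;
      return true;
    }
  if (AddIfNotFound) {
    MI.push_back({Reg, /*isUse=*/true, /*isKill=*/true}); // implicit operand
    return true;
  }
  return false;
}
```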
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 55094e3..d2e32ab 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -39,6 +40,9 @@ namespace {
// Hidden options for help debugging.
cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
+
+ cl::opt<bool> SplitAtBB("split-intervals-at-bb",
+ cl::init(false), cl::Hidden);
}
STATISTIC(numIntervals, "Number of original intervals");
@@ -632,7 +636,8 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
-bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
+bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
+ VirtRegMap &vrm,
MachineInstr *DefMI,
unsigned index, unsigned i,
bool isSS, int slot, unsigned reg) {
@@ -644,8 +649,11 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
// we can do this, we don't need to insert spill code.
if (lv_)
lv_->instructionChanged(MI, fmi);
+ else
+ LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
MachineBasicBlock &MBB = *MI->getParent();
vrm.virtFolded(reg, MI, i, fmi);
+ vrm.transferSpillPts(MI, fmi);
mi2iMap_.erase(MI);
i2miMap_[index/InstrSlots::NUM] = fmi;
mi2iMap_[fmi] = index;
@@ -656,17 +664,50 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
return false;
}
+bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
+ SmallPtrSet<MachineBasicBlock*, 4> MBBs;
+ for (LiveInterval::Ranges::const_iterator
+ I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
+ std::vector<IdxMBBPair>::const_iterator II =
+ std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
+ if (II == Idx2MBBMap.end())
+ continue;
+ if (I->end > II->first) // crossing an MBB.
+ return false;
+ MBBs.insert(II->second);
+ if (MBBs.size() > 1)
+ return false;
+ }
+ return true;
+}
+
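intervalIsInOneMBB is the gate for the new splitting mode: an interval already confined to one block cannot profit from per-block splitting. A standalone model of the lower_bound walk (a sorted block-boundary table stands in for Idx2MBBMap; the boundary convention here is an assumption of this sketch):

```cpp
#include <algorithm>
#include <set>
#include <utility>
#include <vector>

struct Range { unsigned start, end; }; // half-open [start, end)

// Idx2MBB holds (blockEnd, blockId) sorted by blockEnd, where blockEnd is
// one past the block's last instruction index.
bool intervalIsInOneMBB(const std::vector<Range> &Ranges,
                        const std::vector<std::pair<unsigned, int>> &Idx2MBB) {
  std::set<int> MBBs;
  for (const Range &R : Ranges) {
    // First block whose end lies beyond the range's start.
    auto II = std::lower_bound(
        Idx2MBB.begin(), Idx2MBB.end(), R.start,
        [](const std::pair<unsigned, int> &P, unsigned V) { return P.first <= V; });
    if (II == Idx2MBB.end())
      continue;
    if (R.end > II->first) // The range crosses into the next block.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)   // Two ranges live in different blocks.
      return false;
  }
  return true;
}
```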
+static
+bool hasALaterUse(MachineBasicBlock *MBB, MachineInstr *MI, unsigned Reg) {
+ MachineBasicBlock::iterator I = MI;
+ if (I == MBB->end())
+ return false;
+ ++I;
+ while (I != MBB->end()) {
+ if (I->findRegisterUseOperandIdx(Reg) != -1)
+ return true;
+ ++I;
+ }
+ return false;
+}
+
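hasALaterUse feeds the fold heuristic below: when splitting, a load is folded into an instruction only if the value is not needed again later in the same block. A toy standalone version of that scan:

```cpp
#include <vector>

struct Instr { std::vector<unsigned> usedRegs; };

// True if any instruction after position I in the block uses Reg.
bool hasALaterUse(const std::vector<Instr> &MBB, size_t I, unsigned Reg) {
  for (size_t j = I + 1; j < MBB.size(); ++j)
    for (unsigned R : MBB[j].usedRegs)
      if (R == Reg)
        return true;
  return false;
}
```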
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
void LiveIntervals::
-rewriteInstructionForSpills(const LiveInterval &li,
- unsigned id, unsigned index, unsigned end,
- MachineInstr *MI, MachineInstr *OrigDefMI, MachineInstr *DefMI,
+rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
+ unsigned id, unsigned index, unsigned end, MachineInstr *MI,
+ MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, SSARegMap *RegMap,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
+ unsigned &NewVReg, bool &HasDef, bool &HasUse,
+ const LoopInfo *loopInfo, std::vector<unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs) {
RestartInstruction:
for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
@@ -688,14 +729,15 @@ rewriteInstructionForSpills(const LiveInterval &li,
if (DefIsReMat) {
// If this is the rematerializable definition MI itself and
// all of its uses are rematerialized, simply delete it.
- if (MI == OrigDefMI && CanDelete) {
+ if (MI == ReMatOrigDefMI && CanDelete) {
RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
break;
}
// If def for this use can't be rematerialized, then try folding.
- TryFold = !OrigDefMI || (OrigDefMI && (MI == OrigDefMI || isLoad));
+ TryFold = !ReMatOrigDefMI ||
+ (ReMatOrigDefMI && (MI == ReMatOrigDefMI || isLoad));
if (isLoad) {
// Try fold loads (from stack slot, constant pool, etc.) into uses.
FoldSS = isLoadSS;
@@ -703,16 +745,32 @@ rewriteInstructionForSpills(const LiveInterval &li,
}
}
+ // If we are splitting live intervals, only fold if it's 1) the first
+ // use and it's a kill or 2) there isn't another use later in this MBB.
+ TryFold &= NewVReg == 0;
+ if (TryFold && TrySplit)
+ // Do not fold store into def here if we are splitting. We'll find an
+ // optimal point to insert a store later.
+ if (HasDef || mop.isDef() ||
+ (!mop.isKill() && hasALaterUse(MI->getParent(), MI, li.reg)))
+ TryFold = false;
+
// FIXME: fold subreg use
if (!isSubReg && TryFold &&
- tryFoldMemoryOperand(MI, vrm, DefMI, index, i, FoldSS, FoldSlot, Reg))
+ tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i, FoldSS, FoldSlot,
+ Reg))
// Folding the load/store can completely change the instruction in
// unpredictable ways, rescan it from the beginning.
goto RestartInstruction;
// Create a new virtual register for the spill interval.
- unsigned NewVReg = RegMap->createVirtualRegister(rc);
- vrm.grow();
+ bool CreatedNewVReg = false;
+ if (NewVReg == 0) {
+ NewVReg = RegMap->createVirtualRegister(rc);
+ vrm.grow();
+ CreatedNewVReg = true;
+ }
+ mop.setReg(NewVReg);
// Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
@@ -725,10 +783,9 @@ rewriteInstructionForSpills(const LiveInterval &li,
//
// Keep track of whether we replace a use and/or def so that we can
// create the spill interval with the appropriate range.
- mop.setReg(NewVReg);
- bool HasUse = mop.isUse();
- bool HasDef = mop.isDef();
+ HasUse = mop.isUse();
+ HasDef = mop.isDef();
for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
if (!MI->getOperand(j).isRegister())
continue;
@@ -742,38 +799,49 @@ rewriteInstructionForSpills(const LiveInterval &li,
}
}
- if (DefIsReMat) {
- vrm.setVirtIsReMaterialized(NewVReg, DefMI/*, CanDelete*/);
- if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
- // Each valnum may have its own remat id.
- ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
+ if (CreatedNewVReg) {
+ if (DefIsReMat) {
+ vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
+ if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
+ // Each valnum may have its own remat id.
+ ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
+ } else {
+ vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
+ }
+ if (!CanDelete || (HasUse && HasDef)) {
+ // If this is a two-addr instruction then its use operands are
+ // rematerializable but its def is not. It should be assigned a
+ // stack slot.
+ vrm.assignVirt2StackSlot(NewVReg, Slot);
+ }
} else {
- vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
- }
- if (!CanDelete || (HasUse && HasDef)) {
- // If this is a two-addr instruction then its use operands are
- // rematerializable but its def is not. It should be assigned a
- // stack slot.
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
- } else {
- vrm.assignVirt2StackSlot(NewVReg, Slot);
}
// create a new register interval for this spill / remat.
LiveInterval &nI = getOrCreateInterval(NewVReg);
- assert(nI.empty());
- NewLIs.push_back(&nI);
-
- // the spill weight is now infinity as it
- // cannot be spilled again
- nI.weight = HUGE_VALF;
+ if (CreatedNewVReg) {
+ NewLIs.push_back(&nI);
+ NewVRegs[MI->getParent()->getNumber()] = NewVReg;
+ if (TrySplit)
+ vrm.setIsSplitFromReg(NewVReg, li.reg);
+ }
if (HasUse) {
- LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
- nI.getNextValue(~0U, 0, VNInfoAllocator));
- DOUT << " +" << LR;
- nI.addRange(LR);
+ if (CreatedNewVReg) {
+ LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
+ nI.getNextValue(~0U, 0, VNInfoAllocator));
+ DOUT << " +" << LR;
+ nI.addRange(LR);
+ } else {
+ // Extend the split live interval to this def / use.
+ unsigned End = getUseIndex(index)+1;
+ LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
+ nI.getValNumInfo(nI.getNumValNums()-1));
+ DOUT << " +" << LR;
+ nI.addRange(LR);
+ }
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
@@ -781,29 +849,57 @@ rewriteInstructionForSpills(const LiveInterval &li,
DOUT << " +" << LR;
nI.addRange(LR);
}
-
- // update live variables if it is available
- if (lv_)
- lv_->addVirtualRegisterKilled(NewVReg, MI);
-
+
DOUT << "\t\t\t\tAdded new interval: ";
nI.print(DOUT, mri_);
DOUT << '\n';
}
}
+bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
+ MachineBasicBlock *MBB, unsigned Idx,
+ const VNInfo *VNI) const {
+ unsigned End = getMBBEndIdx(MBB);
+ if (VNI) {
+ for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
+ unsigned KillIdx = VNI->kills[j];
+ if (KillIdx > Idx && KillIdx < End)
+ return true;
+ }
+ return false;
+ }
+
+ // Look at all the VNInfo's.
+ for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
+ i != e; ++i) {
+ const VNInfo *VNI = *i;
+ for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
+ unsigned KillIdx = VNI->kills[j];
+ if (KillIdx > Idx && KillIdx < End)
+ return true;
+ }
+ }
+ return false;
+}
+
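anyKillInMBBAfterIdx decides whether the last def in a block needs a spill: if no kill follows the def before the block's end, the value escapes the block and must be stored. A standalone restatement over plain kill-index lists (toy types; the real code walks VNInfo kill arrays):

```cpp
#include <vector>

// Is there a kill in (Idx, End)? If OneValNo is given, check only its kills;
// otherwise check the kills of every value number, as in the two-address case.
bool anyKillInMBBAfterIdx(const std::vector<std::vector<unsigned>> &AllKills,
                          unsigned Idx, unsigned End,
                          const std::vector<unsigned> *OneValNo = nullptr) {
  if (OneValNo) {
    for (unsigned K : *OneValNo)
      if (K > Idx && K < End)
        return true;
    return false;
  }
  for (const auto &Kills : AllKills)
    for (unsigned K : Kills)
      if (K > Idx && K < End)
        return true;
  return false;
}
```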
void LiveIntervals::
-rewriteInstructionsForSpills(const LiveInterval &li,
+rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
LiveInterval::Ranges::const_iterator &I,
- MachineInstr *OrigDefMI, MachineInstr *DefMI,
+ MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, SSARegMap *RegMap,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
+ const LoopInfo *loopInfo,
+ BitVector &SpillMBBs,
+ std::vector<std::pair<int, unsigned> > &SpillIdxes,
+ std::vector<unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs) {
+ unsigned NewVReg = 0;
unsigned index = getBaseIndex(I->start);
unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
+ bool TrySplitMI = TrySplit && vrm.getPreSplitReg(li.reg) == 0;
for (; index != end; index += InstrSlots::NUM) {
// skip deleted instructions
while (index != end && !getInstructionFromIndex(index))
@@ -811,15 +907,71 @@ rewriteInstructionsForSpills(const LiveInterval &li,
if (index == end) break;
MachineInstr *MI = getInstructionFromIndex(index);
- rewriteInstructionForSpills(li, I->valno->id, index, end, MI,
- OrigDefMI, DefMI, Slot, LdSlot, isLoad,
- isLoadSS, DefIsReMat, CanDelete, vrm,
- RegMap, rc, ReMatIds, NewLIs);
+ MachineBasicBlock *MBB = MI->getParent();
+ NewVReg = !TrySplitMI ? 0 : NewVRegs[MBB->getNumber()];
+ bool IsNew = NewVReg == 0;
+ bool HasDef = false;
+ bool HasUse = false;
+ rewriteInstructionForSpills(li, TrySplitMI, I->valno->id, index, end,
+ MI, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot,
+ isLoad, isLoadSS, DefIsReMat, CanDelete, vrm,
+ RegMap, rc, ReMatIds, NewVReg, HasDef, HasUse,
+ loopInfo, NewVRegs, NewLIs);
+ if (!HasDef && !HasUse)
+ continue;
+
+ // Update weight of spill interval.
+ LiveInterval &nI = getOrCreateInterval(NewVReg);
+ if (!TrySplitMI)
+ // The spill weight is now infinity as it cannot be spilled again.
+ nI.weight = HUGE_VALF;
+ else {
+ // Keep track of the last def in each MBB.
+ if (HasDef) {
+ if (MI != ReMatOrigDefMI || !CanDelete) {
+ // If this is two-address code, then this index probably starts a
+ // VNInfo so we should examine all the VNInfo's.
+ bool HasKill = HasUse
+ ? anyKillInMBBAfterIdx(li, MBB, getDefIndex(index))
+ : anyKillInMBBAfterIdx(li, MBB, getDefIndex(index), I->valno);
+ if (!HasKill) {
+ unsigned MBBId = MBB->getNumber();
+ if ((int)index > SpillIdxes[MBBId].first)
+ // The high bit specifies whether this spill ought to be folded if
+ // possible.
+ SpillIdxes[MBBId] = std::make_pair(index, NewVReg | (1 << 31));
+ SpillMBBs.set(MBBId);
+ }
+ }
+ if (!IsNew) {
+ // If this interval hasn't been assigned a stack slot
+ // (because an earlier def is remat'd), do it now.
+ int SS = vrm.getStackSlot(NewVReg);
+ if (SS != (int)Slot) {
+ assert(SS == VirtRegMap::NO_STACK_SLOT);
+ vrm.assignVirt2StackSlot(NewVReg, Slot);
+ }
+ }
+ } else if (HasUse) {
+ // Use(s) follow the last def, so it's not safe to fold the spill.
+ unsigned MBBId = MBB->getNumber();
+ if ((SpillIdxes[MBBId].second & ((1<<31)-1)) == NewVReg &&
+ (int)getUseIndex(index) > SpillIdxes[MBBId].first)
+ SpillIdxes[MBBId].second &= (1<<31)-1;
+ }
+
+ // Update spill weight.
+ unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
+ nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
+ }
}
}
+
+
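The SpillIdxes entries above pack two facts into one unsigned: the low 31 bits name the new vreg and bit 31 says whether the pending spill may still be folded into its def. A small standalone demonstration of that encoding (illustrative only):

```cpp
#include <cassert>
#include <cstdio>

const unsigned FoldBit = 1u << 31; // high bit: spill is still foldable

unsigned encodeSpill(unsigned VReg, bool CanFold) {
  assert(VReg < FoldBit && "vreg must fit in 31 bits");
  return VReg | (CanFold ? FoldBit : 0);
}

int main() {
  unsigned Packed = encodeSpill(1234, true);
  unsigned VReg = Packed & (FoldBit - 1);   // low 31 bits: the register
  bool CanFold  = (Packed & FoldBit) != 0;  // bit 31: foldability
  std::printf("vreg %u, foldable %d\n", VReg, CanFold); // vreg 1234, foldable 1
  Packed &= FoldBit - 1; // a later unfoldable use clears the bit
}
```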
std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm) {
+addIntervalsForSpills(const LiveInterval &li,
+ const LoopInfo *loopInfo, VirtRegMap &vrm) {
// Since this is called after the analysis is done we don't know if
// LiveVariables is available
lv_ = getAnalysisToUpdate<LiveVariables>();
@@ -831,6 +983,11 @@ addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm) {
li.print(DOUT, mri_);
DOUT << '\n';
+ // Each bit specifies whether a spill is required in the MBB.
+ BitVector SpillMBBs(mf_->getNumBlockIDs());
+ std::vector<std::pair<int, unsigned> > SpillIdxes(mf_->getNumBlockIDs(),
+ std::make_pair(-1,0));
+ std::vector<unsigned> NewVRegs(mf_->getNumBlockIDs(), 0);
std::vector<LiveInterval*> NewLIs;
SSARegMap *RegMap = mf_->getSSARegMap();
const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
@@ -845,6 +1002,43 @@ addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm) {
BitVector ReMatDelete(NumValNums);
unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
+ // Spilling a split live interval. It cannot be split any further. It is
+ // also guaranteed to be a single val# / range interval.
+ if (vrm.getPreSplitReg(li.reg)) {
+ vrm.setIsSplitFromReg(li.reg, 0);
+ bool DefIsReMat = vrm.isReMaterialized(li.reg);
+ Slot = vrm.getStackSlot(li.reg);
+ assert(Slot != VirtRegMap::MAX_STACK_SLOT);
+ MachineInstr *ReMatDefMI = DefIsReMat ?
+ vrm.getReMaterializedMI(li.reg) : NULL;
+ int LdSlot = 0;
+ bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
+ bool isLoad = isLoadSS ||
+ (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
+ vrm.removeAllSpillPtsForReg(li.reg);
+ bool IsFirstRange = true;
+ for (LiveInterval::Ranges::const_iterator
+ I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
+ // If this is a split live interval with multiple ranges, it means there
+ // are two-address instructions that re-defined the value. Only the
+ // first def can be rematerialized!
+ if (IsFirstRange) {
+ rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
+ Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+ false, vrm, RegMap, rc, ReMatIds,
+ loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
+ } else {
+ rewriteInstructionsForSpills(li, false, I, NULL, 0,
+ Slot, 0, false, false, false,
+ false, vrm, RegMap, rc, ReMatIds,
+ loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
+ }
+ IsFirstRange = false;
+ }
+ return NewLIs;
+ }
+
+ bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
bool NeedStackSlot = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
@@ -854,14 +1048,15 @@ addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm) {
if (DefIdx == ~1U)
continue; // Dead val#.
// Is the def for the val# rematerializable?
- MachineInstr *DefMI = (DefIdx == ~0u) ? 0 : getInstructionFromIndex(DefIdx);
- if (DefMI && isReMaterializable(li, VNI, DefMI)) {
+ MachineInstr *ReMatDefMI = (DefIdx == ~0u)
+ ? 0 : getInstructionFromIndex(DefIdx);
+ if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
// Remember how to remat the def of this val#.
- ReMatOrigDefs[VN] = DefMI;
+ ReMatOrigDefs[VN] = ReMatDefMI;
// Original def may be modified so we have to make a copy here. vrm must
// delete these!
- ReMatDefs[VN] = DefMI = DefMI->clone();
- vrm.setVirtIsReMaterialized(li.reg, DefMI);
+ ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
+ vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
bool CanDelete = true;
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
@@ -889,24 +1084,62 @@ addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm) {
}
// One stack slot per live interval.
- if (NeedStackSlot)
+ if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
Slot = vrm.assignVirt2StackSlot(li.reg);
+
// Create new intervals and rewrite defs and uses.
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- MachineInstr *DefMI = ReMatDefs[I->valno->id];
- MachineInstr *OrigDefMI = ReMatOrigDefs[I->valno->id];
- bool DefIsReMat = DefMI != NULL;
+ MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
+ MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
+ bool DefIsReMat = ReMatDefMI != NULL;
bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
- bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(DefMI, LdSlot);
+ bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
- (DefIsReMat && (DefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
- rewriteInstructionsForSpills(li, I, OrigDefMI, DefMI, Slot, LdSlot,
- isLoad, isLoadSS, DefIsReMat, CanDelete,
- vrm, RegMap, rc, ReMatIds, NewLIs);
+ (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
+ rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
+ Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+ CanDelete, vrm, RegMap, rc, ReMatIds,
+ loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
}
+ // Insert spills if we are splitting.
+ if (TrySplit && NeedStackSlot) {
+ int Id = SpillMBBs.find_first();
+ while (Id != -1) {
+ unsigned index = SpillIdxes[Id].first;
+ unsigned VReg = SpillIdxes[Id].second & ((1 << 31)-1);
+ bool TryFold = SpillIdxes[Id].second & (1 << 31);
+ MachineInstr *MI = getInstructionFromIndex(index);
+ int OpIdx = -1;
+ if (TryFold) {
+ for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
+ MachineOperand &MO = MI->getOperand(j);
+ if (!MO.isRegister() || MO.getReg() != VReg)
+ continue;
+ if (MO.isUse()) {
+ // Can't fold if it's two-address code.
+ OpIdx = -1;
+ break;
+ }
+ OpIdx = (int)j;
+ }
+ }
+ // Fold the store into the def if possible.
+ if (OpIdx == -1 ||
+ !tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, true, Slot, VReg))
+ // Else tell the spiller to issue a store for us.
+ vrm.addSpillPoint(VReg, MI);
+ Id = SpillMBBs.find_next(Id);
+ }
+ }
+
+ // Finalize spill weights.
+ if (TrySplit)
+ for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
+ NewLIs[i]->weight /= NewLIs[i]->getSize();
+
return NewLIs;
}
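Two post-passes close out the splitting case: recorded spill points are either folded into the def or handed to the spiller, and, unlike the unsplit path's HUGE_VALF, split intervals get a finite weight normalized by their length so short hot intervals outrank long lukewarm ones. A rough standalone model of the normalization step:

```cpp
#include <cstdio>

struct Interval { float weight; unsigned size; };

int main() {
  Interval A = {200.0f, 10}; // hot but long
  Interval B = {200.0f, 2};  // equally hot, much shorter
  A.weight /= A.size;        // weight per instruction slot
  B.weight /= B.size;
  // B now looks far more expensive to spill, so it keeps its register longer.
  std::printf("A=%g B=%g\n", A.weight, B.weight); // A=20 B=100
}
```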
diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp
index 2af8bf3..a42011d 100644
--- a/lib/CodeGen/LiveVariables.cpp
+++ b/lib/CodeGen/LiveVariables.cpp
@@ -193,6 +193,7 @@ void LiveVariables::HandleVirtRegUse(VarInfo &VRInfo, MachineBasicBlock *MBB,
}
bool LiveVariables::addRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
+ const MRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool Found = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -224,6 +225,7 @@ bool LiveVariables::addRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
}
bool LiveVariables::addRegisterDead(unsigned IncomingReg, MachineInstr *MI,
+ const MRegisterInfo *RegInfo,
bool AddIfNotFound) {
bool Found = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -331,7 +333,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *RefMI,
void LiveVariables::addRegisterKills(unsigned Reg, MachineInstr *MI,
SmallSet<unsigned, 4> &SubKills) {
if (SubKills.count(Reg) == 0)
- addRegisterKilled(Reg, MI, true);
+ addRegisterKilled(Reg, MI, RegInfo, true);
else {
for (const unsigned *SubRegs = RegInfo->getImmediateSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
@@ -342,7 +344,7 @@ void LiveVariables::addRegisterKills(unsigned Reg, MachineInstr *MI,
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *RefMI) {
SmallSet<unsigned, 4> SubKills;
if (HandlePhysRegKill(Reg, RefMI, SubKills)) {
- addRegisterKilled(Reg, RefMI, true);
+ addRegisterKilled(Reg, RefMI, RegInfo, true);
return true;
} else {
// Some sub-registers are killed by another MI.
@@ -359,15 +361,15 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI) {
if (PhysRegUsed[Reg]) {
if (!HandlePhysRegKill(Reg, LastRef)) {
if (PhysRegPartUse[Reg])
- addRegisterKilled(Reg, PhysRegPartUse[Reg], true);
+ addRegisterKilled(Reg, PhysRegPartUse[Reg], RegInfo, true);
}
} else if (PhysRegPartUse[Reg])
// Add implicit use / kill to last partial use.
- addRegisterKilled(Reg, PhysRegPartUse[Reg], true);
+ addRegisterKilled(Reg, PhysRegPartUse[Reg], RegInfo, true);
else if (LastRef != MI)
// Defined, but not used. However, watch out for cases where a super-reg
// is also defined on the same MI.
- addRegisterDead(Reg, LastRef);
+ addRegisterDead(Reg, LastRef, RegInfo);
}
for (const unsigned *SubRegs = RegInfo->getSubRegisters(Reg);
@@ -376,14 +378,14 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI) {
if (PhysRegUsed[SubReg]) {
if (!HandlePhysRegKill(SubReg, LastRef)) {
if (PhysRegPartUse[SubReg])
- addRegisterKilled(SubReg, PhysRegPartUse[SubReg], true);
+ addRegisterKilled(SubReg, PhysRegPartUse[SubReg], RegInfo, true);
}
} else if (PhysRegPartUse[SubReg])
// Add implicit use / kill to last use of a sub-register.
- addRegisterKilled(SubReg, PhysRegPartUse[SubReg], true);
+ addRegisterKilled(SubReg, PhysRegPartUse[SubReg], RegInfo, true);
else if (LastRef != MI)
// This must be a def of the subreg on the same MI.
- addRegisterDead(SubReg, LastRef);
+ addRegisterDead(SubReg, LastRef, RegInfo);
}
}
@@ -561,10 +563,10 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
for (unsigned j = 0, e2 = VirtRegInfo[i].Kills.size(); j != e2; ++j) {
if (VirtRegInfo[i].Kills[j] == VirtRegInfo[i].DefInst)
addRegisterDead(i + MRegisterInfo::FirstVirtualRegister,
- VirtRegInfo[i].Kills[j]);
+ VirtRegInfo[i].Kills[j], RegInfo);
else
addRegisterKilled(i + MRegisterInfo::FirstVirtualRegister,
- VirtRegInfo[i].Kills[j]);
+ VirtRegInfo[i].Kills[j], RegInfo);
}
// Check to make sure there are no unreachable blocks in the MC CFG for the
@@ -618,6 +620,33 @@ void LiveVariables::instructionChanged(MachineInstr *OldMI,
}
}
+/// transferKillDeadInfo - Similar to instructionChanged except it does not
+/// update live variables internal data structures.
+void LiveVariables::transferKillDeadInfo(MachineInstr *OldMI,
+ MachineInstr *NewMI,
+ const MRegisterInfo *RegInfo) {
+ // If the instruction defines any virtual registers, update the VarInfo,
+ // kill and dead information for the instruction.
+ for (unsigned i = 0, e = OldMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = OldMI->getOperand(i);
+ if (MO.isRegister() && MO.getReg() &&
+ MRegisterInfo::isVirtualRegister(MO.getReg())) {
+ unsigned Reg = MO.getReg();
+ if (MO.isDef()) {
+ if (MO.isDead()) {
+ MO.unsetIsDead();
+ addRegisterDead(Reg, NewMI, RegInfo);
+ }
+ }
+ if (MO.isKill()) {
+ MO.unsetIsKill();
+ addRegisterKilled(Reg, NewMI, RegInfo);
+ }
+ }
+ }
+}
+
+
/// removeVirtualRegistersKilled - Remove all killed info for the specified
/// instruction.
void LiveVariables::removeVirtualRegistersKilled(MachineInstr *MI) {
diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp
index bc9febf..ad9c5ec 100644
--- a/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/lib/CodeGen/RegAllocLinearScan.cpp
@@ -16,6 +16,7 @@
#include "PhysRegTracker.h"
#include "VirtRegMap.h"
#include "llvm/Function.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Passes.h"
@@ -66,6 +67,7 @@ namespace {
SSARegMap *regmap_;
BitVector allocatableRegs_;
LiveIntervals* li_;
+ const LoopInfo *loopInfo;
/// handled_ - Intervals are added to the handled_ set in the order of their
/// start value. This is uses for backtracking.
@@ -101,6 +103,7 @@ namespace {
// Make sure PassManager knows which analyses to make available
// to coalescing and which analyses coalescing invalidates.
AU.addRequiredTransitive<RegisterCoalescer>();
+ AU.addRequired<LoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -251,6 +254,7 @@ bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
regmap_ = mf_->getSSARegMap();
allocatableRegs_ = mri_->getAllocatableSet(fn);
li_ = &getAnalysis<LiveIntervals>();
+ loopInfo = &getAnalysis<LoopInfo>();
// We don't run the coalescer here because we have no reason to
// interact with it. If the coalescer requires interaction, it
@@ -347,18 +351,22 @@ void RALinScan::linearScan()
DOUT << "\tinterval " << *i->first << " expired\n");
inactive_.clear();
- // Add live-ins to every BB except for entry.
+ // Add live-ins to every BB except for entry. Also perform trivial coalescing.
MachineFunction::iterator EntryMBB = mf_->begin();
SmallVector<MachineBasicBlock*, 8> LiveInMBBs;
for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
LiveInterval &cur = i->second;
unsigned Reg = 0;
- if (MRegisterInfo::isPhysicalRegister(cur.reg))
+ bool isPhys = MRegisterInfo::isPhysicalRegister(cur.reg);
+ if (isPhys)
Reg = i->second.reg;
else if (vrm_->isAssignedReg(cur.reg))
Reg = attemptTrivialCoalescing(cur, vrm_->getPhys(cur.reg));
if (!Reg)
continue;
+ // Ignore split live intervals.
+ if (!isPhys && vrm_->getPreSplitReg(cur.reg))
+ continue;
for (LiveInterval::Ranges::const_iterator I = cur.begin(), E = cur.end();
I != E; ++I) {
const LiveRange &LR = *I;
@@ -686,7 +694,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
DOUT << "\t\t\tspilling(c): " << *cur << '\n';
std::vector<LiveInterval*> added =
- li_->addIntervalsForSpills(*cur, *vrm_);
+ li_->addIntervalsForSpills(*cur, loopInfo, *vrm_);
if (added.empty())
return; // Early exit if all spills were folded.
@@ -738,7 +746,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
DOUT << "\t\t\tspilling(a): " << *i->first << '\n';
earliestStart = std::min(earliestStart, i->first->beginNumber());
std::vector<LiveInterval*> newIs =
- li_->addIntervalsForSpills(*i->first, *vrm_);
+ li_->addIntervalsForSpills(*i->first, loopInfo, *vrm_);
std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(reg);
}
@@ -751,7 +759,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
DOUT << "\t\t\tspilling(i): " << *i->first << '\n';
earliestStart = std::min(earliestStart, i->first->beginNumber());
std::vector<LiveInterval*> newIs =
- li_->addIntervalsForSpills(*i->first, *vrm_);
+ li_->addIntervalsForSpills(*i->first, loopInfo, *vrm_);
std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(reg);
}
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp
index b919e8a..4b96cac 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -1463,7 +1463,8 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
if (UniqueUses.count(reg) != 0)
continue;
LiveInterval &RegInt = li_->getInterval(reg);
- RegInt.weight += li_->getSpillWeight(mop, loopDepth);
+ RegInt.weight +=
+ li_->getSpillWeight(mop.isDef(), mop.isUse(), loopDepth);
UniqueUses.insert(reg);
}
}
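This hunk feeds the same weight formula from the coalescer, with UniqueUses ensuring each register is counted once per instruction even if it appears in several operands. A standalone sketch of that accumulation (toy operand type; names are illustrative):

```cpp
#include <cmath>
#include <set>
#include <vector>

struct Op { unsigned reg; bool isDef, isUse; };

// Add one instruction's contribution at the given loop depth, counting each
// register at most once per instruction.
void addWeights(const std::vector<Op> &MI, unsigned loopDepth,
                std::vector<float> &Weight) {
  std::set<unsigned> UniqueUses;
  for (const Op &MO : MI) {
    if (UniqueUses.count(MO.reg))
      continue;
    Weight[MO.reg] += (MO.isDef + MO.isUse) * powf(10.0F, (float)loopDepth);
    UniqueUses.insert(MO.reg);
  }
}
```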
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index bc63066..fe4f7b7 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -63,8 +63,8 @@ namespace {
VirtRegMap::VirtRegMap(MachineFunction &mf)
: TII(*mf.getTarget().getInstrInfo()), MF(mf),
Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
- Virt2ReMatIdMap(NO_STACK_SLOT), ReMatMap(NULL),
- ReMatId(MAX_STACK_SLOT+1) {
+ Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
+ ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
grow();
}
@@ -73,6 +73,8 @@ void VirtRegMap::grow() {
Virt2PhysMap.grow(LastVirtReg);
Virt2StackSlotMap.grow(LastVirtReg);
Virt2ReMatIdMap.grow(LastVirtReg);
+ Virt2SplitMap.grow(LastVirtReg);
+ Virt2SpillPtsMap.grow(LastVirtReg);
ReMatMap.grow(LastVirtReg);
}
@@ -278,6 +280,16 @@ namespace {
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM);
+ void SpillRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MII,
+ int Idx, unsigned PhysReg, int StackSlot,
+ const TargetRegisterClass *RC,
+ MachineInstr *&LastStore,
+ AvailableSpills &Spills,
+ SmallSet<MachineInstr*, 4> &ReMatDefs,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ VirtRegMap &VRM);
void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
};
}
@@ -817,7 +829,8 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
NewMIs.clear();
- unsigned Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
+ int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
+ assert(Idx != -1);
MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
if (FoldedMI) {
if (!VRM.hasPhys(UnfoldVR))
@@ -847,8 +860,66 @@ static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
return 0;
}
+/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
+/// the last store to the same slot is now dead. If so, remove the last store.
+void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MII,
+ int Idx, unsigned PhysReg, int StackSlot,
+ const TargetRegisterClass *RC,
+ MachineInstr *&LastStore,
+ AvailableSpills &Spills,
+ SmallSet<MachineInstr*, 4> &ReMatDefs,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ VirtRegMap &VRM) {
+ MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
+ DOUT << "Store:\t" << *next(MII);
+
+ // If there is a dead store to this stack slot, nuke it now.
+ if (LastStore) {
+ DOUT << "Removed dead store:\t" << *LastStore;
+ ++NumDSE;
+ SmallVector<unsigned, 2> KillRegs;
+ InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
+ MachineBasicBlock::iterator PrevMII = LastStore;
+ bool CheckDef = PrevMII != MBB.begin();
+ if (CheckDef)
+ --PrevMII;
+ MBB.erase(LastStore);
+ VRM.RemoveFromFoldedVirtMap(LastStore);
+ if (CheckDef) {
+ // Look at defs of killed registers on the store. Mark the defs
+ // as dead since the store has been deleted and they aren't
+ // being reused.
+ for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
+ bool HasOtherDef = false;
+ if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
+ MachineInstr *DeadDef = PrevMII;
+ if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
+ // FIXME: This assumes a remat def does not have side
+ // effects.
+ MBB.erase(DeadDef);
+ VRM.RemoveFromFoldedVirtMap(DeadDef);
+ ++NumDRM;
+ }
+ }
+ }
+ }
+ }
+
+ LastStore = next(MII);
+
+ // If the stack slot value was previously available in some other
+ // register, change it now. Otherwise, make the register available,
+ // in PhysReg.
+ Spills.ModifyStackSlotOrReMat(StackSlot);
+ Spills.ClobberPhysReg(PhysReg);
+ Spills.addAvailable(StackSlot, LastStore, PhysReg);
+ ++NumStores;
+}
+
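SpillRegToStackSlot centralizes the store-insertion path so regular spills and the new split-point spills share the same dead-store elimination: if nothing has read a slot since the previous store to it, that store is dead. A standalone sketch of the last-store bookkeeping (counter modeled after NumDSE):

```cpp
#include <cstdio>
#include <map>

struct SpillTracker {
  std::map<int, int> LastStore; // slot -> id of last unread store, -1 if read
  int NumDSE = 0;

  void store(int Slot, int StoreId) {
    auto It = LastStore.find(Slot);
    if (It != LastStore.end() && It->second != -1)
      ++NumDSE;                 // previous store never read: it was dead
    LastStore[Slot] = StoreId;
  }
  void load(int Slot) { LastStore[Slot] = -1; } // slot value was read
};

int main() {
  SpillTracker T;
  T.store(3, 0);
  T.store(3, 1); // no read since store 0, so store 0 was dead
  T.load(3);
  T.store(3, 2); // slot was read in between, store 1 stays
  std::printf("dead stores eliminated: %d\n", T.NumDSE); // 1
}
```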
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
DOUT << MBB.getBasicBlock()->getName() << ":\n";
@@ -870,6 +941,10 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
// ReMatDefs - These are rematerializable def MIs which are not deleted.
SmallSet<MachineInstr*, 4> ReMatDefs;
+ // ReloadedSplits - Splits must be reloaded once per MBB. This keeps track
+ // of which have been reloaded.
+ SmallSet<unsigned, 8> ReloadedSplits;
+
// Keep track of kill information.
BitVector RegKills(MRI->getNumRegs());
std::vector<MachineOperand*> KillOps;
@@ -886,13 +961,24 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
MaybeDeadStores, Spills, RegKills, KillOps, VRM))
NextMII = next(MII);
- /// ReusedOperands - Keep track of operand reuse in case we need to undo
- /// reuse.
MachineInstr &MI = *MII;
- ReuseInfo ReusedOperands(MI, MRI);
-
const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
+ // Insert spills here if asked to.
+ std::vector<unsigned> SpillRegs = VRM.getSpillPtSpills(&MI);
+ for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
+ unsigned VirtReg = SpillRegs[i];
+ const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
+ unsigned Phys = VRM.getPhys(VirtReg);
+ int StackSlot = VRM.getStackSlot(VirtReg);
+ MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
+ SpillRegToStackSlot(MBB, MII, i, Phys, StackSlot, RC,
+ LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
+ }
+
+ /// ReusedOperands - Keep track of operand reuse in case we need to undo
+ /// reuse.
+ ReuseInfo ReusedOperands(MI, MRI);
// Process all of the spilled uses and all non spilled reg references.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
@@ -917,6 +1003,43 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
MF.setPhysRegUsed(Phys);
if (MO.isDef())
ReusedOperands.markClobbered(Phys);
+
+ // If it's a split live interval, insert a reload for the first use
+ // unless it's previously defined in the MBB.
+ unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
+ if (SplitReg) {
+ if (ReloadedSplits.insert(VirtReg)) {
+ bool HasUse = MO.isUse();
+ // If it's a def, we don't need to reload the value unless it's
+ // two-address code.
+ if (!HasUse) {
+ for (unsigned j = i+1; j != e; ++j) {
+ MachineOperand &MOJ = MI.getOperand(j);
+ if (MOJ.isRegister() && MOJ.getReg() == VirtReg) {
+ HasUse = true;
+ break;
+ }
+ }
+ }
+
+ if (HasUse) {
+ if (VRM.isReMaterialized(VirtReg)) {
+ MRI->reMaterialize(MBB, &MI, Phys,
+ VRM.getReMaterializedMI(VirtReg));
+ ++NumReMats;
+ } else {
+ const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
+ MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), RC);
+ ++NumLoads;
+ }
+ // This invalidates Phys.
+ Spills.ClobberPhysReg(Phys);
+ UpdateKills(*prior(MII), RegKills, KillOps);
+ DOUT << '\t' << *prior(MII);
+ }
+ }
+ }
+
unsigned RReg = SubIdx ? MRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
continue;
@@ -1128,6 +1251,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
DOUT << '\t' << MI;
+
// If we have folded references to memory operands, make sure we clear all
// physical registers that may contain the value of the spilled virtual
// register
@@ -1136,11 +1260,16 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
unsigned VirtReg = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
- if (VRM.isAssignedReg(VirtReg)) {
- DOUT << ": No stack slot!\n";
- continue;
- }
+
+ // If this is a split live interval, remember we have seen this so
+ // we do not need to reload it for later uses.
+ unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
+ if (SplitReg)
+ ReloadedSplits.insert(VirtReg);
+
int SS = VRM.getStackSlot(VirtReg);
+ if (SS == VirtRegMap::NO_STACK_SLOT)
+ continue;
FoldedSS.insert(SS);
DOUT << " - StackSlot: " << SS << "\n";
@@ -1338,50 +1467,9 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
MI.getOperand(i).setReg(RReg);
if (!MO.isDead()) {
- MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
- DOUT << "Store:\t" << *next(MII);
-
- // If there is a dead store to this stack slot, nuke it now.
MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
- if (LastStore) {
- DOUT << "Removed dead store:\t" << *LastStore;
- ++NumDSE;
- SmallVector<unsigned, 2> KillRegs;
- InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
- MachineBasicBlock::iterator PrevMII = LastStore;
- bool CheckDef = PrevMII != MBB.begin();
- if (CheckDef)
- --PrevMII;
- MBB.erase(LastStore);
- VRM.RemoveFromFoldedVirtMap(LastStore);
- if (CheckDef) {
- // Look at defs of killed registers on the store. Mark the defs
- // as dead since the store has been deleted and they aren't
- // being reused.
- for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
- bool HasOtherDef = false;
- if (InvalidateRegDef(PrevMII, MI, KillRegs[j], HasOtherDef)) {
- MachineInstr *DeadDef = PrevMII;
- if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
- // FIXME: This assumes a remat def does not have side
- // effects.
- MBB.erase(DeadDef);
- VRM.RemoveFromFoldedVirtMap(DeadDef);
- ++NumDRM;
- }
- }
- }
- }
- }
- LastStore = next(MII);
-
- // If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register available,
- // in PhysReg.
- Spills.ModifyStackSlotOrReMat(StackSlot);
- Spills.ClobberPhysReg(PhysReg);
- Spills.addAvailable(StackSlot, LastStore, PhysReg);
- ++NumStores;
+ SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, LastStore,
+ Spills, ReMatDefs, RegKills, KillOps, VRM);
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
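Within RewriteMBB, a split vreg is reloaded at most once per block: the first use triggers a reload or rematerialization and ReloadedSplits suppresses later ones until the next block. A standalone sketch of that gate (std::set standing in for SmallSet):

```cpp
#include <cstdio>
#include <set>

struct SplitReloader {
  std::set<unsigned> ReloadedSplits; // split vregs already reloaded here
  int NumLoads = 0;

  void startBlock() { ReloadedSplits.clear(); }

  // Called at each use of a split vreg inside the current block.
  void useOfSplitReg(unsigned VReg) {
    if (ReloadedSplits.insert(VReg).second)
      ++NumLoads; // first use in this block: emit the reload
    // later uses simply read the physical register
  }
};

int main() {
  SplitReloader R;
  R.startBlock();
  R.useOfSplitReg(42); R.useOfSplitReg(42); // one reload
  R.startBlock();
  R.useOfSplitReg(42);                      // a new block reloads again
  std::printf("reloads: %d\n", R.NumLoads); // 2
}
```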
diff --git a/lib/CodeGen/VirtRegMap.h b/lib/CodeGen/VirtRegMap.h
index f6d305e..dc2f1cd 100644
--- a/lib/CodeGen/VirtRegMap.h
+++ b/lib/CodeGen/VirtRegMap.h
@@ -18,7 +18,7 @@
#define LLVM_CODEGEN_VIRTREGMAP_H
#include "llvm/Target/MRegisterInfo.h"
-#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/Support/Streams.h"
#include <map>
@@ -50,22 +50,42 @@ namespace llvm {
/// spilled register is the temporary used to load it from the
/// stack).
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
+
/// Virt2StackSlotMap - This is virtual register to stack slot
/// mapping. Each spilled virtual register has an entry in it
/// which corresponds to the stack slot this register is spilled
/// at.
IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+
+ /// Virt2ReMatIdMap - This is virtual register to rematerialization id
+ /// mapping. Each spilled virtual register that should be remat'd has an
+ /// entry in it which corresponds to the remat id.
IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;
+
+ /// Virt2SplitMap - This is split virtual register to original (pre-split)
+ /// virtual register mapping.
+ IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
+
+ /// ReMatMap - This is virtual register to re-materialized instruction
+ /// mapping. Each virtual register whose definition is going to be
+ /// re-materialized has an entry in it.
+ IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;
+
/// MI2VirtMap - This is MachineInstr to virtual register
/// mapping. In the case of memory spill code being folded into
/// instructions, we need to know which virtual register was
/// read/written by this instruction.
MI2VirtMapTy MI2VirtMap;
- /// ReMatMap - This is virtual register to re-materialized instruction
- /// mapping. Each virtual register whose definition is going to be
- /// re-materialized has an entry in it.
- IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;
+ /// SpillPt2VirtMap - This records the virtual registers which should
+ /// be spilled right after the MachineInstr due to live interval
+ /// splitting.
+ DenseMap<MachineInstr*, std::vector<unsigned> > SpillPt2VirtMap;
+
+ /// Virt2SpillPtsMap - This records the MachineInstrs where a virtual
+ /// register should be spilled due to live interval splitting.
+ IndexedMap<std::vector<MachineInstr*>, VirtReg2IndexFunctor>
+ Virt2SpillPtsMap;
/// ReMatId - Instead of assigning a stack slot to a to-be-rematerialized
/// virtual register, a unique id is assigned. This keeps track of
@@ -120,11 +140,25 @@ namespace llvm {
grow();
}
+ /// @brief records that virtReg is a live interval split from SReg.
+ void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
+ Virt2SplitMap[virtReg] = SReg;
+ }
+
+ /// @brief returns the live interval virtReg is split from.
+ unsigned getPreSplitReg(unsigned virtReg) {
+ return Virt2SplitMap[virtReg];
+ }
+
/// @brief returns true if the specified virtual register is not
/// mapped to a stack slot or rematerialized.
bool isAssignedReg(unsigned virtReg) const {
- return getStackSlot(virtReg) == NO_STACK_SLOT &&
- getReMatId(virtReg) == NO_STACK_SLOT;
+ if (getStackSlot(virtReg) == NO_STACK_SLOT &&
+ getReMatId(virtReg) == NO_STACK_SLOT)
+ return true;
+ // A split register can be assigned a physical register as well as a
+ // stack slot or remat id.
+ return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
}
/// @brief returns the stack slot mapped to the specified virtual
@@ -175,6 +209,62 @@ namespace llvm {
ReMatMap[virtReg] = def;
}
+ /// @brief returns the virtual registers that should be spilled due to
+ /// splitting right after the specified MachineInstr.
+ std::vector<unsigned> &getSpillPtSpills(MachineInstr *Pt) {
+ return SpillPt2VirtMap[Pt];
+ }
+
+ /// @brief records the specified MachineInstr as a spill point for virtReg.
+ void addSpillPoint(unsigned virtReg, MachineInstr *Pt) {
+ SpillPt2VirtMap[Pt].push_back(virtReg);
+ Virt2SpillPtsMap[virtReg].push_back(Pt);
+ }
+
+ /// @brief remove the virtReg from the list of registers that should be
+ /// spilled (due to splitting) right after the specified MachineInstr.
+ void removeRegFromSpillPt(MachineInstr *Pt, unsigned virtReg) {
+ std::vector<unsigned> &Regs = SpillPt2VirtMap[Pt];
+ if (Regs.back() == virtReg) { // Most common case.
+ Regs.pop_back();
+ return;
+ }
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i)
+ if (Regs[i] == virtReg) {
+ Regs.erase(Regs.begin()+i);
+ break;
+ }
+ }
+
+ /// @brief specify virtReg is no longer being spilled due to splitting.
+ void removeAllSpillPtsForReg(unsigned virtReg) {
+ std::vector<MachineInstr*> &SpillPts = Virt2SpillPtsMap[virtReg];
+ for (unsigned i = 0, e = SpillPts.size(); i != e; ++i)
+ removeRegFromSpillPt(SpillPts[i], virtReg);
+ Virt2SpillPtsMap[virtReg].clear();
+ }
+
+ /// @brief remove the specified MachineInstr as a spill point for the
+ /// specified register.
+ void removeRegSpillPt(unsigned virtReg, MachineInstr *Pt) {
+ std::vector<MachineInstr*> &SpillPts = Virt2SpillPtsMap[virtReg];
+ if (SpillPts.back() == Pt) { // Most common case.
+ SpillPts.pop_back();
+ return;
+ }
+ for (unsigned i = 0, e = SpillPts.size(); i != e; ++i)
+ if (SpillPts[i] == Pt) {
+ SpillPts.erase(SpillPts.begin()+i);
+ break;
+ }
+ }
+
+ void transferSpillPts(MachineInstr *Old, MachineInstr *New) {
+ std::vector<unsigned> &OldRegs = SpillPt2VirtMap[Old];
+ while (!OldRegs.empty()) {
+ unsigned virtReg = OldRegs.back();
+ OldRegs.pop_back();
+ removeRegSpillPt(virtReg, Old);
+ addSpillPoint(virtReg, New);
+ }
+ }
+
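The spill-point API keeps two mirrored maps consistent: instruction to the vregs spilled right after it, and vreg to the instructions at which it spills, so that folding (which replaces an instruction) can retarget pending spill points via transferSpillPts. A standalone sketch of the invariant (std::map and std::vector standing in for the LLVM containers):

```cpp
#include <algorithm>
#include <map>
#include <vector>

struct Instr; // opaque stand-in for MachineInstr

struct SpillPoints {
  std::map<Instr*, std::vector<unsigned>> SpillPt2Virt;  // spill after MI
  std::map<unsigned, std::vector<Instr*>> Virt2SpillPts; // where reg spills

  void addSpillPoint(unsigned VReg, Instr *Pt) {
    SpillPt2Virt[Pt].push_back(VReg);
    Virt2SpillPts[VReg].push_back(Pt);
  }
  void removeRegSpillPt(unsigned VReg, Instr *Pt) {
    auto &Pts = Virt2SpillPts[VReg];
    Pts.erase(std::find(Pts.begin(), Pts.end(), Pt)); // Pt is always present
  }
  // Folding replaced Old with New: pending spills must follow the new MI.
  void transferSpillPts(Instr *Old, Instr *New) {
    auto &OldRegs = SpillPt2Virt[Old];
    while (!OldRegs.empty()) {
      unsigned VReg = OldRegs.back();
      OldRegs.pop_back();
      removeRegSpillPt(VReg, Old);
      addSpillPoint(VReg, New);
    }
  }
};
```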
/// @brief Updates information about the specified virtual register's value
/// folded into newMI machine instruction. The OpNum argument indicates the
/// operand number of OldMI that is folded.