author    Dan Gohman <gohman@apple.com>    2008-12-04 01:35:46 +0000
committer Dan Gohman <gohman@apple.com>    2008-12-04 01:35:46 +0000
commit    cb4717c2e2533cc64e039e7d09ea0815c9d237e5 (patch)
tree      95ab55375b0d0f529786c001567d1ee20db8bf37 /lib/CodeGen/ScheduleDAGInstrs.cpp
parent    323f2535d4e53b9fdc2e26b35c9c253a5d2b712a (diff)
Add minimal support for disambiguating memory references. Currently
the main thing this covers is spills to distinct spill slots.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60517 91177308-0d34-0410-b5e6-96231b3b80d8
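The disambiguation works by keying memory operations on the Value attached to their single memoperand; for spill code that is the PseudoSourceValue of the individual fixed stack slot, so references to distinct slots never need chain edges against each other. Below is a minimal standalone sketch of that bookkeeping. It assumes hypothetical Node, chainEdge, visitStore, and visitLoad stand-ins rather than the SUnit/addPred API used in the patch, and it walks in program order rather than the bottom-up walk BuildSchedUnits performs:

#include <cstdio>
#include <map>
#include <vector>

struct Node { const char *Name; };                     // stand-in for an SUnit

static std::map<int, Node *> LastStore;                // slot -> most recent store
static std::map<int, std::vector<Node *> > SlotLoads;  // slot -> loads since that store

static void chainEdge(Node *From, Node *To) {
  std::printf("chain: %s -> %s\n", From->Name, To->Name);
}

// A store must be ordered after the previous store to the same slot and after
// every load of that slot, but needs no edges against other slots.
static void visitStore(Node *SU, int Slot) {
  if (Node *Prev = LastStore[Slot])
    chainEdge(Prev, SU);
  std::vector<Node *> &Loads = SlotLoads[Slot];
  for (size_t i = 0, e = Loads.size(); i != e; ++i)
    chainEdge(Loads[i], SU);
  Loads.clear();
  LastStore[Slot] = SU;
}

// A load only needs ordering against the most recent store to the same slot.
static void visitLoad(Node *SU, int Slot) {
  if (Node *Prev = LastStore[Slot])
    chainEdge(Prev, SU);
  SlotLoads[Slot].push_back(SU);
}

int main() {
  Node S0 = { "store slot0" }, S1 = { "store slot1" }, L0 = { "load slot0" };
  visitStore(&S0, 0);
  visitStore(&S1, 1); // disambiguated: no edge against the slot0 store
  visitLoad(&L0, 0);  // ordered only after "store slot0"
  return 0;
}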
Diffstat (limited to 'lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp  136
1 file changed, 123 insertions, 13 deletions
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 3100882..b72df3a 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -14,11 +14,13 @@
#define DEBUG_TYPE "sched-instrs"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include <map>
using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineBasicBlock *bb,
@@ -28,19 +30,40 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineBasicBlock *bb,
void ScheduleDAGInstrs::BuildSchedUnits() {
SUnits.clear();
SUnits.reserve(BB->size());
+ int Cost = 1; // FIXME
- std::vector<SUnit *> PendingLoads;
- SUnit *Terminator = 0;
- SUnit *Chain = 0;
+ // We build scheduling units by walking a block's instruction list from bottom
+ // to top.
+
+ // Remember where defs and uses of each physical register are as we proceed.
SUnit *Defs[TargetRegisterInfo::FirstVirtualRegister] = {};
std::vector<SUnit *> Uses[TargetRegisterInfo::FirstVirtualRegister] = {};
- int Cost = 1; // FIXME
+
+ // Remember where unknown loads are after the most recent unknown store
+ // as we proceed.
+ std::vector<SUnit *> PendingLoads;
+
+ // Remember where a generic side-effecting instruction is as we proceed. If
+ // ChainMMO is null, this is assumed to have arbitrary side-effects. If
+ // ChainMMO is non-null, then Chain makes only a single memory reference.
+ SUnit *Chain = 0;
+ MachineMemOperand *ChainMMO = 0;
+
+ // Memory references to specific known memory locations are tracked so that
+ // they can be given more precise dependencies.
+ std::map<const Value *, SUnit *> MemDefs;
+ std::map<const Value *, std::vector<SUnit *> > MemUses;
+
+ // Terminators can perform control transfers, so we need to make sure that
+ // all the work of the block is done before the terminator.
+ SUnit *Terminator = 0;
for (MachineBasicBlock::iterator MII = BB->end(), MIE = BB->begin();
MII != MIE; --MII) {
MachineInstr *MI = prior(MII);
SUnit *SU = NewSUnit(MI);
+ // Add register-based dependencies (data, anti, and output).
for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
const MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg()) continue;
@@ -81,23 +104,110 @@ void ScheduleDAGInstrs::BuildSchedUnits() {
UseList.push_back(SU);
}
}
- bool False = false;
- bool True = true;
- if (!MI->isSafeToMove(TII, False)) {
+
+ // Add chain dependencies.
+ // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
+ // after stack slots are lowered to actual addresses.
+ // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
+ // produce more precise dependence information.
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (TID.isCall() || TID.isReturn() || TID.isBranch() ||
+ TID.hasUnmodeledSideEffects()) {
+ new_chain:
+ // This is the conservative case. Add dependencies on all memory references.
if (Chain)
Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ Chain = SU;
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
PendingLoads[k]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
PendingLoads.clear();
- Chain = SU;
- } else if (!MI->isSafeToMove(TII, True)) {
- if (Chain)
- Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
- PendingLoads.push_back(SU);
+ for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
+ E = MemDefs.end(); I != E; ++I) {
+ I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ I->second = SU;
+ }
+ for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
+ MemUses.begin(), E = MemUses.end(); I != E; ++I) {
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ I->second[i]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ I->second.clear();
+ }
+ // See if it is known to just have a single memory reference.
+ MachineInstr *ChainMI = Chain->getInstr();
+ const TargetInstrDesc &ChainTID = ChainMI->getDesc();
+ if (!ChainTID.isCall() && !ChainTID.isReturn() && !ChainTID.isBranch() &&
+ !ChainTID.hasUnmodeledSideEffects() &&
+ ChainMI->hasOneMemOperand() &&
+ !ChainMI->memoperands_begin()->isVolatile() &&
+ ChainMI->memoperands_begin()->getValue())
+ // We know that the Chain accesses one specific memory location.
+ ChainMMO = &*ChainMI->memoperands_begin();
+ else
+ // Unknown memory accesses. Assume the worst.
+ ChainMMO = 0;
+ } else if (TID.mayStore()) {
+ if (MI->hasOneMemOperand() &&
+ MI->memoperands_begin()->getValue() &&
+ !MI->memoperands_begin()->isVolatile() &&
+ isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
+ // A store to a specific PseudoSourceValue. Add precise dependencies.
+ const Value *V = MI->memoperands_begin()->getValue();
+ // Handle the def in MemDefs, if there is one.
+ std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
+ if (I != MemDefs.end()) {
+ I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ I->second = SU;
+ } else {
+ MemDefs[V] = SU;
+ }
+ // Handle the uses in MemUses, if there are any.
+ std::map<const Value *, std::vector<SUnit *> >::iterator J = MemUses.find(V);
+ if (J != MemUses.end()) {
+ for (unsigned i = 0, e = J->second.size(); i != e; ++i)
+ J->second[i]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ J->second.clear();
+ }
+ // Add a general dependence too, if needed.
+ if (Chain)
+ Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ } else
+ // Treat all other stores conservatively.
+ goto new_chain;
+ } else if (TID.mayLoad()) {
+ if (TII->isInvariantLoad(MI)) {
+ // Invariant load, no chain dependencies needed!
+ } else if (MI->hasOneMemOperand() &&
+ MI->memoperands_begin()->getValue() &&
+ !MI->memoperands_begin()->isVolatile() &&
+ isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
+ // A load from a specific PseudoSourceValue. Add precise dependencies.
+ const Value *V = MI->memoperands_begin()->getValue();
+ std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
+ if (I != MemDefs.end())
+ I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ MemUses[V].push_back(SU);
+
+ // Add a general dependence too, if needed.
+ if (Chain && (!ChainMMO ||
+ (ChainMMO->isStore() || ChainMMO->isVolatile())))
+ Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ } else if (MI->hasVolatileMemoryRef()) {
+ // Treat volatile loads conservatively. Note that this includes
+ // cases where memoperand information is unavailable.
+ goto new_chain;
+ } else {
+ // A normal load. Just depend on the general chain.
+ if (Chain)
+ Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
+ PendingLoads.push_back(SU);
+ }
}
+
+ // Add chain edges from the terminator to ensure that all the work of the block is
+ // completed before any control transfers.
if (Terminator && SU->Succs.empty())
Terminator->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
- if (MI->getDesc().isTerminator() || MI->isLabel())
+ if (TID.isTerminator() || MI->isLabel())
Terminator = SU;
// Assign the Latency field of SU using target-provided information.
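To recap the control flow this hunk adds: each instruction falls into one of a few chain-dependence cases. The enum and function below are an illustrative restatement only; the names are hypothetical, and the boolean parameters stand in for the TargetInstrDesc and MachineInstr queries made above.

#include <cstdio>

// Illustrative restatement of the chain-dependence cases above (hypothetical names).
enum MemDepKind {
  NewChain,       // call/return/branch/unmodeled side effects, or an unknown store
  PreciseStore,   // one non-volatile memoperand naming a PseudoSourceValue
  InvariantLoad,  // needs no chain edges at all
  PreciseLoad,    // load from one specific PseudoSourceValue
  VolatileLoad,   // volatile or missing memoperand info: handled like NewChain
  PlainLoad,      // ordinary load: depends only on the general Chain
  NoMemDeps       // no memory reference: only register dependencies apply
};

static MemDepKind classifyForChainDeps(bool IsCallReturnBranchOrSideEffects,
                                       bool MayStore, bool MayLoad,
                                       bool HasSingleNonVolatilePSVOperand,
                                       bool IsInvariantLoad, bool HasVolatileRef) {
  if (IsCallReturnBranchOrSideEffects)
    return NewChain;
  if (MayStore)
    return HasSingleNonVolatilePSVOperand ? PreciseStore : NewChain;
  if (MayLoad) {
    if (IsInvariantLoad)                return InvariantLoad;
    if (HasSingleNonVolatilePSVOperand) return PreciseLoad;
    if (HasVolatileRef)                 return VolatileLoad;
    return PlainLoad;
  }
  return NoMemDeps;
}

int main() {
  // A spill store: mayStore with a single non-volatile PseudoSourceValue operand.
  std::printf("spill store -> %d (PreciseStore)\n",
              classifyForChainDeps(false, true, false, true, false, false));
  // A call: conservatively starts a new chain.
  std::printf("call        -> %d (NewChain)\n",
              classifyForChainDeps(true, false, false, false, false, false));
  return 0;
}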