author     Chris Lattner <sabre@nondot.org>    2008-11-29 01:43:36 +0000
committer  Chris Lattner <sabre@nondot.org>    2008-11-29 01:43:36 +0000
commit     fd9b56dc27b3509f41c7a08763e9cc49b422838d (patch)
tree       def18a4ae1596487c205831717d1b5fc98e2296d /lib/Transforms
parent     b80647df60b8cbd377a0cfc1e2acf46a0507358f (diff)
Reimplement the internal abstraction used by MemDep in terms of a pointer/int
pair instead of a manually bitmangled pointer.

This forces clients to think a little more about checking the appropriate
pieces and will be useful for internal implementation improvements later.

I'm not particularly happy with this.  After going through this I don't think
that the clients of memdep should be exposed to the internal type at all.
I'll fix this in a subsequent commit.

This has no functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60230 91177308-0d34-0410-b5e6-96231b3b80d8
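As a reading aid for the diff below, here is a minimal, self-contained sketch
of the shape the new result type takes. It is reconstructed from the client
code in this patch rather than copied from MemoryDependenceAnalysis.h: the
getPointer()/getInt() accessors and the Normal/None/NonLocal kinds appear in
the hunks, while the enum name DepType, the constructor, and the two-field
layout are assumptions (the real type presumably packs the tag into the
pointer's low bits, for example with llvm::PointerIntPair, rather than storing
two separate fields as done here).

// Illustrative sketch only; reconstructed from the clients in this patch,
// not copied from the real header. Names and layout beyond what the diff
// shows are assumptions.
class Instruction;

class MemoryDependenceAnalysis {
public:
  // Before this patch getDependency() returned a bare Instruction* and
  // reserved sentinel pointer values for the None/NonLocal cases (the
  // "manually bitmangled pointer"); clients compared the pointer itself
  // against MemoryDependenceAnalysis::None / ::NonLocal.
  enum DepType {
    Normal,    // getPointer() is the instruction the query depends on
    None,      // no dependency was found
    NonLocal   // the dependency lies outside the querying block
  };

  // After this patch the dependency kind travels next to the pointer
  // explicitly instead of being encoded in reserved pointer values.
  class DepResultTy {
    Instruction *Ptr;
    DepType Int;
  public:
    DepResultTy(Instruction *P, DepType I) : Ptr(P), Int(I) {}
    Instruction *getPointer() const { return Ptr; }
    DepType getInt() const { return Int; }
  };

  // Two-argument form assumed from the calls MD.getDependency(Inst) and
  // MD.getDependency(Inst, dep.getPointer()) seen in the diff.
  DepResultTy getDependency(Instruction *QueryInst,
                            Instruction *StartFrom = 0);
};

// Typical client check, matching the pattern introduced throughout the diff:
//
//   MemoryDependenceAnalysis::DepResultTy Dep = MD.getDependency(Inst);
//   if (Dep.getInt() == MemoryDependenceAnalysis::Normal)
//     visit(Dep.getPointer());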
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp  | 31
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                   | 63
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp       | 14
3 files changed, 56 insertions(+), 52 deletions(-)
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index e6a05b7..8217a44 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -46,9 +46,11 @@ namespace {
Changed |= runOnBasicBlock(*I);
return Changed;
}
+
+ typedef MemoryDependenceAnalysis::DepResultTy DepResultTy;
bool runOnBasicBlock(BasicBlock &BB);
- bool handleFreeWithNonTrivialDependency(FreeInst *F, Instruction *Dep);
+ bool handleFreeWithNonTrivialDependency(FreeInst *F, DepResultTy Dep);
bool handleEndBlock(BasicBlock &BB);
bool RemoveUndeadPointers(Value* pointer, uint64_t killPointerSize,
BasicBlock::iterator& BBI,
@@ -108,17 +110,16 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// ... to a pointer that has been stored to before...
if (last) {
- Instruction* dep = MD.getDependency(Inst);
+ DepResultTy dep = MD.getDependency(Inst);
bool deletedStore = false;
// ... and no other memory dependencies are between them....
- while (dep != MemoryDependenceAnalysis::None &&
- dep != MemoryDependenceAnalysis::NonLocal &&
- isa<StoreInst>(dep)) {
- if (dep != last ||
+ while (dep.getInt() == MemoryDependenceAnalysis::Normal &&
+ isa<StoreInst>(dep.getPointer())) {
+ if (dep.getPointer() != last ||
TD.getTypeStoreSize(last->getOperand(0)->getType()) >
TD.getTypeStoreSize(Inst->getOperand(0)->getType())) {
- dep = MD.getDependency(Inst, dep);
+ dep = MD.getDependency(Inst, dep.getPointer());
continue;
}
@@ -151,14 +152,14 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// loaded from, then the store can be removed;
if (LoadInst* L = dyn_cast<LoadInst>(S->getOperand(0))) {
// FIXME: Don't do dep query if Parents don't match and other stuff!
- Instruction* dep = MD.getDependency(S);
+ DepResultTy dep = MD.getDependency(S);
DominatorTree& DT = getAnalysis<DominatorTree>();
if (!S->isVolatile() && S->getParent() == L->getParent() &&
S->getPointerOperand() == L->getPointerOperand() &&
- (dep == MemoryDependenceAnalysis::None ||
- dep == MemoryDependenceAnalysis::NonLocal ||
- DT.dominates(dep, L))) {
+ (dep.getInt() == MemoryDependenceAnalysis::None ||
+ dep.getInt() == MemoryDependenceAnalysis::NonLocal ||
+ DT.dominates(dep.getPointer(), L))) {
DeleteDeadInstruction(S);
if (!isa<TerminatorInst>(BB.begin()))
@@ -184,15 +185,15 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, Instruction* dep) {
+bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, DepResultTy dep) {
TargetData &TD = getAnalysis<TargetData>();
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- if (dep == MemoryDependenceAnalysis::None ||
- dep == MemoryDependenceAnalysis::NonLocal)
+ if (dep.getInt() == MemoryDependenceAnalysis::None ||
+ dep.getInt() == MemoryDependenceAnalysis::NonLocal)
return false;
- StoreInst* dependency = dyn_cast<StoreInst>(dep);
+ StoreInst* dependency = dyn_cast<StoreInst>(dep.getPointer());
if (!dependency)
return false;
else if (dependency->isVolatile())
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 2d0a99b..64cac8f 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -456,19 +456,21 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
return nextValueNumber++;
}
- Instruction* local_dep = MD->getDependency(C);
+ MemoryDependenceAnalysis::DepResultTy local_dep = MD->getDependency(C);
- if (local_dep == MemoryDependenceAnalysis::None) {
+ if (local_dep.getInt() == MemoryDependenceAnalysis::None) {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
return nextValueNumber++;
- } else if (local_dep != MemoryDependenceAnalysis::NonLocal) {
- if (!isa<CallInst>(local_dep)) {
+ } else if (local_dep.getInt() != MemoryDependenceAnalysis::NonLocal) {
+ // FIXME: INDENT PROPERLY!
+ if (!isa<CallInst>(local_dep.getPointer())) {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
return nextValueNumber++;
}
- CallInst* local_cdep = cast<CallInst>(local_dep);
+ CallInst* local_cdep = cast<CallInst>(local_dep.getPointer());
+ // FIXME: INDENT PROPERLY.
if (local_cdep->getCalledFunction() != C->getCalledFunction() ||
local_cdep->getNumOperands() != C->getNumOperands()) {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
@@ -493,19 +495,20 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
}
- DenseMap<BasicBlock*, Value*> deps;
+ DenseMap<BasicBlock*, MemoryDependenceAnalysis::DepResultTy> deps;
MD->getNonLocalDependency(C, deps);
CallInst* cdep = 0;
- for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(),
- E = deps.end(); I != E; ++I) {
- if (I->second == MemoryDependenceAnalysis::None) {
+ for (DenseMap<BasicBlock*, MemoryDependenceAnalysis::DepResultTy>
+ ::iterator I = deps.begin(), E = deps.end(); I != E; ++I) {
+ if (I->second.getInt() == MemoryDependenceAnalysis::None) {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
return nextValueNumber++;
- } else if (I->second != MemoryDependenceAnalysis::NonLocal) {
+ } else if (I->second.getInt() != MemoryDependenceAnalysis::NonLocal) {
+ // FIXME: INDENT PROPERLY
if (DT->properlyDominates(I->first, C->getParent())) {
- if (CallInst* CD = dyn_cast<CallInst>(I->second))
+ if (CallInst* CD = dyn_cast<CallInst>(I->second.getPointer()))
cdep = CD;
else {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
@@ -718,6 +721,8 @@ namespace {
AU.addPreserved<AliasAnalysis>();
}
+ typedef MemoryDependenceAnalysis::DepResultTy DepResultTy;
+
// Helper fuctions
// FIXME: eliminate or document these better
bool processLoad(LoadInst* L,
@@ -861,7 +866,7 @@ bool GVN::processNonLocalLoad(LoadInst* L,
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
// Find the non-local dependencies of the load
- DenseMap<BasicBlock*, Value*> deps;
+ DenseMap<BasicBlock*, DepResultTy> deps;
MD.getNonLocalDependency(L, deps);
// If we had to process more than one hundred blocks to find the
@@ -873,19 +878,19 @@ bool GVN::processNonLocalLoad(LoadInst* L,
DenseMap<BasicBlock*, Value*> repl;
// Filter out useless results (non-locals, etc)
- for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(), E = deps.end();
- I != E; ++I) {
- if (I->second == MemoryDependenceAnalysis::None)
+ for (DenseMap<BasicBlock*, DepResultTy>::iterator I = deps.begin(),
+ E = deps.end(); I != E; ++I) {
+ if (I->second.getInt() == MemoryDependenceAnalysis::None)
return false;
- if (I->second == MemoryDependenceAnalysis::NonLocal)
+ if (I->second.getInt() == MemoryDependenceAnalysis::NonLocal)
continue;
- if (StoreInst* S = dyn_cast<StoreInst>(I->second)) {
+ if (StoreInst* S = dyn_cast<StoreInst>(I->second.getPointer())) {
if (S->getPointerOperand() != L->getPointerOperand())
return false;
repl[I->first] = S->getOperand(0);
- } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second)) {
+ } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second.getPointer())) {
if (LD->getPointerOperand() != L->getPointerOperand())
return false;
repl[I->first] = LD;
@@ -936,8 +941,8 @@ bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
// ... to a pointer that has been loaded from before...
MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
bool removedNonLocal = false;
- Instruction* dep = MD.getDependency(L);
- if (dep == MemoryDependenceAnalysis::NonLocal &&
+ DepResultTy dep = MD.getDependency(L);
+ if (dep.getInt() == MemoryDependenceAnalysis::NonLocal &&
L->getParent() != &L->getParent()->getParent()->getEntryBlock()) {
removedNonLocal = processNonLocalLoad(L, toErase);
@@ -952,11 +957,10 @@ bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
// Walk up the dependency chain until we either find
// a dependency we can use, or we can't walk any further
- while (dep != MemoryDependenceAnalysis::None &&
- dep != MemoryDependenceAnalysis::NonLocal &&
- (isa<LoadInst>(dep) || isa<StoreInst>(dep))) {
+ while (dep.getInt() == MemoryDependenceAnalysis::Normal &&
+ (isa<LoadInst>(dep.getPointer()) || isa<StoreInst>(dep.getPointer()))){
// ... that depends on a store ...
- if (StoreInst* S = dyn_cast<StoreInst>(dep)) {
+ if (StoreInst* S = dyn_cast<StoreInst>(dep.getPointer())) {
if (S->getPointerOperand() == pointer) {
// Remove it!
MD.removeInstruction(L);
@@ -974,7 +978,7 @@ bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
// If we don't depend on a store, and we haven't
// been loaded before, bail.
break;
- } else if (dep == last) {
+ } else if (dep.getPointer() == last) {
// Remove it!
MD.removeInstruction(L);
@@ -985,16 +989,15 @@ bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
break;
} else {
- dep = MD.getDependency(L, dep);
+ dep = MD.getDependency(L, dep.getPointer());
}
}
- if (dep != MemoryDependenceAnalysis::None &&
- dep != MemoryDependenceAnalysis::NonLocal &&
- isa<AllocationInst>(dep)) {
+ if (dep.getInt() == MemoryDependenceAnalysis::Normal &&
+ isa<AllocationInst>(dep.getPointer())) {
// Check that this load is actually from the
// allocation we found
- if (L->getOperand(0)->getUnderlyingObject() == dep) {
+ if (L->getOperand(0)->getUnderlyingObject() == dep.getPointer()) {
// If this load depends directly on an allocation, there isn't
// anything stored there; therefore, we can optimize this load
// to undef.
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 6d27327..acc6630 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -629,18 +629,18 @@ bool MemCpyOpt::processMemCpy(MemCpyInst* M) {
// The are two possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundance for DSE
// b) call-memcpy xform for return slot optimization
- Instruction* dep = MD.getDependency(M);
- if (dep == MemoryDependenceAnalysis::None ||
- dep == MemoryDependenceAnalysis::NonLocal)
+ MemoryDependenceAnalysis::DepResultTy dep = MD.getDependency(M);
+ if (dep.getInt() == MemoryDependenceAnalysis::None ||
+ dep.getInt() == MemoryDependenceAnalysis::NonLocal)
return false;
- else if (!isa<MemCpyInst>(dep)) {
- if (CallInst* C = dyn_cast<CallInst>(dep))
+ else if (!isa<MemCpyInst>(dep.getPointer())) {
+ if (CallInst* C = dyn_cast<CallInst>(dep.getPointer()))
return performCallSlotOptzn(M, C);
else
return false;
}
- MemCpyInst* MDep = cast<MemCpyInst>(dep);
+ MemCpyInst* MDep = cast<MemCpyInst>(dep.getPointer());
// We can only transforms memcpy's where the dest of one is the source of the
// other
@@ -691,7 +691,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst* M) {
// If C and M don't interfere, then this is a valid transformation. If they
// did, this would mean that the two sources overlap, which would be bad.
- if (MD.getDependency(C) == MDep) {
+ if (MD.getDependency(C) == dep) {
MD.dropInstruction(M);
M->eraseFromParent();