Diffstat (limited to 'lib/Analysis')
 lib/Analysis/BasicAliasAnalysis.cpp        |   3
 lib/Analysis/BranchProbabilityInfo.cpp     | 399
 lib/Analysis/CFGPrinter.cpp                |   8
 lib/Analysis/CMakeLists.txt                |   6
 lib/Analysis/CaptureTracking.cpp           |  95
 lib/Analysis/ConstantFolding.cpp           | 133
 lib/Analysis/DIBuilder.cpp                 |   4
 lib/Analysis/IPA/CMakeLists.txt            |   6
 lib/Analysis/IPA/CallGraph.cpp             |  13
 lib/Analysis/IPA/LLVMBuild.txt             |  23
 lib/Analysis/InlineCost.cpp                |   6
 lib/Analysis/InstructionSimplify.cpp       | 661
 lib/Analysis/LLVMBuild.txt                 |  25
 lib/Analysis/LazyValueInfo.cpp             |  47
 lib/Analysis/Lint.cpp                      |   9
 lib/Analysis/LoopInfo.cpp                  | 116
 lib/Analysis/MemDepPrinter.cpp             |   2
 lib/Analysis/MemoryBuiltins.cpp            |   8
 lib/Analysis/MemoryDependenceAnalysis.cpp  |  79
 lib/Analysis/PHITransAddr.cpp              |   9
 lib/Analysis/PathProfileVerifier.cpp       |  16
 lib/Analysis/ProfileEstimatorPass.cpp      |   2
 lib/Analysis/ProfileInfoLoaderPass.cpp     |   4
 lib/Analysis/ProfileVerifierPass.cpp       |  18
 lib/Analysis/RegionInfo.cpp                |   6
 lib/Analysis/ScalarEvolution.cpp           | 341
 lib/Analysis/ScalarEvolutionExpander.cpp   | 115
 lib/Analysis/SparsePropagation.cpp         |   4
 lib/Analysis/Trace.cpp                     |   2
 lib/Analysis/ValueTracking.cpp             | 213
 30 files changed, 1496 insertions(+), 877 deletions(-)
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index af400ba..568983a 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -706,8 +706,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// pointer were passed to arguments that were neither of these, then it
// couldn't be no-capture.
if (!(*CI)->getType()->isPointerTy() ||
- (!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture) &&
- !CS.paramHasAttr(ArgNo+1, Attribute::ByVal)))
+ (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
continue;
// If this is a no-capture pointer argument, see if we can tell that it
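The two new CallSite helpers read as thin wrappers over paramHasAttr with the 1-based attribute index (slot 0 is the return value) folded in. A minimal sketch of the assumed equivalence, mirroring the lines replaced above:

    // Sketch only; the real definitions live in llvm/Support/CallSite.h.
    bool doesNotCapture(unsigned ArgNo) const {
      return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
    }
    bool isByValArgument(unsigned ArgNo) const {
      return paramHasAttr(ArgNo + 1, Attribute::ByVal);
    }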
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index 52090c9..f9461c0 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -12,11 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/Constants.h"
+#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -29,118 +32,117 @@ INITIALIZE_PASS_END(BranchProbabilityInfo, "branch-prob",
char BranchProbabilityInfo::ID = 0;
-namespace {
-// Please note that BranchProbabilityAnalysis is not a FunctionPass.
-// It is created by BranchProbabilityInfo (which is a FunctionPass), which
-// provides a clear interface. Thanks to that, all heuristics and other
-// private methods are hidden in the .cpp file.
-class BranchProbabilityAnalysis {
-
- typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
-
- BranchProbabilityInfo *BP;
-
- LoopInfo *LI;
-
-
- // Weights are for internal use only. They are used by heuristics to help to
- // estimate edges' probability. Example:
- //
- // Using "Loop Branch Heuristics" we predict weights of edges for the
- // block BB2.
- // ...
- // |
- // V
- // BB1<-+
- // | |
- // | | (Weight = 124)
- // V |
- // BB2--+
- // |
- // | (Weight = 4)
- // V
- // BB3
- //
- // Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
- // Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
-
- static const uint32_t LBH_TAKEN_WEIGHT = 124;
- static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
-
- static const uint32_t RH_TAKEN_WEIGHT = 24;
- static const uint32_t RH_NONTAKEN_WEIGHT = 8;
-
- static const uint32_t PH_TAKEN_WEIGHT = 20;
- static const uint32_t PH_NONTAKEN_WEIGHT = 12;
-
- static const uint32_t ZH_TAKEN_WEIGHT = 20;
- static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
-
- // Standard weight value. Used when none of the heuristics set weight for
- // the edge.
- static const uint32_t NORMAL_WEIGHT = 16;
-
- // Minimum weight of an edge. Please note, that weight is NEVER 0.
- static const uint32_t MIN_WEIGHT = 1;
-
- // Return TRUE if BB leads directly to a Return Instruction.
- static bool isReturningBlock(BasicBlock *BB) {
- SmallPtrSet<BasicBlock *, 8> Visited;
-
- while (true) {
- TerminatorInst *TI = BB->getTerminator();
- if (isa<ReturnInst>(TI))
- return true;
-
- if (TI->getNumSuccessors() > 1)
- break;
-
- // It is unreachable block which we can consider as a return instruction.
- if (TI->getNumSuccessors() == 0)
- return true;
-
- Visited.insert(BB);
- BB = TI->getSuccessor(0);
-
- // Stop if cycle is detected.
- if (Visited.count(BB))
- return false;
- }
+// Weights are for internal use only. They are used by heuristics to help to
+// estimate edges' probability. Example:
+//
+// Using "Loop Branch Heuristics" we predict weights of edges for the
+// block BB2.
+// ...
+// |
+// V
+// BB1<-+
+// | |
+// | | (Weight = 124)
+// V |
+// BB2--+
+// |
+// | (Weight = 4)
+// V
+// BB3
+//
+// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
+// Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
+static const uint32_t LBH_TAKEN_WEIGHT = 124;
+static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
+
+/// \brief Unreachable-terminating branch taken weight.
+///
+/// This is the weight for a branch being taken to a block that terminates
+/// (eventually) in unreachable. These are predicted as unlikely as possible.
+static const uint32_t UR_TAKEN_WEIGHT = 1;
+
+/// \brief Unreachable-terminating branch not-taken weight.
+///
+/// This is the weight for a branch not being taken toward a block that
+/// terminates (eventually) in unreachable. Such a branch is essentially never
+/// taken.
+static const uint32_t UR_NONTAKEN_WEIGHT = 1023;
+
+static const uint32_t PH_TAKEN_WEIGHT = 20;
+static const uint32_t PH_NONTAKEN_WEIGHT = 12;
+
+static const uint32_t ZH_TAKEN_WEIGHT = 20;
+static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
+
+static const uint32_t FPH_TAKEN_WEIGHT = 20;
+static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
+
+// Standard weight value. Used when none of the heuristics set weight for
+// the edge.
+static const uint32_t NORMAL_WEIGHT = 16;
+
+// Minimum weight of an edge. Please note, that weight is NEVER 0.
+static const uint32_t MIN_WEIGHT = 1;
+
+static uint32_t getMaxWeightFor(BasicBlock *BB) {
+ return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
+}
+
+/// \brief Calculate edge weights for successors that lead to unreachable.
+///
+/// Predict that a successor which necessarily leads to an
+/// unreachable-terminated block is extremely unlikely to be taken.
+bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ if (TI->getNumSuccessors() == 0) {
+ if (isa<UnreachableInst>(TI))
+ PostDominatedByUnreachable.insert(BB);
return false;
}
- uint32_t getMaxWeightFor(BasicBlock *BB) const {
- return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
- }
+ SmallPtrSet<BasicBlock *, 4> UnreachableEdges;
+ SmallPtrSet<BasicBlock *, 4> ReachableEdges;
-public:
- BranchProbabilityAnalysis(BranchProbabilityInfo *BP, LoopInfo *LI)
- : BP(BP), LI(LI) {
+ for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
+ if (PostDominatedByUnreachable.count(*I))
+ UnreachableEdges.insert(*I);
+ else
+ ReachableEdges.insert(*I);
}
- // Metadata Weights
- bool calcMetadataWeights(BasicBlock *BB);
+ // If all successors are in the set of blocks post-dominated by unreachable,
+ // this block is too.
+ if (UnreachableEdges.size() == TI->getNumSuccessors())
+ PostDominatedByUnreachable.insert(BB);
- // Return Heuristics
- bool calcReturnHeuristics(BasicBlock *BB);
-
- // Pointer Heuristics
- bool calcPointerHeuristics(BasicBlock *BB);
-
- // Loop Branch Heuristics
- bool calcLoopBranchHeuristics(BasicBlock *BB);
+ // Skip probabilities if this block has a single successor or if all were
+ // reachable.
+ if (TI->getNumSuccessors() == 1 || UnreachableEdges.empty())
+ return false;
- // Zero Heurestics
- bool calcZeroHeuristics(BasicBlock *BB);
+ uint32_t UnreachableWeight =
+ std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT);
+ for (SmallPtrSet<BasicBlock *, 4>::iterator I = UnreachableEdges.begin(),
+ E = UnreachableEdges.end();
+ I != E; ++I)
+ setEdgeWeight(BB, *I, UnreachableWeight);
+
+ if (ReachableEdges.empty())
+ return true;
+ uint32_t ReachableWeight =
+ std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT);
+ for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReachableEdges.begin(),
+ E = ReachableEdges.end();
+ I != E; ++I)
+ setEdgeWeight(BB, *I, ReachableWeight);
- bool runOnFunction(Function &F);
-};
-} // end anonymous namespace
+ return true;
+}
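Plugging these weights into the probability formula from the comment block above, for a block with one unreachable and one reachable successor:

    // Worked example with the weights defined above:
    //   P(edge to unreachable) = UR_TAKEN_WEIGHT    / (1 + 1023) =    1/1024 (~0.1%)
    //   P(edge to reachable)   = UR_NONTAKEN_WEIGHT / (1 + 1023) = 1023/1024 (~99.9%)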
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
-bool BranchProbabilityAnalysis::calcMetadataWeights(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 1)
return false;
@@ -171,54 +173,14 @@ bool BranchProbabilityAnalysis::calcMetadataWeights(BasicBlock *BB) {
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- BP->setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
+ setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
return true;
}
-// Calculate Edge Weights using "Return Heuristics". Predict a successor which
-// leads directly to Return Instruction will not be taken.
-bool BranchProbabilityAnalysis::calcReturnHeuristics(BasicBlock *BB){
- if (BB->getTerminator()->getNumSuccessors() == 1)
- return false;
-
- SmallPtrSet<BasicBlock *, 4> ReturningEdges;
- SmallPtrSet<BasicBlock *, 4> StayEdges;
-
- for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- BasicBlock *Succ = *I;
- if (isReturningBlock(Succ))
- ReturningEdges.insert(Succ);
- else
- StayEdges.insert(Succ);
- }
-
- if (uint32_t numStayEdges = StayEdges.size()) {
- uint32_t stayWeight = RH_TAKEN_WEIGHT / numStayEdges;
- if (stayWeight < NORMAL_WEIGHT)
- stayWeight = NORMAL_WEIGHT;
-
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = StayEdges.begin(),
- E = StayEdges.end(); I != E; ++I)
- BP->setEdgeWeight(BB, *I, stayWeight);
- }
-
- if (uint32_t numRetEdges = ReturningEdges.size()) {
- uint32_t retWeight = RH_NONTAKEN_WEIGHT / numRetEdges;
- if (retWeight < MIN_WEIGHT)
- retWeight = MIN_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReturningEdges.begin(),
- E = ReturningEdges.end(); I != E; ++I) {
- BP->setEdgeWeight(BB, *I, retWeight);
- }
- }
-
- return ReturningEdges.size() > 0;
-}
-
// Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
// between two pointer or pointer and NULL will fail.
-bool BranchProbabilityAnalysis::calcPointerHeuristics(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
@@ -246,16 +208,14 @@ bool BranchProbabilityAnalysis::calcPointerHeuristics(BasicBlock *BB) {
if (!isProb)
std::swap(Taken, NonTaken);
- BP->setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
- BP->setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
return true;
}
// Calculate Edge Weights using "Loop Branch Heuristics". Predict backedges
// as taken, exiting edges as not-taken.
-bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
- uint32_t numSuccs = BB->getTerminator()->getNumSuccessors();
-
+bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
Loop *L = LI->getLoopFor(BB);
if (!L)
return false;
@@ -264,17 +224,13 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
SmallPtrSet<BasicBlock *, 8> ExitingEdges;
SmallPtrSet<BasicBlock *, 8> InEdges; // Edges from header to the loop.
- bool isHeader = BB == L->getHeader();
-
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- BasicBlock *Succ = *I;
- Loop *SuccL = LI->getLoopFor(Succ);
- if (SuccL != L)
- ExitingEdges.insert(Succ);
- else if (Succ == L->getHeader())
- BackEdges.insert(Succ);
- else if (isHeader)
- InEdges.insert(Succ);
+ if (!L->contains(*I))
+ ExitingEdges.insert(*I);
+ else if (L->getHeader() == *I)
+ BackEdges.insert(*I);
+ else
+ InEdges.insert(*I);
}
if (uint32_t numBackEdges = BackEdges.size()) {
@@ -285,7 +241,7 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
BasicBlock *Back = *EI;
- BP->setEdgeWeight(BB, Back, backWeight);
+ setEdgeWeight(BB, Back, backWeight);
}
}
@@ -297,27 +253,26 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
BasicBlock *Back = *EI;
- BP->setEdgeWeight(BB, Back, inWeight);
+ setEdgeWeight(BB, Back, inWeight);
}
}
- uint32_t numExitingEdges = ExitingEdges.size();
- if (uint32_t numNonExitingEdges = numSuccs - numExitingEdges) {
- uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numNonExitingEdges;
+ if (uint32_t numExitingEdges = ExitingEdges.size()) {
+ uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numExitingEdges;
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
BasicBlock *Exiting = *EI;
- BP->setEdgeWeight(BB, Exiting, exitWeight);
+ setEdgeWeight(BB, Exiting, exitWeight);
}
}
return true;
}
-bool BranchProbabilityAnalysis::calcZeroHeuristics(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
@@ -372,45 +327,94 @@ bool BranchProbabilityAnalysis::calcZeroHeuristics(BasicBlock *BB) {
if (!isProb)
std::swap(Taken, NonTaken);
- BP->setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
- BP->setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
return true;
}
+bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
+ BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+ if (!BI || !BI->isConditional())
+ return false;
-bool BranchProbabilityAnalysis::runOnFunction(Function &F) {
-
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
- BasicBlock *BB = I++;
-
- if (calcMetadataWeights(BB))
- continue;
+ Value *Cond = BI->getCondition();
+ FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
+ if (!FCmp)
+ return false;
- if (calcLoopBranchHeuristics(BB))
- continue;
+ bool isProb;
+ if (FCmp->isEquality()) {
+ // f1 == f2 -> Unlikely
+ // f1 != f2 -> Likely
+ isProb = !FCmp->isTrueWhenEqual();
+ } else if (FCmp->getPredicate() == FCmpInst::FCMP_ORD) {
+ // !isnan -> Likely
+ isProb = true;
+ } else if (FCmp->getPredicate() == FCmpInst::FCMP_UNO) {
+ // isnan -> Unlikely
+ isProb = false;
+ } else {
+ return false;
+ }
- if (calcReturnHeuristics(BB))
- continue;
+ BasicBlock *Taken = BI->getSuccessor(0);
+ BasicBlock *NonTaken = BI->getSuccessor(1);
- if (calcPointerHeuristics(BB))
- continue;
+ if (!isProb)
+ std::swap(Taken, NonTaken);
- calcZeroHeuristics(BB);
- }
+ setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT);
- return false;
+ return true;
}
void BranchProbabilityInfo::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<LoopInfo>();
- AU.setPreservesAll();
+ AU.addRequired<LoopInfo>();
+ AU.setPreservesAll();
}
bool BranchProbabilityInfo::runOnFunction(Function &F) {
- LoopInfo &LI = getAnalysis<LoopInfo>();
- BranchProbabilityAnalysis BPA(this, &LI);
- return BPA.runOnFunction(F);
+ LastF = &F; // Store the last function we ran on for printing.
+ LI = &getAnalysis<LoopInfo>();
+ assert(PostDominatedByUnreachable.empty());
+
+ // Walk the basic blocks in post-order so that we can build up state about
+ // the successors of a block iteratively.
+ for (po_iterator<BasicBlock *> I = po_begin(&F.getEntryBlock()),
+ E = po_end(&F.getEntryBlock());
+ I != E; ++I) {
+ DEBUG(dbgs() << "Computing probabilities for " << I->getName() << "\n");
+ if (calcUnreachableHeuristics(*I))
+ continue;
+ if (calcMetadataWeights(*I))
+ continue;
+ if (calcLoopBranchHeuristics(*I))
+ continue;
+ if (calcPointerHeuristics(*I))
+ continue;
+ if (calcZeroHeuristics(*I))
+ continue;
+ calcFloatingPointHeuristics(*I);
+ }
+
+ PostDominatedByUnreachable.clear();
+ return false;
+}
+
+void BranchProbabilityInfo::print(raw_ostream &OS, const Module *) const {
+ OS << "---- Branch Probabilities ----\n";
+ // We print the probabilities from the last function the analysis ran over,
+ // or the function it is currently running over.
+ assert(LastF && "Cannot print prior to running over a function");
+ for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
+ BI != BE; ++BI) {
+ for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
+ SI != SE; ++SI) {
+ printEdgeProbability(OS << " ", BI, *SI);
+ }
+ }
}
uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
@@ -431,12 +435,8 @@ uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
- uint32_t Weight = getEdgeWeight(Src, Dst);
- uint32_t Sum = getSumForBlock(Src);
-
- // FIXME: Implement BranchProbability::compare then change this code to
- // compare this BranchProbability against a static "hot" BranchProbability.
- return (uint64_t)Weight * 5 > (uint64_t)Sum * 4;
+ // FIXME: Compare against a static "hot" BranchProbability.
+ return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
@@ -458,8 +458,8 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
}
}
- // FIXME: Use BranchProbability::compare.
- if ((uint64_t)MaxWeight * 5 > (uint64_t)Sum * 4)
+ // Hot probability is at least 4/5 = 80%
+ if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
return MaxSucc;
return 0;
@@ -480,8 +480,8 @@ getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
void BranchProbabilityInfo::
setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) {
Weights[std::make_pair(Src, Dst)] = Weight;
- DEBUG(dbgs() << "set edge " << Src->getNameStr() << " -> "
- << Dst->getNameStr() << " weight to " << Weight
+ DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
+ << Dst->getName() << " weight to " << Weight
<< (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
}
@@ -496,11 +496,12 @@ getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
}
raw_ostream &
-BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS, BasicBlock *Src,
- BasicBlock *Dst) const {
+BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
+ const BasicBlock *Src,
+ const BasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
- OS << "edge " << Src->getNameStr() << " -> " << Dst->getNameStr()
+ OS << "edge " << Src->getName() << " -> " << Dst->getName()
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
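A client would query the finished analysis much as the print method above does. A minimal sketch using only the interfaces visible in this patch (the surrounding pass boilerplate is assumed):

    void reportHotEdges(Function &F, const BranchProbabilityInfo &BPI) {
      for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
        for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
          if (BPI.isEdgeHot(BB, *SI))  // edge probability above 4/5
            dbgs() << "hot: " << BB->getName() << " -> "
                   << (*SI)->getName() << "\n";
    }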
diff --git a/lib/Analysis/CFGPrinter.cpp b/lib/Analysis/CFGPrinter.cpp
index 7bb063f..7685400 100644
--- a/lib/Analysis/CFGPrinter.cpp
+++ b/lib/Analysis/CFGPrinter.cpp
@@ -77,7 +77,7 @@ namespace {
}
virtual bool runOnFunction(Function &F) {
- std::string Filename = "cfg." + F.getNameStr() + ".dot";
+ std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
@@ -111,7 +111,7 @@ namespace {
}
virtual bool runOnFunction(Function &F) {
- std::string Filename = "cfg." + F.getNameStr() + ".dot";
+ std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
@@ -143,7 +143,7 @@ INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
/// being a 'dot' and 'gv' program in your path.
///
void Function::viewCFG() const {
- ViewGraph(this, "cfg" + getNameStr());
+ ViewGraph(this, "cfg" + getName());
}
/// viewCFGOnly - This function is meant for use from the debugger. It works
@@ -152,7 +152,7 @@ void Function::viewCFG() const {
/// this can make the graph smaller.
///
void Function::viewCFGOnly() const {
- ViewGraph(this, "cfg" + getNameStr(), true);
+ ViewGraph(this, "cfg" + getName(), true);
}
FunctionPass *llvm::createCFGPrinterPass () {
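getName() returns a StringRef view rather than a fresh std::string, which is why only the std::string contexts above need an explicit .str(). A brief illustration:

    StringRef N = F.getName();           // no copy; a view of the stored name
    std::string S = F.getName().str();   // explicit copy where std::string is required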
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index e79459d..cb1e1eb 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -58,10 +58,4 @@ add_llvm_library(LLVMAnalysis
ValueTracking.cpp
)
-add_llvm_library_dependencies(LLVMAnalysis
- LLVMCore
- LLVMSupport
- LLVMTarget
- )
-
add_subdirectory(IPA)
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index b2c27d1..9a7992e 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -17,24 +17,32 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Constants.h"
-#include "llvm/Instructions.h"
-#include "llvm/Value.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
-/// As its comment mentions, PointerMayBeCaptured can be expensive.
-/// However, it's not easy for BasicAA to cache the result, because
-/// it's an ImmutablePass. To work around this, bound queries at a
-/// fixed number of uses.
-///
-/// TODO: Write a new FunctionPass AliasAnalysis so that it can keep
-/// a cache. Then we can move the code from BasicAliasAnalysis into
-/// that path, and remove this threshold.
-static int const Threshold = 20;
+CaptureTracker::~CaptureTracker() {}
+
+namespace {
+ struct SimpleCaptureTracker : public CaptureTracker {
+ explicit SimpleCaptureTracker(bool ReturnCaptures)
+ : ReturnCaptures(ReturnCaptures), Captured(false) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) { return true; }
+
+ bool captured(Instruction *I) {
+ if (isa<ReturnInst>(I) && !ReturnCaptures)
+ return false;
+
+ Captured = true;
+ return true;
+ }
+
+ bool ReturnCaptures;
+
+ bool Captured;
+ };
+}
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
@@ -45,6 +53,26 @@ static int const Threshold = 20;
/// counts as capturing it or not.
bool llvm::PointerMayBeCaptured(const Value *V,
bool ReturnCaptures, bool StoreCaptures) {
+ assert(!isa<GlobalValue>(V) &&
+ "It doesn't make sense to ask whether a global is captured.");
+
+ // TODO: If StoreCaptures is not true, we could do Fancy analysis
+ // to determine whether this store is not actually an escape point.
+ // In that case, BasicAliasAnalysis should be updated as well to
+ // take advantage of this.
+ (void)StoreCaptures;
+
+ SimpleCaptureTracker SCT(ReturnCaptures);
+ PointerMayBeCaptured(V, &SCT);
+ return SCT.Captured;
+}
+
+/// TODO: Write a new FunctionPass AliasAnalysis so that it can keep
+/// a cache. Then we can move the code from BasicAliasAnalysis into
+/// that path, and remove this threshold.
+static int const Threshold = 20;
+
+void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
SmallVector<Use*, Threshold> Worklist;
SmallSet<Use*, Threshold> Visited;
@@ -55,9 +83,10 @@ bool llvm::PointerMayBeCaptured(const Value *V,
// If there are lots of uses, conservatively say that the value
// is captured to avoid taking too much compile time.
if (Count++ >= Threshold)
- return true;
+ return Tracker->tooManyUses();
Use *U = &UI.getUse();
+ if (!Tracker->shouldExplore(U)) continue;
Visited.insert(U);
Worklist.push_back(U);
}
@@ -86,11 +115,10 @@ bool llvm::PointerMayBeCaptured(const Value *V,
// (think of self-referential objects).
CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
for (CallSite::arg_iterator A = B; A != E; ++A)
- if (A->get() == V && !CS.paramHasAttr(A - B + 1, Attribute::NoCapture))
+ if (A->get() == V && !CS.doesNotCapture(A - B))
// The parameter is not marked 'nocapture' - captured.
- return true;
- // Only passed via 'nocapture' arguments, or is the called function - not
- // captured.
+ if (Tracker->captured(I))
+ return;
break;
}
case Instruction::Load:
@@ -99,18 +127,11 @@ bool llvm::PointerMayBeCaptured(const Value *V,
case Instruction::VAArg:
// "va-arg" from a pointer does not cause it to be captured.
break;
- case Instruction::Ret:
- if (ReturnCaptures)
- return true;
- break;
case Instruction::Store:
if (V == I->getOperand(0))
// Stored the pointer - conservatively assume it may be captured.
- // TODO: If StoreCaptures is not true, we could do Fancy analysis
- // to determine whether this store is not actually an escape point.
- // In that case, BasicAliasAnalysis should be updated as well to
- // take advantage of this.
- return true;
+ if (Tracker->captured(I))
+ return;
// Storing to the pointee does not cause the pointer to be captured.
break;
case Instruction::BitCast:
@@ -122,7 +143,8 @@ bool llvm::PointerMayBeCaptured(const Value *V,
UI != UE; ++UI) {
Use *U = &UI.getUse();
if (Visited.insert(U))
- Worklist.push_back(U);
+ if (Tracker->shouldExplore(U))
+ Worklist.push_back(U);
}
break;
case Instruction::ICmp:
@@ -136,13 +158,16 @@ bool llvm::PointerMayBeCaptured(const Value *V,
break;
// Otherwise, be conservative. There are crazy ways to capture pointers
// using comparisons.
- return true;
+ if (Tracker->captured(I))
+ return;
+ break;
default:
// Something else - be conservative and say it is captured.
- return true;
+ if (Tracker->captured(I))
+ return;
+ break;
}
}
- // All uses examined - not captured.
- return false;
+ // All uses examined.
}
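The CaptureTracker callbacks let clients customize the walk without reimplementing it. A sketch of a hypothetical tracker that also records the first capturing instruction (every name here except the CaptureTracker hooks is illustrative):

    struct FirstCaptureTracker : public CaptureTracker {
      FirstCaptureTracker() : CapturingInst(0), GaveUp(false) {}
      void tooManyUses() { GaveUp = true; }        // hit the use threshold
      bool shouldExplore(Use *U) { return true; }  // consider every use
      bool captured(Instruction *I) {
        CapturingInst = I;
        return true;                               // returning true stops the walk
      }
      Instruction *CapturingInst;
      bool GaveUp;
    };
    // Usage: FirstCaptureTracker T; PointerMayBeCaptured(V, &T);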
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index df79849..8e2f263 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -26,6 +26,7 @@
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
@@ -542,8 +543,8 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
- Type *ResultTy,
- const TargetData *TD) {
+ Type *ResultTy, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -568,7 +569,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Constant *C =
ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
return C;
}
@@ -576,10 +577,11 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
- Type *ResultTy,
- const TargetData *TD) {
+ Type *ResultTy, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
- if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
+ if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
+ !Ptr->getType()->isPointerTy())
return 0;
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
@@ -602,7 +604,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Res = ConstantExpr::getSub(Res, CE->getOperand(1));
Res = ConstantExpr::getIntToPtr(Res, ResultTy);
if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
- Res = ConstantFoldConstantExpression(ResCE, TD);
+ Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
return Res;
}
}
@@ -729,7 +731,9 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
-Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
+Constant *llvm::ConstantFoldInstruction(Instruction *I,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
Constant *CommonValue = 0;
@@ -765,7 +769,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
- TD);
+ TD, TLI);
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -781,28 +785,29 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
cast<Constant>(EVI->getAggregateOperand()),
EVI->getIndices());
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD);
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
i != e; ++i) {
Constant *NewC = cast<Constant>(*i);
// Recursively fold the ConstantExpr's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
- NewC = ConstantFoldConstantExpression(NewCE, TD);
+ NewC = ConstantFoldConstantExpression(NewCE, TD, TLI);
Ops.push_back(NewC);
}
if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
- TD);
- return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD);
+ TD, TLI);
+ return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -817,7 +822,8 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
@@ -834,7 +840,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
- return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1));
+ return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
return 0;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
@@ -888,9 +894,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
- if (Constant *C = CastGEPIndices(Ops, DestTy, TD))
+ if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
return C;
- if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD))
+ if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
@@ -903,7 +909,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Ops0, Constant *Ops1,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
// fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
@@ -920,7 +927,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
@@ -929,7 +936,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
}
@@ -944,7 +951,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
IntPtrTy, false);
Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
IntPtrTy, false);
- return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
@@ -953,7 +960,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE0->getType() == IntPtrTy &&
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
- CE1->getOperand(0), TD);
+ CE1->getOperand(0), TD, TLI);
}
}
@@ -962,13 +969,15 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
Constant *LHS =
- ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,TD);
+ ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
+ TD, TLI);
Constant *RHS =
- ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,TD);
+ ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
+ TD, TLI);
unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
- return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD);
+ return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
@@ -1045,6 +1054,7 @@ bool
llvm::canConstantFoldCallTo(const Function *F) {
switch (F->getIntrinsicID()) {
case Intrinsic::sqrt:
+ case Intrinsic::pow:
case Intrinsic::powi:
case Intrinsic::bswap:
case Intrinsic::ctpop:
@@ -1168,7 +1178,8 @@ static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
-llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
+llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI) {
if (!F->hasName()) return 0;
StringRef Name = F->getName();
@@ -1183,6 +1194,8 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
}
+ if (!TLI)
+ return 0;
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
@@ -1201,43 +1214,43 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
Op->getValueAPF().convertToDouble();
switch (Name[0]) {
case 'a':
- if (Name == "acos")
+ if (Name == "acos" && TLI->has(LibFunc::acos))
return ConstantFoldFP(acos, V, Ty);
- else if (Name == "asin")
+ else if (Name == "asin" && TLI->has(LibFunc::asin))
return ConstantFoldFP(asin, V, Ty);
- else if (Name == "atan")
+ else if (Name == "atan" && TLI->has(LibFunc::atan))
return ConstantFoldFP(atan, V, Ty);
break;
case 'c':
- if (Name == "ceil")
+ if (Name == "ceil" && TLI->has(LibFunc::ceil))
return ConstantFoldFP(ceil, V, Ty);
- else if (Name == "cos")
+ else if (Name == "cos" && TLI->has(LibFunc::cos))
return ConstantFoldFP(cos, V, Ty);
- else if (Name == "cosh")
+ else if (Name == "cosh" && TLI->has(LibFunc::cosh))
return ConstantFoldFP(cosh, V, Ty);
- else if (Name == "cosf")
+ else if (Name == "cosf" && TLI->has(LibFunc::cosf))
return ConstantFoldFP(cos, V, Ty);
break;
case 'e':
- if (Name == "exp")
+ if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
- if (Name == "exp2") {
+ if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
}
break;
case 'f':
- if (Name == "fabs")
+ if (Name == "fabs" && TLI->has(LibFunc::fabs))
return ConstantFoldFP(fabs, V, Ty);
- else if (Name == "floor")
+ else if (Name == "floor" && TLI->has(LibFunc::floor))
return ConstantFoldFP(floor, V, Ty);
break;
case 'l':
- if (Name == "log" && V > 0)
+ if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
return ConstantFoldFP(log, V, Ty);
- else if (Name == "log10" && V > 0)
+ else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
return ConstantFoldFP(log10, V, Ty);
else if (F->getIntrinsicID() == Intrinsic::sqrt &&
(Ty->isFloatTy() || Ty->isDoubleTy())) {
@@ -1248,21 +1261,21 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
}
break;
case 's':
- if (Name == "sin")
+ if (Name == "sin" && TLI->has(LibFunc::sin))
return ConstantFoldFP(sin, V, Ty);
- else if (Name == "sinh")
+ else if (Name == "sinh" && TLI->has(LibFunc::sinh))
return ConstantFoldFP(sinh, V, Ty);
- else if (Name == "sqrt" && V >= 0)
+ else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
return ConstantFoldFP(sqrt, V, Ty);
- else if (Name == "sqrtf" && V >= 0)
+ else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
return ConstantFoldFP(sqrt, V, Ty);
- else if (Name == "sinf")
+ else if (Name == "sinf" && TLI->has(LibFunc::sinf))
return ConstantFoldFP(sin, V, Ty);
break;
case 't':
- if (Name == "tan")
+ if (Name == "tan" && TLI->has(LibFunc::tan))
return ConstantFoldFP(tan, V, Ty);
- else if (Name == "tanh")
+ else if (Name == "tanh" && TLI->has(LibFunc::tanh))
return ConstantFoldFP(tanh, V, Ty);
break;
default:
@@ -1277,10 +1290,6 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
case Intrinsic::ctpop:
return ConstantInt::get(Ty, Op->getValue().countPopulation());
- case Intrinsic::cttz:
- return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
- case Intrinsic::ctlz:
- return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
case Intrinsic::convert_from_fp16: {
APFloat Val(Op->getValue());
@@ -1337,16 +1346,21 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
-
+
double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
- if (Name == "pow")
+ if (F->getIntrinsicID() == Intrinsic::pow) {
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
- if (Name == "fmod")
+ }
+ if (!TLI)
+ return 0;
+ if (Name == "pow" && TLI->has(LibFunc::pow))
+ return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
+ if (Name == "fmod" && TLI->has(LibFunc::fmod))
return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
- if (Name == "atan2")
+ if (Name == "atan2" && TLI->has(LibFunc::atan2))
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
@@ -1361,7 +1375,6 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return 0;
}
-
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1401,6 +1414,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
};
return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
}
+ case Intrinsic::cttz:
+ // FIXME: This should check for Op2 == 1, and become unreachable if
+ // Op1 == 0.
+ return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
+ case Intrinsic::ctlz:
+ // FIXME: This should check for Op2 == 1, and become unreachable if
+ // Op1 == 0.
+ return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
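Callers adopt the new signatures by threading a TargetLibraryInfo through. A minimal sketch of such a caller (how TD and TLI are obtained from analysis passes is assumed):

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/Target/TargetData.h"
    #include "llvm/Target/TargetLibraryInfo.h"

    Constant *tryFold(Instruction *I, const TargetData *TD,
                      const TargetLibraryInfo *TLI) {
      // Libcalls such as sin or fmod now fold only when TLI->has() says the
      // target actually provides them.
      return ConstantFoldInstruction(I, TD, TLI);  // null if it cannot fold
    }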
diff --git a/lib/Analysis/DIBuilder.cpp b/lib/Analysis/DIBuilder.cpp
index bfa429d..85dcc46 100644
--- a/lib/Analysis/DIBuilder.cpp
+++ b/lib/Analysis/DIBuilder.cpp
@@ -189,7 +189,7 @@ DIType DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
return DIType(MDNode::get(VMContext, Elts));
}
-/// createQaulifiedType - Create debugging information entry for a qualified
+/// createQualifiedType - Create debugging information entry for a qualified
/// type, e.g. 'const int'.
DIType DIBuilder::createQualifiedType(unsigned Tag, DIType FromTy) {
// Qualified types are encoded in DIDerivedType format.
@@ -777,7 +777,7 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context,
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
Fn,
diff --git a/lib/Analysis/IPA/CMakeLists.txt b/lib/Analysis/IPA/CMakeLists.txt
index eae83fd..8ffef29 100644
--- a/lib/Analysis/IPA/CMakeLists.txt
+++ b/lib/Analysis/IPA/CMakeLists.txt
@@ -5,9 +5,3 @@ add_llvm_library(LLVMipa
GlobalsModRef.cpp
IPA.cpp
)
-
-add_llvm_library_dependencies(LLVMipa
- LLVMAnalysis
- LLVMCore
- LLVMSupport
- )
diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp
index 2e79eab..0df3e8a 100644
--- a/lib/Analysis/IPA/CallGraph.cpp
+++ b/lib/Analysis/IPA/CallGraph.cpp
@@ -127,16 +127,9 @@ private:
}
}
- // Loop over all of the users of the function, looking for non-call uses.
- for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I){
- User *U = *I;
- if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
- || !CallSite(cast<Instruction>(U)).isCallee(I)) {
- // Not a call, or being used as a parameter rather than as the callee.
- ExternalCallingNode->addCalledFunction(CallSite(), Node);
- break;
- }
- }
+ // If this function has its address taken, anything could call it.
+ if (F->hasAddressTaken())
+ ExternalCallingNode->addCalledFunction(CallSite(), Node);
// If this function is not defined in this translation unit, it could call
// anything.
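The deleted loop was an open-coded address-taken check; Function::hasAddressTaken performs essentially the same scan over the function's uses. A sketch of the assumed equivalence, modeled directly on the removed code:

    bool addressTaken(const Function *F) {
      for (Value::const_use_iterator I = F->use_begin(), E = F->use_end();
           I != E; ++I) {
        const User *U = *I;
        if ((!isa<CallInst>(U) && !isa<InvokeInst>(U)) ||
            !ImmutableCallSite(cast<Instruction>(U)).isCallee(I))
          return true;  // used as data, or passed as an argument
      }
      return false;
    }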
diff --git a/lib/Analysis/IPA/LLVMBuild.txt b/lib/Analysis/IPA/LLVMBuild.txt
new file mode 100644
index 0000000..980e918
--- /dev/null
+++ b/lib/Analysis/IPA/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Analysis/IPA/LLVMBuild.txt -------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = IPA
+parent = Libraries
+library_name = ipa
+required_libraries = Analysis Core Support
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 40ac9a2..1f332e8 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -135,6 +135,12 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
// for example) would be referring to the original function, and this indirect
// jump would jump from the inlined copy of the function into the original
// function which is extremely undefined behavior.
+ // FIXME: This logic isn't really right; we can safely inline functions
+ // with indirectbr's as long as no other function or global references the
+ // blockaddress of a block within the current function. And as a QOI issue,
+ // if someone is using a blockaddress without an indirectbr, and that
+ // reference somehow ends up in another function or global, we probably
+ // don't want to inline this function.
if (isa<IndirectBrInst>(BB->getTerminator()))
containsIndirectBr = true;
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 131cc97..f1cfd6c 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -38,22 +38,25 @@ STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
static Value *SimplifyAndInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+ const TargetLibraryInfo *, const DominatorTree *,
+ unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+ const TargetLibraryInfo *, const DominatorTree *,
+ unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+ const TargetLibraryInfo *, const DominatorTree *,
+ unsigned);
static Value *SimplifyOrInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+ const TargetLibraryInfo *, const DominatorTree *,
+ unsigned);
static Value *SimplifyXorInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+ const TargetLibraryInfo *, const DominatorTree *,
+ unsigned);
/// getFalse - For a boolean type, or a vector of boolean type, return false, or
/// a vector with every element false, as appropriate for the type.
static Constant *getFalse(Type *Ty) {
- assert((Ty->isIntegerTy(1) ||
- (Ty->isVectorTy() &&
- cast<VectorType>(Ty)->getElementType()->isIntegerTy(1))) &&
+ assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getNullValue(Ty);
}
@@ -61,13 +64,25 @@ static Constant *getFalse(Type *Ty) {
/// getTrue - For a boolean type, or a vector of boolean type, return true, or
/// a vector with every element true, as appropriate for the type.
static Constant *getTrue(Type *Ty) {
- assert((Ty->isIntegerTy(1) ||
- (Ty->isVectorTy() &&
- cast<VectorType>(Ty)->getElementType()->isIntegerTy(1))) &&
+ assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getAllOnesValue(Ty);
}
+/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
+static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
+ Value *RHS) {
+ CmpInst *Cmp = dyn_cast<CmpInst>(V);
+ if (!Cmp)
+ return false;
+ CmpInst::Predicate CPred = Cmp->getPredicate();
+ Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
+ if (CPred == Pred && CLHS == LHS && CRHS == RHS)
+ return true;
+ return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
+ CRHS == LHS;
+}
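isSameCompare also accepts the swapped orientation, so both ways of writing the same comparison match. A small illustration, assuming C is the instruction "icmp slt %x, %y":

    //   isSameCompare(C, ICmpInst::ICMP_SLT, X, Y)  // direct match
    //   isSameCompare(C, ICmpInst::ICMP_SGT, Y, X)  // swapped-predicate match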
+
/// ValueDominatesPHI - Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
Instruction *I = dyn_cast<Instruction>(V);
@@ -95,7 +110,8 @@ static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
unsigned OpcToExpand, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+ const TargetLibraryInfo *TLI, const DominatorTree *DT,
+ unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -107,8 +123,8 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
// It does! Try turning it into "(A op C) op' (B op C)".
Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
// Do "A op C" and "B op C" both simplify?
- if (Value *L = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse))
- if (Value *R = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ if (Value *L = SimplifyBinOp(Opcode, A, C, TD, TLI, DT, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, B, C, TD, TLI, DT, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
@@ -117,7 +133,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return LHS;
}
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, TLI, DT,
MaxRecurse)) {
++NumExpand;
return V;
@@ -131,8 +147,8 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
// It does! Try turning it into "(A op B) op' (A op C)".
Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
// Do "A op B" and "A op C" both simplify?
- if (Value *L = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse))
- if (Value *R = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse)) {
+ if (Value *L = SimplifyBinOp(Opcode, A, B, TD, TLI, DT, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, A, C, TD, TLI, DT, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
@@ -141,7 +157,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return RHS;
}
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, TLI, DT,
MaxRecurse)) {
++NumExpand;
return V;
@@ -157,8 +173,10 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
/// OpCodeToExtract is Mul then this tries to turn "(A*B)+(A*C)" into "A*(B+C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- unsigned OpcToExtract, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+ unsigned OpcToExtract, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -182,7 +200,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *DD = A == C ? D : C;
// Form "A op' (B op DD)" if it simplifies completely.
// Does "B op DD" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, DD, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, DD, TD, TLI, DT, MaxRecurse)) {
// It does! Return "A op' V" if it simplifies or is already available.
// If V equals B then "A op' V" is just the LHS. If V equals DD then
// "A op' V" is just the RHS.
@@ -191,7 +209,8 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return V == B ? LHS : RHS;
}
// Otherwise return "A op' V" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, TD, TLI, DT,
+ MaxRecurse)) {
++NumFactor;
return W;
}
@@ -205,7 +224,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *CC = B == D ? C : D;
// Form "(A op CC) op' B" if it simplifies completely..
// Does "A op CC" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, CC, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, CC, TD, TLI, DT, MaxRecurse)) {
// It does! Return "V op' B" if it simplifies or is already available.
// If V equals A then "V op' B" is just the LHS. If V equals CC then
// "V op' B" is just the RHS.
@@ -214,7 +233,8 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return V == A ? LHS : RHS;
}
// Otherwise return "V op' B" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, TD, TLI, DT,
+ MaxRecurse)) {
++NumFactor;
return W;
}
@@ -228,6 +248,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
/// operations. Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT,
unsigned MaxRecurse) {
Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
@@ -247,12 +268,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = RHS;
// Does "B op C" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, C, TD, TLI, DT, MaxRecurse)) {
// It does! Return "A op V" if it simplifies or is already available.
// If V equals B then "A op V" is just the LHS.
if (V == B) return LHS;
// Otherwise return "A op V" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, A, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, A, V, TD, TLI, DT, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -266,12 +287,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, B, TD, TLI, DT, MaxRecurse)) {
// It does! Return "V op C" if it simplifies or is already available.
// If V equals B then "V op C" is just the RHS.
if (V == B) return RHS;
// Otherwise return "V op C" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, V, C, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, V, C, TD, TLI, DT, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -289,12 +310,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = RHS;
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD, TLI, DT, MaxRecurse)) {
// It does! Return "V op B" if it simplifies or is already available.
// If V equals A then "V op B" is just the LHS.
if (V == A) return LHS;
// Otherwise return "V op B" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, V, B, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, V, B, TD, TLI, DT, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -308,12 +329,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, TD, TLI, DT, MaxRecurse)) {
// It does! Return "B op V" if it simplifies or is already available.
// If V equals C then "B op V" is just the RHS.
if (V == C) return RHS;
// Otherwise return "B op V" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, B, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, B, V, TD, TLI, DT, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -329,6 +350,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
/// Returns the common value if so, otherwise returns null.
static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
@@ -347,11 +369,11 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
Value *TV;
Value *FV;
if (SI == LHS) {
- TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, TD, DT, MaxRecurse);
- FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, TD, DT, MaxRecurse);
+ TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, TD, TLI, DT, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, TD, TLI, DT, MaxRecurse);
} else {
- TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), TD, DT, MaxRecurse);
- FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), TD, DT, MaxRecurse);
+ TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), TD, TLI, DT, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), TD, TLI, DT, MaxRecurse);
}
// If they simplified to the same value, then return the common value.
@@ -403,6 +425,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
/// null.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
Value *RHS, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
@@ -416,40 +439,62 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
}
assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
SelectInst *SI = cast<SelectInst>(LHS);
+ Value *Cond = SI->getCondition();
+ Value *TV = SI->getTrueValue();
+ Value *FV = SI->getFalseValue();
// Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
// Does "cmp TV, RHS" simplify?
- if (Value *TCmp = SimplifyCmpInst(Pred, SI->getTrueValue(), RHS, TD, DT,
- MaxRecurse)) {
- // It does! Does "cmp FV, RHS" simplify?
- if (Value *FCmp = SimplifyCmpInst(Pred, SI->getFalseValue(), RHS, TD, DT,
- MaxRecurse)) {
- // It does! If they simplified to the same value, then use it as the
- // result of the original comparison.
- if (TCmp == FCmp)
- return TCmp;
- Value *Cond = SI->getCondition();
- // If the false value simplified to false, then the result of the compare
- // is equal to "Cond && TCmp". This also catches the case when the false
- // value simplified to false and the true value to true, returning "Cond".
- if (match(FCmp, m_Zero()))
- if (Value *V = SimplifyAndInst(Cond, TCmp, TD, DT, MaxRecurse))
- return V;
- // If the true value simplified to true, then the result of the compare
- // is equal to "Cond || FCmp".
- if (match(TCmp, m_One()))
- if (Value *V = SimplifyOrInst(Cond, FCmp, TD, DT, MaxRecurse))
- return V;
- // Finally, if the false value simplified to true and the true value to
- // false, then the result of the compare is equal to "!Cond".
- if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
- if (Value *V =
- SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
- TD, DT, MaxRecurse))
- return V;
- }
+ Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, TD, TLI, DT, MaxRecurse);
+ if (TCmp == Cond) {
+ // It not only simplified, it simplified to the select condition. Replace
+ // it with 'true'.
+ TCmp = getTrue(Cond->getType());
+ } else if (!TCmp) {
+ // It didn't simplify. However if "cmp TV, RHS" is equal to the select
+ // condition then we can replace it with 'true'. Otherwise give up.
+ if (!isSameCompare(Cond, Pred, TV, RHS))
+ return 0;
+ TCmp = getTrue(Cond->getType());
+ }
+
+ // Does "cmp FV, RHS" simplify?
+ Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, TD, TLI, DT, MaxRecurse);
+ if (FCmp == Cond) {
+ // It not only simplified, it simplified to the select condition. Replace
+ // it with 'false'.
+ FCmp = getFalse(Cond->getType());
+ } else if (!FCmp) {
+ // It didn't simplify. However if "cmp FV, RHS" is equal to the select
+ // condition then we can replace it with 'false'. Otherwise give up.
+ if (!isSameCompare(Cond, Pred, FV, RHS))
+ return 0;
+ FCmp = getFalse(Cond->getType());
}
+ // If both sides simplified to the same value, then use it as the result of
+ // the original comparison.
+ if (TCmp == FCmp)
+ return TCmp;
+ // If the false value simplified to false, then the result of the compare
+ // is equal to "Cond && TCmp". This also catches the case when the false
+ // value simplified to false and the true value to true, returning "Cond".
+ if (match(FCmp, m_Zero()))
+ if (Value *V = SimplifyAndInst(Cond, TCmp, TD, TLI, DT, MaxRecurse))
+ return V;
+ // If the true value simplified to true, then the result of the compare
+ // is equal to "Cond || FCmp".
+ if (match(TCmp, m_One()))
+ if (Value *V = SimplifyOrInst(Cond, FCmp, TD, TLI, DT, MaxRecurse))
+ return V;
+ // Finally, if the false value simplified to true and the true value to
+ // false, then the result of the compare is equal to "!Cond".
+ if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
+ if (Value *V =
+ SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
+ TD, TLI, DT, MaxRecurse))
+ return V;
+
return 0;
}
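
As a minimal standalone sketch in plain C++ (not LLVM code), the three boolean identities that the rewritten ThreadCmpOverSelect relies on can be checked exhaustively; here TCmp and FCmp stand for the simplified results of "cmp TV, RHS" and "cmp FV, RHS", and the original compare of the select evaluates to Cond ? TCmp : FCmp:

    #include <cassert>

    int main() {
      for (int c = 0; c != 2; ++c)
        for (int t = 0; t != 2; ++t)
          for (int f = 0; f != 2; ++f) {
            bool Cond = c, TCmp = t, FCmp = f;
            bool Result = Cond ? TCmp : FCmp;   // the original "cmp select(...), RHS"
            if (!FCmp)                          // FCmp false => "Cond && TCmp"
              assert(Result == (Cond && TCmp));
            if (TCmp)                           // TCmp true  => "Cond || FCmp"
              assert(Result == (Cond || FCmp));
            if (FCmp && !TCmp)                  // both flipped => "!Cond"
              assert(Result == !Cond);
          }
      return 0;
    }
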
@@ -458,7 +503,9 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
/// it on the incoming phi values yields the same result for every value. If so
/// returns the common value, otherwise returns null.
static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -485,8 +532,8 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = PI == LHS ?
- SimplifyBinOp(Opcode, Incoming, RHS, TD, DT, MaxRecurse) :
- SimplifyBinOp(Opcode, LHS, Incoming, TD, DT, MaxRecurse);
+ SimplifyBinOp(Opcode, Incoming, RHS, TD, TLI, DT, MaxRecurse) :
+ SimplifyBinOp(Opcode, LHS, Incoming, TD, TLI, DT, MaxRecurse);
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
@@ -502,7 +549,9 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
/// incoming phi values yields the same result every time. If so returns the
/// common result, otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -526,7 +575,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
- Value *V = SimplifyCmpInst(Pred, Incoming, RHS, TD, DT, MaxRecurse);
+ Value *V = SimplifyCmpInst(Pred, Incoming, RHS, TD, TLI, DT, MaxRecurse);
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
@@ -540,13 +589,15 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// Canonicalize the constant to the RHS.
@@ -576,17 +627,17 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// i1 add -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, TD, TLI, DT, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
// Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Add, Op0, Op1, Instruction::Mul,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// Threading Add over selects and phi nodes is pointless, so don't bother.
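
The "i1 add -> xor" rewrite above (and the matching "i1 sub -> xor" in SimplifySubInst below) is just arithmetic mod 2; a minimal standalone sketch in plain C++ (not LLVM code) checks both identities:

    #include <cassert>

    int main() {
      for (int a = 0; a != 2; ++a)
        for (int b = 0; b != 2; ++b) {
          assert(((a + b) & 1) == (a ^ b)); // i1 add behaves as xor
          assert(((a - b) & 1) == (a ^ b)); // i1 sub behaves as xor too
        }
      return 0;
    }
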
@@ -602,20 +653,23 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, TD, TLI, DT, RecursionLimit);
}
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0))
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// X - undef -> undef
@@ -643,18 +697,18 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *Y = 0, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, TD, TLI, DT, MaxRecurse-1))
// It does! Now see if "X + V" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, X, V, TD, DT,
+ if (Value *W = SimplifyBinOp(Instruction::Add, X, V, TD, TLI, DT,
MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, TLI, DT, MaxRecurse-1))
// It does! Now see if "Y + V" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, TD, DT,
+ if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, TD, TLI, DT,
MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
@@ -667,18 +721,18 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
X = Op0;
if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
// See if "V === X - Y" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, TD, TLI, DT, MaxRecurse-1))
// It does! Now see if "V - Z" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, TD, DT,
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, TD, TLI, DT,
MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, TLI, DT, MaxRecurse-1))
// It does! Now see if "V - Y" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, TD, DT,
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, TD, TLI, DT,
MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
@@ -691,9 +745,9 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Z = Op0;
if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
// See if "V === Z - X" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, TD, TLI, DT, MaxRecurse-1))
// It does! Now see if "V + Y" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, TD, DT,
+ if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, TD, TLI, DT,
MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
@@ -702,12 +756,12 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// Mul distributes over Sub. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Sub, Op0, Op1, Instruction::Mul,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// i1 sub -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, TD, TLI, DT, MaxRecurse-1))
return V;
// Threading Sub over selects and phi nodes is pointless, so don't bother.
@@ -723,19 +777,22 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, TD, TLI, DT, RecursionLimit);
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// Canonicalize the constant to the RHS.
@@ -758,37 +815,38 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
Value *X = 0, *Y = 0;
if ((match(Op0, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op1) || // (X / Y) * Y
(match(Op1, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op0)) { // Y * (X / Y)
- BinaryOperator *Div = cast<BinaryOperator>(Y == Op1 ? Op0 : Op1);
+ PossiblyExactOperator *Div =
+ cast<PossiblyExactOperator>(Y == Op1 ? Op0 : Op1);
if (Div->isExact())
return X;
}
// i1 mul -> and.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyAndInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyAndInst(Op0, Op1, TD, TLI, DT, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
// Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
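
The (X / Y) * Y -> X fold above is guarded by Div->isExact() because truncating division only round-trips when there is no remainder; a minimal standalone sketch in plain C++ (not LLVM code):

    #include <cassert>

    int main() {
      int X = 84, Y = 7;
      assert((X / Y) * Y == X);  // 84 divides evenly by 7: "exact", fold is sound
      int Z = 85;
      assert((Z / Y) * Y == 84); // the remainder is lost to truncation
      assert((Z / Y) * Y != Z);  // without 'exact' the fold would be wrong
      return 0;
    }
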
@@ -796,19 +854,20 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyMulInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyMulInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD, TLI);
}
}
@@ -842,7 +901,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
Value *X = 0, *Y = 0;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
- BinaryOperator *Mul = cast<BinaryOperator>(Op0);
+ OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
// If the Mul knows it does not overflow, then we are good to go.
if ((isSigned && Mul->hasNoSignedWrap()) ||
(!isSigned && Mul->hasNoUnsignedWrap()))
@@ -861,13 +920,15 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, TLI, DT,
+ MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, TLI, DT,
+ MaxRecurse))
return V;
return 0;
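
Similarly, the (X * Y) / Y -> X fold in SimplifyDiv requires the matching no-wrap flag on the multiply: once the product wraps, dividing by Y no longer recovers X. A minimal standalone sketch in plain C++ (not LLVM code):

    #include <cassert>
    #include <stdint.h>

    int main() {
      uint8_t X = 17, Y = 16;
      uint8_t Product = (uint8_t)(X * Y); // 272 wraps to 16 in eight bits
      assert(Product / Y == 1);           // (X * Y) / Y yields 1, not 17
      assert(Product / Y != X);           // hence the nuw/nsw requirement
      return 0;
    }
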
@@ -876,34 +937,41 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, TD, TLI, DT,
+ MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifySDivInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, TD, TLI, DT,
+ MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyUDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyUDivInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *,
+ const TargetLibraryInfo *,
const DominatorTree *, unsigned) {
// undef / X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
@@ -917,19 +985,20 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *,
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyFDivInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyRem - Given operands for an SRem or URem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD, TLI);
}
}
@@ -964,13 +1033,13 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
return 0;
@@ -979,35 +1048,43 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, TD, DT, MaxRecurse))
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySRemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifySRemInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, TD, DT, MaxRecurse))
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyURemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyURemInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *,
- const DominatorTree *, unsigned) {
+ const TargetLibraryInfo *,
+ const DominatorTree *,
+ unsigned) {
// undef % X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
@@ -1020,19 +1097,20 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *,
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFRemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyFRemInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD, TLI);
}
}
@@ -1057,13 +1135,13 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
return 0;
@@ -1072,9 +1150,10 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
/// SimplifyShlInst - Given operands for an Shl, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, TD, DT, MaxRecurse))
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
// undef << X -> 0
@@ -1090,16 +1169,19 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, TD, TLI, DT, RecursionLimit);
}
/// SimplifyLShrInst - Given operands for an LShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
// undef >>l X -> 0
@@ -1116,16 +1198,20 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyLShrInst(Op0, Op1, isExact, TD, TLI, DT, RecursionLimit);
}
/// SimplifyAShrInst - Given operands for an AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, TD, TLI, DT, MaxRecurse))
return V;
// all ones >>a X -> all ones
@@ -1146,19 +1232,23 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyAShrInst(Op0, Op1, isExact, TD, TLI, DT, RecursionLimit);
}
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// Canonicalize the constant to the RHS.
@@ -1197,37 +1287,46 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
(A == Op0 || B == Op0))
return Op0;
+ // A & (-A) = A if A is a power of two or zero.
+ if (match(Op0, m_Neg(m_Specific(Op1))) ||
+ match(Op1, m_Neg(m_Specific(Op0)))) {
+ if (isPowerOfTwo(Op0, TD, /*OrZero*/true))
+ return Op0;
+ if (isPowerOfTwo(Op1, TD, /*OrZero*/true))
+ return Op1;
+ }
+
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, TD, DT,
- MaxRecurse))
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, TD, TLI,
+ DT, MaxRecurse))
return V;
// And distributes over Or. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// And distributes over Xor. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// Or distributes over And. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::And, Op0, Op1, Instruction::Or,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, TD, DT,
- MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, TD, TLI,
+ DT, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
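
The new "A & (-A) = A" fold above works because two's complement negation isolates the lowest set bit, and for a value that is a power of two or zero that bit is the value itself. A minimal standalone sketch in plain C++ (not LLVM code):

    #include <cassert>
    #include <stdint.h>

    int main() {
      uint32_t PowersOrZero[] = { 0u, 1u, 2u, 8u, 1u << 31 };
      for (unsigned i = 0; i != 5; ++i) {
        uint32_t A = PowersOrZero[i];
        assert((A & (0u - A)) == A);
      }
      uint32_t B = 6u;              // 0b110: not a power of two
      assert((B & (0u - B)) == 2u); // only the lowest set bit survives
      return 0;
    }
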
@@ -1235,19 +1334,21 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAndInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyAndInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// Canonicalize the constant to the RHS.
@@ -1297,31 +1398,31 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
return Constant::getAllOnesValue(Op0->getType());
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, TD, DT,
- MaxRecurse))
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, TD, TLI,
+ DT, MaxRecurse))
return V;
// Or distributes over And. Try some generic simplifications based on this.
- if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, TD,
+ TLI, DT, MaxRecurse))
return V;
// And distributes over Or. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Or, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, TD, TLI, DT,
MaxRecurse))
return V;
@@ -1329,19 +1430,21 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyOrInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyOrInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
- Ops, TD);
+ Ops, TD, TLI);
}
// Canonicalize the constant to the RHS.
@@ -1366,13 +1469,13 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
return Constant::getAllOnesValue(Op0->getType());
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, TD, DT,
- MaxRecurse))
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, TD, TLI,
+ DT, MaxRecurse))
return V;
// And distributes over Xor. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Xor, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ TD, TLI, DT, MaxRecurse))
return V;
// Threading Xor over selects and phi nodes is pointless, so don't bother.
@@ -1388,8 +1491,9 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyXorInst(Op0, Op1, TD, TLI, DT, RecursionLimit);
}
static Type *GetCompareTy(Value *Op) {
@@ -1419,14 +1523,16 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD, TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -1443,8 +1549,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
// Special case logic when the operands have i1 type.
- if (OpTy->isIntegerTy(1) || (OpTy->isVectorTy() &&
- cast<VectorType>(OpTy)->getElementType()->isIntegerTy(1))) {
+ if (OpTy->getScalarType()->isIntegerTy(1)) {
switch (Pred) {
default: break;
case ICmpInst::ICMP_EQ:
@@ -1564,6 +1669,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// 'srem x, CI2' produces (-|CI2|, |CI2|).
Upper = CI2->getValue().abs();
Lower = (-Upper) + 1;
+ } else if (match(LHS, m_UDiv(m_ConstantInt(CI2), m_Value()))) {
+ // 'udiv CI2, x' produces [0, CI2].
+ Upper = CI2->getValue() + 1;
} else if (match(LHS, m_UDiv(m_Value(), m_ConstantInt(CI2)))) {
// 'udiv x, CI2' produces [0, UINT_MAX / CI2].
APInt NegOne = APInt::getAllOnesValue(Width);
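
The new 'udiv CI2, x' range added above is [0, CI2] because unsigned division never enlarges the dividend; a minimal standalone sketch in plain C++ (not LLVM code):

    #include <cassert>
    #include <stdint.h>

    int main() {
      const uint32_t CI2 = 1000;
      for (uint32_t x = 1; x != 4096; ++x)
        assert(CI2 / x <= CI2); // the quotient always stays in [0, CI2]
      return 0;
    }
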
@@ -1622,13 +1730,13 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
ConstantExpr::getIntToPtr(RHSC, SrcTy),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
} else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
if (RI->getOperand(0)->getType() == SrcTy)
// Compare without the cast.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
}
}
@@ -1640,7 +1748,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that signed predicates become unsigned.
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
- SrcOp, RI->getOperand(0), TD, DT,
+ SrcOp, RI->getOperand(0), TD, TLI, DT,
MaxRecurse-1))
return V;
}
@@ -1656,7 +1764,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// also a case of comparing two zero-extended values.
if (RExt == CI && MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
- SrcOp, Trunc, TD, DT, MaxRecurse-1))
+ SrcOp, Trunc, TD, TLI, DT, MaxRecurse-1))
return V;
// Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
@@ -1701,7 +1809,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that the predicate does not change.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
}
// Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
@@ -1715,7 +1823,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If the re-extended constant didn't change then this is effectively
// also a case of comparing two sign-extended values.
if (RExt == CI && MaxRecurse)
- if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, TD, DT,
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, TD, TLI, DT,
MaxRecurse-1))
return V;
@@ -1751,7 +1859,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
Constant::getNullValue(SrcTy),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
break;
case ICmpInst::ICMP_ULT:
@@ -1760,7 +1868,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
Constant::getNullValue(SrcTy),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
break;
}
@@ -1794,14 +1902,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if ((A == RHS || B == RHS) && NoLHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
Constant::getNullValue(RHS->getType()),
- TD, DT, MaxRecurse-1))
+ TD, TLI, DT, MaxRecurse-1))
return V;
// icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
if ((C == LHS || D == LHS) && NoRHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred,
Constant::getNullValue(LHS->getType()),
- C == LHS ? D : C, TD, DT, MaxRecurse-1))
+ C == LHS ? D : C, TD, TLI, DT, MaxRecurse-1))
return V;
// icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
@@ -1810,7 +1918,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Determine Y and Z in the form icmp (X+Y), (X+Z).
Value *Y = (A == C || A == D) ? B : A;
Value *Z = (C == A || C == B) ? D : C;
- if (Value *V = SimplifyICmpInst(Pred, Y, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(Pred, Y, Z, TD, TLI, DT, MaxRecurse-1))
return V;
}
}
@@ -1870,6 +1978,15 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
+ // x udiv y <=u x.
+ if (LBO && match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
+ // icmp pred (X /u Y), X
+ if (Pred == ICmpInst::ICMP_UGT)
+ return getFalse(ITy);
+ if (Pred == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ }
+
if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
LBO->getOperand(1) == RBO->getOperand(1)) {
switch (LBO->getOpcode()) {
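
The "x udiv y <=u x" fold added above rests on the fact that unsigned division by any nonzero divisor cannot increase the dividend, so ugt folds to false and ule folds to true; a minimal standalone sketch in plain C++ (not LLVM code):

    #include <cassert>
    #include <stdint.h>

    int main() {
      uint32_t Xs[] = { 0u, 1u, 7u, 100u, 0xFFFFFFFFu };
      uint32_t Ys[] = { 1u, 2u, 3u, 0xFFFFFFFFu };
      for (unsigned i = 0; i != 5; ++i)
        for (unsigned j = 0; j != 4; ++j)
          assert(Xs[i] / Ys[j] <= Xs[i]);
      return 0;
    }
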
@@ -1884,7 +2001,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (!LBO->isExact() || !RBO->isExact())
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
- RBO->getOperand(0), TD, DT, MaxRecurse-1))
+ RBO->getOperand(0), TD, TLI, DT, MaxRecurse-1))
return V;
break;
case Instruction::Shl: {
@@ -1895,7 +2012,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (!NSW && ICmpInst::isSigned(Pred))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
- RBO->getOperand(0), TD, DT, MaxRecurse-1))
+ RBO->getOperand(0), TD, TLI, DT, MaxRecurse-1))
return V;
break;
}
@@ -1949,7 +2066,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(EqP, A, B, TD, TLI, DT, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
@@ -1963,7 +2080,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, TLI, DT, MaxRecurse-1))
return V;
break;
}
@@ -2019,7 +2136,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(EqP, A, B, TD, TLI, DT, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
@@ -2033,7 +2150,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, TLI, DT, MaxRecurse-1))
return V;
break;
}
@@ -2093,34 +2210,38 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, TLI, DT, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, TLI, DT, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, TD, TLI, DT, RecursionLimit);
}
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD, TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -2188,21 +2309,23 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, TLI, DT, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, TLI, DT, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, TD, TLI, DT, RecursionLimit);
}
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
@@ -2233,10 +2356,13 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops,
- const TargetData *TD, const DominatorTree *) {
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+ const DominatorTree *) {
// The type of the GEP pointer operand.
- PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
+ PointerType *PtrTy = dyn_cast<PointerType>(Ops[0]->getType());
+  // The GEP pointer operand is not a pointer; it's a vector of pointers.
+ if (!PtrTy)
+ return 0;
// getelementptr P -> P.
if (Ops.size() == 1)
@@ -2334,62 +2460,76 @@ static Value *SimplifyPHINode(PHINode *PN, const DominatorTree *DT) {
return CommonValue;
}
-
//=== Helper functions for higher up the class hierarchy.
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
switch (Opcode) {
case Instruction::Add:
return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
+ TD, TLI, DT, MaxRecurse);
case Instruction::Sub:
return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
- case Instruction::Mul: return SimplifyMulInst (LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::SRem: return SimplifySRemInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::URem: return SimplifyURemInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, TD, DT, MaxRecurse);
+ TD, TLI, DT, MaxRecurse);
+ case Instruction::Mul: return SimplifyMulInst (LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::SRem: return SimplifySRemInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::URem: return SimplifyURemInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
case Instruction::Shl:
return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
+ TD, TLI, DT, MaxRecurse);
case Instruction::LShr:
- return SimplifyLShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
+ return SimplifyLShrInst(LHS, RHS, /*isExact*/false, TD, TLI, DT,
+ MaxRecurse);
case Instruction::AShr:
- return SimplifyAShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
- case Instruction::And: return SimplifyAndInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::Or: return SimplifyOrInst (LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::Xor: return SimplifyXorInst(LHS, RHS, TD, DT, MaxRecurse);
+ return SimplifyAShrInst(LHS, RHS, /*isExact*/false, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::And: return SimplifyAndInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::Or: return SimplifyOrInst (LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
+ case Instruction::Xor: return SimplifyXorInst(LHS, RHS, TD, TLI, DT,
+ MaxRecurse);
default:
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
- return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, TD);
+ return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, TD, TLI);
}
// If the operation is associative, try some generic simplifications.
if (Instruction::isAssociative(Opcode))
- if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, TD, TLI, DT,
MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, TD, TLI, DT,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, TD, TLI, DT,
+ MaxRecurse))
return V;
return 0;
@@ -2397,100 +2537,113 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyBinOp(Opcode, LHS, RHS, TD, TLI, DT, RecursionLimit);
}
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
unsigned MaxRecurse) {
if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
- return SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
- return SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
+ return SimplifyICmpInst(Predicate, LHS, RHS, TD, TLI, DT, MaxRecurse);
+ return SimplifyFCmpInst(Predicate, LHS, RHS, TD, TLI, DT, MaxRecurse);
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, TD, TLI, DT, RecursionLimit);
+}
+
+static Value *SimplifyCallInst(CallInst *CI) {
+ // call undef -> undef
+ if (isa<UndefValue>(CI->getCalledValue()))
+ return UndefValue::get(CI->getType());
+
+ return 0;
}
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
switch (I->getOpcode()) {
default:
- Result = ConstantFoldInstruction(I, TD);
+ Result = ConstantFoldInstruction(I, TD, TLI);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::Mul:
- Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::SDiv:
- Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::UDiv:
- Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::SRem:
- Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::URem:
- Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::And:
- Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Or:
- Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Xor:
- Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::ICmp:
Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, DT);
+ I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, DT);
+ I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
@@ -2511,6 +2664,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
case Instruction::PHI:
Result = SimplifyPHINode(cast<PHINode>(I), DT);
break;
+ case Instruction::Call:
+ Result = SimplifyCallInst(cast<CallInst>(I));
+ break;
}
/// If called on unreachable code, the above logic may report that the
@@ -2527,6 +2683,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
///
void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!");
@@ -2551,12 +2708,12 @@ void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
// SimplifyInstruction.
AssertingVH<> UserHandle(User);
- SimplifiedVal = SimplifyInstruction(User, TD, DT);
+ SimplifiedVal = SimplifyInstruction(User, TD, TLI, DT);
if (SimplifiedVal == 0) continue;
}
// Recursively simplify this user to the new value.
- ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD, DT);
+ ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD, TLI, DT);
From = dyn_cast_or_null<Instruction>((Value*)FromHandle);
To = ToHandle;
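
Call-site usage is unchanged apart from the extra parameter; a hedged sketch (From, To, and the analysis pointers are assumed):

    // Replace every use of From with To, recursively re-simplifying each user
    // that becomes foldable; TLI is threaded into every nested
    // SimplifyInstruction call. From is erased, so don't touch it afterwards.
    ReplaceAndSimplifyAllUses(From, To, TD, TLI, DT);
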
diff --git a/lib/Analysis/LLVMBuild.txt b/lib/Analysis/LLVMBuild.txt
new file mode 100644
index 0000000..a8a8079
--- /dev/null
+++ b/lib/Analysis/LLVMBuild.txt
@@ -0,0 +1,25 @@
+;===- ./lib/Analysis/LLVMBuild.txt -----------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[common]
+subdirectories = IPA
+
+[component_0]
+type = Library
+name = Analysis
+parent = Libraries
+required_libraries = Core Support Target
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index f80595c..d27d911 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -20,6 +20,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
@@ -33,7 +34,10 @@
using namespace llvm;
char LazyValueInfo::ID = 0;
-INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
+INITIALIZE_PASS_BEGIN(LazyValueInfo, "lazy-value-info",
+ "Lazy Value Information Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(LazyValueInfo, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
namespace llvm {
@@ -61,10 +65,10 @@ class LVILatticeVal {
constant,
/// notconstant - This Value is known to not have the specified value.
notconstant,
-
+
/// constantrange - The Value falls within this range.
constantrange,
-
+
/// overdefined - This value is not known to be constant, and we know that
/// it has a value.
overdefined
@@ -207,7 +211,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData for smarter constant folding.
+ // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
@@ -233,7 +237,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData for smarter constant folding.
+ // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
@@ -367,7 +371,11 @@ namespace {
/// for cache updating.
typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
DenseSet<OverDefinedPairTy> OverDefinedCache;
-
+
+ /// SeenBlocks - Keep track of all blocks that we have ever seen, so we
+ /// don't spend time removing unused blocks from our caches.
+ DenseSet<AssertingVH<BasicBlock> > SeenBlocks;
+
/// BlockValueStack - This stack holds the state of the value solver
/// during a query. It basically emulates the callstack of the naive
/// recursive value lookup process.
@@ -438,6 +446,7 @@ namespace {
/// clear - Empty the cache.
void clear() {
+ SeenBlocks.clear();
ValueCache.clear();
OverDefinedCache.clear();
}
@@ -466,6 +475,12 @@ void LVIValueHandle::deleted() {
}
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+ // Shortcut if we have never seen this block.
+ DenseSet<AssertingVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
+ if (I == SeenBlocks.end())
+ return;
+ SeenBlocks.erase(I);
+
SmallVector<OverDefinedPairTy, 4> ToErase;
for (DenseSet<OverDefinedPairTy>::iterator I = OverDefinedCache.begin(),
E = OverDefinedCache.end(); I != E; ++I) {
@@ -505,6 +520,7 @@ LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
if (Constant *VC = dyn_cast<Constant>(Val))
return LVILatticeVal::get(VC);
+ SeenBlocks.insert(BB);
return lookup(Val)[BB];
}
@@ -513,6 +529,7 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
return true;
ValueCacheEntryTy &Cache = lookup(Val);
+ SeenBlocks.insert(BB);
LVILatticeVal &BBLV = Cache[BB];
// OverDefinedCacheUpdater is a helper object that will update
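
SeenBlocks is plain touched-block bookkeeping so that eraseBlock can bail out without scanning the caches; the pattern, condensed (a sketch, not the full class):

    DenseSet<AssertingVH<BasicBlock> > SeenBlocks;

    // On every cache fill:
    SeenBlocks.insert(BB);

    // Fast path at teardown:
    if (!SeenBlocks.erase(BB))
      return;  // nothing was ever cached for BB, so skip the cache walk
    // ...otherwise scan OverDefinedCache/ValueCache and drop BB's entries.
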
@@ -1007,12 +1024,19 @@ static LazyValueInfoCache &getCache(void *&PImpl) {
bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
-
+
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
// Fully lazy.
return false;
}
+void LazyValueInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<TargetLibraryInfo>();
+}
+
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
@@ -1061,7 +1085,8 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
// If we know the value is a constant, evaluate the conditional.
Constant *Res = 0;
if (Result.isConstant()) {
- Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD);
+ Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+ TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
return Unknown;
@@ -1102,13 +1127,15 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD);
+ Result.getNotConstant(), C, TD,
+ TLI);
if (Res->isNullValue())
return False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD);
+ Result.getNotConstant(), C, TD,
+ TLI);
if (Res->isNullValue())
return True;
}
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 38d677d..971065f 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -44,6 +44,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/IntrinsicInst.h"
@@ -103,6 +104,7 @@ namespace {
AliasAnalysis *AA;
DominatorTree *DT;
TargetData *TD;
+ TargetLibraryInfo *TLI;
std::string Messages;
raw_string_ostream MessagesStr;
@@ -117,6 +119,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
+ AU.addRequired<TargetLibraryInfo>();
AU.addRequired<DominatorTree>();
}
virtual void print(raw_ostream &O, const Module *M) const {}
@@ -149,6 +152,7 @@ namespace {
char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
@@ -174,6 +178,7 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
Messages.clear();
@@ -614,10 +619,10 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, TD, DT))
+ if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))
return findValueImpl(W, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (Value *W = ConstantFoldConstantExpression(CE, TD))
+ if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
}
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index 85aacca..858cc64 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopIterator.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
@@ -95,7 +96,7 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
// Test if the value is already loop-invariant.
if (isLoopInvariant(I))
return true;
- if (!I->isSafeToSpeculativelyExecute())
+ if (!isSafeToSpeculativelyExecute(I))
return false;
if (I->mayReadFromMemory())
return false;
@@ -165,99 +166,6 @@ PHINode *Loop::getCanonicalInductionVariable() const {
return 0;
}
-/// getTripCount - Return a loop-invariant LLVM value indicating the number of
-/// times the loop will be executed. Note that this means that the backedge
-/// of the loop executes N-1 times. If the trip-count cannot be determined,
-/// this returns null.
-///
-/// The IndVarSimplify pass transforms loops to have a form that this
-/// function easily understands.
-///
-Value *Loop::getTripCount() const {
- // Canonical loops will end with a 'cmp ne I, V', where I is the incremented
- // canonical induction variable and V is the trip count of the loop.
- PHINode *IV = getCanonicalInductionVariable();
- if (IV == 0 || IV->getNumIncomingValues() != 2) return 0;
-
- bool P0InLoop = contains(IV->getIncomingBlock(0));
- Value *Inc = IV->getIncomingValue(!P0InLoop);
- BasicBlock *BackedgeBlock = IV->getIncomingBlock(!P0InLoop);
-
- if (BranchInst *BI = dyn_cast<BranchInst>(BackedgeBlock->getTerminator()))
- if (BI->isConditional()) {
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) {
- if (ICI->getOperand(0) == Inc) {
- if (BI->getSuccessor(0) == getHeader()) {
- if (ICI->getPredicate() == ICmpInst::ICMP_NE)
- return ICI->getOperand(1);
- } else if (ICI->getPredicate() == ICmpInst::ICMP_EQ) {
- return ICI->getOperand(1);
- }
- }
- }
- }
-
- return 0;
-}
-
-/// getSmallConstantTripCount - Returns the trip count of this loop as a
-/// normal unsigned value, if possible. Returns 0 if the trip count is unknown
-/// or not constant. Will also return 0 if the trip count is very large
-/// (>= 2^32)
-unsigned Loop::getSmallConstantTripCount() const {
- Value* TripCount = this->getTripCount();
- if (TripCount) {
- if (ConstantInt *TripCountC = dyn_cast<ConstantInt>(TripCount)) {
- // Guard against huge trip counts.
- if (TripCountC->getValue().getActiveBits() <= 32) {
- return (unsigned)TripCountC->getZExtValue();
- }
- }
- }
- return 0;
-}
-
-/// getSmallConstantTripMultiple - Returns the largest constant divisor of the
-/// trip count of this loop as a normal unsigned value, if possible. This
-/// means that the actual trip count is always a multiple of the returned
-/// value (don't forget the trip count could very well be zero as well!).
-///
-/// Returns 1 if the trip count is unknown or not guaranteed to be the
-/// multiple of a constant (which is also the case if the trip count is simply
-/// constant, use getSmallConstantTripCount for that case), Will also return 1
-/// if the trip count is very large (>= 2^32).
-unsigned Loop::getSmallConstantTripMultiple() const {
- Value* TripCount = this->getTripCount();
- // This will hold the ConstantInt result, if any
- ConstantInt *Result = NULL;
- if (TripCount) {
- // See if the trip count is constant itself
- Result = dyn_cast<ConstantInt>(TripCount);
- // if not, see if it is a multiplication
- if (!Result)
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TripCount)) {
- switch (BO->getOpcode()) {
- case BinaryOperator::Mul:
- Result = dyn_cast<ConstantInt>(BO->getOperand(1));
- break;
- case BinaryOperator::Shl:
- if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
- if (CI->getValue().getActiveBits() <= 5)
- return 1u << CI->getZExtValue();
- break;
- default:
- break;
- }
- }
- }
- // Guard against huge trip counts.
- if (Result && Result->getValue().getActiveBits() <= 32) {
- return (unsigned)Result->getZExtValue();
- } else {
- return 1;
- }
-}
-
/// isLCSSAForm - Return true if the Loop is in LCSSA form
bool Loop::isLCSSAForm(DominatorTree &DT) const {
// Sort the blocks vector so that we can use binary search to do quick
@@ -477,21 +385,19 @@ void UnloopUpdater::updateBlockParents() {
/// removeBlocksFromAncestors - Remove unloop's blocks from all ancestors below
/// their new parents.
void UnloopUpdater::removeBlocksFromAncestors() {
- // Remove unloop's blocks from all ancestors below their new parents.
+ // Remove all unloop's blocks (including those in nested subloops) from
+ // ancestors below the new parent loop.
for (Loop::block_iterator BI = Unloop->block_begin(),
BE = Unloop->block_end(); BI != BE; ++BI) {
- Loop *NewParent = LI->getLoopFor(*BI);
- // If this block is an immediate subloop, remove all blocks (including
- // nested subloops) from ancestors below the new parent loop.
- // Otherwise, if this block is in a nested subloop, skip it.
- if (SubloopParents.count(NewParent))
- NewParent = SubloopParents[NewParent];
- else if (Unloop->contains(NewParent))
- continue;
-
+ Loop *OuterParent = LI->getLoopFor(*BI);
+ if (Unloop->contains(OuterParent)) {
+ while (OuterParent->getParentLoop() != Unloop)
+ OuterParent = OuterParent->getParentLoop();
+ OuterParent = SubloopParents[OuterParent];
+ }
// Remove blocks from former Ancestors except Unloop itself which will be
// deleted.
- for (Loop *OldParent = Unloop->getParentLoop(); OldParent != NewParent;
+ for (Loop *OldParent = Unloop->getParentLoop(); OldParent != OuterParent;
OldParent = OldParent->getParentLoop()) {
assert(OldParent && "new loop is not an ancestor of the original");
OldParent->removeBlockFromLoop(*BI);
diff --git a/lib/Analysis/MemDepPrinter.cpp b/lib/Analysis/MemDepPrinter.cpp
index fde07ea..22414b3 100644
--- a/lib/Analysis/MemDepPrinter.cpp
+++ b/lib/Analysis/MemDepPrinter.cpp
@@ -130,7 +130,7 @@ bool MemDepPrinter::runOnFunction(Function &F) {
AliasAnalysis::Location Loc = AA.getLocation(LI);
MDA.getNonLocalPointerDependency(Loc, true, LI->getParent(), NLDI);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- if (!LI->isUnordered()) {
+ if (!SI->isUnordered()) {
// FIXME: Handle atomic/volatile stores.
Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
static_cast<BasicBlock *>(0)));
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 8d451c4..b145650 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -48,10 +48,10 @@ static bool isMallocCall(const CallInst *CI) {
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
FunctionType *FTy = Callee->getFunctionType();
- if (FTy->getNumParams() != 1)
- return false;
- return FTy->getParamType(0)->isIntegerTy(32) ||
- FTy->getParamType(0)->isIntegerTy(64);
+ return FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
+ FTy->getNumParams() == 1 &&
+ (FTy->getParamType(0)->isIntegerTy(32) ||
+ FTy->getParamType(0)->isIntegerTy(64));
}
/// extractMallocCall - Returns the corresponding CallInst if the instruction
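
Concretely, only an allocator-shaped prototype now passes; a sketch of the shape test over an assumed FunctionType *FTy:

    // True for i8* (i32) and i8* (i64) -- i.e. the C malloc prototype.
    // False for a non-i8* return, extra parameters, or another size type.
    bool AllocatorShaped =
        FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
        FTy->getNumParams() == 1 &&
        (FTy->getParamType(0)->isIntegerTy(32) ||
         FTy->getParamType(0)->isIntegerTy(64));
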
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 92967c0..704e27b 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -22,6 +22,7 @@
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
@@ -91,6 +92,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
TD = getAnalysisIfAvailable<TargetData>();
+ DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
return false;
@@ -331,6 +333,81 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
return 0;
}
+namespace {
+ /// Only find pointer captures which happen before the given instruction. Uses
+ /// the dominator tree to determine whether one instruction is before another.
+ struct CapturesBefore : public CaptureTracker {
+ CapturesBefore(const Instruction *I, DominatorTree *DT)
+ : BeforeHere(I), DT(DT), Captured(false) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ if (BeforeHere != I && DT->dominates(BeforeHere, I))
+ return false;
+ return true;
+ }
+
+ bool captured(Instruction *I) {
+ if (BeforeHere != I && DT->dominates(BeforeHere, I))
+ return false;
+ Captured = true;
+ return true;
+ }
+
+ const Instruction *BeforeHere;
+ DominatorTree *DT;
+
+ bool Captured;
+ };
+}
+
+AliasAnalysis::ModRefResult
+MemoryDependenceAnalysis::getModRefInfo(const Instruction *Inst,
+ const AliasAnalysis::Location &MemLoc) {
+ AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
+ if (MR != AliasAnalysis::ModRef) return MR;
+
+ // FIXME: this is really just shoring-up a deficiency in alias analysis.
+ // BasicAA isn't willing to spend linear time determining whether an alloca
+ // was captured before or after this particular call, while we are. However,
+ // with a smarter AA in place, this test is just wasting compile time.
+ if (!DT) return AliasAnalysis::ModRef;
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+ if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object))
+ return AliasAnalysis::ModRef;
+ ImmutableCallSite CS(Inst);
+ if (!CS.getInstruction()) return AliasAnalysis::ModRef;
+
+ CapturesBefore CB(Inst, DT);
+ llvm::PointerMayBeCaptured(Object, &CB);
+
+ if (isa<Constant>(Object) || CS.getInstruction() == Object || CB.Captured)
+ return AliasAnalysis::ModRef;
+
+ unsigned ArgNo = 0;
+ for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ CI != CE; ++CI, ++ArgNo) {
+ // Only look at the no-capture or byval pointer arguments. If this
+ // pointer were passed to arguments that were neither of these, then it
+ // couldn't be no-capture.
+ if (!(*CI)->getType()->isPointerTy() ||
+ (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
+ continue;
+
+ // If this is a no-capture pointer argument, see if we can tell that it
+ // is impossible to alias the pointer we're checking. If not, we have to
+ // assume that the call could touch the pointer, even though it doesn't
+ // escape.
+ if (!AA->isNoAlias(AliasAnalysis::Location(*CI),
+ AliasAnalysis::Location(Object))) {
+ return AliasAnalysis::ModRef;
+ }
+ }
+ return AliasAnalysis::NoModRef;
+}
+
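
The refinement, sketched at the source level (function names assumed): a call cannot modify or reference a local object whose address has not escaped before that call.

    void opaque(void);
    void capture(int *);

    int f(void) {
      int Local = 0;
      opaque();          // &Local not passed here and not yet captured:
                         // NoModRef with respect to Local
      capture(&Local);   // first potential capture
      opaque();          // after the capture: conservatively ModRef
      return Local;
    }
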
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases with
/// read-only operations. If isLoad is false, this routine ignores may-aliases
@@ -478,7 +555,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
- switch (AA->getModRefInfo(Inst, MemLoc)) {
+ switch (getModRefInfo(Inst, MemLoc)) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index 7e22ddc..80ea219 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
@@ -27,7 +28,7 @@ static bool CanPHITrans(Instruction *Inst) {
return true;
if (isa<CastInst>(Inst) &&
- Inst->isSafeToSpeculativelyExecute())
+ isSafeToSpeculativelyExecute(Inst))
return true;
if (Inst->getOpcode() == Instruction::Add &&
@@ -186,7 +187,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// operands need to be phi translated, and if so, reconstruct it.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return 0;
Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
if (PHIIn == 0) return 0;
if (PHIIn == Cast->getOperand(0))
@@ -284,7 +285,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, DT)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -381,7 +382,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
// Handle cast of PHI translatable value.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return 0;
Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
CurBB, PredBB, DT, NewInsts);
if (OpVal == 0) return 0;
diff --git a/lib/Analysis/PathProfileVerifier.cpp b/lib/Analysis/PathProfileVerifier.cpp
index 0ae734e..0fcdfe7 100644
--- a/lib/Analysis/PathProfileVerifier.cpp
+++ b/lib/Analysis/PathProfileVerifier.cpp
@@ -137,22 +137,22 @@ bool PathProfileVerifier::runOnModule (Module &M) {
BasicBlock* source = nextEdge->getSource();
BasicBlock* target = nextEdge->getTarget();
unsigned duplicateNumber = nextEdge->getDuplicateNumber();
- DEBUG(dbgs () << source->getNameStr() << " --{" << duplicateNumber
- << "}--> " << target->getNameStr());
+ DEBUG(dbgs() << source->getName() << " --{" << duplicateNumber
+ << "}--> " << target->getName());
// Ensure all the referenced edges exist
// TODO: make this a separate function
if( !arrayMap.count(source) ) {
- errs() << " error [" << F->getNameStr() << "()]: source '"
- << source->getNameStr()
+ errs() << " error [" << F->getName() << "()]: source '"
+ << source->getName()
<< "' does not exist in the array map.\n";
} else if( !arrayMap[source].count(target) ) {
- errs() << " error [" << F->getNameStr() << "()]: target '"
- << target->getNameStr()
+ errs() << " error [" << F->getName() << "()]: target '"
+ << target->getName()
<< "' does not exist in the array map.\n";
} else if( !arrayMap[source][target].count(duplicateNumber) ) {
- errs() << " error [" << F->getNameStr() << "()]: edge "
- << source->getNameStr() << " -> " << target->getNameStr()
+ errs() << " error [" << F->getName() << "()]: edge "
+ << source->getName() << " -> " << target->getName()
<< " duplicate number " << duplicateNumber
<< " does not exist in the array map.\n";
} else {
diff --git a/lib/Analysis/ProfileEstimatorPass.cpp b/lib/Analysis/ProfileEstimatorPass.cpp
index b594e2b..63468f8 100644
--- a/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/lib/Analysis/ProfileEstimatorPass.cpp
@@ -332,7 +332,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) {
// Clear Minimal Edges.
MinimalWeight.clear();
- DEBUG(dbgs() << "Working on function " << F.getNameStr() << "\n");
+ DEBUG(dbgs() << "Working on function " << F.getName() << "\n");
// Since the entry block is the first one and has no predecessors, the edge
// (0,entry) is inserted with the starting weight of 1.
diff --git a/lib/Analysis/ProfileInfoLoaderPass.cpp b/lib/Analysis/ProfileInfoLoaderPass.cpp
index 098079b..c4da807 100644
--- a/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -160,7 +160,7 @@ bool LoaderPass::runOnModule(Module &M) {
ReadCount = 0;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
- DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n");
+ DEBUG(dbgs() << "Working on " << F->getName() << "\n");
readEdge(getEdge(0,&F->getEntryBlock()), Counters);
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
TerminatorInst *TI = BB->getTerminator();
@@ -181,7 +181,7 @@ bool LoaderPass::runOnModule(Module &M) {
ReadCount = 0;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
- DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n");
+ DEBUG(dbgs() << "Working on " << F->getName() << "\n");
readEdge(getEdge(0,&F->getEntryBlock()), Counters);
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
TerminatorInst *TI = BB->getTerminator();
diff --git a/lib/Analysis/ProfileVerifierPass.cpp b/lib/Analysis/ProfileVerifierPass.cpp
index a017518..0cb1588 100644
--- a/lib/Analysis/ProfileVerifierPass.cpp
+++ b/lib/Analysis/ProfileVerifierPass.cpp
@@ -30,7 +30,7 @@ static cl::opt<bool,false>
ProfileVerifierDisableAssertions("profile-verifier-noassert",
cl::desc("Disable assertions"));
-namespace llvm {
+namespace {
template<class FType, class BType>
class ProfileVerifierPassT : public FunctionPass {
@@ -125,8 +125,8 @@ namespace llvm {
outCount++;
}
}
- dbgs() << "Block " << BB->getNameStr() << " in "
- << BB->getParent()->getNameStr() << ":"
+ dbgs() << "Block " << BB->getName() << " in "
+ << BB->getParent()->getName() << ":"
<< "BBWeight=" << format("%20.20g",BBWeight) << ","
<< "inWeight=" << format("%20.20g",inWeight) << ","
<< "inCount=" << inCount << ","
@@ -143,8 +143,8 @@ namespace llvm {
template<class FType, class BType>
void ProfileVerifierPassT<FType, BType>::debugEntry (DetailedBlockInfo *DI) {
- dbgs() << "TROUBLE: Block " << DI->BB->getNameStr() << " in "
- << DI->BB->getParent()->getNameStr() << ":"
+ dbgs() << "TROUBLE: Block " << DI->BB->getName() << " in "
+ << DI->BB->getParent()->getName() << ":"
<< "BBWeight=" << format("%20.20g",DI->BBWeight) << ","
<< "inWeight=" << format("%20.20g",DI->inWeight) << ","
<< "inCount=" << DI->inCount << ","
@@ -201,13 +201,13 @@ namespace llvm {
double EdgeWeight = PI->getEdgeWeight(E);
if (EdgeWeight == ProfileInfoT<FType, BType>::MissingValue) {
dbgs() << "Edge " << E << " in Function "
- << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": ";
+ << ProfileInfoT<FType, BType>::getFunction(E)->getName() << ": ";
ASSERTMESSAGE("Edge has missing value");
return 0;
} else {
if (EdgeWeight < 0) {
dbgs() << "Edge " << E << " in Function "
- << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": ";
+ << ProfileInfoT<FType, BType>::getFunction(E)->getName() << ": ";
ASSERTMESSAGE("Edge has negative value");
}
return EdgeWeight;
@@ -220,8 +220,8 @@ namespace llvm {
DetailedBlockInfo *DI) {
if (Error) {
DEBUG(debugEntry(DI));
- dbgs() << "Block " << DI->BB->getNameStr() << " in Function "
- << DI->BB->getParent()->getNameStr() << ": ";
+ dbgs() << "Block " << DI->BB->getName() << " in Function "
+ << DI->BB->getParent()->getName() << ": ";
ASSERTMESSAGE(Message);
}
return;
diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp
index 52753cb..828913d 100644
--- a/lib/Analysis/RegionInfo.cpp
+++ b/lib/Analysis/RegionInfo.cpp
@@ -186,18 +186,16 @@ std::string Region::getNameStr() const {
raw_string_ostream OS(entryName);
WriteAsOperand(OS, getEntry(), false);
- entryName = OS.str();
} else
- entryName = getEntry()->getNameStr();
+ entryName = getEntry()->getName();
if (getExit()) {
if (getExit()->getName().empty()) {
raw_string_ostream OS(exitName);
WriteAsOperand(OS, getExit(), false);
- exitName = OS.str();
} else
- exitName = getExit()->getNameStr();
+ exitName = getExit()->getName();
} else
exitName = "<Function Return>";
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index e0ac56c..daf7742 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -74,6 +74,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
@@ -108,6 +109,7 @@ INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
@@ -188,6 +190,14 @@ void SCEV::print(raw_ostream &OS) const {
OS << OpStr;
}
OS << ")";
+ switch (NAry->getSCEVType()) {
+ case scAddExpr:
+ case scMulExpr:
+ if (NAry->getNoWrapFlags(FlagNUW))
+ OS << "<nuw>";
+ if (NAry->getNoWrapFlags(FlagNSW))
+ OS << "<nsw>";
+ }
return;
}
case scUDivExpr: {
@@ -2581,7 +2591,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2590,7 +2600,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2607,7 +2617,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2617,7 +2627,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -3108,7 +3118,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, TD, DT))
+ if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -3584,6 +3594,12 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// because it leads to N-1 getAddExpr calls for N ultimate operands.
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
+ //
+ // Don't apply this instruction's NSW or NUW flags to the new
+ // expression. The instruction may be guarded by control flow that the
+ // no-wrap behavior depends on. Non-control-equivalent instructions can be
+ // mapped to the same SCEV expression, and it would be incorrect to transfer
+ // NSW/NUW semantics to those operations.
SmallVector<const SCEV *, 4> AddOps;
AddOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
@@ -3598,16 +3614,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
AddOps.push_back(Op1);
}
AddOps.push_back(getSCEV(U->getOperand(0)));
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
- OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(V);
- if (OBO->hasNoSignedWrap())
- setFlags(Flags, SCEV::FlagNSW);
- if (OBO->hasNoUnsignedWrap())
- setFlags(Flags, SCEV::FlagNUW);
- return getAddExpr(AddOps, Flags);
+ return getAddExpr(AddOps);
}
case Instruction::Mul: {
- // See the Add code above.
+ // Don't transfer NSW/NUW for the same reason as AddExpr.
SmallVector<const SCEV *, 4> MulOps;
MulOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0);
@@ -4153,13 +4163,19 @@ void ScalarEvolution::forgetValue(Value *V) {
}
/// getExact - Get the exact loop backedge taken count considering all loop
-/// exits. If all exits are computable, this is the minimum computed count.
+/// exits. A computable result can only be returned for loops with a single
+/// exit. Returning the minimum taken count among all exits is incorrect: one
+/// of the loop's exit limits may have been skipped. HowFarToZero assumes that
+/// the limit of each loop test is never skipped. This is a valid assumption as
+/// long as the loop exits via that test. For precise results, it is the
+/// caller's responsibility to specify the relevant loop exit using
+/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
// If any exits were not computable, the loop is not computable.
if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
- // We need at least one computable exit.
+ // We need exactly one computable exit.
if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
@@ -4171,8 +4187,8 @@ ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
if (!BECount)
BECount = ENT->ExactNotTaken;
- else
- BECount = SE->getUMinFromMismatchedTypes(BECount, ENT->ExactNotTaken);
+ else if (BECount != ENT->ExactNotTaken)
+ return SE->getCouldNotCompute();
}
assert(BECount && "Invalid not taken count for loop exit");
return BECount;
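
A sketch of the hazard this guards against (assumed loop):

    // i is always odd, so the i == 8 test never fires, yet a naive limit of
    // (8 - 1) /u 2 == 3 is computable for that exit. The loop really leaves
    // through i >= 21 after 10 backedges; umin(3, 10) would underestimate.
    unsigned f(void) {
      for (unsigned i = 1; ; i += 2) {
        if (i == 8)  return 0;   // skippable test, bogus exit limit
        if (i >= 21) return 1;   // the exit that is actually taken
      }
    }
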
@@ -4253,8 +4269,15 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
if (MaxBECount == getCouldNotCompute())
MaxBECount = EL.Max;
- else if (EL.Max != getCouldNotCompute())
- MaxBECount = getUMinFromMismatchedTypes(MaxBECount, EL.Max);
+ else if (EL.Max != getCouldNotCompute()) {
+ // We cannot take the "min" MaxBECount, because non-unit stride loops may
+ // skip some loop tests. Taking the max over the exits is sufficiently
+    // conservative.  TODO: We could do better by taking into consideration
+    // that (1) the loop has unit stride (2) the last loop test is
+    // less-than/greater-than (3) any loop test is less-than/greater-than AND
+    // falls through some constant number of times less than the other tests.
+ MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
+ }
}
return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
@@ -4658,7 +4681,8 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
- isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
+ isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
+ isa<LoadInst>(I))
return true;
if (const CallInst *CI = dyn_cast<CallInst>(I))
@@ -4748,16 +4772,23 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (!I) return 0;
- Instruction *I = cast<Instruction>(V);
if (Constant *C = Vals.lookup(I)) return C;
- assert(!isa<PHINode>(I) && "loop header phis should be mapped to constant");
- assert(canConstantEvolve(I, L) && "cannot evaluate expression in this loop");
- (void)L;
+ // An instruction inside the loop depends on a value outside the loop that we
+ // weren't given a mapping for, or a value such as a call inside the loop.
+ if (!canConstantEvolve(I, L)) return 0;
+
+ // An unmapped PHI can be due to a branch or another loop inside this loop,
+ // or due to this not being the initial iteration through a loop where we
+ // couldn't compute the evolution of this particular PHI last time.
+ if (isa<PHINode>(I)) return 0;
std::vector<Constant*> Operands(I->getNumOperands());
@@ -4768,16 +4799,21 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
if (!Operands[i]) return 0;
continue;
}
- Constant *C = EvaluateExpression(Operand, L, Vals, TD);
+ Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
Vals[Operand] = C;
if (!C) return 0;
Operands[i] = C;
}
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ if (CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
- Operands[1], TD);
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD);
+ Operands[1], TD, TLI);
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isVolatile())
+ return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ }
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+ TLI);
}
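
With loads admitted into CanConstantFold, the brute-force evaluator can now read constant tables; a source-level sketch (assumed):

    static const unsigned Table[4] = {1, 2, 4, 8};  // constant initializer

    unsigned g(void) {
      unsigned Acc = 0;
      for (unsigned i = 0; i != 4; ++i)
        Acc |= Table[i];  // non-volatile load from a constant pointer,
                          // folded by ConstantFoldLoadFromConstPtr
      return Acc;         // exit value now computable as 15
    }
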
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
@@ -4798,23 +4834,26 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
- // FIXME: Nick's fix for PR11034 will seed constants for multiple header phis.
DenseMap<Instruction *, Constant *> CurrentIterVals;
+ BasicBlock *Header = L->getHeader();
+ assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
// Since the loop is canonicalized, the PHI node must have two entries. One
// entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- Constant *StartCST =
- dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0)
- return RetVal = 0; // Must be a constant.
- CurrentIterVals[PN] = StartCST;
+ PHINode *PHI = 0;
+ for (BasicBlock::iterator I = Header->begin();
+ (PHI = dyn_cast<PHINode>(I)); ++I) {
+ Constant *StartCST =
+ dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
+ if (StartCST == 0) continue;
+ CurrentIterVals[PHI] = StartCST;
+ }
+ if (!CurrentIterVals.count(PN))
+ return RetVal = 0;
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- if (getConstantEvolvingPHI(BEValue, L) != PN &&
- !isa<Constant>(BEValue))
- return RetVal = 0; // Not derived from same PHI.
// Execute the loop symbolically to determine the exit value.
if (BEs.getActiveBits() >= 32)
@@ -4826,15 +4865,46 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
if (IterationNum == NumIterations)
return RetVal = CurrentIterVals[PN]; // Got exit value!
- // Compute the value of the PHI node for the next iteration.
+ // Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
- Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
- if (NextPHI == CurrentIterVals[PN])
- return RetVal = NextPHI; // Stopped evolving!
+ DenseMap<Instruction *, Constant *> NextIterVals;
+ Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+ TLI);
if (NextPHI == 0)
return 0; // Couldn't evaluate!
- DenseMap<Instruction *, Constant *> NextIterVals;
NextIterVals[PN] = NextPHI;
+
+ bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
+
+ // Also evaluate the other PHI nodes. However, we don't get to stop if we
+ // cease to be able to evaluate one of them or if they stop evolving,
+ // because that doesn't necessarily prevent us from computing PN.
+ SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
+ for (DenseMap<Instruction *, Constant *>::const_iterator
+         I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I) {
+ PHINode *PHI = dyn_cast<PHINode>(I->first);
+ if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
+ PHIsToCompute.push_back(std::make_pair(PHI, I->second));
+ }
+ // We use two distinct loops because EvaluateExpression may invalidate any
+ // iterators into CurrentIterVals.
+ for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
+ I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
+ PHINode *PHI = I->first;
+ Constant *&NextPHI = NextIterVals[PHI];
+ if (!NextPHI) { // Not already computed.
+ Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ }
+ if (NextPHI != I->second)
+ StoppedEvolving = false;
+ }
+
+ // If all entries in CurrentIterVals == NextIterVals then we can stop
+ // iterating, the loop can't continue to change.
+ if (StoppedEvolving)
+ return RetVal = CurrentIterVals[PN];
+
CurrentIterVals.swap(NextIterVals);
}
}
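
Why all header PHIs are now seeded and advanced in lockstep; a sketch of the case a single-PHI evaluation misses (assumed loop):

    unsigned h(void) {
      unsigned Sum = 0;                  // header PHI #1
      for (unsigned i = 0; i != 4; ++i)  // header PHI #2, the IV
        Sum += i;                        // Sum's backedge value reads i's PHI
      return Sum;                        // computable as 6 only when both
    }                                    // PHIs are iterated together
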
@@ -4844,9 +4914,9 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
/// try to evaluate a few iterations of the loop until we get the exit
/// condition gets a value of ExitWhen (true or false). If we cannot
/// evaluate the trip count of the loop, return getCouldNotCompute().
-const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
- Value *Cond,
- bool ExitWhen) {
+const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
+ Value *Cond,
+ bool ExitWhen) {
PHINode *PN = getConstantEvolvingPHI(Cond, L);
if (PN == 0) return getCouldNotCompute();
@@ -4854,29 +4924,33 @@ const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// That's the only form we support here.
if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
+ DenseMap<Instruction *, Constant *> CurrentIterVals;
+ BasicBlock *Header = L->getHeader();
+ assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
+
// One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- Constant *StartCST =
- dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
-
- Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- if (getConstantEvolvingPHI(BEValue, L) != PN &&
- !isa<Constant>(BEValue))
- return getCouldNotCompute(); // Not derived from same PHI.
+ PHINode *PHI = 0;
+ for (BasicBlock::iterator I = Header->begin();
+ (PHI = dyn_cast<PHINode>(I)); ++I) {
+ Constant *StartCST =
+ dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
+ if (StartCST == 0) continue;
+ CurrentIterVals[PHI] = StartCST;
+ }
+ if (!CurrentIterVals.count(PN))
+ return getCouldNotCompute();
// Okay, we find a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
// "ExitWhen".
- unsigned IterationNum = 0;
+
unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
- for (Constant *PHIVal = StartCST;
- IterationNum != MaxIterations; ++IterationNum) {
- DenseMap<Instruction *, Constant *> PHIValMap;
- PHIValMap[PN] = PHIVal;
+  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
+       ++IterationNum) {
ConstantInt *CondVal =
- dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, PHIValMap, TD));
+ dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
+ TD, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -4886,11 +4960,29 @@ const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
return getConstant(Type::getInt32Ty(getContext()), IterationNum);
}
- // Compute the value of the PHI node for the next iteration.
- Constant *NextPHI = EvaluateExpression(BEValue, L, PHIValMap, TD);
- if (NextPHI == 0 || NextPHI == PHIVal)
- return getCouldNotCompute();// Couldn't evaluate or not making progress...
- PHIVal = NextPHI;
+ // Update all the PHI nodes for the next iteration.
+ DenseMap<Instruction *, Constant *> NextIterVals;
+
+ // Create a list of which PHIs we need to compute. We want to do this before
+ // calling EvaluateExpression on them because that may invalidate iterators
+ // into CurrentIterVals.
+ SmallVector<PHINode *, 8> PHIsToCompute;
+ for (DenseMap<Instruction *, Constant *>::const_iterator
+         I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I) {
+ PHINode *PHI = dyn_cast<PHINode>(I->first);
+ if (!PHI || PHI->getParent() != Header) continue;
+ PHIsToCompute.push_back(PHI);
+ }
+ for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
+ E = PHIsToCompute.end(); I != E; ++I) {
+ PHINode *PHI = *I;
+ Constant *&NextPHI = NextIterVals[PHI];
+ if (NextPHI) continue; // Already computed!
+
+ Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ }
+ CurrentIterVals.swap(NextIterVals);
}
// Too many iterations were needed to evaluate.
@@ -4921,6 +5013,98 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
return C;
}
+/// This builds up a Constant using the ConstantExpr interface. That way, we
+/// will return Constants for objects which aren't represented by a
+/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
+/// Returns NULL if the SCEV isn't representable as a Constant.
+static Constant *BuildConstantFromSCEV(const SCEV *V) {
+ switch (V->getSCEVType()) {
+ default: // TODO: smax, umax.
+ case scCouldNotCompute:
+ case scAddRecExpr:
+ break;
+ case scConstant:
+ return cast<SCEVConstant>(V)->getValue();
+ case scUnknown:
+ return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
+ case scSignExtend: {
+ const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
+ return ConstantExpr::getSExt(CastOp, SS->getType());
+ break;
+ }
+ case scZeroExtend: {
+ const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
+ return ConstantExpr::getZExt(CastOp, SZ->getType());
+ break;
+ }
+ case scTruncate: {
+ const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
+ return ConstantExpr::getTrunc(CastOp, ST->getType());
+ break;
+ }
+ case scAddExpr: {
+ const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
+ if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
+ if (C->getType()->isPointerTy())
+ C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(C->getContext()));
+ for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
+ Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
+ if (!C2) return 0;
+
+ // First pointer!
+ if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
+ std::swap(C, C2);
+ // The offsets have been converted to bytes. We can add bytes to an
+ // i8* by GEP with the byte count in the first index.
+ C = ConstantExpr::getBitCast(C,Type::getInt8PtrTy(C->getContext()));
+ }
+
+ // Don't bother trying to sum two pointers. We probably can't
+ // statically compute a load that results from it anyway.
+ if (C2->getType()->isPointerTy())
+ return 0;
+
+ if (C->getType()->isPointerTy()) {
+ if (cast<PointerType>(C->getType())->getElementType()->isStructTy())
+ C2 = ConstantExpr::getIntegerCast(
+ C2, Type::getInt32Ty(C->getContext()), true);
+ C = ConstantExpr::getGetElementPtr(C, C2);
+ } else
+ C = ConstantExpr::getAdd(C, C2);
+ }
+ return C;
+ }
+ break;
+ }
+ case scMulExpr: {
+ const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
+ if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
+ // Don't bother with pointers at all.
+ if (C->getType()->isPointerTy()) return 0;
+ for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
+ Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
+ if (!C2 || C2->getType()->isPointerTy()) return 0;
+ C = ConstantExpr::getMul(C, C2);
+ }
+ return C;
+ }
+ break;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
+ if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
+ if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
+ if (LHS->getType() == RHS->getType())
+ return ConstantExpr::getUDiv(LHS, RHS);
+ break;
+ }
+ }
+ return 0;
+}
+
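
The pointer case is the subtle one: byte offsets are attached with an i8 GEP rather than an integer add. A hedged sketch of what a SCEV like (8 + @g) becomes (G and Ctx assumed):

    Constant *P  = ConstantExpr::getBitCast(G, Type::getInt8PtrTy(Ctx));
    Constant *C8 = ConstantInt::get(Type::getInt64Ty(Ctx), 8);
    Constant *R  = ConstantExpr::getGetElementPtr(P, C8);
    // i.e. getelementptr(i8* bitcast @g, i64 8). Integer-only operands fall
    // back to getAdd/getMul/getUDiv, and addrecs/smax/umax yield null.
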
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
if (isa<SCEVConstant>(V)) return V;
@@ -4973,11 +5157,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
const SCEV *OpV = getSCEVAtScope(OrigV, L);
MadeImprovement |= OrigV != OpV;
- Constant *C = 0;
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
- C = SC->getValue();
- if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
- C = dyn_cast<Constant>(SU->getValue());
+ Constant *C = BuildConstantFromSCEV(OpV);
if (!C) return V;
if (C->getType() != Op->getType())
C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
@@ -4992,10 +5172,14 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C = 0;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD);
- else
+ Operands[0], Operands[1], TD,
+ TLI);
+ else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isVolatile())
+ C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ } else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- Operands, TD);
+ Operands, TD, TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -5350,10 +5534,10 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
// behavior. Loops must exhibit defined behavior until a wrapped value is
// actually used. So the trip count computed by udiv could be smaller than the
// number of well-defined iterations.
- if (AddRec->getNoWrapFlags(SCEV::FlagNW))
+ if (AddRec->getNoWrapFlags(SCEV::FlagNW)) {
// FIXME: We really want an "isexact" bit for udiv.
return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
-
+ }
// Then, try to solve the above equation provided that Start is constant.
if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
@@ -6089,8 +6273,9 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
return getCouldNotCompute();
// Check to see if we have a flag which makes analysis easy.
- bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
- AddRec->getNoWrapFlags(SCEV::FlagNUW);
+ bool NoWrap = isSigned ?
+ AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNW)) :
+ AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNW));
if (AddRec->isAffine()) {
unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
@@ -6381,6 +6566,7 @@ bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
return false;
}
@@ -6417,6 +6603,7 @@ void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<LoopInfo>();
AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequired<TargetLibraryInfo>();
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 47f0f32..f3cf549 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -73,9 +73,14 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
"InsertNoopCastOfTo cannot change sizes!");
// Short-circuit unnecessary bitcasts.
- if (Op == Instruction::BitCast && V->getType() == Ty)
- return V;
-
+ if (Op == Instruction::BitCast) {
+ if (V->getType() == Ty)
+ return V;
+ if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ if (CI->getOperand(0)->getType() == Ty)
+ return CI->getOperand(0);
+ }
+ }
// Short-circuit unnecessary inttoptr<->ptrtoint casts.
if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
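
The extra bitcast case lets round-trip casts collapse instead of stacking; sketched effect (Builder, P, and the pointer types assumed):

    Value *B = Builder.CreateBitCast(P, Int32PtrTy);  // i8* -> i32*
    // Before: asking for B back as i8* emitted yet another bitcast of B.
    // Now: the expander peeks through the cast and simply returns P.
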
@@ -929,6 +934,36 @@ bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
}
}
+/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
+/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
+/// need to materialize IV increments elsewhere to handle difficult situations.
+Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+ Type *ExpandTy, Type *IntTy,
+ bool useSubtract) {
+ Value *IncV;
+ // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
+ if (ExpandTy->isPointerTy()) {
+ PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+ // If the step isn't constant, don't use an implicitly scaled GEP, because
+ // that would require a multiply inside the loop.
+ if (!isa<ConstantInt>(StepV))
+ GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
+ GEPPtrTy->getAddressSpace());
+ const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
+ IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
+ if (IncV->getType() != PN->getType()) {
+ IncV = Builder.CreateBitCast(IncV, PN->getType());
+ rememberInstruction(IncV);
+ }
+ } else {
+ IncV = useSubtract ?
+ Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
+ Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
+ rememberInstruction(IncV);
+ }
+ return IncV;
+}
+
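
In rough IR terms, the cases the extracted helper emits (a sketch, names assumed):

    // Integer IV:              %x.iv.next = add i64 %x.iv, %step
    // Negative variable step:  %x.iv.next = sub i64 %x.iv, %step (useSubtract)
    // Pointer IV, const step:  %p.iv.next = getelementptr T* %p.iv, i64 C
    // Pointer IV, var step:    GEP over i1* so no multiply is needed in the
    //                          loop, then a bitcast back to the PHI's type.
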
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
@@ -993,16 +1028,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
L->getHeader()));
- // Expand code for the step value. Insert instructions right before the
- // terminator corresponding to the back-edge. Do this before creating the PHI
- // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
- // negative, insert a sub instead of an add for the increment (unless it's a
- // constant, because subtracts of constants are canonicalized to adds).
+ // Expand code for the step value. Do this before creating the PHI so that PHI
+ // reuse code doesn't see an incomplete PHI.
const SCEV *Step = Normalized->getStepRecurrence(SE);
- bool isPointer = ExpandTy->isPointerTy();
- bool isNegative = !isPointer && isNonConstantNegative(Step);
- if (isNegative)
+ // If the stride is negative, insert a sub instead of an add for the increment
+ // (unless it's a constant, because subtracts of constants are canonicalized
+ // to adds).
+ bool useSubtract = !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+ if (useSubtract)
Step = SE.getNegativeSCEV(Step);
+ // Expand the step somewhere that dominates the loop header.
Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
// Create the PHI.
@@ -1023,33 +1058,14 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
continue;
}
- // Create a step value and add it to the PHI. If IVIncInsertLoop is
- // non-null and equal to the addrec's loop, insert the instructions
- // at IVIncInsertPos.
+ // Create a step value and add it to the PHI.
+ // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
+ // instructions at IVIncInsertPos.
Instruction *InsertPos = L == IVIncInsertLoop ?
IVIncInsertPos : Pred->getTerminator();
Builder.SetInsertPoint(InsertPos);
- Value *IncV;
- // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
- if (isPointer) {
- PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
- // If the step isn't constant, don't use an implicitly scaled GEP, because
- // that would require a multiply inside the loop.
- if (!isa<ConstantInt>(StepV))
- GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
- GEPPtrTy->getAddressSpace());
- const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
- IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
- if (IncV->getType() != PN->getType()) {
- IncV = Builder.CreateBitCast(IncV, PN->getType());
- rememberInstruction(IncV);
- }
- } else {
- IncV = isNegative ?
- Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
- Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
- rememberInstruction(IncV);
- }
+ Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+
PN->addIncoming(IncV, Pred);
}
@@ -1124,10 +1140,31 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// For an expansion to use the postinc form, the client must call
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
- assert((!isa<Instruction>(Result) ||
- SE.DT->dominates(cast<Instruction>(Result),
- Builder.GetInsertPoint())) &&
- "postinc expansion does not dominate use");
+ if (isa<Instruction>(Result)
+ && !SE.DT->dominates(cast<Instruction>(Result),
+ Builder.GetInsertPoint())) {
+ // The induction variable's postinc expansion does not dominate this use.
+ // IVUsers tries to prevent this case, so it is rare. However, it can
+ // happen when an IVUser outside the loop is not dominated by the latch
+ // block. Adjusting IVIncInsertPos before expansion begins cannot handle
+ // all cases. Consider a phi outside the loop whose operand is replaced during
+ // expansion with the value of the postinc user. Without fundamentally
+ // changing the way postinc users are tracked, the only remedy is
+ // inserting an extra IV increment. StepV might fold into PostLoopOffset,
+ // but hopefully expandCodeFor handles that.
+ bool useSubtract =
+ !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+ if (useSubtract)
+ Step = SE.getNegativeSCEV(Step);
+ // Expand the step somewhere that dominates the loop header.
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+ Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+ // Restore the insertion point to the place where the caller has
+ // determined dominates all uses.
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+ Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+ }
}
// Re-apply any non-loop-dominating scale.
diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp
index d8c207b..035bcea 100644
--- a/lib/Analysis/SparsePropagation.cpp
+++ b/lib/Analysis/SparsePropagation.cpp
@@ -327,13 +327,13 @@ void SparseSolver::Solve(Function &F) {
}
void SparseSolver::Print(Function &F, raw_ostream &OS) const {
- OS << "\nFUNCTION: " << F.getNameStr() << "\n";
+ OS << "\nFUNCTION: " << F.getName() << "\n";
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!BBExecutable.count(BB))
OS << "INFEASIBLE: ";
OS << "\t";
if (BB->hasName())
- OS << BB->getNameStr() << ":\n";
+ OS << BB->getName() << ":\n";
else
OS << "; anon bb\n";
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
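These getNameStr() -> getName() changes are mechanical: the old accessor returned a std::string by value, the new one returns a lightweight llvm::StringRef view, and raw_ostream prints either, so call sites simply stop allocating. A sketch using std::string_view as a stand-in for StringRef:

    #include <iostream>
    #include <string>
    #include <string_view>

    struct Value {
      std::string Name;
      std::string getNameStr() const { return Name; }   // copies the bytes
      std::string_view getName() const { return Name; } // non-owning view
    };

    int main() {
      Value F{"main"};
      std::cout << "FUNCTION: " << F.getName() << "\n"; // no allocation
    }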
diff --git a/lib/Analysis/Trace.cpp b/lib/Analysis/Trace.cpp
index 68a39cd..ff5010b 100644
--- a/lib/Analysis/Trace.cpp
+++ b/lib/Analysis/Trace.cpp
@@ -34,7 +34,7 @@ Module *Trace::getModule() const {
///
void Trace::print(raw_ostream &O) const {
Function *F = getFunction();
- O << "; Trace from function " << F->getNameStr() << ", blocks:\n";
+ O << "; Trace from function " << F->getName() << ", blocks:\n";
for (const_iterator i = begin(), e = end(); i != e; ++i) {
O << "; ";
WriteAsOperand(O, *i, true, getModule());
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 4d94f61..ef19e06 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -63,13 +63,14 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Mask.getBitWidth();
- assert((V->getType()->isIntOrIntVectorTy() || V->getType()->isPointerTy())
- && "Not integer or pointer type!");
+ assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarType()->isPointerTy()) &&
+ "Not integer or pointer type!");
assert((!TD ||
TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
(!V->getType()->isIntOrIntVectorTy() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
+ KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
"V, Mask, KnownOne and KnownZero should have same BitWidth");
@@ -103,14 +104,16 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
- Type *ObjectType = GV->getType()->getElementType();
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (!GV->isDeclaration() && !GV->mayBeOverridden())
- Align = TD->getPrefTypeAlignment(ObjectType);
- else
- Align = TD->getABITypeAlignment(ObjectType);
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
+ Align = TD->getPreferredAlignment(GVar);
+ else
+ Align = TD->getABITypeAlignment(ObjectType);
+ }
}
if (Align > 0)
KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
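Alignment feeds known-zero bits because an N-byte-aligned address has its low log2(N) bits clear; that is what the getLowBitsSet call records. Checked on a concrete object:

    #include <cassert>
    #include <cstdint>

    int main() {
      alignas(16) static char Buf[16];
      uintptr_t P = reinterpret_cast<uintptr_t>(Buf);
      assert((P & 0xF) == 0); // low 4 bits of a 16-byte-aligned pointer are zero
    }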
@@ -201,9 +204,36 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,Depth+1);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
+ bool isKnownNegative = false;
+ bool isKnownNonNegative = false;
+ // If the multiplication is known not to overflow, compute the sign bit.
+ if (Mask.isNegative() &&
+ cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap()) {
+ Value *Op1 = I->getOperand(1), *Op2 = I->getOperand(0);
+ if (Op1 == Op2) {
+ // The product of a number with itself is non-negative.
+ isKnownNonNegative = true;
+ } else {
+ bool isKnownNonNegative1 = KnownZero.isNegative();
+ bool isKnownNonNegative2 = KnownZero2.isNegative();
+ bool isKnownNegative1 = KnownOne.isNegative();
+ bool isKnownNegative2 = KnownOne2.isNegative();
+ // The product of two numbers with the same sign is non-negative.
+ isKnownNonNegative = (isKnownNegative1 && isKnownNegative2) ||
+ (isKnownNonNegative1 && isKnownNonNegative2);
+ // The product of a negative number and a non-negative number is either
+ // negative or zero.
+ if (!isKnownNonNegative)
+ isKnownNegative = (isKnownNegative1 && isKnownNonNegative2 &&
+ isKnownNonZero(Op2, TD, Depth)) ||
+ (isKnownNegative2 && isKnownNonNegative1 &&
+ isKnownNonZero(Op1, TD, Depth));
+ }
+ }
+
// If low bits are zero in either operand, output low known-0 bits.
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
@@ -220,6 +250,17 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
APInt::getHighBitsSet(BitWidth, LeadZ);
KnownZero &= Mask;
+
+ // Only make use of no-wrap flags if we failed to compute the sign bit
+ // directly. This matters if the multiplication always overflows, in
+ // which case we prefer to follow the result of the direct computation,
+ // though as the program is invoking undefined behaviour we can choose
+ // whatever we like here.
+ if (isKnownNonNegative && !KnownOne.isNegative())
+ KnownZero.setBit(BitWidth - 1);
+ else if (isKnownNegative && !KnownZero.isNegative())
+ KnownOne.setBit(BitWidth - 1);
+
return;
}
case Instruction::UDiv: {
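The sign facts the new nsw-multiply code encodes can be checked on small integers; no-overflow is assumed throughout, which is exactly what the nsw flag supplies:

    #include <cassert>

    int main() {
      assert(-3 * -4 >= 0 && 3 * 4 >= 0); // same sign => non-negative product
      for (int X = -2; X <= 2; ++X)
        assert(X * X >= 0);               // X * X, even with X's sign unknown
      // Negative times non-negative is zero unless the non-negative side is
      // known non-zero, in which case the product is strictly negative.
      assert(-3 * 0 == 0 && -3 * 4 < 0);
    }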
@@ -712,10 +753,15 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
- return CI->getValue().isPowerOf2();
- // TODO: Handle vector constants.
+bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+ unsigned Depth) {
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ if (C->isNullValue())
+ return OrZero;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ return CI->getValue().isPowerOf2();
+ // TODO: Handle vector constants.
+ }
// 1 << X is clearly a power of two if the one is not shifted off the end. If
// it is shifted off the end then the result is undefined.
@@ -731,12 +777,29 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
if (Depth++ == MaxDepth)
return false;
+ Value *X = 0, *Y = 0;
+ // A shift of a power of two is a power of two or zero.
+ if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
+ match(V, m_Shr(m_Value(X), m_Value()))))
+ return isPowerOfTwo(X, TD, /*OrZero*/true, Depth);
+
if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
- return isPowerOfTwo(ZI->getOperand(0), TD, Depth);
+ return isPowerOfTwo(ZI->getOperand(0), TD, OrZero, Depth);
if (SelectInst *SI = dyn_cast<SelectInst>(V))
- return isPowerOfTwo(SI->getTrueValue(), TD, Depth) &&
- isPowerOfTwo(SI->getFalseValue(), TD, Depth);
+ return isPowerOfTwo(SI->getTrueValue(), TD, OrZero, Depth) &&
+ isPowerOfTwo(SI->getFalseValue(), TD, OrZero, Depth);
+
+ if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
+ // A power of two and'd with anything is a power of two or zero.
+ if (isPowerOfTwo(X, TD, /*OrZero*/true, Depth) ||
+ isPowerOfTwo(Y, TD, /*OrZero*/true, Depth))
+ return true;
+ // X & (-X) is always a power of two or zero.
+ if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
+ return true;
+ return false;
+ }
// An exact divide or right shift can only shift off zero bits, so the result
// is a power of two only if the first operand is a power of two and not
@@ -745,7 +808,7 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
match(V, m_UDiv(m_Value(), m_Value()))) {
PossiblyExactOperator *PEO = cast<PossiblyExactOperator>(V);
if (PEO->isExact())
- return isPowerOfTwo(PEO->getOperand(0), TD, Depth);
+ return isPowerOfTwo(PEO->getOperand(0), TD, OrZero, Depth);
}
return false;
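The X & (-X) case relies on a standard bit trick: two's-complement negation isolates the lowest set bit, so the result is always a power of two, or zero when X is zero -- precisely the OrZero contract. Exhaustively checkable over a small range:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 1024; ++X) {
        uint32_t LowBit = X & -X; // isolates the lowest set bit
        assert(LowBit == 0 || (LowBit & (LowBit - 1)) == 0); // power of 2 or 0
      }
    }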
@@ -767,7 +830,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
}
// The remaining tests are all recursive, so bail out if we hit the limit.
- if (Depth++ == MaxDepth)
+ if (Depth++ >= MaxDepth)
return false;
unsigned BitWidth = getBitWidth(V->getType(), TD);
@@ -785,7 +848,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
// if the lowest bit is shifted off the end.
if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
- BinaryOperator *BO = cast<BinaryOperator>(V);
+ OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
return isKnownNonZero(X, TD, Depth);
@@ -799,7 +862,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
// defined if the sign bit is shifted off the end.
else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
// shr exact can only shift out zero bits.
- BinaryOperator *BO = cast<BinaryOperator>(V);
+ PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
if (BO->isExact())
return isKnownNonZero(X, TD, Depth);
@@ -810,7 +873,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
}
// div exact can only produce a zero if the dividend is zero.
else if (match(V, m_IDiv(m_Value(X), m_Value()))) {
- BinaryOperator *BO = cast<BinaryOperator>(V);
+ PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
if (BO->isExact())
return isKnownNonZero(X, TD, Depth);
}
@@ -846,9 +909,18 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
}
// The sum of a non-negative number and a power of two is not zero.
- if (XKnownNonNegative && isPowerOfTwo(Y, TD, Depth))
+ if (XKnownNonNegative && isPowerOfTwo(Y, TD, /*OrZero*/false, Depth))
return true;
- if (YKnownNonNegative && isPowerOfTwo(X, TD, Depth))
+ if (YKnownNonNegative && isPowerOfTwo(X, TD, /*OrZero*/false, Depth))
+ return true;
+ }
+ // X * Y.
+ else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
+ OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+ // If X and Y are non-zero then so is X * Y as long as the multiplication
+ // does not overflow.
+ if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
+ isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
return true;
}
// (C ? X : Y) != 0 if X != 0 and Y != 0.
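The no-wrap requirement on the new X * Y case is essential: with wrapping arithmetic, two non-zero values can multiply to zero. A concrete demonstration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t A = 1u << 16, B = 1u << 16;
      assert(A != 0 && B != 0 && A * B == 0); // 2^32 wraps to 0 in 32 bits
      // The true product is non-zero -- the guarantee nuw/nsw provides.
      assert(uint64_t(A) * uint64_t(B) != 0);
    }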
@@ -1298,6 +1370,8 @@ Value *llvm::isBytewiseValue(Value *V) {
return Val;
}
+
+ // FIXME: Vector types (e.g., <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>).
// Conceptually, we could handle things like:
// %a = zext i8 %X to i16
@@ -1486,7 +1560,8 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const TargetData &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr);
- if (PtrOp == 0) return Ptr;
+ if (PtrOp == 0 || Ptr->getType()->isVectorTy())
+ return Ptr;
// Just look through bitcasts.
if (PtrOp->getOpcode() == Instruction::BitCast)
@@ -1525,8 +1600,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
- uint64_t Offset,
- bool StopAtNul) {
+ uint64_t Offset, bool StopAtNul) {
// If V is NULL then return false.
if (V == NULL) return false;
@@ -1536,7 +1610,7 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
// If the value is not a GEP instruction nor a constant expression with a
// GEP instruction, then return false because ConstantArray can't occur
- // any other way
+ // any other way.
const User *GEP = 0;
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
GEP = GEPI;
@@ -1576,7 +1650,7 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
StopAtNul);
}
-
+
// The GEP instruction, constant or instruction, must reference a global
// variable that is a constant and is initialized. The referenced constant
// initializer is the array that we'll use for optimization.
@@ -1585,8 +1659,8 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
return false;
const Constant *GlobalInit = GV->getInitializer();
- // Handle the ConstantAggregateZero case
- if (isa<ConstantAggregateZero>(GlobalInit)) {
+ // Handle the all-zeros case
+ if (GlobalInit->isNullValue()) {
// This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
Str.clear();
@@ -1667,6 +1741,14 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
return Len1;
}
+ // As a special case, "@string = constant i8 0" is also a string with zero
+ // length, not wrapped in a bitcast or GEP.
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ if (GV->isConstant() && GV->hasDefinitiveInitializer())
+ if (GV->getInitializer()->isNullValue()) return 1;
+ return 0;
+ }
+
// If the value is not a GEP instruction nor a constant expression with a
// GEP instruction, then return unknown.
User *GEP = 0;
@@ -1793,3 +1875,64 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
}
return true;
}
+
+bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
+ const TargetData *TD) {
+ for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
+ if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
+ if (C->canTrap())
+ return false;
+
+ switch (Inst->getOpcode()) {
+ default:
+ return true;
+ case Instruction::UDiv:
+ case Instruction::URem:
+ // x / y is undefined if y == 0, but calculations like x / 3 are safe.
+ return isKnownNonZero(Inst->getOperand(1), TD);
+ case Instruction::SDiv:
+ case Instruction::SRem: {
+ Value *Op = Inst->getOperand(1);
+ // x / y is undefined if y == 0
+ if (!isKnownNonZero(Op, TD))
+ return false;
+ // x / y might be undefined if y == -1
+ unsigned BitWidth = getBitWidth(Op->getType(), TD);
+ if (BitWidth == 0)
+ return false;
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ ComputeMaskedBits(Op, APInt::getAllOnesValue(BitWidth),
+ KnownZero, KnownOne, TD);
+ return !!KnownZero;
+ }
+ case Instruction::Load: {
+ const LoadInst *LI = cast<LoadInst>(Inst);
+ if (!LI->isUnordered())
+ return false;
+ return LI->getPointerOperand()->isDereferenceablePointer();
+ }
+ case Instruction::Call:
+ return false; // The called function could have undefined behavior or
+ // side-effects.
+ // FIXME: We should special-case some intrinsics (bswap,
+ // overflow-checking arithmetic, etc.)
+ case Instruction::VAArg:
+ case Instruction::Alloca:
+ case Instruction::Invoke:
+ case Instruction::PHI:
+ case Instruction::Store:
+ case Instruction::Ret:
+ case Instruction::Br:
+ case Instruction::IndirectBr:
+ case Instruction::Switch:
+ case Instruction::Unwind:
+ case Instruction::Unreachable:
+ case Instruction::Fence:
+ case Instruction::LandingPad:
+ case Instruction::AtomicRMW:
+ case Instruction::AtomicCmpXchg:
+ case Instruction::Resume:
+ return false; // Misc instructions which have effects
+ }
+}
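In the SDiv/SRem case, "return !!KnownZero;" asks whether any bit of the divisor is known zero, which rules out -1 (all-ones); together with the non-zero check, that covers both hazards of signed division. Why -1 matters, and the guard a caller of isSafeToSpeculativelyExecute is being spared:

    #include <climits>
    #include <cstdio>

    int main() {
      int X = INT_MIN, Y = -1;
      // X / Y overflows: the result 2^31 does not fit in int, so the division
      // is only safe once Y != 0 and (X, Y) != (INT_MIN, -1) are established.
      if (Y != 0 && !(X == INT_MIN && Y == -1))
        std::printf("%d\n", X / Y);
      else
        std::printf("not safe to divide\n");
    }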