path: root/lib/Transforms/IPO
author     Stephen Hines <srhines@google.com>  2014-04-23 16:57:46 -0700
committer  Stephen Hines <srhines@google.com>  2014-04-24 15:53:16 -0700
commit     36b56886974eae4f9c5ebc96befd3e7bfe5de338 (patch)
tree       e6cfb69fbbd937f450eeb83bfb83b9da3b01275a /lib/Transforms/IPO
parent     69a8640022b04415ae9fac62f8ab090601d8f889 (diff)
Update to LLVM 3.5a.
Change-Id: Ifadecab779f128e62e430c2b4f6ddd84953ed617
Diffstat (limited to 'lib/Transforms/IPO')
-rw-r--r--  lib/Transforms/IPO/Android.mk                     2
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp         87
-rw-r--r--  lib/Transforms/IPO/BarrierNoopPass.cpp            2
-rw-r--r--  lib/Transforms/IPO/ConstantMerge.cpp             19
-rw-r--r--  lib/Transforms/IPO/DeadArgumentElimination.cpp   69
-rw-r--r--  lib/Transforms/IPO/ExtractGV.cpp                  2
-rw-r--r--  lib/Transforms/IPO/FunctionAttrs.cpp             39
-rw-r--r--  lib/Transforms/IPO/GlobalDCE.cpp                  2
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                313
-rw-r--r--  lib/Transforms/IPO/IPConstantPropagation.cpp     31
-rw-r--r--  lib/Transforms/IPO/IPO.cpp                        1
-rw-r--r--  lib/Transforms/IPO/InlineAlways.cpp              12
-rw-r--r--  lib/Transforms/IPO/InlineSimple.cpp              27
-rw-r--r--  lib/Transforms/IPO/Inliner.cpp                   40
-rw-r--r--  lib/Transforms/IPO/Internalize.cpp               24
-rw-r--r--  lib/Transforms/IPO/LLVMBuild.txt                  2
-rw-r--r--  lib/Transforms/IPO/LoopExtractor.cpp             17
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp           230
-rw-r--r--  lib/Transforms/IPO/PartialInlining.cpp           25
-rw-r--r--  lib/Transforms/IPO/PassManagerBuilder.cpp        44
-rw-r--r--  lib/Transforms/IPO/PruneEH.cpp                   10
-rw-r--r--  lib/Transforms/IPO/StripDeadPrototypes.cpp        2
-rw-r--r--  lib/Transforms/IPO/StripSymbols.cpp              32
23 files changed, 563 insertions, 469 deletions
diff --git a/lib/Transforms/IPO/Android.mk b/lib/Transforms/IPO/Android.mk
index dcf48df..1fe7d63 100644
--- a/lib/Transforms/IPO/Android.mk
+++ b/lib/Transforms/IPO/Android.mk
@@ -38,6 +38,7 @@ include $(BUILD_HOST_STATIC_LIBRARY)
# For the device
# =====================================================
+ifneq (true,$(DISABLE_LLVM_DEVICE_BUILDS))
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(transforms_ipo_SRC_FILES)
@@ -48,3 +49,4 @@ LOCAL_MODULE_TAGS := optional
include $(LLVM_DEVICE_BUILD_MK)
include $(LLVM_GEN_INTRINSICS_MK)
include $(BUILD_STATIC_LIBRARY)
+endif
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index df08091..48d3fba 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -37,13 +37,13 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <set>
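Note: this include shuffle reflects LLVM 3.5's header reorganization, which moved IR-centric headers such as CFG.h and CallSite.h from llvm/Support/ into llvm/IR/. A minimal sketch of the new-style include and the (unchanged) CallSite helper, assuming the 3.5-era API:

    // Sketch only: CallSite now comes from llvm/IR, not llvm/Support.
    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Returns the callee if I is a direct call or invoke, null otherwise.
    static Function *getDirectCallee(Instruction *I) {
      CallSite CS(I);  // a null CallSite if I is neither call nor invoke
      return CS ? CS.getCalledFunction() : 0;
    }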
@@ -58,12 +58,12 @@ namespace {
/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
///
struct ArgPromotion : public CallGraphSCCPass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
CallGraphSCCPass::getAnalysisUsage(AU);
}
- virtual bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
: CallGraphSCCPass(ID), maxElements(maxElements) {
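Note: the virtual-to-override rewrites in this hunk (and throughout the commit) are part of a tree-wide C++11 cleanup. Marking an overrider explicitly turns signature drift into a compile error; an illustrative sketch with simplified types, not the real pass hierarchy:

    struct BasePass {
      virtual ~BasePass() {}
      virtual bool runOnSCC(int &SCC) = 0;
    };
    struct MyPass : BasePass {
      // If BasePass::runOnSCC's signature ever changes, this line becomes
      // a hard error instead of silently declaring a new, unrelated virtual.
      bool runOnSCC(int &SCC) override { return false; }
    };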
@@ -88,7 +88,7 @@ char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
"Promote 'by reference' arguments to scalars", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
"Promote 'by reference' arguments to scalars", false, false)
@@ -136,11 +136,10 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// transform functions that have indirect callers. Also see if the function
// is self-recursive.
bool isSelfRecursive = false;
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
- UI != E; ++UI) {
- CallSite CS(*UI);
+ for (Use &U : F->uses()) {
+ CallSite CS(U.getUser());
// Must be a direct call.
- if (CS.getInstruction() == 0 || !CS.isCallee(UI)) return 0;
+ if (CS.getInstruction() == 0 || !CS.isCallee(&U)) return 0;
if (CS.getInstruction()->getParent()->getParent() == F)
isSelfRecursive = true;
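Note: this is the first of many hunks converting manual use_iterator loops to the range helpers added for 3.5. uses() yields Use objects (the edges, which know their operand slot), while users() yields the consuming User objects; a sketch of the distinction under the assumed 3.5-era Value API:

    #include "llvm/IR/Value.h"
    using namespace llvm;

    void walkUses(Value *V) {
      for (Use &U : V->uses()) {
        // A Use is the edge itself: U.getUser() is the consumer, and &U
        // names the exact operand slot -- what CallSite::isCallee(&U)
        // checks in the loop above.
        (void)U;
      }
      for (User *U : V->users()) {
        // Just the consumers, for when the operand position is irrelevant.
        (void)U;
      }
    }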
@@ -155,7 +154,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
// If this is a byval argument, and if the aggregate type is small, just
- // pass the elements, which is always safe.
+ // pass the elements, which is always safe. This does not apply to
+ // inalloca.
if (PtrArg->hasByValAttr()) {
if (StructType *STy = dyn_cast<StructType>(AgTy)) {
if (maxElements > 0 && STy->getNumElements() > maxElements) {
@@ -201,7 +201,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
}
// Otherwise, see if we can promote the pointer to its value.
- if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValAttr()))
+ if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr()))
ArgsToPromote.insert(PtrArg);
}
@@ -221,9 +221,8 @@ static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
// Look at all call sites of the function. At this point we know we only
// have direct callees.
- for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
- UI != E; ++UI) {
- CallSite CS(*UI);
+ for (User *U : Callee->users()) {
+ CallSite CS(U);
assert(CS && "Should only have direct calls!");
if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
@@ -301,7 +300,8 @@ static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark,
/// This method limits promotion of aggregates to only promote up to three
/// elements of the aggregate in order to avoid exploding the number of
/// arguments passed in.
-bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
+bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
+ bool isByValOrInAlloca) const {
typedef std::set<IndicesVector> GEPIndicesSet;
// Quick exit for unused arguments
@@ -323,6 +323,9 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
//
// This set will contain all sets of indices that are loaded in the entry
// block, and thus are safe to unconditionally load in the caller.
+ //
+ // This optimization is also safe for InAlloca parameters, because it verifies
+ // that the address isn't captured.
GEPIndicesSet SafeToUnconditionallyLoad;
// This set contains all the sets of indices that we are planning to promote.
@@ -330,7 +333,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
GEPIndicesSet ToPromote;
// If the pointer is always valid, any load with first index 0 is valid.
- if (isByVal || AllCallersPassInValidPointerForArgument(Arg))
+ if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg))
SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
// First, iterate the entry block and mark loads of (geps of) arguments as
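Note on the byval/inalloca pairing used here: both attributes guarantee the pointer refers to caller-owned argument memory that is valid on function entry, which is why either one justifies seeding SafeToUnconditionallyLoad. A hedged sketch of the predicate this hunk switches to:

    // Sketch (assumed 3.5-era Argument API): one predicate covers both.
    static bool pointerValidOnEntry(const llvm::Argument *A) {
      // byval: the caller copies the aggregate into fresh memory.
      // inalloca: the caller builds the argument inside its own alloca.
      // Either way the pointee is valid on entry, so unconditional loads
      // in the callee cannot trap.
      return A->hasByValOrInAllocaAttr();
    }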
@@ -370,17 +373,16 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
SmallVector<LoadInst*, 16> Loads;
IndicesVector Operands;
- for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
- UI != E; ++UI) {
- User *U = *UI;
+ for (Use &U : Arg->uses()) {
+ User *UR = U.getUser();
Operands.clear();
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
// Don't hack volatile/atomic loads
if (!LI->isSimple()) return false;
Loads.push_back(LI);
// Direct loads are equivalent to a GEP with a zero index and then a load.
Operands.push_back(0);
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
if (GEP->use_empty()) {
// Dead GEPs cause trouble later. Just remove them if we run into
// them.
@@ -389,7 +391,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// TODO: This runs the above loop over and over again for dead GEPs
// Couldn't we just increment the UI iterator earlier and erase the
// use?
- return isSafeToPromoteArgument(Arg, isByVal);
+ return isSafeToPromoteArgument(Arg, isByValOrInAlloca);
}
// Ensure that all of the indices are constants.
@@ -401,9 +403,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
return false; // Not a constant operand GEP!
// Ensure that the only users of the GEP are load instructions.
- for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
- UI != E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (User *GEPU : GEP->users())
+ if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
// Don't hack volatile/atomic loads
if (!LI->isSimple()) return false;
Loads.push_back(LI);
@@ -549,16 +550,15 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// In this table, we will track which indices are loaded from the argument
// (where direct loads are tracked as no indices).
ScalarizeTable &ArgIndices = ScalarizedElements[I];
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
- ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
+ for (User *U : I->users()) {
+ Instruction *UI = cast<Instruction>(U);
+ assert(isa<LoadInst>(UI) || isa<GetElementPtrInst>(UI));
IndicesVector Indices;
- Indices.reserve(User->getNumOperands() - 1);
+ Indices.reserve(UI->getNumOperands() - 1);
// Since loads will only have a single operand, and GEPs only a single
// non-index operand, this will record direct loads without any indices,
// and gep+loads with the GEP indices.
- for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
+ for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
II != IE; ++II)
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
// GEPs with a single 0 index can be merged with direct loads
@@ -566,11 +566,11 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Indices.clear();
ArgIndices.insert(Indices);
LoadInst *OrigLoad;
- if (LoadInst *L = dyn_cast<LoadInst>(User))
+ if (LoadInst *L = dyn_cast<LoadInst>(UI))
OrigLoad = L;
else
// Take any load; we will use it only to update Alias Analysis
- OrigLoad = cast<LoadInst>(User->use_back());
+ OrigLoad = cast<LoadInst>(UI->user_back());
OriginalLoads[std::make_pair(I, Indices)] = OrigLoad;
}
@@ -621,8 +621,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Get the callgraph information that we need to update to reflect our
// changes.
- CallGraph &CG = getAnalysis<CallGraph>();
-
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+
// Get a new callgraph node for NF.
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
@@ -631,7 +631,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
//
SmallVector<Value*, 16> Args;
while (!F->use_empty()) {
- CallSite CS(F->use_back());
+ CallSite CS(F->user_back());
assert(CS.getCalledFunction() == F);
Instruction *Call = CS.getInstruction();
const AttributeSet &CallPAL = CS.getAttributes();
@@ -807,6 +807,15 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
I->replaceAllUsesWith(TheAlloca);
TheAlloca->takeName(I);
AA.replaceWithNewValue(I, TheAlloca);
+
+ // If the alloca is used in a call, we must clear the tail flag since
+ // the callee now uses an alloca from the caller.
+ for (User *U : TheAlloca->users()) {
+ CallInst *Call = dyn_cast<CallInst>(U);
+ if (!Call)
+ continue;
+ Call->setTailCall(false);
+ }
continue;
}
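Note: the tail-flag clearing added above is worth a gloss. A 'tail' call promises the callee touches nothing in the caller's frame; once the promoted function rebuilds its byval aggregate in a local alloca, any onward call receiving that pointer breaks the promise. Hypothetical pseudo-IR, then the fix-up condensed:

    // Inside the promoted function:
    //   before: %p was a byval parameter (memory owned by our caller), so
    //           "tail call void @g(%T* %p)" was legal.
    //   after:  %tmp = alloca %T            ; now own-frame memory
    //           call void @g(%T* %tmp)      ; 'tail' must be dropped
    for (User *U : TheAlloca->users())
      if (CallInst *CI = dyn_cast<CallInst>(U))
        CI->setTailCall(false);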
@@ -821,7 +830,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
ScalarizeTable &ArgIndices = ScalarizedElements[I];
while (!I->use_empty()) {
- if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
assert(ArgIndices.begin()->empty() &&
"Load element should sort to front!");
I2->setName(I->getName()+".val");
@@ -831,7 +840,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
<< "' in function '" << F->getName() << "'\n");
} else {
- GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
+ GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
IndicesVector Operands;
Operands.reserve(GEP->getNumIndices());
for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
@@ -861,7 +870,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// All of the uses must be load instructions. Replace them all with
// the argument specified by ArgNo.
while (!GEP->use_empty()) {
- LoadInst *L = cast<LoadInst>(GEP->use_back());
+ LoadInst *L = cast<LoadInst>(GEP->user_back());
L->replaceAllUsesWith(TheArg);
AA.replaceWithNewValue(L, TheArg);
L->eraseFromParent();
diff --git a/lib/Transforms/IPO/BarrierNoopPass.cpp b/lib/Transforms/IPO/BarrierNoopPass.cpp
index 2e32240..6af1043 100644
--- a/lib/Transforms/IPO/BarrierNoopPass.cpp
+++ b/lib/Transforms/IPO/BarrierNoopPass.cpp
@@ -36,7 +36,7 @@ public:
initializeBarrierNoopPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M) { return false; }
+ bool runOnModule(Module &M) override { return false; }
};
}
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index d94c0f4..5c3acea 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -42,7 +42,7 @@ namespace {
// For this pass, process all of the globals in the module, eliminating
// duplicate constants.
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
// Return true iff we can determine the alignment of this global variable.
bool hasKnownAlignment(GlobalVariable *GV) const;
@@ -51,7 +51,7 @@ namespace {
// alignment to a concrete value.
unsigned getAlignment(GlobalVariable *GV) const;
- const DataLayout *TD;
+ const DataLayout *DL;
};
}
@@ -77,8 +77,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
}
// True if A is better than B.
-static bool IsBetterCannonical(const GlobalVariable &A,
- const GlobalVariable &B) {
+static bool IsBetterCanonical(const GlobalVariable &A,
+ const GlobalVariable &B) {
if (!A.hasLocalLinkage() && B.hasLocalLinkage())
return true;
@@ -89,20 +89,21 @@ static bool IsBetterCannonical(const GlobalVariable &A,
}
bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
- return TD || GV->getAlignment() != 0;
+ return DL || GV->getAlignment() != 0;
}
unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
unsigned Align = GV->getAlignment();
if (Align)
return Align;
- if (TD)
- return TD->getPreferredAlignment(GV);
+ if (DL)
+ return DL->getPreferredAlignment(GV);
return 0;
}
bool ConstantMerge::runOnModule(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : 0;
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
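Note: the TD-to-DL renames in this file (and in GlobalOpt.cpp below) track an API change: DataLayout stopped being a pass itself in 3.5 and is obtained through DataLayoutPass, which may be absent. A sketch of the retrieval pattern with a hypothetical pass, assuming the 3.5-era interface:

    bool MyModulePass::runOnModule(Module &M) {
      DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
      const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
      // Every consumer must tolerate DL == 0, as hasKnownAlignment()
      // above does.
      return false;
    }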
@@ -160,7 +161,7 @@ bool ConstantMerge::runOnModule(Module &M) {
// If this is the first constant we find or if the old one is local,
// replace with the current one. If the current is externally visible
// it cannot be replaced, but can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
+ if (Slot == 0 || IsBetterCanonical(*GV, *Slot))
Slot = GV;
}
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 911c14e..1aba3df 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -23,17 +23,17 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -62,12 +62,7 @@ namespace {
/// Make RetOrArg comparable, so we can put it into a map.
bool operator<(const RetOrArg &O) const {
- if (F != O.F)
- return F < O.F;
- else if (Idx != O.Idx)
- return Idx < O.Idx;
- else
- return IsArg < O.IsArg;
+ return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
}
/// Make RetOrArg comparable, so we can easily iterate the multimap.
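Note: the std::tie rewrite above is a stock idiom: std::tie builds a tuple of references, and tuple's operator< compares lexicographically, exactly matching the hand-rolled if/else chain it replaces. Self-contained sketch with simplified field types:

    #include <tuple>

    struct Key {
      int F;
      unsigned Idx;
      bool IsArg;
      bool operator<(const Key &O) const {
        // Compares F first, then Idx, then IsArg, stopping at the first
        // field that differs.
        return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
      }
    };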
@@ -143,13 +138,13 @@ namespace {
initializeDAEPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
virtual bool ShouldHackArguments() const { return false; }
private:
Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
- Liveness SurveyUse(Value::const_use_iterator U, UseVector &MaybeLiveUses,
+ Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses,
unsigned RetValNum = 0);
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
@@ -178,7 +173,7 @@ namespace {
static char ID;
DAH() : DAE(ID) {}
- virtual bool ShouldHackArguments() const { return true; }
+ bool ShouldHackArguments() const override { return true; }
};
}
@@ -265,7 +260,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
// to pass in a smaller number of arguments into the new function.
//
std::vector<Value*> Args;
- for (Value::use_iterator I = Fn.use_begin(), E = Fn.use_end(); I != E; ) {
+ for (Value::user_iterator I = Fn.user_begin(), E = Fn.user_end(); I != E; ) {
CallSite CS(*I++);
if (!CS)
continue;
@@ -378,7 +373,7 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
I != E; ++I) {
Argument *Arg = I;
- if (Arg->use_empty() && !Arg->hasByValAttr())
+ if (Arg->use_empty() && !Arg->hasByValOrInAllocaAttr())
UnusedArgs.push_back(Arg->getArgNo());
}
@@ -387,10 +382,9 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
bool Changed = false;
- for (Function::use_iterator I = Fn.use_begin(), E = Fn.use_end();
- I != E; ++I) {
- CallSite CS(*I);
- if (!CS || !CS.isCallee(I))
+ for (Use &U : Fn.uses()) {
+ CallSite CS(U.getUser());
+ if (!CS || !CS.isCallee(&U))
continue;
// Now go through all unused args and replace them with "undef".
@@ -441,9 +435,9 @@ DAE::Liveness DAE::MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses) {
/// RetValNum is the return value number to use when this use is used in a
/// return instruction. This is used in the recursion; you should always leave
/// it at 0.
-DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
+DAE::Liveness DAE::SurveyUse(const Use *U,
UseVector &MaybeLiveUses, unsigned RetValNum) {
- const User *V = *U;
+ const User *V = U->getUser();
if (const ReturnInst *RI = dyn_cast<ReturnInst>(V)) {
// The value is returned from a function. It's only live when the
// function's return value is live. We use RetValNum here, for the case
@@ -454,7 +448,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
return MarkIfNotLive(Use, MaybeLiveUses);
}
if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) {
- if (U.getOperandNo() != InsertValueInst::getAggregateOperandIndex()
+ if (U->getOperandNo() != InsertValueInst::getAggregateOperandIndex()
&& IV->hasIndices())
// The use we are examining is inserted into an aggregate. Our liveness
// depends on all uses of that aggregate, but if it is used as a return
@@ -465,9 +459,8 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
// we don't change RetValNum, but do survey all our uses.
Liveness Result = MaybeLive;
- for (Value::const_use_iterator I = IV->use_begin(),
- E = V->use_end(); I != E; ++I) {
- Result = SurveyUse(I, MaybeLiveUses, RetValNum);
+ for (const Use &UU : IV->uses()) {
+ Result = SurveyUse(&UU, MaybeLiveUses, RetValNum);
if (Result == Live)
break;
}
@@ -490,7 +483,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
return Live;
assert(CS.getArgument(ArgNo)
- == CS->getOperand(U.getOperandNo())
+ == CS->getOperand(U->getOperandNo())
&& "Argument is not where we expected it");
// Value passed to a normal call. It's only live when the corresponding
@@ -513,9 +506,8 @@ DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
// Assume it's dead (which will only hold if there are no uses at all..).
Liveness Result = MaybeLive;
// Check each use.
- for (Value::const_use_iterator I = V->use_begin(),
- E = V->use_end(); I != E; ++I) {
- Result = SurveyUse(I, MaybeLiveUses);
+ for (const Use &U : V->uses()) {
+ Result = SurveyUse(&U, MaybeLiveUses);
if (Result == Live)
break;
}
@@ -531,6 +523,13 @@ DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
// well as arguments to functions which have their "address taken".
//
void DAE::SurveyFunction(const Function &F) {
+ // Functions with inalloca parameters are expecting args in a particular
+ // register and memory layout.
+ if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca)) {
+ MarkLive(F);
+ return;
+ }
+
unsigned RetCount = NumRetVals(&F);
// Assume all return values are dead
typedef SmallVector<Liveness, 5> RetVals;
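Note: the inalloca early-out above exists because the caller pre-builds the outgoing argument block in its own frame with an ABI-fixed layout, so dead-argument elimination cannot shrink or reorder the signature. A hypothetical illustration of what a caller does (shapes invented for the example):

    // Conceptually the caller emits:
    //   %args = alloca inalloca <{ i32, i32 }>   ; fixed-layout arg block
    //   ; ...initialize both fields in place...
    //   call void @f(<{ i32, i32 }>* inalloca %args)
    // Removing a "dead" field would change the block's layout underneath
    // the caller, so the pass conservatively marks the whole function live.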
@@ -562,12 +561,11 @@ void DAE::SurveyFunction(const Function &F) {
unsigned NumLiveRetVals = 0;
Type *STy = dyn_cast<StructType>(F.getReturnType());
// Loop all uses of the function.
- for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
- I != E; ++I) {
+ for (const Use &U : F.uses()) {
// If the function is PASSED IN as an argument, its address has been
// taken.
- ImmutableCallSite CS(*I);
- if (!CS || !CS.isCallee(I)) {
+ ImmutableCallSite CS(U.getUser());
+ if (!CS || !CS.isCallee(&U)) {
MarkLive(F);
return;
}
@@ -586,9 +584,8 @@ void DAE::SurveyFunction(const Function &F) {
if (NumLiveRetVals != RetCount) {
if (STy) {
// Check all uses of the return value.
- for (Value::const_use_iterator I = TheCall->use_begin(),
- E = TheCall->use_end(); I != E; ++I) {
- const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(*I);
+ for (const User *U : TheCall->users()) {
+ const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(U);
if (Ext && Ext->hasIndices()) {
// This use uses a part of our return value, survey the uses of
// that part and store the results for this index only.
@@ -891,7 +888,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
//
std::vector<Value*> Args;
while (!F->use_empty()) {
- CallSite CS(F->use_back());
+ CallSite CS(F->user_back());
Instruction *Call = CS.getInstruction();
AttributesVec.clear();
diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp
index 50fb3e6..4211f12 100644
--- a/lib/Transforms/IPO/ExtractGV.cpp
+++ b/lib/Transforms/IPO/ExtractGV.cpp
@@ -68,7 +68,7 @@ namespace {
explicit GVExtractorPass(std::vector<GlobalValue*>& GVs, bool deleteS = true)
: ModulePass(ID), Named(GVs.begin(), GVs.end()), deleteStuff(deleteS) {}
- bool runOnModule(Module &M) {
+ bool runOnModule(Module &M) override {
// Visit the global inline asm.
if (!deleteStuff)
M.setModuleInlineAsm("");
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index 60e5f06..b716718 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -29,9 +29,9 @@
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/InstIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
@@ -51,7 +51,7 @@ namespace {
}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
// AddReadAttrs - Deduce readonly/readnone attributes for the SCC.
bool AddReadAttrs(const CallGraphSCC &SCC);
@@ -120,7 +120,7 @@ namespace {
// call declarations.
bool annotateLibraryCalls(const CallGraphSCC &SCC);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetLibraryInfo>();
@@ -137,7 +137,7 @@ char FunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs",
"Deduce function attributes", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(FunctionAttrs, "functionattrs",
"Deduce function attributes", false, false)
@@ -342,9 +342,9 @@ namespace {
ArgumentUsesTracker(const SmallPtrSet<Function*, 8> &SCCNodes)
: Captured(false), SCCNodes(SCCNodes) {}
- void tooManyUses() { Captured = true; }
+ void tooManyUses() override { Captured = true; }
- bool captured(Use *U) {
+ bool captured(const Use *U) override {
CallSite CS(U->getUser());
if (!CS.getInstruction()) { Captured = true; return true; }
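Note: this hunk adapts ArgumentUsesTracker to the revised CaptureTracking callbacks, where captured() now receives the Use so a tracker can see which operand does the capturing. A minimal sketch of a custom tracker under that assumed interface:

    #include "llvm/Analysis/CaptureTracking.h"
    using namespace llvm;

    struct SimpleTracker : CaptureTracker {
      bool Captured;
      SimpleTracker() : Captured(false) {}
      void tooManyUses() override { Captured = true; }
      bool captured(const Use *U) override {
        // U->getUser() is the instruction doing the capturing; returning
        // true stops the use walk early.
        Captured = true;
        return true;
      }
    };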
@@ -414,17 +414,19 @@ determinePointerReadAttrs(Argument *A,
SmallSet<Use*, 32> Visited;
int Count = 0;
+ // inalloca arguments are always clobbered by the call.
+ if (A->hasInAllocaAttr())
+ return Attribute::None;
+
bool IsRead = false;
// We don't need to track IsWritten. If A is written to, return immediately.
- for (Value::use_iterator UI = A->use_begin(), UE = A->use_end();
- UI != UE; ++UI) {
+ for (Use &U : A->uses()) {
if (Count++ >= 20)
return Attribute::None;
- Use *U = &UI.getUse();
- Visited.insert(U);
- Worklist.push_back(U);
+ Visited.insert(&U);
+ Worklist.push_back(&U);
}
while (!Worklist.empty()) {
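Note: besides the uses()-range conversion, this hunk bars inalloca parameters from readonly/readnone inference: the call itself writes the inalloca slot when building the frame, and the callee may reuse it as scratch memory. A hedged sketch with a hypothetical helper:

    // Not the real analysis -- just the shape of the early-out:
    static llvm::Attribute::AttrKind
    bestPointerAttr(const llvm::Argument *A) {
      if (A->hasInAllocaAttr())
        return llvm::Attribute::None;      // assume read+write, give up
      // ...otherwise walk the uses, as determinePointerReadAttrs does...
      return llvm::Attribute::ReadOnly;    // placeholder result
    }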
@@ -437,13 +439,11 @@ determinePointerReadAttrs(Argument *A,
case Instruction::GetElementPtr:
case Instruction::PHI:
case Instruction::Select:
+ case Instruction::AddrSpaceCast:
// The original value is not read/written via this if the new value isn't.
- for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
- UI != UE; ++UI) {
- Use *U = &UI.getUse();
- if (Visited.insert(U))
- Worklist.push_back(U);
- }
+ for (Use &UU : I->uses())
+ if (Visited.insert(&UU))
+ Worklist.push_back(&UU);
break;
case Instruction::Call:
@@ -599,8 +599,7 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
// made. If the definition doesn't have a 'nocapture' attribute by now, it
// captures.
- for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG), E = scc_end(&AG);
- I != E; ++I) {
+ for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
std::vector<ArgumentGraphNode*> &ArgumentSCC = *I;
if (ArgumentSCC.size() == 1) {
if (!ArgumentSCC[0]->Definition) continue; // synthetic root node
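Note: the loop header above drops the scc_end() comparison for scc_iterator's isAtEnd() sentinel test, the idiom the 3.5-era SCC walker expects. Sketch, assuming GraphTraits is defined for ArgumentGraph:

    for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
      const std::vector<ArgumentGraphNode *> &SCC = *I;
      // Each *I is one strongly connected component; components arrive
      // bottom-up (reverse topological order of the SCC DAG).
      for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
        // ...process SCC[i]...
      }
    }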
@@ -723,6 +722,7 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
// Extend the analysis by looking upwards.
case Instruction::BitCast:
case Instruction::GetElementPtr:
+ case Instruction::AddrSpaceCast:
FlowsToReturn.insert(RVI->getOperand(0));
continue;
case Instruction::Select: {
@@ -1649,6 +1649,7 @@ bool FunctionAttrs::inferPrototypeAttributes(Function &F) {
setDoesNotThrow(F);
setDoesNotCapture(F, 1);
setDoesNotCapture(F, 2);
+ break;
default:
// Didn't mark any attributes.
return false;
diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp
index 901295d..0c081f1 100644
--- a/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/lib/Transforms/IPO/GlobalDCE.cpp
@@ -38,7 +38,7 @@ namespace {
// run - Do the GlobalDCE pass on the specified module, optionally updating
// the specified callgraph to reflect the changes.
//
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
SmallPtrSet<GlobalValue*, 32> AliveGlobals;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 2ea89a1..1a510cf 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -22,22 +22,22 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
@@ -63,7 +63,7 @@ STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
namespace {
struct GlobalOpt : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfo>();
}
static char ID; // Pass identification, replacement for typeid
@@ -71,7 +71,7 @@ namespace {
initializeGlobalOptPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
GlobalVariable *FindGlobalCtors(Module &M);
@@ -84,7 +84,7 @@ namespace {
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- DataLayout *TD;
+ const DataLayout *DL;
TargetLibraryInfo *TLI;
};
}
@@ -196,7 +196,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
// Constants can't be pointers to dynamically allocated memory.
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
+ for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
UI != E;) {
User *U = *UI++;
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
@@ -266,13 +266,14 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
bool Changed = false;
// Note that we need to use a weak value handle for the worklist items. When
// we delete a constant array, we may also be holding a pointer to one of its
// elements (or an element of one of its elements if we're dealing with an
// array of arrays) in the worklist.
- SmallVector<WeakVH, 8> WorkList(V->use_begin(), V->use_end());
+ SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
while (!WorkList.empty()) {
Value *UV = WorkList.pop_back_val();
if (!UV)
@@ -296,11 +297,12 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Constant *SubInit = 0;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
- Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
- } else if (CE->getOpcode() == Instruction::BitCast &&
- CE->getType()->isPointerTy()) {
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
+ } else if ((CE->getOpcode() == Instruction::BitCast &&
+ CE->getType()->isPointerTy()) ||
+ CE->getOpcode() == Instruction::AddrSpaceCast) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
}
if (CE->use_empty()) {
@@ -314,7 +316,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Constant *SubInit = 0;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@@ -324,7 +326,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
SubInit = Constant::getNullValue(GEP->getType()->getElementType());
}
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);
if (GEP->use_empty()) {
GEP->eraseFromParent();
@@ -341,7 +343,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// us, and if they are all dead, nuke them without remorse.
if (isSafeToDestroyConstant(C)) {
C->destroyConstant();
- CleanupConstantGlobalUsers(V, Init, TD, TLI);
+ CleanupConstantGlobalUsers(V, Init, DL, TLI);
return true;
}
}
@@ -374,9 +376,8 @@ static bool isSafeSROAElementUse(Value *V) {
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
return false;
- for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
- I != E; ++I)
- if (!isSafeSROAElementUse(*I))
+ for (User *U : GEPI->users())
+ if (!isSafeSROAElementUse(U))
return false;
return true;
}
@@ -442,9 +443,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
}
}
- for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
- if (!isSafeSROAElementUse(*I))
+ for (User *UU : U->users())
+ if (!isSafeSROAElementUse(UU))
return false;
+
return true;
}
@@ -452,11 +454,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI) {
- if (!IsUserOfGlobalSafeForSRA(*UI, GV))
+ for (User *U : GV->users())
+ if (!IsUserOfGlobalSafeForSRA(U, GV))
return false;
- }
+
return true;
}
@@ -466,7 +467,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
@@ -481,11 +482,11 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Get the alignment of the global, either explicit or target-specific.
unsigned StartAlignment = GV->getAlignment();
if (StartAlignment == 0)
- StartAlignment = TD.getABITypeAlignment(GV->getType());
+ StartAlignment = DL.getABITypeAlignment(GV->getType());
if (StructType *STy = dyn_cast<StructType>(Ty)) {
NewGlobals.reserve(STy->getNumElements());
- const StructLayout &Layout = *TD.getStructLayout(STy);
+ const StructLayout &Layout = *DL.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -502,7 +503,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// propagate info to each field.
uint64_t FieldOffset = Layout.getElementOffset(i);
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
- if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
+ if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
NGV->setAlignment(NewAlign);
}
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
@@ -516,8 +517,8 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
return 0; // It's not worth it.
NewGlobals.reserve(NumElements);
- uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
- unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
+ unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -549,7 +550,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Loop over all of the uses of the global, replacing the constantexpr geps,
// with smaller constantexpr geps or direct references.
while (!GV->use_empty()) {
- User *GEP = GV->use_back();
+ User *GEP = GV->user_back();
assert(((isa<ConstantExpr>(GEP) &&
cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
@@ -610,10 +611,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
SmallPtrSet<const PHINode*, 8> &PHIs) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI) {
- const User *U = *UI;
-
+ for (const User *U : V->users())
if (isa<LoadInst>(U)) {
// Will trap.
} else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
@@ -641,13 +639,13 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
return false;
} else if (isa<ICmpInst>(U) &&
- isa<ConstantPointerNull>(UI->getOperand(1))) {
+ isa<ConstantPointerNull>(U->getOperand(1))) {
// Ignore icmp X, null
} else {
//cerr << "NONTRAPPING USE: " << *U;
return false;
}
- }
+
return true;
}
@@ -655,10 +653,7 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
/// from GV will trap if the loaded value is null. Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
- for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI) {
- const User *U = *UI;
-
+ for (const User *U : GV->users())
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
SmallPtrSet<const PHINode*, 8> PHIs;
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
@@ -670,13 +665,12 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
//cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
return false;
}
- }
return true;
}
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
bool Changed = false;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
+ for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
Instruction *I = cast<Instruction>(*UI++);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
LI->setOperand(0, NewV);
@@ -702,7 +696,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
if (PassedAsArg) {
// Being passed as an argument also. Be careful to not invalidate UI!
- UI = V->use_begin();
+ UI = V->user_begin();
}
}
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
@@ -742,7 +736,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
@@ -751,7 +745,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
bool AllNonStoreUsesGone = true;
// Replace all uses of loads with uses of uses of the stored value.
- for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
+ for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
User *GlobalUser = *GUI++;
if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
@@ -791,7 +785,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
Changed |= CleanupPointerRootUsers(GV, TLI);
} else {
Changed = true;
- CleanupConstantGlobalUsers(GV, 0, TD, TLI);
+ CleanupConstantGlobalUsers(GV, 0, DL, TLI);
}
if (GV->use_empty()) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
@@ -805,11 +799,11 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V,
- DataLayout *TD, TargetLibraryInfo *TLI) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
+static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
+ for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -829,7 +823,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
@@ -855,7 +849,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// other users to use the global as well.
BitCastInst *TheBC = 0;
while (!CI->use_empty()) {
- Instruction *User = cast<Instruction>(CI->use_back());
+ Instruction *User = cast<Instruction>(CI->user_back());
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
if (BCI->getType() == NewGV->getType()) {
BCI->replaceAllUsesWith(NewGV);
@@ -886,7 +880,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// Loop over all uses of GV, processing them in turn.
while (!GV->use_empty()) {
- if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
+ if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
// The global is initialized when the store to it occurs.
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
SI->getOrdering(), SI->getSynchScope(), SI);
@@ -894,15 +888,15 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
continue;
}
- LoadInst *LI = cast<LoadInst>(GV->use_back());
+ LoadInst *LI = cast<LoadInst>(GV->user_back());
while (!LI->use_empty()) {
- Use &LoadUse = LI->use_begin().getUse();
- if (!isa<ICmpInst>(LoadUse.getUser())) {
+ Use &LoadUse = *LI->use_begin();
+ ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
+ if (!ICI) {
LoadUse = RepValue;
continue;
}
- ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
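Note: the rewritten loop above leans on a subtle point: a Use is the operand edge itself, so assigning a Value to it ("LoadUse = RepValue") rewires that single operand in place. Sketch of the idiom with hypothetical helper names, assuming the 3.5-era API:

    // Redirect exactly one use of OldV to NewV, leaving its other uses alone.
    static void redirectFirstUse(llvm::Value *OldV, llvm::Value *NewV) {
      if (OldV->use_empty())
        return;
      llvm::Use &U = *OldV->use_begin();  // first remaining use-edge
      U = NewV;                           // equivalent to U.set(NewV)
    }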
@@ -936,7 +930,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// If the initialization boolean was used, insert it, otherwise delete it.
if (!InitBoolUsed) {
while (!InitBool->use_empty()) // Delete initializations
- cast<StoreInst>(InitBool->use_back())->eraseFromParent();
+ cast<StoreInst>(InitBool->user_back())->eraseFromParent();
delete InitBool;
} else
GV->getParent()->getGlobalList().insert(GV, InitBool);
@@ -948,9 +942,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
- ConstantPropUsersOf(NewGV, TD, TLI);
+ ConstantPropUsersOf(NewGV, DL, TLI);
if (RepValue != NewGV)
- ConstantPropUsersOf(RepValue, TD, TLI);
+ ConstantPropUsersOf(RepValue, DL, TLI);
return NewGV;
}
@@ -962,9 +956,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
const GlobalVariable *GV,
SmallPtrSet<const PHINode*, 8> &PHIs) {
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- const Instruction *Inst = cast<Instruction>(*UI);
+ for (const User *U : V->users()) {
+ const Instruction *Inst = cast<Instruction>(U);
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
continue; // Fine, ignore.
@@ -1011,7 +1004,7 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
GlobalVariable *GV) {
while (!Alloc->use_empty()) {
- Instruction *U = cast<Instruction>(*Alloc->use_begin());
+ Instruction *U = cast<Instruction>(*Alloc->user_begin());
Instruction *InsertPt = U;
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// If this is the store of the allocation into the global, remove it.
@@ -1022,7 +1015,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
} else if (PHINode *PN = dyn_cast<PHINode>(U)) {
// Insert the load in the corresponding predecessor, not right before the
// PHI.
- InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
+ InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
} else if (isa<BitCastInst>(U)) {
// Must be bitcast between the malloc and store to initialize the global.
ReplaceUsesOfMallocWithGlobal(U, GV);
@@ -1032,7 +1025,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
// If this is a "GEP bitcast" and the user is a store to the global, then
// just process it as a bitcast.
if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
- if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
+ if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
if (SI->getOperand(1) == GV) {
// Must be bitcast GEP between the malloc and store to initialize
// the global.
@@ -1056,19 +1049,18 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
// We permit two users of the load: setcc comparing against the null
// pointer, and a getelementptr of a specific form.
- for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI) {
- const Instruction *User = cast<Instruction>(*UI);
+ for (const User *U : V->users()) {
+ const Instruction *UI = cast<Instruction>(U);
// Comparison against null is ok.
- if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return false;
continue;
}
// getelementptr is also ok, but only a simple form.
- if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
// Must index into the array and into the struct.
if (GEPI->getNumOperands() < 3)
return false;
@@ -1077,7 +1069,7 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
continue;
}
- if (const PHINode *PN = dyn_cast<PHINode>(User)) {
+ if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
if (!LoadUsingPHIsPerLoad.insert(PN))
// This means some phi nodes are dependent on each other.
// Avoid infinite looping!
@@ -1108,9 +1100,8 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
Instruction *StoredVal) {
SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
- for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI)
- if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (const User *U : GV->users())
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
LoadUsingPHIsPerLoad))
return false;
@@ -1249,7 +1240,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
// If this is the first time we've seen this PHI, recursively process all
// users.
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
+ for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
}
@@ -1262,8 +1253,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
- for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
- UI != E; ) {
+ for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
}
@@ -1277,7 +1267,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, DataLayout *TD,
+ Value *NElems, const DataLayout *DL,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1306,10 +1296,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
- unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
+ unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
- TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
+ TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, 0,
@@ -1394,7 +1384,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Okay, the malloc site is completely handled. All of the uses of GV are now
// loads, and all uses of those loads are simple. Rewrite them to use loads
// of the per-field globals instead.
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
+ for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -1469,9 +1459,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD,
+ const DataLayout *DL,
TargetLibraryInfo *TLI) {
- if (!TD)
+ if (!DL)
return false;
// If this is a malloc of an abstract type, don't touch it.
@@ -1501,7 +1491,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, TD, TLI, true);
+ Value *NElems = getMallocArraySize(CI, DL, TLI, true);
if (!NElems)
return false;
@@ -1509,8 +1499,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// Restrict this transformation to only working on small allocations
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
- if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
+ if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
return true;
}
@@ -1539,8 +1529,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
- unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
+ unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
@@ -1555,8 +1545,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
- TD, TLI);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
+ DL, TLI);
return true;
}
@@ -1568,7 +1558,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ const DataLayout *DL,
+ TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1583,13 +1574,13 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
Type *MallocType = getMallocAllocatedType(CI, TLI);
if (MallocType &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
}
}
@@ -1616,11 +1607,9 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// Walk the use list of the global seeing if all the uses are load or store.
// If there is anything else, bail out.
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
- User *U = *I;
+ for (User *U : GV->users())
if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
- }
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
@@ -1645,7 +1634,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
IsOneZero = InitVal->isNullValue() && CI->isOne();
while (!GV->use_empty()) {
- Instruction *UI = cast<Instruction>(GV->use_back());
+ Instruction *UI = cast<Instruction>(GV->user_back());
if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
// Change the store into a boolean store.
bool StoringOther = SI->getOperand(0) == OtherVal;
@@ -1746,7 +1735,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// and this function is main (which we know is not recursive), we replace
// the global with a local alloca in this function.
//
- // NOTE: It doesn't make sense to promote non single-value types since we
+ // NOTE: It doesn't make sense to promote non-single-value types since we
// are just replacing static memory to stack memory.
//
// If the global is in different address space, don't bring it to stack.
@@ -1783,7 +1772,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
} else {
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
- Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
}
// If the global is dead now, delete it.
@@ -1799,7 +1788,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@@ -1812,11 +1801,13 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
+ if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) {
+ const DataLayout &DL = DLP->getDataLayout();
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
}
+ }
} else if (GS.StoredType == GlobalStatus::StoredOnce) {
// If the initial value for the global was an undef value, and if only
// one other value was stored into it, we can just change the
@@ -1828,7 +1819,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
@@ -1845,7 +1836,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
// Otherwise, if the global was not a boolean, we can shrink it to be a
@@ -1866,11 +1857,11 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
- if (isa<BlockAddress>(*UI))
+ for (User *U : F->users()) {
+ if (isa<BlockAddress>(U))
continue;
- CallSite User(cast<Instruction>(*UI));
- User.setCallingConv(CallingConv::Fast);
+ CallSite CS(cast<Instruction>(U));
+ CS.setCallingConv(CallingConv::Fast);
}
}
@@ -1889,14 +1880,24 @@ static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
static void RemoveNestAttribute(Function *F) {
F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
- for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
- if (isa<BlockAddress>(*UI))
+ for (User *U : F->users()) {
+ if (isa<BlockAddress>(U))
continue;
- CallSite User(cast<Instruction>(*UI));
- User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
+ CallSite CS(cast<Instruction>(U));
+ CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
}
}
+/// Return true if this is a calling convention that we'd like to change. The
+/// idea here is that we don't want to mess with the convention if the user
+/// explicitly requested something with performance implications like coldcc,
+/// GHC, or anyregcc.
+static bool isProfitableToMakeFastCC(Function *F) {
+ CallingConv::ID CC = F->getCallingConv();
+ // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
+ return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
+}
+
bool GlobalOpt::OptimizeFunctions(Module &M) {
bool Changed = false;
// Optimize functions.
@@ -1911,11 +1912,11 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
Changed = true;
++NumFnDeleted;
} else if (F->hasLocalLinkage()) {
- if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
+ if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
!F->hasAddressTaken()) {
- // If this function has C calling conventions, is not a varargs
- // function, and is only called directly, promote it to use the Fast
- // calling convention.
+ // If this function has a calling convention worth changing, is not a
+ // varargs function, and is only called directly, promote it to use the
+ // Fast calling convention.
F->setCallingConv(CallingConv::Fast);
ChangeCalleesToFastCall(F);
++NumFastCallFns;
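
[Editor's sketch] For context, here is the promotion this hunk performs, distilled into one hypothetical standalone helper (not part of the patch), written against the 3.5-era headers the patch itself migrates to (llvm/IR/CallSite.h):

    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    static bool promoteToFastCC(Function *F) {
      CallingConv::ID CC = F->getCallingConv();
      // Leave conventions the user picked for a reason (coldcc, GHC, ...) alone.
      if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
        return false;
      if (!F->hasLocalLinkage() || F->isVarArg() || F->hasAddressTaken())
        return false;
      F->setCallingConv(CallingConv::Fast);
      for (User *U : F->users()) {
        if (isa<BlockAddress>(U))
          continue;                      // a blockaddress is not a call site
        CallSite CS(cast<Instruction>(U));
        CS.setCallingConv(CallingConv::Fast);
      }
      return true;
    }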
@@ -1946,7 +1947,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
+ Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@@ -2069,7 +2070,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD);
+ const DataLayout *DL);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2082,7 +2083,7 @@ isSimpleEnoughValueToCommit(Constant *C,
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// Simple integer, undef, constant aggregate zero, global addresses, etc are
// all supported.
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
@@ -2094,7 +2095,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
isa<ConstantVector>(C)) {
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
return false;
}
return true;
@@ -2107,29 +2108,29 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
switch (CE->getOpcode()) {
case Instruction::BitCast:
// Bitcast is fine if the casted value is fine.
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::IntToPtr:
case Instruction::PtrToInt:
// int <=> ptr is fine if the int type is the same size as the
// pointer type.
- if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
- TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
+ DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
// GEP is fine if it is simple + constant offset.
case Instruction::GetElementPtr:
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
if (!isa<ConstantInt>(CE->getOperand(i)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::Add:
// We allow simple+cst.
if (!isa<ConstantInt>(CE->getOperand(1)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
}
return false;
}
@@ -2137,11 +2138,11 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
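
[Editor's sketch] The insert-before-recurse pattern above doubles as memoization: in the 3.5 API, SmallPtrSet::insert returns false when the element was already present. A toy sketch of the same shape, assuming the usual llvm/ADT/SmallPtrSet.h and llvm/IR/Constants.h includes (isSimpleEnough is illustrative, not the pass's exact logic):

    static bool isSimpleEnough(Constant *C, SmallPtrSet<Constant *, 8> &Seen) {
      if (!Seen.insert(C))
        return true;   // seen before: already vetted, don't redo the work
      // Recurse into operands; any rejected operand rejects the whole constant.
      for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
        if (!isSimpleEnough(cast<Constant>(C->getOperand(i)), Seen))
          return false;
      return true;
    }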
@@ -2173,7 +2174,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
// The first index must be zero.
- ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
+ ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
if (!CI || !CI->isZero()) return false;
// The remaining indices must be compile-time known integers within the
@@ -2268,8 +2269,8 @@ namespace {
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
- : TD(TD), TLI(TLI) {
+ Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
ValueStack.push_back(new DenseMap<Value*, Constant*>);
}
@@ -2349,7 +2350,7 @@ private:
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
};
@@ -2402,7 +2403,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
@@ -2415,7 +2416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) {
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
<< "\n");
return false;
@@ -2447,7 +2448,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
@@ -2511,7 +2512,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
@@ -2580,7 +2581,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// We don't insert an entry into Values, as it doesn't have a
// meaningful return value.
if (!II->use_empty()) {
- DEBUG(dbgs() << "Found unused invariant_start. Cant evaluate.\n");
+ DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
return false;
}
ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
@@ -2588,9 +2589,9 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
- if (TD && !Size->isAllOnesValue() &&
+ if (DL && !Size->isAllOnesValue() &&
Size->getValue().getLimitedValue() >=
- TD->getTypeStoreSize(ElemTy)) {
+ DL->getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
<< "\n");
@@ -2696,7 +2697,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
+ InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
setVal(CurInst, InstResult);
}
@@ -2779,10 +2780,10 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Call the function.
- Evaluator Eval(TD, TLI);
+ Evaluator Eval(DL, TLI);
Constant *RetValDummy;
bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
SmallVector<Constant*, 0>());
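
[Editor's sketch] EvaluateStaticConstructor shows only half the story: on success the pass commits every store the Evaluator simulated. A hedged sketch, assuming the Evaluator exposes its mutated-memory map (the real class has a getMutatedMemory() accessor, not shown in this hunk) and a CommitValueTo(Value, Addr) helper:

    static bool tryFoldCtor(Function *Ctor, const DataLayout *DL,
                            const TargetLibraryInfo *TLI) {
      Evaluator Eval(DL, TLI);
      Constant *Ret;
      if (!Eval.EvaluateFunction(Ctor, Ret, SmallVector<Constant *, 0>()))
        return false;                 // hit non-constant behaviour: keep the ctor
      typedef DenseMap<Constant *, Constant *>::const_iterator MemIt;
      for (MemIt I = Eval.getMutatedMemory().begin(),
                 E = Eval.getMutatedMemory().end(); I != E; ++I)
        CommitValueTo(I->second, I->first); // fold simulated stores into globals
      return true;                    // caller may drop Ctor from llvm.global_ctors
    }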
@@ -2830,7 +2831,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
if (F->empty()) continue;
// If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F, TD, TLI)) {
+ if (EvaluateStaticConstructor(F, DL, TLI)) {
Ctors.erase(Ctors.begin()+i);
MadeChange = true;
--i;
@@ -2856,12 +2857,14 @@ static void setUsedInitializer(GlobalVariable &V,
return;
}
- SmallVector<llvm::Constant *, 8> UsedArray;
- PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext());
+ // Element type of the llvm.used array: i8* in address space 0.
+ PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
+ SmallVector<llvm::Constant *, 8> UsedArray;
for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end();
I != E; ++I) {
- Constant *Cast = llvm::ConstantExpr::getBitCast(*I, Int8PtrTy);
+ Constant *Cast
+ = ConstantExpr::getPointerBitCastOrAddrSpaceCast(*I, Int8PtrTy);
UsedArray.push_back(Cast);
}
// Sort to get deterministic order.
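
[Editor's sketch] The switch to getPointerBitCastOrAddrSpaceCast above is what lets globals in non-zero address spaces appear in llvm.used, whose elements are i8* in address space 0. A one-liner sketch of the choice it encapsulates (toUsedSlot is hypothetical):

    static Constant *toUsedSlot(GlobalValue *GV, LLVMContext &Ctx) {
      PointerType *Int8PtrTy = Type::getInt8PtrTy(Ctx, 0);
      // Emits a bitcast when the address spaces already match, an
      // addrspacecast otherwise; a plain bitcast across address spaces
      // would be invalid IR.
      return ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
    }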
@@ -3015,7 +3018,8 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
// Give the aliasee the name, linkage and other attributes of the alias.
Target->takeName(J);
Target->setLinkage(J->getLinkage());
- Target->GlobalValue::copyAttributesFrom(J);
+ Target->setVisibility(J->getVisibility());
+ Target->setDLLStorageClass(J->getDLLStorageClass());
if (Used.usedErase(J))
Used.usedInsert(Target);
@@ -3122,8 +3126,8 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
// and remove them.
bool Changed = false;
- for (Function::use_iterator I = CXAAtExitFn->use_begin(),
- E = CXAAtExitFn->use_end(); I != E;) {
+ for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
+ I != E;) {
// We're only interested in calls. Theoretically, we could handle invoke
// instructions as well, but neither llvm-gcc nor clang generate invokes
// to __cxa_atexit.
@@ -3155,7 +3159,8 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : 0;
TLI = &getAnalysis<TargetLibraryInfo>();
// Try to find the llvm.globalctors list.
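
[Editor's sketch] The same two-line lookup recurs throughout this patch (GlobalOpt here, Inliner and MergeFunctions below): in 3.5 DataLayout is no longer a pass itself, so consumers fetch the DataLayoutPass wrapper and must tolerate its absence. Distilled, with a hypothetical MyPass:

    bool MyPass::runOnModule(Module &M) {
      DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
      const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0; // may be null
      // Every size/alignment query must be guarded on DL being available.
      if (DL)
        (void)DL->getPointerSizeInBits(0);
      return false;
    }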
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index 4ac1dfc..8684796 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -20,11 +20,11 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
using namespace llvm;
STATISTIC(NumArgumentsProped, "Number of args turned into constants");
@@ -39,7 +39,7 @@ namespace {
initializeIPCPPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
bool PropagateConstantsIntoArguments(Function &F);
bool PropagateConstantReturn(Function &F);
@@ -86,18 +86,18 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
ArgumentConstants.resize(F.arg_size());
unsigned NumNonconstant = 0;
- for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
- User *U = *UI;
+ for (Use &U : F.uses()) {
+ User *UR = U.getUser();
// Ignore blockaddress uses.
- if (isa<BlockAddress>(U)) continue;
+ if (isa<BlockAddress>(UR)) continue;
// Used by a non-instruction, or not the callee of a function, do not
// transform.
- if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
+ if (!isa<CallInst>(UR) && !isa<InvokeInst>(UR))
return false;
- CallSite CS(cast<Instruction>(U));
- if (!CS.isCallee(UI))
+ CallSite CS(cast<Instruction>(UR));
+ if (!CS.isCallee(&U))
return false;
// Check out all of the potentially constant arguments. Note that we don't
@@ -135,7 +135,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
for (unsigned i = 0, e = ArgumentConstants.size(); i != e; ++i, ++AI) {
// Do we have a constant argument?
if (ArgumentConstants[i].second || AI->use_empty() ||
- (AI->hasByValAttr() && !F.onlyReadsMemory()))
+ AI->hasInAllocaAttr() || (AI->hasByValAttr() && !F.onlyReadsMemory()))
continue;
Value *V = ArgumentConstants[i].first;
@@ -210,7 +210,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// Different or no known return value? Don't propagate this return
// value.
RetVals[i] = 0;
- // All values non constant? Stop looking.
+ // All values non-constant? Stop looking.
if (++NumNonConstant == RetVals.size())
return false;
}
@@ -220,13 +220,13 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// over all users, replacing any uses of the return value with the returned
// constant.
bool MadeChange = false;
- for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
- CallSite CS(*UI);
+ for (Use &U : F.uses()) {
+ CallSite CS(U.getUser());
Instruction* Call = CS.getInstruction();
// Not a call instruction or a call instruction that's not calling F
// directly?
- if (!Call || !CS.isCallee(UI))
+ if (!Call || !CS.isCallee(&U))
continue;
// Call result not used?
@@ -244,9 +244,8 @@ bool IPCP::PropagateConstantReturn(Function &F) {
Call->replaceAllUsesWith(New);
continue;
}
-
- for (Value::use_iterator I = Call->use_begin(), E = Call->use_end();
- I != E;) {
+
+ for (auto I = Call->user_begin(), E = Call->user_end(); I != E;) {
Instruction *Ins = cast<Instruction>(*I);
// Increment now, so we can remove the use
diff --git a/lib/Transforms/IPO/IPO.cpp b/lib/Transforms/IPO/IPO.cpp
index 5d563d8..b4d31d8 100644
--- a/lib/Transforms/IPO/IPO.cpp
+++ b/lib/Transforms/IPO/IPO.cpp
@@ -44,6 +44,7 @@ void llvm::initializeIPO(PassRegistry &Registry) {
initializeStripDebugDeclarePass(Registry);
initializeStripDeadDebugInfoPass(Registry);
initializeStripNonDebugSymbolsPass(Registry);
+ initializeBarrierNoopPass(Registry);
}
void LLVMInitializeIPO(LLVMPassRegistryRef R) {
diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp
index 437597e..6cf3040 100644
--- a/lib/Transforms/IPO/InlineAlways.cpp
+++ b/lib/Transforms/IPO/InlineAlways.cpp
@@ -17,13 +17,13 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
using namespace llvm;
@@ -47,13 +47,13 @@ public:
static char ID; // Pass identification, replacement for typeid
- virtual InlineCost getInlineCost(CallSite CS);
+ InlineCost getInlineCost(CallSite CS) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool runOnSCC(CallGraphSCC &SCC);
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnSCC(CallGraphSCC &SCC) override;
using llvm::Pass::doFinalization;
- virtual bool doFinalization(CallGraph &CG) {
+ bool doFinalization(CallGraph &CG) override {
return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/ true);
}
};
@@ -63,7 +63,7 @@ public:
char AlwaysInliner::ID = 0;
INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline",
"Inliner for always_inline functions", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis)
INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
"Inliner for always_inline functions", false, false)
diff --git a/lib/Transforms/IPO/InlineSimple.cpp b/lib/Transforms/IPO/InlineSimple.cpp
index 57379a3..7141064 100644
--- a/lib/Transforms/IPO/InlineSimple.cpp
+++ b/lib/Transforms/IPO/InlineSimple.cpp
@@ -15,13 +15,13 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
using namespace llvm;
@@ -48,20 +48,31 @@ public:
static char ID; // Pass identification, replacement for typeid
- InlineCost getInlineCost(CallSite CS) {
+ InlineCost getInlineCost(CallSite CS) override {
return ICA->getInlineCost(CS, getInlineThreshold(CS));
}
- virtual bool runOnSCC(CallGraphSCC &SCC);
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnSCC(CallGraphSCC &SCC) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
};
+static int computeThresholdFromOptLevels(unsigned OptLevel,
+ unsigned SizeOptLevel) {
+ if (OptLevel > 2)
+ return 275;
+ if (SizeOptLevel == 1) // -Os
+ return 75;
+ if (SizeOptLevel == 2) // -Oz
+ return 25;
+ return 225;
+}
+
} // end anonymous namespace
char SimpleInliner::ID = 0;
INITIALIZE_PASS_BEGIN(SimpleInliner, "inline",
"Function Integration/Inlining", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis)
INITIALIZE_PASS_END(SimpleInliner, "inline",
"Function Integration/Inlining", false, false)
@@ -72,6 +83,12 @@ Pass *llvm::createFunctionInliningPass(int Threshold) {
return new SimpleInliner(Threshold);
}
+Pass *llvm::createFunctionInliningPass(unsigned OptLevel,
+ unsigned SizeOptLevel) {
+ return new SimpleInliner(
+ computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
+}
+
bool SimpleInliner::runOnSCC(CallGraphSCC &SCC) {
ICA = &getAnalysis<InlineCostAnalysis>();
return Inliner::runOnSCC(SCC);
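
[Editor's note] The new overload makes the optimization-level mapping concrete. For illustration, the thresholds it resolves to, straight from computeThresholdFromOptLevels above:

    Pass *O3 = createFunctionInliningPass(3, 0); // OptLevel > 2    -> 275
    Pass *Os = createFunctionInliningPass(2, 1); // -Os             -> 75
    Pass *Oz = createFunctionInliningPass(2, 2); // -Oz             -> 25
    Pass *O2 = createFunctionInliningPass(2, 0); // everything else -> 225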
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index d75d6ca..e97fb83 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -19,11 +19,11 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -50,6 +50,13 @@ static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
cl::desc("Threshold for inlining functions with inline hint"));
+// We introduce this threshold to help performance of instrumentation-based
+// PGO before we actually hook up the inliner with analysis passes such as
+// BPI and BFI.
+static cl::opt<int>
+ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225),
+ cl::desc("Threshold for inlining functions with cold attribute"));
+
// Threshold to use when optsize is specified (and there is no -inline-limit).
const int OptSizeThreshold = 75;
@@ -117,7 +124,7 @@ static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
int InlineHistory, bool InsertLifetime,
- const DataLayout *TD) {
+ const DataLayout *DL) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
@@ -196,7 +203,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// If we don't have data layout information, and only one alloca is using
// the target default, then we can't safely merge them because we can't
// pick the greater alignment.
- if (!TD && (!Align1 || !Align2) && Align1 != Align2)
+ if (!DL && (!Align1 || !Align2) && Align1 != Align2)
continue;
// The available alloca has to be in the right function, not in some other
@@ -218,8 +225,8 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
if (Align1 != Align2) {
if (!Align1 || !Align2) {
- assert(TD && "DataLayout required to compare default alignments");
- unsigned TypeAlign = TD->getABITypeAlignment(AI->getAllocatedType());
+ assert(DL && "DataLayout required to compare default alignments");
+ unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());
Align1 = Align1 ? Align1 : TypeAlign;
Align2 = Align2 ? Align2 : TypeAlign;
@@ -277,6 +284,13 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
Attribute::MinSize))
thres = HintThreshold;
+ // Listen to the cold attribute when it would decrease the threshold.
+ bool ColdCallee = Callee && !Callee->isDeclaration() &&
+ Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::Cold);
+ if (ColdCallee && ColdThreshold < thres)
+ thres = ColdThreshold;
+
return thres;
}
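
[Editor's sketch] Note the asymmetry this hunk establishes: the inline-hint threshold may only raise the limit, while the new cold threshold may only lower it, so a cold callee can never receive a larger inlining budget. A sketch with hypothetical names and the default values from this file:

    static int adjustThreshold(int Base, bool HasHint, bool IsCold) {
      int T = Base;                     // e.g. 225, or 75 under optsize
      if (HasHint && 325 > T) T = 325;  // HintThreshold: may only raise
      if (IsCold && 225 < T) T = 225;   // ColdThreshold: may only lower
      return T;
    }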
@@ -330,9 +344,8 @@ bool Inliner::shouldInline(CallSite CS) {
bool callerWillBeRemoved = Caller->hasLocalLinkage();
// This bool tracks what happens if we DO inline C into B.
bool inliningPreventsSomeOuterInline = false;
- for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
- I != E; ++I) {
- CallSite CS2(*I);
+ for (User *U : Caller->users()) {
+ CallSite CS2(U);
// If this isn't a call to Caller (it could be some other sort
// of reference) skip it. Such references will prevent the caller
@@ -363,7 +376,7 @@ bool Inliner::shouldInline(CallSite CS) {
// one is set very low by getInlineCost, in anticipation that Caller will
// be removed entirely. We did not account for this above unless there
// is only one caller of Caller.
- if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
+ if (callerWillBeRemoved && !Caller->use_empty())
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
@@ -395,8 +408,9 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
}
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
- CallGraph &CG = getAnalysis<CallGraph>();
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
SmallPtrSet<Function*, 8> SCCFunctions;
@@ -456,7 +470,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
InlinedArrayAllocasTy InlinedArrayAllocas;
- InlineFunctionInfo InlineInfo(&CG, TD);
+ InlineFunctionInfo InlineInfo(&CG, DL);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
@@ -505,7 +519,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID, InsertLifetime, TD))
+ InlineHistoryID, InsertLifetime, DL))
continue;
++NumInlined;
diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp
index 64e2ced..c1fe01c 100644
--- a/lib/Transforms/IPO/Internalize.cpp
+++ b/lib/Transforms/IPO/Internalize.cpp
@@ -59,11 +59,11 @@ namespace {
explicit InternalizePass();
explicit InternalizePass(ArrayRef<const char *> ExportList);
void LoadFile(const char *Filename);
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addPreserved<CallGraph>();
+ AU.addPreserved<CallGraphWrapperPass>();
}
};
} // end anonymous namespace
@@ -72,8 +72,7 @@ char InternalizePass::ID = 0;
INITIALIZE_PASS(InternalizePass, "internalize",
"Internalize Global Symbols", false, false)
-InternalizePass::InternalizePass()
- : ModulePass(ID) {
+InternalizePass::InternalizePass() : ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
if (!APIFile.empty()) // If a filename is specified, use it.
LoadFile(APIFile.c_str());
@@ -81,7 +80,7 @@ InternalizePass::InternalizePass()
}
InternalizePass::InternalizePass(ArrayRef<const char *> ExportList)
- : ModulePass(ID){
+ : ModulePass(ID) {
initializeInternalizePassPass(*PassRegistry::getPassRegistry());
for(ArrayRef<const char *>::const_iterator itr = ExportList.begin();
itr != ExportList.end(); itr++) {
@@ -115,6 +114,10 @@ static bool shouldInternalize(const GlobalValue &GV,
if (GV.hasAvailableExternallyLinkage())
return false;
+ // Assume that dllexported symbols are referenced elsewhere
+ if (GV.hasDLLExportStorageClass())
+ return false;
+
// Already has internal linkage
if (GV.hasLocalLinkage())
return false;
@@ -127,7 +130,8 @@ static bool shouldInternalize(const GlobalValue &GV,
}
bool InternalizePass::runOnModule(Module &M) {
- CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
+ CallGraphWrapperPass *CGPass = getAnalysisIfAvailable<CallGraphWrapperPass>();
+ CallGraph *CG = CGPass ? &CGPass->getCallGraph() : 0;
CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
bool Changed = false;
@@ -150,7 +154,6 @@ bool InternalizePass::runOnModule(Module &M) {
}
// Mark all functions not in the api as internal.
- // FIXME: maybe use private linkage?
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!shouldInternalize(*I, ExternalNames))
continue;
@@ -186,7 +189,6 @@ bool InternalizePass::runOnModule(Module &M) {
// Mark all global variables with initializers that are not in the api as
// internal as well.
- // FIXME: maybe use private linkage?
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
if (!shouldInternalize(*I, ExternalNames))
@@ -213,9 +215,7 @@ bool InternalizePass::runOnModule(Module &M) {
return Changed;
}
-ModulePass *llvm::createInternalizePass() {
- return new InternalizePass();
-}
+ModulePass *llvm::createInternalizePass() { return new InternalizePass(); }
ModulePass *llvm::createInternalizePass(ArrayRef<const char *> ExportList) {
return new InternalizePass(ExportList);
diff --git a/lib/Transforms/IPO/LLVMBuild.txt b/lib/Transforms/IPO/LLVMBuild.txt
index 124cbb6..77e0b22 100644
--- a/lib/Transforms/IPO/LLVMBuild.txt
+++ b/lib/Transforms/IPO/LLVMBuild.txt
@@ -20,4 +20,4 @@ type = Library
name = IPO
parent = Transforms
library_name = ipo
-required_libraries = Analysis Core IPA InstCombine Scalar Vectorize Support Target TransformUtils ObjCARC
+required_libraries = Analysis Core IPA InstCombine Scalar Support Target TransformUtils Vectorize
diff --git a/lib/Transforms/IPO/LoopExtractor.cpp b/lib/Transforms/IPO/LoopExtractor.cpp
index 8282a8e..464aa99 100644
--- a/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/lib/Transforms/IPO/LoopExtractor.cpp
@@ -17,8 +17,8 @@
#define DEBUG_TYPE "loop-extract"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
@@ -42,12 +42,12 @@ namespace {
initializeLoopExtractorPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredID(BreakCriticalEdgesID);
AU.addRequiredID(LoopSimplifyID);
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
}
};
}
@@ -57,7 +57,7 @@ INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
"Extract loops into new functions", false, false)
INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopExtractor, "loop-extract",
"Extract loops into new functions", false, false)
@@ -79,6 +79,9 @@ INITIALIZE_PASS(SingleLoopExtractor, "loop-extract-single",
Pass *llvm::createLoopExtractorPass() { return new LoopExtractor(); }
bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
+ return false;
+
// Only visit top-level loops.
if (L->getParentLoop())
return false;
@@ -87,7 +90,7 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!L->isLoopSimplifyForm())
return false;
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
bool Changed = false;
// If there is more than one top-level loop in this function, extract all of
@@ -177,7 +180,7 @@ namespace {
LoadFile(BlockFile.c_str());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
}
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 3861421..8555d2c 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -50,6 +50,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
@@ -58,11 +59,10 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>
using namespace llvm;
@@ -108,12 +108,12 @@ public:
static const ComparableFunction TombstoneKey;
static DataLayout * const LookupOnly;
- ComparableFunction(Function *Func, DataLayout *TD)
- : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
+ ComparableFunction(Function *Func, const DataLayout *DL)
+ : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
Function *getFunc() const { return Func; }
unsigned getHash() const { return Hash; }
- DataLayout *getTD() const { return TD; }
+ const DataLayout *getDataLayout() const { return DL; }
// Drops AssertingVH reference to the function. Outside of debug mode, this
// does nothing.
@@ -125,11 +125,11 @@ public:
private:
explicit ComparableFunction(unsigned Hash)
- : Func(NULL), Hash(Hash), TD(NULL) {}
+ : Func(NULL), Hash(Hash), DL(NULL) {}
AssertingVH<Function> Func;
unsigned Hash;
- DataLayout *TD;
+ const DataLayout *DL;
};
const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
@@ -164,9 +164,9 @@ namespace {
/// side of claiming that two functions are different).
class FunctionComparator {
public:
- FunctionComparator(const DataLayout *TD, const Function *F1,
+ FunctionComparator(const DataLayout *DL, const Function *F1,
const Function *F2)
- : F1(F1), F2(F2), TD(TD) {}
+ : F1(F1), F2(F2), DL(DL) {}
/// Test whether the two functions have equivalent behaviour.
bool compare();
@@ -193,13 +193,58 @@ private:
return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
}
- /// Compare two Types, treating all pointer types as equal.
- bool isEquivalentType(Type *Ty1, Type *Ty2) const;
+ /// cmpType - Compares two types and defines a total ordering on the set
+ /// of types.
+ ///
+ /// Return values:
+ /// 0 if types are equal,
+ /// -1 if Left is less than Right,
+ /// +1 if Left is greater than Right.
+ ///
+ /// Description:
+ /// Comparison is broken into stages. As in a lexicographical comparison,
+ /// an earlier stage has higher priority.
+ /// At every stage, keep the total-ordering properties in mind.
+ ///
+ /// 0. Before comparing, we coerce pointer types in address space 0 to
+ /// integers.
+ /// We also don't bother comparing a type with itself, so we just
+ /// return 0 in that case.
+ ///
+ /// 1. If the types are of different kinds (different type IDs), return
+ /// the result of comparing the type IDs, treating them as numbers.
+ /// 2. If the types are vectors or integers, compare the Type* values as
+ /// numbers.
+ /// 3. The types have the same ID, so check whether they belong to the
+ /// following group:
+ /// * Void
+ /// * Float
+ /// * Double
+ /// * X86_FP80
+ /// * FP128
+ /// * PPC_FP128
+ /// * Label
+ /// * Metadata
+ /// If so, return 0; we can treat these types as equal purely because
+ /// their IDs are the same.
+ /// 4. If Left and Right are pointers, return the result of comparing
+ /// their address spaces as numbers. We can treat pointer types in the
+ /// same address space as equal.
+ /// 5. If the types are aggregates, expand both Left and Right and check
+ /// their element types the same way. If some stage yields Res != 0,
+ /// return it; otherwise return 0.
+ /// 6. All other cases are llvm_unreachable.
+ int cmpType(Type *TyL, Type *TyR) const;
+
+ bool isEquivalentType(Type *Ty1, Type *Ty2) const {
+ return cmpType(Ty1, Ty2) == 0;
+ }
+
+ int cmpNumbers(uint64_t L, uint64_t R) const;
// The two functions undergoing comparison.
const Function *F1, *F2;
- const DataLayout *TD;
+ const DataLayout *DL;
DenseMap<const Value *, const Value *> id_map;
DenseSet<const Value *> seen_values;
@@ -207,32 +252,39 @@ private:
}
-// Any two pointers in the same address space are equivalent, intptr_t and
-// pointers are equivalent. Otherwise, standard type equivalence rules apply.
-bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
+int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
+ if (L < R) return -1;
+ if (L > R) return 1;
+ return 0;
+}
- PointerType *PTy1 = dyn_cast<PointerType>(Ty1);
- PointerType *PTy2 = dyn_cast<PointerType>(Ty2);
+/// cmpType - Compares two types and defines a total ordering on the set of
+/// types.
+/// See method declaration comments for more details.
+int FunctionComparator::cmpType(Type *TyL, Type *TyR) const {
- if (TD) {
- if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = TD->getIntPtrType(Ty1);
- if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = TD->getIntPtrType(Ty2);
+ PointerType *PTyL = dyn_cast<PointerType>(TyL);
+ PointerType *PTyR = dyn_cast<PointerType>(TyR);
+
+ if (DL) {
+ if (PTyL && PTyL->getAddressSpace() == 0) TyL = DL->getIntPtrType(TyL);
+ if (PTyR && PTyR->getAddressSpace() == 0) TyR = DL->getIntPtrType(TyR);
}
- if (Ty1 == Ty2)
- return true;
+ if (TyL == TyR)
+ return 0;
- if (Ty1->getTypeID() != Ty2->getTypeID())
- return false;
+ if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
+ return Res;
- switch (Ty1->getTypeID()) {
+ switch (TyL->getTypeID()) {
default:
llvm_unreachable("Unknown type!");
// Fall through in Release mode.
case Type::IntegerTyID:
case Type::VectorTyID:
- // Ty1 == Ty2 would have returned true earlier.
- return false;
+ // TyL == TyR would have returned 0 earlier.
+ return cmpNumbers((uint64_t)TyL, (uint64_t)TyR);
case Type::VoidTyID:
case Type::FloatTyID:
@@ -242,51 +294,55 @@ bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
case Type::PPC_FP128TyID:
case Type::LabelTyID:
case Type::MetadataTyID:
- return true;
+ return 0;
case Type::PointerTyID: {
- assert(PTy1 && PTy2 && "Both types must be pointers here.");
- return PTy1->getAddressSpace() == PTy2->getAddressSpace();
+ assert(PTyL && PTyR && "Both types must be pointers here.");
+ return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
}
case Type::StructTyID: {
- StructType *STy1 = cast<StructType>(Ty1);
- StructType *STy2 = cast<StructType>(Ty2);
- if (STy1->getNumElements() != STy2->getNumElements())
- return false;
-
- if (STy1->isPacked() != STy2->isPacked())
- return false;
-
- for (unsigned i = 0, e = STy1->getNumElements(); i != e; ++i) {
- if (!isEquivalentType(STy1->getElementType(i), STy2->getElementType(i)))
- return false;
+ StructType *STyL = cast<StructType>(TyL);
+ StructType *STyR = cast<StructType>(TyR);
+ if (STyL->getNumElements() != STyR->getNumElements())
+ return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
+
+ if (STyL->isPacked() != STyR->isPacked())
+ return cmpNumbers(STyL->isPacked(), STyR->isPacked());
+
+ for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
+ if (int Res = cmpType(STyL->getElementType(i),
+ STyR->getElementType(i)))
+ return Res;
}
- return true;
+ return 0;
}
case Type::FunctionTyID: {
- FunctionType *FTy1 = cast<FunctionType>(Ty1);
- FunctionType *FTy2 = cast<FunctionType>(Ty2);
- if (FTy1->getNumParams() != FTy2->getNumParams() ||
- FTy1->isVarArg() != FTy2->isVarArg())
- return false;
+ FunctionType *FTyL = cast<FunctionType>(TyL);
+ FunctionType *FTyR = cast<FunctionType>(TyR);
+ if (FTyL->getNumParams() != FTyR->getNumParams())
+ return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());
- if (!isEquivalentType(FTy1->getReturnType(), FTy2->getReturnType()))
- return false;
+ if (FTyL->isVarArg() != FTyR->isVarArg())
+ return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());
- for (unsigned i = 0, e = FTy1->getNumParams(); i != e; ++i) {
- if (!isEquivalentType(FTy1->getParamType(i), FTy2->getParamType(i)))
- return false;
+ if (int Res = cmpType(FTyL->getReturnType(), FTyR->getReturnType()))
+ return Res;
+
+ for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
+ if (int Res = cmpType(FTyL->getParamType(i), FTyR->getParamType(i)))
+ return Res;
}
- return true;
+ return 0;
}
case Type::ArrayTyID: {
- ArrayType *ATy1 = cast<ArrayType>(Ty1);
- ArrayType *ATy2 = cast<ArrayType>(Ty2);
- return ATy1->getNumElements() == ATy2->getNumElements() &&
- isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
+ ArrayType *ATyL = cast<ArrayType>(TyL);
+ ArrayType *ATyR = cast<ArrayType>(TyR);
+ if (ATyL->getNumElements() != ATyR->getNumElements())
+ return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements());
+ return cmpType(ATyL->getElementType(), ATyR->getElementType());
}
}
}
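
[Editor's sketch] The shape that makes cmpType a total order is worth isolating: compare the most significant key first, let the first nonzero stage decide, and recurse element-wise otherwise. A self-contained toy analogue (plain C++, not LLVM code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static int cmpNums(uint64_t L, uint64_t R) {
      return L < R ? -1 : (L > R ? 1 : 0);
    }

    struct Ty { int Kind; std::vector<Ty> Elems; };

    static int cmpTy(const Ty &L, const Ty &R) {
      if (int Res = cmpNums(L.Kind, R.Kind))
        return Res;                                   // stage 1: kind/type ID
      if (int Res = cmpNums(L.Elems.size(), R.Elems.size()))
        return Res;                                   // stage 2: element count
      for (size_t i = 0, e = L.Elems.size(); i != e; ++i)
        if (int Res = cmpTy(L.Elems[i], R.Elems[i]))
          return Res;                                 // stage 3: elements, in order
      return 0;                                       // all stages tie: equal
    }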
@@ -341,7 +397,10 @@ bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
- CXI->getOrdering() == cast<AtomicCmpXchgInst>(I2)->getOrdering() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
@@ -359,13 +418,13 @@ bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
if (AS != GEP2->getPointerAddressSpace())
return false;
- if (TD) {
+ if (DL) {
// When we have target data, we can reduce the GEP down to the value in bytes
// added to the address.
- unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 1;
+ unsigned BitWidth = DL ? DL->getPointerSizeInBits(AS) : 1;
APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);
- if (GEP1->accumulateConstantOffset(*TD, Offset1) &&
- GEP2->accumulateConstantOffset(*TD, Offset2)) {
+ if (GEP1->accumulateConstantOffset(*DL, Offset1) &&
+ GEP2->accumulateConstantOffset(*DL, Offset2)) {
return Offset1 == Offset2;
}
}
@@ -561,7 +620,7 @@ public:
initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
}
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
private:
typedef DenseSet<ComparableFunction> FnSetType;
@@ -606,7 +665,7 @@ private:
FnSetType FnSet;
/// DataLayout for more accurate GEP comparisons. May be NULL.
- DataLayout *TD;
+ const DataLayout *DL;
/// Whether or not the target supports global aliases.
bool HasGlobalAliases;
@@ -623,7 +682,8 @@ ModulePass *llvm::createMergeFunctionsPass() {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : 0;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
@@ -646,7 +706,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
!F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
+ ComparableFunction CF = ComparableFunction(F, DL);
Changed |= insert(CF);
}
}
@@ -661,7 +721,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
+ ComparableFunction CF = ComparableFunction(F, DL);
Changed |= insert(CF);
}
}
@@ -682,28 +742,27 @@ bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
return false;
// One of these is a special "underlying pointer comparison only" object.
- if (LHS.getTD() == ComparableFunction::LookupOnly ||
- RHS.getTD() == ComparableFunction::LookupOnly)
+ if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||
+ RHS.getDataLayout() == ComparableFunction::LookupOnly)
return false;
- assert(LHS.getTD() == RHS.getTD() &&
+ assert(LHS.getDataLayout() == RHS.getDataLayout() &&
"Comparing functions for different targets");
- return FunctionComparator(LHS.getTD(), LHS.getFunc(),
+ return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(),
RHS.getFunc()).compare();
}
// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
- for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
- UI != UE;) {
- Value::use_iterator TheIter = UI;
+ for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
+ Use *U = &*UI;
++UI;
- CallSite CS(*TheIter);
- if (CS && CS.isCallee(TheIter)) {
+ CallSite CS(U->getUser());
+ if (CS && CS.isCallee(U)) {
remove(CS.getInstruction()->getParent()->getParent());
- TheIter.getUse().set(BitcastNew);
+ U->set(BitcastNew);
}
}
}
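
[Editor's note] The rewritten loop keeps the old pre-increment discipline under the new Use-based API: the iterator is advanced before U->set() unlinks the use from Old's use list, so iteration never stands on a node being removed. In miniature:

    // Capture, advance, then mutate.
    for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
      Use *U = &*UI;
      ++UI;                  // step off U first...
      U->set(BitcastNew);    // ...because this unlinks U from Old's list
    }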
@@ -723,7 +782,7 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
// Helper for writeThunk,
// Selects proper bitcast operation,
-// but a bit simplier then CastInst::getCastOpcode.
+// but a bit simpler than CastInst::getCastOpcode.
static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
Type *SrcTy = V->getType();
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
@@ -894,17 +953,14 @@ void MergeFunctions::removeUsers(Value *V) {
Value *V = Worklist.back();
Worklist.pop_back();
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- Use &U = UI.getUse();
- if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
+ for (User *U : V->users()) {
+ if (Instruction *I = dyn_cast<Instruction>(U)) {
remove(I->getParent()->getParent());
- } else if (isa<GlobalValue>(U.getUser())) {
+ } else if (isa<GlobalValue>(U)) {
// do nothing
- } else if (Constant *C = dyn_cast<Constant>(U.getUser())) {
- for (Value::use_iterator CUI = C->use_begin(), CUE = C->use_end();
- CUI != CUE; ++CUI)
- Worklist.push_back(*CUI);
+ } else if (Constant *C = dyn_cast<Constant>(U)) {
+ for (User *UU : C->users())
+ Worklist.push_back(UU);
}
}
}
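
[Editor's sketch] removeUsers is a standard worklist walk: instruction users are handled directly, constant users fan out to their own users, and GlobalValues cut the recursion off. A compact sketch of the same traversal; the Visited set is an addition here, guarding against a constant being queued twice (3.5's SmallPtrSet::insert returns bool):

    SmallVector<Value *, 16> Worklist(1, V);
    SmallPtrSet<Value *, 16> Visited;
    while (!Worklist.empty()) {
      Value *Cur = Worklist.pop_back_val();
      for (User *U : Cur->users()) {
        if (Instruction *I = dyn_cast<Instruction>(U))
          remove(I->getParent()->getParent());  // mark enclosing function dirty
        else if (isa<GlobalValue>(U))
          ;                                     // stop: globals end the chain
        else if (Constant *C = dyn_cast<Constant>(U))
          if (Visited.insert(C))                // each constant expands once
            Worklist.push_back(C);
      }
    }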
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index fa518cb..ac88aee 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -15,11 +15,11 @@
#define DEBUG_TYPE "partialinlining"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
using namespace llvm;
@@ -28,14 +28,14 @@ STATISTIC(NumPartialInlined, "Number of functions partially inlined");
namespace {
struct PartialInliner : public ModulePass {
- virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
+ void getAnalysisUsage(AnalysisUsage &AU) const override { }
static char ID; // Pass identification, replacement for typeid
PartialInliner() : ModulePass(ID) {
initializePartialInlinerPass(*PassRegistry::getPassRegistry());
}
-
- bool runOnModule(Module& M);
-
+
+ bool runOnModule(Module& M) override;
+
private:
Function* unswitchFunction(Function* F);
};
@@ -119,8 +119,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
// The CodeExtractor needs a dominator tree.
DominatorTree DT;
- DT.runOnFunction(*duplicateFunction);
-
+ DT.recalculate(*duplicateFunction);
+
// Extract the body of the if.
Function* extractedFunction
= CodeExtractor(toExtract, &DT).extractCodeRegion();
@@ -128,8 +128,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
InlineFunctionInfo IFI;
// Inline the top-level if test into all callers.
- std::vector<User*> Users(duplicateFunction->use_begin(),
- duplicateFunction->use_end());
+ std::vector<User *> Users(duplicateFunction->user_begin(),
+ duplicateFunction->user_end());
for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
UI != UE; ++UI)
if (CallInst *CI = dyn_cast<CallInst>(*UI))
@@ -162,9 +162,8 @@ bool PartialInliner::runOnModule(Module& M) {
if (currFunc->use_empty()) continue;
bool recursive = false;
- for (Function::use_iterator UI = currFunc->use_begin(),
- UE = currFunc->use_end(); UI != UE; ++UI)
- if (Instruction* I = dyn_cast<Instruction>(*UI))
+ for (User *U : currFunc->users())
+ if (Instruction* I = dyn_cast<Instruction>(U))
if (I->getParent()->getParent() == currFunc) {
recursive = true;
break;
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 24c5018..4a28b34 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -17,7 +17,7 @@
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
+#include "llvm/IR/Verifier.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
@@ -33,11 +33,6 @@ RunLoopVectorization("vectorize-loops", cl::Hidden,
cl::desc("Run the Loop vectorization passes"));
static cl::opt<bool>
-LateVectorization("late-vectorize", cl::init(true), cl::Hidden,
- cl::desc("Run the vectorization pasess late in the pass "
- "pipeline (after the inliner)"));
-
-static cl::opt<bool>
RunSLPVectorization("vectorize-slp", cl::Hidden,
cl::desc("Run the SLP vectorization passes"));
@@ -68,7 +63,6 @@ PassManagerBuilder::PassManagerBuilder() {
BBVectorize = RunBBVectorization;
SLPVectorize = RunSLPVectorization;
LoopVectorize = RunLoopVectorization;
- LateVectorize = LateVectorization;
RerollLoops = RunLoopRerolling;
}
@@ -200,11 +194,8 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
MPM.add(createLoopDeletionPass()); // Delete dead loops
- if (!LateVectorize && LoopVectorize)
- MPM.add(createLoopVectorizePass(DisableUnrollLoops));
-
if (!DisableUnrollLoops)
- MPM.add(createLoopUnrollPass()); // Unroll small loops
+ MPM.add(createSimpleLoopUnrollPass()); // Unroll small loops
addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
if (OptLevel > 1)
@@ -243,21 +234,21 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
- // As an experimental mode, run any vectorization passes in a separate
- // pipeline from the CGSCC pass manager that runs iteratively with the
- // inliner.
- if (LateVectorize && LoopVectorize) {
- // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
- // pass manager that we are specifically trying to avoid. To prevent this
- // we must insert a no-op module pass to reset the pass manager.
- MPM.add(createBarrierNoopPass());
+ // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
+ // pass manager that we are specifically trying to avoid. To prevent this
+ // we must insert a no-op module pass to reset the pass manager.
+ MPM.add(createBarrierNoopPass());
+ MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
+ // FIXME: Because of #pragma vectorize enable, the passes below are always
+ // inserted in the pipeline, even when the vectorizer doesn't run (e.g. at
+ // -O1 when no #pragma is found). It would be good to add these two passes
+ // only when the vectorizer has actually changed the code.
+ MPM.add(createInstructionCombiningPass());
+ MPM.add(createCFGSimplificationPass());
- // Add the various vectorization passes and relevant cleanup passes for
- // them since we are no longer in the middle of the main scalar pipeline.
- MPM.add(createLoopVectorizePass(DisableUnrollLoops));
- MPM.add(createInstructionCombiningPass());
- MPM.add(createCFGSimplificationPass());
- }
+ if (!DisableUnrollLoops)
+ MPM.add(createLoopUnrollPass()); // Unroll small loops
if (!DisableUnitAtATime) {
// FIXME: We shouldn't bother with this anymore.
@@ -343,6 +334,9 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
+ // More loops are countable now; try to vectorize them.
+ PM.add(createLoopVectorizePass(true, true));
+
// Cleanup and simplify the code after the scalar optimizations.
PM.add(createInstructionCombiningPass());
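
[Editor's sketch] With the -late-vectorize flag gone, the post-inliner placement behind the no-op barrier is now the only loop-vectorizer position in populateModulePassManager. A sketch of driving the 3.5 builder (buildPipeline is hypothetical):

    #include "llvm/IR/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    void buildPipeline(Module &M) {
      PassManagerBuilder PMB;
      PMB.OptLevel = 2;
      PMB.LoopVectorize = true;  // forwarded into createLoopVectorizePass
      PassManager MPM;           // 3.5-era legacy module pass manager
      PMB.populateModulePassManager(MPM);
      MPM.run(M);
    }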
diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp
index b160913..c61ec5e 100644
--- a/lib/Transforms/IPO/PruneEH.cpp
+++ b/lib/Transforms/IPO/PruneEH.cpp
@@ -21,12 +21,12 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CFG.h"
#include <algorithm>
using namespace llvm;
@@ -41,7 +41,7 @@ namespace {
}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(CallGraphSCC &SCC);
+ bool runOnSCC(CallGraphSCC &SCC) override;
bool SimplifyFunction(Function *F);
void DeleteBasicBlock(BasicBlock *BB);
@@ -51,7 +51,7 @@ namespace {
char PruneEH::ID = 0;
INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh",
"Remove unused exception handling info", false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraph)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PruneEH, "prune-eh",
"Remove unused exception handling info", false, false)
@@ -60,7 +60,7 @@ Pass *llvm::createPruneEHPass() { return new PruneEH(); }
bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
SmallPtrSet<CallGraphNode *, 8> SCCNodes;
- CallGraph &CG = getAnalysis<CallGraph>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
bool MadeChange = false;
// Fill SCCNodes with the elements of the SCC. Used for quickly
@@ -234,7 +234,7 @@ bool PruneEH::SimplifyFunction(Function *F) {
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
assert(pred_begin(BB) == pred_end(BB) && "BB is not dead!");
- CallGraph &CG = getAnalysis<CallGraph>();
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
CallGraphNode *CGN = CG[BB->getParent()];
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
diff --git a/lib/Transforms/IPO/StripDeadPrototypes.cpp b/lib/Transforms/IPO/StripDeadPrototypes.cpp
index f00830a..1c6532d 100644
--- a/lib/Transforms/IPO/StripDeadPrototypes.cpp
+++ b/lib/Transforms/IPO/StripDeadPrototypes.cpp
@@ -32,7 +32,7 @@ public:
StripDeadPrototypesPass() : ModulePass(ID) {
initializeStripDeadPrototypesPassPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
};
} // end anonymous namespace
diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp
index c4f5cfc..6d0be8f 100644
--- a/lib/Transforms/IPO/StripSymbols.cpp
+++ b/lib/Transforms/IPO/StripSymbols.cpp
@@ -23,8 +23,8 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
@@ -44,9 +44,9 @@ namespace {
initializeStripSymbolsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -59,9 +59,9 @@ namespace {
initializeStripNonDebugSymbolsPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -74,9 +74,9 @@ namespace {
initializeStripDebugDeclarePass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -89,9 +89,9 @@ namespace {
initializeStripDeadDebugInfoPass(*PassRegistry::getPassRegistry());
}
- virtual bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
};
@@ -132,11 +132,10 @@ ModulePass *llvm::createStripDeadDebugInfoPass() {
/// OnlyUsedBy - Return true if V is only used by Usr.
static bool OnlyUsedBy(Value *V, Value *Usr) {
- for(Value::use_iterator I = V->use_begin(), E = V->use_end(); I != E; ++I) {
- User *U = *I;
+ for (User *U : V->users())
if (U != Usr)
return false;
- }
+
return true;
}
@@ -147,7 +146,7 @@ static void RemoveDeadConstant(Constant *C) {
if (OnlyUsedBy(C->getOperand(i), C))
Operands.insert(cast<Constant>(C->getOperand(i)));
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
- if (!GV->hasLocalLinkage()) return; // Don't delete non static globals.
+ if (!GV->hasLocalLinkage()) return; // Don't delete non-static globals.
GV->eraseFromParent();
}
else if (!isa<Function>(C))
@@ -250,7 +249,7 @@ bool StripDebugDeclare::runOnModule(Module &M) {
if (Declare) {
while (!Declare->use_empty()) {
- CallInst *CI = cast<CallInst>(Declare->use_back());
+ CallInst *CI = cast<CallInst>(Declare->user_back());
Value *Arg1 = CI->getArgOperand(0);
Value *Arg2 = CI->getArgOperand(1);
assert(CI->use_empty() && "llvm.dbg intrinsic should have void result");
@@ -307,10 +306,7 @@ bool StripDeadDebugInfo::runOnModule(Module &M) {
SmallVector<Value *, 64> LiveSubprograms;
DenseSet<const MDNode *> VisitedSet;
- for (DebugInfoFinder::iterator CI = F.compile_unit_begin(),
- CE = F.compile_unit_end(); CI != CE; ++CI) {
- // Create our compile unit.
- DICompileUnit DIC(*CI);
+ for (DICompileUnit DIC : F.compile_units()) {
assert(DIC.Verify() && "DIC must verify as a DICompileUnit.");
// Create our live subprogram list.