author    Stephen Hines <srhines@google.com>    2014-07-21 00:45:20 -0700
committer Stephen Hines <srhines@google.com>    2014-07-21 00:45:20 -0700
commit    c6a4f5e819217e1e12c458aed8e7b122e23a3a58
tree      81b7dd2bb4370a392f31d332a566c903b5744764 /lib/Transforms
parent    19c6fbb3e8aaf74093afa08013134b61fa08f245
Update LLVM for rebase to r212749.
Includes a cherry-pick of:
  r212948 - fixes a small issue with atomic calls

Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
Diffstat (limited to 'lib/Transforms')
-rw-r--r-- lib/Transforms/IPO/ArgumentPromotion.cpp | 27
-rw-r--r-- lib/Transforms/IPO/DeadArgumentElimination.cpp | 39
-rw-r--r-- lib/Transforms/IPO/FunctionAttrs.cpp | 19
-rw-r--r-- lib/Transforms/IPO/GlobalDCE.cpp | 43
-rw-r--r-- lib/Transforms/IPO/GlobalOpt.cpp | 45
-rw-r--r-- lib/Transforms/IPO/MergeFunctions.cpp | 530
-rw-r--r-- lib/Transforms/IPO/PassManagerBuilder.cpp | 13
-rw-r--r-- lib/Transforms/InstCombine/InstCombine.h | 6
-rw-r--r-- lib/Transforms/InstCombine/InstCombineAddSub.cpp | 266
-rw-r--r-- lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 23
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCalls.cpp | 40
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCasts.cpp | 26
-rw-r--r-- lib/Transforms/InstCombine/InstCombineCompares.cpp | 82
-rw-r--r-- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 153
-rw-r--r-- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp | 11
-rw-r--r-- lib/Transforms/InstCombine/InstCombineSelect.cpp | 102
-rw-r--r-- lib/Transforms/InstCombine/InstCombineShifts.cpp | 5
-rw-r--r-- lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 8
-rw-r--r-- lib/Transforms/InstCombine/InstructionCombining.cpp | 310
-rw-r--r-- lib/Transforms/Instrumentation/AddressSanitizer.cpp | 397
-rw-r--r-- lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 65
-rw-r--r-- lib/Transforms/Instrumentation/DebugIR.cpp | 5
-rw-r--r-- lib/Transforms/Instrumentation/GCOVProfiling.cpp | 20
-rw-r--r-- lib/Transforms/Instrumentation/MemorySanitizer.cpp | 332
-rw-r--r-- lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 78
-rw-r--r-- lib/Transforms/Scalar/Android.mk | 2
-rw-r--r-- lib/Transforms/Scalar/CMakeLists.txt | 2
-rw-r--r-- lib/Transforms/Scalar/GVN.cpp | 20
-rw-r--r-- lib/Transforms/Scalar/GlobalMerge.cpp | 313
-rw-r--r-- lib/Transforms/Scalar/JumpThreading.cpp | 9
-rw-r--r-- lib/Transforms/Scalar/LICM.cpp | 73
-rw-r--r-- lib/Transforms/Scalar/LoadCombine.cpp | 268
-rw-r--r-- lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 4
-rw-r--r-- lib/Transforms/Scalar/LoopRerollPass.cpp | 6
-rw-r--r-- lib/Transforms/Scalar/LoopUnrollPass.cpp | 360
-rw-r--r-- lib/Transforms/Scalar/LowerAtomic.cpp | 5
-rw-r--r-- lib/Transforms/Scalar/Reassociate.cpp | 31
-rw-r--r-- lib/Transforms/Scalar/SCCP.cpp | 4
-rw-r--r-- lib/Transforms/Scalar/SROA.cpp | 18
-rw-r--r-- lib/Transforms/Scalar/SampleProfile.cpp | 7
-rw-r--r-- lib/Transforms/Scalar/Scalar.cpp | 1
-rw-r--r-- lib/Transforms/Scalar/ScalarReplAggregates.cpp | 6
-rw-r--r-- lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 633
-rw-r--r-- lib/Transforms/Scalar/Sink.cpp | 6
-rw-r--r-- lib/Transforms/Utils/Android.mk | 1
-rw-r--r-- lib/Transforms/Utils/CMakeLists.txt | 1
-rw-r--r-- lib/Transforms/Utils/CloneModule.cpp | 2
-rw-r--r-- lib/Transforms/Utils/InlineFunction.cpp | 9
-rw-r--r-- lib/Transforms/Utils/LoopSimplify.cpp | 17
-rw-r--r-- lib/Transforms/Utils/LoopUnroll.cpp | 28
-rw-r--r-- lib/Transforms/Utils/LoopUnrollRuntime.cpp | 23
-rw-r--r-- lib/Transforms/Utils/LowerSwitch.cpp | 129
-rw-r--r-- lib/Transforms/Utils/SimplifyCFG.cpp | 94
-rw-r--r-- lib/Transforms/Utils/SpecialCaseList.cpp | 221
-rw-r--r-- lib/Transforms/Vectorize/LoopVectorize.cpp | 373
-rw-r--r-- lib/Transforms/Vectorize/SLPVectorizer.cpp | 297
56 files changed, 3592 insertions(+), 2016 deletions(-)
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 377fa15..f9de54a 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -39,6 +39,8 @@
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
@@ -67,21 +69,24 @@ namespace {
bool runOnSCC(CallGraphSCC &SCC) override;
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
- : CallGraphSCCPass(ID), maxElements(maxElements) {
+ : CallGraphSCCPass(ID), DL(nullptr), maxElements(maxElements) {
initializeArgPromotionPass(*PassRegistry::getPassRegistry());
}
/// A vector used to hold the indices of a single GEP instruction
typedef std::vector<uint64_t> IndicesVector;
+ const DataLayout *DL;
private:
CallGraphNode *PromoteArguments(CallGraphNode *CGN);
bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
CallGraphNode *DoPromotion(Function *F,
SmallPtrSet<Argument*, 8> &ArgsToPromote,
SmallPtrSet<Argument*, 8> &ByValArgsToTransform);
+ bool doInitialization(CallGraph &CG) override;
/// The maximum number of elements to expand, or 0 for unlimited.
unsigned maxElements;
+ DenseMap<const Function *, DISubprogram> FunctionDIs;
};
}
@@ -100,6 +105,9 @@ Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false, LocalChange;
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+
do { // Iterate until we stop promoting from this SCC.
LocalChange = false;
// Attempt to promote arguments from all functions in this SCC.
@@ -215,7 +223,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
/// all callers pass in a valid pointer for the specified function argument.
-static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
+static bool AllCallersPassInValidPointerForArgument(Argument *Arg,
+ const DataLayout *DL) {
Function *Callee = Arg->getParent();
unsigned ArgNo = Arg->getArgNo();
@@ -226,7 +235,7 @@ static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
CallSite CS(U);
assert(CS && "Should only have direct calls!");
- if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
+ if (!CS.getArgument(ArgNo)->isDereferenceablePointer(DL))
return false;
}
return true;
@@ -334,7 +343,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
GEPIndicesSet ToPromote;
// If the pointer is always valid, any load with first index 0 is valid.
- if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg))
+ if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg, DL))
SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
// First, iterate the entry block and mark loads of (geps of) arguments as
@@ -604,6 +613,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
NF->copyAttributesFrom(F);
+ // Patch the pointer to LLVM function in debug info descriptor.
+ auto DI = FunctionDIs.find(F);
+ if (DI != FunctionDIs.end())
+ DI->second.replaceFunction(NF);
DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
<< "From: " << *F);
@@ -741,6 +754,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
+ New->setDebugLoc(Call->getDebugLoc());
Args.clear();
AttributesVec.clear();
@@ -902,3 +916,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
return NF_CGN;
}
+
+bool ArgPromotion::doInitialization(CallGraph &CG) {
+ FunctionDIs = makeSubprogramMap(CG.getModule());
+ return CallGraphSCCPass::doInitialization(CG);
+}
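
The two hunks above follow one pattern: DoPromotion creates a replacement function NF, and the FunctionDIs side-table built in doInitialization is consulted so the debug-info descriptor can be re-pointed at NF. A minimal standalone sketch of that bookkeeping, in plain C++ with a hypothetical Descriptor type standing in for DISubprogram (not the LLVM API):

#include <cassert>
#include <string>
#include <unordered_map>

struct Function { std::string Name; };
struct Descriptor { Function *Fn = nullptr; };  // stands in for DISubprogram

// When a pass replaces Old with New, any side-table entry that still
// points at Old must be patched, as DI->second.replaceFunction(NF) does.
void replaceFunction(std::unordered_map<const Function *, Descriptor> &DIs,
                     Function *Old, Function *New) {
  auto It = DIs.find(Old);
  if (It == DIs.end())
    return;             // Old carried no debug info
  Descriptor D = It->second;
  D.Fn = New;           // patch the function pointer in the descriptor
  DIs.erase(It);
  DIs.emplace(New, D);  // re-key so later lookups find the new function
}

int main() {
  Function F{"f"}, NF{"f.promoted"};
  std::unordered_map<const Function *, Descriptor> DIs;
  DIs.emplace(&F, Descriptor{&F});
  replaceFunction(DIs, &F, &NF);
  assert(DIs.count(&NF) && DIs.at(&NF).Fn == &NF);
}

(The real pass leaves the map keyed by the old Function and only patches the descriptor; the re-keying here is just to keep the toy lookup meaningful.)
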
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 284b896..ac3853d 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -127,8 +127,7 @@ namespace {
// As the code generation for module is finished (and DIBuilder is
// finalized) we assume that subprogram descriptors won't be changed, and
// they are stored in map for short duration anyway.
- typedef DenseMap<Function*, DISubprogram> FunctionDIMap;
- FunctionDIMap FunctionDIs;
+ DenseMap<const Function *, DISubprogram> FunctionDIs;
protected:
// DAH uses this to specify a different ID.
@@ -150,7 +149,6 @@ namespace {
unsigned RetValNum = 0);
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
- void CollectFunctionDIs(Module &M);
void SurveyFunction(const Function &F);
void MarkValue(const RetOrArg &RA, Liveness L,
const UseVector &MaybeLiveUses);
@@ -190,35 +188,6 @@ INITIALIZE_PASS(DAH, "deadarghaX0r",
ModulePass *llvm::createDeadArgEliminationPass() { return new DAE(); }
ModulePass *llvm::createDeadArgHackingPass() { return new DAH(); }
-/// CollectFunctionDIs - Map each function in the module to its debug info
-/// descriptor.
-void DAE::CollectFunctionDIs(Module &M) {
- FunctionDIs.clear();
-
- for (Module::named_metadata_iterator I = M.named_metadata_begin(),
- E = M.named_metadata_end(); I != E; ++I) {
- NamedMDNode &NMD = *I;
- for (unsigned MDIndex = 0, MDNum = NMD.getNumOperands();
- MDIndex < MDNum; ++MDIndex) {
- MDNode *Node = NMD.getOperand(MDIndex);
- if (!DIDescriptor(Node).isCompileUnit())
- continue;
- DICompileUnit CU(Node);
- const DIArray &SPs = CU.getSubprograms();
- for (unsigned SPIndex = 0, SPNum = SPs.getNumElements();
- SPIndex < SPNum; ++SPIndex) {
- DISubprogram SP(SPs.getElement(SPIndex));
- assert((!SP || SP.isSubprogram()) &&
- "A MDNode in subprograms of a CU should be null or a DISubprogram.");
- if (!SP)
- continue;
- if (Function *F = SP.getFunction())
- FunctionDIs[F] = SP;
- }
- }
- }
-}
-
/// DeleteDeadVarargs - If this is a function that takes a ... list, and if
/// llvm.vastart is never called, the varargs list is dead for the function.
bool DAE::DeleteDeadVarargs(Function &Fn) {
@@ -327,7 +296,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
}
// Patch the pointer to LLVM function in debug info descriptor.
- FunctionDIMap::iterator DI = FunctionDIs.find(&Fn);
+ auto DI = FunctionDIs.find(&Fn);
if (DI != FunctionDIs.end())
DI->second.replaceFunction(NF);
@@ -1087,7 +1056,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}
// Patch the pointer to LLVM function in debug info descriptor.
- FunctionDIMap::iterator DI = FunctionDIs.find(F);
+ auto DI = FunctionDIs.find(F);
if (DI != FunctionDIs.end())
DI->second.replaceFunction(NF);
@@ -1101,7 +1070,7 @@ bool DAE::runOnModule(Module &M) {
bool Changed = false;
// Collect debug info descriptors for functions.
- CollectFunctionDIs(M);
+ FunctionDIs = makeSubprogramMap(M);
// First pass: Do a simple check to see if any functions can have their "..."
// removed. We can do this if they never call va_start. This loop cannot be
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index fed8839..8174df9 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -449,14 +449,29 @@ determinePointerReadAttrs(Argument *A,
case Instruction::Call:
case Instruction::Invoke: {
+ bool Captures = true;
+
+ if (I->getType()->isVoidTy())
+ Captures = false;
+
+ auto AddUsersToWorklistIfCapturing = [&] {
+ if (Captures)
+ for (Use &UU : I->uses())
+ if (Visited.insert(&UU))
+ Worklist.push_back(&UU);
+ };
+
CallSite CS(I);
- if (CS.doesNotAccessMemory())
+ if (CS.doesNotAccessMemory()) {
+ AddUsersToWorklistIfCapturing();
continue;
+ }
Function *F = CS.getCalledFunction();
if (!F) {
if (CS.onlyReadsMemory()) {
IsRead = true;
+ AddUsersToWorklistIfCapturing();
continue;
}
return Attribute::None;
@@ -471,6 +486,7 @@ determinePointerReadAttrs(Argument *A,
"More params than args in non-varargs call.");
return Attribute::None;
}
+ Captures &= !CS.doesNotCapture(A - B);
if (SCCNodes.count(AI))
continue;
if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(A - B))
@@ -479,6 +495,7 @@ determinePointerReadAttrs(Argument *A,
IsRead = true;
}
}
+ AddUsersToWorklistIfCapturing();
break;
}
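
The lambda above exists because the decision to follow a call's own uses is only final after the argument scan; defining AddUsersToWorklistIfCapturing once and invoking it on every continue path keeps the enqueue logic in one place. A reduced sketch of the same worklist shape, with a toy node graph instead of LLVM use-lists:

#include <iostream>
#include <set>
#include <vector>

struct Node {
  int Id;
  bool Transparent;           // analogous to "does not capture its input"
  std::vector<Node *> Users;  // consumers of this node's result
};

void walk(Node *Root) {
  std::vector<Node *> Worklist{Root};
  std::set<Node *> Visited{Root};
  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    std::cout << "visit " << N->Id << "\n";

    bool FollowUsers = N->Transparent;  // plays the role of Captures
    // Deferred enqueue: call this on every exit path of the per-node
    // analysis, exactly like AddUsersToWorklistIfCapturing above.
    auto EnqueueUsersIfNeeded = [&] {
      if (FollowUsers)
        for (Node *U : N->Users)
          if (Visited.insert(U).second)
            Worklist.push_back(U);
    };
    EnqueueUsersIfNeeded();
  }
}

int main() {
  Node C{2, false, {}};
  Node B{1, true, {&C}};
  Node A{0, true, {&B}};
  walk(&A);  // visits 0, 1, 2; node 2's users (if any) are not followed
}
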
diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp
index 9decddc..7e7a4c0 100644
--- a/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/lib/Transforms/IPO/GlobalDCE.cpp
@@ -62,7 +62,7 @@ static bool isEmptyFunction(Function *F) {
if (Entry.size() != 1 || !isa<ReturnInst>(Entry.front()))
return false;
ReturnInst &RI = cast<ReturnInst>(Entry.front());
- return RI.getReturnValue() == NULL;
+ return RI.getReturnValue() == nullptr;
}
char GlobalDCE::ID = 0;
@@ -77,13 +77,19 @@ bool GlobalDCE::runOnModule(Module &M) {
// Remove empty functions from the global ctors list.
Changed |= optimizeGlobalCtorsList(M, isEmptyFunction);
+ typedef std::multimap<const Comdat *, GlobalValue *> ComdatGVPairsTy;
+ ComdatGVPairsTy ComdatGVPairs;
+
// Loop over the module, adding globals which are obviously necessary.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Functions with external linkage are needed if they have a body
- if (!I->isDiscardableIfUnused() &&
- !I->isDeclaration() && !I->hasAvailableExternallyLinkage())
- GlobalIsNeeded(I);
+ if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) {
+ if (!I->isDiscardableIfUnused())
+ GlobalIsNeeded(I);
+ else if (const Comdat *C = I->getComdat())
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
}
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
@@ -91,17 +97,38 @@ bool GlobalDCE::runOnModule(Module &M) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible & appending globals are needed, if they have an
// initializer.
- if (!I->isDiscardableIfUnused() &&
- !I->isDeclaration() && !I->hasAvailableExternallyLinkage())
- GlobalIsNeeded(I);
+ if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) {
+ if (!I->isDiscardableIfUnused())
+ GlobalIsNeeded(I);
+ else if (const Comdat *C = I->getComdat())
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
}
for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I) {
Changed |= RemoveUnusedGlobalValue(*I);
// Externally visible aliases are needed.
- if (!I->isDiscardableIfUnused())
+ if (!I->isDiscardableIfUnused()) {
GlobalIsNeeded(I);
+ } else if (const Comdat *C = I->getComdat()) {
+ ComdatGVPairs.insert(std::make_pair(C, I));
+ }
+ }
+
+ for (ComdatGVPairsTy::iterator I = ComdatGVPairs.begin(),
+ E = ComdatGVPairs.end();
+ I != E;) {
+ ComdatGVPairsTy::iterator UB = ComdatGVPairs.upper_bound(I->first);
+ bool CanDiscard = std::all_of(I, UB, [](ComdatGVPairsTy::value_type Pair) {
+ return Pair.second->isDiscardableIfUnused();
+ });
+ if (!CanDiscard) {
+ std::for_each(I, UB, [this](ComdatGVPairsTy::value_type Pair) {
+ GlobalIsNeeded(Pair.second);
+ });
+ }
+ I = UB;
}
// Now that all globals which are needed are in the AliveGlobals set, we loop
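
Grouping through a multimap lets the loop above make one decision per comdat: walk each equal-range with upper_bound, discard the group only if std::all_of its members are discardable, and otherwise mark every member needed. The same shape in a self-contained sketch with toy types:

#include <algorithm>
#include <iostream>
#include <map>
#include <string>

struct GV {
  std::string Name;
  bool DiscardableIfUnused;
};

int main() {
  GV A{"a", true}, B{"b", false}, C{"c", true}, D{"d", true};
  std::multimap<std::string, GV *> Comdats{
      {"grp1", &A}, {"grp1", &B}, {"grp2", &C}, {"grp2", &D}};

  for (auto I = Comdats.begin(), E = Comdats.end(); I != E;) {
    auto UB = Comdats.upper_bound(I->first);  // end of this comdat's range
    // A comdat lives or dies as a unit: one needed member keeps them all.
    bool CanDiscard = std::all_of(I, UB, [](const auto &Pair) {
      return Pair.second->DiscardableIfUnused;
    });
    if (!CanDiscard)
      std::for_each(I, UB, [](const auto &Pair) {
        std::cout << Pair.second->Name << " kept alive by its comdat\n";
      });
    I = UB;
  }
}

Here grp1 prints both members (b is needed, so a is kept too), while grp2 stays fully discardable.
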
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index ae80c43..c1d0d3b 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
@@ -1699,9 +1700,6 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
- if (!GV->isDiscardableIfUnused())
- return false;
-
// Do more involved optimizations if the global is internal.
GV->removeDeadConstantUsers();
@@ -1910,7 +1908,7 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
Function *F = FI++;
// Functions without names cannot be referenced outside this module.
- if (!F->hasName() && !F->isDeclaration())
+ if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
F->setLinkage(GlobalValue::InternalLinkage);
F->removeDeadConstantUsers();
if (F->isDefTriviallyDead()) {
@@ -1944,11 +1942,18 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
bool GlobalOpt::OptimizeGlobalVars(Module &M) {
bool Changed = false;
+
+ SmallSet<const Comdat *, 8> NotDiscardableComdats;
+ for (const GlobalVariable &GV : M.globals())
+ if (const Comdat *C = GV.getComdat())
+ if (!GV.isDiscardableIfUnused())
+ NotDiscardableComdats.insert(C);
+
for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
GVI != E; ) {
GlobalVariable *GV = GVI++;
// Global variables without names cannot be referenced outside this module.
- if (!GV->hasName() && !GV->isDeclaration())
+ if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
GV->setLinkage(GlobalValue::InternalLinkage);
// Simplify the initializer.
if (GV->hasInitializer())
@@ -1958,7 +1963,12 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
GV->setInitializer(New);
}
- Changed |= ProcessGlobal(GV, GVI);
+ if (GV->isDiscardableIfUnused()) {
+ if (const Comdat *C = GV->getComdat())
+ if (NotDiscardableComdats.count(C))
+ continue;
+ Changed |= ProcessGlobal(GV, GVI);
+ }
}
return Changed;
}
@@ -1980,10 +1990,13 @@ isSimpleEnoughValueToCommit(Constant *C,
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
const DataLayout *DL) {
- // Simple integer, undef, constant aggregate zero, global addresses, etc are
- // all supported.
- if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
- isa<GlobalValue>(C))
+ // Simple global addresses are supported; do not allow dllimport or
+ // thread-local globals.
+ if (auto *GV = dyn_cast<GlobalValue>(C))
+ return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();
+
+ // Simple integer, undef, constant aggregate zero, etc are all supported.
+ if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
return true;
// Aggregate values are safe if all their elements are.
@@ -2054,8 +2067,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
- // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
- // external globals.
+ // Do not allow weak/*_odr/linkonce linkage or external globals.
return GV->hasUniqueInitializer();
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
@@ -2846,14 +2858,19 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
I != E;) {
Module::alias_iterator J = I++;
// Aliases without names cannot be referenced outside this module.
- if (!J->hasName() && !J->isDeclaration())
+ if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
J->setLinkage(GlobalValue::InternalLinkage);
// If the aliasee may change at link time, nothing can be done - bail out.
if (J->mayBeOverridden())
continue;
Constant *Aliasee = J->getAliasee();
- GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
+ GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
+ // We can't trivially replace the alias with the aliasee if the aliasee is
+ // non-trivial in some way.
+ // TODO: Try to handle non-zero GEPs of local aliasees.
+ if (!Target)
+ continue;
Target->removeDeadConstantUsers();
// Make all users of the alias use the aliasee instead.
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index c3a2b12..559ef0b 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -9,13 +9,24 @@
//
// This pass looks for equivalent functions that are mergable and folds them.
//
-// A hash is computed from the function, based on its type and number of
-// basic blocks.
+// An order relation is defined on the set of functions. It is built on a
+// function comparison procedure that returns
+// 0 when the functions are equal,
+// -1 when the left function is less than the right function, and
+// 1 in the opposite case. We need a total ordering, so we have to maintain
+// four properties on the set of functions:
+// a <= a (reflexivity)
+// if a <= b and b <= a then a = b (antisymmetry)
+// if a <= b and b <= c then a <= c (transitivity).
+// for all a and b: a <= b or b <= a (totality).
//
-// Once all hashes are computed, we perform an expensive equality comparison
-// on each function pair. This takes n^2/2 comparisons per bucket, so it's
-// important that the hash function be high quality. The equality comparison
-// iterates through each instruction in each basic block.
+// The comparison iterates through each instruction in each basic block.
+// Functions are kept in a binary tree, and for each new function F we
+// perform a lookup in that tree.
+// In practice it works the following way:
+// -- We define a Function* container class with a custom "operator<" (FunctionPtr).
+// -- "FunctionPtr" instances are stored in a std::set collection, so every
+// std::set::insert operation gives a result in log(N) time.
//
// When a match is found the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
@@ -31,9 +42,6 @@
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such functions.
//
-// * switch from n^2 pair-wise comparisons to an n-way comparison for each
-// bucket.
-//
// * be smarter about bitcasts.
//
// In order to fold functions, we will sometimes add either bitcast instructions
@@ -41,6 +49,36 @@
// analysis since the two functions differ where one has a bitcast and the
// other doesn't. We should learn to look through bitcasts.
//
+// * Compare complex types with pointer types inside.
+// * Compare cross-reference cases.
+// * Compare complex expressions.
+//
+// All three issues above can be described as the ability to prove that
+// fA == fB == fC == fE == fF == fG in the example below:
+//
+// void fA() {
+// fB();
+// }
+// void fB() {
+// fA();
+// }
+//
+// void fE() {
+// fF();
+// }
+// void fF() {
+// fG();
+// }
+// void fG() {
+// fE();
+// }
+//
+// The simplest cross-reference case (fA <--> fB) was implemented in previous
+// versions of MergeFunctions, though it appeared in only two function pairs
+// in the test-suite (which contains >50k functions).
+// The ability to detect complex cross-referencing (e.g. A->B->C->D->A),
+// however, could cover many more cases.
+//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO.h"
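
The payoff of the total ordering described above is that std::set can do the bucketing: a three-way compare() satisfying the four properties reduces to an operator< (compare() == -1), and every insertion either lands a unique element or collides with its merge candidate in O(log N). A reduced sketch of the FunctionPtr idea that appears later in this patch, with strings standing in for functions:

#include <cassert>
#include <set>
#include <string>

// Stand-in for FunctionComparator::compare(): 0 equal, -1 less, 1 greater.
// Cheap criteria (here, size) are checked before the expensive walk.
static int compare(const std::string &L, const std::string &R) {
  if (L.size() != R.size())
    return L.size() < R.size() ? -1 : 1;
  int C = L.compare(R);
  return C < 0 ? -1 : (C == 0 ? 0 : 1);
}

// Mirrors the FunctionPtr wrapper: std::set only needs operator<, and two
// elements that compare equal are treated as duplicates to merge.
struct FnPtr {
  const std::string *F;
  bool operator<(const FnPtr &RHS) const { return compare(*F, *RHS.F) == -1; }
};

int main() {
  std::string A = "abc", B = "abd", Dup = "abc";
  std::set<FnPtr> FnTree;
  assert(FnTree.insert({&A}).second);     // unique: inserted
  assert(FnTree.insert({&B}).second);     // unique: inserted
  assert(!FnTree.insert({&Dup}).second);  // equal to A: the merge case
}
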
@@ -60,6 +98,7 @@
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -73,89 +112,12 @@ STATISTIC(NumThunksWritten, "Number of thunks generated");
STATISTIC(NumAliasesWritten, "Number of aliases generated");
STATISTIC(NumDoubleWeak, "Number of new functions created");
-/// Returns the type id for a type to be hashed. We turn pointer types into
-/// integers here because the actual compare logic below considers pointers and
-/// integers of the same size as equal.
-static Type::TypeID getTypeIDForHash(Type *Ty) {
- if (Ty->isPointerTy())
- return Type::IntegerTyID;
- return Ty->getTypeID();
-}
-
-/// Creates a hash-code for the function which is the same for any two
-/// functions that will compare equal, without looking at the instructions
-/// inside the function.
-static unsigned profileFunction(const Function *F) {
- FunctionType *FTy = F->getFunctionType();
-
- FoldingSetNodeID ID;
- ID.AddInteger(F->size());
- ID.AddInteger(F->getCallingConv());
- ID.AddBoolean(F->hasGC());
- ID.AddBoolean(FTy->isVarArg());
- ID.AddInteger(getTypeIDForHash(FTy->getReturnType()));
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- ID.AddInteger(getTypeIDForHash(FTy->getParamType(i)));
- return ID.ComputeHash();
-}
-
-namespace {
-
-/// ComparableFunction - A struct that pairs together functions with a
-/// DataLayout so that we can keep them together as elements in the DenseSet.
-class ComparableFunction {
-public:
- static const ComparableFunction EmptyKey;
- static const ComparableFunction TombstoneKey;
- static DataLayout * const LookupOnly;
-
- ComparableFunction(Function *Func, const DataLayout *DL)
- : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
-
- Function *getFunc() const { return Func; }
- unsigned getHash() const { return Hash; }
- const DataLayout *getDataLayout() const { return DL; }
-
- // Drops AssertingVH reference to the function. Outside of debug mode, this
- // does nothing.
- void release() {
- assert(Func &&
- "Attempted to release function twice, or release empty/tombstone!");
- Func = nullptr;
- }
-
-private:
- explicit ComparableFunction(unsigned Hash)
- : Func(nullptr), Hash(Hash), DL(nullptr) {}
-
- AssertingVH<Function> Func;
- unsigned Hash;
- const DataLayout *DL;
-};
-
-const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
-const ComparableFunction ComparableFunction::TombstoneKey =
- ComparableFunction(1);
-DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1);
-
-}
-
-namespace llvm {
- template <>
- struct DenseMapInfo<ComparableFunction> {
- static ComparableFunction getEmptyKey() {
- return ComparableFunction::EmptyKey;
- }
- static ComparableFunction getTombstoneKey() {
- return ComparableFunction::TombstoneKey;
- }
- static unsigned getHashValue(const ComparableFunction &CF) {
- return CF.getHash();
- }
- static bool isEqual(const ComparableFunction &LHS,
- const ComparableFunction &RHS);
- };
-}
+static cl::opt<unsigned> NumFunctionsForSanityCheck(
+ "mergefunc-sanity",
+ cl::desc("How many functions in the module could be used for "
+ "the MergeFunctions pass sanity check. "
+ "'0' disables this check. Works only with the '-debug' flag."),
+ cl::init(0), cl::Hidden);
namespace {
@@ -167,14 +129,14 @@ class FunctionComparator {
public:
FunctionComparator(const DataLayout *DL, const Function *F1,
const Function *F2)
- : F1(F1), F2(F2), DL(DL) {}
+ : FnL(F1), FnR(F2), DL(DL) {}
/// Test whether the two functions have equivalent behaviour.
- bool compare();
+ int compare();
private:
/// Test whether two basic blocks have equivalent behaviour.
- bool compare(const BasicBlock *BB1, const BasicBlock *BB2);
+ int compare(const BasicBlock *BBL, const BasicBlock *BBR);
/// Constants comparison.
/// It's analogous to a lexicographical comparison between hypothetical numbers
@@ -300,10 +262,6 @@ private:
/// see comments for sn_mapL and sn_mapR.
int cmpValues(const Value *L, const Value *R);
- bool enumerate(const Value *V1, const Value *V2) {
- return cmpValues(V1, V2) == 0;
- }
-
/// Compare two Instructions for equivalence, similar to
/// Instruction::isSameOperationAs but with modifications to the type
/// comparison.
@@ -325,15 +283,11 @@ private:
/// 6.1.Load: volatile (as boolean flag)
/// 6.2.Load: alignment (as integer numbers)
/// 6.3.Load: synch-scope (as integer numbers)
+ /// 6.4.Load: range metadata (as integer numbers)
/// At this stage it's better to see the code, since it's no more than 10-15
/// lines for a particular instruction, and could change from time to time.
int cmpOperation(const Instruction *L, const Instruction *R) const;
- bool isEquivalentOperation(const Instruction *I1,
- const Instruction *I2) const {
- return cmpOperation(I1, I2) == 0;
- }
-
/// Compare two GEPs for equivalent pointer arithmetic.
/// Parts to be compared for each comparison stage,
/// most significant stage first:
@@ -348,14 +302,6 @@ private:
return cmpGEP(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
}
- bool isEquivalentGEP(const GEPOperator *GEP1, const GEPOperator *GEP2) {
- return cmpGEP(GEP1, GEP2) == 0;
- }
- bool isEquivalentGEP(const GetElementPtrInst *GEP1,
- const GetElementPtrInst *GEP2) {
- return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
- }
-
/// cmpType - compares two types,
/// defines total ordering among the types set.
///
@@ -398,10 +344,6 @@ private:
/// 6. For all other cases put llvm_unreachable.
int cmpType(Type *TyL, Type *TyR) const;
- bool isEquivalentType(Type *Ty1, Type *Ty2) const {
- return cmpType(Ty1, Ty2) == 0;
- }
-
int cmpNumbers(uint64_t L, uint64_t R) const;
int cmpAPInt(const APInt &L, const APInt &R) const;
@@ -410,7 +352,7 @@ private:
int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
// The two functions undergoing comparison.
- const Function *F1, *F2;
+ const Function *FnL, *FnR;
const DataLayout *DL;
@@ -450,6 +392,18 @@ private:
DenseMap<const Value*, int> sn_mapL, sn_mapR;
};
+class FunctionPtr {
+ AssertingVH<Function> F;
+ const DataLayout *DL;
+
+public:
+ FunctionPtr(Function *F, const DataLayout *DL) : F(F), DL(DL) {}
+ Function *getFunc() const { return F; }
+ void release() { F = 0; }
+ bool operator<(const FunctionPtr &RHS) const {
+ return (FunctionComparator(DL, F, RHS.getFunc()).compare()) == -1;
+ }
+};
}
int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
@@ -788,7 +742,11 @@ int FunctionComparator::cmpOperation(const Instruction *L,
if (int Res =
cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope());
+ if (int Res =
+ cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
+ return Res;
+ return cmpNumbers((uint64_t)LI->getMetadata(LLVMContext::MD_range),
+ (uint64_t)cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
}
if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
if (int Res =
@@ -847,6 +805,9 @@ int FunctionComparator::cmpOperation(const Instruction *L,
if (int Res = cmpNumbers(CXI->isVolatile(),
cast<AtomicCmpXchgInst>(R)->isVolatile()))
return Res;
+ if (int Res = cmpNumbers(CXI->isWeak(),
+ cast<AtomicCmpXchgInst>(R)->isWeak()))
+ return Res;
if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
return Res;
@@ -914,13 +875,13 @@ int FunctionComparator::cmpGEP(const GEPOperator *GEPL,
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) {
// Catch self-reference case.
- if (L == F1) {
- if (R == F2)
+ if (L == FnL) {
+ if (R == FnR)
return 0;
return -1;
}
- if (R == F2) {
- if (L == F1)
+ if (R == FnR) {
+ if (L == FnL)
return 0;
return 1;
}
@@ -954,90 +915,102 @@ int FunctionComparator::cmpValues(const Value *L, const Value *R) {
return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}
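
cmpValues (context above) orders non-constant values purely by serial number: sn_mapL and sn_mapR assign each value the index of its first occurrence, so two functions agree exactly when their values recur in the same positions, regardless of names or addresses. The idea in isolation, over token streams rather than IR values:

#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Compare two streams by order of first occurrence: "a b a" matches
// "x y x" (serials 0 1 0 on both sides) but not "x y y" (0 1 1).
static int cmpStreams(const std::vector<std::string> &L,
                      const std::vector<std::string> &R) {
  std::map<std::string, int> SnL, SnR;  // value -> serial, like sn_mapL/R
  for (std::size_t i = 0; i < L.size() && i < R.size(); ++i) {
    int SL = SnL.emplace(L[i], SnL.size()).first->second;
    int SR = SnR.emplace(R[i], SnR.size()).first->second;
    if (SL != SR)
      return SL < SR ? -1 : 1;
  }
  if (L.size() == R.size())
    return 0;
  return L.size() < R.size() ? -1 : 1;
}

int main() {
  std::cout << cmpStreams({"a", "b", "a"}, {"x", "y", "x"}) << "\n";  // 0
  std::cout << cmpStreams({"a", "b", "a"}, {"x", "y", "y"}) << "\n";  // -1
}
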
// Test whether two basic blocks have equivalent behaviour.
-bool FunctionComparator::compare(const BasicBlock *BB1, const BasicBlock *BB2) {
- BasicBlock::const_iterator F1I = BB1->begin(), F1E = BB1->end();
- BasicBlock::const_iterator F2I = BB2->begin(), F2E = BB2->end();
+int FunctionComparator::compare(const BasicBlock *BBL, const BasicBlock *BBR) {
+ BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
+ BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
do {
- if (!enumerate(F1I, F2I))
- return false;
+ if (int Res = cmpValues(InstL, InstR))
+ return Res;
- if (const GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(F1I)) {
- const GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(F2I);
- if (!GEP2)
- return false;
+ const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL);
+ const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR);
- if (!enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
- return false;
+ if (GEPL && !GEPR)
+ return 1;
+ if (GEPR && !GEPL)
+ return -1;
- if (!isEquivalentGEP(GEP1, GEP2))
- return false;
+ if (GEPL && GEPR) {
+ if (int Res =
+ cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
+ return Res;
+ if (int Res = cmpGEP(GEPL, GEPR))
+ return Res;
} else {
- if (!isEquivalentOperation(F1I, F2I))
- return false;
-
- assert(F1I->getNumOperands() == F2I->getNumOperands());
- for (unsigned i = 0, e = F1I->getNumOperands(); i != e; ++i) {
- Value *OpF1 = F1I->getOperand(i);
- Value *OpF2 = F2I->getOperand(i);
-
- if (!enumerate(OpF1, OpF2))
- return false;
+ if (int Res = cmpOperation(InstL, InstR))
+ return Res;
+ assert(InstL->getNumOperands() == InstR->getNumOperands());
- if (OpF1->getValueID() != OpF2->getValueID() ||
- !isEquivalentType(OpF1->getType(), OpF2->getType()))
- return false;
+ for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
+ Value *OpL = InstL->getOperand(i);
+ Value *OpR = InstR->getOperand(i);
+ if (int Res = cmpValues(OpL, OpR))
+ return Res;
+ if (int Res = cmpNumbers(OpL->getValueID(), OpR->getValueID()))
+ return Res;
+ // TODO: Already checked in cmpOperation
+ if (int Res = cmpType(OpL->getType(), OpR->getType()))
+ return Res;
}
}
- ++F1I, ++F2I;
- } while (F1I != F1E && F2I != F2E);
+ ++InstL, ++InstR;
+ } while (InstL != InstLE && InstR != InstRE);
- return F1I == F1E && F2I == F2E;
+ if (InstL != InstLE && InstR == InstRE)
+ return 1;
+ if (InstL == InstLE && InstR != InstRE)
+ return -1;
+ return 0;
}
// Test whether the two functions have equivalent behaviour.
-bool FunctionComparator::compare() {
- // We need to recheck everything, but check the things that weren't included
- // in the hash first.
+int FunctionComparator::compare() {
sn_mapL.clear();
sn_mapR.clear();
- if (F1->getAttributes() != F2->getAttributes())
- return false;
+ if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
+ return Res;
- if (F1->hasGC() != F2->hasGC())
- return false;
+ if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
+ return Res;
- if (F1->hasGC() && F1->getGC() != F2->getGC())
- return false;
+ if (FnL->hasGC()) {
+ if (int Res = cmpNumbers((uint64_t)FnL->getGC(), (uint64_t)FnR->getGC()))
+ return Res;
+ }
- if (F1->hasSection() != F2->hasSection())
- return false;
+ if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
+ return Res;
- if (F1->hasSection() && F1->getSection() != F2->getSection())
- return false;
+ if (FnL->hasSection()) {
+ if (int Res = cmpStrings(FnL->getSection(), FnR->getSection()))
+ return Res;
+ }
- if (F1->isVarArg() != F2->isVarArg())
- return false;
+ if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
+ return Res;
// TODO: if it's internal and only used in direct calls, we could handle this
// case too.
- if (F1->getCallingConv() != F2->getCallingConv())
- return false;
+ if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
+ return Res;
- if (!isEquivalentType(F1->getFunctionType(), F2->getFunctionType()))
- return false;
+ if (int Res = cmpType(FnL->getFunctionType(), FnR->getFunctionType()))
+ return Res;
- assert(F1->arg_size() == F2->arg_size() &&
+ assert(FnL->arg_size() == FnR->arg_size() &&
"Identically typed functions have different numbers of args!");
// Visit the arguments so that they get enumerated in the order they're
// passed in.
- for (Function::const_arg_iterator f1i = F1->arg_begin(),
- f2i = F2->arg_begin(), f1e = F1->arg_end(); f1i != f1e; ++f1i, ++f2i) {
- if (!enumerate(f1i, f2i))
+ for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
+ ArgRI = FnR->arg_begin(),
+ ArgLE = FnL->arg_end();
+ ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
+ if (cmpValues(ArgLI, ArgRI) != 0)
llvm_unreachable("Arguments repeat!");
}
@@ -1045,33 +1018,36 @@ bool FunctionComparator::compare() {
// linked list is immaterial. Our walk starts at the entry block for both
// functions, then takes each block from each terminator in order. As an
// artifact, this also means that unreachable blocks are ignored.
- SmallVector<const BasicBlock *, 8> F1BBs, F2BBs;
+ SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.
- F1BBs.push_back(&F1->getEntryBlock());
- F2BBs.push_back(&F2->getEntryBlock());
+ FnLBBs.push_back(&FnL->getEntryBlock());
+ FnRBBs.push_back(&FnR->getEntryBlock());
- VisitedBBs.insert(F1BBs[0]);
- while (!F1BBs.empty()) {
- const BasicBlock *F1BB = F1BBs.pop_back_val();
- const BasicBlock *F2BB = F2BBs.pop_back_val();
+ VisitedBBs.insert(FnLBBs[0]);
+ while (!FnLBBs.empty()) {
+ const BasicBlock *BBL = FnLBBs.pop_back_val();
+ const BasicBlock *BBR = FnRBBs.pop_back_val();
- if (!enumerate(F1BB, F2BB) || !compare(F1BB, F2BB))
- return false;
+ if (int Res = cmpValues(BBL, BBR))
+ return Res;
+
+ if (int Res = compare(BBL, BBR))
+ return Res;
- const TerminatorInst *F1TI = F1BB->getTerminator();
- const TerminatorInst *F2TI = F2BB->getTerminator();
+ const TerminatorInst *TermL = BBL->getTerminator();
+ const TerminatorInst *TermR = BBR->getTerminator();
- assert(F1TI->getNumSuccessors() == F2TI->getNumSuccessors());
- for (unsigned i = 0, e = F1TI->getNumSuccessors(); i != e; ++i) {
- if (!VisitedBBs.insert(F1TI->getSuccessor(i)))
+ assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
+ for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
+ if (!VisitedBBs.insert(TermL->getSuccessor(i)))
continue;
- F1BBs.push_back(F1TI->getSuccessor(i));
- F2BBs.push_back(F2TI->getSuccessor(i));
+ FnLBBs.push_back(TermL->getSuccessor(i));
+ FnRBBs.push_back(TermR->getSuccessor(i));
}
}
- return true;
+ return 0;
}
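
The rewritten compare() walks both CFGs in lockstep: one stack per function, successors pushed pairwise, and a visited set kept in terms of the left function only, so corresponding blocks always arrive together. A toy version over adjacency lists; the real FunctionComparator also compares the instructions inside each block pair, while this sketch checks structure only:

#include <cassert>
#include <cstddef>
#include <map>
#include <set>
#include <vector>

using Graph = std::map<int, std::vector<int>>;  // block id -> successor ids

static bool sameShape(const Graph &G1, const Graph &G2) {
  std::vector<int> BB1s{0}, BB2s{0};  // entry block is 0 in both graphs
  std::set<int> Visited{0};           // in terms of G1, as in the pass
  while (!BB1s.empty()) {
    int B1 = BB1s.back(); BB1s.pop_back();
    int B2 = BB2s.back(); BB2s.pop_back();
    const std::vector<int> &S1 = G1.at(B1);
    const std::vector<int> &S2 = G2.at(B2);
    if (S1.size() != S2.size())
      return false;  // terminators have different successor counts
    for (std::size_t i = 0; i < S1.size(); ++i) {
      if (!Visited.insert(S1[i]).second)
        continue;    // left block already paired: skip both sides
      BB1s.push_back(S1[i]);
      BB2s.push_back(S2[i]);
    }
  }
  return true;
}

int main() {
  Graph A{{0, {1, 2}}, {1, {}}, {2, {}}};
  Graph B{{0, {1, 2}}, {1, {}}, {2, {}}};
  Graph C{{0, {1}}, {1, {}}};
  assert(sameShape(A, B) && !sameShape(A, C));
}
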
namespace {
@@ -1092,21 +1068,25 @@ public:
bool runOnModule(Module &M) override;
private:
- typedef DenseSet<ComparableFunction> FnSetType;
+ typedef std::set<FunctionPtr> FnTreeType;
/// A work queue of functions that may have been modified and should be
/// analyzed again.
std::vector<WeakVH> Deferred;
- /// Insert a ComparableFunction into the FnSet, or merge it away if it's
+ /// Checks the rules of the order relation introduced among the set of
+ /// functions. Returns true if the sanity check passed, and false otherwise.
+ bool doSanityCheck(std::vector<WeakVH> &Worklist);
+
+ /// Insert a function into the FnTree, or merge it away if it's
/// equal to one that's already present.
- bool insert(ComparableFunction &NewF);
+ bool insert(Function *NewFunction);
- /// Remove a Function from the FnSet and queue it up for a second sweep of
+ /// Remove a Function from the FnTree and queue it up for a second sweep of
/// analysis.
void remove(Function *F);
- /// Find the functions that use this Value and remove them from FnSet and
+ /// Find the functions that use this Value and remove them from FnTree and
/// queue the functions.
void removeUsers(Value *V);
@@ -1131,7 +1111,7 @@ private:
/// The set of all distinct functions. Use the insert() and remove() methods
/// to modify it.
- FnSetType FnSet;
+ FnTreeType FnTree;
/// DataLayout for more accurate GEP comparisons. May be NULL.
const DataLayout *DL;
@@ -1149,6 +1129,78 @@ ModulePass *llvm::createMergeFunctionsPass() {
return new MergeFunctions();
}
+bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
+ if (const unsigned Max = NumFunctionsForSanityCheck) {
+ unsigned TripleNumber = 0;
+ bool Valid = true;
+
+ dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";
+
+ unsigned i = 0;
+ for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
+ I != E && i < Max; ++I, ++i) {
+ unsigned j = i;
+ for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
+ Function *F1 = cast<Function>(*I);
+ Function *F2 = cast<Function>(*J);
+ int Res1 = FunctionComparator(DL, F1, F2).compare();
+ int Res2 = FunctionComparator(DL, F2, F1).compare();
+
+ // If F1 <= F2, then F2 >= F1, otherwise report failure.
+ if (Res1 != -Res2) {
+ dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
+ << "\n";
+ F1->dump();
+ F2->dump();
+ Valid = false;
+ }
+
+ if (Res1 == 0)
+ continue;
+
+ unsigned k = j;
+ for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
+ ++k, ++K, ++TripleNumber) {
+ if (K == J)
+ continue;
+
+ Function *F3 = cast<Function>(*K);
+ int Res3 = FunctionComparator(DL, F1, F3).compare();
+ int Res4 = FunctionComparator(DL, F2, F3).compare();
+
+ bool Transitive = true;
+
+ if (Res1 != 0 && Res1 == Res4) {
+ // F1 > F2, F2 > F3 => F1 > F3
+ Transitive = Res3 == Res1;
+ } else if (Res3 != 0 && Res3 == -Res4) {
+ // F1 > F3, F3 > F2 => F1 > F2
+ Transitive = Res3 == Res1;
+ } else if (Res4 != 0 && -Res3 == Res4) {
+ // F2 > F3, F3 > F1 => F2 > F1
+ Transitive = Res4 == -Res1;
+ }
+
+ if (!Transitive) {
+ dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
+ << TripleNumber << "\n";
+ dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
+ << Res4 << "\n";
+ F1->dump();
+ F2->dump();
+ F3->dump();
+ Valid = false;
+ }
+ }
+ }
+ }
+
+ dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
+ return Valid;
+ }
+ return true;
+}
+
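
doSanityCheck is a property test for the axioms listed in the file header: compare(F1, F2) must equal -compare(F2, F1), and consistent pairs must chain transitively. Here is the same check shrunk to integers, with a deliberately broken "rock-paper-scissors" comparator that is symmetric yet non-transitive, exactly the kind of bug the check exists to catch:

#include <iostream>
#include <vector>

// Symmetric (cmp(a,b) == -cmp(b,a)) but NOT transitive: 0 < 1 and 1 < 2,
// yet 2 < 0. A std::set built on this comparator would misbehave.
static int cmp(int L, int R) {
  int D = ((R - L) % 3 + 3) % 3;
  return D == 0 ? 0 : (D == 1 ? -1 : 1);
}

static bool sanityCheck(const std::vector<int> &Fns) {
  bool Valid = true;
  for (int F1 : Fns)
    for (int F2 : Fns) {
      if (cmp(F1, F2) != -cmp(F2, F1)) {
        std::cout << "non-symmetric: " << F1 << " " << F2 << "\n";
        Valid = false;
      }
      for (int F3 : Fns)
        // F1 <= F2 and F2 <= F3 must imply F1 <= F3.
        if (cmp(F1, F2) <= 0 && cmp(F2, F3) <= 0 && cmp(F1, F3) > 0) {
          std::cout << "non-transitive: " << F1 << " " << F2 << " " << F3
                    << "\n";
          Valid = false;
        }
    }
  return Valid;
}

int main() {
  std::cout << (sanityCheck({0, 1, 2}) ? "passed" : "failed") << "\n";
}
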
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
@@ -1158,12 +1210,13 @@ bool MergeFunctions::runOnModule(Module &M) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
Deferred.push_back(WeakVH(I));
}
- FnSet.resize(Deferred.size());
do {
std::vector<WeakVH> Worklist;
Deferred.swap(Worklist);
+ DEBUG(doSanityCheck(Worklist));
+
DEBUG(dbgs() << "size of module: " << M.size() << '\n');
DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
@@ -1175,8 +1228,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
!F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, DL);
- Changed |= insert(CF);
+ Changed |= insert(F);
}
}
@@ -1190,38 +1242,17 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, DL);
- Changed |= insert(CF);
+ Changed |= insert(F);
}
}
- DEBUG(dbgs() << "size of FnSet: " << FnSet.size() << '\n');
+ DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
} while (!Deferred.empty());
- FnSet.clear();
+ FnTree.clear();
return Changed;
}
-bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
- const ComparableFunction &RHS) {
- if (LHS.getFunc() == RHS.getFunc() &&
- LHS.getHash() == RHS.getHash())
- return true;
- if (!LHS.getFunc() || !RHS.getFunc())
- return false;
-
- // One of these is a special "underlying pointer comparison only" object.
- if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||
- RHS.getDataLayout() == ComparableFunction::LookupOnly)
- return false;
-
- assert(LHS.getDataLayout() == RHS.getDataLayout() &&
- "Comparing functions for different targets");
-
- return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(),
- RHS.getFunc()).compare();
-}
-
// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
@@ -1376,54 +1407,57 @@ void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
++NumFunctionsMerged;
}
-// Insert a ComparableFunction into the FnSet, or merge it away if equal to one
+// Insert a function into the FnTree, or merge it away if equal to one
// that was already inserted.
-bool MergeFunctions::insert(ComparableFunction &NewF) {
- std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
+bool MergeFunctions::insert(Function *NewFunction) {
+ std::pair<FnTreeType::iterator, bool> Result =
+ FnTree.insert(FunctionPtr(NewFunction, DL));
+
if (Result.second) {
- DEBUG(dbgs() << "Inserting as unique: " << NewF.getFunc()->getName() << '\n');
+ DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
return false;
}
- const ComparableFunction &OldF = *Result.first;
+ const FunctionPtr &OldF = *Result.first;
// Don't merge tiny functions, since it can just end up making the function
// larger.
// FIXME: Should still merge them if they are unnamed_addr and produce an
// alias.
- if (NewF.getFunc()->size() == 1) {
- if (NewF.getFunc()->front().size() <= 2) {
- DEBUG(dbgs() << NewF.getFunc()->getName()
- << " is to small to bother merging\n");
+ if (NewFunction->size() == 1) {
+ if (NewFunction->front().size() <= 2) {
+ DEBUG(dbgs() << NewFunction->getName()
+ << " is too small to bother merging\n");
return false;
}
}
// Never thunk a strong function to a weak function.
- assert(!OldF.getFunc()->mayBeOverridden() ||
- NewF.getFunc()->mayBeOverridden());
+ assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden());
- DEBUG(dbgs() << " " << OldF.getFunc()->getName() << " == "
- << NewF.getFunc()->getName() << '\n');
+ DEBUG(dbgs() << " " << OldF.getFunc()->getName()
+ << " == " << NewFunction->getName() << '\n');
- Function *DeleteF = NewF.getFunc();
- NewF.release();
+ Function *DeleteF = NewFunction;
mergeTwoFunctions(OldF.getFunc(), DeleteF);
return true;
}
-// Remove a function from FnSet. If it was already in FnSet, add it to Deferred
-// so that we'll look at it in the next round.
+// Remove a function from FnTree. If it was already in FnTree, add
+// it to Deferred so that we'll look at it in the next round.
void MergeFunctions::remove(Function *F) {
// We need to make sure we remove F, not a function "equal" to F per the
// function equality comparator.
- //
- // The special "lookup only" ComparableFunction bypasses the expensive
- // function comparison in favour of a pointer comparison on the underlying
- // Function*'s.
- ComparableFunction CF = ComparableFunction(F, ComparableFunction::LookupOnly);
- if (FnSet.erase(CF)) {
- DEBUG(dbgs() << "Removed " << F->getName() << " from set and deferred it.\n");
+ FnTreeType::iterator found = FnTree.find(FunctionPtr(F, DL));
+ size_t Erased = 0;
+ if (found != FnTree.end() && found->getFunc() == F) {
+ Erased = 1;
+ FnTree.erase(found);
+ }
+
+ if (Erased) {
+ DEBUG(dbgs() << "Removed " << F->getName()
+ << " from set and deferred it.\n");
Deferred.push_back(F);
}
}
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 38e1b8e..46a3187 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -53,6 +53,10 @@ static cl::opt<bool>
RunLoopRerolling("reroll-loops", cl::Hidden,
cl::desc("Run the loop rerolling pass"));
+static cl::opt<bool> RunLoadCombine("combine-loads", cl::init(false),
+ cl::Hidden,
+ cl::desc("Run the load combining pass"));
+
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
@@ -65,6 +69,7 @@ PassManagerBuilder::PassManagerBuilder() {
SLPVectorize = RunSLPVectorization;
LoopVectorize = RunLoopVectorization;
RerollLoops = RunLoopRerolling;
+ LoadCombine = RunLoadCombine;
}
PassManagerBuilder::~PassManagerBuilder() {
@@ -151,9 +156,9 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (!DisableUnitAtATime) {
addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
+ MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
- MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
MPM.add(createInstructionCombiningPass());// Clean up after IPCP & DAE
@@ -236,6 +241,9 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createLoopUnrollPass());
}
+ if (LoadCombine)
+ MPM.add(createLoadCombinePass());
+
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
@@ -352,6 +360,9 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
// More scalar chains could be vectorized due to more alias information
PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
+ if (LoadCombine)
+ PM.add(createLoadCombinePass());
+
// Cleanup and simplify the code after the scalar optimizations.
PM.add(createInstructionCombiningPass());
addExtensionsToPM(EP_Peephole, PM);
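
For a frontend that builds its pipeline through PassManagerBuilder, the new pass is opt-in: either pass the hidden -combine-loads flag, or set the LoadCombine member this patch adds. A minimal sketch, assuming the 3.5-era header locations shown in this diff (PassManagerBase and these headers moved in later releases):

#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

void buildPipeline(llvm::PassManagerBase &MPM) {
  llvm::PassManagerBuilder PMB;
  PMB.OptLevel = 2;
  PMB.LoadCombine = true;  // opt in; defaults to the -combine-loads setting
  PMB.populateModulePassManager(MPM);  // adds LoadCombine as gated above
}
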
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index e04b1be..ab4dc1c 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -37,8 +37,9 @@ enum SelectPatternFlavor {
SPF_SMIN,
SPF_UMIN,
SPF_SMAX,
- SPF_UMAX
- // SPF_ABS - TODO.
+ SPF_UMAX,
+ SPF_ABS,
+ SPF_NABS
};
/// getComplexity: Assign a complexity or rank value to LLVM Values...
@@ -246,6 +247,7 @@ private:
bool DoXform = true);
Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
+ bool WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS);
Value *EmitGEPOffset(User *GEP);
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c37a9cf..99f0f1f 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -865,69 +865,170 @@ Value *FAddCombine::createAddendVal
return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
-// dyn_castFoldableMul - If this value is a multiply that can be folded into
-// other computations (because it has a constant operand), return the
-// non-constant operand of the multiply, and set CST to point to the multiplier.
-// Otherwise, return null.
-//
-static inline Value *dyn_castFoldableMul(Value *V, Constant *&CST) {
- if (!V->hasOneUse() || !V->getType()->isIntOrIntVectorTy())
- return nullptr;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return nullptr;
-
- if (I->getOpcode() == Instruction::Mul)
- if ((CST = dyn_cast<Constant>(I->getOperand(1))))
- return I->getOperand(0);
- if (I->getOpcode() == Instruction::Shl)
- if ((CST = dyn_cast<Constant>(I->getOperand(1)))) {
- // The multiplier is really 1 << CST.
- CST = ConstantExpr::getShl(ConstantInt::get(V->getType(), 1), CST);
- return I->getOperand(0);
- }
- return nullptr;
+// If one of the operands only has one non-zero bit, and if the other
+// operand has a known-zero bit in a more significant place than it (not
+// including the sign bit) the ripple may go up to and fill the zero, but
+// won't change the sign. For example, (X & ~4) + 1.
+static bool checkRippleForAdd(const APInt &Op0KnownZero,
+ const APInt &Op1KnownZero) {
+ APInt Op1MaybeOne = ~Op1KnownZero;
+ // Make sure that one of the operands has exactly one bit set to 1.
+ if (Op1MaybeOne.countPopulation() != 1)
+ return false;
+
+ // Find the most significant known 0 other than the sign bit.
+ int BitWidth = Op0KnownZero.getBitWidth();
+ APInt Op0KnownZeroTemp(Op0KnownZero);
+ Op0KnownZeroTemp.clearBit(BitWidth - 1);
+ int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
+
+ int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
+ assert(Op1OnePosition >= 0);
+
+ // This also covers the case of no known zero, since in that case
+ // Op0ZeroPosition is -1.
+ return Op0ZeroPosition >= Op1OnePosition;
}
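
The comment's own example, (X & ~4) + 1, can be checked exhaustively at 8 bits: the known zero at bit 2 absorbs any carry from the single one-bit below it, so the sign bit never changes. A standalone brute-force check:

#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    // A has a known zero at bit 2; the +1 may ripple up to bit 2 at most.
    uint8_t A = static_cast<uint8_t>(X) & static_cast<uint8_t>(~4u);
    uint8_t Sum = static_cast<uint8_t>(A + 1);
    if ((Sum ^ A) & 0x80u) {  // would signal a sign-bit change
      std::printf("sign changed for X=%u\n", X);
      return 1;
    }
  }
  std::printf("sign bit never changes: the known zero absorbs the ripple\n");
}
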
-
/// WillNotOverflowSignedAdd - Return true if we can prove that:
/// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
+/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
// There are different heuristics we can use for this. Here are some simple
// ones.
- // Add has the property that adding any two 2's complement numbers can only
- // have one carry bit which can change a sign. As such, if LHS and RHS each
- // have at least two sign bits, we know that the addition of the two values
- // will sign extend fine.
+ // If LHS and RHS each have at least two sign bits, the addition will look
+ // like
+ //
+ // XX..... +
+ // YY.....
+ //
+ // If the carry into the most significant position is 0, X and Y can't both
+ // be 1 and therefore the carry out of the addition is also 0.
+ //
+ // If the carry into the most significant position is 1, X and Y can't both
+ // be 0 and therefore the carry out of the addition is also 1.
+ //
+ // Since the carry into the most significant position is always equal to
+ // the carry out of the addition, there is no signed overflow.
if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
return true;
+ if (IntegerType *IT = dyn_cast<IntegerType>(LHS->getType())) {
+ int BitWidth = IT->getBitWidth();
+ APInt LHSKnownZero(BitWidth, 0);
+ APInt LHSKnownOne(BitWidth, 0);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
- // If one of the operands only has one non-zero bit, and if the other operand
- // has a known-zero bit in a more significant place than it (not including the
- // sign bit) the ripple may go up to and fill the zero, but won't change the
- // sign. For example, (X & ~4) + 1.
+ APInt RHSKnownZero(BitWidth, 0);
+ APInt RHSKnownOne(BitWidth, 0);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
+
+ // Addition of two 2's complement numbers having opposite signs will never
+ // overflow.
+ if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
+ (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
+ return true;
+
+ // Check if carry bit of addition will not cause overflow.
+ if (checkRippleForAdd(LHSKnownZero, RHSKnownZero))
+ return true;
+ if (checkRippleForAdd(RHSKnownZero, LHSKnownZero))
+ return true;
+ }
+ return false;
+}
- // TODO: Implement.
+/// WillNotOverflowUnsignedAdd - Return true if we can prove that:
+/// (zext (add LHS, RHS)) === (add (zext LHS), (zext RHS))
+bool InstCombiner::WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS) {
+ // There are different heuristics we can use for this. Here is a simple one.
+ // If the sign bit of LHS and that of RHS are both zero, no unsigned wrap.
+ bool LHSKnownNonNegative, LHSKnownNegative;
+ bool RHSKnownNonNegative, RHSKnownNegative;
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, 0);
+ ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, 0);
+ if (LHSKnownNonNegative && RHSKnownNonNegative)
+ return true;
return false;
}
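
The unsigned case is the simpler claim: two values with known-zero sign bits are each below half the range, so their sum cannot wrap. Exhaustive at 8 bits:

#include <cstdint>
#include <cstdio>

int main() {
  // Both sign bits zero => LHS, RHS <= 127 => LHS + RHS <= 254 < 256,
  // i.e. zext(add LHS, RHS) == add(zext LHS, zext RHS).
  for (unsigned L = 0; L < 128; ++L)
    for (unsigned R = 0; R < 128; ++R)
      if (static_cast<uint8_t>(L + R) != L + R) {
        std::printf("wrapped: %u + %u\n", L, R);
        return 1;
      }
  std::printf("no unsigned wrap when both sign bits are known zero\n");
}
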
-Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
- bool Changed = SimplifyAssociativeOrCommutative(I);
+// Checks if any operand is negative and we can convert add to sub.
+// This function checks for the following negative patterns:
+// ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
+// ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
+// XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
+static Value *checkForNegativeOperand(BinaryOperator &I,
+ InstCombiner::BuilderTy *Builder) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- if (Value *V = SimplifyVectorOp(I))
- return ReplaceInstUsesWith(I, V);
+ // This function creates 2 instructions to replace ADD; we need at least one
+ // of LHS or RHS to have one use to ensure benefit in the transform.
+ if (!LHS->hasOneUse() && !RHS->hasOneUse())
+ return nullptr;
- if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), DL))
- return ReplaceInstUsesWith(I, V);
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+ const APInt *C1 = nullptr, *C2 = nullptr;
+
+ // if ONE is on other side, swap
+ if (match(RHS, m_Add(m_Value(X), m_One())))
+ std::swap(LHS, RHS);
+
+ if (match(LHS, m_Add(m_Value(X), m_One()))) {
+ // if XOR on other side, swap
+ if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ std::swap(X, RHS);
+
+ if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
+ // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
+ // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
+ if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
+ Value *NewAnd = Builder->CreateAnd(Z, *C1);
+ return Builder->CreateSub(RHS, NewAnd, "sub");
+ } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
+ // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
+ // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
+ Value *NewOr = Builder->CreateOr(Z, ~(*C1));
+ return Builder->CreateSub(RHS, NewOr, "sub");
+ }
+ }
+ }
+
+ // Restore LHS and RHS
+ LHS = I.getOperand(0);
+ RHS = I.getOperand(1);
+
+ // if XOR is on other side, swap
+ if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ std::swap(LHS, RHS);
+
+ // C1 is ODD (so C2 = C1 - 1 is even)
+ // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
+ // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
+ if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
+ if (C1->countTrailingZeros() == 0)
+ if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
+ Value *NewOr = Builder->CreateOr(Z, ~(*C2));
+ return Builder->CreateSub(RHS, NewOr, "sub");
+ }
+ return nullptr;
+}
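// ---- Editor's illustrative sketch (not part of the patch) ----
// Exhaustively verifies the three negative patterns matched above for all
// 8-bit Z and C (the third identity requires C to be even).
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned z = 0; z < 256; ++z)
    for (unsigned c = 0; c < 256; ++c) {
      uint8_t Z = z, C = c, NotC = ~C;
      assert((uint8_t)(((Z | NotC) ^ C) + 1) == (uint8_t)-(Z & C));
      assert((uint8_t)(((Z & C) ^ C) + 1) == (uint8_t)-(Z | NotC));
      if ((C & 1) == 0)
        assert((uint8_t)((Z & C) ^ (uint8_t)(C + 1)) == (uint8_t)-(Z | NotC));
    }
}
// ---- end sketch ----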
+
+Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
+ bool Changed = SimplifyAssociativeOrCommutative(I);
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+
+ if (Value *V = SimplifyVectorOp(I))
+ return ReplaceInstUsesWith(I, V);
- // (A*B)+(A*C) -> A*(B+C) etc
+ if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
+ I.hasNoUnsignedWrap(), DL))
+ return ReplaceInstUsesWith(I, V);
+
+ // (A*B)+(A*C) -> A*(B+C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return ReplaceInstUsesWith(I, V);
@@ -1025,23 +1126,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = dyn_castNegVal(RHS))
return BinaryOperator::CreateSub(LHS, V);
-
- {
- Constant *C2;
- if (Value *X = dyn_castFoldableMul(LHS, C2)) {
- if (X == RHS) // X*C + X --> X * (C+1)
- return BinaryOperator::CreateMul(RHS, AddOne(C2));
-
- // X*C1 + X*C2 --> X * (C1+C2)
- Constant *C1;
- if (X == dyn_castFoldableMul(RHS, C1))
- return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
- }
-
- // X + X*C --> X * (C+1)
- if (dyn_castFoldableMul(RHS, C2) == LHS)
- return BinaryOperator::CreateMul(LHS, AddOne(C2));
- }
+ if (Value *V = checkForNegativeOperand(I, Builder))
+ return ReplaceInstUsesWith(I, V);
// A+B --> A|B iff A and B have no bits set in common.
if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
@@ -1059,29 +1145,6 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
- // W*X + Y*Z --> W * (X+Z) iff W == Y
- {
- Value *W, *X, *Y, *Z;
- if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
- match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
- if (W != Y) {
- if (W == Z) {
- std::swap(Y, Z);
- } else if (Y == X) {
- std::swap(W, X);
- } else if (X == Z) {
- std::swap(Y, Z);
- std::swap(W, X);
- }
- }
-
- if (W == Y) {
- Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
- return BinaryOperator::CreateMul(W, NewAdd);
- }
- }
- }
-
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Value *X;
if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
@@ -1191,6 +1254,18 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateOr(A, B);
}
+ // TODO(jingyue): Consider combining WillNotOverflowSignedAdd and
+ // WillNotOverflowUnsignedAdd to reduce the number of invocations of
+ // computeKnownBits.
+ if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS)) {
+ Changed = true;
+ I.setHasNoSignedWrap(true);
+ }
+ if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedAdd(LHS, RHS)) {
+ Changed = true;
+ I.setHasNoUnsignedWrap(true);
+ }
+
return Changed ? &I : nullptr;
}
@@ -1478,9 +1553,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return BinaryOperator::CreateAnd(Op0,
Builder->CreateNot(Y, Y->getName() + ".not"));
- // 0 - (X sdiv C) -> (X sdiv -C)
- if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) &&
- match(Op0, m_Zero()))
+ // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
+ if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
+ !C->isMinSignedValue())
return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));
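// ---- Editor's illustrative sketch (not part of the patch) ----
// Why the new isMinSignedValue() guard is needed: negating the minimum
// signed value is not representable, so the rewritten divisor would be
// wrong. Demonstrated for i8 on a two's complement target:
#include <cassert>
#include <cstdint>
int main() {
  int8_t C = INT8_MIN;
  int8_t NegC = (int8_t)-C;   // -(-128) wraps back to -128
  assert(NegC == INT8_MIN);   // getNeg(C) would produce INT8_MIN again
}
// ---- end sketch ----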
// 0 - (X << Y) -> (-X << Y) when X is freely negatable.
@@ -1488,19 +1563,6 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (Value *XNeg = dyn_castNegVal(X))
return BinaryOperator::CreateShl(XNeg, Y);
- // X - X*C --> X * (1-C)
- if (match(Op1, m_Mul(m_Specific(Op0), m_Constant(CI)))) {
- Constant *CP1 = ConstantExpr::getSub(ConstantInt::get(I.getType(),1), CI);
- return BinaryOperator::CreateMul(Op0, CP1);
- }
-
- // X - X<<C --> X * (1-(1<<C))
- if (match(Op1, m_Shl(m_Specific(Op0), m_Constant(CI)))) {
- Constant *One = ConstantInt::get(I.getType(), 1);
- C = ConstantExpr::getSub(One, ConstantExpr::getShl(One, CI));
- return BinaryOperator::CreateMul(Op0, C);
- }
-
// X - A*-B -> X + A*B
// X - -A*B -> X + A*B
Value *A, *B;
@@ -1517,16 +1579,6 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
}
}
- Constant *C1;
- if (Value *X = dyn_castFoldableMul(Op0, C1)) {
- if (X == Op1) // X*C - X --> X * (C-1)
- return BinaryOperator::CreateMul(Op1, SubOne(C1));
-
- Constant *C2; // X*C1 - X*C2 -> X * (C1-C2)
- if (X == dyn_castFoldableMul(Op1, C2))
- return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
- }
-
// Optimize pointer differences into the same array into a size. Consider:
// &A[10] - &A[0]: we should compile this to "10".
if (DL) {
@@ -1541,7 +1593,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
- }
+ }
return nullptr;
}
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4f5d65a..b23a606 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1996,29 +1996,6 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
C1 = dyn_cast<ConstantInt>(C);
C2 = dyn_cast<ConstantInt>(D);
if (C1 && C2) { // (A & C1)|(B & C2)
- // If we have: ((V + N) & C1) | (V & C2)
- // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
- // replace with V+N.
- if (C1->getValue() == ~C2->getValue()) {
- if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
- match(A, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- }
- // Or commutes, try both ways.
- if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
- match(B, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- }
- }
-
if ((C1->getValue() & C2->getValue()) == 0) {
// ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
// iff (C1&C2) == 0 and (N&~C1) == 0
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index d4b583b..658178d 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -421,6 +421,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
+
+ // We can strength-reduce this signed add into a regular add if we can
+ // prove that it will never overflow.
+ if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
+ if (WillNotOverflowSignedAdd(LHS, RHS)) {
+ Value *Add = Builder->CreateNSWAdd(LHS, RHS);
+ Add->takeName(&CI);
+ Constant *V[] = {UndefValue::get(Add->getType()), Builder->getFalse()};
+ StructType *ST = cast<StructType>(II->getType());
+ Constant *Struct = ConstantStruct::get(ST, V);
+ return InsertValueInst::Create(Struct, Add, 0);
+ }
+ }
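// ---- Editor's illustrative sketch (not part of the patch) ----
// What the fold buys us, using the GCC/Clang builtin as a stand-in for
// llvm.sadd.with.overflow: when the operands are provably small, the
// overflow flag is always false, so only a plain (nsw) add remains.
#include <cassert>
int main() {
  for (int a = 0; a <= 1000; ++a)
    for (int b = 0; b <= 1000; ++b) {
      int Sum;
      assert(!__builtin_sadd_overflow(a, b, &Sum)); // flag provably false
      assert(Sum == a + b);
    }
}
// ---- end sketch ----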
+
break;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
@@ -800,6 +815,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
+ // Note that ppc_altivec_vperm has a big-endian bias, so when creating
+ // a vector shuffle for little endian, we must undo the transformation
+ // performed on vec_perm in altivec.h. That is, we must complement
+ // the permutation mask with respect to 31 and reverse the order of
+ // V1 and V2.
if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
assert(Mask->getType()->getVectorNumElements() == 16 &&
"Bad type for intrinsic!");
@@ -832,10 +852,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
unsigned Idx =
cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
Idx &= 31; // Match the hardware behavior.
+ if (DL && DL->isLittleEndian())
+ Idx = 31 - Idx;
if (!ExtractedElts[Idx]) {
+ Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
+ Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
ExtractedElts[Idx] =
- Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
+ Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
Builder->getInt32(Idx&15));
}
@@ -913,6 +937,20 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
}
+ case Intrinsic::AMDGPU_rcp: {
+ if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
+ const APFloat &ArgVal = C->getValueAPF();
+ APFloat Val(ArgVal.getSemantics(), 1.0);
+ APFloat::opStatus Status = Val.divide(ArgVal,
+ APFloat::rmNearestTiesToEven);
+ // Only do this if it was exact and therefore not dependent on the
+ // rounding mode.
+ if (Status == APFloat::opOK)
+ return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
+ }
+
+ break;
+ }
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 356803a..ff083d7 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1434,7 +1434,12 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
// If casting the result of a getelementptr instruction with no offset, turn
// this into a cast of the original pointer!
- if (GEP->hasAllZeroIndices()) {
+ if (GEP->hasAllZeroIndices() &&
+ // If CI is an addrspacecast and GEP changes the pointer type, merging
+ // GEP into CI would undo canonicalizing addrspacecast with different
+ // pointer types, causing infinite loops.
+ (!isa<AddrSpaceCastInst>(CI) ||
+ GEP->getType() == GEP->getPointerOperand()->getType())) {
// Changing the cast operand is usually not a good idea but it is safe
// here because the pointer operand is being replaced with another
// pointer operand so the opcode doesn't need to change.
@@ -1904,5 +1909,24 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
+ // If the destination pointer element type is not the same as the source's,
+ // first bitcast to a pointer to the destination element type within the
+ // source address space, and then addrspacecast to the new address space.
+ // This allows the cast to be exposed to other transforms.
+ Value *Src = CI.getOperand(0);
+ PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
+ PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
+
+ Type *DestElemTy = DestTy->getElementType();
+ if (SrcTy->getElementType() != DestElemTy) {
+ Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
+ // Handle vectors of pointers.
+ MidTy = VectorType::get(MidTy, VT->getNumElements());
+ }
+
+ Value *NewBitCast = Builder->CreateBitCast(Src, MidTy);
+ return new AddrSpaceCastInst(NewBitCast, CI.getType());
+ }
+
return commonPointerCastTransforms(CI);
}
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 02e8bf1..5e71c5c 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -612,9 +612,10 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (ICmpInst::isSigned(Cond))
return nullptr;
- // Look through bitcasts.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
- RHS = BCI->getOperand(0);
+ // Look through bitcasts and addrspacecasts. We do not, however, want to
+ // strip GEPs with all-zero indices.
+ if (!isa<GetElementPtrInst>(RHS))
+ RHS = RHS->stripPointerCasts();
Value *PtrBase = GEPLHS->getOperand(0);
if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
@@ -655,9 +656,24 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
PtrBase->stripPointerCasts() ==
GEPRHS->getOperand(0)->stripPointerCasts()) {
+ Value *LOffset = EmitGEPOffset(GEPLHS);
+ Value *ROffset = EmitGEPOffset(GEPRHS);
+
+ // If we looked through an addrspacecast between different sized address
+ // spaces, the LHS and RHS pointers are different sized
+ // integers. Truncate to the smaller one.
+ Type *LHSIndexTy = LOffset->getType();
+ Type *RHSIndexTy = ROffset->getType();
+ if (LHSIndexTy != RHSIndexTy) {
+ if (LHSIndexTy->getPrimitiveSizeInBits() <
+ RHSIndexTy->getPrimitiveSizeInBits()) {
+ ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
+ } else
+ LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
+ }
+
Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
- EmitGEPOffset(GEPLHS),
- EmitGEPOffset(GEPRHS));
+ LOffset, ROffset);
return ReplaceInstUsesWith(I, Cmp);
}
@@ -667,26 +683,12 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
}
// If one of the GEPs has all zero indices, recurse.
- bool AllZeros = true;
- for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPLHS->getOperand(i)) ||
- !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
+ if (GEPLHS->hasAllZeroIndices())
return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
ICmpInst::getSwappedPredicate(Cond), I);
// If the other GEP has all zero indices, recurse.
- AllZeros = true;
- for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPRHS->getOperand(i)) ||
- !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
+ if (GEPRHS->hasAllZeroIndices())
return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
@@ -2026,9 +2028,13 @@ static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
/// replacement required.
static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
Value *OtherVal, InstCombiner &IC) {
+ // Don't bother doing this transformation for pointers, and don't do it
+ // for vectors.
+ if (!isa<IntegerType>(MulVal->getType()))
+ return nullptr;
+
assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
- assert(isa<IntegerType>(MulVal->getType()));
Instruction *MulInstr = cast<Instruction>(MulVal);
assert(MulInstr->getOpcode() == Instruction::Mul);
@@ -2523,7 +2529,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// bit is set. If the comparison is against zero, then this is a check
// to see if *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0KnownZero;
- if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ if (~Op1KnownZero == 0) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
ConstantInt *LHSC = nullptr;
@@ -2533,11 +2539,19 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) == 0" into "x != 3".
+ // or turn "((1 << x)&7) == 0" into "x > 2".
Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
- unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
- return new ICmpInst(ICmpInst::ICMP_NE, X,
- ConstantInt::get(X->getType(), CmpVal));
+ APInt ValToCheck = Op0KnownZeroInverted;
+ if (ValToCheck.isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_NE, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ } else if ((++ValToCheck).isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros() - 1;
+ return new ICmpInst(ICmpInst::ICMP_UGT, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
}
// If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
@@ -2560,7 +2574,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// bit is set. If the comparison is against zero, then this is a check
// to see if *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0KnownZero;
- if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
+ if (~Op1KnownZero == 0) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
ConstantInt *LHSC = nullptr;
@@ -2570,11 +2584,19 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) != 0" into "x == 3".
+ // or turn "((1 << x)&7) != 0" into "x < 3".
Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
- unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
- return new ICmpInst(ICmpInst::ICMP_EQ, X,
- ConstantInt::get(X->getType(), CmpVal));
+ APInt ValToCheck = Op0KnownZeroInverted;
+ if (ValToCheck.isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_EQ, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ } else if ((++ValToCheck).isPowerOf2()) {
+ unsigned CmpVal = ValToCheck.countTrailingZeros();
+ return new ICmpInst(ICmpInst::ICMP_ULT, X,
+ ConstantInt::get(X->getType(), CmpVal));
+ }
}
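// ---- Editor's illustrative sketch (not part of the patch) ----
// The two shapes of the "(1 << x) & Mask" fold, checked concretely: a
// power-of-2 mask pins x to one value; a (power-of-2 - 1) mask bounds it.
#include <cassert>
int main() {
  for (unsigned x = 0; x < 8; ++x) {
    assert((((1u << x) & 8u) != 0) == (x == 3)); // "&8": single-bit mask
    assert((((1u << x) & 7u) != 0) == (x < 3));  // "&7": low-bits mask
  }
}
// ---- end sketch ----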
// If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 66d0938..c10e92a 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -50,99 +50,102 @@ static bool pointsToConstantGlobal(Value *V) {
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
- SmallVectorImpl<Instruction *> &ToDelete,
- bool IsOffset = false) {
+ SmallVectorImpl<Instruction *> &ToDelete) {
// We track lifetime intrinsics as we encounter them. If we decide to go
// ahead and replace the value with the global, this lets the caller quickly
// eliminate the markers.
- for (Use &U : V->uses()) {
- Instruction *I = cast<Instruction>(U.getUser());
-
- if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- // Ignore non-volatile loads, they are always ok.
- if (!LI->isSimple()) return false;
- continue;
- }
-
- if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
- // If uses of the bitcast are ok, we are ok.
- if (!isOnlyCopiedFromConstantGlobal(I, TheCopy, ToDelete, IsOffset))
- return false;
- continue;
- }
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has all zero indices, it doesn't offset the pointer. If it
- // doesn't, it does.
- if (!isOnlyCopiedFromConstantGlobal(
- GEP, TheCopy, ToDelete, IsOffset || !GEP->hasAllZeroIndices()))
- return false;
- continue;
- }
+ SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
+ ValuesToInspect.push_back(std::make_pair(V, false));
+ while (!ValuesToInspect.empty()) {
+ auto ValuePair = ValuesToInspect.pop_back_val();
+ const bool IsOffset = ValuePair.second;
+ for (auto &U : ValuePair.first->uses()) {
+ Instruction *I = cast<Instruction>(U.getUser());
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ // Ignore non-volatile loads, they are always ok.
+ if (!LI->isSimple()) return false;
+ continue;
+ }
- if (CallSite CS = I) {
- // If this is the function being called then we treat it like a load and
- // ignore it.
- if (CS.isCallee(&U))
+ if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
+ // If uses of the bitcast are ok, we are ok.
+ ValuesToInspect.push_back(std::make_pair(I, IsOffset));
continue;
+ }
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has all zero indices, it doesn't offset the pointer. If it
+ // doesn't, it does.
+ ValuesToInspect.push_back(
+ std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
+ continue;
+ }
- // Inalloca arguments are clobbered by the call.
- unsigned ArgNo = CS.getArgumentNo(&U);
- if (CS.isInAllocaArgument(ArgNo))
- return false;
+ if (CallSite CS = I) {
+ // If this is the function being called then we treat it like a load and
+ // ignore it.
+ if (CS.isCallee(&U))
+ continue;
- // If this is a readonly/readnone call site, then we know it is just a
- // load (but one that potentially returns the value itself), so we can
- // ignore it if we know that the value isn't captured.
- if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
- continue;
+ // Inalloca arguments are clobbered by the call.
+ unsigned ArgNo = CS.getArgumentNo(&U);
+ if (CS.isInAllocaArgument(ArgNo))
+ return false;
- // If this is being passed as a byval argument, the caller is making a
- // copy, so it is only a read of the alloca.
- if (CS.isByValArgument(ArgNo))
- continue;
- }
+ // If this is a readonly/readnone call site, then we know it is just a
+ // load (but one that potentially returns the value itself), so we can
+ // ignore it if we know that the value isn't captured.
+ if (CS.onlyReadsMemory() &&
+ (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
+ continue;
+
+ // If this is being passed as a byval argument, the caller is making a
+ // copy, so it is only a read of the alloca.
+ if (CS.isByValArgument(ArgNo))
+ continue;
+ }
- // Lifetime intrinsics can be handled by the caller.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end) {
- assert(II->use_empty() && "Lifetime markers have no result to use!");
- ToDelete.push_back(II);
- continue;
+ // Lifetime intrinsics can be handled by the caller.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ assert(II->use_empty() && "Lifetime markers have no result to use!");
+ ToDelete.push_back(II);
+ continue;
+ }
}
- }
- // If this is isn't our memcpy/memmove, reject it as something we can't
- // handle.
- MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
- if (!MI)
- return false;
+ // If this isn't our memcpy/memmove, reject it as something we can't
+ // handle.
+ MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
+ if (!MI)
+ return false;
- // If the transfer is using the alloca as a source of the transfer, then
- // ignore it since it is a load (unless the transfer is volatile).
- if (U.getOperandNo() == 1) {
- if (MI->isVolatile()) return false;
- continue;
- }
+ // If the transfer is using the alloca as a source of the transfer, then
+ // ignore it since it is a load (unless the transfer is volatile).
+ if (U.getOperandNo() == 1) {
+ if (MI->isVolatile()) return false;
+ continue;
+ }
- // If we already have seen a copy, reject the second one.
- if (TheCopy) return false;
+ // If we already have seen a copy, reject the second one.
+ if (TheCopy) return false;
- // If the pointer has been offset from the start of the alloca, we can't
- // safely handle this.
- if (IsOffset) return false;
+ // If the pointer has been offset from the start of the alloca, we can't
+ // safely handle this.
+ if (IsOffset) return false;
- // If the memintrinsic isn't using the alloca as the dest, reject it.
- if (U.getOperandNo() != 0) return false;
+ // If the memintrinsic isn't using the alloca as the dest, reject it.
+ if (U.getOperandNo() != 0) return false;
- // If the source of the memcpy/move is not a constant global, reject it.
- if (!pointsToConstantGlobal(MI->getSource()))
- return false;
+ // If the source of the memcpy/move is not a constant global, reject it.
+ if (!pointsToConstantGlobal(MI->getSource()))
+ return false;
- // Otherwise, the transform is safe. Remember the copy instruction.
- TheCopy = MI;
+ // Otherwise, the transform is safe. Remember the copy instruction.
+ TheCopy = MI;
+ }
}
return true;
}
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 9996ebc..6c6e7d8 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -203,8 +203,11 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
Value *X;
Constant *C1;
if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
- Value *Add = Builder->CreateMul(X, Op1);
- return BinaryOperator::CreateAdd(Add, Builder->CreateMul(C1, Op1));
+ Value *Mul = Builder->CreateMul(C1, Op1);
+ // Only go forward with the transform if C1*Op1 simplifies to a tidier
+ // constant.
+ if (!match(Mul, m_Mul(m_Value(), m_Value())))
+ return BinaryOperator::CreateAdd(Builder->CreateMul(X, Op1), Mul);
}
}
}
@@ -990,6 +993,10 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
}
if (Constant *RHS = dyn_cast<Constant>(Op1)) {
+ // X/INT_MIN -> X == INT_MIN
+ if (RHS->isMinSignedValue())
+ return new ZExtInst(Builder->CreateICmpEQ(Op0, Op1), I.getType());
+
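// ---- Editor's illustrative sketch (not part of the patch) ----
// The X/INT_MIN fold, checked for i8: the quotient is 1 exactly when
// X == INT8_MIN and 0 otherwise, which is zext(icmp eq X, INT8_MIN).
#include <cassert>
int main() {
  for (int x = -128; x <= 127; ++x)
    assert(x / -128 == (x == -128 ? 1 : 0));
}
// ---- end sketch ----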
// -X/C --> X/-C provided the negation doesn't overflow.
if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
if (match(Sub->getOperand(0), m_Zero()) && Sub->hasNoSignedWrap())
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 9a41e4b..06c9e29 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -31,13 +31,18 @@ MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
if (!ICI) return SPF_UNKNOWN;
- LHS = ICI->getOperand(0);
- RHS = ICI->getOperand(1);
+ ICmpInst::Predicate Pred = ICI->getPredicate();
+ Value *CmpLHS = ICI->getOperand(0);
+ Value *CmpRHS = ICI->getOperand(1);
+ Value *TrueVal = SI->getTrueValue();
+ Value *FalseVal = SI->getFalseValue();
+
+ LHS = CmpLHS;
+ RHS = CmpRHS;
// (icmp X, Y) ? X : Y
- if (SI->getTrueValue() == ICI->getOperand(0) &&
- SI->getFalseValue() == ICI->getOperand(1)) {
- switch (ICI->getPredicate()) {
+ if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
+ switch (Pred) {
default: return SPF_UNKNOWN; // Equality.
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE: return SPF_UMAX;
@@ -51,18 +56,35 @@ MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
}
// (icmp X, Y) ? Y : X
- if (SI->getTrueValue() == ICI->getOperand(1) &&
- SI->getFalseValue() == ICI->getOperand(0)) {
- switch (ICI->getPredicate()) {
- default: return SPF_UNKNOWN; // Equality.
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMIN;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMIN;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMAX;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMAX;
+ if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
+ switch (Pred) {
+ default: return SPF_UNKNOWN; // Equality.
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE: return SPF_UMIN;
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE: return SPF_SMIN;
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_ULE: return SPF_UMAX;
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_SLE: return SPF_SMAX;
+ }
+ }
+
+ if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
+ if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
+ (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
+
+ // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
+ // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
+ if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
+ return (CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS;
+ }
+
+ // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
+ // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
+ if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
+ return (CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS;
+ }
}
}
@@ -365,7 +387,15 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
/// 1. The icmp predicate is inverted
/// 2. The select operands are reversed
/// 3. The magnitude of C2 and C1 are flipped
-static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
+///
+/// This also tries to turn
+/// --- Single bit tests:
+/// if ((x & C) == 0) x |= C to x |= C
+/// if ((x & C) != 0) x ^= C to x &= ~C
+/// if ((x & C) == 0) x ^= C to x |= C
+/// if ((x & C) != 0) x &= ~C to x &= ~C
+/// if ((x & C) == 0) x &= ~C to nothing
+static Value *foldSelectICmpAndOr(SelectInst &SI, Value *TrueVal,
Value *FalseVal,
InstCombiner::BuilderTy *Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
@@ -384,6 +414,25 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
return nullptr;
const APInt *C2;
+ if (match(TrueVal, m_Specific(X))) {
+ // if ((X & C) != 0) X ^= C becomes X &= ~C
+ if (match(FalseVal, m_Xor(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return Builder->CreateAnd(X, ~(*C1));
+ // if ((X & C) != 0) X &= ~C becomes X &= ~C
+ if (match(FalseVal, m_And(m_Specific(X), m_APInt(C2))) && *C1 == ~(*C2))
+ return FalseVal;
+ } else if (match(FalseVal, m_Specific(X))) {
+ // if ((X & C) == 0) X ^= C becomes X |= C
+ if (match(TrueVal, m_Xor(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return Builder->CreateOr(X, *C1);
+ // if ((X & C) == 0) X &= ~C becomes nothing
+ if (match(TrueVal, m_And(m_Specific(X), m_APInt(C2))) && *C1 == ~(*C2))
+ return X;
+ // if ((X & C) == 0) X |= C becomes X |= C
+ if (match(TrueVal, m_Or(m_Specific(X), m_APInt(C2))) && C1 == C2)
+ return TrueVal;
+ }
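// ---- Editor's illustrative sketch (not part of the patch) ----
// The single bit test rewrites listed above, checked for every 8-bit x and
// every single-bit mask C.
#include <cassert>
int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned C = 1; C < 256; C <<= 1) {
      bool Set = (x & C) != 0;
      assert((Set ? x : (x | C)) == (x | C));    // (x&C)==0 ? x|=C
      assert((Set ? (x ^ C) : x) == (x & ~C));   // (x&C)!=0 ? x^=C
      assert((Set ? x : (x ^ C)) == (x | C));    // (x&C)==0 ? x^=C
      assert((Set ? (x & ~C) : x) == (x & ~C));  // (x&C)!=0 ? x&=~C
      assert((Set ? x : (x & ~C)) == x);         // (x&C)==0 ? x&=~C
    }
}
// ---- end sketch ----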
+
bool OrOnTrueVal = false;
bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
if (!OrOnFalseVal)
@@ -677,6 +726,22 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
}
}
}
+
+ // ABS(ABS(X)) -> ABS(X)
+ // NABS(NABS(X)) -> NABS(X)
+ if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
+ return ReplaceInstUsesWith(Outer, Inner);
+ }
+
+ // ABS(NABS(X)) -> ABS(X)
+ // NABS(ABS(X)) -> NABS(X)
+ if ((SPF1 == SPF_ABS && SPF2 == SPF_NABS) ||
+ (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
+ SelectInst *SI = cast<SelectInst>(Inner);
+ Value *NewSI = Builder->CreateSelect(
+ SI->getCondition(), SI->getFalseValue(), SI->getTrueValue());
+ return ReplaceInstUsesWith(Outer, NewSI);
+ }
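// ---- Editor's illustrative sketch (not part of the patch) ----
// The ABS/NABS composition rules above, checked on plain ints with
// abs(x) and nabs(x) == -abs(x) (range chosen to keep abs well defined).
#include <cassert>
#include <cstdlib>
int main() {
  for (int x = -1000; x <= 1000; ++x) {
    int A = std::abs(x), N = -std::abs(x);
    assert(std::abs(A) == A);   // ABS(ABS(X))   -> ABS(X)
    assert(-std::abs(N) == N);  // NABS(NABS(X)) -> NABS(X)
    assert(std::abs(N) == A);   // ABS(NABS(X))  -> ABS(X)
    assert(-std::abs(A) == N);  // NABS(ABS(X))  -> NABS(X)
  }
}
// ---- end sketch ----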
return nullptr;
}
@@ -981,7 +1046,6 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// TODO.
// ABS(-X) -> ABS(X)
- // ABS(ABS(X)) -> ABS(X)
}
// See if we can fold the select into a phi node if the condition is a select.
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index cc6665c..2495747 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -789,11 +789,6 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
// have a sign-extend idiom.
Value *X;
if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
- // If the left shift is just shifting out partial signbits, delete the
- // extension.
- if (cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
- return ReplaceInstUsesWith(I, X);
-
// If the input is an extension from the shifted amount value, e.g.
// %x = zext i8 %A to i32
// %y = shl i32 %x, 24
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 8c5e202..cb16584 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -144,7 +144,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// If the operand is the PHI induction variable:
if (PHIInVal == PHIUser) {
// Scalarize the binary operation. Its first operand is the
- // scalar PHI and the second operand is extracted from the other
+ // scalar PHI, and the second operand is extracted from the other
// vector operand.
BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
@@ -361,7 +361,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
- // Okay, we can handle this if the vector we are insertinting into is
+ // We can handle this if the vector we are inserting into is
// transitively ok.
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
// If so, update the mask to reflect the inserted undef.
@@ -376,7 +376,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
// This must be extracting from either LHS or RHS.
if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
- // Okay, we can handle this if the vector we are insertinting into is
+ // We can handle this if the vector we are inserting into is
// transitively ok.
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
// If so, update the mask to reflect the inserted value.
@@ -403,7 +403,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
/// We are building a shuffle to create V, which is a sequence of insertelement,
/// extractelement pairs. If PermittedRHS is set, then we must either use it or
-/// not rely on the second vector source. Return an std::pair containing the
+/// not rely on the second vector source. Return a std::pair containing the
/// left and right vectors of the proposed shuffle (or 0), and set the Mask
/// parameter as required.
///
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 4c36887..08e2446 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -42,6 +42,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
@@ -395,6 +396,127 @@ static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
return false;
}
+/// This function returns the identity value for the given opcode, which can
+/// be used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1)
+/// ==> X * (2 + 1).
+static Value *getIdentityValue(Instruction::BinaryOps OpCode, Value *V) {
+ if (isa<Constant>(V))
+ return nullptr;
+
+ if (OpCode == Instruction::Mul)
+ return ConstantInt::get(V->getType(), 1);
+
+ // TODO: We can handle other cases e.g. Instruction::And, Instruction::Or etc.
+
+ return nullptr;
+}
+
+/// This function factors binary ops which can be combined using distributive
+/// laws. It also factors SHL as MUL, e.g. SHL(X, 2) ==> MUL(X, 4).
+static Instruction::BinaryOps
+getBinOpsForFactorization(BinaryOperator *Op, Value *&LHS, Value *&RHS) {
+ if (!Op)
+ return Instruction::BinaryOpsEnd;
+
+ if (Op->getOpcode() == Instruction::Shl) {
+ if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
+ // The multiplier is really 1 << CST.
+ RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
+ LHS = Op->getOperand(0);
+ return Instruction::Mul;
+ }
+ }
+
+ // TODO: We can add other conversions e.g. shr => div etc.
+
+ LHS = Op->getOperand(0);
+ RHS = Op->getOperand(1);
+ return Op->getOpcode();
+}
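// ---- Editor's illustrative sketch (not part of the patch) ----
// The SHL-as-MUL rewrite used above: x << c == x * (1 << c) in wrapping
// machine arithmetic, checked for 8-bit values and in-range shift amounts.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned c = 0; c < 8; ++c)
      assert((uint8_t)(x << c) == (uint8_t)(x * (1u << c)));
}
// ---- end sketch ----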
+
+/// This tries to simplify binary operations by factorizing out common terms
+/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
+static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
+ const DataLayout *DL, BinaryOperator &I,
+ Instruction::BinaryOps InnerOpcode, Value *A,
+ Value *B, Value *C, Value *D) {
+
+ // If any of A, B, C or D is null, we cannot factor I; return early.
+ // Checking A and C should be enough.
+ if (!A || !C || !B || !D)
+ return nullptr;
+
+ Value *SimplifiedInst = nullptr;
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
+
+ // Does "X op' Y" always equal "Y op' X"?
+ bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
+
+ // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
+ if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
+ // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
+ // commutative case, "(A op' B) op (C op' A)"?
+ if (A == C || (InnerCommutative && A == D)) {
+ if (A != C)
+ std::swap(C, D);
+ // Consider forming "A op' (B op D)".
+ // If "B op D" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
+ // If "B op D" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && LHS->hasOneUse() && RHS->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
+ if (V) {
+ SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
+ }
+ }
+
+ // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
+ if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
+ // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
+ // commutative case, "(A op' B) op (B op' D)"?
+ if (B == D || (InnerCommutative && B == C)) {
+ if (B != D)
+ std::swap(C, D);
+ // Consider forming "(A op C) op' B".
+ // If "A op C" simplifies then it can be formed with no cost.
+ Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
+
+ // If "A op C" doesn't simplify then only go on if both of the existing
+ // operations "A op' B" and "C op' D" will be zapped as no longer used.
+ if (!V && LHS->hasOneUse() && RHS->hasOneUse())
+ V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
+ if (V) {
+ SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
+ }
+ }
+
+ if (SimplifiedInst) {
+ ++NumFactor;
+ SimplifiedInst->takeName(&I);
+
+ // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
+ // TODO: Check for NUW.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
+ if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
+ bool HasNSW = false;
+ if (isa<OverflowingBinaryOperator>(&I))
+ HasNSW = I.hasNoSignedWrap();
+
+ if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
+ if (isa<OverflowingBinaryOperator>(Op0))
+ HasNSW &= Op0->hasNoSignedWrap();
+
+ if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
+ if (isa<OverflowingBinaryOperator>(Op1))
+ HasNSW &= Op1->hasNoSignedWrap();
+ BO->setHasNoSignedWrap(HasNSW);
+ }
+ }
+ }
+ return SimplifiedInst;
+}
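// ---- Editor's illustrative sketch (not part of the patch) ----
// Factorization is justified by distributivity, which holds in wrapping
// machine arithmetic; checked exhaustively for 8-bit mul over add.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b)
      for (unsigned c = 0; c < 256; ++c)
        assert((uint8_t)(a * b + a * c) == (uint8_t)(a * (b + c)));
}
// ---- end sketch ----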
+
/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
@@ -404,65 +526,33 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op
// Factorization.
- if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
- // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
- // a common term.
- Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
- Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
- Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
+ Instruction::BinaryOps LHSOpcode = getBinOpsForFactorization(Op0, A, B);
+ Instruction::BinaryOps RHSOpcode = getBinOpsForFactorization(Op1, C, D);
+
+ // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
+ // a common term.
+ if (LHSOpcode == RHSOpcode) {
+ if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
+ return V;
+ }
- // Does "X op' Y" always equal "Y op' X"?
- bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
-
- // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
- if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
- // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
- // commutative case, "(A op' B) op (C op' A)"?
- if (A == C || (InnerCommutative && A == D)) {
- if (A != C)
- std::swap(C, D);
- // Consider forming "A op' (B op D)".
- // If "B op D" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
- // If "B op D" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && Op0->hasOneUse() && Op1->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
- if (V) {
- ++NumFactor;
- V = Builder->CreateBinOp(InnerOpcode, A, V);
- V->takeName(&I);
- return V;
- }
- }
+ // The instruction has the form "(A op' B) op (C)". Try to factorize common
+ // term.
+ if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
+ getIdentityValue(LHSOpcode, RHS)))
+ return V;
- // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
- if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
- // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
- // commutative case, "(A op' B) op (B op' D)"?
- if (B == D || (InnerCommutative && B == C)) {
- if (B != D)
- std::swap(C, D);
- // Consider forming "(A op C) op' B".
- // If "A op C" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
- // If "A op C" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && Op0->hasOneUse() && Op1->hasOneUse())
- V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
- if (V) {
- ++NumFactor;
- V = Builder->CreateBinOp(InnerOpcode, V, B);
- V->takeName(&I);
- return V;
- }
- }
- }
+ // The instruction has the form "(B) op (C op' D)". Try to factorize common
+ // term.
+ if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
+ getIdentityValue(RHSOpcode, LHS), C, D))
+ return V;
// Expansion.
+ Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
// The instruction has the form "(A op' B) op C". See if expanding it out
// to "(A op C) op' (B op C)" results in simplifications.
@@ -1030,6 +1120,12 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
return nullptr;
}
+ // If Op is zero then Val = Op * Scale.
+ if (match(Op, m_Zero())) {
+ NoSignedWrap = true;
+ return Op;
+ }
+
// We know that we can successfully descale, so from here on we can safely
// modify the IR. Op holds the descaled version of the deepest term in the
// expression. NoSignedWrap is 'true' if multiplying Op by Scale is known
@@ -1106,6 +1202,11 @@ static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
if (!Inst.getType()->isVectorTy()) return nullptr;
+ // It may not be safe to reorder shuffles and things like div, urem, etc.
+ // because we may trap when executing those ops on unknown vector elements.
+ // See PR20059.
+ if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;
+
unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
@@ -1138,7 +1239,9 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
- if (Shuffle && C1 && isa<UndefValue>(Shuffle->getOperand(1)) &&
+ if (Shuffle && C1 &&
+ (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
+ isa<UndefValue>(Shuffle->getOperand(1)) &&
Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
// Find constant C2 that has property:
@@ -1220,6 +1323,91 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (MadeChange) return &GEP;
}
+ // Check to see if the inputs to the PHI node are getelementptr instructions.
+ if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
+ GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
+ if (!Op1)
+ return nullptr;
+
+ signed DI = -1;
+
+ for (auto I = PN->op_begin() + 1, E = PN->op_end(); I != E; ++I) {
+ GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
+ if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
+ return nullptr;
+
+ // Keep track of the type as we walk the GEP.
+ Type *CurTy = Op1->getOperand(0)->getType()->getScalarType();
+
+ for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
+ if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
+ return nullptr;
+
+ if (Op1->getOperand(J) != Op2->getOperand(J)) {
+ if (DI == -1) {
+ // We have not seen any differences in the GEPs feeding the PHI yet,
+ // so we record this one if it is allowed to be a variable.
+
+ // The first two arguments can vary for any GEP; the rest have to be
+ // static for struct slots.
+ if (J > 1 && CurTy->isStructTy())
+ return nullptr;
+
+ DI = J;
+ } else {
+ // The GEP is different by more than one input. While this could be
+ // extended to support GEPs that vary by more than one variable it
+ // doesn't make sense since it greatly increases the complexity and
+ // would result in an R+R+R addressing mode which no backend
+ // directly supports and would need to be broken into several
+ // simpler instructions anyway.
+ return nullptr;
+ }
+ }
+
+ // Sink down a layer of the type for the next iteration.
+ if (J > 0) {
+ if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
+ CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
+ } else {
+ CurTy = nullptr;
+ }
+ }
+ }
+ }
+
+ GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
+
+ if (DI == -1) {
+ // All the GEPs feeding the PHI are identical. Clone one down into our
+ // BB so that it can be merged with the current GEP.
+ GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
+ NewGEP);
+ } else {
+ // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
+ // into the current block so it can be merged, and create a new PHI to
+ // set that index.
+ Instruction *InsertPt = Builder->GetInsertPoint();
+ Builder->SetInsertPoint(PN);
+ PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
+ PN->getNumOperands());
+ Builder->SetInsertPoint(InsertPt);
+
+ for (auto &I : PN->operands())
+ NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
+ PN->getIncomingBlock(I));
+
+ NewGEP->setOperand(DI, NewPN);
+ GEP.getParent()->getInstList().insert(GEP.getParent()->getFirstNonPHI(),
+ NewGEP);
+ }
+
+ GEP.setOperand(0, NewGEP);
+ PtrOp = NewGEP;
+ }
+
// Combine Indices - If the source pointer to this getelementptr instruction
// is a getelementptr instruction, combine the indices of the two
// getelementptr instructions into a single instruction.
@@ -2014,7 +2202,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// Simplify the list of clauses, eg by removing repeated catch clauses
// (these are often created by inlining).
bool MakeNewInstruction = false; // If true, recreate using the following:
- SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
+ SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
@@ -2022,8 +2210,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
bool isLastClause = i + 1 == e;
if (LI.isCatch(i)) {
// A catch clause.
- Value *CatchClause = LI.getClause(i);
- Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());
+ Constant *CatchClause = LI.getClause(i);
+ Constant *TypeInfo = CatchClause->stripPointerCasts();
// If we already saw this clause, there is no point in having a second
// copy of it.
@@ -2052,7 +2240,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// equal (for example if one represents a C++ class, and the other some
// class derived from it).
assert(LI.isFilter(i) && "Unsupported landingpad clause!");
- Value *FilterClause = LI.getClause(i);
+ Constant *FilterClause = LI.getClause(i);
ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
unsigned NumTypeInfos = FilterType->getNumElements();
@@ -2096,8 +2284,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// catch-alls. If so, the filter can be discarded.
bool SawCatchAll = false;
for (unsigned j = 0; j != NumTypeInfos; ++j) {
- Value *Elt = Filter->getOperand(j);
- Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
+ Constant *Elt = Filter->getOperand(j);
+ Constant *TypeInfo = Elt->stripPointerCasts();
if (isCatchAll(Personality, TypeInfo)) {
// This element is a catch-all. Bail out, noting this fact.
SawCatchAll = true;
@@ -2202,7 +2390,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
continue;
// If Filter is a subset of LFilter, i.e. every element of Filter is also
// an element of LFilter, then discard LFilter.
- SmallVectorImpl<Value *>::iterator J = NewClauses.begin() + j;
+ SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
// If Filter is empty then it is a subset of LFilter.
if (!FElts) {
// Discard LFilter.
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 95fca75..5e5ddc1 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -16,6 +16,7 @@
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
@@ -39,15 +40,14 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
-#include "llvm/Support/system_error.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
#include <algorithm>
#include <string>
+#include <system_error>
using namespace llvm;
@@ -70,7 +70,7 @@ static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
-static const int kAsanCtorAndCtorPriority = 1;
+static const int kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
@@ -79,7 +79,7 @@ static const char *const kAsanUnregisterGlobalsName =
"__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
-static const char *const kAsanInitName = "__asan_init_v3";
+static const char *const kAsanInitName = "__asan_init_v4";
static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kAsanCovName = "__sanitizer_cov";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
@@ -128,9 +128,8 @@ static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack",
cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
-// This flag may need to be replaced with -f[no]asan-use-after-return.
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
- cl::desc("Check return-after-free"), cl::Hidden, cl::init(false));
+ cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
@@ -142,16 +141,13 @@ static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
"are more than this number of blocks."),
cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
- cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
+ cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
cl::Hidden, cl::init(false));
static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
cl::desc("Realign stack to the value of this flag (power of two)"),
cl::Hidden, cl::init(32));
-static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
- cl::desc("File containing the list of objects to ignore "
- "during instrumentation"), cl::Hidden);
static cl::opt<int> ClInstrumentationWithCallsThreshold(
"asan-instrumentation-with-call-threshold",
cl::desc("If the function being instrumented contains more than "
@@ -216,29 +212,87 @@ STATISTIC(NumOptimizedAccessesToGlobalVar,
"Number of optimized accesses to global vars");
namespace {
-/// A set of dynamically initialized globals extracted from metadata.
-class SetOfDynamicallyInitializedGlobals {
+/// Frontend-provided metadata for global variables.
+class GlobalsMetadata {
public:
- void Init(Module& M) {
- // Clang generates metadata identifying all dynamically initialized globals.
- NamedMDNode *DynamicGlobals =
- M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
- if (!DynamicGlobals)
+ GlobalsMetadata() : inited_(false) {}
+ void init(Module& M) {
+ assert(!inited_);
+ inited_ = true;
+ NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
+ if (!Globals)
return;
- for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
- MDNode *MDN = DynamicGlobals->getOperand(i);
- assert(MDN->getNumOperands() == 1);
- Value *VG = MDN->getOperand(0);
- // The optimizer may optimize away a global entirely, in which case we
- // cannot instrument access to it.
- if (!VG)
+ for (auto MDN : Globals->operands()) {
+ // Format of the metadata node for the global:
+ // {
+ // global,
+ // source_location,
+ // i1 is_dynamically_initialized,
+ // i1 is_blacklisted
+ // }
+ assert(MDN->getNumOperands() == 4);
+ Value *V = MDN->getOperand(0);
+ // The optimizer may optimize away a global entirely.
+ if (!V)
continue;
- DynInitGlobals.insert(cast<GlobalVariable>(VG));
+ GlobalVariable *GV = cast<GlobalVariable>(V);
+ if (Value *Loc = MDN->getOperand(1)) {
+ GlobalVariable *GVLoc = cast<GlobalVariable>(Loc);
+ // We may already know the source location for GV, if it was merged
+ // with another global.
+ if (SourceLocation.insert(std::make_pair(GV, GVLoc)).second)
+ addSourceLocationGlobal(GVLoc);
+ }
+ ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(2));
+ if (IsDynInit->isOne())
+ DynInitGlobals.insert(GV);
+ ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(3));
+ if (IsBlacklisted->isOne())
+ BlacklistedGlobals.insert(GV);
}
}
- bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; }
+
+ GlobalVariable *getSourceLocation(GlobalVariable *G) const {
+ auto Pos = SourceLocation.find(G);
+ return (Pos != SourceLocation.end()) ? Pos->second : nullptr;
+ }
+
+ /// Check if the global is dynamically initialized.
+ bool isDynInit(GlobalVariable *G) const {
+ return DynInitGlobals.count(G);
+ }
+
+ /// Check if the global was blacklisted.
+ bool isBlacklisted(GlobalVariable *G) const {
+ return BlacklistedGlobals.count(G);
+ }
+
+ /// Check if the global was generated to describe source location of another
+ /// global (we don't want to instrument them).
+ bool isSourceLocationGlobal(GlobalVariable *G) const {
+ return LocationGlobals.count(G);
+ }
+
private:
- SmallSet<GlobalValue*, 32> DynInitGlobals;
+ bool inited_;
+ DenseMap<GlobalVariable*, GlobalVariable*> SourceLocation;
+ DenseSet<GlobalVariable*> DynInitGlobals;
+ DenseSet<GlobalVariable*> BlacklistedGlobals;
+ DenseSet<GlobalVariable*> LocationGlobals;
+
+ void addSourceLocationGlobal(GlobalVariable *SourceLocGV) {
+ // Source location global is a struct with layout:
+ // {
+ // filename,
+ // i32 line_number,
+ // i32 column_number,
+ // }
+ LocationGlobals.insert(SourceLocGV);
+ ConstantStruct *Contents =
+ cast<ConstantStruct>(SourceLocGV->getInitializer());
+ GlobalVariable *FilenameGV = cast<GlobalVariable>(Contents->getOperand(0));
+ LocationGlobals.insert(FilenameGV);
+ }
};
/// This struct defines the shadow mapping using the rule:
@@ -306,16 +360,7 @@ static size_t RedzoneSizeForScale(int MappingScale) {
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
- AddressSanitizer(bool CheckInitOrder = true,
- bool CheckUseAfterReturn = false,
- bool CheckLifetime = false,
- StringRef BlacklistFile = StringRef())
- : FunctionPass(ID),
- CheckInitOrder(CheckInitOrder || ClInitializers),
- CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn),
- CheckLifetime(CheckLifetime || ClCheckLifetime),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile) {}
+ AddressSanitizer() : FunctionPass(ID) {}
const char *getPassName() const override {
return "AddressSanitizerFunctionPass";
}
@@ -344,11 +389,6 @@ struct AddressSanitizer : public FunctionPass {
bool InjectCoverage(Function &F, const ArrayRef<BasicBlock*> AllBlocks);
void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
- bool CheckInitOrder;
- bool CheckUseAfterReturn;
- bool CheckLifetime;
- SmallString<64> BlacklistFile;
-
LLVMContext *C;
const DataLayout *DL;
int LongSize;
@@ -359,7 +399,6 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanHandleNoReturnFunc;
Function *AsanCovFunction;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
- std::unique_ptr<SpecialCaseList> BL;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
@@ -368,19 +407,14 @@ struct AddressSanitizer : public FunctionPass {
*AsanMemoryAccessCallbackSized[2];
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
- SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+ GlobalsMetadata GlobalsMD;
friend struct FunctionStackPoisoner;
};
class AddressSanitizerModule : public ModulePass {
public:
- AddressSanitizerModule(bool CheckInitOrder = true,
- StringRef BlacklistFile = StringRef())
- : ModulePass(ID),
- CheckInitOrder(CheckInitOrder || ClInitializers),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile) {}
+ AddressSanitizerModule() : ModulePass(ID) {}
bool runOnModule(Module &M) override;
static char ID; // Pass identification, replacement for typeid
const char *getPassName() const override {
@@ -390,17 +424,15 @@ class AddressSanitizerModule : public ModulePass {
private:
void initializeCallbacks(Module &M);
+ bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
bool ShouldInstrumentGlobal(GlobalVariable *G);
+ void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
size_t MinRedzoneSizeForGlobal() const {
return RedzoneSizeForScale(Mapping.Scale);
}
- bool CheckInitOrder;
- SmallString<64> BlacklistFile;
-
- std::unique_ptr<SpecialCaseList> BL;
- SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
+ GlobalsMetadata GlobalsMD;
Type *IntptrTy;
LLVMContext *C;
const DataLayout *DL;
@@ -497,7 +529,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
/// \brief Collect lifetime intrinsic calls to check for use-after-scope
/// errors.
void visitIntrinsicInst(IntrinsicInst &II) {
- if (!ASan.CheckLifetime) return;
+ if (!ClCheckLifetime) return;
Intrinsic::ID ID = II.getIntrinsicID();
if (ID != Intrinsic::lifetime_start &&
ID != Intrinsic::lifetime_end)
@@ -552,20 +584,16 @@ char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
false, false)
-FunctionPass *llvm::createAddressSanitizerFunctionPass(
- bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime,
- StringRef BlacklistFile) {
- return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn,
- CheckLifetime, BlacklistFile);
+FunctionPass *llvm::createAddressSanitizerFunctionPass() {
+ return new AddressSanitizer();
}
char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass", false, false)
-ModulePass *llvm::createAddressSanitizerModulePass(
- bool CheckInitOrder, StringRef BlacklistFile) {
- return new AddressSanitizerModule(CheckInitOrder, BlacklistFile);
+ModulePass *llvm::createAddressSanitizerModulePass() {
+ return new AddressSanitizerModule();
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -682,7 +710,7 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
// If a global variable does not have dynamic initialization we don't
// have to instrument it. However, if a global does not have an initializer
// at all, we assume it has a dynamic initializer (in another TU).
- return G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G);
+ return G->hasInitializer() && !GlobalsMD.isDynInit(G);
}
void
@@ -706,7 +734,7 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- if (!CheckInitOrder || GlobalIsLinkerInitialized(G)) {
+ if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
NumOptimizedAccessesToGlobalVar++;
return;
}
@@ -851,48 +879,36 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
-void AddressSanitizerModule::createInitializerPoisonCalls(
- Module &M, GlobalValue *ModuleName) {
- // We do all of our poisoning and unpoisoning within a global constructor.
- // These are called _GLOBAL__(sub_)?I_.*.
- // TODO: Consider looking through the functions in
- // M.getGlobalVariable("llvm.global_ctors") instead of using this stringly
- // typed approach.
- Function *GlobalInit = nullptr;
- for (auto &F : M.getFunctionList()) {
- StringRef FName = F.getName();
-
- const char kGlobalPrefix[] = "_GLOBAL__";
- if (!FName.startswith(kGlobalPrefix))
- continue;
- FName = FName.substr(strlen(kGlobalPrefix));
-
- const char kOptionalSub[] = "sub_";
- if (FName.startswith(kOptionalSub))
- FName = FName.substr(strlen(kOptionalSub));
-
- if (FName.startswith("I_")) {
- GlobalInit = &F;
- break;
- }
- }
- // If that function is not present, this TU contains no globals, or they have
- // all been optimized away
- if (!GlobalInit)
- return;
-
+void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
+ GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
- IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt());
+ IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
// Add calls to unpoison all globals before each return instruction.
- for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end();
- I != E; ++I) {
- if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator())) {
+ for (auto &BB : GlobalInit.getBasicBlockList())
+ if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
CallInst::Create(AsanUnpoisonGlobals, "", RI);
+}
+
+void AddressSanitizerModule::createInitializerPoisonCalls(
+ Module &M, GlobalValue *ModuleName) {
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+
+ ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
+ for (Use &OP : CA->operands()) {
+ if (isa<ConstantAggregateZero>(OP))
+ continue;
+ ConstantStruct *CS = cast<ConstantStruct>(OP);
+
+ // Must have a function or null ptr.
+ // (CS->getOperand(0) is the init priority.)
+ if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
+ if (F->getName() != kAsanModuleCtorName)
+ poisonOneInitializer(*F, ModuleName);
}
}
}
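For orientation, each llvm.global_ctors element walked above is, at this LLVM revision, a two-field constant struct; a C++-level mirror of what operands 0 and 1 hold (illustrative, not pass code):

    // Mirror of one llvm.global_ctors entry as read by the loop above:
    // operand 0 is the init priority, operand 1 the constructor (or null).
    struct CtorEntry {
      int Priority;     // i32 init priority
      void (*Ctor)();   // e.g. a _GLOBAL__sub_I_* initializer, or nullptr
    };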
@@ -901,16 +917,20 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = cast<PointerType>(G->getType())->getElementType();
DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
- if (BL->isIn(*G)) return false;
+ if (GlobalsMD.isBlacklisted(G)) return false;
+ if (GlobalsMD.isSourceLocationGlobal(G)) return false;
if (!Ty->isSized()) return false;
if (!G->hasInitializer()) return false;
if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
// Touch only those globals that will not be defined in other modules.
- // Don't handle ODR type linkages since other modules may be built w/o asan.
+ // Don't handle ODR linkage types and COMDATs since other modules may be built
+ // without ASan.
if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
G->getLinkage() != GlobalVariable::PrivateLinkage &&
G->getLinkage() != GlobalVariable::InternalLinkage)
return false;
+ if (G->hasComdat())
+ return false;
// Two problems with thread-locals:
// - The address of the main thread's copy can't be computed at link-time.
// - Need to poison all copies, not just the main thread's one.
@@ -1001,39 +1021,16 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
-bool AddressSanitizerModule::runOnModule(Module &M) {
- if (!ClGlobals) return false;
-
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- return false;
- DL = &DLP->getDataLayout();
-
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
- if (BL->isIn(M)) return false;
- C = &(M.getContext());
- int LongSize = DL->getPointerSizeInBits();
- IntptrTy = Type::getIntNTy(*C, LongSize);
- Mapping = getShadowMapping(M, LongSize);
- initializeCallbacks(M);
- DynamicallyInitializedGlobals.Init(M);
+bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
+ GlobalsMD.init(M);
SmallVector<GlobalVariable *, 16> GlobalsToChange;
- for (Module::GlobalListType::iterator G = M.global_begin(),
- E = M.global_end(); G != E; ++G) {
- if (ShouldInstrumentGlobal(G))
- GlobalsToChange.push_back(G);
+ for (auto &G : M.globals()) {
+ if (ShouldInstrumentGlobal(&G))
+ GlobalsToChange.push_back(&G);
}
- Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
- assert(CtorFunc);
- IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
- Function *CovFunc = M.getFunction(kAsanCovName);
- int nCov = CovFunc ? CovFunc->getNumUses() : 0;
- IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
-
size_t n = GlobalsToChange.size();
if (n == 0) return false;
@@ -1044,10 +1041,11 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
// const char *name;
// const char *module_name;
// size_t has_dynamic_init;
+ // void *source_location;
// We initialize an array of such structures and pass it to a run-time call.
- StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
- IntptrTy, IntptrTy,
- IntptrTy, IntptrTy, NULL);
+ StructType *GlobalStructTy =
+ StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
+ IntptrTy, IntptrTy, NULL);
SmallVector<Constant *, 16> Initializers(n);
bool HasDynamicallyInitializedGlobals = false;
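For reference, the run-time consumes each array element as a struct matching the seven pointer-sized fields built below; a sketch following compiler-rt's __asan_global (field names illustrative here):

    #include <stdint.h>
    // One descriptor per instrumented global; all fields are pointer-sized.
    struct asan_global {
      uintptr_t beg;                // address of the global itself
      uintptr_t size;               // size of the original global
      uintptr_t size_with_redzone;  // size including the right redzone
      const char *name;             // symbol name
      const char *module_name;      // module where the global is defined
      uintptr_t has_dynamic_init;   // 1 if dynamically initialized
      uintptr_t source_location;    // new field: location struct address, or 0
    };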
@@ -1075,11 +1073,6 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
- // Determine whether this global should be poisoned in initialization.
- bool GlobalHasDynamicInitializer =
- DynamicallyInitializedGlobals.Contains(G);
- // Don't check initialization order if this global is blacklisted.
- GlobalHasDynamicInitializer &= !BL->isIn(*G, "init");
StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
Constant *NewInitializer = ConstantStruct::get(
@@ -1108,18 +1101,21 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
NewGlobal->takeName(G);
G->eraseFromParent();
+ bool GlobalHasDynamicInitializer = GlobalsMD.isDynInit(G);
+ GlobalVariable *SourceLoc = GlobalsMD.getSourceLocation(G);
+
Initializers[i] = ConstantStruct::get(
- GlobalStructTy,
- ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
+ GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
ConstantInt::get(IntptrTy, SizeInBytes),
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
ConstantExpr::getPointerCast(ModuleName, IntptrTy),
ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
+ SourceLoc ? ConstantExpr::getPointerCast(SourceLoc, IntptrTy)
+ : ConstantInt::get(IntptrTy, 0),
NULL);
- // Populate the first and last globals declared in this TU.
- if (CheckInitOrder && GlobalHasDynamicInitializer)
+ if (ClInitializers && GlobalHasDynamicInitializer)
HasDynamicallyInitializedGlobals = true;
DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
@@ -1131,7 +1127,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
// Create calls for poisoning before initializers run and unpoisoning after.
- if (CheckInitOrder && HasDynamicallyInitializedGlobals)
+ if (HasDynamicallyInitializedGlobals)
createInitializerPoisonCalls(M, ModuleName);
IRB.CreateCall2(AsanRegisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
@@ -1147,12 +1143,42 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
IRB.CreatePointerCast(AllGlobals, IntptrTy),
ConstantInt::get(IntptrTy, n));
- appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndCtorPriority);
+ appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
DEBUG(dbgs() << M);
return true;
}
+bool AddressSanitizerModule::runOnModule(Module &M) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP)
+ return false;
+ DL = &DLP->getDataLayout();
+ C = &(M.getContext());
+ int LongSize = DL->getPointerSizeInBits();
+ IntptrTy = Type::getIntNTy(*C, LongSize);
+ Mapping = getShadowMapping(M, LongSize);
+ initializeCallbacks(M);
+
+ bool Changed = false;
+
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+
+ if (ClCoverage > 0) {
+ Function *CovFunc = M.getFunction(kAsanCovName);
+ int nCov = CovFunc ? CovFunc->getNumUses() : 0;
+ IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
+ Changed = true;
+ }
+
+ if (ClGlobals)
+ Changed |= InstrumentGlobals(IRB, M);
+
+ return Changed;
+}
+
void AddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
@@ -1216,8 +1242,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
report_fatal_error("data layout missing");
DL = &DLP->getDataLayout();
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
- DynamicallyInitializedGlobals.Init(M);
+ GlobalsMD.init(M);
C = &(M.getContext());
LongSize = DL->getPointerSizeInBits();
@@ -1236,7 +1261,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
Mapping = getShadowMapping(M, LongSize);
- appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
return true;
}
@@ -1267,7 +1292,9 @@ void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
break;
}
+ DebugLoc EntryLoc = IP->getDebugLoc().getFnDebugLoc(*C);
IRBuilder<> IRB(IP);
+ IRB.SetCurrentDebugLocation(EntryLoc);
Type *Int8Ty = IRB.getInt8Ty();
GlobalVariable *Guard = new GlobalVariable(
*F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
@@ -1279,10 +1306,10 @@ void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
Instruction *Ins = SplitBlockAndInsertIfThen(
Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
IRB.SetInsertPoint(Ins);
+ IRB.SetCurrentDebugLocation(EntryLoc);
// We pass &F to __sanitizer_cov. We could avoid this and rely on
// GET_CALLER_PC, but having the PC of the first instruction is just nice.
- Instruction *Call = IRB.CreateCall(AsanCovFunction);
- Call->setDebugLoc(IP->getDebugLoc());
+ IRB.CreateCall(AsanCovFunction);
StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
Store->setAtomic(Monotonic);
Store->setAlignment(1);
@@ -1316,14 +1343,13 @@ bool AddressSanitizer::InjectCoverage(Function &F,
(unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
InjectCoverageAtBlock(F, F.getEntryBlock());
} else {
- for (size_t i = 0, n = AllBlocks.size(); i < n; i++)
- InjectCoverageAtBlock(F, *AllBlocks[i]);
+ for (auto BB : AllBlocks)
+ InjectCoverageAtBlock(F, *BB);
}
return true;
}
bool AddressSanitizer::runOnFunction(Function &F) {
- if (BL->isIn(F)) return false;
if (&F == AsanCtorFunction) return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
@@ -1350,29 +1376,28 @@ bool AddressSanitizer::runOnFunction(Function &F) {
unsigned Alignment;
// Fill the set of memory operations to instrument.
- for (Function::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
- AllBlocks.push_back(FI);
+ for (auto &BB : F) {
+ AllBlocks.push_back(&BB);
TempsToInstrument.clear();
int NumInsnsPerBB = 0;
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI) {
- if (LooksLikeCodeInBug11395(BI)) return false;
- if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
+ for (auto &Inst : BB) {
+ if (LooksLikeCodeInBug11395(&Inst)) return false;
+ if (Value *Addr =
+ isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr))
continue; // We've seen this temp in the current BB.
}
} else if (ClInvalidPointerPairs &&
- isInterestingPointerComparisonOrSubtraction(BI)) {
- PointerComparisonsOrSubtracts.push_back(BI);
+ isInterestingPointerComparisonOrSubtraction(&Inst)) {
+ PointerComparisonsOrSubtracts.push_back(&Inst);
continue;
- } else if (isa<MemIntrinsic>(BI)) {
+ } else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
} else {
- if (isa<AllocaInst>(BI))
+ if (isa<AllocaInst>(Inst))
NumAllocas++;
- CallSite CS(BI);
+ CallSite CS(&Inst);
if (CS) {
// A call inside BB.
TempsToInstrument.clear();
@@ -1381,7 +1406,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
continue;
}
- ToInstrument.push_back(BI);
+ ToInstrument.push_back(&Inst);
NumInsnsPerBB++;
if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
break;
@@ -1406,8 +1431,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// Instrument.
int NumInstrumented = 0;
- for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
- Instruction *Inst = ToInstrument[i];
+ for (auto Inst : ToInstrument) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
@@ -1423,14 +1447,13 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// We must unpoison the stack before every NoReturn call (throw, _exit, etc).
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
- for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
- Instruction *CI = NoReturnCalls[i];
+ for (auto CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
IRB.CreateCall(AsanHandleNoReturnFunc);
}
- for (size_t i = 0, n = PointerComparisonsOrSubtracts.size(); i != n; i++) {
- instrumentPointerComparisonOrSubtraction(PointerComparisonsOrSubtracts[i]);
+ for (auto Inst : PointerComparisonsOrSubtracts) {
+ instrumentPointerComparisonOrSubtraction(Inst);
NumInstrumented++;
}
@@ -1543,12 +1566,10 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
}
static DebugLoc getFunctionEntryDebugLocation(Function &F) {
- BasicBlock::iterator I = F.getEntryBlock().begin(),
- E = F.getEntryBlock().end();
- for (; I != E; ++I)
- if (!isa<AllocaInst>(I))
- break;
- return I->getDebugLoc();
+ for (const auto &Inst : F.getEntryBlock())
+ if (!isa<AllocaInst>(Inst))
+ return Inst.getDebugLoc();
+ return DebugLoc();
}
void FunctionStackPoisoner::poisonStack() {
@@ -1562,8 +1583,7 @@ void FunctionStackPoisoner::poisonStack() {
SmallVector<ASanStackVariableDescription, 16> SVD;
SVD.reserve(AllocaVec.size());
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
- AllocaInst *AI = AllocaVec[i];
+ for (AllocaInst *AI : AllocaVec) {
ASanStackVariableDescription D = { AI->getName().data(),
getAllocaSizeInBytes(AI),
AI->getAlignment(), AI, 0};
@@ -1577,7 +1597,7 @@ void FunctionStackPoisoner::poisonStack() {
DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
uint64_t LocalStackSize = L.FrameSize;
bool DoStackMalloc =
- ASan.CheckUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
+ ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
AllocaInst *MyAlloca =
@@ -1618,8 +1638,7 @@ void FunctionStackPoisoner::poisonStack() {
// Insert poison calls for lifetime intrinsics for alloca.
bool HavePoisonedAllocas = false;
- for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
- const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+ for (const auto &APC : AllocaPoisonCallVec) {
assert(APC.InsBefore);
assert(APC.AI);
IRBuilder<> IRB(APC.InsBefore);
@@ -1628,11 +1647,10 @@ void FunctionStackPoisoner::poisonStack() {
}
// Replace Alloca instructions with base+offset.
- for (size_t i = 0, n = SVD.size(); i < n; i++) {
- AllocaInst *AI = SVD[i].AI;
+ for (const auto &Desc : SVD) {
+ AllocaInst *AI = Desc.AI;
Value *NewAllocaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(LocalStackBase,
- ConstantInt::get(IntptrTy, SVD[i].Offset)),
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
AI->getType());
replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
AI->replaceAllUsesWith(NewAllocaPtr);
@@ -1665,8 +1683,7 @@ void FunctionStackPoisoner::poisonStack() {
poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
// (Un)poison the stack before all ret instructions.
- for (size_t i = 0, n = RetVec.size(); i < n; i++) {
- Instruction *Ret = RetVec[i];
+ for (auto Ret : RetVec) {
IRBuilder<> IRBRet(Ret);
// Mark the current frame as retired.
IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
@@ -1720,8 +1737,8 @@ void FunctionStackPoisoner::poisonStack() {
}
// We are done. Remove the old unused alloca instructions.
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
- AllocaVec[i]->eraseFromParent();
+ for (auto AI : AllocaVec)
+ AI->eraseFromParent();
}
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 7f468f7..799e14b 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -59,9 +59,9 @@
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
#include <iterator>
using namespace llvm;
@@ -120,6 +120,51 @@ static cl::opt<bool> ClDebugNonzeroLabels(
namespace {
+StringRef GetGlobalTypeString(const GlobalValue &G) {
+ // Types of GlobalVariables are always pointer types.
+ Type *GType = G.getType()->getElementType();
+ // For now we support blacklisting struct types only.
+ if (StructType *SGType = dyn_cast<StructType>(GType)) {
+ if (!SGType->isLiteral())
+ return SGType->getName();
+ }
+ return "<unknown type>";
+}
+
+class DFSanABIList {
+ std::unique_ptr<SpecialCaseList> SCL;
+
+ public:
+ DFSanABIList(SpecialCaseList *SCL) : SCL(SCL) {}
+
+ /// Returns whether either this function or its source file are listed in the
+ /// given category.
+ bool isIn(const Function &F, const StringRef Category) const {
+ return isIn(*F.getParent(), Category) ||
+ SCL->inSection("fun", F.getName(), Category);
+ }
+
+ /// Returns whether this global alias is listed in the given category.
+ ///
+ /// If GA aliases a function, the alias's name is matched as a function name
+ /// would be. Similarly, aliases of globals are matched like globals.
+ bool isIn(const GlobalAlias &GA, const StringRef Category) const {
+ if (isIn(*GA.getParent(), Category))
+ return true;
+
+ if (isa<FunctionType>(GA.getType()->getElementType()))
+ return SCL->inSection("fun", GA.getName(), Category);
+
+ return SCL->inSection("global", GA.getName(), Category) ||
+ SCL->inSection("type", GetGlobalTypeString(GA), Category);
+ }
+
+ /// Returns whether this module is listed in the given category.
+ bool isIn(const Module &M, const StringRef Category) const {
+ return SCL->inSection("src", M.getModuleIdentifier(), Category);
+ }
+};
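The SpecialCaseList file wrapped here uses prefix:pattern=category lines; a hypothetical dfsan ABI list (entries invented for illustration) could read:

    # don't instrument this function at all
    fun:main=uninstrumented
    # route calls through a custom wrapper
    fun:memcpy=custom
    # skip entire modules by source path
    src:third_party/*=skip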
+
class DataFlowSanitizer : public ModulePass {
friend struct DFSanFunction;
friend class DFSanVisitor;
@@ -190,7 +235,7 @@ class DataFlowSanitizer : public ModulePass {
Constant *DFSanSetLabelFn;
Constant *DFSanNonzeroLabelFn;
MDNode *ColdCallWeights;
- std::unique_ptr<SpecialCaseList> ABIList;
+ DFSanABIList ABIList;
DenseMap<Value *, Function *> UnwrappedFnMap;
AttributeSet ReadOnlyNoneAttrs;
@@ -395,11 +440,11 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
}
bool DataFlowSanitizer::isInstrumented(const Function *F) {
- return !ABIList->isIn(*F, "uninstrumented");
+ return !ABIList.isIn(*F, "uninstrumented");
}
bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
- return !ABIList->isIn(*GA, "uninstrumented");
+ return !ABIList.isIn(*GA, "uninstrumented");
}
DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
@@ -407,11 +452,11 @@ DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
}
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
- if (ABIList->isIn(*F, "functional"))
+ if (ABIList.isIn(*F, "functional"))
return WK_Functional;
- if (ABIList->isIn(*F, "discard"))
+ if (ABIList.isIn(*F, "discard"))
return WK_Discard;
- if (ABIList->isIn(*F, "custom"))
+ if (ABIList.isIn(*F, "custom"))
return WK_Custom;
return WK_Warning;
@@ -500,7 +545,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
if (!DL)
return false;
- if (ABIList->isIn(M, "skip"))
+ if (ABIList.isIn(M, "skip"))
return false;
if (!GetArgTLSPtr) {
@@ -557,7 +602,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
++i;
// Don't stop on weak. We assume people aren't playing games with the
// instrumentedness of overridden weak aliases.
- if (Function *F = dyn_cast<Function>(GA->getAliasee())) {
+ if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
if (GAInst && FInst) {
addGlobalNamePrefix(GA);
@@ -567,7 +612,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// below will take care of instrumenting it.
Function *NewF =
buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
- GA->replaceAllUsesWith(NewF);
+ GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
NewF->takeName(GA);
GA->eraseFromParent();
FnsToInstrument.push_back(NewF);
diff --git a/lib/Transforms/Instrumentation/DebugIR.cpp b/lib/Transforms/Instrumentation/DebugIR.cpp
index 18bda1a..f2f1738 100644
--- a/lib/Transforms/Instrumentation/DebugIR.cpp
+++ b/lib/Transforms/Instrumentation/DebugIR.cpp
@@ -354,7 +354,10 @@ private:
std::string getTypeName(Type *T) {
std::string TypeName;
raw_string_ostream TypeStream(TypeName);
- T->print(TypeStream);
+ if (T)
+ T->print(TypeStream);
+ else
+ TypeStream << "Printing <null> Type";
TypeStream.flush();
return TypeName;
}
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 8330a9b..cfeb62e 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -211,6 +211,7 @@ namespace {
class GCOVLines : public GCOVRecord {
public:
void addLine(uint32_t Line) {
+ assert(Line != 0 && "Line zero is not a valid real line number.");
Lines.push_back(Line);
}
@@ -453,10 +454,17 @@ static bool functionHasLines(Function *F) {
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
for (BasicBlock::iterator I = BB->begin(), IE = BB->end();
I != IE; ++I) {
+ // Debug intrinsic locations correspond to the location of the
+ // declaration, not necessarily any statements or expressions.
+ if (isa<DbgInfoIntrinsic>(I)) continue;
+
const DebugLoc &Loc = I->getDebugLoc();
if (Loc.isUnknown()) continue;
- if (Loc.getLine() != 0)
- return true;
+
+ // Skip artificial lines (line 0), such as calls to the global constructors.
+ if (Loc.getLine() == 0) continue;
+
+ return true;
}
}
return false;
@@ -515,8 +523,16 @@ void GCOVProfiler::emitProfileNotes() {
uint32_t Line = 0;
for (BasicBlock::iterator I = BB->begin(), IE = BB->end();
I != IE; ++I) {
+ // Debug intrinsic locations correspond to the location of the
+ // declaration, not necessarily any statements or expressions.
+ if (isa<DbgInfoIntrinsic>(I)) continue;
+
const DebugLoc &Loc = I->getDebugLoc();
if (Loc.isUnknown()) continue;
+
+ // Skip artificial lines (line 0), such as calls to the global constructors.
+ if (Loc.getLine() == 0) continue;
+
if (Line == Loc.getLine()) continue;
Line = Loc.getLine();
if (SP != getDISubprogram(Loc.getScope(*Ctx))) continue;
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index b8e632e..496ab48 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -10,8 +10,6 @@
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
-/// Status: early prototype.
-///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
@@ -117,7 +115,6 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;
@@ -178,10 +175,6 @@ static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
cl::desc("print out instructions with default strict semantics"),
cl::Hidden, cl::init(false));
-static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
- cl::desc("File containing the list of functions where MemorySanitizer "
- "should not report bugs"), cl::Hidden);
-
static cl::opt<int> ClInstrumentationWithCallThreshold(
"msan-instrumentation-with-call-threshold",
cl::desc(
@@ -211,13 +204,11 @@ namespace {
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
public:
- MemorySanitizer(int TrackOrigins = 0,
- StringRef BlacklistFile = StringRef())
+ MemorySanitizer(int TrackOrigins = 0)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
DL(nullptr),
WarningFn(nullptr),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
const char *getPassName() const override { return "MemorySanitizer"; }
bool runOnFunction(Function &F) override;
@@ -282,10 +273,6 @@ class MemorySanitizer : public FunctionPass {
MDNode *ColdCallWeights;
/// \brief Branch weights for origin store.
MDNode *OriginStoreWeights;
- /// \brief Path to blacklist file.
- SmallString<64> BlacklistFile;
- /// \brief The blacklist.
- std::unique_ptr<SpecialCaseList> BL;
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
@@ -305,9 +292,8 @@ INITIALIZE_PASS(MemorySanitizer, "msan",
"MemorySanitizer: detects uninitialized reads.",
false, false)
-FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins,
- StringRef BlacklistFile) {
- return new MemorySanitizer(TrackOrigins, BlacklistFile);
+FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
+ return new MemorySanitizer(TrackOrigins);
}
/// \brief Create a non-const global initialized with the given string.
@@ -431,7 +417,6 @@ bool MemorySanitizer::doInitialization(Module &M) {
report_fatal_error("data layout missing");
DL = &DLP->getDataLayout();
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
C = &(M.getContext());
unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
switch (PtrSize) {
@@ -526,7 +511,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// The following flags disable parts of MSan instrumentation based on
// blacklist contents and command-line options.
bool InsertChecks;
- bool LoadShadow;
+ bool PropagateShadow;
bool PoisonStack;
bool PoisonUndef;
bool CheckReturnValue;
@@ -544,11 +529,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
- bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
- AttributeSet::FunctionIndex,
- Attribute::SanitizeMemory);
+ bool SanitizeFunction = F.getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
InsertChecks = SanitizeFunction;
- LoadShadow = SanitizeFunction;
+ PropagateShadow = SanitizeFunction;
PoisonStack = SanitizeFunction && ClPoisonStack;
PoisonUndef = SanitizeFunction && ClPoisonUndef;
// FIXME: Consider using SpecialCaseList to specify a list of functions that
@@ -585,7 +569,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
IRB.CreateCall3(Fn, ConvertedShadow2,
IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- updateOrigin(Origin, IRB));
+ Origin);
} else {
Value *Cmp = IRB.CreateICmpNE(
ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
@@ -599,26 +583,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void materializeStores(bool InstrumentWithCalls) {
- for (size_t i = 0, n = StoreList.size(); i < n; i++) {
- StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);
+ for (auto Inst : StoreList) {
+ StoreInst &SI = *dyn_cast<StoreInst>(Inst);
- IRBuilder<> IRB(&I);
- Value *Val = I.getValueOperand();
- Value *Addr = I.getPointerOperand();
- Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
+ IRBuilder<> IRB(&SI);
+ Value *Val = SI.getValueOperand();
+ Value *Addr = SI.getPointerOperand();
+ Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
- IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
- if (ClCheckAccessAddress) insertShadowCheck(Addr, &I);
+ if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);
- if (I.isAtomic()) I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
if (MS.TrackOrigins) {
- unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+ unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
InstrumentWithCalls);
}
@@ -662,18 +646,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void materializeChecks(bool InstrumentWithCalls) {
- for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
- Instruction *OrigIns = InstrumentationList[i].OrigIns;
- Value *Shadow = InstrumentationList[i].Shadow;
- Value *Origin = InstrumentationList[i].Origin;
+ for (const auto &ShadowData : InstrumentationList) {
+ Instruction *OrigIns = ShadowData.OrigIns;
+ Value *Shadow = ShadowData.Shadow;
+ Value *Origin = ShadowData.Origin;
materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
}
DEBUG(dbgs() << "DONE:\n" << F);
}
void materializeIndirectCalls() {
- for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
- CallSite CS = IndirectCallList[i];
+ for (auto &CS : IndirectCallList) {
Instruction *I = CS.getInstruction();
BasicBlock *B = I->getParent();
IRBuilder<> IRB(I);
@@ -732,15 +715,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Finalize PHI nodes.
- for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
- PHINode *PN = ShadowPHINodes[i];
+ for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
size_t NumValues = PN->getNumIncomingValues();
for (size_t v = 0; v < NumValues; v++) {
PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
- if (PNO)
- PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
+ if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
}
}
@@ -874,7 +855,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Set SV to be the shadow value for V.
void setShadow(Value *V, Value *SV) {
assert(!ShadowMap.count(V) && "Values may only have one shadow");
- ShadowMap[V] = SV;
+ ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
}
/// \brief Set Origin to be the origin value for V.
@@ -926,6 +907,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// This function either returns the value set earlier with setShadow,
/// or extracts if from ParamTLS (for function arguments).
Value *getShadow(Value *V) {
+ if (!PropagateShadow) return getCleanShadow(V);
if (Instruction *I = dyn_cast<Instruction>(V)) {
// For instructions the shadow is already stored in the map.
Value *Shadow = ShadowMap[V];
@@ -950,22 +932,21 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
- for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- if (!AI->getType()->isSized()) {
+ for (auto &FArg : F->args()) {
+ if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
- unsigned Size = AI->hasByValAttr()
- ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
- : MS.DL->getTypeAllocSize(AI->getType());
- if (A == AI) {
- Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
- if (AI->hasByValAttr()) {
+ unsigned Size = FArg.hasByValAttr()
+ ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
+ : MS.DL->getTypeAllocSize(FArg.getType());
+ if (A == &FArg) {
+ Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ if (FArg.hasByValAttr()) {
// The ByVal pointer itself has a clean shadow. We copy the actual
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
- unsigned ArgAlign = AI->getParamAlignment();
+ unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
ArgAlign = MS.DL->getABITypeAlignment(EltType);
@@ -980,10 +961,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
} else {
*ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
- DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
+ DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
**ShadowPtr << "\n");
if (MS.TrackOrigins) {
- Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+ Value *OriginPtr =
+ getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
}
}
@@ -1093,7 +1075,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(I.getNextNode());
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
- if (LoadShadow) {
+ if (PropagateShadow) {
Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
setShadow(&I,
IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
@@ -1108,7 +1090,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
I.setOrdering(addAcquireOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
- if (LoadShadow) {
+ if (PropagateShadow) {
unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
setOrigin(&I,
IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
@@ -1320,10 +1302,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (!Origin) {
Origin = OpOrigin;
} else {
- Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
- Value *Cond = IRB.CreateICmpNE(FlatShadow,
- MSV->getCleanShadow(FlatShadow));
- Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+ Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
+ // No point in adding something that might result in a zero origin value.
+ if (!ConstOrigin || !ConstOrigin->isNullValue()) {
+ Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
+ Value *Cond =
+ IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
+ Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+ }
}
}
return *this;
@@ -1411,13 +1397,61 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
SC.Done(&I);
}
+ // \brief Handle multiplication by a constant.
+ //
+ // Handle a special case of multiplication by a constant that may have one or
+ // more zeros in the lower bits. This makes the corresponding number of lower
+ // bits of the result zero as well. We model it by shifting the other operand
+ // shadow left by the required number of bits. Effectively, we transform
+ // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
+ // We use multiplication by 2**N instead of shift to cover the case of
+ // multiplication by 0, which may occur in some elements of a vector operand.
+ void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
+ Value *OtherArg) {
+ Constant *ShadowMul;
+ Type *Ty = ConstArg->getType();
+ if (Ty->isVectorTy()) {
+ unsigned NumElements = Ty->getVectorNumElements();
+ Type *EltTy = Ty->getSequentialElementType();
+ SmallVector<Constant *, 16> Elements;
+ for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
+ ConstantInt *Elt =
+ dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ Elements.push_back(ConstantInt::get(EltTy, V2));
+ }
+ ShadowMul = ConstantVector::get(Elements);
+ } else {
+ ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ ShadowMul = ConstantInt::get(Elt->getType(), V2);
+ }
+
+ IRBuilder<> IRB(&I);
+ setShadow(&I,
+ IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
+ setOrigin(&I, getOrigin(OtherArg));
+ }
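A standalone scalar model of the shadow multiplier chosen above (not pass code; __builtin_ctzll assumed available as in GCC/Clang):

    #include <cstdint>
    // For a constant multiplier C with B trailing zero bits, the low B bits
    // of X * C are always zero, so the operand shadow is multiplied by 2**B.
    uint64_t shadowMultiplier(uint64_t C) {
      if (C == 0)
        return 0;                        // x * 0 is fully defined: clear shadow
      unsigned B = __builtin_ctzll(C);   // trailing zeros of the constant
      return uint64_t(1) << B;           // shadow' = shadow * 2**B
    }

For example, C = 12 (binary 1100) yields B = 2, so the shadow is shifted left by two bits.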
+
+ void visitMul(BinaryOperator &I) {
+ Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
+ Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
+ if (constOp0 && !constOp1)
+ handleMulByConstant(I, constOp0, I.getOperand(1));
+ else if (constOp1 && !constOp0)
+ handleMulByConstant(I, constOp1, I.getOperand(0));
+ else
+ handleShadowOr(I);
+ }
+
void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
void visitSub(BinaryOperator &I) { handleShadowOr(I); }
void visitXor(BinaryOperator &I) { handleShadowOr(I); }
- void visitMul(BinaryOperator &I) { handleShadowOr(I); }
void handleDiv(Instruction &I) {
IRBuilder<> IRB(&I);
@@ -1723,7 +1757,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Addr = I.getArgOperand(0);
Type *ShadowTy = getShadowTy(&I);
- if (LoadShadow) {
+ if (PropagateShadow) {
Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
// We don't know the pointer alignment (could be unaligned SSE load!).
// Have to assume the worst case.
@@ -1736,7 +1770,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Addr, &I);
if (MS.TrackOrigins) {
- if (LoadShadow)
+ if (PropagateShadow)
setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
else
setOrigin(&I, getCleanOrigin());
@@ -1946,6 +1980,120 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOriginForNaryOp(I);
}
+ // \brief Get an X86_MMX-sized vector type.
+ Type *getMMXVectorTy(unsigned EltSizeInBits) {
+ const unsigned X86_MMXSizeInBits = 64;
+ return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
+ X86_MMXSizeInBits / EltSizeInBits);
+ }
+
+ // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
+ // intrinsic.
+ Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
+ switch (id) {
+ case llvm::Intrinsic::x86_sse2_packsswb_128:
+ case llvm::Intrinsic::x86_sse2_packuswb_128:
+ return llvm::Intrinsic::x86_sse2_packsswb_128;
+
+ case llvm::Intrinsic::x86_sse2_packssdw_128:
+ case llvm::Intrinsic::x86_sse41_packusdw:
+ return llvm::Intrinsic::x86_sse2_packssdw_128;
+
+ case llvm::Intrinsic::x86_avx2_packsswb:
+ case llvm::Intrinsic::x86_avx2_packuswb:
+ return llvm::Intrinsic::x86_avx2_packsswb;
+
+ case llvm::Intrinsic::x86_avx2_packssdw:
+ case llvm::Intrinsic::x86_avx2_packusdw:
+ return llvm::Intrinsic::x86_avx2_packssdw;
+
+ case llvm::Intrinsic::x86_mmx_packsswb:
+ case llvm::Intrinsic::x86_mmx_packuswb:
+ return llvm::Intrinsic::x86_mmx_packsswb;
+
+ case llvm::Intrinsic::x86_mmx_packssdw:
+ return llvm::Intrinsic::x86_mmx_packssdw;
+ default:
+ llvm_unreachable("unexpected intrinsic id");
+ }
+ }
+
+ // \brief Instrument vector pack intrinsic.
+ //
+ // This function instruments intrinsics like x86_mmx_packsswb, which pack
+ // elements of two input vectors into half as many bits with saturation.
+ // Shadow is propagated with the signed variant of the same intrinsic applied
+ // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
+ // EltSizeInBits is used only for x86mmx arguments.
+ void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
+ assert(I.getNumArgOperands() == 2);
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ IRBuilder<> IRB(&I);
+ Value *S1 = getShadow(&I, 0);
+ Value *S2 = getShadow(&I, 1);
+ assert(isX86_MMX || S1->getType()->isVectorTy());
+
+ // SExt and ICmpNE below must apply to individual elements of input vectors.
+ // In case of x86mmx arguments, cast them to appropriate vector types and
+ // back.
+ Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
+ if (isX86_MMX) {
+ S1 = IRB.CreateBitCast(S1, T);
+ S2 = IRB.CreateBitCast(S2, T);
+ }
+ Value *S1_ext = IRB.CreateSExt(
+ IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
+ Value *S2_ext = IRB.CreateSExt(
+ IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
+ if (isX86_MMX) {
+ Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
+ S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
+ S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
+ }
+
+ Function *ShadowFn = Intrinsic::getDeclaration(
+ F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
+
+ Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
+ if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
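A scalar model of one packed lane (illustration only): sext(S != 0) is 0 or -1, and signed saturation keeps -1 as -1, so any poisoned input element becomes a fully poisoned packed element:

    #include <cstdint>
    int8_t packedShadowLane(int16_t S) {
      int16_t Ext = (S != 0) ? int16_t(-1) : int16_t(0); // sext(S != 0)
      if (Ext > INT8_MAX) return INT8_MAX;  // signed saturation, as in packsswb
      if (Ext < INT8_MIN) return INT8_MIN;
      return static_cast<int8_t>(Ext);      // -1 stays 0xFF, 0 stays 0
    }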
+
+ // \brief Instrument sum-of-absolute-differences intrinsic.
+ void handleVectorSadIntrinsic(IntrinsicInst &I) {
+ const unsigned SignificantBitsPerResultElement = 16;
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
+ unsigned ZeroBitsPerResultElement =
+ ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
+
+ IRBuilder<> IRB(&I);
+ Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ S = IRB.CreateBitCast(S, ResTy);
+ S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
+ ResTy);
+ S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
+ S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
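A scalar model of the rule for one 64-bit psadbw lane (illustration only, assuming Sa and Sb are the operand shadows):

    #include <cstdint>
    uint64_t sadShadow(uint64_t Sa, uint64_t Sb) {
      uint64_t S = Sa | Sb;          // any poisoned input byte taints the lane
      S = (S != 0) ? ~0ULL : 0;      // sext(S != 0): all-ones or zero
      return S >> (64 - 16);         // only 16 result bits are significant
    }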
+
+ // \brief Instrument multiply-add intrinsic.
+ void handleVectorPmaddIntrinsic(IntrinsicInst &I,
+ unsigned EltSizeInBits = 0) {
+ bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
+ Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
+ IRBuilder<> IRB(&I);
+ Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ S = IRB.CreateBitCast(S, ResTy);
+ S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
+ ResTy);
+ S = IRB.CreateBitCast(S, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
+
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case llvm::Intrinsic::bswap:
@@ -2062,6 +2210,47 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// case llvm::Intrinsic::x86_sse2_psll_dq_bs:
// case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
+ case llvm::Intrinsic::x86_sse2_packsswb_128:
+ case llvm::Intrinsic::x86_sse2_packssdw_128:
+ case llvm::Intrinsic::x86_sse2_packuswb_128:
+ case llvm::Intrinsic::x86_sse41_packusdw:
+ case llvm::Intrinsic::x86_avx2_packsswb:
+ case llvm::Intrinsic::x86_avx2_packssdw:
+ case llvm::Intrinsic::x86_avx2_packuswb:
+ case llvm::Intrinsic::x86_avx2_packusdw:
+ handleVectorPackIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_packsswb:
+ case llvm::Intrinsic::x86_mmx_packuswb:
+ handleVectorPackIntrinsic(I, 16);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_packssdw:
+ handleVectorPackIntrinsic(I, 32);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_psad_bw:
+ case llvm::Intrinsic::x86_sse2_psad_bw:
+ case llvm::Intrinsic::x86_avx2_psad_bw:
+ handleVectorSadIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_sse2_pmadd_wd:
+ case llvm::Intrinsic::x86_avx2_pmadd_wd:
+ case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
+ case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
+ handleVectorPmaddIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
+ handleVectorPmaddIntrinsic(I, 8);
+ break;
+
+ case llvm::Intrinsic::x86_mmx_pmadd_wd:
+ handleVectorPmaddIntrinsic(I, 16);
+ break;
+
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
@@ -2083,12 +2272,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return;
}
- // Allow only tail calls with the same types, otherwise
- // we may have a false positive: shadow for a non-void RetVal
- // will get propagated to a void RetVal.
- if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
- Call->setTailCall(false);
-
assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
// We are going to insert code that relies on the fact that the callee
@@ -2211,6 +2394,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitPHINode(PHINode &I) {
IRBuilder<> IRB(&I);
+ if (!PropagateShadow) {
+ setShadow(&I, getCleanShadow(&I));
+ return;
+ }
+
ShadowPHINodes.push_back(&I);
setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
"_msphi_s"));
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 8fe9bca..89386a6 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -40,14 +40,11 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;
#define DEBUG_TYPE "tsan"
-static cl::opt<std::string> ClBlacklistFile("tsan-blacklist",
- cl::desc("Blacklist file"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemoryAccesses(
"tsan-instrument-memory-accesses", cl::init(true),
cl::desc("Instrument memory accesses"), cl::Hidden);
@@ -76,11 +73,7 @@ namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
- ThreadSanitizer(StringRef BlacklistFile = StringRef())
- : FunctionPass(ID),
- DL(nullptr),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile) { }
+ ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
const char *getPassName() const override;
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
@@ -98,8 +91,6 @@ struct ThreadSanitizer : public FunctionPass {
const DataLayout *DL;
Type *IntptrTy;
- SmallString<64> BlacklistFile;
- std::unique_ptr<SpecialCaseList> BL;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
Function *TsanFuncEntry;
@@ -129,8 +120,8 @@ const char *ThreadSanitizer::getPassName() const {
return "ThreadSanitizer";
}
-FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) {
- return new ThreadSanitizer(BlacklistFile);
+FunctionPass *llvm::createThreadSanitizerPass() {
+ return new ThreadSanitizer();
}
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
@@ -228,7 +219,6 @@ bool ThreadSanitizer::doInitialization(Module &M) {
if (!DLP)
report_fatal_error("data layout missing");
DL = &DLP->getDataLayout();
- BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
@@ -322,7 +312,6 @@ static bool isAtomic(Instruction *I) {
bool ThreadSanitizer::runOnFunction(Function &F) {
if (!DL) return false;
- if (BL->isIn(F)) return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
@@ -331,22 +320,20 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
SmallVector<Instruction*, 8> MemIntrinCalls;
bool Res = false;
bool HasCalls = false;
+ bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
// Traverse all instructions, collect loads/stores/returns, check for calls.
- for (Function::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
- BasicBlock &BB = *FI;
- for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
- BI != BE; ++BI) {
- if (isAtomic(BI))
- AtomicAccesses.push_back(BI);
- else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
- LocalLoadsAndStores.push_back(BI);
- else if (isa<ReturnInst>(BI))
- RetVec.push_back(BI);
- else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
- if (isa<MemIntrinsic>(BI))
- MemIntrinCalls.push_back(BI);
+ for (auto &BB : F) {
+ for (auto &Inst : BB) {
+ if (isAtomic(&Inst))
+ AtomicAccesses.push_back(&Inst);
+ else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
+ LocalLoadsAndStores.push_back(&Inst);
+ else if (isa<ReturnInst>(Inst))
+ RetVec.push_back(&Inst);
+ else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ if (isa<MemIntrinsic>(Inst))
+ MemIntrinCalls.push_back(&Inst);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
@@ -358,21 +345,22 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// FIXME: many of these accesses do not need to be checked for races
// (e.g. variables that do not escape, etc).
- // Instrument memory accesses.
- if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
- for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
- Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+ // Instrument memory accesses only if we want to report bugs in the function.
+ if (ClInstrumentMemoryAccesses && SanitizeFunction)
+ for (auto Inst : AllLoadsAndStores) {
+ Res |= instrumentLoadOrStore(Inst);
}
- // Instrument atomic memory accesses.
+ // Instrument atomic memory accesses in any case (they can be used to
+ // implement synchronization).
if (ClInstrumentAtomics)
- for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
- Res |= instrumentAtomic(AtomicAccesses[i]);
+ for (auto Inst : AtomicAccesses) {
+ Res |= instrumentAtomic(Inst);
}
- if (ClInstrumentMemIntrinsics)
- for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
- Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+ if (ClInstrumentMemIntrinsics && SanitizeFunction)
+ for (auto Inst : MemIntrinCalls) {
+ Res |= instrumentMemIntrinsic(Inst);
}
// Instrument function entry/exit points if there were instrumented accesses.
@@ -382,8 +370,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
IRB.getInt32(0));
IRB.CreateCall(TsanFuncEntry, ReturnAddress);
- for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
- IRBuilder<> IRBRet(RetVec[i]);
+ for (auto RetInst : RetVec) {
+ IRBuilder<> IRBRet(RetInst);
IRBRet.CreateCall(TsanFuncExit);
}
Res = true;
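
At the source level, the new gating looks like this (a sketch; assumes clang's no_sanitize_thread function attribute, which clears SanitizeThread on the generated IR function):

#include <atomic>

// Both accesses sit in a function where TSan reporting is disabled.
__attribute__((no_sanitize_thread))
void publish(int *data, std::atomic<int> *flag) {
  *data = 42;                                // plain store: not instrumented
  flag->store(1, std::memory_order_release); // atomic: still instrumented, as
                                             // other code may synchronize on it
}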
@@ -543,8 +531,14 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
createOrdering(&IRB, CASI->getSuccessOrdering()),
createOrdering(&IRB, CASI->getFailureOrdering())};
- CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
- ReplaceInstWithInst(I, C);
+ CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
+ Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());
+
+ Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
+ Res = IRB.CreateInsertValue(Res, Success, 1);
+
+ I->replaceAllUsesWith(Res);
+ I->eraseFromParent();
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
Function *F = FI->getSynchScope() == SingleThread ?
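
The repackaging pattern in isolation (a sketch, assuming 3.5-era IRBuilder APIs; cmpxchg now yields a { value, success } pair, while the TSan runtime's compare-exchange callback still returns only the old value):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// C is the call to the TSan runtime replacing CASI.
static void rebuildCmpXchgResult(AtomicCmpXchgInst *CASI, CallInst *C,
                                 IRBuilder<> &IRB) {
  // The exchange succeeded iff the value found in memory equals the
  // expected operand.
  Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());
  // CASI's type is { ValueTy, i1 }; populate both members before RAUW.
  Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
  Res = IRB.CreateInsertValue(Res, Success, 1);
  CASI->replaceAllUsesWith(Res);
  CASI->eraseFromParent();
}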
diff --git a/lib/Transforms/Scalar/Android.mk b/lib/Transforms/Scalar/Android.mk
index 079cc86..5e22de6 100644
--- a/lib/Transforms/Scalar/Android.mk
+++ b/lib/Transforms/Scalar/Android.mk
@@ -8,11 +8,11 @@ transforms_scalar_SRC_FILES := \
DCE.cpp \
DeadStoreElimination.cpp \
EarlyCSE.cpp \
- GlobalMerge.cpp \
GVN.cpp \
IndVarSimplify.cpp \
JumpThreading.cpp \
LICM.cpp \
+ LoadCombine.cpp \
LoopDeletion.cpp \
LoopIdiomRecognize.cpp \
LoopInstSimplify.cpp \
diff --git a/lib/Transforms/Scalar/CMakeLists.txt b/lib/Transforms/Scalar/CMakeLists.txt
index 3ad1488..2dcfa23 100644
--- a/lib/Transforms/Scalar/CMakeLists.txt
+++ b/lib/Transforms/Scalar/CMakeLists.txt
@@ -8,10 +8,10 @@ add_llvm_library(LLVMScalarOpts
EarlyCSE.cpp
FlattenCFGPass.cpp
GVN.cpp
- GlobalMerge.cpp
IndVarSimplify.cpp
JumpThreading.cpp
LICM.cpp
+ LoadCombine.cpp
LoopDeletion.cpp
LoopIdiomRecognize.cpp
LoopInstSimplify.cpp
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 6d07ddd..106eba0 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -1464,6 +1464,13 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
continue;
}
+ // Loading from calloc (which zero initializes memory) -> zero
+ if (isCallocLikeFn(DepInst, TLI)) {
+ ValuesPerBlock.push_back(AvailableValueInBlock::get(
+ DepBB, Constant::getNullValue(LI->getType())));
+ continue;
+ }
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
@@ -1791,6 +1798,10 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
case LLVMContext::MD_fpmath:
ReplInst->setMetadata(Kind, MDNode::getMostGenericFPMath(IMD, ReplMD));
break;
+ case LLVMContext::MD_invariant_load:
+ // Only set the !invariant.load if it is present in both instructions.
+ ReplInst->setMetadata(Kind, IMD);
+ break;
}
}
}
@@ -1988,6 +1999,15 @@ bool GVN::processLoad(LoadInst *L) {
}
}
+ // If this load follows a calloc (which zero initializes memory),
+ // then the loaded value is zero
+ if (isCallocLikeFn(DepInst, TLI)) {
+ L->replaceAllUsesWith(Constant::getNullValue(L->getType()));
+ markInstructionForDeletion(L);
+ ++NumGVNLoad;
+ return true;
+ }
+
return false;
}
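
A source-level illustration (a sketch): both new checks fire when a load is reached from a calloc-like allocation with no intervening store, so the load folds to the zero constant.

#include <cstdlib>

int first_element() {
  int *p = static_cast<int *>(calloc(16, sizeof(int)));
  if (!p)
    return 0;
  int v = p[0]; // calloc zero-fills: GVN can replace this load with 0
  free(p);
  return v;
}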
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
deleted file mode 100644
index 990d067..0000000
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ /dev/null
@@ -1,313 +0,0 @@
-//===-- GlobalMerge.cpp - Internal globals merging -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This pass merges globals with internal linkage into one. This way all the
-// globals that were merged can be addressed using offsets from the same base
-// pointer (there is no need for a separate base pointer for each of the
-// globals). Such a transformation can significantly reduce register pressure
-// when many globals are involved.
-//
-// For example, consider the code which touches several global variables at
-// once:
-//
-// static int foo[N], bar[N], baz[N];
-//
-// for (i = 0; i < N; ++i) {
-// foo[i] = bar[i] * baz[i];
-// }
-//
-// On ARM the addresses of 3 arrays should be kept in the registers, thus
-// this code has quite large register pressure (loop body):
-//
-// ldr r1, [r5], #4
-// ldr r2, [r6], #4
-// mul r1, r2, r1
-// str r1, [r0], #4
-//
-// The pass converts the code to something like:
-//
-// static struct {
-// int foo[N];
-// int bar[N];
-// int baz[N];
-// } merged;
-//
-// for (i = 0; i < N; ++i) {
-// merged.foo[i] = merged.bar[i] * merged.baz[i];
-// }
-//
-// and in ARM code this becomes:
-//
-// ldr r0, [r5, #40]
-// ldr r1, [r5, #80]
-// mul r0, r1, r0
-// str r0, [r5], #4
-//
-// Note that we saved 2 registers here almost "for free".
-// ===---------------------------------------------------------------------===//
-
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "global-merge"
-
-cl::opt<bool>
-EnableGlobalMerge("global-merge", cl::Hidden,
- cl::desc("Enable global merge pass"),
- cl::init(true));
-
-static cl::opt<bool>
-EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
- cl::desc("Enable global merge pass on constants"),
- cl::init(false));
-
-STATISTIC(NumMerged , "Number of globals merged");
-namespace {
- class GlobalMerge : public FunctionPass {
- const TargetMachine *TM;
-
- bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
- Module &M, bool isConst, unsigned AddrSpace) const;
-
-    /// \brief Check whether the given variable has been identified as must-keep.
- /// \pre setMustKeepGlobalVariables must have been called on the Module that
- /// contains GV
- bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
- return MustKeepGlobalVariables.count(GV);
- }
-
-    /// Collect every variable marked as "used" or referenced by a landing pad
-    /// instruction in this Module.
- void setMustKeepGlobalVariables(Module &M);
-
-    /// Collect every variable marked as "used".
- void collectUsedGlobalVariables(Module &M);
-
-    /// Keep track of the GlobalVariables that must not be merged away.
- SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;
-
- public:
- static char ID; // Pass identification, replacement for typeid.
- explicit GlobalMerge(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), TM(TM) {
- initializeGlobalMergePass(*PassRegistry::getPassRegistry());
- }
-
- bool doInitialization(Module &M) override;
- bool runOnFunction(Function &F) override;
- bool doFinalization(Module &M) override;
-
- const char *getPassName() const override {
- return "Merge internal globals";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- FunctionPass::getAnalysisUsage(AU);
- }
- };
-} // end anonymous namespace
-
-char GlobalMerge::ID = 0;
-INITIALIZE_PASS(GlobalMerge, "global-merge",
- "Global Merge", false, false)
-
-
-bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
- Module &M, bool isConst, unsigned AddrSpace) const {
- const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *DL = TLI->getDataLayout();
-
- // FIXME: Infer the maximum possible offset depending on the actual users
- // (these max offsets are different for the users inside Thumb or ARM
- // functions)
- unsigned MaxOffset = TLI->getMaximalGlobalOffset();
-
- // FIXME: Find better heuristics
- std::stable_sort(Globals.begin(), Globals.end(),
- [DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
- Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
- Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
-
- return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
- });
-
- Type *Int32Ty = Type::getInt32Ty(M.getContext());
-
- for (size_t i = 0, e = Globals.size(); i != e; ) {
- size_t j = 0;
- uint64_t MergedSize = 0;
- std::vector<Type*> Tys;
- std::vector<Constant*> Inits;
- for (j = i; j != e; ++j) {
- Type *Ty = Globals[j]->getType()->getElementType();
- MergedSize += DL->getTypeAllocSize(Ty);
- if (MergedSize > MaxOffset) {
- break;
- }
- Tys.push_back(Ty);
- Inits.push_back(Globals[j]->getInitializer());
- }
-
- StructType *MergedTy = StructType::get(M.getContext(), Tys);
- Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);
- GlobalVariable *MergedGV = new GlobalVariable(M, MergedTy, isConst,
- GlobalValue::InternalLinkage,
- MergedInit, "_MergedGlobals",
- nullptr,
- GlobalVariable::NotThreadLocal,
- AddrSpace);
- for (size_t k = i; k < j; ++k) {
- Constant *Idx[2] = {
- ConstantInt::get(Int32Ty, 0),
- ConstantInt::get(Int32Ty, k-i)
- };
- Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx);
- Globals[k]->replaceAllUsesWith(GEP);
- Globals[k]->eraseFromParent();
- NumMerged++;
- }
- i = j;
- }
-
- return true;
-}
-
-void GlobalMerge::collectUsedGlobalVariables(Module &M) {
-  // Extract global variables from the llvm.used array.
- const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
- if (!GV || !GV->hasInitializer()) return;
-
- // Should be an array of 'i8*'.
- const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
-
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (const GlobalVariable *G =
- dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
- MustKeepGlobalVariables.insert(G);
-}
-
-void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
- collectUsedGlobalVariables(M);
-
- for (Module::iterator IFn = M.begin(), IEndFn = M.end(); IFn != IEndFn;
- ++IFn) {
- for (Function::iterator IBB = IFn->begin(), IEndBB = IFn->end();
- IBB != IEndBB; ++IBB) {
- // Follow the invoke link to find the landing pad instruction
- const InvokeInst *II = dyn_cast<InvokeInst>(IBB->getTerminator());
- if (!II) continue;
-
- const LandingPadInst *LPInst = II->getUnwindDest()->getLandingPadInst();
- // Look for globals in the clauses of the landing pad instruction
- for (unsigned Idx = 0, NumClauses = LPInst->getNumClauses();
- Idx != NumClauses; ++Idx)
- if (const GlobalVariable *GV =
- dyn_cast<GlobalVariable>(LPInst->getClause(Idx)
- ->stripPointerCasts()))
- MustKeepGlobalVariables.insert(GV);
- }
- }
-}
-
-bool GlobalMerge::doInitialization(Module &M) {
- if (!EnableGlobalMerge)
- return false;
-
- DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
- BSSGlobals;
- const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *DL = TLI->getDataLayout();
- unsigned MaxOffset = TLI->getMaximalGlobalOffset();
- bool Changed = false;
- setMustKeepGlobalVariables(M);
-
- // Grab all non-const globals.
- for (Module::global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I) {
- // Merge is safe for "normal" internal globals only
- if (!I->hasLocalLinkage() || I->isThreadLocal() || I->hasSection())
- continue;
-
- PointerType *PT = dyn_cast<PointerType>(I->getType());
- assert(PT && "Global variable is not a pointer!");
-
- unsigned AddressSpace = PT->getAddressSpace();
-
- // Ignore fancy-aligned globals for now.
- unsigned Alignment = DL->getPreferredAlignment(I);
- Type *Ty = I->getType()->getElementType();
- if (Alignment > DL->getABITypeAlignment(Ty))
- continue;
-
- // Ignore all 'special' globals.
- if (I->getName().startswith("llvm.") ||
- I->getName().startswith(".llvm."))
- continue;
-
- // Ignore all "required" globals:
- if (isMustKeepGlobalVariable(I))
- continue;
-
- if (DL->getTypeAllocSize(Ty) < MaxOffset) {
- if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
- .isBSSLocal())
- BSSGlobals[AddressSpace].push_back(I);
- else if (I->isConstant())
- ConstGlobals[AddressSpace].push_back(I);
- else
- Globals[AddressSpace].push_back(I);
- }
- }
-
- for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
- I = Globals.begin(), E = Globals.end(); I != E; ++I)
- if (I->second.size() > 1)
- Changed |= doMerge(I->second, M, false, I->first);
-
- for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
- I = BSSGlobals.begin(), E = BSSGlobals.end(); I != E; ++I)
- if (I->second.size() > 1)
- Changed |= doMerge(I->second, M, false, I->first);
-
- if (EnableGlobalMergeOnConst)
- for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
- I = ConstGlobals.begin(), E = ConstGlobals.end(); I != E; ++I)
- if (I->second.size() > 1)
- Changed |= doMerge(I->second, M, true, I->first);
-
- return Changed;
-}
-
-bool GlobalMerge::runOnFunction(Function &F) {
- return false;
-}
-
-bool GlobalMerge::doFinalization(Module &M) {
- MustKeepGlobalVariables.clear();
- return false;
-}
-
-Pass *llvm::createGlobalMergePass(const TargetMachine *TM) {
- return new GlobalMerge(TM);
-}
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 230a381..6e50d33 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -158,6 +158,15 @@ bool JumpThreading::runOnFunction(Function &F) {
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
+  // Remove unreachable blocks from the function, as they may cause an
+  // infinite loop. We thread when we find something profitable, and jump
+  // threading a branch can create further opportunities. If these
+  // opportunities form a cycle, i.e. if some jump threading undoes previous
+  // threading along the path, we would loop forever. We avoid this by not
+  // threading across back edges. That works for normal cases but not for
+  // unreachable blocks, which may contain cycles with no back edge.
+  removeUnreachableBlocks(F);
+
FindLoopHeaders(F);
bool Changed, EverChanged = false;
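
An illustration of the problematic shape (a sketch): the cycle below is reachable from no path out of the entry block, so a walk from the entry never classifies its edge as a back edge, and threading inside it could undo itself forever.

void f(bool b) {
  return;      // everything below is unreachable
dead:
  if (b)
    goto dead; // a cycle among unreachable blocks, with no back edge
}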
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 0a8d16f..abcceb2 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -192,6 +192,14 @@ namespace {
SmallVectorImpl<BasicBlock*> &ExitBlocks,
SmallVectorImpl<Instruction*> &InsertPts,
PredIteratorCache &PIC);
+
+  /// \brief Create a copy of the instruction in the exit block and patch up
+  /// SSA. PN is a user of I in ExitBlock that can be used to get the number
+  /// and list of predecessors quickly.
+ Instruction *CloneInstructionInExitBlock(Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN);
};
}
@@ -531,6 +539,35 @@ bool LICM::isNotUsedInLoop(Instruction &I) {
return true;
}
+Instruction *LICM::CloneInstructionInExitBlock(Instruction &I,
+ BasicBlock &ExitBlock,
+ PHINode &PN) {
+ Instruction *New = I.clone();
+ ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
+ if (!I.getName().empty()) New->setName(I.getName() + ".le");
+
+ // Build LCSSA PHI nodes for any in-loop operands. Note that this is
+ // particularly cheap because we can rip off the PHI node that we're
+ // replacing for the number and blocks of the predecessors.
+ // OPT: If this shows up in a profile, we can instead finish sinking all
+ // invariant instructions, and then walk their operands to re-establish
+ // LCSSA. That will eliminate creating PHI nodes just to nuke them when
+ // sinking bottom-up.
+ for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
+ ++OI)
+ if (Instruction *OInst = dyn_cast<Instruction>(*OI))
+ if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
+ if (!OLoop->contains(&PN)) {
+ PHINode *OpPN =
+ PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
+ OInst->getName() + ".lcssa", ExitBlock.begin());
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
+ OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
+ *OI = OpPN;
+ }
+ return New;
+}
+
/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
@@ -550,6 +587,9 @@ void LICM::sink(Instruction &I) {
SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end());
#endif
+ // Clones of this instruction. Don't create more than one per exit block!
+ SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
+
// If this instruction is only used outside of the loop, then all users are
// PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
// the instruction.
@@ -561,30 +601,13 @@ void LICM::sink(Instruction &I) {
assert(ExitBlockSet.count(ExitBlock) &&
"The LCSSA PHI is not in an exit block!");
- Instruction *New = I.clone();
- ExitBlock->getInstList().insert(ExitBlock->getFirstInsertionPt(), New);
- if (!I.getName().empty())
- New->setName(I.getName() + ".le");
-
- // Build LCSSA PHI nodes for any in-loop operands. Note that this is
- // particularly cheap because we can rip off the PHI node that we're
- // replacing for the number and blocks of the predecessors.
- // OPT: If this shows up in a profile, we can instead finish sinking all
- // invariant instructions, and then walk their operands to re-establish
- // LCSSA. That will eliminate creating PHI nodes just to nuke them when
- // sinking bottom-up.
- for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
- ++OI)
- if (Instruction *OInst = dyn_cast<Instruction>(*OI))
- if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
- if (!OLoop->contains(PN)) {
- PHINode *OpPN = PHINode::Create(
- OInst->getType(), PN->getNumIncomingValues(),
- OInst->getName() + ".lcssa", ExitBlock->begin());
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- OpPN->addIncoming(OInst, PN->getIncomingBlock(i));
- *OI = OpPN;
- }
+ Instruction *New;
+ auto It = SunkCopies.find(ExitBlock);
+ if (It != SunkCopies.end())
+ New = It->second;
+ else
+ New = SunkCopies[ExitBlock] =
+ CloneInstructionInExitBlock(I, *ExitBlock, *PN);
PN->replaceAllUsesWith(New);
PN->eraseFromParent();
@@ -616,7 +639,7 @@ void LICM::hoist(Instruction &I) {
///
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
// If it is not a trapping instruction, it is always safe to hoist.
- if (isSafeToSpeculativelyExecute(&Inst))
+ if (isSafeToSpeculativelyExecute(&Inst, DL))
return true;
return isGuaranteedToExecute(Inst);
diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
new file mode 100644
index 0000000..846aa70
--- /dev/null
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -0,0 +1,268 @@
+//===- LoadCombine.cpp - Combine Adjacent Loads ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This transformation combines adjacent loads.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Pass.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "load-combine"
+
+STATISTIC(NumLoadsAnalyzed, "Number of loads analyzed for combining");
+STATISTIC(NumLoadsCombined, "Number of loads combined");
+
+namespace {
+struct PointerOffsetPair {
+ Value *Pointer;
+ uint64_t Offset;
+};
+
+struct LoadPOPPair {
+ LoadPOPPair(LoadInst *L, PointerOffsetPair P, unsigned O)
+ : Load(L), POP(P), InsertOrder(O) {}
+ LoadPOPPair() {}
+ LoadInst *Load;
+ PointerOffsetPair POP;
+ /// \brief The new load needs to be created before the first load in IR order.
+ unsigned InsertOrder;
+};
+
+class LoadCombine : public BasicBlockPass {
+ LLVMContext *C;
+ const DataLayout *DL;
+
+public:
+ LoadCombine()
+ : BasicBlockPass(ID),
+ C(nullptr), DL(nullptr) {
+    initializeLoadCombinePass(*PassRegistry::getPassRegistry());
+ }
+ bool doInitialization(Function &) override;
+ bool runOnBasicBlock(BasicBlock &BB) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ const char *getPassName() const override { return "LoadCombine"; }
+ static char ID;
+
+ typedef IRBuilder<true, TargetFolder> BuilderTy;
+
+private:
+ BuilderTy *Builder;
+
+ PointerOffsetPair getPointerOffsetPair(LoadInst &);
+ bool combineLoads(DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &);
+ bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &);
+ bool combineLoads(SmallVectorImpl<LoadPOPPair> &);
+};
+}
+
+bool LoadCombine::doInitialization(Function &F) {
+ DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
+ C = &F.getContext();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP) {
+ DEBUG(dbgs() << " Skipping LoadCombine -- no target data!\n");
+ return false;
+ }
+ DL = &DLP->getDataLayout();
+ return true;
+}
+
+PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) {
+ PointerOffsetPair POP;
+ POP.Pointer = LI.getPointerOperand();
+ POP.Offset = 0;
+ while (isa<BitCastInst>(POP.Pointer) || isa<GetElementPtrInst>(POP.Pointer)) {
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(POP.Pointer)) {
+ unsigned BitWidth = DL->getPointerTypeSizeInBits(GEP->getType());
+ APInt Offset(BitWidth, 0);
+ if (GEP->accumulateConstantOffset(*DL, Offset))
+ POP.Offset += Offset.getZExtValue();
+ else
+ // Can't handle GEPs with variable indices.
+ return POP;
+ POP.Pointer = GEP->getPointerOperand();
+ } else if (auto *BC = dyn_cast<BitCastInst>(POP.Pointer))
+ POP.Pointer = BC->getOperand(0);
+ }
+ return POP;
+}
+
+bool LoadCombine::combineLoads(
+ DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &LoadMap) {
+ bool Combined = false;
+ for (auto &Loads : LoadMap) {
+ if (Loads.second.size() < 2)
+ continue;
+ std::sort(Loads.second.begin(), Loads.second.end(),
+ [](const LoadPOPPair &A, const LoadPOPPair &B) {
+ return A.POP.Offset < B.POP.Offset;
+ });
+ if (aggregateLoads(Loads.second))
+ Combined = true;
+ }
+ return Combined;
+}
+
+/// \brief Try to aggregate loads from a sorted list of loads to be combined.
+///
+/// It is guaranteed that no writes occur between any of the loads. All loads
+/// have the same base pointer. There are at least two loads.
+bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
+ assert(Loads.size() >= 2 && "Insufficient loads!");
+ LoadInst *BaseLoad = nullptr;
+ SmallVector<LoadPOPPair, 8> AggregateLoads;
+ bool Combined = false;
+ uint64_t PrevOffset = -1ull;
+ uint64_t PrevSize = 0;
+ for (auto &L : Loads) {
+ if (PrevOffset == -1ull) {
+ BaseLoad = L.Load;
+ PrevOffset = L.POP.Offset;
+ PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ AggregateLoads.push_back(L);
+ continue;
+ }
+ if (L.Load->getAlignment() > BaseLoad->getAlignment())
+ continue;
+ if (L.POP.Offset > PrevOffset + PrevSize) {
+ // No other load will be combinable
+ if (combineLoads(AggregateLoads))
+ Combined = true;
+ AggregateLoads.clear();
+ PrevOffset = -1;
+ continue;
+ }
+ if (L.POP.Offset != PrevOffset + PrevSize)
+      // This load overlaps the previous one: its offset lies within the bytes
+      // covered by the last load.
+ // FIXME: We may want to handle this case.
+ continue;
+ PrevOffset = L.POP.Offset;
+ PrevSize = DL->getTypeStoreSize(L.Load->getType());
+ AggregateLoads.push_back(L);
+ }
+ if (combineLoads(AggregateLoads))
+ Combined = true;
+ return Combined;
+}
+
+/// \brief Given a list of combinable loads, combine the maximum number of them.
+bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
+ // Remove loads from the end while the size is not a power of 2.
+ unsigned TotalSize = 0;
+ for (const auto &L : Loads)
+ TotalSize += L.Load->getType()->getPrimitiveSizeInBits();
+ while (TotalSize != 0 && !isPowerOf2_32(TotalSize))
+ TotalSize -= Loads.pop_back_val().Load->getType()->getPrimitiveSizeInBits();
+ if (Loads.size() < 2)
+ return false;
+
+ DEBUG({
+ dbgs() << "***** Combining Loads ******\n";
+ for (const auto &L : Loads) {
+ dbgs() << L.POP.Offset << ": " << *L.Load << "\n";
+ }
+ });
+
+ // Find first load. This is where we put the new load.
+ LoadPOPPair FirstLP;
+ FirstLP.InsertOrder = -1u;
+ for (const auto &L : Loads)
+ if (L.InsertOrder < FirstLP.InsertOrder)
+ FirstLP = L;
+
+ unsigned AddressSpace =
+ FirstLP.POP.Pointer->getType()->getPointerAddressSpace();
+
+ Builder->SetInsertPoint(FirstLP.Load);
+ Value *Ptr = Builder->CreateConstGEP1_64(
+ Builder->CreatePointerCast(Loads[0].POP.Pointer,
+ Builder->getInt8PtrTy(AddressSpace)),
+ Loads[0].POP.Offset);
+ LoadInst *NewLoad = new LoadInst(
+ Builder->CreatePointerCast(
+ Ptr, PointerType::get(IntegerType::get(Ptr->getContext(), TotalSize),
+ Ptr->getType()->getPointerAddressSpace())),
+ Twine(Loads[0].Load->getName()) + ".combined", false,
+ Loads[0].Load->getAlignment(), FirstLP.Load);
+
+ for (const auto &L : Loads) {
+ Builder->SetInsertPoint(L.Load);
+ Value *V = Builder->CreateExtractInteger(
+ *DL, NewLoad, cast<IntegerType>(L.Load->getType()),
+ L.POP.Offset - Loads[0].POP.Offset, "combine.extract");
+ L.Load->replaceAllUsesWith(V);
+ }
+
+ NumLoadsCombined = NumLoadsCombined + Loads.size();
+ return true;
+}
+
+bool LoadCombine::runOnBasicBlock(BasicBlock &BB) {
+ if (skipOptnoneFunction(BB) || !DL)
+ return false;
+
+ IRBuilder<true, TargetFolder>
+ TheBuilder(BB.getContext(), TargetFolder(DL));
+ Builder = &TheBuilder;
+
+ DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> LoadMap;
+
+ bool Combined = false;
+ unsigned Index = 0;
+ for (auto &I : BB) {
+ if (I.mayWriteToMemory() || I.mayThrow()) {
+ if (combineLoads(LoadMap))
+ Combined = true;
+ LoadMap.clear();
+ continue;
+ }
+ LoadInst *LI = dyn_cast<LoadInst>(&I);
+ if (!LI)
+ continue;
+ ++NumLoadsAnalyzed;
+ if (!LI->isSimple() || !LI->getType()->isIntegerTy())
+ continue;
+ auto POP = getPointerOffsetPair(*LI);
+ if (!POP.Pointer)
+ continue;
+ LoadMap[POP.Pointer].push_back(LoadPOPPair(LI, POP, Index++));
+ }
+ if (combineLoads(LoadMap))
+ Combined = true;
+ return Combined;
+}
+
+void LoadCombine::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
+
+char LoadCombine::ID = 0;
+
+BasicBlockPass *llvm::createLoadCombinePass() {
+ return new LoadCombine();
+}
+
+INITIALIZE_PASS(LoadCombine, "load-combine", "Combine Adjacent Loads", false,
+ false)
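
An input shape the pass targets (a sketch): two adjacent, simple integer loads off the same base pointer with no intervening write. Subject to the alignment and power-of-two size checks above, they become one wide load plus extracting shifts/truncs.

#include <cstdint>

uint64_t sum_adjacent(const uint32_t *p) {
  uint32_t lo = p[0]; // offset 0, size 4
  uint32_t hi = p[1]; // offset 4 == 0 + 4: contiguous, so combinable
  return (uint64_t)lo + hi;
}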
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 26a83df..a12f5a7 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -112,7 +112,7 @@ namespace {
  /// the variable involved in the comparison is returned. This function will
/// be called to see if the precondition and postcondition of the loop
/// are in desirable form.
- Value *matchCondition (BranchInst *Br, BasicBlock *NonZeroTarget) const;
+ Value *matchCondition(BranchInst *Br, BasicBlock *NonZeroTarget) const;
  /// Return true iff the idiom is detected in the loop, and 1) \p CntInst
/// is set to the instruction counting the population bit. 2) \p CntPhi
@@ -122,7 +122,7 @@ namespace {
(Instruction *&CntInst, PHINode *&CntPhi, Value *&Var) const;
/// Insert ctpop intrinsic function and some obviously dead instructions.
- void transform (Instruction *CntInst, PHINode *CntPhi, Value *Var);
+ void transform(Instruction *CntInst, PHINode *CntPhi, Value *Var);
/// Create llvm.ctpop.* intrinsic function.
CallInst *createPopcntIntrinsic(IRBuilderTy &IRB, Value *Val, DebugLoc DL);
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 8b5e036..b6fbb16 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -924,8 +924,10 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
// them, and this matching fails. As an exception, we allow the alias
// set tracker to handle regular (simple) load/store dependencies.
if (FutureSideEffects &&
- ((!isSimpleLoadStore(J1) && !isSafeToSpeculativelyExecute(J1)) ||
- (!isSimpleLoadStore(J2) && !isSafeToSpeculativelyExecute(J2)))) {
+ ((!isSimpleLoadStore(J1) &&
+ !isSafeToSpeculativelyExecute(J1, DL)) ||
+ (!isSimpleLoadStore(J2) &&
+ !isSafeToSpeculativelyExecute(J2, DL)))) {
DEBUG(dbgs() << "LRR: iteration root match failed at " << *J1 <<
" vs. " << *J2 <<
" (side effects prevent reordering)\n");
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index fc28fd2..00c0f88 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -18,8 +18,10 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -36,7 +38,8 @@ UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
static cl::opt<unsigned>
UnrollCount("unroll-count", cl::init(0), cl::Hidden,
- cl::desc("Use this unroll count for all loops, for testing purposes"));
+ cl::desc("Use this unroll count for all loops including those with "
+ "unroll_count pragma values, for testing purposes"));
static cl::opt<bool>
UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
@@ -47,6 +50,11 @@ static cl::opt<bool>
UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden,
cl::desc("Unroll loops with run-time trip counts"));
+static cl::opt<unsigned>
+PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 * 1024), cl::Hidden,
+ cl::desc("Unrolled size limit for loops with an unroll(enable) or "
+ "unroll_count pragma."));
+
namespace {
class LoopUnroll : public LoopPass {
public:
@@ -109,6 +117,66 @@ namespace {
// For now, recreate dom info, if loop is unrolled.
AU.addPreserved<DominatorTreeWrapperPass>();
}
+
+ // Fill in the UnrollingPreferences parameter with values from the
+ // TargetTransformationInfo.
+ void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI,
+ TargetTransformInfo::UnrollingPreferences &UP) {
+ UP.Threshold = CurrentThreshold;
+ UP.OptSizeThreshold = OptSizeUnrollThreshold;
+ UP.PartialThreshold = CurrentThreshold;
+ UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
+ UP.Count = CurrentCount;
+ UP.MaxCount = UINT_MAX;
+ UP.Partial = CurrentAllowPartial;
+ UP.Runtime = CurrentRuntime;
+ TTI.getUnrollingPreferences(L, UP);
+ }
+
+    // Select and return an unroll count based on parameters from the user,
+    // unroll preferences, unroll pragmas, or a heuristic. SetExplicitly is set
+    // to true if the unroll count is set by the user or a pragma rather than
+    // selected heuristically.
+ unsigned
+ selectUnrollCount(const Loop *L, unsigned TripCount, bool HasEnablePragma,
+ unsigned PragmaCount,
+ const TargetTransformInfo::UnrollingPreferences &UP,
+ bool &SetExplicitly);
+
+
+ // Select threshold values used to limit unrolling based on a
+ // total unrolled size. Parameters Threshold and PartialThreshold
+ // are set to the maximum unrolled size for fully and partially
+ // unrolled loops respectively.
+ void selectThresholds(const Loop *L, bool HasPragma,
+ const TargetTransformInfo::UnrollingPreferences &UP,
+ unsigned &Threshold, unsigned &PartialThreshold) {
+ // Determine the current unrolling threshold. While this is
+ // normally set from UnrollThreshold, it is overridden to a
+ // smaller value if the current function is marked as
+ // optimize-for-size, and the unroll threshold was not user
+ // specified.
+ Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
+ PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold;
+ if (!UserThreshold &&
+ L->getHeader()->getParent()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize)) {
+ Threshold = UP.OptSizeThreshold;
+ PartialThreshold = UP.PartialOptSizeThreshold;
+ }
+ if (HasPragma) {
+      // If the loop has an unrolling pragma, we want to be more aggressive
+      // with unrolling limits. Set thresholds to at least the
+      // PragmaUnrollThreshold value, which is larger than the default limits.
+ if (Threshold != NoThreshold)
+ Threshold = std::max<unsigned>(Threshold, PragmaUnrollThreshold);
+ if (PartialThreshold != NoThreshold)
+ PartialThreshold =
+ std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold);
+ }
+ }
};
}
@@ -151,6 +219,103 @@ static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
return LoopSize;
}
+// Returns the value associated with the given metadata node name (for
+// example, "llvm.loop.unroll.count"). If no such named metadata node
+// exists, then nullptr is returned.
+static const ConstantInt *GetUnrollMetadataValue(const Loop *L,
+ StringRef Name) {
+ MDNode *LoopID = L->getLoopID();
+ if (!LoopID) return nullptr;
+
+ // First operand should refer to the loop id itself.
+ assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
+ assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
+
+ for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
+ const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
+ if (!MD) continue;
+
+ const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
+ if (!S) continue;
+
+ if (Name.equals(S->getString())) {
+ assert(MD->getNumOperands() == 2 &&
+ "Unroll hint metadata should have two operands.");
+ return cast<ConstantInt>(MD->getOperand(1));
+ }
+ }
+ return nullptr;
+}
+
+// Returns true if the loop has an unroll(enable) pragma.
+static bool HasUnrollEnablePragma(const Loop *L) {
+ const ConstantInt *EnableValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.enable");
+ return (EnableValue && EnableValue->getZExtValue());
+}
+
+// Returns true if the loop has an unroll(disable) pragma.
+static bool HasUnrollDisablePragma(const Loop *L) {
+ const ConstantInt *EnableValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.enable");
+ return (EnableValue && !EnableValue->getZExtValue());
+}
+
+// If loop has an unroll_count pragma return the (necessarily
+// positive) value from the pragma. Otherwise return 0.
+static unsigned UnrollCountPragmaValue(const Loop *L) {
+ const ConstantInt *CountValue =
+ GetUnrollMetadataValue(L, "llvm.loop.unroll.count");
+ if (CountValue) {
+ unsigned Count = CountValue->getZExtValue();
+ assert(Count >= 1 && "Unroll count must be positive.");
+ return Count;
+ }
+ return 0;
+}
+
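+
+// Where this metadata comes from (a sketch, assuming clang's loop pragmas,
+// which lower to "llvm.loop.unroll.*" operands on the loop ID):
+//
+//   void scale(float *a, int n) {
+//   #pragma clang loop unroll_count(4) // becomes "llvm.loop.unroll.count", 4
+//     for (int i = 0; i < n; ++i)
+//       a[i] *= 2.0f;
+//   }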
+unsigned LoopUnroll::selectUnrollCount(
+ const Loop *L, unsigned TripCount, bool HasEnablePragma,
+ unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP,
+ bool &SetExplicitly) {
+ SetExplicitly = true;
+
+ // User-specified count (either as a command-line option or
+ // constructor parameter) has highest precedence.
+ unsigned Count = UserCount ? CurrentCount : 0;
+
+ // If there is no user-specified count, unroll pragmas have the next
+  // highest precedence.
+ if (Count == 0) {
+ if (PragmaCount) {
+ Count = PragmaCount;
+ } else if (HasEnablePragma) {
+ // unroll(enable) pragma without an unroll_count pragma
+ // indicates to unroll loop fully.
+ Count = TripCount;
+ }
+ }
+
+ if (Count == 0)
+ Count = UP.Count;
+
+ if (Count == 0) {
+ SetExplicitly = false;
+ if (TripCount == 0)
+ // Runtime trip count.
+ Count = UnrollRuntimeCount;
+ else
+ // Conservative heuristic: if we know the trip count, see if we can
+ // completely unroll (subject to the threshold, checked below); otherwise
+      // try to find the greatest divisor of the trip count whose unrolled
+      // size is still under the threshold value.
+ Count = TripCount;
+ }
+ if (TripCount && Count > TripCount)
+ return TripCount;
+ return Count;
+}
+
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
if (skipOptnoneFunction(L))
return false;
@@ -162,33 +327,16 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
BasicBlock *Header = L->getHeader();
DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
<< "] Loop %" << Header->getName() << "\n");
- (void)Header;
- TargetTransformInfo::UnrollingPreferences UP;
- UP.Threshold = CurrentThreshold;
- UP.OptSizeThreshold = OptSizeUnrollThreshold;
- UP.PartialThreshold = CurrentThreshold;
- UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
- UP.Count = CurrentCount;
- UP.MaxCount = UINT_MAX;
- UP.Partial = CurrentAllowPartial;
- UP.Runtime = CurrentRuntime;
- TTI.getUnrollingPreferences(L, UP);
-
- // Determine the current unrolling threshold. While this is normally set
- // from UnrollThreshold, it is overridden to a smaller value if the current
- // function is marked as optimize-for-size, and the unroll threshold was
- // not user specified.
- unsigned Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
- unsigned PartialThreshold =
- UserThreshold ? CurrentThreshold : UP.PartialThreshold;
- if (!UserThreshold &&
- Header->getParent()->getAttributes().
- hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize)) {
- Threshold = UP.OptSizeThreshold;
- PartialThreshold = UP.PartialOptSizeThreshold;
+ if (HasUnrollDisablePragma(L)) {
+ return false;
}
+ bool HasEnablePragma = HasUnrollEnablePragma(L);
+ unsigned PragmaCount = UnrollCountPragmaValue(L);
+ bool HasPragma = HasEnablePragma || PragmaCount > 0;
+
+ TargetTransformInfo::UnrollingPreferences UP;
+ getUnrollingPreferences(L, TTI, UP);
// Find trip count and trip multiple if count is not available
unsigned TripCount = 0;
@@ -202,79 +350,117 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
TripMultiple = SE->getSmallConstantTripMultiple(L, LatchBlock);
}
- bool Runtime = UserRuntime ? CurrentRuntime : UP.Runtime;
-
- // Use a default unroll-count if the user doesn't specify a value
- // and the trip count is a run-time value. The default is different
- // for run-time or compile-time trip count loops.
- unsigned Count = UserCount ? CurrentCount : UP.Count;
- if (Runtime && Count == 0 && TripCount == 0)
- Count = UnrollRuntimeCount;
+ // Select an initial unroll count. This may be reduced later based
+ // on size thresholds.
+ bool CountSetExplicitly;
+ unsigned Count = selectUnrollCount(L, TripCount, HasEnablePragma, PragmaCount,
+ UP, CountSetExplicitly);
+
+ unsigned NumInlineCandidates;
+ bool notDuplicatable;
+ unsigned LoopSize =
+ ApproximateLoopSize(L, NumInlineCandidates, notDuplicatable, TTI);
+ DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
+ uint64_t UnrolledSize = (uint64_t)LoopSize * Count;
+ if (notDuplicatable) {
+ DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
+ << " instructions.\n");
+ return false;
+ }
+ if (NumInlineCandidates != 0) {
+ DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
+ return false;
+ }
- if (Count == 0) {
- // Conservative heuristic: if we know the trip count, see if we can
- // completely unroll (subject to the threshold, checked below); otherwise
- // try to find greatest modulo of the trip count which is still under
- // threshold value.
- if (TripCount == 0)
- return false;
- Count = TripCount;
+ unsigned Threshold, PartialThreshold;
+ selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold);
+
+ // Given Count, TripCount and thresholds determine the type of
+ // unrolling which is to be performed.
+ enum { Full = 0, Partial = 1, Runtime = 2 };
+ int Unrolling;
+ if (TripCount && Count == TripCount) {
+ if (Threshold != NoThreshold && UnrolledSize > Threshold) {
+ DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
+ << " because size: " << UnrolledSize << ">" << Threshold
+ << "\n");
+ Unrolling = Partial;
+ } else {
+ Unrolling = Full;
+ }
+ } else if (TripCount && Count < TripCount) {
+ Unrolling = Partial;
+ } else {
+ Unrolling = Runtime;
}
- // Enforce the threshold.
- if (Threshold != NoThreshold && PartialThreshold != NoThreshold) {
- unsigned NumInlineCandidates;
- bool notDuplicatable;
- unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates,
- notDuplicatable, TTI);
- DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
- if (notDuplicatable) {
- DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
- << " instructions.\n");
+ // Reduce count based on the type of unrolling and the threshold values.
+ unsigned OriginalCount = Count;
+ bool AllowRuntime = UserRuntime ? CurrentRuntime : UP.Runtime;
+ if (Unrolling == Partial) {
+ bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
+ if (!AllowPartial && !CountSetExplicitly) {
+ DEBUG(dbgs() << " will not try to unroll partially because "
+ << "-unroll-allow-partial not given\n");
return false;
}
- if (NumInlineCandidates != 0) {
- DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
+ if (PartialThreshold != NoThreshold && UnrolledSize > PartialThreshold) {
+ // Reduce unroll count to be modulo of TripCount for partial unrolling.
+ Count = PartialThreshold / LoopSize;
+ while (Count != 0 && TripCount % Count != 0)
+ Count--;
+ }
+ } else if (Unrolling == Runtime) {
+ if (!AllowRuntime && !CountSetExplicitly) {
+      DEBUG(dbgs() << "  will not try to unroll loop with runtime trip count "
+                   << "because -unroll-runtime not given\n");
return false;
}
- uint64_t Size = (uint64_t)LoopSize*Count;
- if (TripCount != 1 &&
- (Size > Threshold || (Count != TripCount && Size > PartialThreshold))) {
- if (Size > Threshold)
- DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
- << " because size: " << Size << ">" << Threshold << "\n");
-
- bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
- if (!AllowPartial && !(Runtime && TripCount == 0)) {
- DEBUG(dbgs() << " will not try to unroll partially because "
- << "-unroll-allow-partial not given\n");
- return false;
- }
- if (TripCount) {
- // Reduce unroll count to be modulo of TripCount for partial unrolling
- Count = PartialThreshold / LoopSize;
- while (Count != 0 && TripCount%Count != 0)
- Count--;
- }
- else if (Runtime) {
- // Reduce unroll count to be a lower power-of-two value
- while (Count != 0 && Size > PartialThreshold) {
- Count >>= 1;
- Size = LoopSize*Count;
- }
- }
- if (Count > UP.MaxCount)
- Count = UP.MaxCount;
- if (Count < 2) {
- DEBUG(dbgs() << " could not unroll partially\n");
- return false;
+ // Reduce unroll count to be the largest power-of-two factor of
+ // the original count which satisfies the threshold limit.
+ while (Count != 0 && UnrolledSize > PartialThreshold) {
+ Count >>= 1;
+ UnrolledSize = LoopSize * Count;
+ }
+ if (Count > UP.MaxCount)
+ Count = UP.MaxCount;
+ DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n");
+ }
+
+ if (HasPragma) {
+ // Emit optimization remarks if we are unable to unroll the loop
+ // as directed by a pragma.
+ DebugLoc LoopLoc = L->getStartLoc();
+ Function *F = Header->getParent();
+ LLVMContext &Ctx = F->getContext();
+ if (HasEnablePragma && PragmaCount == 0) {
+ if (TripCount && Count != TripCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to fully unroll loop as directed by unroll(enable) pragma "
+ "because unrolled size is too large.");
+ } else if (!TripCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to fully unroll loop as directed by unroll(enable) pragma "
+ "because loop has a runtime trip count.");
}
- DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n");
+ } else if (PragmaCount > 0 && Count != OriginalCount) {
+ emitOptimizationRemarkMissed(
+ Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "Unable to unroll loop the number of times directed by "
+ "unroll_count pragma because unrolled size is too large.");
}
}
+ if (Unrolling != Full && Count < 2) {
+ // Partial unrolling by 1 is a nop. For full unrolling, a factor
+ // of 1 makes sense because loop control can be eliminated.
+ return false;
+ }
+
// Unroll the loop.
- if (!UnrollLoop(L, Count, TripCount, Runtime, TripMultiple, LI, this, &LPM))
+ if (!UnrollLoop(L, Count, TripCount, AllowRuntime, TripMultiple, LI, this, &LPM))
return false;
return true;
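
A worked instance of the runtime-count reduction above (a sketch with made-up sizes):

#include <cstdint>

unsigned reducedCount() {
  unsigned Count = 8, LoopSize = 40, PartialThreshold = 150;
  uint64_t UnrolledSize = (uint64_t)LoopSize * Count; // 320
  while (Count != 0 && UnrolledSize > PartialThreshold) {
    Count >>= 1;                     // 8 -> 4 -> 2
    UnrolledSize = LoopSize * Count; // 160 -> 80
  }
  return Count; // 2: the largest power of two whose unrolled size fits
}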
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 4251ac4..3314e1e 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -32,7 +32,10 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
- CXI->replaceAllUsesWith(Orig);
+ Res = Builder.CreateInsertValue(UndefValue::get(CXI->getType()), Orig, 0);
+ Res = Builder.CreateInsertValue(Res, Equal, 1);
+
+ CXI->replaceAllUsesWith(Res);
CXI->eraseFromParent();
return true;
}
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 986d6a4..ea2cf7c 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -1368,11 +1368,10 @@ Value *Reassociate::OptimizeXor(Instruction *I,
Value *Reassociate::OptimizeAdd(Instruction *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Scan the operand lists looking for X and -X pairs. If we find any, we
- // can simplify the expression. X+-X == 0. While we're at it, scan for any
+  // can simplify expressions like X+-X == 0 and X+~X == -1. While we're at
+  // it, scan for any
// duplicates. We want to canonicalize Y+Y+Y+Z -> 3*Y+Z.
- //
- // TODO: We could handle "X + ~X" -> "-1" if we wanted, since "-X = ~X+1".
- //
+
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
Value *TheOp = Ops[i].Op;
// Check to see if we've seen this operand before. If so, we factor all
@@ -1412,19 +1411,28 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
continue;
}
- // Check for X and -X in the operand list.
- if (!BinaryOperator::isNeg(TheOp))
+ // Check for X and -X or X and ~X in the operand list.
+ if (!BinaryOperator::isNeg(TheOp) && !BinaryOperator::isNot(TheOp))
continue;
- Value *X = BinaryOperator::getNegArgument(TheOp);
+ Value *X = nullptr;
+ if (BinaryOperator::isNeg(TheOp))
+ X = BinaryOperator::getNegArgument(TheOp);
+ else if (BinaryOperator::isNot(TheOp))
+ X = BinaryOperator::getNotArgument(TheOp);
+
unsigned FoundX = FindInOperandList(Ops, i, X);
if (FoundX == i)
continue;
// Remove X and -X from the operand list.
- if (Ops.size() == 2)
+ if (Ops.size() == 2 && BinaryOperator::isNeg(TheOp))
return Constant::getNullValue(X->getType());
+ // Remove X and ~X from the operand list.
+ if (Ops.size() == 2 && BinaryOperator::isNot(TheOp))
+ return Constant::getAllOnesValue(X->getType());
+
Ops.erase(Ops.begin()+i);
if (i < FoundX)
--FoundX;
@@ -1434,6 +1442,13 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
++NumAnnihil;
--i; // Revisit element.
e -= 2; // Removed two elements.
+
+      // If the pair was X and ~X, append -1 to the operand list.
+ if (BinaryOperator::isNot(TheOp)) {
+ Value *V = Constant::getAllOnesValue(X->getType());
+ Ops.insert(Ops.end(), ValueEntry(getRank(V), V));
+ e += 1;
+ }
}
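
The identity behind the new folding (a sketch): in two's complement ~x == -x - 1, so x + ~x == -1 at any bit width, which is why the pair collapses to an all-ones constant.

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, 1, -7, INT32_MAX, INT32_MIN})
    assert(x + ~x == -1); // each bit pair is 0 + 1, so the sum is all ones
  return 0;
}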
// Scan the operand list, checking to see if there are any common factors
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index feeb231..90c3520 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -494,7 +494,9 @@ private:
void visitResumeInst (TerminatorInst &I) { /*returns void*/ }
void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
void visitFenceInst (FenceInst &I) { /*returns void*/ }
- void visitAtomicCmpXchgInst (AtomicCmpXchgInst &I) { markOverdefined(&I); }
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+ markAnythingOverdefined(&I);
+ }
void visitAtomicRMWInst (AtomicRMWInst &I) { markOverdefined(&I); }
void visitAllocaInst (Instruction &I) { markOverdefined(&I); }
void visitVAArgInst (Instruction &I) { markAnythingOverdefined(&I); }
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 04bf4f8..8c7f253 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -1032,11 +1032,6 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
UserTy = SI->getValueOperand()->getType();
}
- if (!UserTy || (Ty && Ty != UserTy))
- TyIsCommon = false; // Give up on anything but an iN type.
- else
- Ty = UserTy;
-
if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
// If the type is larger than the partition, skip it. We only encounter
// this for split integer operations where we want to use the type of the
@@ -1051,6 +1046,13 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
ITy = UserITy;
}
+
+ // To avoid depending on the order of slices, Ty and TyIsCommon must not
+ // depend on types skipped above.
+ if (!UserTy || (Ty && Ty != UserTy))
+ TyIsCommon = false; // Give up on anything but an iN type.
+ else
+ Ty = UserTy;
}
return TyIsCommon ? Ty : ITy;
@@ -1128,7 +1130,7 @@ static bool isSafePHIToSpeculate(PHINode &PN,
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
- if (InVal->isDereferenceablePointer() ||
+ if (InVal->isDereferenceablePointer(DL) ||
isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
continue;
@@ -1196,8 +1198,8 @@ static bool isSafeSelectToSpeculate(SelectInst &SI,
const DataLayout *DL = nullptr) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer();
- bool FDerefable = FValue->isDereferenceablePointer();
+ bool TDerefable = TValue->isDereferenceablePointer(DL);
+ bool FDerefable = FValue->isDereferenceablePointer(DL);
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 8e557aa..73c97ff 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -450,13 +450,14 @@ void SampleModuleProfile::dump() {
///
/// \returns true if the file was loaded successfully, false otherwise.
bool SampleModuleProfile::loadText() {
- std::unique_ptr<MemoryBuffer> Buffer;
- error_code EC = MemoryBuffer::getFile(Filename, Buffer);
- if (EC) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(Filename);
+ if (std::error_code EC = BufferOrErr.getError()) {
std::string Msg(EC.message());
M.getContext().diagnose(DiagnosticInfoSampleProfile(Filename.data(), Msg));
return false;
}
+ std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get());
line_iterator LineIt(*Buffer, '#');
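
The ErrorOr-based loading idiom in isolation (a sketch; the function name and error handling are hypothetical):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
#include <system_error>

static bool loadFileOrFail(llvm::StringRef Path,
                           std::unique_ptr<llvm::MemoryBuffer> &Out) {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFile(Path);
  if (std::error_code EC = BufOrErr.getError())
    return false; // EC.message() carries the OS-level failure reason
  Out = std::move(BufOrErr.get());
  return true;
}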
// Read the profile of each function. Since each function may be
diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp
index f8f828c..edf012d 100644
--- a/lib/Transforms/Scalar/Scalar.cpp
+++ b/lib/Transforms/Scalar/Scalar.cpp
@@ -65,6 +65,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeSinkingPass(Registry);
initializeTailCallElimPass(Registry);
initializeSeparateConstOffsetFromGEPPass(Registry);
+ initializeLoadCombinePass(Registry);
}
void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 58192fc..e2a24a7 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1142,8 +1142,8 @@ public:
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
- bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
- bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
+ bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(DL);
+ bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(DL);
for (User *U : SI->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1226,7 +1226,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
// If this pointer is always safe to load, or if we can prove that there is
// already a load in the block, then we can move the load to the pred block.
- if (InVal->isDereferenceablePointer() ||
+ if (InVal->isDereferenceablePointer(DL) ||
isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
continue;
diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index b8529e1..62f2026 100644
--- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -121,41 +121,75 @@ class ConstantOffsetExtractor {
/// numeric value of the extracted constant offset (0 if failed), and a
/// new index representing the remainder (equal to the original index minus
/// the constant offset).
- /// \p Idx The given GEP index
- /// \p NewIdx The new index to replace
- /// \p DL The datalayout of the module
- /// \p IP Calculating the new index requires new instructions. IP indicates
- /// where to insert them (typically right before the GEP).
+ /// \p Idx The given GEP index
+ /// \p NewIdx The new index to replace (output)
+ /// \p DL The datalayout of the module
+ /// \p GEP The given GEP
static int64_t Extract(Value *Idx, Value *&NewIdx, const DataLayout *DL,
- Instruction *IP);
+ GetElementPtrInst *GEP);
/// Looks for a constant offset without extracting it. The meaning of the
/// arguments and the return value are the same as Extract.
- static int64_t Find(Value *Idx, const DataLayout *DL);
+ static int64_t Find(Value *Idx, const DataLayout *DL, GetElementPtrInst *GEP);
private:
ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
: DL(Layout), IP(InsertionPt) {}
- /// Searches the expression that computes V for a constant offset. If the
- /// searching is successful, update UserChain as a path from V to the constant
- /// offset.
- int64_t find(Value *V);
- /// A helper function to look into both operands of a binary operator U.
- /// \p IsSub Whether U is a sub operator. If so, we need to negate the
- /// constant offset at some point.
- int64_t findInEitherOperand(User *U, bool IsSub);
- /// After finding the constant offset and how it is reached from the GEP
- /// index, we build a new index which is a clone of the old one except the
-  /// constant offset is removed. For example, given (a + (b + 5)) and knowing
- /// the constant offset is 5, this function returns (a + b).
+ /// Searches the expression that computes V for a non-zero constant C s.t.
+  /// V can be reassociated into the form V' + C. If the search is
+  /// successful, returns C and updates UserChain as a def-use chain from C to V;
+ /// otherwise, UserChain is empty.
///
- /// We cannot simply change the constant to zero because the expression that
- /// computes the index or its intermediate result may be used by others.
- Value *rebuildWithoutConstantOffset();
- // A helper function for rebuildWithoutConstantOffset that rebuilds the direct
- // user (U) of the constant offset (C).
- Value *rebuildLeafWithoutConstantOffset(User *U, Value *C);
- /// Returns a clone of U except the first occurrence of From with To.
- Value *cloneAndReplace(User *U, Value *From, Value *To);
+ /// \p V The given expression
+ /// \p SignExtended Whether V will be sign-extended in the computation of the
+ /// GEP index
+ /// \p ZeroExtended Whether V will be zero-extended in the computation of the
+ /// GEP index
+ /// \p NonNegative Whether V is guaranteed to be non-negative. For example,
+ /// an index of an inbounds GEP is guaranteed to be
+  ///                 non-negative. Leveraging this, we can better split
+ /// inbounds GEPs.
+ APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
+ /// A helper function to look into both operands of a binary operator.
+ APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
+ bool ZeroExtended);
+ /// After finding the constant offset C from the GEP index I, we build a new
+ /// index I' s.t. I' + C = I. This function builds and returns the new
+ /// index I' according to UserChain produced by function "find".
+ ///
+ /// The building conceptually takes two steps:
+ /// 1) iteratively distribute s/zext towards the leaves of the expression tree
+ /// that computes I
+ /// 2) reassociate the expression tree to the form I' + C.
+ ///
+ /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
+ /// sext to a, b and 5 so that we have
+ /// sext(a) + (sext(b) + 5).
+ /// Then, we reassociate it to
+ /// (sext(a) + sext(b)) + 5.
+ /// Given this form, we know I' is sext(a) + sext(b).
+ Value *rebuildWithoutConstOffset();
+ /// After the first step of rebuilding the GEP index without the constant
+ /// offset, distribute s/zext to the operands of all operators in UserChain.
+ /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
+ /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
+ ///
+ /// The function also updates UserChain to point to new subexpressions after
+ /// distributing s/zext. e.g., the old UserChain of the above example is
+ /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
+ /// and the new UserChain is
+ /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
+ /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5)))
+ ///
+ /// \p ChainIndex The index to UserChain. ChainIndex is initially
+ /// UserChain.size() - 1, and is decremented during
+ /// the recursion.
+ Value *distributeExtsAndCloneChain(unsigned ChainIndex);
+ /// Reassociates the GEP index to the form I' + C and returns I'.
+ Value *removeConstOffset(unsigned ChainIndex);
+ /// A helper function to apply ExtInsts, a list of s/zext, to value V.
+ /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
+ /// returns "sext i32 (zext i16 V to i32) to i64".
+ Value *applyExts(Value *V);
/// Returns true if LHS and RHS have no bits in common, i.e., LHS & RHS == 0.
bool NoCommonBits(Value *LHS, Value *RHS) const;
@@ -163,20 +197,26 @@ class ConstantOffsetExtractor {
/// \p KnownOne Mask of all bits that are known to be one.
/// \p KnownZero Mask of all bits that are known to be zero.
void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
- /// Finds the first use of Used in U. Returns -1 if not found.
- static unsigned FindFirstUse(User *U, Value *Used);
- /// Returns whether OPC (sext or zext) can be distributed to the operands of
- /// BO. e.g., sext can be distributed to the operands of an "add nsw" because
- /// sext (add nsw a, b) == add nsw (sext a), (sext b).
- static bool Distributable(unsigned OPC, BinaryOperator *BO);
+ /// A helper function that returns whether we can trace into the operands
+ /// of binary operator BO for a constant offset.
+ ///
+ /// \p SignExtended Whether BO is surrounded by sext
+ /// \p ZeroExtended Whether BO is surrounded by zext
+ /// \p NonNegative Whether BO is known to be non-negative, e.g., an in-bound
+ /// array index.
+ bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
+ bool NonNegative);
/// The path from the constant offset to the old GEP index. e.g., if the GEP
/// index is "a * b + (c + 5)". After running function find, UserChain[0] will
/// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
/// UserChain[2] will be the entire expression "a * b + (c + 5)".
///
- /// This path helps rebuildWithoutConstantOffset rebuild the new GEP index.
+ /// This path helps to rebuild the new GEP index.
SmallVector<User *, 8> UserChain;
+ /// A data structure used in rebuildWithoutConstOffset. Contains all
+ /// sext/zext instructions along UserChain.
+ SmallVector<CastInst *, 16> ExtInsts;
/// The data layout of the module. Used in ComputeKnownBits.
const DataLayout *DL;
Instruction *IP; /// Insertion position of cloned instructions.
@@ -196,6 +236,15 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetTransformInfo>();
}
+
+ bool doInitialization(Module &M) override {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (DLP == nullptr)
+ report_fatal_error("data layout missing");
+ DL = &DLP->getDataLayout();
+ return false;
+ }
+
bool runOnFunction(Function &F) override;
private:
@@ -206,8 +255,42 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
/// function only inspects the GEP without changing it. The output
/// NeedsExtraction indicates whether we can extract a non-zero constant
/// offset from any index.
- int64_t accumulateByteOffset(GetElementPtrInst *GEP, const DataLayout *DL,
- bool &NeedsExtraction);
+ int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
+ /// Canonicalize array indices to pointer-size integers. This helps to
+ /// simplify the logic of splitting a GEP. For example, if a + b is a
+ /// pointer-size integer, we have
+ /// gep base, a + b = gep (gep base, a), b
+ /// However, this equality may not hold if the size of a + b is smaller than
+ /// the pointer size, because LLVM conceptually sign-extends GEP indices to
+ /// pointer size before computing the address
+ /// (http://llvm.org/docs/LangRef.html#id181).
+ ///
+ /// This canonicalization is very likely already done in clang and
+ /// instcombine. Therefore, the program will probably remain the same.
+ ///
+ /// Returns true if the module changes.
+ ///
+ /// Verified in @i32_add in split-gep.ll
+ bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
+ /// For each array index that is in the form of zext(a), convert it to sext(a)
+ /// if we can prove zext(a) <= max signed value of typeof(a). We prefer
+ /// sext(a) to zext(a), because in the special case where x + y >= 0 and
+ /// (x >= 0 or y >= 0), function CanTraceInto can split sext(x + y),
+ /// while no such case exists for zext(x + y).
+ ///
+ /// Note that
+ /// zext(x + y) = zext(x) + zext(y)
+ /// is wrong, e.g.,
+ /// zext i32(UINT_MAX + 1) to i64 !=
+ /// (zext i32 UINT_MAX to i64) + (zext i32 1 to i64)
+ ///
+ /// Returns true if the module changes.
+ ///
+ /// Verified in @inbounds_zext_add in split-gep.ll and @sum_of_array3 in
+ /// split-gep-and-gvn.ll
+ bool convertInBoundsZExtToSExt(GetElementPtrInst *GEP);
+
+ const DataLayout *DL;
};
} // anonymous namespace
@@ -227,181 +310,272 @@ FunctionPass *llvm::createSeparateConstOffsetFromGEPPass() {
return new SeparateConstOffsetFromGEP();
}
-bool ConstantOffsetExtractor::Distributable(unsigned OPC, BinaryOperator *BO) {
- assert(OPC == Instruction::SExt || OPC == Instruction::ZExt);
+bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
+ bool ZeroExtended,
+ BinaryOperator *BO,
+ bool NonNegative) {
+ // We only consider ADD, SUB and OR, because a non-zero constant found in
+ // expressions composed of these operations can be easily hoisted as a
+ // constant offset by reassociation.
+ if (BO->getOpcode() != Instruction::Add &&
+ BO->getOpcode() != Instruction::Sub &&
+ BO->getOpcode() != Instruction::Or) {
+ return false;
+ }
+
+ Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
+ // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
+ // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
+ if (BO->getOpcode() == Instruction::Or && !NoCommonBits(LHS, RHS))
+ return false;
+
+ // In addition, tracing into BO requires that its surrounding s/zext (if
+ // any) is distributable to both operands.
+ //
+ // Suppose BO = A op B.
+ // SignExtended | ZeroExtended | Distributable?
+ // --------------+--------------+----------------------------------
+ // 0 | 0 | true because no s/zext exists
+ // 0 | 1 | zext(BO) == zext(A) op zext(B)
+ // 1 | 0 | sext(BO) == sext(A) op sext(B)
+ // 1 | 1 | zext(sext(BO)) ==
+ // | | zext(sext(A)) op zext(sext(B))
+ if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
+ // If a + b >= 0 and (a >= 0 or b >= 0), then
+ // sext(a + b) = sext(a) + sext(b)
+ // even if the addition is not marked nsw.
+ //
+ // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
+ // index if the constant offset is non-negative.
+ //
+ // Verified in @sext_add in split-gep.ll.
+ if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
+ if (!ConstLHS->isNegative())
+ return true;
+ }
+ if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
+ if (!ConstRHS->isNegative())
+ return true;
+ }
+ }
// sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
// zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
if (BO->getOpcode() == Instruction::Add ||
BO->getOpcode() == Instruction::Sub) {
- return (OPC == Instruction::SExt && BO->hasNoSignedWrap()) ||
- (OPC == Instruction::ZExt && BO->hasNoUnsignedWrap());
+ if (SignExtended && !BO->hasNoSignedWrap())
+ return false;
+ if (ZeroExtended && !BO->hasNoUnsignedWrap())
+ return false;
}
- // sext/zext (and/or/xor A, B) == and/or/xor (sext/zext A), (sext/zext B)
- // -instcombine also leverages this invariant to do the reverse
- // transformation to reduce integer casts.
- return BO->getOpcode() == Instruction::And ||
- BO->getOpcode() == Instruction::Or ||
- BO->getOpcode() == Instruction::Xor;
+ return true;
}
-int64_t ConstantOffsetExtractor::findInEitherOperand(User *U, bool IsSub) {
- assert(U->getNumOperands() == 2);
- int64_t ConstantOffset = find(U->getOperand(0));
+APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
+ bool SignExtended,
+ bool ZeroExtended) {
+ // BO being non-negative does not shed light on whether its operands are
+ // non-negative. Clear the NonNegative flag here.
+ APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
+ /* NonNegative */ false);
// If we found a constant offset in the left operand, stop and return that.
// This shortcut might cause us to miss opportunities of combining the
// constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
// However, such cases are probably already handled by -instcombine,
// given this pass runs after the standard optimizations.
if (ConstantOffset != 0) return ConstantOffset;
- ConstantOffset = find(U->getOperand(1));
+ ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
+ /* NonNegative */ false);
// If BO is a sub operator, negate the constant offset found in the right
// operand.
- return IsSub ? -ConstantOffset : ConstantOffset;
+ if (BO->getOpcode() == Instruction::Sub)
+ ConstantOffset = -ConstantOffset;
+ return ConstantOffset;
}
-int64_t ConstantOffsetExtractor::find(Value *V) {
- // TODO(jingyue): We can even trace into integer/pointer casts, such as
+APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
+ bool ZeroExtended, bool NonNegative) {
+ // TODO(jingyue): We could trace into integer/pointer casts, such as
// inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
// integers because it gives good enough results for our benchmarks.
- assert(V->getType()->isIntegerTy());
+ unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ // We cannot do much with Values that are not a User, such as an Argument.
User *U = dyn_cast<User>(V);
- // We cannot do much with Values that are not a User, such as BasicBlock and
- // MDNode.
- if (U == nullptr) return 0;
+ if (U == nullptr) return APInt(BitWidth, 0);
- int64_t ConstantOffset = 0;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(U)) {
+ APInt ConstantOffset(BitWidth, 0);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// Hooray, we found it!
- ConstantOffset = CI->getSExtValue();
- } else if (Operator *O = dyn_cast<Operator>(U)) {
- // The GEP index may be more complicated than a simple addition of a
- // varaible and a constant. Therefore, we trace into subexpressions for more
- // hoisting opportunities.
- switch (O->getOpcode()) {
- case Instruction::Add: {
- ConstantOffset = findInEitherOperand(U, false);
- break;
- }
- case Instruction::Sub: {
- ConstantOffset = findInEitherOperand(U, true);
- break;
- }
- case Instruction::Or: {
- // If LHS and RHS don't have common bits, (LHS | RHS) is equivalent to
- // (LHS + RHS).
- if (NoCommonBits(U->getOperand(0), U->getOperand(1)))
- ConstantOffset = findInEitherOperand(U, false);
- break;
- }
- case Instruction::SExt:
- case Instruction::ZExt: {
- // We trace into sext/zext if the operator can be distributed to its
- // operand. e.g., we can transform into "sext (add nsw a, 5)" and
- // extract constant 5, because
- // sext (add nsw a, 5) == add nsw (sext a), 5
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
- if (Distributable(O->getOpcode(), BO))
- ConstantOffset = find(U->getOperand(0));
- }
- break;
- }
+ ConstantOffset = CI->getValue();
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
+ // Trace into subexpressions for more hoisting opportunities.
+ if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
+ ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
}
+ } else if (isa<SExtInst>(V)) {
+ ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
+ ZeroExtended, NonNegative).sext(BitWidth);
+ } else if (isa<ZExtInst>(V)) {
+ // As an optimization, we can clear the SignExtended flag because
+ // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
+ //
+ // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
+ ConstantOffset =
+ find(U->getOperand(0), /* SignExtended */ false,
+ /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
}
- // If we found a non-zero constant offset, adds it to the path for future
- // transformation (rebuildWithoutConstantOffset). Zero is a valid constant
- // offset, but doesn't help this optimization.
+
+ // If we found a non-zero constant offset, add it to the path for
+ // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
+ // help this optimization.
if (ConstantOffset != 0)
UserChain.push_back(U);
return ConstantOffset;
}
-unsigned ConstantOffsetExtractor::FindFirstUse(User *U, Value *Used) {
- for (unsigned I = 0, E = U->getNumOperands(); I < E; ++I) {
- if (U->getOperand(I) == Used)
- return I;
+Value *ConstantOffsetExtractor::applyExts(Value *V) {
+ Value *Current = V;
+ // ExtInsts is built in use-def order. Therefore, we apply the casts to V
+ // in reverse order.
+ for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
+ if (Constant *C = dyn_cast<Constant>(Current)) {
+ // If Current is a constant, apply s/zext using ConstantExpr::getCast.
+ // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
+ Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
+ } else {
+ Instruction *Ext = (*I)->clone();
+ Ext->setOperand(0, Current);
+ Ext->insertBefore(IP);
+ Current = Ext;
+ }
}
- return -1;
+ return Current;
}
-Value *ConstantOffsetExtractor::cloneAndReplace(User *U, Value *From,
- Value *To) {
- // Finds in U the first use of From. It is safe to ignore future occurrences
- // of From, because findInEitherOperand similarly stops searching the right
- // operand when the first operand has a non-zero constant offset.
- unsigned OpNo = FindFirstUse(U, From);
- assert(OpNo != (unsigned)-1 && "UserChain wasn't built correctly");
-
- // ConstantOffsetExtractor::find only follows Operators (i.e., Instructions
- // and ConstantExprs). Therefore, U is either an Instruction or a
- // ConstantExpr.
- if (Instruction *I = dyn_cast<Instruction>(U)) {
- Instruction *Clone = I->clone();
- Clone->setOperand(OpNo, To);
- Clone->insertBefore(IP);
- return Clone;
+Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
+ distributeExtsAndCloneChain(UserChain.size() - 1);
+ // Remove all nullptrs (used to be s/zext) from UserChain.
+ unsigned NewSize = 0;
+ for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
+ if (*I != nullptr) {
+ UserChain[NewSize] = *I;
+ NewSize++;
+ }
}
- // cast<Constant>(To) is safe because a ConstantExpr only uses Constants.
- return cast<ConstantExpr>(U)
- ->getWithOperandReplaced(OpNo, cast<Constant>(To));
+ UserChain.resize(NewSize);
+ return removeConstOffset(UserChain.size() - 1);
}
-Value *ConstantOffsetExtractor::rebuildLeafWithoutConstantOffset(User *U,
- Value *C) {
- assert(U->getNumOperands() <= 2 &&
- "We didn't trace into any operator with more than 2 operands");
- // If U has only one operand which is the constant offset, removing the
- // constant offset leaves U as a null value.
- if (U->getNumOperands() == 1)
- return Constant::getNullValue(U->getType());
-
- // U->getNumOperands() == 2
- unsigned OpNo = FindFirstUse(U, C); // U->getOperand(OpNo) == C
- assert(OpNo < 2 && "UserChain wasn't built correctly");
- Value *TheOther = U->getOperand(1 - OpNo); // The other operand of U
- // If U = C - X, removing C makes U = -X; otherwise U will simply be X.
- if (!isa<SubOperator>(U) || OpNo == 1)
- return TheOther;
- if (isa<ConstantExpr>(U))
- return ConstantExpr::getNeg(cast<Constant>(TheOther));
- return BinaryOperator::CreateNeg(TheOther, "", IP);
+Value *
+ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
+ User *U = UserChain[ChainIndex];
+ if (ChainIndex == 0) {
+ assert(isa<ConstantInt>(U));
+ // If U is a ConstantInt, applyExts will return a ConstantInt as well.
+ return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
+ }
+
+ if (CastInst *Cast = dyn_cast<CastInst>(U)) {
+ assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
+ "We only traced into two types of CastInst: sext and zext");
+ ExtInsts.push_back(Cast);
+ UserChain[ChainIndex] = nullptr;
+ return distributeExtsAndCloneChain(ChainIndex - 1);
+ }
+
+ // Function find only traces into BinaryOperator and CastInst.
+ BinaryOperator *BO = cast<BinaryOperator>(U);
+ // OpNo is the operand index of UserChain[ChainIndex - 1] within BO.
+ unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
+ Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
+ Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);
+
+ BinaryOperator *NewBO = nullptr;
+ if (OpNo == 0) {
+ NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
+ BO->getName(), IP);
+ } else {
+ NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
+ BO->getName(), IP);
+ }
+ return UserChain[ChainIndex] = NewBO;
}
-Value *ConstantOffsetExtractor::rebuildWithoutConstantOffset() {
- assert(UserChain.size() > 0 && "you at least found a constant, right?");
- // Start with the constant and go up through UserChain, each time building a
- // clone of the subexpression but with the constant removed.
- // e.g., to build a clone of (a + (b + (c + 5)) but with the 5 removed, we
- // first c, then (b + c), and finally (a + (b + c)).
- //
- // Fast path: if the GEP index is a constant, simply returns 0.
- if (UserChain.size() == 1)
- return ConstantInt::get(UserChain[0]->getType(), 0);
-
- Value *Remainder =
- rebuildLeafWithoutConstantOffset(UserChain[1], UserChain[0]);
- for (size_t I = 2; I < UserChain.size(); ++I)
- Remainder = cloneAndReplace(UserChain[I], UserChain[I - 1], Remainder);
- return Remainder;
+Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
+ if (ChainIndex == 0) {
+ assert(isa<ConstantInt>(UserChain[ChainIndex]));
+ return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
+ }
+
+ BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
+ unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
+ assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
+ Value *NextInChain = removeConstOffset(ChainIndex - 1);
+ Value *TheOther = BO->getOperand(1 - OpNo);
+
+ // If NextInChain is 0 and not the LHS of a sub, we can simplify the
+ // sub-expression to be just TheOther.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
+ if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
+ return TheOther;
+ }
+
+ if (BO->getOpcode() == Instruction::Or) {
+ // Rebuild "or" as "add", because "or" may be invalid for the new
+ // expression.
+ //
+ // For instance, given
+ // a | (b + 5) where a and b + 5 have no common bits,
+ // we can extract 5 as the constant offset.
+ //
+ // However, reusing the "or" in the new index would give us
+ // (a | b) + 5
+ // which does not equal a | (b + 5).
+ //
+ // Replacing the "or" with "add" is fine, because
+ // a | (b + 5) = a + (b + 5) = (a + b) + 5
+ return BinaryOperator::CreateAdd(BO->getOperand(0), BO->getOperand(1),
+ BO->getName(), IP);
+ }
+
+ // We can reuse BO in this case, because the new expression shares the same
+ // instruction type and BO is used at most once.
+ assert(BO->getNumUses() <= 1 &&
+ "distributeExtsAndCloneChain clones each BinaryOperator in "
+ "UserChain, so no one should be used more than "
+ "once");
+ BO->setOperand(OpNo, NextInChain);
+ BO->setHasNoSignedWrap(false);
+ BO->setHasNoUnsignedWrap(false);
+ // Make sure it appears after all instructions we've inserted so far.
+ BO->moveBefore(IP);
+ return BO;
}
int64_t ConstantOffsetExtractor::Extract(Value *Idx, Value *&NewIdx,
const DataLayout *DL,
- Instruction *IP) {
- ConstantOffsetExtractor Extractor(DL, IP);
+ GetElementPtrInst *GEP) {
+ ConstantOffsetExtractor Extractor(DL, GEP);
// Find a non-zero constant offset first.
- int64_t ConstantOffset = Extractor.find(Idx);
- if (ConstantOffset == 0)
- return 0;
- // Then rebuild a new index with the constant removed.
- NewIdx = Extractor.rebuildWithoutConstantOffset();
- return ConstantOffset;
+ APInt ConstantOffset =
+ Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
+ GEP->isInBounds());
+ if (ConstantOffset != 0) {
+ // Separates the constant offset from the GEP index.
+ NewIdx = Extractor.rebuildWithoutConstOffset();
+ }
+ return ConstantOffset.getSExtValue();
}
-int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL) {
- return ConstantOffsetExtractor(DL, nullptr).find(Idx);
+int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL,
+ GetElementPtrInst *GEP) {
+ // If Idx is an index of an inbounds GEP, Idx is guaranteed to be non-negative.
+ return ConstantOffsetExtractor(DL, GEP)
+ .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
+ GEP->isInBounds())
+ .getSExtValue();
}
void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
@@ -421,8 +595,64 @@ bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
-int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(
- GetElementPtrInst *GEP, const DataLayout *DL, bool &NeedsExtraction) {
+bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
+ GetElementPtrInst *GEP) {
+ bool Changed = false;
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
+ I != E; ++I, ++GTI) {
+ // Skip struct member indices, which must be i32.
+ if (isa<SequentialType>(*GTI)) {
+ if ((*I)->getType() != IntPtrTy) {
+ *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
+
+bool
+SeparateConstOffsetFromGEP::convertInBoundsZExtToSExt(GetElementPtrInst *GEP) {
+ if (!GEP->isInBounds())
+ return false;
+
+ // TODO: consider alloca
+ GlobalVariable *UnderlyingObject =
+ dyn_cast<GlobalVariable>(GEP->getPointerOperand());
+ if (UnderlyingObject == nullptr)
+ return false;
+
+ uint64_t ObjectSize =
+ DL->getTypeAllocSize(UnderlyingObject->getType()->getElementType());
+ gep_type_iterator GTI = gep_type_begin(*GEP);
+ bool Changed = false;
+ for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
+ ++I, ++GTI) {
+ if (isa<SequentialType>(*GTI)) {
+ if (ZExtInst *Extended = dyn_cast<ZExtInst>(*I)) {
+ unsigned SrcBitWidth =
+ cast<IntegerType>(Extended->getSrcTy())->getBitWidth();
+ // For GEP operand zext(a), if a <= max signed value of typeof(a), then
+ // the sign bit of a is zero and sext(a) = zext(a). Because the GEP is
+ // in bounds, we know a <= ObjectSize, so the condition can be reduced
+ // to ObjectSize <= max signed value of typeof(a).
+ if (ObjectSize <=
+ APInt::getSignedMaxValue(SrcBitWidth).getZExtValue()) {
+ *I = new SExtInst(Extended->getOperand(0), Extended->getType(),
+ Extended->getName(), GEP);
+ Changed = true;
+ }
+ }
+ }
+ }
+ return Changed;
+}
+
+int64_t
+SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
+ bool &NeedsExtraction) {
NeedsExtraction = false;
int64_t AccumulativeByteOffset = 0;
gep_type_iterator GTI = gep_type_begin(*GEP);
@@ -430,7 +660,7 @@ int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(
if (isa<SequentialType>(*GTI)) {
// Tries to extract a constant offset from this GEP index.
int64_t ConstantOffset =
- ConstantOffsetExtractor::Find(GEP->getOperand(I), DL);
+ ConstantOffsetExtractor::Find(GEP->getOperand(I), DL, GEP);
if (ConstantOffset != 0) {
NeedsExtraction = true;
// A GEP may have multiple indices. We accumulate the extracted
@@ -455,31 +685,11 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
return false;
bool Changed = false;
+ Changed |= canonicalizeArrayIndicesToPointerSize(GEP);
+ Changed |= convertInBoundsZExtToSExt(GEP);
- // Shortcuts integer casts. Eliminating these explicit casts can make
- // subsequent optimizations more obvious: ConstantOffsetExtractor needn't
- // trace into these casts.
- if (GEP->isInBounds()) {
- // Doing this to inbounds GEPs is safe because their indices are guaranteed
- // to be non-negative and in bounds.
- gep_type_iterator GTI = gep_type_begin(*GEP);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
- if (Operator *O = dyn_cast<Operator>(GEP->getOperand(I))) {
- if (O->getOpcode() == Instruction::SExt ||
- O->getOpcode() == Instruction::ZExt) {
- GEP->setOperand(I, O->getOperand(0));
- Changed = true;
- }
- }
- }
- }
- }
-
- const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
bool NeedsExtraction;
- int64_t AccumulativeByteOffset =
- accumulateByteOffset(GEP, DL, NeedsExtraction);
+ int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
if (!NeedsExtraction)
return Changed;
@@ -506,30 +716,29 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
assert(NewIdx != nullptr &&
"ConstantOffset != 0 implies NewIdx is set");
GEP->setOperand(I, NewIdx);
- // Clear the inbounds attribute because the new index may be off-bound.
- // e.g.,
- //
- // b = add i64 a, 5
- // addr = gep inbounds float* p, i64 b
- //
- // is transformed to:
- //
- // addr2 = gep float* p, i64 a
- // addr = gep float* addr2, i64 5
- //
- // If a is -4, although the old index b is in bounds, the new index a is
- // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
- // inbounds keyword is not present, the offsets are added to the base
- // address with silently-wrapping two's complement arithmetic".
- // Therefore, the final code will be a semantically equivalent.
- //
- // TODO(jingyue): do some range analysis to keep as many inbounds as
- // possible. GEPs with inbounds are more friendly to alias analysis.
- GEP->setIsInBounds(false);
- Changed = true;
}
}
}
+ // Clear the inbounds attribute because the new index may be off-bound.
+ // e.g.,
+ //
+ // b = add i64 a, 5
+ // addr = gep inbounds float* p, i64 b
+ //
+ // is transformed to:
+ //
+ // addr2 = gep float* p, i64 a
+ // addr = gep float* addr2, i64 5
+ //
+ // If a is -4, although the old index b is in bounds, the new index a is
+ // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
+ // inbounds keyword is not present, the offsets are added to the base
+ // address with silently-wrapping two's complement arithmetic".
+ // Therefore, the final code will be semantically equivalent.
+ //
+ // TODO(jingyue): do some range analysis to keep as many inbounds as
+ // possible. GEPs with inbounds are more friendly to alias analysis.
+ GEP->setIsInBounds(false);
// Offsets the base with the accumulative byte offset.
//
@@ -562,9 +771,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
Instruction *NewGEP = GEP->clone();
NewGEP->insertBefore(GEP);
- Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
uint64_t ElementTypeSizeOfGEP =
DL->getTypeAllocSize(GEP->getType()->getElementType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
// Very likely. As long as %gep is naturally aligned, the byte offset we
// extracted should be a multiple of sizeof(*%gep).
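
To make the extension rules used by CanTraceInto concrete, here is a minimal standalone C++ sketch, separate from the pass itself, that checks the two arithmetic facts the comments above rely on: sext distributes over a signed add that does not overflow, while zext does not distribute over an unsigned add that wraps.

    #include <cassert>
    #include <cstdint>

    int main() {
      // sext(a + b) == sext(a) + sext(b) when the 32-bit add does not
      // overflow (the "add nsw" case in CanTraceInto).
      int32_t a = 40, b = 2;
      assert((int64_t)(a + b) == (int64_t)a + (int64_t)b);

      // zext(x + y) != zext(x) + zext(y) in general: UINT32_MAX + 1 wraps
      // to 0 before the extension, the counterexample cited in
      // convertInBoundsZExtToSExt's comment.
      uint32_t x = UINT32_MAX, y = 1;
      assert((uint64_t)(x + y) == 0);
      assert((uint64_t)x + (uint64_t)y == ((uint64_t)1 << 32));
      return 0;
    }
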
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 482c33a..7348c45 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -18,6 +18,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
@@ -34,6 +35,7 @@ namespace {
DominatorTree *DT;
LoopInfo *LI;
AliasAnalysis *AA;
+ const DataLayout *DL;
public:
static char ID; // Pass identification
@@ -98,6 +100,8 @@ bool Sinking::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
bool MadeChange, EverMadeChange = false;
@@ -193,7 +197,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
- if (!isSafeToSpeculativelyExecute(Inst))
+ if (!isSafeToSpeculativelyExecute(Inst, DL))
return false;
// We don't want to sink across a critical edge if we don't dominate the
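
The isSafeToSpeculativelyExecute guard matters because sinking a load below a branch can change which stores it observes. A hypothetical C++ analogue of the hazard, not taken from the pass:

    #include <cassert>

    // Hypothetical illustration: the load of *p must not be sunk below the
    // branch, because one successor path stores to *p before the merge.
    int loadThenMaybeStore(int *p, bool c) {
      int v = *p; // load here ...
      if (c)
        *p = 0;   // ... a store on one path
      return v;   // sinking the load down to this use would return 0 when c
    }

    int main() {
      int x = 7;
      assert(loadThenMaybeStore(&x, true) == 7);
      return 0;
    }
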
diff --git a/lib/Transforms/Utils/Android.mk b/lib/Transforms/Utils/Android.mk
index cbd8dd0..2390027 100644
--- a/lib/Transforms/Utils/Android.mk
+++ b/lib/Transforms/Utils/Android.mk
@@ -33,7 +33,6 @@ transforms_utils_SRC_FILES := \
SimplifyIndVar.cpp \
SimplifyInstructions.cpp \
SimplifyLibCalls.cpp \
- SpecialCaseList.cpp \
UnifyFunctionExitNodes.cpp \
Utils.cpp \
ValueMapper.cpp
diff --git a/lib/Transforms/Utils/CMakeLists.txt b/lib/Transforms/Utils/CMakeLists.txt
index e10ca90..fcf548f 100644
--- a/lib/Transforms/Utils/CMakeLists.txt
+++ b/lib/Transforms/Utils/CMakeLists.txt
@@ -33,7 +33,6 @@ add_llvm_library(LLVMTransformUtils
SimplifyIndVar.cpp
SimplifyInstructions.cpp
SimplifyLibCalls.cpp
- SpecialCaseList.cpp
UnifyFunctionExitNodes.cpp
Utils.cpp
ValueMapper.cpp
diff --git a/lib/Transforms/Utils/CloneModule.cpp b/lib/Transforms/Utils/CloneModule.cpp
index eb67db1..3f75b3e 100644
--- a/lib/Transforms/Utils/CloneModule.cpp
+++ b/lib/Transforms/Utils/CloneModule.cpp
@@ -107,7 +107,7 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
- if (const GlobalObject *C = I->getAliasee())
+ if (const Constant *C = I->getAliasee())
GA->setAliasee(cast<GlobalObject>(MapValue(C, VMap)));
}
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index e01d0c3..f0a9f2b 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -189,6 +189,7 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
Invoke.getOuterResumeDest(),
InvokeArgs, CI->getName(), BB);
+ II->setDebugLoc(CI->getDebugLoc());
II->setCallingConv(CI->getCallingConv());
II->setAttributes(CI->getAttributes());
@@ -466,7 +467,13 @@ static void fixupLineNumbers(Function *Fn, Function::iterator FI,
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
BI != BE; ++BI) {
DebugLoc DL = BI->getDebugLoc();
- if (!DL.isUnknown()) {
+ if (DL.isUnknown()) {
+ // If the inlined instruction has no line number, make it look as if it
+ // originates from the call location. This is important for
+ // ((__always_inline__, __nodebug__)) functions which must use caller
+ // location for all instructions in their function body.
+ BI->setDebugLoc(TheCallDL);
+ } else {
BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
LLVMContext &Ctx = BI->getContext();
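
The new branch in fixupLineNumbers exists for functions like the following clang-attributed one, an illustrative declaration rather than anything from this patch: with nodebug, the inlined body carries no line numbers of its own, so each inlined instruction is stamped with the call-site location instead.

    // Illustrative only: clang's always_inline + nodebug combination.
    __attribute__((always_inline, nodebug)) inline int addOne(int x) {
      return x + 1;
    }

    int caller(int x) {
      return addOne(x); // inlined instructions get this call's location
    }
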
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index f7787da..ef42291 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -50,6 +50,7 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
@@ -473,7 +474,8 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
/// explicit if they accepted the analysis directly and then updated it.
static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist,
AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI,
- ScalarEvolution *SE, Pass *PP) {
+ ScalarEvolution *SE, Pass *PP,
+ const DataLayout *DL) {
bool Changed = false;
ReprocessLoop:
@@ -672,7 +674,7 @@ ReprocessLoop:
// The block has now been cleared of all instructions except for
// a comparison and a conditional branch. SimplifyCFG may be able
// to fold it now.
- if (!FoldBranchToCommonDest(BI)) continue;
+ if (!FoldBranchToCommonDest(BI, DL)) continue;
// Success. The block is now dead, so remove it from the loop,
// update the dominator tree and delete it.
@@ -709,7 +711,8 @@ ReprocessLoop:
}
bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
- AliasAnalysis *AA, ScalarEvolution *SE) {
+ AliasAnalysis *AA, ScalarEvolution *SE,
+ const DataLayout *DL) {
bool Changed = false;
// Worklist maintains our depth-first queue of loops in this nest to process.
@@ -726,7 +729,8 @@ bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
}
while (!Worklist.empty())
- Changed |= simplifyOneLoop(Worklist.pop_back_val(), Worklist, AA, DT, LI, SE, PP);
+ Changed |= simplifyOneLoop(Worklist.pop_back_val(), Worklist, AA, DT, LI,
+ SE, PP, DL);
return Changed;
}
@@ -744,6 +748,7 @@ namespace {
DominatorTree *DT;
LoopInfo *LI;
ScalarEvolution *SE;
+ const DataLayout *DL;
bool runOnFunction(Function &F) override;
@@ -787,10 +792,12 @@ bool LoopSimplify::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = getAnalysisIfAvailable<ScalarEvolution>();
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
// Simplify each loop nest in the function.
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
- Changed |= simplifyLoop(*I, DT, LI, this, AA, SE);
+ Changed |= simplifyLoop(*I, DT, LI, this, AA, SE, DL);
return Changed;
}
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index d953e30..c86b82c 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -23,6 +23,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/LLVMContext.h"
@@ -242,21 +243,25 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
Twine("completely unrolled loop with ") +
Twine(TripCount) + " iterations");
} else {
+ auto EmitDiag = [&](const Twine &T) {
+ emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
+ "unrolled loop by a factor of " + Twine(Count) +
+ T);
+ };
+
DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
<< " by " << Count);
- Twine DiagMsg("unrolled loop by a factor of " + Twine(Count));
if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
- DiagMsg.concat(" with a breakout at trip " + Twine(BreakoutTrip));
+ EmitDiag(" with a breakout at trip " + Twine(BreakoutTrip));
} else if (TripMultiple != 1) {
DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
- DiagMsg.concat(" with " + Twine(TripMultiple) + " trips per branch");
+ EmitDiag(" with " + Twine(TripMultiple) + " trips per branch");
} else if (RuntimeTripCount) {
DEBUG(dbgs() << " with run-time trip count");
- DiagMsg.concat(" with run-time trip count");
+ EmitDiag(" with run-time trip count");
}
DEBUG(dbgs() << "!\n");
- emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc, DiagMsg);
}
bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
@@ -485,8 +490,19 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
if (!OuterL && !CompletelyUnroll)
OuterL = L;
if (OuterL) {
+ DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
- simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE);
+ simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL);
+
+ // LCSSA must be performed on the outermost affected loop. The unrolled
+ // loop's last loop latch is guaranteed to be in the outermost loop after
+ // deleteLoopFromQueue updates LoopInfo.
+ Loop *LatchLoop = LI->getLoopFor(Latches.back());
+ if (!OuterL->contains(LatchLoop))
+ while (OuterL->getParentLoop() != LatchLoop)
+ OuterL = OuterL->getParentLoop();
+
formLCSSARecursively(*OuterL, *DT, SE);
}
}
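
The EmitDiag lambda above replaces a Twine-based pattern that was doubly broken: Twine::concat returns a new Twine (the old code discarded it), and a Twine only references its operands, so one built from temporaries dangles past the end of the statement. A minimal sketch of the safe shape, with std::string standing in for Twine so the example is self-contained:

    #include <iostream>
    #include <string>

    // Stand-in for emitOptimizationRemark: consumes the message immediately,
    // while the temporaries that built it are still alive.
    static void emitRemark(const std::string &Msg) { std::cout << Msg << "\n"; }

    int main() {
      unsigned Count = 4, BreakoutTrip = 3;
      auto EmitDiag = [&](const std::string &Suffix) {
        emitRemark("unrolled loop by a factor of " + std::to_string(Count) +
                   Suffix);
      };
      EmitDiag(" with a breakout at trip " + std::to_string(BreakoutTrip));
      return 0;
    }
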
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 5bef091..a96c46a 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -280,17 +280,17 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
SCEVExpander Expander(*SE, "loop-unroll");
Value *TripCount = Expander.expandCodeFor(TripCountSC, TripCountSC->getType(),
PreHeaderBR);
- Type *CountTy = TripCount->getType();
- BinaryOperator *ModVal =
- BinaryOperator::CreateURem(TripCount,
- ConstantInt::get(CountTy, Count),
- "xtraiter");
- ModVal->insertBefore(PreHeaderBR);
-
- // Check if for no extra iterations, then jump to unrolled loop
- Value *BranchVal = new ICmpInst(PreHeaderBR,
- ICmpInst::ICMP_NE, ModVal,
- ConstantInt::get(CountTy, 0), "lcmp");
+
+ IRBuilder<> B(PreHeaderBR);
+ Value *ModVal = B.CreateAnd(TripCount, Count - 1, "xtraiter");
+
+ // Check whether there are extra iterations; if not, jump to the unrolled
+ // loop. We also have to check that the trip count computation didn't
+ // overflow when adding one to the backedge-taken count.
+ Value *LCmp = B.CreateIsNotNull(ModVal, "lcmp.mod");
+ Value *OverflowCheck = B.CreateIsNull(TripCount, "lcmp.overflow");
+ Value *BranchVal = B.CreateOr(OverflowCheck, LCmp, "lcmp.or");
+
// Branch to either the extra iterations or the unrolled loop
// We will fix up the true branch label when adding loop body copies
BranchInst::Create(PEnd, PEnd, BranchVal, PreHeaderBR);
@@ -344,6 +344,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
}
// The comparison w/ the extra iteration value and branch
+ Type *CountTy = TripCount->getType();
Value *BranchVal = new ICmpInst(*NewBB, ICmpInst::ICMP_EQ, ModVal,
ConstantInt::get(CountTy, leftOverIters),
"un.tmp");
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index 9ef694c..eac693b 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -14,11 +14,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/CFG.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -58,16 +60,18 @@ namespace {
Low(low), High(high), BB(bb) { }
};
- typedef std::vector<CaseRange> CaseVector;
+ typedef std::vector<CaseRange> CaseVector;
typedef std::vector<CaseRange>::iterator CaseItr;
private:
void processSwitchInst(SwitchInst *SI);
- BasicBlock* switchConvert(CaseItr Begin, CaseItr End, Value* Val,
- BasicBlock* OrigBlock, BasicBlock* Default);
- BasicBlock* newLeafBlock(CaseRange& Leaf, Value* Val,
- BasicBlock* OrigBlock, BasicBlock* Default);
- unsigned Clusterify(CaseVector& Cases, SwitchInst *SI);
+ BasicBlock *switchConvert(CaseItr Begin, CaseItr End,
+ ConstantInt *LowerBound, ConstantInt *UpperBound,
+ Value *Val, BasicBlock *OrigBlock,
+ BasicBlock *Default);
+ BasicBlock *newLeafBlock(CaseRange &Leaf, Value *Val, BasicBlock *OrigBlock,
+ BasicBlock *Default);
+ unsigned Clusterify(CaseVector &Cases, SwitchInst *SI);
};
/// The comparison function for sorting the switch case values in the vector.
@@ -129,15 +133,26 @@ static raw_ostream& operator<<(raw_ostream &O,
// switchConvert - Convert the switch statement into a binary lookup of
// the case values. The function recursively builds this tree.
-//
-BasicBlock* LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
- Value* Val, BasicBlock* OrigBlock,
- BasicBlock* Default)
-{
+// LowerBound and UpperBound are used to keep track of the bounds for Val
+// that have already been checked by a block emitted by one of the previous
+// calls to switchConvert in the call stack.
+BasicBlock *LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
+ ConstantInt *LowerBound,
+ ConstantInt *UpperBound, Value *Val,
+ BasicBlock *OrigBlock,
+ BasicBlock *Default) {
unsigned Size = End - Begin;
- if (Size == 1)
+ if (Size == 1) {
+ // Check if the case range fits exactly between the already-checked
+ // upper and lower bounds. If so, we can avoid emitting the code that
+ // checks whether the value actually falls in the range, because the
+ // bounds already tell us so.
+ if (Begin->Low == LowerBound && Begin->High == UpperBound) {
+ return Begin->BB;
+ }
return newLeafBlock(*Begin, Val, OrigBlock, Default);
+ }
unsigned Mid = Size / 2;
std::vector<CaseRange> LHS(Begin, Begin + Mid);
@@ -145,15 +160,50 @@ BasicBlock* LowerSwitch::switchConvert(CaseItr Begin, CaseItr End,
std::vector<CaseRange> RHS(Begin + Mid, End);
DEBUG(dbgs() << "RHS: " << RHS << "\n");
- CaseRange& Pivot = *(Begin + Mid);
- DEBUG(dbgs() << "Pivot ==> "
- << cast<ConstantInt>(Pivot.Low)->getValue() << " -"
- << cast<ConstantInt>(Pivot.High)->getValue() << "\n");
+ CaseRange &Pivot = *(Begin + Mid);
+ DEBUG(dbgs() << "Pivot ==> "
+ << cast<ConstantInt>(Pivot.Low)->getValue()
+ << " -" << cast<ConstantInt>(Pivot.High)->getValue() << "\n");
+
+ // NewLowerBound here should never be the minimal integer value. It is
+ // computed from a case range that is never the smallest, so there is
+ // always a case range with a smaller value.
+ ConstantInt *NewLowerBound = cast<ConstantInt>(Pivot.Low);
+ ConstantInt *NewUpperBound;
+
+ // If we don't have a Default block, then no value can fall outside the
+ // case ranges, so set the UpperBound to the highest value in the LHS
+ // part of the case ranges.
+ if (Default != nullptr) {
+ // Because NewLowerBound is never the smallest representable integer,
+ // it is safe here to subtract one.
+ NewUpperBound = ConstantInt::get(NewLowerBound->getContext(),
+ NewLowerBound->getValue() - 1);
+ } else {
+ CaseItr LastLHS = LHS.begin() + LHS.size() - 1;
+ NewUpperBound = cast<ConstantInt>(LastLHS->High);
+ }
- BasicBlock* LBranch = switchConvert(LHS.begin(), LHS.end(), Val,
- OrigBlock, Default);
- BasicBlock* RBranch = switchConvert(RHS.begin(), RHS.end(), Val,
- OrigBlock, Default);
+ DEBUG(dbgs() << "LHS Bounds ==> ";
+ if (LowerBound) {
+ dbgs() << cast<ConstantInt>(LowerBound)->getSExtValue();
+ } else {
+ dbgs() << "NONE";
+ }
+ dbgs() << " - " << NewUpperBound->getSExtValue() << "\n";
+ dbgs() << "RHS Bounds ==> ";
+ dbgs() << NewLowerBound->getSExtValue() << " - ";
+ if (UpperBound) {
+ dbgs() << cast<ConstantInt>(UpperBound)->getSExtValue() << "\n";
+ } else {
+ dbgs() << "NONE\n";
+ });
+
+ BasicBlock *LBranch = switchConvert(LHS.begin(), LHS.end(), LowerBound,
+ NewUpperBound, Val, OrigBlock, Default);
+ BasicBlock *RBranch = switchConvert(RHS.begin(), RHS.end(), NewLowerBound,
+ UpperBound, Val, OrigBlock, Default);
// Create a new node that checks if the value is < pivot. Go to the
// left branch if it is and right branch if not.
@@ -291,13 +341,19 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
return;
}
+ const bool DefaultIsUnreachable =
+ Default->size() == 1 && isa<UnreachableInst>(Default->getTerminator());
// Create a new, empty default block so that the new hierarchy of
// if-then statements goes to this and the PHI nodes are happy.
- BasicBlock* NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
- F->getBasicBlockList().insert(Default, NewDefault);
-
- BranchInst::Create(Default, NewDefault);
-
+ // If the default block is unreachable, we avoid creating one because it
+ // will never be a valid target.
+ BasicBlock *NewDefault = nullptr;
+ if (!DefaultIsUnreachable) {
+ NewDefault = BasicBlock::Create(SI->getContext(), "NewDefault");
+ F->getBasicBlockList().insert(Default, NewDefault);
+
+ BranchInst::Create(Default, NewDefault);
+ }
// If there is an entry in any PHI nodes for the default edge, make sure
// to update them as well.
for (BasicBlock::iterator I = Default->begin(); isa<PHINode>(I); ++I) {
@@ -316,12 +372,31 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
DEBUG(dbgs() << "Cases: " << Cases << "\n");
(void)numCmps;
- BasicBlock* SwitchBlock = switchConvert(Cases.begin(), Cases.end(), Val,
- OrigBlock, NewDefault);
+ ConstantInt *UpperBound = nullptr;
+ ConstantInt *LowerBound = nullptr;
+
+ // Optimize the condition where Default is an unreachable block. In this case
+ // we can make the bounds tightly fitted around the case value ranges,
+ // because we know that the value passed to the switch should always be
+ // exactly one of the case values.
+ if (DefaultIsUnreachable) {
+ CaseItr LastCase = Cases.begin() + Cases.size() - 1;
+ UpperBound = cast<ConstantInt>(LastCase->High);
+ LowerBound = cast<ConstantInt>(Cases.begin()->Low);
+ }
+ BasicBlock *SwitchBlock =
+ switchConvert(Cases.begin(), Cases.end(), LowerBound, UpperBound, Val,
+ OrigBlock, NewDefault);
// Branch to our shiny new if-then stuff...
BranchInst::Create(SwitchBlock, OrigBlock);
// We are now done with the switch instruction, delete it.
CurBlock->getInstList().erase(SI);
+
+ pred_iterator PI = pred_begin(Default), E = pred_end(Default);
+ // If the Default block has no more predecessors just remove it
+ if (PI == E) {
+ DeleteDeadBlock(Default);
+ }
}
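
A compact model of the bounds bookkeeping switchConvert now performs, hypothetical and separate from the pass itself: the recursion narrows the known [lower, upper] interval at each pivot, and a leaf whose case range exactly matches the proven bounds can branch directly without re-checking.

    #include <cassert>
    #include <vector>

    struct CaseRange { int Low, High, Target; };

    // Returns the target for Val, mimicking the lowered compare tree.
    // HasLo/HasHi model the nullptr bounds in the pass; [Lo, Hi] is the
    // interval already established by earlier compares.
    static int dispatch(const std::vector<CaseRange> &Cases, unsigned Begin,
                        unsigned End, int Val, bool HasLo, int Lo, bool HasHi,
                        int Hi, int Default) {
      if (End - Begin == 1) {
        const CaseRange &C = Cases[Begin];
        // Leaf elision: the bounds already prove Val lies in [C.Low, C.High].
        if (HasLo && Lo == C.Low && HasHi && Hi == C.High)
          return C.Target;
        return (Val >= C.Low && Val <= C.High) ? C.Target : Default;
      }
      unsigned Mid = Begin + (End - Begin) / 2;
      int Pivot = Cases[Mid].Low;
      if (Val < Pivot) // left half: the upper bound tightens to Pivot - 1
        return dispatch(Cases, Begin, Mid, Val, HasLo, Lo, true, Pivot - 1,
                        Default);
      return dispatch(Cases, Mid, End, Val, true, Pivot, HasHi, Hi, Default);
    }

    int main() {
      std::vector<CaseRange> Cases = {{0, 0, 10}, {1, 1, 11}, {2, 2, 12}};
      // With an unreachable default, the pass seeds the bounds with the
      // outermost case values, so every leaf check can be elided.
      for (int V = 0; V <= 2; ++V)
        assert(dispatch(Cases, 0, (unsigned)Cases.size(), V,
                        /*HasLo=*/true, 0, /*HasHi=*/true, 2, -1) == 10 + V);
      return 0;
    }
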
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 150dbdd..960b198 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -201,8 +201,8 @@ static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
/// ComputeSpeculationCost - Compute an abstract "cost" of speculating the
/// given instruction, which is assumed to be safe to speculate. 1 means
/// cheap, 2 means less cheap, and UINT_MAX means prohibitively expensive.
-static unsigned ComputeSpeculationCost(const User *I) {
- assert(isSafeToSpeculativelyExecute(I) &&
+static unsigned ComputeSpeculationCost(const User *I, const DataLayout *DL) {
+ assert(isSafeToSpeculativelyExecute(I, DL) &&
"Instruction is not safe to speculatively execute!");
switch (Operator::getOpcode(I)) {
default:
@@ -227,6 +227,9 @@ static unsigned ComputeSpeculationCost(const User *I) {
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
+ case Instruction::BitCast:
+ case Instruction::ExtractElement:
+ case Instruction::InsertElement:
return 1; // These are all cheap.
case Instruction::Call:
@@ -254,7 +257,8 @@ static unsigned ComputeSpeculationCost(const User *I) {
/// CostRemaining, false is returned and CostRemaining is undefined.
static bool DominatesMergePoint(Value *V, BasicBlock *BB,
SmallPtrSet<Instruction*, 4> *AggressiveInsts,
- unsigned &CostRemaining) {
+ unsigned &CostRemaining,
+ const DataLayout *DL) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
// Non-instructions all dominate instructions, but not all constantexprs
@@ -287,10 +291,10 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// Okay, it looks like the instruction IS in the "condition". Check to
// see if it's a cheap instruction to unconditionally compute, and if it
// only uses stuff defined outside of the condition. If so, hoist it out.
- if (!isSafeToSpeculativelyExecute(I))
+ if (!isSafeToSpeculativelyExecute(I, DL))
return false;
- unsigned Cost = ComputeSpeculationCost(I);
+ unsigned Cost = ComputeSpeculationCost(I, DL);
if (Cost > CostRemaining)
return false;
@@ -300,7 +304,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// Okay, we can only really hoist these out if their operands do
// not take us over the cost threshold.
for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
- if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining))
+ if (!DominatesMergePoint(*i, BB, AggressiveInsts, CostRemaining, DL))
return false;
// Okay, it's safe to do this! Remember this instruction.
AggressiveInsts->insert(I);
@@ -994,7 +998,7 @@ static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
/// HoistThenElseCodeToIf - Given a conditional branch that goes to BB1 and
/// BB2, hoist any common code in the two blocks up into the branch block. The
/// caller of this function guarantees that BI's block dominates BB1 and BB2.
-static bool HoistThenElseCodeToIf(BranchInst *BI) {
+static bool HoistThenElseCodeToIf(BranchInst *BI, const DataLayout *DL) {
// This does very trivial matching, with limited scanning, to find identical
// instructions in the two blocks. In particular, we don't want to get into
// O(M*N) situations here where M and N are the sizes of BB1 and BB2. As
@@ -1068,9 +1072,9 @@ HoistTerminator:
if (BB1V == BB2V)
continue;
- if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V))
+ if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V, DL))
return Changed;
- if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V))
+ if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V, DL))
return Changed;
}
}
@@ -1387,7 +1391,8 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
/// \endcode
///
/// \returns true if the conditional block is removed.
-static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
+static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
+ const DataLayout *DL) {
// Be conservative for now. FP select instruction can often be expensive.
Value *BrCond = BI->getCondition();
if (isa<FCmpInst>(BrCond))
@@ -1430,13 +1435,13 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
return false;
// Don't hoist the instruction if it's unsafe or expensive.
- if (!isSafeToSpeculativelyExecute(I) &&
+ if (!isSafeToSpeculativelyExecute(I, DL) &&
!(HoistCondStores &&
(SpeculatedStoreValue = isSafeToSpeculateStore(I, BB, ThenBB,
EndBB))))
return false;
if (!SpeculatedStoreValue &&
- ComputeSpeculationCost(I) > PHINodeFoldingThreshold)
+ ComputeSpeculationCost(I, DL) > PHINodeFoldingThreshold)
return false;
// Store the store speculation candidate.
@@ -1487,11 +1492,11 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
if (!OrigCE && !ThenCE)
continue; // Known safe and cheap.
- if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE)) ||
- (OrigCE && !isSafeToSpeculativelyExecute(OrigCE)))
+ if ((ThenCE && !isSafeToSpeculativelyExecute(ThenCE, DL)) ||
+ (OrigCE && !isSafeToSpeculativelyExecute(OrigCE, DL)))
return false;
- unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE) : 0;
- unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE) : 0;
+ unsigned OrigCost = OrigCE ? ComputeSpeculationCost(OrigCE, DL) : 0;
+ unsigned ThenCost = ThenCE ? ComputeSpeculationCost(ThenCE, DL) : 0;
if (OrigCost + ThenCost > 2 * PHINodeFoldingThreshold)
return false;
@@ -1738,9 +1743,9 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
}
if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts,
- MaxCostVal0) ||
+ MaxCostVal0, DL) ||
!DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts,
- MaxCostVal1))
+ MaxCostVal1, DL))
return false;
}
@@ -1958,7 +1963,7 @@ static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
/// FoldBranchToCommonDest - If this basic block is simple enough, and if a
/// predecessor branches to us and one of our successors, fold the block into
/// the predecessor and use logical operations to pick the right destination.
-bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
+bool llvm::FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL) {
BasicBlock *BB = BI->getParent();
Instruction *Cond = nullptr;
@@ -2010,7 +2015,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
Instruction *BonusInst = nullptr;
if (&*FrontIt != Cond &&
FrontIt->hasOneUse() && FrontIt->user_back() == Cond &&
- isSafeToSpeculativelyExecute(FrontIt)) {
+ isSafeToSpeculativelyExecute(FrontIt, DL)) {
BonusInst = &*FrontIt;
++FrontIt;
@@ -2025,7 +2030,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Make sure the instruction after the condition is the cond branch.
BasicBlock::iterator CondIt = Cond; ++CondIt;
- // Ingore dbg intrinsics.
+ // Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(CondIt)) ++CondIt;
if (&*CondIt != BI)
@@ -2340,7 +2345,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
}
// If this is a conditional branch in an empty block, and if any
- // predecessors is a conditional branch to one of our destinations,
+ // predecessor is a conditional branch to one of our destinations,
// fold the conditions into logical ops and one cond br.
BasicBlock::iterator BBI = BB->begin();
// Ignore dbg intrinsics.
@@ -2375,16 +2380,33 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Do not perform this transformation if it would require
// insertion of a large number of select instructions. For targets
// without predication/cmovs, this is a big pessimization.
- BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
+ // Also do not perform this transformation if any phi node in the common
+ // destination block can trap when reached by BB or PBB (PR17073). In that
+ // case, it would be unsafe to hoist the operation into a select instruction.
+
+ BasicBlock *CommonDest = PBI->getSuccessor(PBIOp);
unsigned NumPhis = 0;
for (BasicBlock::iterator II = CommonDest->begin();
- isa<PHINode>(II); ++II, ++NumPhis)
+ isa<PHINode>(II); ++II, ++NumPhis) {
if (NumPhis > 2) // Disable this xform.
return false;
+ PHINode *PN = cast<PHINode>(II);
+ Value *BIV = PN->getIncomingValueForBlock(BB);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(BIV))
+ if (CE->canTrap())
+ return false;
+
+ unsigned PBBIdx = PN->getBasicBlockIndex(PBI->getParent());
+ Value *PBIV = PN->getIncomingValue(PBBIdx);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(PBIV))
+ if (CE->canTrap())
+ return false;
+ }
+
// Finally, if everything is ok, fold the branches to logical ops.
- BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
+ BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
<< "AND: " << *BI->getParent());
@@ -3308,6 +3330,11 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
/// ValidLookupTableConstant - Return true if the backend will be able to handle
/// initializing an array of constants like C.
static bool ValidLookupTableConstant(Constant *C) {
+ if (C->isThreadDependent())
+ return false;
+ if (C->isDLLImportDependent())
+ return false;
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
return CE->isGEPWithNoNotionalOverIndexing();
@@ -3521,7 +3548,8 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
// Fill in any holes in the table with the default result.
if (Values.size() < TableSize) {
- assert(DefaultValue && "Need a default value to fill the lookup table holes.");
+ assert(DefaultValue &&
+ "Need a default value to fill the lookup table holes.");
assert(DefaultValue->getType() == ValueType);
for (uint64_t I = 0; I < TableSize; ++I) {
if (!TableContents[I])
@@ -3990,7 +4018,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
// branches to us and our successor, fold the comparison into the
// predecessor and use logical operations to update the incoming value
// for PHI nodes in common successor.
- if (FoldBranchToCommonDest(BI))
+ if (FoldBranchToCommonDest(BI, DL))
return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -4034,7 +4062,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and one of our successors, fold the comparison into the
// predecessor and use logical operations to pick the right destination.
- if (FoldBranchToCommonDest(BI))
+ if (FoldBranchToCommonDest(BI, DL))
return SimplifyCFG(BB, TTI, DL) | true;
// We have a conditional branch to two blocks that are only reachable
@@ -4043,24 +4071,24 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// can hoist it up to the branching block.
if (BI->getSuccessor(0)->getSinglePredecessor()) {
if (BI->getSuccessor(1)->getSinglePredecessor()) {
- if (HoistThenElseCodeToIf(BI))
+ if (HoistThenElseCodeToIf(BI, DL))
return SimplifyCFG(BB, TTI, DL) | true;
} else {
// If Successor #1 has multiple preds, we may be able to conditionally
- // execute Successor #0 if it branches to successor #1.
+ // execute Successor #0 if it branches to Successor #1.
TerminatorInst *Succ0TI = BI->getSuccessor(0)->getTerminator();
if (Succ0TI->getNumSuccessors() == 1 &&
Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0), DL))
return SimplifyCFG(BB, TTI, DL) | true;
}
} else if (BI->getSuccessor(1)->getSinglePredecessor()) {
// If Successor #0 has multiple preds, we may be able to conditionally
- // execute Successor #1 if it branches to successor #0.
+ // execute Successor #1 if it branches to Successor #0.
TerminatorInst *Succ1TI = BI->getSuccessor(1)->getTerminator();
if (Succ1TI->getNumSuccessors() == 1 &&
Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
- if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
+ if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1), DL))
return SimplifyCFG(BB, TTI, DL) | true;
}
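As a source-level sketch, the shape this transformation targets (illustrative only):

    // The then-block holds one cheap, side-effect-free instruction and falls
    // through to the join block, so SpeculativelyExecuteBB can compute
    // 'b + 1' unconditionally and pick the result with a select on (a > 0).
    int spec(int a, int b) {
      int r = b;
      if (a > 0)
        r = b + 1;
      return r;
    }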
diff --git a/lib/Transforms/Utils/SpecialCaseList.cpp b/lib/Transforms/Utils/SpecialCaseList.cpp
deleted file mode 100644
index 2c6fcd1..0000000
--- a/lib/Transforms/Utils/SpecialCaseList.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-//===-- SpecialCaseList.cpp - special case list for sanitizers ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is a utility class for instrumentation passes (like AddressSanitizer
-// or ThreadSanitizer) to avoid instrumenting some functions or global
-// variables, or to instrument some functions or global variables in a specific
-// way, based on a user-supplied list.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Transforms/Utils/SpecialCaseList.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
-#include <string>
-#include <utility>
-
-namespace llvm {
-
-/// Represents a set of regular expressions. Regular expressions which are
-/// "literal" (i.e. no regex metacharacters) are stored in Strings, while all
-/// others are represented as a single pipe-separated regex in RegEx. The
-/// reason for doing so is efficiency; StringSet is much faster at matching
-/// literal strings than Regex.
-struct SpecialCaseList::Entry {
- StringSet<> Strings;
- Regex *RegEx;
-
- Entry() : RegEx(nullptr) {}
-
- bool match(StringRef Query) const {
- return Strings.count(Query) || (RegEx && RegEx->match(Query));
- }
-};
-
-SpecialCaseList::SpecialCaseList() : Entries() {}
-
-SpecialCaseList *SpecialCaseList::create(
- const StringRef Path, std::string &Error) {
- if (Path.empty())
- return new SpecialCaseList();
- std::unique_ptr<MemoryBuffer> File;
- if (error_code EC = MemoryBuffer::getFile(Path, File)) {
- Error = (Twine("Can't open file '") + Path + "': " + EC.message()).str();
- return nullptr;
- }
- return create(File.get(), Error);
-}
-
-SpecialCaseList *SpecialCaseList::create(
- const MemoryBuffer *MB, std::string &Error) {
- std::unique_ptr<SpecialCaseList> SCL(new SpecialCaseList());
- if (!SCL->parse(MB, Error))
- return nullptr;
- return SCL.release();
-}
-
-SpecialCaseList *SpecialCaseList::createOrDie(const StringRef Path) {
- std::string Error;
- if (SpecialCaseList *SCL = create(Path, Error))
- return SCL;
- report_fatal_error(Error);
-}
-
-bool SpecialCaseList::parse(const MemoryBuffer *MB, std::string &Error) {
- // Iterate through each line in the blacklist file.
- SmallVector<StringRef, 16> Lines;
- SplitString(MB->getBuffer(), Lines, "\n\r");
- StringMap<StringMap<std::string> > Regexps;
- assert(Entries.empty() &&
- "parse() should be called on an empty SpecialCaseList");
- int LineNo = 1;
- for (SmallVectorImpl<StringRef>::iterator I = Lines.begin(), E = Lines.end();
- I != E; ++I, ++LineNo) {
- // Ignore empty lines and lines starting with "#"
- if (I->empty() || I->startswith("#"))
- continue;
- // Get our prefix and unparsed regexp.
- std::pair<StringRef, StringRef> SplitLine = I->split(":");
- StringRef Prefix = SplitLine.first;
- if (SplitLine.second.empty()) {
- // Missing ':' in the line.
- Error = (Twine("Malformed line ") + Twine(LineNo) + ": '" +
- SplitLine.first + "'").str();
- return false;
- }
-
- std::pair<StringRef, StringRef> SplitRegexp = SplitLine.second.split("=");
- std::string Regexp = SplitRegexp.first;
- StringRef Category = SplitRegexp.second;
-
- // Backwards compatibility.
- if (Prefix == "global-init") {
- Prefix = "global";
- Category = "init";
- } else if (Prefix == "global-init-type") {
- Prefix = "type";
- Category = "init";
- } else if (Prefix == "global-init-src") {
- Prefix = "src";
- Category = "init";
- }
-
- // See if we can store Regexp in Strings.
- if (Regex::isLiteralERE(Regexp)) {
- Entries[Prefix][Category].Strings.insert(Regexp);
- continue;
- }
-
- // Replace * with .*
- for (size_t pos = 0; (pos = Regexp.find("*", pos)) != std::string::npos;
- pos += strlen(".*")) {
- Regexp.replace(pos, strlen("*"), ".*");
- }
-
- // Check that the regexp is valid.
- Regex CheckRE(Regexp);
- std::string REError;
- if (!CheckRE.isValid(REError)) {
- Error = (Twine("Malformed regex in line ") + Twine(LineNo) + ": '" +
- SplitLine.second + "': " + REError).str();
- return false;
- }
-
- // Add this regexp into the proper group by its prefix.
- if (!Regexps[Prefix][Category].empty())
- Regexps[Prefix][Category] += "|";
- Regexps[Prefix][Category] += "^" + Regexp + "$";
- }
-
- // Iterate through each of the prefixes, and create Regexs for them.
- for (StringMap<StringMap<std::string> >::const_iterator I = Regexps.begin(),
- E = Regexps.end();
- I != E; ++I) {
- for (StringMap<std::string>::const_iterator II = I->second.begin(),
- IE = I->second.end();
- II != IE; ++II) {
- Entries[I->getKey()][II->getKey()].RegEx = new Regex(II->getValue());
- }
- }
- return true;
-}
-
-SpecialCaseList::~SpecialCaseList() {
- for (StringMap<StringMap<Entry> >::iterator I = Entries.begin(),
- E = Entries.end();
- I != E; ++I) {
- for (StringMap<Entry>::const_iterator II = I->second.begin(),
- IE = I->second.end();
- II != IE; ++II) {
- delete II->second.RegEx;
- }
- }
-}
-
-bool SpecialCaseList::isIn(const Function& F, const StringRef Category) const {
- return isIn(*F.getParent(), Category) ||
- inSectionCategory("fun", F.getName(), Category);
-}
-
-static StringRef GetGlobalTypeString(const GlobalValue &G) {
- // Types of GlobalVariables are always pointer types.
- Type *GType = G.getType()->getElementType();
- // For now we support blacklisting struct types only.
- if (StructType *SGType = dyn_cast<StructType>(GType)) {
- if (!SGType->isLiteral())
- return SGType->getName();
- }
- return "<unknown type>";
-}
-
-bool SpecialCaseList::isIn(const GlobalVariable &G,
- const StringRef Category) const {
- return isIn(*G.getParent(), Category) ||
- inSectionCategory("global", G.getName(), Category) ||
- inSectionCategory("type", GetGlobalTypeString(G), Category);
-}
-
-bool SpecialCaseList::isIn(const GlobalAlias &GA,
- const StringRef Category) const {
- if (isIn(*GA.getParent(), Category))
- return true;
-
- if (isa<FunctionType>(GA.getType()->getElementType()))
- return inSectionCategory("fun", GA.getName(), Category);
-
- return inSectionCategory("global", GA.getName(), Category) ||
- inSectionCategory("type", GetGlobalTypeString(GA), Category);
-}
-
-bool SpecialCaseList::isIn(const Module &M, const StringRef Category) const {
- return inSectionCategory("src", M.getModuleIdentifier(), Category);
-}
-
-bool SpecialCaseList::inSectionCategory(const StringRef Section,
- const StringRef Query,
- const StringRef Category) const {
- StringMap<StringMap<Entry> >::const_iterator I = Entries.find(Section);
- if (I == Entries.end()) return false;
- StringMap<Entry>::const_iterator II = I->second.find(Category);
- if (II == I->second.end()) return false;
-
- return II->getValue().match(Query);
-}
-
-} // namespace llvm
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 34d8a10..cb8a41d 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -209,6 +209,29 @@ namespace {
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
+/// Optimization analysis message produced during vectorization. Messages inform
+/// the user why vectorization did not occur.
+class Report {
+ std::string Message;
+ raw_string_ostream Out;
+ Instruction *Instr;
+
+public:
+ Report(Instruction *I = nullptr) : Out(Message), Instr(I) {
+ Out << "loop not vectorized: ";
+ }
+
+ template <typename A> Report &operator<<(const A &Value) {
+ Out << Value;
+ return *this;
+ }
+
+ Instruction *getInstr() { return Instr; }
+
+ std::string &str() { return Out.str(); }
+ operator Twine() { return Out.str(); }
+};
+
/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
@@ -515,10 +538,12 @@ public:
unsigned NumPredStores;
LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
- DominatorTree *DT, TargetLibraryInfo *TLI)
+ DominatorTree *DT, TargetLibraryInfo *TLI,
+ Function *F)
: NumLoads(0), NumStores(0), NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
- DT(DT), TLI(TLI), Induction(nullptr), WidestIndTy(nullptr),
- HasFunNoNaNAttr(false), MaxSafeDepDistBytes(-1U) {}
+ DT(DT), TLI(TLI), TheFunction(F), Induction(nullptr),
+ WidestIndTy(nullptr), HasFunNoNaNAttr(false), MaxSafeDepDistBytes(-1U) {
+ }
/// This enum represents the kinds of reductions that we support.
enum ReductionKind {
@@ -747,6 +772,16 @@ private:
/// invariant.
void collectStridedAcccess(Value *LoadOrStoreInst);
+ /// Report an analysis message to assist the user in diagnosing loops that are
+ /// not vectorized.
+ void emitAnalysis(Report &Message) {
+ DebugLoc DL = TheLoop->getStartLoc();
+ if (Instruction *I = Message.getInstr())
+ DL = I->getDebugLoc();
+ emitOptimizationRemarkAnalysis(TheFunction->getContext(), DEBUG_TYPE,
+ *TheFunction, DL, Message.str());
+ }
+
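The call pattern the rest of this patch uses, shown once as a sketch (BadInst stands in for whichever instruction blocked vectorization and may be null):

    // Report's constructor prefixes "loop not vectorized: ", operator<<
    // streams the reason, and emitAnalysis resolves the debug location
    // (the instruction's if present, otherwise the loop start).
    emitAnalysis(Report(BadInst) << "call instruction cannot be vectorized");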
/// The loop that we evaluate.
Loop *TheLoop;
/// Scev analysis.
@@ -757,6 +792,8 @@ private:
DominatorTree *DT;
/// Target Library Info.
TargetLibraryInfo *TLI;
+ /// Parent function
+ Function *TheFunction;
// --- vectorization state --- //
@@ -906,7 +943,7 @@ public:
}
/// Return the loop vectorizer metadata prefix.
- static StringRef Prefix() { return "llvm.vectorizer."; }
+ static StringRef Prefix() { return "llvm.loop.vectorize."; }
MDNode *createHint(LLVMContext &Context, StringRef Name, unsigned V) const {
SmallVector<Value*, 2> Vals;
@@ -942,6 +979,29 @@ public:
LoopID = NewLoopID;
}
+ std::string emitRemark() const {
+ Report R;
+ R << "vectorization ";
+ switch (Force) {
+ case LoopVectorizeHints::FK_Disabled:
+ R << "is explicitly disabled";
+ break;
+ case LoopVectorizeHints::FK_Enabled:
+ R << "is explicitly enabled";
+ if (Width != 0 && Unroll != 0)
+ R << " with width " << Width << " and interleave count " << Unroll;
+ else if (Width != 0)
+ R << " with width " << Width;
+ else if (Unroll != 0)
+ R << " with interleave count " << Unroll;
+ break;
+ case LoopVectorizeHints::FK_Undefined:
+ R << "was not specified";
+ break;
+ }
+ return R.str();
+ }
+
unsigned getWidth() const { return Width; }
unsigned getUnroll() const { return Unroll; }
enum ForceKind getForce() const { return Force; }
@@ -1125,18 +1185,37 @@ struct LoopVectorize : public FunctionPass {
: "?")) << " width=" << Hints.getWidth()
<< " unroll=" << Hints.getUnroll() << "\n");
+ // Function containing loop
+ Function *F = L->getHeader()->getParent();
+
+ // Looking at the diagnostic output is the only way to determine if a loop
+ // was vectorized (other than looking at the IR or machine code), so it
+ // is important to generate an optimization remark for each loop. Most of
+ // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
+ // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
 +  // less verbose; they report vectorized loops and unvectorized loops that
 +  // may benefit from vectorization, respectively.
+
if (Hints.getForce() == LoopVectorizeHints::FK_Disabled) {
DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
+ emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
return false;
}
if (!AlwaysVectorize && Hints.getForce() != LoopVectorizeHints::FK_Enabled) {
DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
+ emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
return false;
}
if (Hints.getWidth() == 1 && Hints.getUnroll() == 1) {
DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "loop not vectorized: vector width and interleave count are "
+ "explicitly set to 1");
return false;
}
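Assuming a Clang of the same vintage, whose '#pragma clang loop' lowers to the llvm.loop.vectorize.* metadata matching the renamed prefix above, these remarks can be surfaced from the command line (flag names are Clang's, not part of this patch):

    // Compile with:  clang -O2 -Rpass-analysis=loop-vectorize example.c
    // Analysis remarks added in this patch are printed with the loop's
    // source location, e.g. "loop not vectorized: ...".
    void scale(float *a, int n) {
    #pragma clang loop vectorize(enable)
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }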
@@ -1151,14 +1230,19 @@ struct LoopVectorize : public FunctionPass {
DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
else {
DEBUG(dbgs() << "\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "vectorization is not beneficial and is not explicitly forced");
return false;
}
}
// Check if it is legal to vectorize the loop.
- LoopVectorizationLegality LVL(L, SE, DL, DT, TLI);
+ LoopVectorizationLegality LVL(L, SE, DL, DT, TLI, F);
if (!LVL.canVectorize()) {
DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
+ emitOptimizationRemarkMissed(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
return false;
}
@@ -1167,7 +1251,6 @@ struct LoopVectorize : public FunctionPass {
// Check the function attributes to find out if this function should be
// optimized for size.
- Function *F = L->getHeader()->getParent();
bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
F->hasFnAttribute(Attribute::OptimizeForSize);
@@ -1190,6 +1273,11 @@ struct LoopVectorize : public FunctionPass {
if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
"attribute is used.\n");
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "loop not vectorized due to NoImplicitFloat attribute");
+ emitOptimizationRemarkMissed(F->getContext(), DEBUG_TYPE, *F,
+ L->getStartLoc(), Hints.emitRemark());
return false;
}
@@ -1208,9 +1296,14 @@ struct LoopVectorize : public FunctionPass {
DEBUG(dbgs() << "LV: Unroll Factor is " << UF << '\n');
if (VF.Width == 1) {
- DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
- if (UF == 1)
+ DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial\n");
+
+ if (UF == 1) {
+ emitOptimizationRemarkAnalysis(
+ F->getContext(), DEBUG_TYPE, *F, L->getStartLoc(),
+ "not beneficial to vectorize and user disabled interleaving");
return false;
+ }
DEBUG(dbgs() << "LV: Trying to at least unroll the loops.\n");
// Report the unrolling decision.
@@ -1220,6 +1313,7 @@ struct LoopVectorize : public FunctionPass {
" (vectorization not beneficial)"));
// We decided not to vectorize, but we may want to unroll.
+
InnerLoopUnroller Unroller(L, SE, LI, DT, DL, TLI, UF);
Unroller.vectorize(&LVL);
} else {
@@ -1909,20 +2003,23 @@ void InnerLoopVectorizer::createEmptyLoop() {
the vectorized instructions while the old loop will continue to run the
scalar remainder.
- [ ] <-- vector loop bypass (may consist of multiple blocks).
- / |
- / v
- | [ ] <-- vector pre header.
- | |
- | v
- | [ ] \
- | [ ]_| <-- vector loop.
- | |
- \ v
- >[ ] <--- middle-block.
- / |
- / v
- | [ ] <--- new preheader.
+ [ ] <-- Back-edge taken count overflow check.
+ / |
+ / v
+ | [ ] <-- vector loop bypass (may consist of multiple blocks).
+ | / |
+ | / v
+ || [ ] <-- vector pre header.
+ || |
+ || v
+ || [ ] \
+ || [ ]_| <-- vector loop.
+ || |
+ | \ v
+ | >[ ] <--- middle-block.
+ | / |
+ | / v
+ -|- >[ ] <--- new preheader.
| |
| v
| [ ] \
@@ -1936,6 +2033,7 @@ void InnerLoopVectorizer::createEmptyLoop() {
BasicBlock *OldBasicBlock = OrigLoop->getHeader();
BasicBlock *BypassBlock = OrigLoop->getLoopPreheader();
BasicBlock *ExitBlock = OrigLoop->getExitBlock();
+ assert(BypassBlock && "Invalid loop structure");
assert(ExitBlock && "Must have an exit block");
// Some loops have a single integer induction variable, while other loops
@@ -1958,18 +2056,30 @@ void InnerLoopVectorizer::createEmptyLoop() {
IdxTy->getPrimitiveSizeInBits())
ExitCount = SE->getTruncateOrNoop(ExitCount, IdxTy);
- ExitCount = SE->getNoopOrZeroExtend(ExitCount, IdxTy);
+ const SCEV *BackedgeTakeCount = SE->getNoopOrZeroExtend(ExitCount, IdxTy);
// Get the total trip count from the count by adding 1.
- ExitCount = SE->getAddExpr(ExitCount,
- SE->getConstant(ExitCount->getType(), 1));
+ ExitCount = SE->getAddExpr(BackedgeTakeCount,
+ SE->getConstant(BackedgeTakeCount->getType(), 1));
// Expand the trip count and place the new instructions in the preheader.
// Notice that the pre-header does not change, only the loop body.
SCEVExpander Exp(*SE, "induction");
- // Count holds the overall loop count (N).
- Value *Count = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
- BypassBlock->getTerminator());
 +  // We need to test whether the backedge-taken count is uint##_max. Adding
 +  // one to it would overflow and produce an incorrect trip count in the
 +  // vector body. In case of overflow we jump directly to the scalar
 +  // remainder loop.
+ Value *BackedgeCount =
+ Exp.expandCodeFor(BackedgeTakeCount, BackedgeTakeCount->getType(),
+ BypassBlock->getTerminator());
+ if (BackedgeCount->getType()->isPointerTy())
+ BackedgeCount = CastInst::CreatePointerCast(BackedgeCount, IdxTy,
+ "backedge.ptrcnt.to.int",
+ BypassBlock->getTerminator());
+ Instruction *CheckBCOverflow =
+ CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, BackedgeCount,
+ Constant::getAllOnesValue(BackedgeCount->getType()),
+ "backedge.overflow", BypassBlock->getTerminator());
// The loop index does not have to start at Zero. Find the original start
// value from the induction PHI node. If we don't have an induction variable
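A narrow-type illustration of the wraparound the new check guards against (plain C++ arithmetic, not from the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t BackedgeTaken = 255;                    // body runs 256 times
      uint8_t TripCount = uint8_t(BackedgeTaken + 1); // wraps to 0 in 8 bits
      assert(TripCount == 0);
      // A zero trip count would skip the vector loop entirely, so when the
      // backedge-taken count is ~0 control must go to the scalar loop.
      return 0;
    }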
@@ -1980,7 +2090,18 @@ void InnerLoopVectorizer::createEmptyLoop() {
IdxTy):
ConstantInt::get(IdxTy, 0);
- assert(BypassBlock && "Invalid loop structure");
 +  // We need an instruction to anchor the overflow check on. StartIdx needs to
 +  // be defined before the overflow check branch: the scalar preheader is going
 +  // to merge the start index, so the overflow-check block needs to contain a
 +  // definition of the start index.
+ Instruction *OverflowCheckAnchor = BinaryOperator::CreateAdd(
+ StartIdx, ConstantInt::get(IdxTy, 0), "overflow.check.anchor",
+ BypassBlock->getTerminator());
+
+ // Count holds the overall loop count (N).
+ Value *Count = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
+ BypassBlock->getTerminator());
+
LoopBypassBlocks.push_back(BypassBlock);
// Split the single block loop into the two loop structure described above.
@@ -2049,29 +2170,45 @@ void InnerLoopVectorizer::createEmptyLoop() {
// Now, compare the new count to zero. If it is zero skip the vector loop and
// jump to the scalar loop.
- Value *Cmp = BypassBuilder.CreateICmpEQ(IdxEndRoundDown, StartIdx,
- "cmp.zero");
+ Value *Cmp =
+ BypassBuilder.CreateICmpEQ(IdxEndRoundDown, StartIdx, "cmp.zero");
BasicBlock *LastBypassBlock = BypassBlock;
 +  // Generate code to check that the loop's trip count, which we computed by
 +  // adding one to the backedge-taken count, does not overflow.
+ {
+ auto PastOverflowCheck =
+ std::next(BasicBlock::iterator(OverflowCheckAnchor));
+ BasicBlock *CheckBlock =
+ LastBypassBlock->splitBasicBlock(PastOverflowCheck, "overflow.checked");
+ if (ParentLoop)
+ ParentLoop->addBasicBlockToLoop(CheckBlock, LI->getBase());
+ LoopBypassBlocks.push_back(CheckBlock);
+ Instruction *OldTerm = LastBypassBlock->getTerminator();
+ BranchInst::Create(ScalarPH, CheckBlock, CheckBCOverflow, OldTerm);
+ OldTerm->eraseFromParent();
+ LastBypassBlock = CheckBlock;
+ }
+
// Generate the code to check that the strides we assumed to be one are really
// one. We want the new basic block to start at the first instruction in a
// sequence of instructions that form a check.
Instruction *StrideCheck;
Instruction *FirstCheckInst;
std::tie(FirstCheckInst, StrideCheck) =
- addStrideCheck(BypassBlock->getTerminator());
+ addStrideCheck(LastBypassBlock->getTerminator());
if (StrideCheck) {
// Create a new block containing the stride check.
BasicBlock *CheckBlock =
- BypassBlock->splitBasicBlock(FirstCheckInst, "vector.stridecheck");
+ LastBypassBlock->splitBasicBlock(FirstCheckInst, "vector.stridecheck");
if (ParentLoop)
ParentLoop->addBasicBlockToLoop(CheckBlock, LI->getBase());
LoopBypassBlocks.push_back(CheckBlock);
// Replace the branch into the memory check block with a conditional branch
// for the "few elements case".
- Instruction *OldTerm = BypassBlock->getTerminator();
+ Instruction *OldTerm = LastBypassBlock->getTerminator();
BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
OldTerm->eraseFromParent();
@@ -2134,6 +2271,19 @@ void InnerLoopVectorizer::createEmptyLoop() {
PHINode::Create(OrigPhi->getType(), 2, "trunc.resume.val",
MiddleBlock->getTerminator()) : nullptr;
+ // Create phi nodes to merge from the backedge-taken check block.
+ PHINode *BCResumeVal = PHINode::Create(ResumeValTy, 3, "bc.resume.val",
+ ScalarPH->getTerminator());
+ BCResumeVal->addIncoming(ResumeVal, MiddleBlock);
+
+ PHINode *BCTruncResumeVal = nullptr;
+ if (OrigPhi == OldInduction) {
+ BCTruncResumeVal =
+ PHINode::Create(OrigPhi->getType(), 2, "bc.trunc.resume.val",
+ ScalarPH->getTerminator());
+ BCTruncResumeVal->addIncoming(TruncResumeVal, MiddleBlock);
+ }
+
Value *EndValue = nullptr;
switch (II.IK) {
case LoopVectorizationLegality::IK_NoInduction:
@@ -2150,10 +2300,12 @@ void InnerLoopVectorizer::createEmptyLoop() {
BypassBuilder.CreateTrunc(IdxEndRoundDown, OrigPhi->getType());
// The new PHI merges the original incoming value, in case of a bypass,
// or the value at the end of the vectorized loop.
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
TruncResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[I]);
TruncResumeVal->addIncoming(EndValue, VecBody);
+ BCTruncResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[0]);
+
// We know what the end value is.
EndValue = IdxEndRoundDown;
// We also know which PHI node holds it.
@@ -2199,7 +2351,7 @@ void InnerLoopVectorizer::createEmptyLoop() {
// The new PHI merges the original incoming value, in case of a bypass,
// or the value at the end of the vectorized loop.
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) {
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I) {
if (OrigPhi == OldInduction)
ResumeVal->addIncoming(StartIdx, LoopBypassBlocks[I]);
else
@@ -2209,11 +2361,16 @@ void InnerLoopVectorizer::createEmptyLoop() {
// Fix the scalar body counter (PHI node).
unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
- // The old inductions phi node in the scalar body needs the truncated value.
- if (OrigPhi == OldInduction)
- OrigPhi->setIncomingValue(BlockIdx, TruncResumeVal);
- else
- OrigPhi->setIncomingValue(BlockIdx, ResumeVal);
+
+ // The old induction's phi node in the scalar body needs the truncated
+ // value.
+ if (OrigPhi == OldInduction) {
+ BCResumeVal->addIncoming(StartIdx, LoopBypassBlocks[0]);
+ OrigPhi->setIncomingValue(BlockIdx, BCTruncResumeVal);
+ } else {
+ BCResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[0]);
+ OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
+ }
}
// If we are generating a new induction variable then we also need to
@@ -2224,7 +2381,7 @@ void InnerLoopVectorizer::createEmptyLoop() {
assert(!ResumeIndex && "Unexpected resume value found");
ResumeIndex = PHINode::Create(IdxTy, 2, "new.indc.resume.val",
MiddleBlock->getTerminator());
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
ResumeIndex->addIncoming(StartIdx, LoopBypassBlocks[I]);
ResumeIndex->addIncoming(IdxEndRoundDown, VecBody);
}
@@ -2494,7 +2651,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
// To do so, we need to generate the 'identity' vector and override
// one of the elements with the incoming scalar reduction. We need
// to do it in the vector-loop preheader.
- Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
+ Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
// This is the vector-clone of the value that leaves the loop.
VectorParts &VectorExit = getVectorValue(RdxDesc.LoopExitInstr);
@@ -2568,7 +2725,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
VectorParts &RdxExitVal = getVectorValue(RdxDesc.LoopExitInstr);
PHINode *NewPhi = Builder.CreatePHI(VecTy, 2, "rdx.vec.exit.phi");
Value *StartVal = (part == 0) ? VectorStart : Identity;
- for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
+ for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
NewPhi->addIncoming(StartVal, LoopBypassBlocks[I]);
NewPhi->addIncoming(RdxExitVal[part],
LoopVectorBody.back());
@@ -2626,6 +2783,13 @@ void InnerLoopVectorizer::vectorizeLoop() {
Builder.getInt32(0));
}
+ // Create a phi node that merges control-flow from the backedge-taken check
+ // block and the middle block.
+ PHINode *BCBlockPhi = PHINode::Create(RdxPhi->getType(), 2, "bc.merge.rdx",
+ LoopScalarPreHeader->getTerminator());
+ BCBlockPhi->addIncoming(RdxDesc.StartValue, LoopBypassBlocks[0]);
+ BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
+
// Now, we need to fix the users of the reduction variable
// inside and outside of the scalar remainder loop.
// We know that the loop is in LCSSA form. We need to update the
@@ -2655,7 +2819,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
// Pick the other block.
int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
- (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, ReducedPartRdx);
+ (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
(RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, RdxDesc.LoopExitInstr);
}// end of for each redux variable.
@@ -3062,9 +3226,14 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
scalarizeInstruction(it);
break;
default:
+ bool HasScalarOpd = hasVectorInstrinsicScalarOpd(ID, 1);
for (unsigned Part = 0; Part < UF; ++Part) {
SmallVector<Value *, 4> Args;
for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
+ if (HasScalarOpd && i == 1) {
+ Args.push_back(CI->getArgOperand(i));
+ continue;
+ }
VectorParts &Arg = getVectorValue(CI->getArgOperand(i));
Args.push_back(Arg[Part]);
}
@@ -3112,8 +3281,8 @@ void InnerLoopVectorizer::updateAnalysis() {
}
}
- DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks.front());
- DT->addNewBlock(LoopScalarPreHeader, LoopMiddleBlock);
+ DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks[1]);
+ DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
@@ -3138,8 +3307,10 @@ static bool canIfConvertPHINodes(BasicBlock *BB) {
}
bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
- if (!EnableIfConversion)
+ if (!EnableIfConversion) {
+ emitAnalysis(Report() << "if-conversion is disabled");
return false;
+ }
assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
@@ -3169,16 +3340,24 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
BasicBlock *BB = *BI;
// We don't support switch statements inside loops.
- if (!isa<BranchInst>(BB->getTerminator()))
+ if (!isa<BranchInst>(BB->getTerminator())) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "loop contains a switch statement");
return false;
+ }
// We must be able to predicate all blocks that need to be predicated.
if (blockNeedsPredication(BB)) {
- if (!blockCanBePredicated(BB, SafePointes))
+ if (!blockCanBePredicated(BB, SafePointes)) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "control flow cannot be substituted for a select");
return false;
- } else if (BB != Header && !canIfConvertPHINodes(BB))
+ }
+ } else if (BB != Header && !canIfConvertPHINodes(BB)) {
+ emitAnalysis(Report(BB->getTerminator())
+ << "control flow cannot be substituted for a select");
return false;
-
+ }
}
// We can if-convert this loop.
@@ -3188,20 +3367,31 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
bool LoopVectorizationLegality::canVectorize() {
// We must have a loop in canonical form. Loops with indirectbr in them cannot
// be canonicalized.
- if (!TheLoop->getLoopPreheader())
+ if (!TheLoop->getLoopPreheader()) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We can only vectorize innermost loops.
- if (TheLoop->getSubLoopsVector().size())
+ if (TheLoop->getSubLoopsVector().size()) {
+ emitAnalysis(Report() << "loop is not the innermost loop");
return false;
+ }
// We must have a single backedge.
- if (TheLoop->getNumBackEdges() != 1)
+ if (TheLoop->getNumBackEdges() != 1) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We must have a single exiting block.
- if (!TheLoop->getExitingBlock())
+ if (!TheLoop->getExitingBlock()) {
+ emitAnalysis(
+ Report() << "loop control flow is not understood by vectorizer");
return false;
+ }
// We need to have a loop header.
DEBUG(dbgs() << "LV: Found a loop: " <<
@@ -3217,6 +3407,7 @@ bool LoopVectorizationLegality::canVectorize() {
// ScalarEvolution needs to be able to find the exit count.
const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
if (ExitCount == SE->getCouldNotCompute()) {
+ emitAnalysis(Report() << "could not determine number of loop iterations");
DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
return false;
}
@@ -3310,6 +3501,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (!PhiTy->isIntegerTy() &&
!PhiTy->isFloatingPointTy() &&
!PhiTy->isPointerTy()) {
+ emitAnalysis(Report(it)
+ << "loop control flow is not understood by vectorizer");
DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
return false;
}
@@ -3320,13 +3513,17 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (*bb != Header) {
// Check that this instruction has no outside users or is an
// identified reduction value with an outside user.
- if(!hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (!hasOutsideLoopUser(TheLoop, it, AllowedExit))
continue;
+ emitAnalysis(Report(it) << "value that could not be identified as "
+ "reduction is used outside the loop");
return false;
}
    // We only allow if-converted PHIs with exactly two incoming values.
if (Phi->getNumIncomingValues() != 2) {
+ emitAnalysis(Report(it)
+ << "control flow not understood by vectorizer");
DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
return false;
}
@@ -3357,8 +3554,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Until we explicitly handle the case of an induction variable with
// an outside loop user we have to give up vectorizing this loop.
- if (hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (hasOutsideLoopUser(TheLoop, it, AllowedExit)) {
+ emitAnalysis(Report(it) << "use of induction value outside of the "
+ "loop is not handled by vectorizer");
return false;
+ }
continue;
}
@@ -3401,6 +3601,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
continue;
}
+ emitAnalysis(Report(it) << "unvectorizable operation");
DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
return false;
}// end of PHI handling
@@ -3409,14 +3610,29 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// calls and we do handle certain intrinsic and libm functions.
CallInst *CI = dyn_cast<CallInst>(it);
if (CI && !getIntrinsicIDForCall(CI, TLI) && !isa<DbgInfoIntrinsic>(CI)) {
+ emitAnalysis(Report(it) << "call instruction cannot be vectorized");
DEBUG(dbgs() << "LV: Found a call site.\n");
return false;
}
 +      // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
 +      // second argument is the same (i.e. loop invariant).
+ if (CI &&
+ hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) {
+ if (!SE->isLoopInvariant(SE->getSCEV(CI->getOperand(1)), TheLoop)) {
+ emitAnalysis(Report(it)
+ << "intrinsic instruction cannot be vectorized");
+ DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
+ return false;
+ }
+ }
+
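For illustration, a loop shape the new check accepts; the builtin name is an assumption here, but any call lowering to llvm.powi with a loop-invariant exponent behaves the same:

    // The exponent 'e' is identical on every iteration, so a vectorized
    // llvm.powi call can keep it as a single scalar operand. If 'e' varied
    // with 'i', the loop would now be rejected with the remark above.
    void powi_loop(float *a, int n, int e) {
      for (int i = 0; i < n; ++i)
        a[i] = __builtin_powif(a[i], e);
    }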
// Check that the instruction return type is vectorizable.
// Also, we can't vectorize extractelement instructions.
if ((!VectorType::isValidElementType(it->getType()) &&
!it->getType()->isVoidTy()) || isa<ExtractElementInst>(it)) {
+ emitAnalysis(Report(it)
+ << "instruction return type cannot be vectorized");
DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
return false;
}
@@ -3424,8 +3640,10 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Check that the stored type is vectorizable.
if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
Type *T = ST->getValueOperand()->getType();
- if (!VectorType::isValidElementType(T))
+ if (!VectorType::isValidElementType(T)) {
+ emitAnalysis(Report(ST) << "store instruction cannot be vectorized");
return false;
+ }
if (EnableMemAccessVersioning)
collectStridedAcccess(ST);
}
@@ -3436,8 +3654,10 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// Reduction instructions are allowed to have exit users.
// All other instructions must not have external users.
- if (hasOutsideLoopUser(TheLoop, it, AllowedExit))
+ if (hasOutsideLoopUser(TheLoop, it, AllowedExit)) {
+ emitAnalysis(Report(it) << "value cannot be used outside the loop");
return false;
+ }
} // next instr.
@@ -3445,8 +3665,11 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (!Induction) {
DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
- if (Inductions.empty())
+ if (Inductions.empty()) {
+ emitAnalysis(Report()
+ << "loop induction variable could not be identified");
return false;
+ }
}
return true;
@@ -4353,8 +4576,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
continue;
LoadInst *Ld = dyn_cast<LoadInst>(it);
- if (!Ld) return false;
- if (!Ld->isSimple() && !IsAnnotatedParallel) {
+ if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
+ emitAnalysis(Report(Ld)
+ << "read with atomic ordering or volatile read");
DEBUG(dbgs() << "LV: Found a non-simple load.\n");
return false;
}
@@ -4367,8 +4591,13 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
// Save 'store' instructions. Abort if other instructions write to memory.
if (it->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(it);
- if (!St) return false;
+ if (!St) {
+ emitAnalysis(Report(it) << "instruction cannot be vectorized");
+ return false;
+ }
if (!St->isSimple() && !IsAnnotatedParallel) {
+ emitAnalysis(Report(St)
+ << "write with atomic ordering or volatile write");
DEBUG(dbgs() << "LV: Found a non-simple store.\n");
return false;
}
@@ -4405,6 +4634,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
Value* Ptr = ST->getPointerOperand();
if (isUniform(Ptr)) {
+ emitAnalysis(
+ Report(ST)
+ << "write to a loop invariant address could not be vectorized");
DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
return false;
}
@@ -4483,6 +4715,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
if (NeedRTCheck && !CanDoRT) {
+ emitAnalysis(Report() << "cannot identify array bounds");
DEBUG(dbgs() << "LV: We can't vectorize because we can't find " <<
"the array bounds.\n");
PtrRtCheck.reset();
@@ -4513,6 +4746,14 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
// Check that we did not collect too many pointers or found an unsizeable
// pointer.
if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) {
+ if (!CanDoRT && NumComparisons > 0)
+ emitAnalysis(Report()
+ << "cannot check memory dependencies at runtime");
+ else
+ emitAnalysis(Report()
+ << NumComparisons << " exceeds limit of "
+ << RuntimeMemoryCheckThreshold
+ << " dependent memory operations checked at runtime");
DEBUG(dbgs() << "LV: Can't vectorize with memory checks\n");
PtrRtCheck.reset();
return false;
@@ -4522,6 +4763,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}
}
+ if (!CanVecMem)
+ emitAnalysis(Report() << "unsafe dependent memory operations in loop");
+
DEBUG(dbgs() << "LV: We" << (NeedRTCheck ? "" : " don't") <<
" need a runtime memory check.\n");
@@ -5774,4 +6018,3 @@ Value *InnerLoopUnroller::getConsecutiveVector(Value* Val, int StartIdx,
Constant *C = ConstantInt::get(ITy, StartIdx, Negate);
return Builder.CreateAdd(Val, C, "induction");
}
-
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index e13ba95..53a43d9 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -149,6 +149,48 @@ static bool isSplat(ArrayRef<Value *> VL) {
return true;
}
 +/// \returns the opcode that can be paired with \p Op to create an alternating
 +/// sequence which can later be merged as a ShuffleVector instruction.
+static unsigned getAltOpcode(unsigned Op) {
+ switch (Op) {
+ case Instruction::FAdd:
+ return Instruction::FSub;
+ case Instruction::FSub:
+ return Instruction::FAdd;
+ case Instruction::Add:
+ return Instruction::Sub;
+ case Instruction::Sub:
+ return Instruction::Add;
+ default:
+ return 0;
+ }
+}
+
 +/// \returns true if opcode \p Op can be part of an alternating sequence which
 +/// can later be merged as a ShuffleVector instruction.
+static bool canCombineAsAltInst(unsigned Op) {
+ if (Op == Instruction::FAdd || Op == Instruction::FSub ||
+ Op == Instruction::Sub || Op == Instruction::Add)
+ return true;
+ return false;
+}
+
 +/// \returns Instruction::ShuffleVector if the instructions in \p VL form an
 +/// alternating fadd/fsub, fsub/fadd, add/sub or sub/add sequence
 +/// (e.g. opcodes fadd, fsub, fadd, fsub, ...), and zero otherwise.
+static unsigned isAltInst(ArrayRef<Value *> VL) {
+ Instruction *I0 = dyn_cast<Instruction>(VL[0]);
+ unsigned Opcode = I0->getOpcode();
+ unsigned AltOpcode = getAltOpcode(Opcode);
+ for (int i = 1, e = VL.size(); i < e; i++) {
+ Instruction *I = dyn_cast<Instruction>(VL[i]);
+ if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
+ return 0;
+ }
+ return Instruction::ShuffleVector;
+}
+
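The source pattern these helpers recognize, as a small sketch:

    // Even lanes add and odd lanes subtract; with this patch SLP can emit
    // one vector fadd, one vector fsub and a shufflevector that takes even
    // lanes from the first result and odd lanes from the second.
    void addsub(float *r, const float *a, const float *b) {
      r[0] = a[0] + b[0];
      r[1] = a[1] - b[1];
      r[2] = a[2] + b[2];
      r[3] = a[3] - b[3];
    }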
/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
@@ -158,8 +200,11 @@ static unsigned getSameOpcode(ArrayRef<Value *> VL) {
unsigned Opcode = I0->getOpcode();
for (int i = 1, e = VL.size(); i < e; i++) {
Instruction *I = dyn_cast<Instruction>(VL[i]);
- if (!I || Opcode != I->getOpcode())
+ if (!I || Opcode != I->getOpcode()) {
+ if (canCombineAsAltInst(Opcode) && i == 1)
+ return isAltInst(VL);
return 0;
+ }
}
return Opcode;
}
@@ -377,6 +422,7 @@ public:
/// \brief Perform LICM and CSE on the newly generated gather sequences.
void optimizeGatherSequence();
+
private:
struct TreeEntry;
@@ -594,6 +640,7 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
bool SameTy = getSameType(VL); (void)SameTy;
+ bool isAltShuffle = false;
assert(SameTy && "Invalid types!");
if (Depth == RecursionMaxDepth) {
@@ -615,10 +662,19 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
newTreeEntry(VL, false);
return;
}
+ unsigned Opcode = getSameOpcode(VL);
+
+ // Check that this shuffle vector refers to the alternate
+ // sequence of opcodes.
+ if (Opcode == Instruction::ShuffleVector) {
+ Instruction *I0 = dyn_cast<Instruction>(VL[0]);
+ unsigned Op = I0->getOpcode();
+ if (Op != Instruction::ShuffleVector)
+ isAltShuffle = true;
+ }
// If all of the operands are identical or constant we have a simple solution.
- if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
- !getSameOpcode(VL)) {
+ if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
newTreeEntry(VL, false);
return;
@@ -754,8 +810,6 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
- unsigned Opcode = getSameOpcode(VL);
-
// Check if it is safe to sink the loads or the stores.
if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
Instruction *Last = getLastInstruction(VL);
@@ -914,8 +968,20 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
ValueList Left, Right;
reorderInputsAccordingToOpcode(VL, Left, Right);
- buildTree_rec(Left, Depth + 1);
- buildTree_rec(Right, Depth + 1);
+ BasicBlock *LeftBB = getSameBlock(Left);
+ BasicBlock *RightBB = getSameBlock(Right);
 +      // If we have common uses on separate paths in the tree, make sure we
 +      // process the one with greater common depth first.
 +      // We can use block numbering to determine the subtree traversal, as the
 +      // earlier user has to come between the common use and the later user.
+ if (LeftBB && RightBB && LeftBB == RightBB &&
+ getLastIndex(Right) > getLastIndex(Left)) {
+ buildTree_rec(Right, Depth + 1);
+ buildTree_rec(Left, Depth + 1);
+ } else {
+ buildTree_rec(Left, Depth + 1);
+ buildTree_rec(Right, Depth + 1);
+ }
return;
}
@@ -929,6 +995,51 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
}
return;
}
+ case Instruction::GetElementPtr: {
+ // We don't combine GEPs with complicated (nested) indexing.
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
+ DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ // We can't combine several GEPs into one vector if they operate on
+ // different types.
+ Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
+ if (Ty0 != CurTy) {
+ DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ // We don't combine GEPs with non-constant indexes.
+ for (unsigned j = 0; j < VL.size(); ++j) {
+ auto Op = cast<Instruction>(VL[j])->getOperand(1);
+ if (!isa<ConstantInt>(Op)) {
+ DEBUG(
+ dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
+ newTreeEntry(VL, false);
+ return;
+ }
+ }
+
+ newTreeEntry(VL, true);
+ DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
+ for (unsigned i = 0, e = 2; i < e; ++i) {
+ ValueList Operands;
+ // Prepare the operand vector.
+ for (unsigned j = 0; j < VL.size(); ++j)
+ Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+ buildTree_rec(Operands, Depth + 1);
+ }
+ return;
+ }
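A sketch of the GEP bundles this case accepts: two operands each, a common source type, and constant indices:

    // Four address computations with constant indices over the same base
    // type; SLP can now widen them into one vector GEP (here feeding
    // stores of the computed pointers).
    void gep_bundle(int *base, int **out) {
      out[0] = base + 1;
      out[1] = base + 2;
      out[2] = base + 3;
      out[3] = base + 4;
    }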
case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
@@ -961,9 +1072,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
return;
}
-
Function *Int = CI->getCalledFunction();
-
+ Value *A1I = nullptr;
+ if (hasVectorInstrinsicScalarOpd(ID, 1))
+ A1I = CI->getArgOperand(1);
for (unsigned i = 1, e = VL.size(); i != e; ++i) {
CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
if (!CI2 || CI2->getCalledFunction() != Int ||
@@ -973,6 +1085,18 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
<< "\n");
return;
}
 +      // ctlz, cttz and powi are special intrinsics whose second argument
 +      // must be the same in order for them to be vectorized.
+ if (hasVectorInstrinsicScalarOpd(ID, 1)) {
+ Value *A1J = CI2->getArgOperand(1);
+ if (A1I != A1J) {
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
+ << " argument "<< A1I<<"!=" << A1J
+ << "\n");
+ return;
+ }
+ }
}
newTreeEntry(VL, true);
@@ -987,6 +1111,26 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
}
return;
}
+ case Instruction::ShuffleVector: {
+ // If this is not an alternate sequence of opcode like add-sub
+ // then do not vectorize this instruction.
+ if (!isAltShuffle) {
+ newTreeEntry(VL, false);
 +        DEBUG(dbgs() << "SLP: ShuffleVector instructions are not vectorized.\n");
+ return;
+ }
+ newTreeEntry(VL, true);
+ DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
+ for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
+ ValueList Operands;
+ // Prepare the operand vector.
+ for (unsigned j = 0; j < VL.size(); ++j)
+ Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
+
+ buildTree_rec(Operands, Depth + 1);
+ }
+ return;
+ }
default:
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
@@ -1010,11 +1154,9 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
return getGatherCost(E->Scalars);
}
-
- assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
- "Invalid VL");
+ unsigned Opcode = getSameOpcode(VL);
+ assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
Instruction *VL0 = cast<Instruction>(VL[0]);
- unsigned Opcode = VL0->getOpcode();
switch (Opcode) {
case Instruction::PHI: {
return 0;
@@ -1121,6 +1263,20 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
return VecCost - ScalarCost;
}
+ case Instruction::GetElementPtr: {
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_UniformConstantValue;
+
+ int ScalarCost =
+ VecTy->getNumElements() *
+ TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
+ int VecCost =
+ TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
+
+ return VecCost - ScalarCost;
+ }
case Instruction::Load: {
// Cost of wide load - cost of scalar loads.
int ScalarLdCost = VecTy->getNumElements() *
@@ -1158,6 +1314,32 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
return VecCallCost - ScalarCallCost;
}
+ case Instruction::ShuffleVector: {
+ TargetTransformInfo::OperandValueKind Op1VK =
+ TargetTransformInfo::OK_AnyValue;
+ TargetTransformInfo::OperandValueKind Op2VK =
+ TargetTransformInfo::OK_AnyValue;
+ int ScalarCost = 0;
+ int VecCost = 0;
+ for (unsigned i = 0; i < VL.size(); ++i) {
+ Instruction *I = cast<Instruction>(VL[i]);
+ if (!I)
+ break;
+ ScalarCost +=
+ TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
+ }
+ // VecCost is equal to sum of the cost of creating 2 vectors
+ // and the cost of creating shuffle.
+ Instruction *I0 = cast<Instruction>(VL[0]);
+ VecCost =
+ TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
+ Instruction *I1 = cast<Instruction>(VL[1]);
+ VecCost +=
+ TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
+ VecCost +=
+ TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
+ return VecCost - ScalarCost;
+ }
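A worked instance of this computation with assumed unit costs (real values come from TTI):

    int altShuffleEntryCost() {
      int ScalarCost = 4 * 1;  // four alternating scalar fadd/fsub, cost 1 each
      int VecCost = 1 + 1 + 1; // vector fadd + vector fsub + alternate shuffle
      return VecCost - ScalarCost; // -1: vectorizing the bundle is profitable
    }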
default:
llvm_unreachable("Unknown instruction");
}
@@ -1438,9 +1620,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
setInsertPointAfterBundle(E->Scalars);
return Gather(E->Scalars, VecTy);
}
-
- unsigned Opcode = VL0->getOpcode();
- assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");
+ unsigned Opcode = getSameOpcode(E->Scalars);
switch (Opcode) {
case Instruction::PHI: {
@@ -1649,12 +1829,52 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
E->VectorizedValue = S;
return propagateMetadata(S, E->Scalars);
}
+ case Instruction::GetElementPtr: {
+ setInsertPointAfterBundle(E->Scalars);
+
+ ValueList Op0VL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i)
+ Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0));
+
+ Value *Op0 = vectorizeTree(Op0VL);
+
+ std::vector<Value *> OpVecs;
+ for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
+ ++j) {
+ ValueList OpVL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i)
+ OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j));
+
+ Value *OpVec = vectorizeTree(OpVL);
+ OpVecs.push_back(OpVec);
+ }
+
+ Value *V = Builder.CreateGEP(Op0, OpVecs);
+ E->VectorizedValue = V;
+
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return propagateMetadata(I, E->Scalars);
+
+ return V;
+ }
case Instruction::Call: {
CallInst *CI = cast<CallInst>(VL0);
setInsertPointAfterBundle(E->Scalars);
+ Function *FI;
+ Intrinsic::ID IID = Intrinsic::not_intrinsic;
+ if (CI && (FI = CI->getCalledFunction())) {
+ IID = (Intrinsic::ID) FI->getIntrinsicID();
+ }
std::vector<Value *> OpVecs;
for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
ValueList OpVL;
 +      // ctlz, cttz and powi are special intrinsics whose second argument is
+ // a scalar. This argument should not be vectorized.
+ if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
+ CallInst *CEI = cast<CallInst>(E->Scalars[0]);
+ OpVecs.push_back(CEI->getArgOperand(j));
+ continue;
+ }
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
CallInst *CEI = cast<CallInst>(E->Scalars[i]);
OpVL.push_back(CEI->getArgOperand(j));
@@ -1673,6 +1893,49 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
E->VectorizedValue = V;
return V;
}
+ case Instruction::ShuffleVector: {
+ ValueList LHSVL, RHSVL;
+ for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
+ LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
+ RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
+ }
+ setInsertPointAfterBundle(E->Scalars);
+
+ Value *LHS = vectorizeTree(LHSVL);
+ Value *RHS = vectorizeTree(RHSVL);
+
+ if (Value *V = alreadyVectorized(E->Scalars))
+ return V;
+
+ // Create a vector of LHS op1 RHS
+ BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
+ Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
+
+ // Create a vector of LHS op2 RHS
+ Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
+ BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
+ Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
+
+ // Create appropriate shuffle to take alternative operations from
+ // the vector.
+ std::vector<Constant *> Mask(E->Scalars.size());
+ unsigned e = E->Scalars.size();
+ for (unsigned i = 0; i < e; ++i) {
+ if (i & 1)
+ Mask[i] = Builder.getInt32(e + i);
+ else
+ Mask[i] = Builder.getInt32(i);
+ }
+
+ Value *ShuffleMask = ConstantVector::get(Mask);
+
+ Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
+ E->VectorizedValue = V;
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ return propagateMetadata(I, E->Scalars);
+
+ return V;
+ }
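The mask this loop builds for a four-wide bundle, checked with a small standalone program:

    #include <cstdio>

    int main() {
      const unsigned e = 4;             // bundle width
      for (unsigned i = 0; i < e; ++i)  // even lanes from V0, odd from V1
        std::printf("%u ", (i & 1) ? e + i : i);
      std::printf("\n");                // prints: 0 5 2 7
      return 0;
    }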
default:
llvm_unreachable("unknown inst");
}
@@ -1741,7 +2004,6 @@ Value *BoUpSLP::vectorizeTree() {
// For each lane:
for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
Value *Scalar = Entry->Scalars[Lane];
-
// No need to handle users of gathered values.
if (Entry->NeedToGather)
continue;
@@ -1925,7 +2187,6 @@ struct SLPVectorizer : public FunctionPass {
for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
e = po_end(&F.getEntryBlock()); it != e; ++it) {
BasicBlock *BB = *it;
-
// Vectorize trees that end at stores.
if (unsigned count = collectStores(BB, R)) {
(void)count;