aboutsummaryrefslogtreecommitdiffstats
path: root/include/llvm/Transforms/Utils
diff options
context:
space:
mode:
Diffstat (limited to 'include/llvm/Transforms/Utils')
-rw-r--r--include/llvm/Transforms/Utils/BasicBlockUtils.h158
-rw-r--r--include/llvm/Transforms/Utils/BuildLibCalls.h14
-rw-r--r--include/llvm/Transforms/Utils/Cloning.h67
-rw-r--r--include/llvm/Transforms/Utils/Local.h23
-rw-r--r--include/llvm/Transforms/Utils/LoopUtils.h69
-rw-r--r--include/llvm/Transforms/Utils/PromoteMemToReg.h4
-rw-r--r--include/llvm/Transforms/Utils/SSAUpdater.h4
-rw-r--r--include/llvm/Transforms/Utils/SimplifyLibCalls.h70
-rw-r--r--include/llvm/Transforms/Utils/SymbolRewriter.h4
-rw-r--r--include/llvm/Transforms/Utils/UnrollLoop.h9
-rw-r--r--include/llvm/Transforms/Utils/ValueMapper.h21
-rw-r--r--include/llvm/Transforms/Utils/VectorUtils.h6
12 files changed, 321 insertions, 128 deletions
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 19acf5b..710db03 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -23,10 +23,11 @@
namespace llvm {
class AliasAnalysis;
+class MemoryDependenceAnalysis;
class DominatorTree;
+class LoopInfo;
class Instruction;
class MDNode;
-class Pass;
class ReturnInst;
class TargetLibraryInfo;
class TerminatorInst;
@@ -39,7 +40,8 @@ void DeleteDeadBlock(BasicBlock *BB);
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
-void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = nullptr);
+void FoldSingleEntryPHINodes(BasicBlock *BB, AliasAnalysis *AA = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
/// is dead. Also recursively delete any operands that become dead as
@@ -50,7 +52,10 @@ bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
-bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = nullptr);
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ AliasAnalysis *AA = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
@@ -70,18 +75,62 @@ void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);
+/// \brief Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ LoopInfo *LI;
+ bool MergeIdenticalEdges;
+ bool DontDeleteUselessPHIs;
+ bool PreserveLCSSA;
+
+ CriticalEdgeSplittingOptions()
+ : AA(nullptr), DT(nullptr), LI(nullptr), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ /// \brief Basic case of setting up all the analysis.
+ CriticalEdgeSplittingOptions(AliasAnalysis *AA, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr)
+ : AA(AA), DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ /// \brief A common pattern is to preserve the dominator tree and loop
+ /// info but not care about AA.
+ CriticalEdgeSplittingOptions(DominatorTree *DT, LoopInfo *LI)
+ : AA(nullptr), DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+ MergeIdenticalEdges = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+ DontDeleteUselessPHIs = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+ PreserveLCSSA = true;
+ return *this;
+ }
+};
+
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
-/// split the critical edge. This will update DominatorTree and
-/// DominatorFrontier information if it is available, thus calling this pass
-/// will not invalidate either of them. This returns the new block if the edge
-/// was split, null otherwise.
+/// split the critical edge. This will update the analyses passed in through
+/// the option struct. This returns the new block if the edge was split, null
+/// otherwise.
///
-/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
-/// specified successor will be merged into the same critical edge block.
-/// This is most commonly interesting with switch instructions, which may
-/// have many edges to any one destination. This ensures that all edges to that
-/// dest go to one block instead of each going to a different block, but isn't
-/// the standard definition of a "critical edge".
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination. This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst. Splitting these edges will almost always create an invalid
@@ -89,14 +138,15 @@ void ReplaceInstWithInst(Instruction *From, Instruction *To);
/// to.
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
- Pass *P = nullptr,
- bool MergeIdenticalEdges = false,
- bool DontDeleteUselessPHIs = false,
- bool SplitLandingPads = false);
-
-inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
- Pass *P = nullptr) {
- return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(), P);
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+ Options);
}
/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return
@@ -105,55 +155,62 @@ inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
/// function. If P is specified, it updates the analyses
/// described above.
inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
- Pass *P = nullptr) {
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
bool MadeChange = false;
TerminatorInst *TI = (*PI)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
if (TI->getSuccessor(i) == Succ)
- MadeChange |= !!SplitCriticalEdge(TI, i, P);
+ MadeChange |= !!SplitCriticalEdge(TI, i, Options);
return MadeChange;
}
/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge
/// and return true, otherwise return false. This method requires that there be
-/// an edge between the two blocks. If P is specified, it updates the analyses
-/// described above.
-inline BasicBlock *SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
- Pass *P = nullptr,
- bool MergeIdenticalEdges = false,
- bool DontDeleteUselessPHIs = false) {
+/// an edge between the two blocks. It updates the analyses
+/// passed in the options struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
TerminatorInst *TI = Src->getTerminator();
unsigned i = 0;
while (1) {
assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
if (TI->getSuccessor(i) == Dst)
- return SplitCriticalEdge(TI, i, P, MergeIdenticalEdges,
- DontDeleteUselessPHIs);
+ return SplitCriticalEdge(TI, i, Options);
++i;
}
}
// SplitAllCriticalEdges - Loop over all of the edges in the CFG,
-// breaking critical edges as they are found. Pass P must not be NULL.
+// breaking critical edges as they are found.
// Returns the number of broken edges.
-unsigned SplitAllCriticalEdges(Function &F, Pass *P);
+unsigned SplitAllCriticalEdges(Function &F,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
-/// SplitEdge - Split the edge connecting specified block. Pass P must
-/// not be NULL.
-BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To, Pass *P);
+/// SplitEdge - Split the edge connecting specified block.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
/// SplitBlock - Split the specified block at the specified instruction - every
/// thing before SplitPt stays in Old and everything starting with SplitPt moves
/// to a new block. The two blocks are joined by an unconditional branch and
/// the loop info is updated.
///
-BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
-/// SplitBlockPredecessors - This method transforms BB by introducing a new
-/// basic block into the function, and moving some of the predecessors of BB to
-/// be predecessors of the new block. The new predecessors are indicated by the
-/// Preds array, which has NumPreds elements in it. The new block is given a
-/// suffix of 'Suffix'. This function returns the new block.
+/// SplitBlockPredecessors - This method introduces at least one new basic block
+/// into the function and moves some of the predecessors of BB to be
+/// predecessors of the new block. The new predecessors are indicated by the
+/// Preds array. The new block is given a suffix of 'Suffix'. Returns new basic
+/// block to which predecessors from Preds are now pointing.
+///
+/// If BB is a landingpad block then an additional basic block might be introduced.
+/// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more
+/// details on this case.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// DominanceFrontier, LoopInfo, and LCCSA but no other analyses.
@@ -161,8 +218,12 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
/// complicated to handle the case where one of the edges being split
/// is an exit of a loop with other exits).
///
-BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
- const char *Suffix, Pass *P = nullptr);
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix,
+ AliasAnalysis *AA = nullptr,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
@@ -177,9 +238,14 @@ BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
/// case where one of the edges being split is an exit of a loop with other
/// exits).
///
-void SplitLandingPadPredecessors(BasicBlock *OrigBB,ArrayRef<BasicBlock*> Preds,
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+ ArrayRef<BasicBlock *> Preds,
const char *Suffix, const char *Suffix2,
- Pass *P, SmallVectorImpl<BasicBlock*> &NewBBs);
+ SmallVectorImpl<BasicBlock *> &NewBBs,
+ AliasAnalysis *AA = nullptr,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
/// FoldReturnIntoUncondBranch - This method duplicates the specified return
/// instruction into a predecessor which ends in an unconditional branch. If
diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h
index 1e407fb..6387c16 100644
--- a/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -115,20 +115,6 @@ namespace llvm {
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
-
- /// SimplifyFortifiedLibCalls - Helper class for folding checked library
- /// calls (e.g. __strcpy_chk) into their unchecked counterparts.
- class SimplifyFortifiedLibCalls {
- protected:
- CallInst *CI;
- virtual void replaceCall(Value *With) = 0;
- virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
- bool isString) const = 0;
-
- public:
- virtual ~SimplifyFortifiedLibCalls();
- bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
- };
}
#endif
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 740d725..d1563ef 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -44,7 +44,7 @@ class Loop;
class LoopInfo;
class AllocaInst;
class AliasAnalysis;
-class AssumptionTracker;
+class AssumptionCacheTracker;
/// CloneModule - Return an exact copy of the specified module
///
@@ -95,8 +95,7 @@ struct ClonedCodeInfo {
/// function, you can specify a ClonedCodeInfo object with the optional fifth
/// parameter.
///
-BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- ValueToValueMapTy &VMap,
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
const Twine &NameSuffix = "", Function *F = nullptr,
ClonedCodeInfo *CodeInfo = nullptr);
@@ -112,8 +111,7 @@ BasicBlock *CloneBasicBlock(const BasicBlock *BB,
/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
/// mappings, and debug info metadata will not be cloned.
///
-Function *CloneFunction(const Function *F,
- ValueToValueMapTy &VMap,
+Function *CloneFunction(const Function *F, ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = nullptr);
@@ -127,14 +125,49 @@ Function *CloneFunction(const Function *F,
/// mappings.
///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueToValueMapTy &VMap,
- bool ModuleLevelChanges,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
+/// A helper class used with CloneAndPruneIntoFromInst to change the default
+/// behavior while instructions are being cloned.
+class CloningDirector {
+public:
+ /// This enumeration describes the way CloneAndPruneIntoFromInst should
+ /// proceed after the CloningDirector has examined an instruction.
+ enum CloningAction {
+ ///< Continue cloning the instruction (default behavior).
+ CloneInstruction,
+ ///< Skip this instruction but continue cloning the current basic block.
+ SkipInstruction,
+ ///< Skip this instruction and stop cloning the current basic block.
+ StopCloningBB
+ };
+
+ virtual ~CloningDirector() {}
+
+ /// Subclasses must override this function to customize cloning behavior.
+ virtual CloningAction handleInstruction(ValueToValueMapTy &VMap,
+ const Instruction *Inst,
+ BasicBlock *NewBB) = 0;
+
+ virtual ValueMapTypeRemapper *getTypeRemapper() { return nullptr; }
+ virtual ValueMaterializer *getValueMaterializer() { return nullptr; }
+};
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+ const Instruction *StartingInst,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ const DataLayout *DL = nullptr,
+ CloningDirector *Director = nullptr);
+
+
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
@@ -147,8 +180,7 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// mappings.
///
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueToValueMapTy &VMap,
- bool ModuleLevelChanges,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
@@ -162,19 +194,19 @@ public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
const DataLayout *DL = nullptr,
AliasAnalysis *AA = nullptr,
- AssumptionTracker *AT = nullptr)
- : CG(cg), DL(DL), AA(AA), AT(AT) {}
+ AssumptionCacheTracker *ACT = nullptr)
+ : CG(cg), DL(DL), AA(AA), ACT(ACT) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
const DataLayout *DL;
AliasAnalysis *AA;
- AssumptionTracker *AT;
+ AssumptionCacheTracker *ACT;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
- SmallVector<AllocaInst*, 4> StaticAllocas;
+ SmallVector<AllocaInst *, 4> StaticAllocas;
/// InlinedCalls - InlineFunction fills this in with callsites that were
/// inlined from the callee. This is only filled in if CG is non-null.
@@ -196,9 +228,12 @@ public:
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
///
-bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, bool InsertLifetime = true);
-bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, bool InsertLifetime = true);
-bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI, bool InsertLifetime = true);
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
} // End llvm namespace
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index e89e5e5..463ab96 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -31,10 +31,9 @@ class DbgDeclareInst;
class StoreInst;
class LoadInst;
class Value;
-class Pass;
class PHINode;
class AllocaInst;
-class AssumptionTracker;
+class AssumptionCache;
class ConstantExpr;
class DataLayout;
class TargetLibraryInfo;
@@ -115,7 +114,7 @@ void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
/// between them, moving the instructions in the predecessor into BB. This
/// deletes the predecessor block.
///
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = nullptr);
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
@@ -138,9 +137,8 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- unsigned BonusInstThreshold,
- const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr);
+ unsigned BonusInstThreshold, const DataLayout *TD = nullptr,
+ AssumptionCache *AC = nullptr);
/// FlatternCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
@@ -176,17 +174,17 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr,
+ AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V,
const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr,
+ AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr) {
- return getOrEnforceKnownAlignment(V, 0, TD, AT, CxtI, DT);
+ return getOrEnforceKnownAlignment(V, 0, TD, AC, CxtI, DT);
}
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
@@ -276,10 +274,11 @@ bool LowerDbgDeclare(Function &F);
/// an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
-/// replaceDbgDeclareForAlloca - Replaces llvm.dbg.declare instruction when
-/// alloca is replaced with a new value.
+/// \brief Replaces llvm.dbg.declare instruction when an alloca is replaced with
+/// a new value. If Deref is true, an additional DW_OP_deref is prepended to
+/// the expression.
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
- DIBuilder &Builder);
+ DIBuilder &Builder, bool Deref);
/// \brief Remove all blocks that can not be reached from the function's entry.
///
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index fdae80d..bb80f20 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -14,16 +14,33 @@
#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Dominators.h"
+
namespace llvm {
class AliasAnalysis;
-class AssumptionTracker;
+class AliasSet;
+class AliasSetTracker;
+class AssumptionCache;
class BasicBlock;
class DataLayout;
class DominatorTree;
class Loop;
class LoopInfo;
class Pass;
+class PredIteratorCache;
class ScalarEvolution;
+class TargetLibraryInfo;
+
+/// \brief Captures loop safety information.
+/// It keeps track of whether the loop or its header may throw an exception.
+struct LICMSafetyInfo {
+ bool MayThrow; // The current loop contains an instruction which
+ // may throw.
+ bool HeaderMayThrow; // Same as previous, but specific to loop header
+ LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false)
+ {}
+};
BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
@@ -36,7 +53,7 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr,
const DataLayout *DL = nullptr,
- AssumptionTracker *AT = nullptr);
+ AssumptionCache *AC = nullptr);
/// \brief Put loop into LCSSA form.
///
@@ -49,7 +66,8 @@ bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE = nullptr);
/// \brief Put a loop nest into LCSSA form.
///
@@ -60,8 +78,51 @@ bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSARecursively(Loop &L, DominatorTree &DT,
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
ScalarEvolution *SE = nullptr);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as arguments.
+/// It returns changed status.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ const DataLayout *, TargetLibraryInfo *, Loop *,
+ AliasSetTracker *, LICMSafetyInfo *);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree. This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
+/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
+/// loop and loop safety information as arguments. It returns changed status.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ const DataLayout *, TargetLibraryInfo *, Loop *,
+ AliasSetTracker *, LICMSafetyInfo *);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes AliasSet, Loop exit blocks vector, loop exit blocks
+/// insertion point vector, PredIteratorCache, LoopInfo, DominatorTree, Loop,
+/// AliasSet information for all instructions of the loop and loop safety
+/// information as arguments. It returns changed status.
+bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock*> &,
+ SmallVectorImpl<Instruction*> &,
+ PredIteratorCache &, LoopInfo *,
+ DominatorTree *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Computes safety information for a loop
+/// It checks the loop body and header for the possibility of a thrown
+/// exception; it takes a LICMSafetyInfo and a loop as arguments.
+/// Updates safety information in LICMSafetyInfo argument.
+void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *);
+
}
#endif
diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 3fdd5e9..d0602bf 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -22,7 +22,7 @@ namespace llvm {
class AllocaInst;
class DominatorTree;
class AliasSetTracker;
-class AssumptionTracker;
+class AssumptionCache;
/// \brief Return true if this alloca is legal for promotion.
///
@@ -43,7 +43,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
/// made to the IR.
void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
AliasSetTracker *AST = nullptr,
- AssumptionTracker *AT = nullptr);
+ AssumptionCache *AC = nullptr);
} // End llvm namespace
diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h
index 7874a5f..19e2a4a 100644
--- a/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -118,8 +118,8 @@ public:
private:
Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
- void operator=(const SSAUpdater&) LLVM_DELETED_FUNCTION;
- SSAUpdater(const SSAUpdater&) LLVM_DELETED_FUNCTION;
+ void operator=(const SSAUpdater&) = delete;
+ SSAUpdater(const SSAUpdater&) = delete;
};
/// \brief Helper class for promoting a collection of loads and stores into SSA
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 6765ac1..08358e1 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -16,6 +16,7 @@
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
namespace llvm {
@@ -27,20 +28,67 @@ class TargetLibraryInfo;
class BasicBlock;
class Function;
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk), to,
+/// when possible, replace them with their non-checking counterparts.
+/// Other optimizations can also be done, but it's possible to disable them and
+/// only simplify needless use of the checking versions (when the object size
+/// is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+ const DataLayout *DL;
+ const TargetLibraryInfo *TLI;
+ bool OnlyLowerUnknownSize;
+
+public:
+ FortifiedLibCallSimplifier(const DataLayout *DL, const TargetLibraryInfo *TLI,
+ bool OnlyLowerUnknownSize = false);
+
+ /// \brief Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+ // Str/Stp cpy are similar enough to be handled in the same functions.
+ Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+ Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+
+ /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+ /// to the non-fortified version.
+ bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+ unsigned SizeOp, bool isString);
+};
+
/// LibCallSimplifier - This class implements a collection of optimizations
/// that replace well formed calls to library functions with a more optimal
/// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
class LibCallSimplifier {
private:
+ FortifiedLibCallSimplifier FortifiedSimplifier;
const DataLayout *DL;
const TargetLibraryInfo *TLI;
bool UnsafeFPShrink;
+ function_ref<void(Instruction *, Value *)> Replacer;
+
+ /// \brief Internal wrapper for RAUW that is the default implementation.
+ ///
+ /// Other users may provide an alternate function with this signature instead
+ /// of this one.
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With);
-protected:
- ~LibCallSimplifier() {}
+ /// \brief Replace an instruction's uses with a value using our replacer.
+ void replaceAllUsesWith(Instruction *I, Value *With);
public:
- LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI);
+ LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault);
/// optimizeCall - Take the given call instruction and return a more
/// optimal value to replace the instruction with or 0 if a more
@@ -48,22 +96,10 @@ public:
/// be equal to the instruction being optimized. In this case all
/// other instructions that use the given instruction were modified
/// and the given instruction is dead.
+ /// The call must not be an indirect call.
Value *optimizeCall(CallInst *CI);
- /// replaceAllUsesWith - This method is used when the library call
- /// simplifier needs to replace instructions other than the library
- /// call being modified.
- virtual void replaceAllUsesWith(Instruction *I, Value *With) const;
-
private:
- // Fortified Library Call Optimizations
- Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStrCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStpCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStrNCpyChk(CallInst *CI, IRBuilder<> &B);
-
// String and Memory Library Call Optimizations
Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
@@ -84,6 +120,8 @@ private:
Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+ // Wrapper for all String/Memory Library Call Optimizations
+ Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
// Math Library Optimizations
Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B, bool CheckRetType);
diff --git a/include/llvm/Transforms/Utils/SymbolRewriter.h b/include/llvm/Transforms/Utils/SymbolRewriter.h
index af79372..48dea04 100644
--- a/include/llvm/Transforms/Utils/SymbolRewriter.h
+++ b/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -60,10 +60,10 @@ namespace SymbolRewriter {
/// select the symbols to rewrite. This descriptor list is passed to the
/// SymbolRewriter pass.
class RewriteDescriptor : public ilist_node<RewriteDescriptor> {
- RewriteDescriptor(const RewriteDescriptor &) LLVM_DELETED_FUNCTION;
+ RewriteDescriptor(const RewriteDescriptor &) = delete;
const RewriteDescriptor &
- operator=(const RewriteDescriptor &) LLVM_DELETED_FUNCTION;
+ operator=(const RewriteDescriptor &) = delete;
public:
enum class Type {
diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h
index 0b88d25..04d9ee1 100644
--- a/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -16,20 +16,25 @@
#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
-class AssumptionTracker;
+class AssumptionCache;
class Loop;
class LoopInfo;
class LPPassManager;
+class MDNode;
class Pass;
bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
unsigned TripMultiple, LoopInfo *LI, Pass *PP,
- LPPassManager *LPM, AssumptionTracker *AT);
+ LPPassManager *LPM, AssumptionCache *AC);
bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
LPPassManager* LPM);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
}
#endif
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index 5774763..047ab81 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -71,20 +71,23 @@ namespace llvm {
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
+ Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapMetadata - provide versions that preserve type safety for MDNodes.
+ MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
- /// MapValue - provide versions that preserve type safety for MDNode and
- /// Constants.
- inline MDNode *MapValue(const MDNode *V, ValueToValueMapTy &VM,
- RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = nullptr,
- ValueMaterializer *Materializer = nullptr) {
- return cast<MDNode>(MapValue((const Value*)V, VM, Flags, TypeMapper,
- Materializer));
- }
+ /// MapValue - provide versions that preserve type safety for Constants.
inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
ValueMapTypeRemapper *TypeMapper = nullptr,
diff --git a/include/llvm/Transforms/Utils/VectorUtils.h b/include/llvm/Transforms/Utils/VectorUtils.h
index 83871fc..9f0fb19 100644
--- a/include/llvm/Transforms/Utils/VectorUtils.h
+++ b/include/llvm/Transforms/Utils/VectorUtils.h
@@ -14,9 +14,9 @@
#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/IR/Intrinsics.h"
namespace llvm {
@@ -59,7 +59,7 @@ static inline bool isTriviallyVectorizable(Intrinsic::ID ID) {
}
}
-static bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
+static inline bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
switch (ID) {
case Intrinsic::ctlz: