Diffstat (limited to 'include/llvm/Transforms')
 include/llvm/Transforms/IPO.h                              |   4
 include/llvm/Transforms/IPO/LowerBitSets.h                 | 153
 include/llvm/Transforms/IPO/PassManagerBuilder.h           |  21
 include/llvm/Transforms/InstCombine/InstCombine.h          |  46
 include/llvm/Transforms/InstCombine/InstCombineWorklist.h  | 116
 include/llvm/Transforms/Instrumentation.h                  |  56
 include/llvm/Transforms/Scalar.h                           |  37
 include/llvm/Transforms/Scalar/EarlyCSE.h                  |  39
 include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h      |  40
 include/llvm/Transforms/Scalar/SimplifyCFG.h               |  46
 include/llvm/Transforms/Utils/BasicBlockUtils.h            | 158
 include/llvm/Transforms/Utils/BuildLibCalls.h              |  14
 include/llvm/Transforms/Utils/Cloning.h                    |  67
 include/llvm/Transforms/Utils/Local.h                      |  23
 include/llvm/Transforms/Utils/LoopUtils.h                  |  69
 include/llvm/Transforms/Utils/PromoteMemToReg.h            |   4
 include/llvm/Transforms/Utils/SSAUpdater.h                 |   4
 include/llvm/Transforms/Utils/SimplifyLibCalls.h           |  70
 include/llvm/Transforms/Utils/SymbolRewriter.h             |   4
 include/llvm/Transforms/Utils/UnrollLoop.h                 |   9
 include/llvm/Transforms/Utils/ValueMapper.h                |  21
 include/llvm/Transforms/Utils/VectorUtils.h                |   6
22 files changed, 830 insertions(+), 177 deletions(-)
diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h
index ce1a7d6..fbd999c 100644
--- a/include/llvm/Transforms/IPO.h
+++ b/include/llvm/Transforms/IPO.h
@@ -199,6 +199,10 @@ ModulePass *createMetaRenamerPass();
/// manager.
ModulePass *createBarrierNoopPass();
+/// \brief This pass lowers bitset metadata and the llvm.bitset.test intrinsic
+/// to bitsets.
+ModulePass *createLowerBitSetsPass();
+
} // End llvm namespace
#endif
diff --git a/include/llvm/Transforms/IPO/LowerBitSets.h b/include/llvm/Transforms/IPO/LowerBitSets.h
new file mode 100644
index 0000000..0f60617
--- /dev/null
+++ b/include/llvm/Transforms/IPO/LowerBitSets.h
@@ -0,0 +1,153 @@
+//===- LowerBitSets.h - Bitset lowering pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the bitset lowering pass implementation that may
+// be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+#define LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <stdint.h>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class DataLayout;
+class GlobalVariable;
+class Value;
+
+struct BitSetInfo {
+ // The actual bitset.
+ std::vector<uint8_t> Bits;
+
+ // The byte offset into the combined global represented by the bitset.
+ uint64_t ByteOffset;
+
+ // The size of the bitset in bits.
+ uint64_t BitSize;
+
+ // Log2 alignment of the bit set relative to the combined global.
+ // For example, a log2 alignment of 3 means that bits in the bitset
+ // represent addresses 8 bytes apart.
+ unsigned AlignLog2;
+
+ bool isSingleOffset() const {
+ return Bits.size() == 1 && Bits[0] == 1;
+ }
+
+ bool isAllOnes() const {
+ for (unsigned I = 0; I != Bits.size() - 1; ++I)
+ if (Bits[I] != 0xFF)
+ return false;
+
+ if (BitSize % 8 == 0)
+ return Bits[Bits.size() - 1] == 0xFF;
+
+ return Bits[Bits.size() - 1] == (1 << (BitSize % 8)) - 1;
+ }
+
+ bool containsGlobalOffset(uint64_t Offset) const;
+
+ bool containsValue(const DataLayout *DL,
+ const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout,
+ Value *V, uint64_t COffset = 0) const;
+
+};
+
+struct BitSetBuilder {
+ SmallVector<uint64_t, 16> Offsets;
+ uint64_t Min, Max;
+
+ BitSetBuilder() : Min(std::numeric_limits<uint64_t>::max()), Max(0) {}
+
+ void addOffset(uint64_t Offset) {
+ if (Min > Offset)
+ Min = Offset;
+ if (Max < Offset)
+ Max = Offset;
+
+ Offsets.push_back(Offset);
+ }
+
+ BitSetInfo build();
+};
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual tables
+/// and the bit sets represent their derived classes: in a single inheritance
+/// hierarchy, the optimum layout would involve a depth-first search of the
+/// class hierarchy (and in fact the computed layout ends up looking a lot like
+/// a DFS), but a naive DFS would not work well in the presence of multiple
+/// inheritance. This aspect of the algorithm ends up fitting smaller
+/// hierarchies inside larger ones where that would be beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+///   A   B
+///    \ /|\
+///     C D E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
+/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set, passing the object
+/// indices of its referenced globals. It then assembles the final layout from
+/// the computed fragments in the Fragments field.
+struct GlobalLayoutBuilder {
+ /// The computed layout. Each element of this vector contains a fragment of
+ /// layout (which may be empty) consisting of object indices.
+ std::vector<std::vector<uint64_t>> Fragments;
+
+ /// Mapping from object index to fragment index.
+ std::vector<uint64_t> FragmentMap;
+
+ GlobalLayoutBuilder(uint64_t NumObjects)
+ : Fragments(1), FragmentMap(NumObjects) {}
+
+ /// Add F to the layout while trying to keep its indices contiguous.
+ /// If a previously seen fragment uses any of F's indices, that
+ /// fragment will be laid out inside F.
+ void addFragment(const std::set<uint64_t> &F);
+};
+
+} // namespace llvm
+
+#endif
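
Usage sketch (editorial, not part of this patch): the following replays the
A/B/C/D/E example from the GlobalLayoutBuilder comment above. The function
name and the 0-4 object index assignment are invented for illustration.

#include "llvm/Transforms/IPO/LowerBitSets.h"
#include <set>

using namespace llvm;

// Indices 0..4 stand for the globals A, B, C, D, E.
void buildExampleLayout() {
  GlobalLayoutBuilder Builder(/*NumObjects=*/5);
  Builder.addFragment(std::set<uint64_t>{2});          // bsC = {C}
  Builder.addFragment(std::set<uint64_t>{3});          // bsD = {D}
  Builder.addFragment(std::set<uint64_t>{4});          // bsE = {E}
  Builder.addFragment(std::set<uint64_t>{0, 2});       // bsA = {A, C}
  Builder.addFragment(std::set<uint64_t>{1, 2, 3, 4}); // bsB = {B, C, D, E}
  // Builder.Fragments now contains the computed layout; as described above,
  // the five objects end up in a single fragment with A and C adjacent, so
  // bsA only has to span two objects.
}
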
diff --git a/include/llvm/Transforms/IPO/PassManagerBuilder.h b/include/llvm/Transforms/IPO/PassManagerBuilder.h
index b1426b4..65f4712 100644
--- a/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -19,7 +19,7 @@
namespace llvm {
class Pass;
-class TargetLibraryInfo;
+class TargetLibraryInfoImpl;
class TargetMachine;
// The old pass manager infrastructure is hidden in a legacy namespace now.
@@ -27,8 +27,6 @@ namespace legacy {
class FunctionPassManager;
class PassManagerBase;
}
-using legacy::FunctionPassManager;
-using legacy::PassManagerBase;
/// PassManagerBuilder - This class is used to set up a standard optimization
/// sequence for languages like C and C++, allowing some APIs to customize the
@@ -59,7 +57,7 @@ public:
/// Extensions are passed the builder itself (so they can see how it is
/// configured) as well as the pass manager to add stuff to.
typedef void (*ExtensionFn)(const PassManagerBuilder &Builder,
- PassManagerBase &PM);
+ legacy::PassManagerBase &PM);
enum ExtensionPointTy {
/// EP_EarlyAsPossible - This extension point allows adding passes before
/// any other transformations, allowing them to see the code as it is coming
@@ -105,7 +103,7 @@ public:
/// LibraryInfo - Specifies information about the runtime library for the
/// optimizer. If this is non-null, it is added to both the function and
/// per-module pass pipeline.
- TargetLibraryInfo *LibraryInfo;
+ TargetLibraryInfoImpl *LibraryInfo;
/// Inliner - Specifies the inliner to use. If this is non-null, it is
/// added to the per-module passes.
@@ -139,19 +137,20 @@ public:
void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
private:
- void addExtensionsToPM(ExtensionPointTy ETy, PassManagerBase &PM) const;
- void addInitialAliasAnalysisPasses(PassManagerBase &PM) const;
- void addLTOOptimizationPasses(PassManagerBase &PM);
+ void addExtensionsToPM(ExtensionPointTy ETy,
+ legacy::PassManagerBase &PM) const;
+ void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
+ void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
public:
/// populateFunctionPassManager - This fills in the function pass manager,
/// which is expected to be run on each function immediately as it is
/// generated. The idea is to reduce the size of the IR in memory.
- void populateFunctionPassManager(FunctionPassManager &FPM);
+ void populateFunctionPassManager(legacy::FunctionPassManager &FPM);
/// populateModulePassManager - This sets up the primary pass manager.
- void populateModulePassManager(PassManagerBase &MPM);
- void populateLTOPassManager(PassManagerBase &PM, TargetMachine *TM = nullptr);
+ void populateModulePassManager(legacy::PassManagerBase &MPM);
+ void populateLTOPassManager(legacy::PassManagerBase &PM);
};
/// Registers a function for adding a standard set of passes. This should be
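
Usage sketch of the renamed API (editorial, not part of this patch; the
helper name is invented): pass managers are now spelled through the legacy::
namespace and LibraryInfo holds a TargetLibraryInfoImpl.

#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

using namespace llvm;

void buildO2Pipeline(const Triple &TargetTriple) {
  PassManagerBuilder PMB;
  PMB.OptLevel = 2;
  // The builder owns LibraryInfo and deletes it in its destructor.
  PMB.LibraryInfo = new TargetLibraryInfoImpl(TargetTriple);

  legacy::PassManager MPM; // note the legacy:: qualification
  PMB.populateModulePassManager(MPM);
}
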
diff --git a/include/llvm/Transforms/InstCombine/InstCombine.h b/include/llvm/Transforms/InstCombine/InstCombine.h
new file mode 100644
index 0000000..f48ec13
--- /dev/null
+++ b/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -0,0 +1,46 @@
+//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the instcombine pass. This pass
+/// is suitable for use in the new pass manager. For a pass that works with the
+/// legacy pass manager, please look for \c createInstructionCombiningPass() in
+/// Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+
+namespace llvm {
+
+class InstCombinePass {
+ InstCombineWorklist Worklist;
+
+public:
+ static StringRef name() { return "InstCombinePass"; }
+
+ // Explicitly define constructors for MSVC.
+ InstCombinePass() {}
+ InstCombinePass(InstCombinePass &&Arg) : Worklist(std::move(Arg.Worklist)) {}
+ InstCombinePass &operator=(InstCombinePass &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ return *this;
+ }
+
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
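
A minimal sketch (editorial; the wrapper name is invented) of driving the new
pass directly. The analysis manager is passed by pointer, matching the run()
signature above.

#include "llvm/Transforms/InstCombine/InstCombine.h"

using namespace llvm;

PreservedAnalyses runInstCombineOnce(Function &F,
                                     AnalysisManager<Function> &AM) {
  InstCombinePass P;
  return P.run(F, &AM);
}
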
diff --git a/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
new file mode 100644
index 0000000..a6bad34
--- /dev/null
+++ b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -0,0 +1,116 @@
+//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+/// InstCombineWorklist - This is the worklist management logic for
+/// InstCombine.
+class InstCombineWorklist {
+ SmallVector<Instruction*, 256> Worklist;
+ DenseMap<Instruction*, unsigned> WorklistMap;
+
+  void operator=(const InstCombineWorklist &RHS) = delete;
+  InstCombineWorklist(const InstCombineWorklist &) = delete;
+public:
+ InstCombineWorklist() {}
+
+ InstCombineWorklist(InstCombineWorklist &&Arg)
+ : Worklist(std::move(Arg.Worklist)),
+ WorklistMap(std::move(Arg.WorklistMap)) {}
+ InstCombineWorklist &operator=(InstCombineWorklist &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ WorklistMap = std::move(RHS.WorklistMap);
+ return *this;
+ }
+
+ bool isEmpty() const { return Worklist.empty(); }
+
+ /// Add - Add the specified instruction to the worklist if it isn't already
+ /// in it.
+ void Add(Instruction *I) {
+ if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
+ DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+ Worklist.push_back(I);
+ }
+ }
+
+ void AddValue(Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ Add(I);
+ }
+
+  /// AddInitialGroup - Add the specified batch of stuff in reverse order,
+ /// which should only be done when the worklist is empty and when the group
+ /// has no duplicates.
+ void AddInitialGroup(Instruction *const *List, unsigned NumEntries) {
+ assert(Worklist.empty() && "Worklist must be empty to add initial group");
+ Worklist.reserve(NumEntries+16);
+ WorklistMap.resize(NumEntries);
+ DEBUG(dbgs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n");
+ for (unsigned Idx = 0; NumEntries; --NumEntries) {
+ Instruction *I = List[NumEntries-1];
+ WorklistMap.insert(std::make_pair(I, Idx++));
+ Worklist.push_back(I);
+ }
+ }
+
+ // Remove - remove I from the worklist if it exists.
+ void Remove(Instruction *I) {
+ DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+ if (It == WorklistMap.end()) return; // Not in worklist.
+
+ // Don't bother moving everything down, just null out the slot.
+ Worklist[It->second] = nullptr;
+
+ WorklistMap.erase(It);
+ }
+
+ Instruction *RemoveOne() {
+ Instruction *I = Worklist.pop_back_val();
+ WorklistMap.erase(I);
+ return I;
+ }
+
+ /// AddUsersToWorkList - When an instruction is simplified, add all users of
+ /// the instruction to the work lists because they might get more simplified
+ /// now.
+ ///
+ void AddUsersToWorkList(Instruction &I) {
+ for (User *U : I.users())
+ Add(cast<Instruction>(U));
+ }
+
+
+ /// Zap - check that the worklist is empty and nuke the backing store for
+ /// the map if it is large.
+ void Zap() {
+ assert(WorklistMap.empty() && "Worklist empty, but map not?");
+
+ // Do an explicit clear, this shrinks the map if needed.
+ WorklistMap.clear();
+ }
+};
+
+} // end namespace llvm.
+
+#undef DEBUG_TYPE
+
+#endif
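
Sketch of the drain loop this worklist is designed for (editorial; the
function name is invented). The initial group is added in reverse, so popping
from the back visits it in forward order, and Remove() nulls out slots in
place, so null entries must be skipped.

#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"

using namespace llvm;

void drainWorklist(SmallVectorImpl<Instruction *> &Initial) {
  InstCombineWorklist Worklist;
  Worklist.AddInitialGroup(Initial.data(),
                           static_cast<unsigned>(Initial.size()));
  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (!I) // slot was nulled by an in-place Remove()
      continue;
    // ...simplify I here, then revisit anything that may now fold:
    Worklist.AddUsersToWorkList(*I);
  }
  Worklist.Zap();
}
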
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index 87422df..8fac6ca 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -15,6 +15,7 @@
#define LLVM_TRANSFORMS_INSTRUMENTATION_H
#include "llvm/ADT/StringRef.h"
+#include <vector>
#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
inline void *getDFSanArgTLSPtrForJIT() {
@@ -63,6 +64,18 @@ struct GCOVOptions {
ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
GCOVOptions::getDefault());
+/// Options for the frontend instrumentation based profiling pass.
+struct InstrProfOptions {
+ InstrProfOptions() : NoRedZone(false) {}
+
+ // Add the 'noredzone' attribute to added runtime library calls.
+ bool NoRedZone;
+};
+
+/// Insert frontend instrumentation based profiling.
+ModulePass *createInstrProfilingPass(
+ const InstrProfOptions &Options = InstrProfOptions());
+
// Insert AddressSanitizer (address sanity checking) instrumentation
FunctionPass *createAddressSanitizerFunctionPass();
ModulePass *createAddressSanitizerModulePass();
@@ -74,17 +87,17 @@ FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0);
FunctionPass *createThreadSanitizerPass();
// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
-ModulePass *createDataFlowSanitizerPass(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = nullptr,
- void *(*getRetValTLS)() = nullptr);
+ModulePass *createDataFlowSanitizerPass(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+ void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
// Insert SanitizerCoverage instrumentation.
ModulePass *createSanitizerCoverageModulePass(int CoverageLevel);
#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
-inline ModulePass *createDataFlowSanitizerPassForJIT(StringRef ABIListFile =
- StringRef()) {
- return createDataFlowSanitizerPass(ABIListFile, getDFSanArgTLSPtrForJIT,
+inline ModulePass *createDataFlowSanitizerPassForJIT(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>()) {
+ return createDataFlowSanitizerPass(ABIListFiles, getDFSanArgTLSPtrForJIT,
getDFSanRetValTLSPtrForJIT);
}
#endif
@@ -93,37 +106,6 @@ inline ModulePass *createDataFlowSanitizerPassForJIT(StringRef ABIListFile =
// checking on loads, stores, and other memory intrinsics.
FunctionPass *createBoundsCheckingPass();
-/// createDebugIRPass - Enable interactive stepping through LLVM IR in LLDB (or
-/// GDB) and generate a file with the LLVM IR to be
-/// displayed in the debugger.
-///
-/// Existing debug metadata is preserved (but may be modified) in order to allow
-/// accessing variables in the original source. The line table and file
-/// information is modified to correspond to the lines in the LLVM IR. If
-/// Filename and Directory are empty, a file name is generated based on existing
-/// debug information. If no debug information is available, a temporary file
-/// name is generated.
-///
-/// @param HideDebugIntrinsics Omit debug intrinsics in emitted IR source file.
-/// @param HideDebugMetadata Omit debug metadata in emitted IR source file.
-/// @param Directory Embed this directory in the debug information.
-/// @param Filename Embed this file name in the debug information.
-ModulePass *createDebugIRPass(bool HideDebugIntrinsics,
- bool HideDebugMetadata,
- StringRef Directory = StringRef(),
- StringRef Filename = StringRef());
-
-/// createDebugIRPass - Enable interactive stepping through LLVM IR in LLDB
-/// (or GDB) with an existing IR file on disk. When creating
-/// a DebugIR pass with this function, no source file is
-/// output to disk and the existing one is unmodified. Debug
-/// metadata in the Module is created/updated to point to
-/// the existing textual IR file on disk.
-/// NOTE: If the IR file to be debugged is not on disk, use the version of this
-/// function with parameters in order to generate the file that will be
-/// seen by the debugger.
-ModulePass *createDebugIRPass();
-
} // End llvm namespace
#endif
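
Usage sketch for the new options struct (editorial; the helper name is
invented):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Instrumentation.h"

using namespace llvm;

void addProfiling(legacy::PassManagerBase &PM) {
  InstrProfOptions Options;
  Options.NoRedZone = true; // put 'noredzone' on emitted runtime calls
  PM.add(createInstrProfilingPass(Options));
}
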
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index 5dcd899..558b81e 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -21,6 +21,7 @@ namespace llvm {
class BasicBlockPass;
class FunctionPass;
+class ModulePass;
class Pass;
class GetElementPtrInst;
class PassInfo;
@@ -81,6 +82,13 @@ FunctionPass *createAggressiveDCEPass();
//===----------------------------------------------------------------------===//
//
+// BitTrackingDCE - This pass uses a bit-tracking DCE algorithm in order to
+// remove computations of dead bits.
+//
+FunctionPass *createBitTrackingDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
//
FunctionPass *createSROAPass(bool RequiresDomTree = true);
@@ -98,6 +106,13 @@ FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1,
//===----------------------------------------------------------------------===//
//
+// InductiveRangeCheckElimination - Transform loops to elide range checks on
+// linear functions of the induction variable.
+//
+Pass *createInductiveRangeCheckEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
// InductionVariableSimplify - Transform induction variables in a program to all
// use a single canonical induction variable per loop.
//
@@ -130,7 +145,7 @@ Pass *createLICMPass();
//
Pass *createLoopStrengthReducePass();
-Pass *createGlobalMergePass(const TargetMachine *TM = nullptr);
+Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset);
//===----------------------------------------------------------------------===//
//
@@ -405,6 +420,26 @@ createSeparateConstOffsetFromGEPPass(const TargetMachine *TM = nullptr,
//
BasicBlockPass *createLoadCombinePass();
+FunctionPass *createStraightLineStrengthReducePass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// PlaceSafepoints - Rewrite any IR calls to gc.statepoints and insert any
+// safepoint polls (method entry, backedge) that might be required. This pass
+// does not generate explicit relocation sequences - that's handled by
+// RewriteStatepointsForGC which can be run at an arbitrary point in the pass
+// order following this pass.
+//
+ModulePass *createPlaceSafepointsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// RewriteStatepointsForGC - Rewrite any gc.statepoints which do not yet have
+// explicit relocations to include explicit relocations.
+//
+FunctionPass *createRewriteStatepointsForGCPass();
+
} // End llvm namespace
#endif
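
Pipeline sketch for the two new GC passes (editorial; the helper name is
invented). Per the comments above, safepoint placement runs first and the
relocation rewriting may run at any later point.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

void addGCPasses(legacy::PassManagerBase &PM) {
  PM.add(createPlaceSafepointsPass());
  PM.add(createRewriteStatepointsForGCPass());
}
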
diff --git a/include/llvm/Transforms/Scalar/EarlyCSE.h b/include/llvm/Transforms/Scalar/EarlyCSE.h
new file mode 100644
index 0000000..e3dd3c0
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -0,0 +1,39 @@
+//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for a simple, fast CSE pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+class EarlyCSEPass {
+public:
+ static StringRef name() { return "EarlyCSEPass"; }
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
new file mode 100644
index 0000000..4028320
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -0,0 +1,40 @@
+//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerExpectIntrinsic pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerExpectIntrinsicPass {
+public:
+ static StringRef name() { return "LowerExpectIntrinsicPass"; }
+
+ /// \brief Run the pass over the function.
+ ///
+  /// This will lower all of the expect intrinsic calls in this function into
+ /// branch weight metadata. That metadata will subsequently feed the analysis
+ /// of the probabilities and frequencies of the CFG. After running this pass,
+ /// no more expect intrinsics remain, allowing the rest of the optimizer to
+ /// ignore them.
+ PreservedAnalyses run(Function &F);
+};
+
+}
+
+#endif
diff --git a/include/llvm/Transforms/Scalar/SimplifyCFG.h b/include/llvm/Transforms/Scalar/SimplifyCFG.h
new file mode 100644
index 0000000..ef28e0f
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -0,0 +1,46 @@
+//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the pass responsible for both
+/// simplifying and canonicalizing the CFG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A pass to simplify and canonicalize the CFG of a function.
+///
+/// This pass iteratively simplifies the entire CFG of a function, removing
+/// unnecessary control flows and bringing it into the canonical form expected
+/// by the rest of the mid-level optimizer.
+class SimplifyCFGPass {
+ int BonusInstThreshold;
+
+public:
+ static StringRef name() { return "SimplifyCFGPass"; }
+
+ /// \brief Construct a pass with the default thresholds.
+ SimplifyCFGPass();
+
+ /// \brief Construct a pass with a specific bonus threshold.
+ SimplifyCFGPass(int BonusInstThreshold);
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
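
A short sketch (editorial; the wrapper name is invented) showing the
threshold constructor:

#include "llvm/Transforms/Scalar/SimplifyCFG.h"

using namespace llvm;

PreservedAnalyses simplifyWithBonus(Function &F,
                                    AnalysisManager<Function> &AM) {
  SimplifyCFGPass P(/*BonusInstThreshold=*/2);
  return P.run(F, &AM);
}
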
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 19acf5b..710db03 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -23,10 +23,11 @@
namespace llvm {
class AliasAnalysis;
+class MemoryDependenceAnalysis;
class DominatorTree;
+class LoopInfo;
class Instruction;
class MDNode;
-class Pass;
class ReturnInst;
class TargetLibraryInfo;
class TerminatorInst;
@@ -39,7 +40,8 @@ void DeleteDeadBlock(BasicBlock *BB);
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
-void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = nullptr);
+void FoldSingleEntryPHINodes(BasicBlock *BB, AliasAnalysis *AA = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
/// is dead. Also recursively delete any operands that become dead as
@@ -50,7 +52,10 @@ bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
-bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = nullptr);
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ AliasAnalysis *AA = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
@@ -70,18 +75,62 @@ void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);
+/// \brief Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ LoopInfo *LI;
+ bool MergeIdenticalEdges;
+ bool DontDeleteUselessPHIs;
+ bool PreserveLCSSA;
+
+ CriticalEdgeSplittingOptions()
+ : AA(nullptr), DT(nullptr), LI(nullptr), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ /// \brief Basic case of setting up all the analysis.
+ CriticalEdgeSplittingOptions(AliasAnalysis *AA, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr)
+ : AA(AA), DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ /// \brief A common pattern is to preserve the dominator tree and loop
+ /// info but not care about AA.
+ CriticalEdgeSplittingOptions(DominatorTree *DT, LoopInfo *LI)
+ : AA(nullptr), DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+ MergeIdenticalEdges = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+ DontDeleteUselessPHIs = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+ PreserveLCSSA = true;
+ return *this;
+ }
+};
+
/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
-/// split the critical edge. This will update DominatorTree and
-/// DominatorFrontier information if it is available, thus calling this pass
-/// will not invalidate either of them. This returns the new block if the edge
-/// was split, null otherwise.
+/// split the critical edge. This will update the analyses passed in through
+/// the option struct. This returns the new block if the edge was split, null
+/// otherwise.
///
-/// If MergeIdenticalEdges is true (not the default), *all* edges from TI to the
-/// specified successor will be merged into the same critical edge block.
-/// This is most commonly interesting with switch instructions, which may
-/// have many edges to any one destination. This ensures that all edges to that
-/// dest go to one block instead of each going to a different block, but isn't
-/// the standard definition of a "critical edge".
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination. This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst. Splitting these edges will almost always create an invalid
@@ -89,14 +138,15 @@ void ReplaceInstWithInst(Instruction *From, Instruction *To);
/// to.
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
- Pass *P = nullptr,
- bool MergeIdenticalEdges = false,
- bool DontDeleteUselessPHIs = false,
- bool SplitLandingPads = false);
-
-inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
- Pass *P = nullptr) {
- return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(), P);
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+ Options);
}
/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return
@@ -105,55 +155,62 @@ inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
/// function. The analyses specified in the options struct are updated as
/// described above.
inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
- Pass *P = nullptr) {
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
bool MadeChange = false;
TerminatorInst *TI = (*PI)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
if (TI->getSuccessor(i) == Succ)
- MadeChange |= !!SplitCriticalEdge(TI, i, P);
+ MadeChange |= !!SplitCriticalEdge(TI, i, Options);
return MadeChange;
}
/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge
/// and return true, otherwise return false. This method requires that there be
-/// an edge between the two blocks. If P is specified, it updates the analyses
-/// described above.
-inline BasicBlock *SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
- Pass *P = nullptr,
- bool MergeIdenticalEdges = false,
- bool DontDeleteUselessPHIs = false) {
+/// an edge between the two blocks. It updates the analyses
+/// passed in through the options struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
TerminatorInst *TI = Src->getTerminator();
unsigned i = 0;
while (1) {
assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
if (TI->getSuccessor(i) == Dst)
- return SplitCriticalEdge(TI, i, P, MergeIdenticalEdges,
- DontDeleteUselessPHIs);
+ return SplitCriticalEdge(TI, i, Options);
++i;
}
}
// SplitAllCriticalEdges - Loop over all of the edges in the CFG,
-// breaking critical edges as they are found. Pass P must not be NULL.
+// breaking critical edges as they are found.
// Returns the number of broken edges.
-unsigned SplitAllCriticalEdges(Function &F, Pass *P);
+unsigned SplitAllCriticalEdges(Function &F,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
-/// SplitEdge - Split the edge connecting specified block. Pass P must
-/// not be NULL.
-BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To, Pass *P);
+/// SplitEdge - Split the edge connecting the specified blocks.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
/// SplitBlock - Split the specified block at the specified instruction - every
/// thing before SplitPt stays in Old and everything starting with SplitPt moves
/// to a new block. The two blocks are joined by an unconditional branch and
/// the loop info is updated.
///
-BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
-/// SplitBlockPredecessors - This method transforms BB by introducing a new
-/// basic block into the function, and moving some of the predecessors of BB to
-/// be predecessors of the new block. The new predecessors are indicated by the
-/// Preds array, which has NumPreds elements in it. The new block is given a
-/// suffix of 'Suffix'. This function returns the new block.
+/// SplitBlockPredecessors - This method introduces at least one new basic block
+/// into the function and moves some of the predecessors of BB to be
+/// predecessors of the new block. The new predecessors are indicated by the
+/// Preds array. The new block is given a suffix of 'Suffix'. Returns new basic
+/// block to which predecessors from Preds are now pointing.
+///
+/// If BB is a landingpad block, then an additional basic block might be
+/// introduced. It will have Suffix+".split_lp". See SplitLandingPadPredecessors
+/// for more details on this case.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// DominanceFrontier, LoopInfo, and LCSSA but no other analyses.
@@ -161,8 +218,12 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
/// complicated to handle the case where one of the edges being split
/// is an exit of a loop with other exits).
///
-BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
- const char *Suffix, Pass *P = nullptr);
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix,
+ AliasAnalysis *AA = nullptr,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
@@ -177,9 +238,14 @@ BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
/// case where one of the edges being split is an exit of a loop with other
/// exits).
///
-void SplitLandingPadPredecessors(BasicBlock *OrigBB,ArrayRef<BasicBlock*> Preds,
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+ ArrayRef<BasicBlock *> Preds,
const char *Suffix, const char *Suffix2,
- Pass *P, SmallVectorImpl<BasicBlock*> &NewBBs);
+ SmallVectorImpl<BasicBlock *> &NewBBs,
+ AliasAnalysis *AA = nullptr,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
/// FoldReturnIntoUncondBranch - This method duplicates the specified return
/// instruction into a predecessor which ends in an unconditional branch. If
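
Usage sketch for the options struct (editorial; the helper name is invented):
the chained setters replace the old boolean parameters, and the analyses
placed in the struct are kept up to date.

#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

unsigned splitAllEdges(Function &F, DominatorTree *DT, LoopInfo *LI) {
  CriticalEdgeSplittingOptions Options(DT, LI);
  Options.setMergeIdenticalEdges().setPreserveLCSSA();
  return SplitAllCriticalEdges(F, Options);
}
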
diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h
index 1e407fb..6387c16 100644
--- a/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -115,20 +115,6 @@ namespace llvm {
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
-
- /// SimplifyFortifiedLibCalls - Helper class for folding checked library
- /// calls (e.g. __strcpy_chk) into their unchecked counterparts.
- class SimplifyFortifiedLibCalls {
- protected:
- CallInst *CI;
- virtual void replaceCall(Value *With) = 0;
- virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
- bool isString) const = 0;
-
- public:
- virtual ~SimplifyFortifiedLibCalls();
- bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
- };
}
#endif
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 740d725..d1563ef 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -44,7 +44,7 @@ class Loop;
class LoopInfo;
class AllocaInst;
class AliasAnalysis;
-class AssumptionTracker;
+class AssumptionCacheTracker;
/// CloneModule - Return an exact copy of the specified module
///
@@ -95,8 +95,7 @@ struct ClonedCodeInfo {
/// function, you can specify a ClonedCodeInfo object with the optional fifth
/// parameter.
///
-BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- ValueToValueMapTy &VMap,
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
const Twine &NameSuffix = "", Function *F = nullptr,
ClonedCodeInfo *CodeInfo = nullptr);
@@ -112,8 +111,7 @@ BasicBlock *CloneBasicBlock(const BasicBlock *BB,
/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
/// mappings, and debug info metadata will not be cloned.
///
-Function *CloneFunction(const Function *F,
- ValueToValueMapTy &VMap,
+Function *CloneFunction(const Function *F, ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = nullptr);
@@ -127,14 +125,49 @@ Function *CloneFunction(const Function *F,
/// mappings.
///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueToValueMapTy &VMap,
- bool ModuleLevelChanges,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
+/// A helper class used with CloneAndPruneIntoFromInst to change the default
+/// behavior while instructions are being cloned.
+class CloningDirector {
+public:
+ /// This enumeration describes the way CloneAndPruneIntoFromInst should
+ /// proceed after the CloningDirector has examined an instruction.
+ enum CloningAction {
+    /// Continue cloning the instruction (default behavior).
+    CloneInstruction,
+    /// Skip this instruction but continue cloning the current basic block.
+    SkipInstruction,
+    /// Skip this instruction and stop cloning the current basic block.
+    StopCloningBB
+ };
+
+ virtual ~CloningDirector() {}
+
+ /// Subclasses must override this function to customize cloning behavior.
+ virtual CloningAction handleInstruction(ValueToValueMapTy &VMap,
+ const Instruction *Inst,
+ BasicBlock *NewBB) = 0;
+
+ virtual ValueMapTypeRemapper *getTypeRemapper() { return nullptr; }
+ virtual ValueMaterializer *getValueMaterializer() { return nullptr; }
+};
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+ const Instruction *StartingInst,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ const DataLayout *DL = nullptr,
+ CloningDirector *Director = nullptr);
+
+
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
@@ -147,8 +180,7 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// mappings.
///
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- ValueToValueMapTy &VMap,
- bool ModuleLevelChanges,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
@@ -162,19 +194,19 @@ public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
const DataLayout *DL = nullptr,
AliasAnalysis *AA = nullptr,
- AssumptionTracker *AT = nullptr)
- : CG(cg), DL(DL), AA(AA), AT(AT) {}
+ AssumptionCacheTracker *ACT = nullptr)
+ : CG(cg), DL(DL), AA(AA), ACT(ACT) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
const DataLayout *DL;
AliasAnalysis *AA;
- AssumptionTracker *AT;
+ AssumptionCacheTracker *ACT;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
- SmallVector<AllocaInst*, 4> StaticAllocas;
+ SmallVector<AllocaInst *, 4> StaticAllocas;
/// InlinedCalls - InlineFunction fills this in with callsites that were
/// inlined from the callee. This is only filled in if CG is non-null.
@@ -196,9 +228,12 @@ public:
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
///
-bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, bool InsertLifetime = true);
-bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, bool InsertLifetime = true);
-bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI, bool InsertLifetime = true);
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ bool InsertLifetime = true);
} // End llvm namespace
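
Sketch of a CloningDirector subclass (editorial; the class name is invented):
skip debug intrinsics while cloning and otherwise keep the default behavior.

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;

class SkipDebugIntrinsicsDirector : public CloningDirector {
public:
  CloningAction handleInstruction(ValueToValueMapTy &VMap,
                                  const Instruction *Inst,
                                  BasicBlock *NewBB) override {
    if (isa<DbgInfoIntrinsic>(Inst))
      return SkipInstruction; // leave debug intrinsics out of the clone
    return CloneInstruction;  // default behavior
  }
};
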
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index e89e5e5..463ab96 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -31,10 +31,9 @@ class DbgDeclareInst;
class StoreInst;
class LoadInst;
class Value;
-class Pass;
class PHINode;
class AllocaInst;
-class AssumptionTracker;
+class AssumptionCache;
class ConstantExpr;
class DataLayout;
class TargetLibraryInfo;
@@ -115,7 +114,7 @@ void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
/// between them, moving the instructions in the predecessor into BB. This
/// deletes the predecessor block.
///
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = nullptr);
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
@@ -138,9 +137,8 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- unsigned BonusInstThreshold,
- const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr);
+ unsigned BonusInstThreshold, const DataLayout *TD = nullptr,
+ AssumptionCache *AC = nullptr);
/// FlattenCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
@@ -176,17 +174,17 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr,
+ AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V,
const DataLayout *TD = nullptr,
- AssumptionTracker *AT = nullptr,
+ AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr) {
- return getOrEnforceKnownAlignment(V, 0, TD, AT, CxtI, DT);
+ return getOrEnforceKnownAlignment(V, 0, TD, AC, CxtI, DT);
}
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
@@ -276,10 +274,11 @@ bool LowerDbgDeclare(Function &F);
/// an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
-/// replaceDbgDeclareForAlloca - Replaces llvm.dbg.declare instruction when
-/// alloca is replaced with a new value.
+/// \brief Replaces llvm.dbg.declare instruction when an alloca is replaced with
+/// a new value. If Deref is true, an additional DW_OP_deref is prepended to
+/// the expression.
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
- DIBuilder &Builder);
+ DIBuilder &Builder, bool Deref);
/// \brief Remove all blocks that can not be reached from the function's entry.
///
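
Caller sketch for the new Deref flag (editorial; the helper name is
invented). Deref is true here because NewAddr holds a pointer to the value
rather than being the value's storage itself.

#include "llvm/IR/DIBuilder.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

bool updateDeclare(AllocaInst *AI, Value *NewAddr, DIBuilder &DIB) {
  return replaceDbgDeclareForAlloca(AI, NewAddr, DIB, /*Deref=*/true);
}
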
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index fdae80d..bb80f20 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -14,16 +14,33 @@
#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Dominators.h"
+
namespace llvm {
class AliasAnalysis;
-class AssumptionTracker;
+class AliasSet;
+class AliasSetTracker;
+class AssumptionCache;
class BasicBlock;
class DataLayout;
class DominatorTree;
class Loop;
class LoopInfo;
class Pass;
+class PredIteratorCache;
class ScalarEvolution;
+class TargetLibraryInfo;
+
+/// \brief Captures loop safety information.
+/// It keeps track of whether the loop or its header may throw an exception.
+struct LICMSafetyInfo {
+  bool MayThrow;       // The current loop contains an instruction which
+                       // may throw.
+  bool HeaderMayThrow; // Same as previous, but specific to the loop header.
+  LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false)
+  {}
+};
BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
@@ -36,7 +53,7 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr,
const DataLayout *DL = nullptr,
- AssumptionTracker *AT = nullptr);
+ AssumptionCache *AC = nullptr);
/// \brief Put loop into LCSSA form.
///
@@ -49,7 +66,8 @@ bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE = nullptr);
/// \brief Put a loop nest into LCSSA form.
///
@@ -60,8 +78,51 @@ bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSARecursively(Loop &L, DominatorTree &DT,
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
ScalarEvolution *SE = nullptr);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t. the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// DataLayout, TargetLibraryInfo, Loop, the AliasSet information for all
+/// instructions of the loop, and the loop safety information as arguments.
+/// Returns true if any changes were made.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ const DataLayout *, TargetLibraryInfo *, Loop *,
+ AliasSetTracker *, LICMSafetyInfo *);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t. the DominatorTree. This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
+/// TargetLibraryInfo, Loop, the AliasSet information for all instructions of
+/// the loop, and the loop safety information as arguments. Returns true if any
+/// changes were made.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ const DataLayout *, TargetLibraryInfo *, Loop *,
+ AliasSetTracker *, LICMSafetyInfo *);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes an AliasSet, a vector of loop exit blocks, a
+/// vector of loop exit block insertion points, a PredIteratorCache, LoopInfo,
+/// DominatorTree, Loop, the AliasSet information for all instructions of the
+/// loop, and the loop safety information as arguments. Returns true if any
+/// changes were made.
+bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock*> &,
+ SmallVectorImpl<Instruction*> &,
+ PredIteratorCache &, LoopInfo *,
+ DominatorTree *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Computes safety information for a loop: it checks the loop body and
+/// header for the possibility of instructions that may throw an exception.
+/// It takes a LICMSafetyInfo and a Loop as arguments, and updates the safety
+/// information in the LICMSafetyInfo argument.
+void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *);
+
}
#endif
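
Caller sketch for the safety helper (editorial; the function name is
invented): compute the information once per loop, then consult it before
sinking or hoisting.

#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

void analyzeLoopSafety(Loop *CurLoop) {
  LICMSafetyInfo SafetyInfo;
  computeLICMSafetyInfo(&SafetyInfo, CurLoop);
  if (SafetyInfo.MayThrow) {
    // Some instruction in the loop may throw; code motion must be more
    // conservative here.
  }
}
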
diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 3fdd5e9..d0602bf 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -22,7 +22,7 @@ namespace llvm {
class AllocaInst;
class DominatorTree;
class AliasSetTracker;
-class AssumptionTracker;
+class AssumptionCache;
/// \brief Return true if this alloca is legal for promotion.
///
@@ -43,7 +43,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
/// made to the IR.
void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
AliasSetTracker *AST = nullptr,
- AssumptionTracker *AT = nullptr);
+ AssumptionCache *AC = nullptr);
} // End llvm namespace
diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h
index 7874a5f..19e2a4a 100644
--- a/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -118,8 +118,8 @@ public:
private:
Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
- void operator=(const SSAUpdater&) LLVM_DELETED_FUNCTION;
- SSAUpdater(const SSAUpdater&) LLVM_DELETED_FUNCTION;
+ void operator=(const SSAUpdater&) = delete;
+ SSAUpdater(const SSAUpdater&) = delete;
};
/// \brief Helper class for promoting a collection of loads and stores into SSA
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 6765ac1..08358e1 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -16,6 +16,7 @@
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
namespace llvm {
@@ -27,20 +28,67 @@ class TargetLibraryInfo;
class BasicBlock;
class Function;
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
+/// replacing them, when possible, with their non-checking counterparts.
+/// Other optimizations can also be done, but it's possible to disable them and
+/// only simplify needless use of the checking versions (when the object size
+/// is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+ const DataLayout *DL;
+ const TargetLibraryInfo *TLI;
+ bool OnlyLowerUnknownSize;
+
+public:
+ FortifiedLibCallSimplifier(const DataLayout *DL, const TargetLibraryInfo *TLI,
+ bool OnlyLowerUnknownSize = false);
+
+ /// \brief Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+ // Str/Stp cpy are similar enough to be handled in the same functions.
+ Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+ Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+
+ /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+ /// to the non-fortified version.
+ bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+ unsigned SizeOp, bool isString);
+};
+
/// LibCallSimplifier - This class implements a collection of optimizations
/// that replace well formed calls to library functions with a more optimal
/// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
class LibCallSimplifier {
private:
+ FortifiedLibCallSimplifier FortifiedSimplifier;
const DataLayout *DL;
const TargetLibraryInfo *TLI;
bool UnsafeFPShrink;
+ function_ref<void(Instruction *, Value *)> Replacer;
+
+ /// \brief Internal wrapper for RAUW that is the default implementation.
+ ///
+ /// Other users may provide an alternate function with this signature instead
+ /// of this one.
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With);
-protected:
- ~LibCallSimplifier() {}
+ /// \brief Replace an instruction's uses with a value using our replacer.
+ void replaceAllUsesWith(Instruction *I, Value *With);
public:
- LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI);
+ LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault);
/// optimizeCall - Take the given call instruction and return a more
/// optimal value to replace the instruction with or 0 if a more
@@ -48,22 +96,10 @@ public:
/// be equal to the instruction being optimized. In this case all
/// other instructions that use the given instruction were modified
/// and the given instruction is dead.
+ /// The call must not be an indirect call.
Value *optimizeCall(CallInst *CI);
- /// replaceAllUsesWith - This method is used when the library call
- /// simplifier needs to replace instructions other than the library
- /// call being modified.
- virtual void replaceAllUsesWith(Instruction *I, Value *With) const;
-
private:
- // Fortified Library Call Optimizations
- Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStrCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStpCpyChk(CallInst *CI, IRBuilder<> &B);
- Value *optimizeStrNCpyChk(CallInst *CI, IRBuilder<> &B);
-
// String and Memory Library Call Optimizations
Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
@@ -84,6 +120,8 @@ private:
Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+ // Wrapper for all String/Memory Library Call Optimizations
+ Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
// Math Library Optimizations
Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B, bool CheckRetType);
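
Sketch of the new replacer hook (editorial; the helper name is invented): a
callback now stands in for the old virtual replaceAllUsesWith(), letting the
caller collect dead instructions instead of subclassing.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"

using namespace llvm;

Value *trySimplify(CallInst *CI, const DataLayout *DL,
                   const TargetLibraryInfo *TLI,
                   SmallVectorImpl<Instruction *> &DeadInsts) {
  auto Replacer = [&](Instruction *I, Value *With) {
    I->replaceAllUsesWith(With);
    DeadInsts.push_back(I); // caller erases dead instructions later
  };
  LibCallSimplifier Simplifier(DL, TLI, Replacer);
  return Simplifier.optimizeCall(CI); // null if nothing could be simplified
}
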
diff --git a/include/llvm/Transforms/Utils/SymbolRewriter.h b/include/llvm/Transforms/Utils/SymbolRewriter.h
index af79372..48dea04 100644
--- a/include/llvm/Transforms/Utils/SymbolRewriter.h
+++ b/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -60,10 +60,10 @@ namespace SymbolRewriter {
/// select the symbols to rewrite. This descriptor list is passed to the
/// SymbolRewriter pass.
class RewriteDescriptor : public ilist_node<RewriteDescriptor> {
- RewriteDescriptor(const RewriteDescriptor &) LLVM_DELETED_FUNCTION;
+ RewriteDescriptor(const RewriteDescriptor &) = delete;
const RewriteDescriptor &
- operator=(const RewriteDescriptor &) LLVM_DELETED_FUNCTION;
+ operator=(const RewriteDescriptor &) = delete;
public:
enum class Type {
diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h
index 0b88d25..04d9ee1 100644
--- a/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -16,20 +16,25 @@
#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
-class AssumptionTracker;
+class AssumptionCache;
class Loop;
class LoopInfo;
class LPPassManager;
+class MDNode;
class Pass;
bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
unsigned TripMultiple, LoopInfo *LI, Pass *PP,
- LPPassManager *LPM, AssumptionTracker *AT);
+ LPPassManager *LPM, AssumptionCache *AC);
bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
LPPassManager* LPM);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
}
#endif
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index 5774763..047ab81 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -71,20 +71,23 @@ namespace llvm {
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
+ Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapMetadata - provide versions that preserve type safety for MDNodes.
+ MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
ValueMapTypeRemapper *TypeMapper = nullptr,
ValueMaterializer *Materializer = nullptr);
- /// MapValue - provide versions that preserve type safety for MDNode and
- /// Constants.
- inline MDNode *MapValue(const MDNode *V, ValueToValueMapTy &VM,
- RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = nullptr,
- ValueMaterializer *Materializer = nullptr) {
- return cast<MDNode>(MapValue((const Value*)V, VM, Flags, TypeMapper,
- Materializer));
- }
+ /// MapValue - provide versions that preserve type safety for Constants.
inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
ValueMapTypeRemapper *TypeMapper = nullptr,
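
Sketch of the type-safe metadata mapping (editorial; the helper name is
invented), replacing the old MapValue() cast for MDNodes:

#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;

MDNode *remapLoopID(const MDNode *LoopID, ValueToValueMapTy &VM) {
  return MapMetadata(LoopID, VM, RF_NoModuleLevelChanges);
}
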
diff --git a/include/llvm/Transforms/Utils/VectorUtils.h b/include/llvm/Transforms/Utils/VectorUtils.h
index 83871fc..9f0fb19 100644
--- a/include/llvm/Transforms/Utils/VectorUtils.h
+++ b/include/llvm/Transforms/Utils/VectorUtils.h
@@ -14,9 +14,9 @@
#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/IR/Intrinsics.h"
namespace llvm {
@@ -59,7 +59,7 @@ static inline bool isTriviallyVectorizable(Intrinsic::ID ID) {
}
}
-static bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
+static inline bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
switch (ID) {
case Intrinsic::ctlz: