author     Stephen Hines <srhines@google.com>  2014-04-23 16:57:46 -0700
committer  Stephen Hines <srhines@google.com>  2014-04-24 15:53:16 -0700
commit     36b56886974eae4f9c5ebc96befd3e7bfe5de338 (patch)
tree       e6cfb69fbbd937f450eeb83bfb83b9da3b01275a /include/llvm/IR
parent     69a8640022b04415ae9fac62f8ab090601d8f889 (diff)
Update to LLVM 3.5a.
Change-Id: Ifadecab779f128e62e430c2b4f6ddd84953ed617
Diffstat (limited to 'include/llvm/IR')
-rw-r--r--  include/llvm/IR/Argument.h | 10
-rw-r--r--  include/llvm/IR/AssemblyAnnotationWriter.h | 63
-rw-r--r--  include/llvm/IR/Attributes.h | 7
-rw-r--r--  include/llvm/IR/AutoUpgrade.h | 66
-rw-r--r--  include/llvm/IR/BasicBlock.h | 2
-rw-r--r--  include/llvm/IR/CFG.h | 383
-rw-r--r--  include/llvm/IR/CMakeLists.txt | 4
-rw-r--r--  include/llvm/IR/CallSite.h | 355
-rw-r--r--  include/llvm/IR/CallingConv.h | 16
-rw-r--r--  include/llvm/IR/Constant.h | 4
-rw-r--r--  include/llvm/IR/ConstantFolder.h | 238
-rw-r--r--  include/llvm/IR/ConstantRange.h | 272
-rw-r--r--  include/llvm/IR/Constants.h | 48
-rw-r--r--  include/llvm/IR/DIBuilder.h | 705
-rw-r--r--  include/llvm/IR/DataLayout.h | 198
-rw-r--r--  include/llvm/IR/DebugInfo.h | 929
-rw-r--r--  include/llvm/IR/DebugLoc.h | 120
-rw-r--r--  include/llvm/IR/DerivedTypes.h | 22
-rw-r--r--  include/llvm/IR/DiagnosticInfo.h | 240
-rw-r--r--  include/llvm/IR/DiagnosticPrinter.h | 87
-rw-r--r--  include/llvm/IR/Dominators.h | 190
-rw-r--r--  include/llvm/IR/Function.h | 23
-rw-r--r--  include/llvm/IR/GVMaterializer.h | 62
-rw-r--r--  include/llvm/IR/GetElementPtrTypeIterator.h | 113
-rw-r--r--  include/llvm/IR/GlobalAlias.h | 20
-rw-r--r--  include/llvm/IR/GlobalValue.h | 80
-rw-r--r--  include/llvm/IR/GlobalVariable.h | 8
-rw-r--r--  include/llvm/IR/IRBuilder.h | 45
-rw-r--r--  include/llvm/IR/IRPrintingPasses.h | 85
-rw-r--r--  include/llvm/IR/InlineAsm.h | 5
-rw-r--r--  include/llvm/IR/InstIterator.h | 147
-rw-r--r--  include/llvm/IR/InstVisitor.h | 289
-rw-r--r--  include/llvm/IR/InstrTypes.h | 13
-rw-r--r--  include/llvm/IR/Instruction.h | 26
-rw-r--r--  include/llvm/IR/Instructions.h | 233
-rw-r--r--  include/llvm/IR/IntrinsicInst.h | 9
-rw-r--r--  include/llvm/IR/Intrinsics.h | 10
-rw-r--r--  include/llvm/IR/Intrinsics.td | 58
-rw-r--r--  include/llvm/IR/IntrinsicsAArch64.td | 201
-rw-r--r--  include/llvm/IR/IntrinsicsARM.td | 90
-rw-r--r--  include/llvm/IR/IntrinsicsARM64.td | 628
-rw-r--r--  include/llvm/IR/IntrinsicsMips.td | 46
-rw-r--r--  include/llvm/IR/IntrinsicsNVVM.td | 10
-rw-r--r--  include/llvm/IR/IntrinsicsX86.td | 453
-rw-r--r--  include/llvm/IR/IntrinsicsXCore.td | 3
-rw-r--r--  include/llvm/IR/LLVMContext.h | 35
-rw-r--r--  include/llvm/IR/LeakDetector.h | 92
-rw-r--r--  include/llvm/IR/LegacyPassManager.h | 4
-rw-r--r--  include/llvm/IR/LegacyPassManagers.h | 34
-rw-r--r--  include/llvm/IR/LegacyPassNameParser.h | 141
-rw-r--r--  include/llvm/IR/MDBuilder.h | 7
-rw-r--r--  include/llvm/IR/Mangler.h | 69
-rw-r--r--  include/llvm/IR/Metadata.h | 57
-rw-r--r--  include/llvm/IR/Module.h | 83
-rw-r--r--  include/llvm/IR/NoFolder.h | 298
-rw-r--r--  include/llvm/IR/Operator.h | 36
-rw-r--r--  include/llvm/IR/PassManager.h | 1057
-rw-r--r--  include/llvm/IR/PatternMatch.h | 1211
-rw-r--r--  include/llvm/IR/PredIteratorCache.h | 70
-rw-r--r--  include/llvm/IR/Type.h | 36
-rw-r--r--  include/llvm/IR/Use.h | 198
-rw-r--r--  include/llvm/IR/User.h | 22
-rw-r--r--  include/llvm/IR/Value.h | 138
-rw-r--r--  include/llvm/IR/ValueHandle.h | 380
-rw-r--r--  include/llvm/IR/ValueMap.h | 377
-rw-r--r--  include/llvm/IR/Verifier.h | 75
66 files changed, 9975 insertions, 1061 deletions
diff --git a/include/llvm/IR/Argument.h b/include/llvm/IR/Argument.h
index eb6ed46..7c1ebf6 100644
--- a/include/llvm/IR/Argument.h
+++ b/include/llvm/IR/Argument.h
@@ -59,7 +59,12 @@ public:
/// containing function.
bool hasByValAttr() const;
- /// \brief If this is a byval argument, return its alignment.
+ /// \brief Return true if this argument has the byval attribute or inalloca
+ /// attribute on it in its containing function. These attributes both
+ /// represent arguments being passed by value.
+ bool hasByValOrInAllocaAttr() const;
+
+ /// \brief If this is a byval or inalloca argument, return its alignment.
unsigned getParamAlignment() const;
/// \brief Return true if this argument has the nest attribute on it in its
@@ -86,6 +91,9 @@ public:
/// on it in its containing function.
bool onlyReadsMemory() const;
+ /// \brief Return true if this argument has the inalloca attribute on it in
+ /// its containing function.
+ bool hasInAllocaAttr() const;
/// \brief Add an Attribute to an argument.
void addAttr(AttributeSet AS);
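As a rough illustration (not part of the patch), the new predicate is meant for code that scans a function's parameters for by-value semantics; the helper below is a hypothetical sketch assuming an existing Function.

#include "llvm/IR/Function.h"

// Count the arguments of F that are passed by value in memory, i.e. carry
// either the byval or the new inalloca attribute.
static unsigned countByValLikeArgs(const llvm::Function &F) {
  unsigned N = 0;
  for (llvm::Function::const_arg_iterator AI = F.arg_begin(), AE = F.arg_end();
       AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      ++N;
  return N;
}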
diff --git a/include/llvm/IR/AssemblyAnnotationWriter.h b/include/llvm/IR/AssemblyAnnotationWriter.h
new file mode 100644
index 0000000..a8d52f6
--- /dev/null
+++ b/include/llvm/IR/AssemblyAnnotationWriter.h
@@ -0,0 +1,63 @@
+//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Clients of the assembly writer can use this interface to add their own
+// special-purpose annotations to LLVM assembly language printouts. Note that
+// the assembly parser won't be able to parse these, in general, so
+// implementations are advised to print stuff as LLVM comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ASMANNOTATIONWRITER_H
+#define LLVM_IR_ASMANNOTATIONWRITER_H
+
+namespace llvm {
+
+class Function;
+class BasicBlock;
+class Instruction;
+class Value;
+class formatted_raw_ostream;
+
+class AssemblyAnnotationWriter {
+public:
+
+ virtual ~AssemblyAnnotationWriter();
+
+ /// emitFunctionAnnot - This may be implemented to emit a string right before
+ /// the start of a function.
+ virtual void emitFunctionAnnot(const Function *,
+ formatted_raw_ostream &) {}
+
+ /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
+ /// after the basic block label, but before the first instruction in the
+ /// block.
+ virtual void emitBasicBlockStartAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
+ }
+
+ /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
+ /// after the basic block.
+ virtual void emitBasicBlockEndAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
+ }
+
+ /// emitInstructionAnnot - This may be implemented to emit a string right
+ /// before an instruction is emitted.
+ virtual void emitInstructionAnnot(const Instruction *,
+ formatted_raw_ostream &) {}
+
+ /// printInfoComment - This may be implemented to emit a comment to the
+ /// right of an instruction or global value.
+ virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
+};
+
+} // End llvm namespace
+
+#endif
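A minimal sketch of how a client might hook into this interface; the annotator class, its counter, and the Module::print overload taking an AssemblyAnnotationWriter* are assumptions for illustration, not part of the patch.

#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Prints a running instruction count as an LLVM comment, which the assembly
// parser will simply ignore, as the header above recommends.
class CountingAnnotator : public llvm::AssemblyAnnotationWriter {
  unsigned Count;
public:
  CountingAnnotator() : Count(0) {}
  void emitInstructionAnnot(const llvm::Instruction *,
                            llvm::formatted_raw_ostream &OS) override {
    OS << "  ; instruction #" << Count++ << "\n";
  }
};
}

void printAnnotated(llvm::Module &M) {
  CountingAnnotator A;
  M.print(llvm::outs(), &A);  // assumed overload taking an annotation writer
}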
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index c23ba0f..9eccf40 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -71,6 +71,7 @@ public:
Builtin, ///< Callee is recognized as a builtin, despite
///< nobuiltin attribute on its declaration.
ByVal, ///< Pass structure by value
+ InAlloca, ///< Pass structure in an alloca
Cold, ///< Marks function as being in a cold path.
InlineHint, ///< Source said inlining was desirable
InReg, ///< Force argument to be passed in register
@@ -201,7 +202,7 @@ public:
/// index `1'.
class AttributeSet {
public:
- enum AttrIndex LLVM_ENUM_INT_TYPE(unsigned) {
+ enum AttrIndex : unsigned {
ReturnIndex = 0U,
FunctionIndex = ~0U
};
@@ -402,10 +403,6 @@ public:
addAttribute(A);
}
AttrBuilder(AttributeSet AS, unsigned Idx);
- AttrBuilder(const AttrBuilder &B)
- : Attrs(B.Attrs),
- TargetDepAttrs(B.TargetDepAttrs.begin(), B.TargetDepAttrs.end()),
- Alignment(B.Alignment), StackAlignment(B.StackAlignment) {}
void clear();
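For illustration only, a hypothetical helper that builds a parameter attribute set carrying the new InAlloca kind via AttrBuilder; index 1 denotes the first parameter, index 0 the return value.

#include "llvm/IR/Attributes.h"
#include "llvm/IR/LLVMContext.h"

// Produce an AttributeSet that marks parameter #1 as inalloca.
llvm::AttributeSet buildInAllocaParamAttrs(llvm::LLVMContext &Ctx) {
  llvm::AttrBuilder B;
  B.addAttribute(llvm::Attribute::InAlloca);
  // Attribute indices: 0 = return value, 1..N = parameters, ~0U = function.
  return llvm::AttributeSet::get(Ctx, /*Index=*/1, B);
}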
diff --git a/include/llvm/IR/AutoUpgrade.h b/include/llvm/IR/AutoUpgrade.h
new file mode 100644
index 0000000..076ed4a
--- /dev/null
+++ b/include/llvm/IR/AutoUpgrade.h
@@ -0,0 +1,66 @@
+//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These functions are implemented by lib/IR/AutoUpgrade.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_AUTOUPGRADE_H
+#define LLVM_IR_AUTOUPGRADE_H
+
+namespace llvm {
+ class CallInst;
+ class Constant;
+ class Function;
+ class Instruction;
+ class Module;
+ class GlobalVariable;
+ class Type;
+ class Value;
+
+ /// This is a more granular function that simply checks an intrinsic function
+ /// for upgrading, and returns true if it requires upgrading. It may return
+ /// null in NewFn if all calls to the original intrinsic function
+ /// should be transformed to non-function-call instructions.
+ bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);
+
+ /// This is the complement to the above, replacing a specific call to an
+ /// intrinsic function with a call to the specified new function.
+ void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);
+
+ /// This is an auto-upgrade hook for any old intrinsic function syntaxes
+ /// which need to have both the function updated as well as all calls updated
+ /// to the new function. This should only be run in a post-processing fashion
+ /// so that it can update all calls to the old function.
+ void UpgradeCallsToIntrinsic(Function* F);
+
+ /// This checks for global variables which should be upgraded. It returns true
+ /// if it requires upgrading.
+ bool UpgradeGlobalVariable(GlobalVariable *GV);
+
+ /// If the TBAA tag for the given instruction uses the scalar TBAA format,
+ /// we upgrade it to the struct-path aware TBAA format.
+ void UpgradeInstWithTBAATag(Instruction *I);
+
+ /// This is an auto-upgrade for bitcast between pointers with different
+ /// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
+ Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
+ Instruction *&Temp);
+
+ /// This is an auto-upgrade for bitcast constant expression between pointers
+ /// with different address spaces: the instruction is replaced by a pair
+ /// ptrtoint+inttoptr.
+ Value *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);
+
+ /// Check the debug info version number; if it is out of date, drop the debug
+ /// info. Return true if the module is modified.
+ bool UpgradeDebugInfo(Module &M);
+} // End llvm namespace
+
+#endif
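The intended calling pattern, as the comments above describe it, looks roughly like the sketch below. This is a hypothetical loader-side helper, not a caller from the patch; the rewriting is deferred to a second pass on the assumption that the old declaration may be erased while its calls are upgraded.

#include <utility>
#include <vector>
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/Module.h"

void upgradeIntrinsics(llvm::Module &M) {
  // Pass 1: find intrinsic declarations that need upgrading.
  std::vector<std::pair<llvm::Function *, llvm::Function *> > Upgraded;
  for (llvm::Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    llvm::Function *NewFn = 0;
    if (llvm::UpgradeIntrinsicFunction(&*I, NewFn))
      Upgraded.push_back(std::make_pair(&*I, NewFn));
  }
  // Pass 2: rewrite every call to each old intrinsic, as the post-processing
  // contract of UpgradeCallsToIntrinsic requires.
  for (unsigned i = 0, e = Upgraded.size(); i != e; ++i)
    llvm::UpgradeCallsToIntrinsic(Upgraded[i].first);
  llvm::UpgradeDebugInfo(M);  // drop debug info with an out-of-date version
}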
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index 3bdc95d..1adc254 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -116,6 +116,8 @@ public:
const Function *getParent() const { return Parent; }
Function *getParent() { return Parent; }
+ const DataLayout *getDataLayout() const;
+
/// \brief Returns the terminator instruction if the block is well formed or
/// null if the block is not well formed.
TerminatorInst *getTerminator();
diff --git a/include/llvm/IR/CFG.h b/include/llvm/IR/CFG.h
new file mode 100644
index 0000000..c8be8bd
--- /dev/null
+++ b/include/llvm/IR/CFG.h
@@ -0,0 +1,383 @@
+//===- CFG.h - Process LLVM structures as graphs ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines specializations of GraphTraits that allow Function and
+// BasicBlock graphs to be treated as proper graphs for generic algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CFG_H
+#define LLVM_IR_CFG_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// BasicBlock pred_iterator definition
+//===----------------------------------------------------------------------===//
+
+template <class Ptr, class USE_iterator> // Predecessor Iterator
+class PredIterator : public std::iterator<std::forward_iterator_tag,
+ Ptr, ptrdiff_t, Ptr*, Ptr*> {
+ typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
+ Ptr*> super;
+ typedef PredIterator<Ptr, USE_iterator> Self;
+ USE_iterator It;
+
+ inline void advancePastNonTerminators() {
+ // Loop to ignore non-terminator uses (for example BlockAddresses).
+ while (!It.atEnd() && !isa<TerminatorInst>(*It))
+ ++It;
+ }
+
+public:
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+ PredIterator() {}
+ explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
+ advancePastNonTerminators();
+ }
+ inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}
+
+ inline bool operator==(const Self& x) const { return It == x.It; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline reference operator*() const {
+ assert(!It.atEnd() && "pred_iterator out of range!");
+ return cast<TerminatorInst>(*It)->getParent();
+ }
+ inline pointer *operator->() const { return &operator*(); }
+
+ inline Self& operator++() { // Preincrement
+ assert(!It.atEnd() && "pred_iterator out of range!");
+ ++It; advancePastNonTerminators();
+ return *this;
+ }
+
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this; ++*this; return tmp;
+ }
+
+ /// getOperandNo - Return the operand number in the predecessor's
+ /// terminator of the successor.
+ unsigned getOperandNo() const {
+ return It.getOperandNo();
+ }
+
+ /// getUse - Return the operand Use in the predecessor's terminator
+ /// of the successor.
+ Use &getUse() const {
+ return It.getUse();
+ }
+};
+
+typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
+typedef PredIterator<const BasicBlock,
+ Value::const_user_iterator> const_pred_iterator;
+
+inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
+inline const_pred_iterator pred_begin(const BasicBlock *BB) {
+ return const_pred_iterator(BB);
+}
+inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
+inline const_pred_iterator pred_end(const BasicBlock *BB) {
+ return const_pred_iterator(BB, true);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// BasicBlock succ_iterator definition
+//===----------------------------------------------------------------------===//
+
+template <class Term_, class BB_> // Successor Iterator
+class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB_,
+ int, BB_ *, BB_ *> {
+ typedef std::iterator<std::random_access_iterator_tag, BB_, int, BB_ *, BB_ *>
+ super;
+
+public:
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+private:
+ const Term_ Term;
+ unsigned idx;
+ typedef SuccIterator<Term_, BB_> Self;
+
+ inline bool index_is_valid(int idx) {
+ return idx >= 0 && (unsigned) idx < Term->getNumSuccessors();
+ }
+
+ /// \brief Proxy object to allow write access in operator[]
+ class SuccessorProxy {
+ Self it;
+
+ public:
+ explicit SuccessorProxy(const Self &it) : it(it) {}
+
+ SuccessorProxy &operator=(SuccessorProxy r) {
+ *this = reference(r);
+ return *this;
+ }
+
+ SuccessorProxy &operator=(reference r) {
+ it.Term->setSuccessor(it.idx, r);
+ return *this;
+ }
+
+ operator reference() const { return *it; }
+ };
+
+public:
+ explicit inline SuccIterator(Term_ T) : Term(T), idx(0) {// begin iterator
+ }
+ inline SuccIterator(Term_ T, bool) // end iterator
+ : Term(T) {
+ if (Term)
+ idx = Term->getNumSuccessors();
+ else
+ // Term == NULL happens, if a basic block is not fully constructed and
+ // consequently getTerminator() returns NULL. In this case we construct a
+ // SuccIterator which describes a basic block that has zero successors.
+ // Defining SuccIterator for incomplete and malformed CFGs is especially
+ // useful for debugging.
+ idx = 0;
+ }
+
+ inline const Self &operator=(const Self &I) {
+ assert(Term == I.Term &&"Cannot assign iterators to two different blocks!");
+ idx = I.idx;
+ return *this;
+ }
+
+ /// getSuccessorIndex - This is used to interface between code that wants to
+ /// operate on terminator instructions directly.
+ unsigned getSuccessorIndex() const { return idx; }
+
+ inline bool operator==(const Self& x) const { return idx == x.idx; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline reference operator*() const { return Term->getSuccessor(idx); }
+ inline pointer operator->() const { return operator*(); }
+
+ inline Self& operator++() { ++idx; return *this; } // Preincrement
+
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this; ++*this; return tmp;
+ }
+
+ inline Self& operator--() { --idx; return *this; } // Predecrement
+ inline Self operator--(int) { // Postdecrement
+ Self tmp = *this; --*this; return tmp;
+ }
+
+ inline bool operator<(const Self& x) const {
+ assert(Term == x.Term && "Cannot compare iterators of different blocks!");
+ return idx < x.idx;
+ }
+
+ inline bool operator<=(const Self& x) const {
+ assert(Term == x.Term && "Cannot compare iterators of different blocks!");
+ return idx <= x.idx;
+ }
+ inline bool operator>=(const Self& x) const {
+ assert(Term == x.Term && "Cannot compare iterators of different blocks!");
+ return idx >= x.idx;
+ }
+
+ inline bool operator>(const Self& x) const {
+ assert(Term == x.Term && "Cannot compare iterators of different blocks!");
+ return idx > x.idx;
+ }
+
+ inline Self& operator+=(int Right) {
+ unsigned new_idx = idx + Right;
+ assert(index_is_valid(new_idx) && "Iterator index out of bound");
+ idx = new_idx;
+ return *this;
+ }
+
+ inline Self operator+(int Right) const {
+ Self tmp = *this;
+ tmp += Right;
+ return tmp;
+ }
+
+ inline Self& operator-=(int Right) {
+ return operator+=(-Right);
+ }
+
+ inline Self operator-(int Right) const {
+ return operator+(-Right);
+ }
+
+ inline int operator-(const Self& x) const {
+ assert(Term == x.Term && "Cannot work on iterators of different blocks!");
+ int distance = idx - x.idx;
+ return distance;
+ }
+
+ inline SuccessorProxy operator[](int offset) {
+ Self tmp = *this;
+ tmp += offset;
+ return SuccessorProxy(tmp);
+ }
+
+ /// Get the source BB of this iterator.
+ inline BB_ *getSource() {
+ assert(Term && "Source not available, if basic block was malformed");
+ return Term->getParent();
+ }
+};
+
+typedef SuccIterator<TerminatorInst*, BasicBlock> succ_iterator;
+typedef SuccIterator<const TerminatorInst*,
+ const BasicBlock> succ_const_iterator;
+
+inline succ_iterator succ_begin(BasicBlock *BB) {
+ return succ_iterator(BB->getTerminator());
+}
+inline succ_const_iterator succ_begin(const BasicBlock *BB) {
+ return succ_const_iterator(BB->getTerminator());
+}
+inline succ_iterator succ_end(BasicBlock *BB) {
+ return succ_iterator(BB->getTerminator(), true);
+}
+inline succ_const_iterator succ_end(const BasicBlock *BB) {
+ return succ_const_iterator(BB->getTerminator(), true);
+}
+
+template <typename T, typename U> struct isPodLike<SuccIterator<T, U> > {
+ static const bool value = isPodLike<T>::value;
+};
+
+
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks...
+
+template <> struct GraphTraits<BasicBlock*> {
+ typedef BasicBlock NodeType;
+ typedef succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(BasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return succ_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return succ_end(N);
+ }
+};
+
+template <> struct GraphTraits<const BasicBlock*> {
+ typedef const BasicBlock NodeType;
+ typedef succ_const_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const BasicBlock *BB) { return BB; }
+
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return succ_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return succ_end(N);
+ }
+};
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order. Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<BasicBlock*> > {
+ typedef BasicBlock NodeType;
+ typedef pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return pred_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return pred_end(N);
+ }
+};
+
+template <> struct GraphTraits<Inverse<const BasicBlock*> > {
+ typedef const BasicBlock NodeType;
+ typedef const_pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<const BasicBlock*> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return pred_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return pred_end(N);
+ }
+};
+
+
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... these are the same as the basic block iterators,
+// except that the root node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
+ static NodeType *getEntryNode(Function *F) { return &F->getEntryBlock(); }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef Function::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(Function *F) { return F->begin(); }
+ static nodes_iterator nodes_end (Function *F) { return F->end(); }
+ static size_t size (Function *F) { return F->size(); }
+};
+template <> struct GraphTraits<const Function*> :
+ public GraphTraits<const BasicBlock*> {
+ static NodeType *getEntryNode(const Function *F) {return &F->getEntryBlock();}
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef Function::const_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(const Function *F) { return F->begin(); }
+ static nodes_iterator nodes_end (const Function *F) { return F->end(); }
+ static size_t size (const Function *F) { return F->size(); }
+};
+
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order. Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<Function*> > :
+ public GraphTraits<Inverse<BasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<Function*> G) {
+ return &G.Graph->getEntryBlock();
+ }
+};
+template <> struct GraphTraits<Inverse<const Function*> > :
+ public GraphTraits<Inverse<const BasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<const Function *> G) {
+ return &G.Graph->getEntryBlock();
+ }
+};
+
+} // End llvm namespace
+
+#endif
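In practice the header boils down to two families of helpers plus the GraphTraits glue: pred_begin/pred_end and succ_begin/succ_end for direct iteration, and the specializations that let generic graph algorithms walk a Function or BasicBlock. A small sketch, assuming an existing BasicBlock:

#include "llvm/IR/CFG.h"
#include "llvm/Support/raw_ostream.h"

void listNeighbors(llvm::BasicBlock *BB) {
  using namespace llvm;
  // Predecessors are found by walking the uses of BB that are terminators.
  for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI)
    errs() << "pred: " << (*PI)->getName() << "\n";
  // Successors are indexed off BB's own terminator.
  for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
    errs() << "succ: " << (*SI)->getName() << "\n";
}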
diff --git a/include/llvm/IR/CMakeLists.txt b/include/llvm/IR/CMakeLists.txt
index 2d52a89..dd8e04f 100644
--- a/include/llvm/IR/CMakeLists.txt
+++ b/include/llvm/IR/CMakeLists.txt
@@ -2,6 +2,4 @@ set(LLVM_TARGET_DEFINITIONS Intrinsics.td)
tablegen(LLVM Intrinsics.gen -gen-intrinsic)
-add_custom_target(intrinsics_gen ALL
- DEPENDS ${llvm_builded_incs_dir}/IR/Intrinsics.gen)
-set_target_properties(intrinsics_gen PROPERTIES FOLDER "Tablegenning")
+add_public_tablegen_target(intrinsics_gen)
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
new file mode 100644
index 0000000..ec46103
--- /dev/null
+++ b/include/llvm/IR/CallSite.h
@@ -0,0 +1,355 @@
+//===- CallSite.h - Abstract Call & Invoke instrs ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallSite class, which is a handy wrapper for code that
+// wants to treat Call and Invoke instructions in a generic way. When in non-
+// mutation context (e.g. an analysis) ImmutableCallSite should be used.
+// Finally, when some degree of customization is necessary between these two
+// extremes, CallSiteBase<> can be supplied with fine-tuned parameters.
+//
+// NOTE: These classes are supposed to have "value semantics". So they should be
+// passed by value, not by reference; they should not be "new"ed or "delete"d.
+// They are efficiently copyable, assignable and constructable, with cost
+// equivalent to copying a pointer (notice that they have only a single data
+// member). The internal representation carries a flag which indicates which of
+// the two variants is enclosed. This allows for cheaper checks when various
+// accessors of CallSite are employed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CALLSITE_H
+#define LLVM_IR_CALLSITE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+
+class CallInst;
+class InvokeInst;
+
+template <typename FunTy = const Function,
+ typename ValTy = const Value,
+ typename UserTy = const User,
+ typename InstrTy = const Instruction,
+ typename CallTy = const CallInst,
+ typename InvokeTy = const InvokeInst,
+ typename IterTy = User::const_op_iterator>
+class CallSiteBase {
+protected:
+ PointerIntPair<InstrTy*, 1, bool> I;
+public:
+ CallSiteBase() : I(0, false) {}
+ CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
+ CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
+ CallSiteBase(ValTy *II) { *this = get(II); }
+protected:
+ /// CallSiteBase::get - This static method is sort of like a constructor. It
+ /// will create an appropriate call site for a Call or Invoke instruction, but
+ /// it can also create a null initialized CallSiteBase object for something
+ /// which is NOT a call site.
+ ///
+ static CallSiteBase get(ValTy *V) {
+ if (InstrTy *II = dyn_cast<InstrTy>(V)) {
+ if (II->getOpcode() == Instruction::Call)
+ return CallSiteBase(static_cast<CallTy*>(II));
+ else if (II->getOpcode() == Instruction::Invoke)
+ return CallSiteBase(static_cast<InvokeTy*>(II));
+ }
+ return CallSiteBase();
+ }
+public:
+ /// isCall - true if a CallInst is enclosed.
+ /// Note that !isCall() does not mean an InvokeInst is enclosed;
+ /// it could also signify a null Instruction pointer.
+ bool isCall() const { return I.getInt(); }
+
+ /// isInvoke - true if a InvokeInst is enclosed.
+ ///
+ bool isInvoke() const { return getInstruction() && !I.getInt(); }
+
+ InstrTy *getInstruction() const { return I.getPointer(); }
+ InstrTy *operator->() const { return I.getPointer(); }
+ LLVM_EXPLICIT operator bool() const { return I.getPointer(); }
+
+ /// getCalledValue - Return the pointer to function that is being called.
+ ///
+ ValTy *getCalledValue() const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ return *getCallee();
+ }
+
+ /// getCalledFunction - Return the function being called if this is a direct
+ /// call, otherwise return null (if it's an indirect call).
+ ///
+ FunTy *getCalledFunction() const {
+ return dyn_cast<FunTy>(getCalledValue());
+ }
+
+ /// setCalledFunction - Set the callee to the specified value.
+ ///
+ void setCalledFunction(Value *V) {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ *getCallee() = V;
+ }
+
+ /// isCallee - Determine whether the passed iterator points to the
+ /// callee operand's Use.
+ bool isCallee(Value::const_user_iterator UI) const {
+ return isCallee(&UI.getUse());
+ }
+
+ /// Determine whether this Use is the callee operand's Use.
+ bool isCallee(const Use *U) const { return getCallee() == U; }
+
+ ValTy *getArgument(unsigned ArgNo) const {
+ assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+ return *(arg_begin() + ArgNo);
+ }
+
+ void setArgument(unsigned ArgNo, Value* newVal) {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+ getInstruction()->setOperand(ArgNo, newVal);
+ }
+
+ /// Given a value use iterator, returns the argument that corresponds to it.
+ /// Iterator must actually correspond to an argument.
+ unsigned getArgumentNo(Value::const_user_iterator I) const {
+ return getArgumentNo(&I.getUse());
+ }
+
+ /// Given a use for an argument, get the argument number that corresponds to
+ /// it.
+ unsigned getArgumentNo(const Use *U) const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(arg_begin() <= U && U < arg_end()
+ && "Argument # out of range!");
+ return U - arg_begin();
+ }
+
+ /// arg_iterator - The type of iterator to use when looping over actual
+ /// arguments at this call site.
+ typedef IterTy arg_iterator;
+
+ /// arg_begin/arg_end - Return iterators corresponding to the actual argument
+ /// list for a call site.
+ IterTy arg_begin() const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ // Skip non-arguments
+ return (*this)->op_begin();
+ }
+
+ IterTy arg_end() const { return (*this)->op_end() - getArgumentEndOffset(); }
+ bool arg_empty() const { return arg_end() == arg_begin(); }
+ unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }
+
+ /// getType - Return the type of the instruction that generated this call site
+ ///
+ Type *getType() const { return (*this)->getType(); }
+
+ /// getCaller - Return the caller function for this call site
+ ///
+ FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
+
+#define CALLSITE_DELEGATE_GETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ return isCall() \
+ ? cast<CallInst>(II)->METHOD \
+ : cast<InvokeInst>(II)->METHOD
+
+#define CALLSITE_DELEGATE_SETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ if (isCall()) \
+ cast<CallInst>(II)->METHOD; \
+ else \
+ cast<InvokeInst>(II)->METHOD
+
+ /// getCallingConv/setCallingConv - get or set the calling convention of the
+ /// call.
+ CallingConv::ID getCallingConv() const {
+ CALLSITE_DELEGATE_GETTER(getCallingConv());
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
+ }
+
+ /// getAttributes/setAttributes - get or set the parameter attributes of
+ /// the call.
+ const AttributeSet &getAttributes() const {
+ CALLSITE_DELEGATE_GETTER(getAttributes());
+ }
+ void setAttributes(const AttributeSet &PAL) {
+ CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
+ }
+
+ /// \brief Return true if this function has the given attribute.
+ bool hasFnAttr(Attribute::AttrKind A) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
+ }
+
+ /// \brief Return true if the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+ CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
+ }
+
+ /// @brief Extract the alignment for a call or parameter (0=unknown).
+ uint16_t getParamAlignment(uint16_t i) const {
+ CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
+ }
+
+ /// \brief Return true if the call should not be treated as a call to a
+ /// builtin.
+ bool isNoBuiltin() const {
+ CALLSITE_DELEGATE_GETTER(isNoBuiltin());
+ }
+
+ /// @brief Return true if the call should not be inlined.
+ bool isNoInline() const {
+ CALLSITE_DELEGATE_GETTER(isNoInline());
+ }
+ void setIsNoInline(bool Value = true) {
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
+ }
+
+ /// @brief Determine if the call does not access memory.
+ bool doesNotAccessMemory() const {
+ CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
+ }
+ void setDoesNotAccessMemory() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
+ }
+
+ /// @brief Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
+ }
+ void setOnlyReadsMemory() {
+ CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
+ }
+
+ /// @brief Determine if the call cannot return.
+ bool doesNotReturn() const {
+ CALLSITE_DELEGATE_GETTER(doesNotReturn());
+ }
+ void setDoesNotReturn() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
+ }
+
+ /// @brief Determine if the call cannot unwind.
+ bool doesNotThrow() const {
+ CALLSITE_DELEGATE_GETTER(doesNotThrow());
+ }
+ void setDoesNotThrow() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
+ }
+
+#undef CALLSITE_DELEGATE_GETTER
+#undef CALLSITE_DELEGATE_SETTER
+
+ /// @brief Determine whether this argument is not captured.
+ bool doesNotCapture(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
+ }
+
+ /// @brief Determine whether this argument is passed by value.
+ bool isByValArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ }
+
+ /// @brief Determine whether this argument is passed in an alloca.
+ bool isInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::InAlloca);
+ }
+
+ /// @brief Determine whether this argument is passed by value or in an alloca.
+ bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ByVal) ||
+ paramHasAttr(ArgNo + 1, Attribute::InAlloca);
+ }
+
+ /// @brief Determine if there is an inalloca argument. Only the last
+ /// argument can have the inalloca attribute.
+ bool hasInAllocaArgument() const {
+ return paramHasAttr(arg_size(), Attribute::InAlloca);
+ }
+
+ bool doesNotAccessMemory(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ReadNone);
+ }
+
+ bool onlyReadsMemory(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ReadOnly) ||
+ paramHasAttr(ArgNo + 1, Attribute::ReadNone);
+ }
+
+ /// hasArgument - Returns true if this CallSite passes the given Value* as an
+ /// argument to the called function.
+ bool hasArgument(const Value *Arg) const {
+ for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
+ ++AI)
+ if (AI->get() == Arg)
+ return true;
+ return false;
+ }
+
+private:
+ unsigned getArgumentEndOffset() const {
+ if (isCall())
+ return 1; // Skip Callee
+ else
+ return 3; // Skip BB, BB, Callee
+ }
+
+ IterTy getCallee() const {
+ if (isCall()) // Skip Callee
+ return cast<CallInst>(getInstruction())->op_end() - 1;
+ else // Skip BB, BB, Callee
+ return cast<InvokeInst>(getInstruction())->op_end() - 3;
+ }
+};
+
+class CallSite : public CallSiteBase<Function, Value, User, Instruction,
+ CallInst, InvokeInst, User::op_iterator> {
+ typedef CallSiteBase<Function, Value, User, Instruction,
+ CallInst, InvokeInst, User::op_iterator> Base;
+public:
+ CallSite() {}
+ CallSite(Base B) : Base(B) {}
+ CallSite(Value* V) : Base(V) {}
+ CallSite(CallInst *CI) : Base(CI) {}
+ CallSite(InvokeInst *II) : Base(II) {}
+ CallSite(Instruction *II) : Base(II) {}
+
+ bool operator==(const CallSite &CS) const { return I == CS.I; }
+ bool operator!=(const CallSite &CS) const { return I != CS.I; }
+ bool operator<(const CallSite &CS) const {
+ return getInstruction() < CS.getInstruction();
+ }
+
+private:
+ User::op_iterator getCallee() const;
+};
+
+/// ImmutableCallSite - establish a view to a call site for examination
+class ImmutableCallSite : public CallSiteBase<> {
+ typedef CallSiteBase<> Base;
+public:
+ ImmutableCallSite(const Value* V) : Base(V) {}
+ ImmutableCallSite(const CallInst *CI) : Base(CI) {}
+ ImmutableCallSite(const InvokeInst *II) : Base(II) {}
+ ImmutableCallSite(const Instruction *II) : Base(II) {}
+ ImmutableCallSite(CallSite CS) : Base(CS.getInstruction()) {}
+};
+
+} // End llvm namespace
+
+#endif
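A short sketch of the value-semantics usage the header describes: construct a CallSite from an arbitrary Instruction and it tests false unless the instruction really is a call or invoke. The helper name and parameters are invented for illustration.

#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"

// Does instruction I directly call Target (through either call or invoke)?
bool callsDirectly(llvm::Instruction *I, const llvm::Function *Target) {
  llvm::CallSite CS(I);   // null call site when I is neither call nor invoke
  if (!CS)
    return false;
  return CS.getCalledFunction() == Target;  // null for indirect calls
}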
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 4437af2..af44e8a 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -58,6 +58,14 @@ namespace CallingConv {
// stackmap and patchpoint intrinsics).
AnyReg = 13,
+ // PreserveMost - Calling convention for runtime calls that preserves most
+ // registers.
+ PreserveMost = 14,
+
+ // PreserveAll - Calling convention for runtime calls that preserves
+ // (almost) all registers.
+ PreserveAll = 15,
+
// Target - This is the start of the target-specific calling conventions,
// e.g. fastcall and thiscall on X86.
FirstTargetCC = 64,
@@ -129,7 +137,13 @@ namespace CallingConv {
/// convention differs from the more common \c X86_64_SysV convention
/// in a number of ways, most notably in that XMM registers used to pass
/// arguments are shadowed by GPRs, and vice versa.
- X86_64_Win64 = 79
+ X86_64_Win64 = 79,
+
+ /// \brief The calling convention used for __cdecl methods on win32.
+ /// Differs from the C calling convention only in that the order of the
+ /// first parameter and the sret parameter are swapped.
+ X86_CDeclMethod = 80
+
};
} // End CallingConv namespace
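For illustration (function and call-site names assumed), a client opts a function and its call sites into one of the new conventions like this; callee and call sites are expected to agree on the convention.

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

void usePreserveMost(llvm::Function *F, llvm::CallInst *CI) {
  F->setCallingConv(llvm::CallingConv::PreserveMost);
  CI->setCallingConv(llvm::CallingConv::PreserveMost);  // call sites must match
}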
diff --git a/include/llvm/IR/Constant.h b/include/llvm/IR/Constant.h
index 26bad1d..f03e3dd 100644
--- a/include/llvm/IR/Constant.h
+++ b/include/llvm/IR/Constant.h
@@ -41,8 +41,8 @@ namespace llvm {
class Constant : public User {
void operator=(const Constant &) LLVM_DELETED_FUNCTION;
Constant(const Constant &) LLVM_DELETED_FUNCTION;
- virtual void anchor();
-
+ void anchor() override;
+
protected:
Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
: User(ty, vty, Ops, NumOps) {}
diff --git a/include/llvm/IR/ConstantFolder.h b/include/llvm/IR/ConstantFolder.h
new file mode 100644
index 0000000..86668f7
--- /dev/null
+++ b/include/llvm/IR/ConstantFolder.h
@@ -0,0 +1,238 @@
+//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstantFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants
+// with minimal folding. For general constant creation and folding,
+// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTFOLDER_H
+#define LLVM_IR_CONSTANTFOLDER_H
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+/// ConstantFolder - Create constants with minimal, target-independent folding.
+class ConstantFolder {
+public:
+ explicit ConstantFolder() {}
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFAdd(LHS, RHS);
+ }
+ Constant *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFSub(LHS, RHS);
+ }
+ Constant *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFMul(LHS, RHS);
+ }
+ Constant *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getUDiv(LHS, RHS, isExact);
+ }
+ Constant *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getSDiv(LHS, RHS, isExact);
+ }
+ Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFDiv(LHS, RHS);
+ }
+ Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getURem(LHS, RHS);
+ }
+ Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getSRem(LHS, RHS);
+ }
+ Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFRem(LHS, RHS);
+ }
+ Constant *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getLShr(LHS, RHS, isExact);
+ }
+ Constant *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getAShr(LHS, RHS, isExact);
+ }
+ Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getAnd(LHS, RHS);
+ }
+ Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getOr(LHS, RHS);
+ }
+ Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getXor(LHS, RHS);
+ }
+
+ Constant *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::get(Opc, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getNeg(C, HasNUW, HasNSW);
+ }
+ Constant *CreateFNeg(Constant *C) const {
+ return ConstantExpr::getFNeg(C);
+ }
+ Constant *CreateNot(Constant *C) const {
+ return ConstantExpr::getNot(C);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateGetElementPtr(Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(C, IdxList);
+ }
+ Constant *CreateGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(C, Idx);
+ }
+ Constant *CreateGetElementPtr(Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(C, IdxList);
+ }
+
+ Constant *CreateInBoundsGetElementPtr(Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(C, IdxList);
+ }
+ Constant *CreateInBoundsGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(C, Idx);
+ }
+ Constant *CreateInBoundsGetElementPtr(Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(C, IdxList);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const {
+ return ConstantExpr::getCast(Op, C, DestTy);
+ }
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getPointerCast(C, DestTy);
+ }
+ Constant *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const {
+ return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
+ }
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getFPCast(C, DestTy);
+ }
+
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getZExtOrBitCast(C, DestTy);
+ }
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getSExtOrBitCast(C, DestTy);
+ }
+
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getTruncOrBitCast(C, DestTy);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return ConstantExpr::getCompare(P, LHS, RHS);
+ }
+ Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return ConstantExpr::getCompare(P, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+ return ConstantExpr::getSelect(C, True, False);
+ }
+
+ Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ return ConstantExpr::getExtractElement(Vec, Idx);
+ }
+
+ Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
+ return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
+ }
+
+ Constant *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
+ return ConstantExpr::getShuffleVector(V1, V2, Mask);
+ }
+
+ Constant *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const {
+ return ConstantExpr::getExtractValue(Agg, IdxList);
+ }
+
+ Constant *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const {
+ return ConstantExpr::getInsertValue(Agg, Val, IdxList);
+ }
+};
+
+}
+
+#endif
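ConstantFolder is the folder IRBuilder instantiates by default, but it can also be used directly; a tiny sketch with invented values follows.

#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/LLVMContext.h"

llvm::Constant *foldedFortyTwo(llvm::LLVMContext &Ctx) {
  llvm::ConstantFolder Folder;
  llvm::Constant *A = llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 2);
  llvm::Constant *B = llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 40);
  return Folder.CreateAdd(A, B);  // folds immediately to the ConstantInt 42
}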
diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h
new file mode 100644
index 0000000..86988de
--- /dev/null
+++ b/include/llvm/IR/ConstantRange.h
@@ -0,0 +1,272 @@
+//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Represent a range of possible values that may occur when the program is run
+// for an integral value. This keeps track of a lower and upper bound for the
+// constant, which MAY wrap around the end of the numeric range. To do this, it
+// keeps track of a [lower, upper) bound, which specifies an interval just like
+// STL iterators. When used with boolean values, the following are important
+// ranges:
+//
+// [F, F) = {} = Empty set
+// [T, F) = {T}
+// [F, T) = {F}
+// [T, T) = {F, T} = Full set
+//
+// The other integral ranges use min/max values for special range values. For
+// example, for 8-bit types, it uses:
+// [0, 0) = {} = Empty set
+// [255, 255) = {0..255} = Full Set
+//
+// Note that ConstantRange can be used to represent either signed or
+// unsigned ranges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CONSTANTRANGE_H
+#define LLVM_SUPPORT_CONSTANTRANGE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// ConstantRange - This class represents a range of values.
+///
+class ConstantRange {
+ APInt Lower, Upper;
+
+ // If we have move semantics, pass APInts by value and move them into place.
+ typedef APInt APIntMoveTy;
+
+public:
+ /// Initialize a full (the default) or empty set for the specified bit width.
+ ///
+ explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true);
+
+ /// Initialize a range to hold the single specified value.
+ ///
+ ConstantRange(APIntMoveTy Value);
+
+ /// @brief Initialize a range of values explicitly. This will assert out if
+ /// Lower==Upper and Lower != Min or Max value for its type. It will also
+ /// assert out if the two APInt's are not the same bit width.
+ ConstantRange(APIntMoveTy Lower, APIntMoveTy Upper);
+
+ /// makeICmpRegion - Produce the smallest range that contains all values that
+ /// might satisfy the comparison specified by Pred when compared to any value
+ /// contained within Other.
+ ///
+ /// Solves for range X in 'for all x in X, there exists a y in Y such that
+ /// icmp op x, y is true'. Every value that might make the comparison true
+ /// is included in the resulting range.
+ static ConstantRange makeICmpRegion(unsigned Pred,
+ const ConstantRange &Other);
+
+ /// getLower - Return the lower value for this range...
+ ///
+ const APInt &getLower() const { return Lower; }
+
+ /// getUpper - Return the upper value for this range...
+ ///
+ const APInt &getUpper() const { return Upper; }
+
+ /// getBitWidth - get the bit width of this ConstantRange
+ ///
+ uint32_t getBitWidth() const { return Lower.getBitWidth(); }
+
+ /// isFullSet - Return true if this set contains all of the elements possible
+ /// for this data-type
+ ///
+ bool isFullSet() const;
+
+ /// isEmptySet - Return true if this set contains no members.
+ ///
+ bool isEmptySet() const;
+
+ /// isWrappedSet - Return true if this set wraps around the top of the range,
+ /// for example: [100, 8)
+ ///
+ bool isWrappedSet() const;
+
+ /// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
+ /// its bitwidth, for example: i8 [120, 140).
+ ///
+ bool isSignWrappedSet() const;
+
+ /// contains - Return true if the specified value is in the set.
+ ///
+ bool contains(const APInt &Val) const;
+
+ /// contains - Return true if the other range is a subset of this one.
+ ///
+ bool contains(const ConstantRange &CR) const;
+
+ /// getSingleElement - If this set contains a single element, return it,
+ /// otherwise return null.
+ ///
+ const APInt *getSingleElement() const {
+ if (Upper == Lower + 1)
+ return &Lower;
+ return 0;
+ }
+
+ /// isSingleElement - Return true if this set contains exactly one member.
+ ///
+ bool isSingleElement() const { return getSingleElement() != 0; }
+
+ /// getSetSize - Return the number of elements in this set.
+ ///
+ APInt getSetSize() const;
+
+ /// getUnsignedMax - Return the largest unsigned value contained in the
+ /// ConstantRange.
+ ///
+ APInt getUnsignedMax() const;
+
+ /// getUnsignedMin - Return the smallest unsigned value contained in the
+ /// ConstantRange.
+ ///
+ APInt getUnsignedMin() const;
+
+ /// getSignedMax - Return the largest signed value contained in the
+ /// ConstantRange.
+ ///
+ APInt getSignedMax() const;
+
+ /// getSignedMin - Return the smallest signed value contained in the
+ /// ConstantRange.
+ ///
+ APInt getSignedMin() const;
+
+ /// operator== - Return true if this range is equal to another range.
+ ///
+ bool operator==(const ConstantRange &CR) const {
+ return Lower == CR.Lower && Upper == CR.Upper;
+ }
+ bool operator!=(const ConstantRange &CR) const {
+ return !operator==(CR);
+ }
+
+ /// subtract - Subtract the specified constant from the endpoints of this
+ /// constant range.
+ ConstantRange subtract(const APInt &CI) const;
+
+ /// \brief Subtract the specified range from this range (aka relative
+ /// complement of the sets).
+ ConstantRange difference(const ConstantRange &CR) const;
+
+ /// intersectWith - Return the range that results from the intersection of
+ /// this range with another range. The resultant range is guaranteed to
+ /// include all elements contained in both input ranges, and to have the
+ /// smallest possible set size that does so. Because there may be two
+ /// intersections with the same set size, A.intersectWith(B) might not
+ /// be equal to B.intersectWith(A).
+ ///
+ ConstantRange intersectWith(const ConstantRange &CR) const;
+
+ /// unionWith - Return the range that results from the union of this range
+ /// with another range. The resultant range is guaranteed to include the
+ /// elements of both sets, but may contain more. For example, [3, 9) union
+ /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
+ /// in either set before.
+ ///
+ ConstantRange unionWith(const ConstantRange &CR) const;
+
+ /// zeroExtend - Return a new range in the specified integer type, which must
+ /// be strictly larger than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// zero extended to BitWidth.
+ ConstantRange zeroExtend(uint32_t BitWidth) const;
+
+ /// signExtend - Return a new range in the specified integer type, which must
+ /// be strictly larger than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// sign extended to BitWidth.
+ ConstantRange signExtend(uint32_t BitWidth) const;
+
+ /// truncate - Return a new range in the specified integer type, which must be
+ /// strictly smaller than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// truncated to the specified type.
+ ConstantRange truncate(uint32_t BitWidth) const;
+
+ /// zextOrTrunc - make this range have the bit width given by \p BitWidth. The
+ /// value is zero extended, truncated, or left alone to make it that width.
+ ConstantRange zextOrTrunc(uint32_t BitWidth) const;
+
+ /// sextOrTrunc - make this range have the bit width given by \p BitWidth. The
+ /// value is sign extended, truncated, or left alone to make it that width.
+ ConstantRange sextOrTrunc(uint32_t BitWidth) const;
+
+ /// add - Return a new range representing the possible values resulting
+ /// from an addition of a value in this range and a value in \p Other.
+ ConstantRange add(const ConstantRange &Other) const;
+
+ /// sub - Return a new range representing the possible values resulting
+ /// from a subtraction of a value in this range and a value in \p Other.
+ ConstantRange sub(const ConstantRange &Other) const;
+
+ /// multiply - Return a new range representing the possible values resulting
+ /// from a multiplication of a value in this range and a value in \p Other.
+ /// TODO: This isn't fully implemented yet.
+ ConstantRange multiply(const ConstantRange &Other) const;
+
+ /// smax - Return a new range representing the possible values resulting
+ /// from a signed maximum of a value in this range and a value in \p Other.
+ ConstantRange smax(const ConstantRange &Other) const;
+
+ /// umax - Return a new range representing the possible values resulting
+ /// from an unsigned maximum of a value in this range and a value in \p Other.
+ ConstantRange umax(const ConstantRange &Other) const;
+
+ /// udiv - Return a new range representing the possible values resulting
+ /// from an unsigned division of a value in this range and a value in
+ /// \p Other.
+ ConstantRange udiv(const ConstantRange &Other) const;
+
+ /// binaryAnd - return a new range representing the possible values resulting
+ /// from a binary-and of a value in this range by a value in \p Other.
+ ConstantRange binaryAnd(const ConstantRange &Other) const;
+
+ /// binaryOr - return a new range representing the possible values resulting
+ /// from a binary-or of a value in this range by a value in \p Other.
+ ConstantRange binaryOr(const ConstantRange &Other) const;
+
+ /// shl - Return a new range representing the possible values resulting
+ /// from a left shift of a value in this range by a value in \p Other.
+ /// TODO: This isn't fully implemented yet.
+ ConstantRange shl(const ConstantRange &Other) const;
+
+ /// lshr - Return a new range representing the possible values resulting
+ /// from a logical right shift of a value in this range and a value in
+ /// \p Other.
+ ConstantRange lshr(const ConstantRange &Other) const;
+
+ /// inverse - Return a new range that is the logical not of the current set.
+ ///
+ ConstantRange inverse() const;
+
+ /// print - Print out the bounds to a stream...
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// dump - Allow printing from a debugger easily...
+ ///
+ void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
+ CR.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
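
A minimal usage sketch for the set arithmetic documented above, assuming the program is linked against LLVMCore/LLVMSupport; the i8 ranges chosen here are arbitrary:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  // [4, 12) and [10, 2) over i8; the second range wraps around the top.
  ConstantRange A(APInt(8, 4), APInt(8, 12));
  ConstantRange B(APInt(8, 10), APInt(8, 2));

  outs() << "A u B       = " << A.unionWith(B) << "\n";   // smallest range covering both
  outs() << "A + B       = " << A.add(B) << "\n";         // possible sums
  outs() << "zext(A, 16) = " << A.zeroExtend(16) << "\n"; // widen to i16
  outs() << "A has 7?      " << A.contains(APInt(8, 7)) << "\n";
  return 0;
}
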
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index dac20c9..ed7a70f 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -25,8 +25,8 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
-#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/OperandTraits.h"
namespace llvm {
@@ -49,7 +49,7 @@ struct ConvertConstantType;
/// represents both boolean and integral constants.
/// @brief Class for constant integers.
class ConstantInt : public Constant {
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
ConstantInt(const ConstantInt &) LLVM_DELETED_FUNCTION;
ConstantInt(IntegerType *Ty, const APInt& V);
@@ -231,7 +231,7 @@ public:
///
class ConstantFP : public Constant {
APFloat Val;
- virtual void anchor();
+ void anchor() override;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
ConstantFP(const ConstantFP &) LLVM_DELETED_FUNCTION;
friend class LLVMContextImpl;
@@ -255,8 +255,8 @@ public:
static Constant *get(Type* Ty, double V);
static Constant *get(Type* Ty, StringRef Str);
static ConstantFP *get(LLVMContext &Context, const APFloat &V);
- static ConstantFP *getNegativeZero(Type* Ty);
- static ConstantFP *getInfinity(Type *Ty, bool Negative = false);
+ static Constant *getNegativeZero(Type *Ty);
+ static Constant *getInfinity(Type *Ty, bool Negative = false);
/// isValueValidForType - return true if Ty is big enough to represent V.
static bool isValueValidForType(Type *Ty, const APFloat &V);
@@ -308,7 +308,7 @@ protected:
public:
static ConstantAggregateZero *get(Type *Ty);
- virtual void destroyConstant();
+ void destroyConstant() override;
/// getSequentialElement - If this CAZ has array or vector type, return a zero
/// with the right element type.
@@ -356,8 +356,8 @@ public:
return cast<ArrayType>(Value::getType());
}
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void destroyConstant() override;
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
@@ -414,8 +414,8 @@ public:
return cast<StructType>(Value::getType());
}
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void destroyConstant() override;
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
@@ -461,8 +461,8 @@ public:
/// elements have the same value, return that value. Otherwise return NULL.
Constant *getSplatValue() const;
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void destroyConstant() override;
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
@@ -497,7 +497,7 @@ public:
/// get() - Static factory methods - Return objects of the specified value
static ConstantPointerNull *get(PointerType *T);
- virtual void destroyConstant();
+ void destroyConstant() override;
 /// getType - Specialize the getType() method to always return a PointerType,
/// which reduces the amount of casting needed in parts of the compiler.
@@ -624,7 +624,7 @@ public:
/// host endianness of the data elements.
StringRef getRawDataValues() const;
- virtual void destroyConstant();
+ void destroyConstant() override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
@@ -645,7 +645,7 @@ private:
class ConstantDataArray : public ConstantDataSequential {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
ConstantDataArray(const ConstantDataArray &) LLVM_DELETED_FUNCTION;
- virtual void anchor();
+ void anchor() override;
friend class ConstantDataSequential;
explicit ConstantDataArray(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
@@ -697,7 +697,7 @@ public:
class ConstantDataVector : public ConstantDataSequential {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
ConstantDataVector(const ConstantDataVector &) LLVM_DELETED_FUNCTION;
- virtual void anchor();
+ void anchor() override;
friend class ConstantDataSequential;
explicit ConstantDataVector(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
@@ -757,14 +757,20 @@ public:
/// block must be embedded into a function.
static BlockAddress *get(BasicBlock *BB);
+ /// \brief Lookup an existing \c BlockAddress constant for the given
+ /// BasicBlock.
+ ///
+ /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress.
+ static BlockAddress *lookup(const BasicBlock *BB);
+
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
Function *getFunction() const { return (Function*)Op<0>().get(); }
BasicBlock *getBasicBlock() const { return (BasicBlock*)Op<1>().get(); }
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void destroyConstant() override;
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *V) {
@@ -1093,8 +1099,8 @@ public:
/// would make it harder to remove ConstantExprs altogether.
Instruction *getAsInstruction();
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void destroyConstant() override;
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *V) {
@@ -1158,7 +1164,7 @@ public:
/// index.
UndefValue *getElementValue(unsigned Idx) const;
- virtual void destroyConstant();
+ void destroyConstant() override;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
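
Two of the Constants.h changes above are easy to see in isolation: getNegativeZero/getInfinity now return Constant*, and the new BlockAddress::lookup queries an existing block address without creating one. A hedged sketch; the module and function below are throwaway scaffolding, not taken from this diff:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("consts", Ctx);

  // These factory functions now return Constant* rather than ConstantFP*.
  Constant *NegZero = ConstantFP::getNegativeZero(Type::getDoubleTy(Ctx));
  Constant *Inf = ConstantFP::getInfinity(Type::getFloatTy(Ctx), /*Negative=*/true);
  NegZero->print(outs()); outs() << "\n";
  Inf->print(outs()); outs() << "\n";

  // lookup() returns 0 until the block's address has actually been taken.
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  outs() << "before get: " << BlockAddress::lookup(BB) << "\n";
  BlockAddress *BA = BlockAddress::get(BB);
  outs() << "after get:  " << (BlockAddress::lookup(BB) == BA) << "\n";
  return 0;
}
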
diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h
new file mode 100644
index 0000000..7d87a69
--- /dev/null
+++ b/include/llvm/IR/DIBuilder.h
@@ -0,0 +1,705 @@
+//===- DIBuilder.h - Debug Information Builder ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DIBuilder that is useful for creating debugging
+// information entries in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIBUILDER_H
+#define LLVM_IR_DIBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class Function;
+ class Module;
+ class Value;
+ class LLVMContext;
+ class MDNode;
+ class StringRef;
+ class DIBasicType;
+ class DICompileUnit;
+ class DICompositeType;
+ class DIDerivedType;
+ class DIDescriptor;
+ class DIFile;
+ class DIEnumerator;
+ class DIType;
+ class DIArray;
+ class DIGlobalVariable;
+ class DIImportedEntity;
+ class DINameSpace;
+ class DIVariable;
+ class DISubrange;
+ class DILexicalBlockFile;
+ class DILexicalBlock;
+ class DIScope;
+ class DISubprogram;
+ class DITemplateTypeParameter;
+ class DITemplateValueParameter;
+ class DIObjCProperty;
+
+ class DIBuilder {
+ private:
+ Module &M;
+ LLVMContext &VMContext;
+
+ MDNode *TempEnumTypes;
+ MDNode *TempRetainTypes;
+ MDNode *TempSubprograms;
+ MDNode *TempGVs;
+ MDNode *TempImportedModules;
+
+ Function *DeclareFn; // llvm.dbg.declare
+ Function *ValueFn; // llvm.dbg.value
+
+ SmallVector<Value *, 4> AllEnumTypes;
+ /// Use TrackingVH to collect RetainTypes, since they can be updated
+ /// later on.
+ SmallVector<TrackingVH<MDNode>, 4> AllRetainTypes;
+ SmallVector<Value *, 4> AllSubprograms;
+ SmallVector<Value *, 4> AllGVs;
+ SmallVector<TrackingVH<MDNode>, 4> AllImportedModules;
+
+ // Private use for multiple types of template parameters.
+ DITemplateValueParameter
+ createTemplateValueParameter(unsigned Tag, DIDescriptor Scope,
+ StringRef Name, DIType Ty, Value *Val,
+ MDNode *File = 0, unsigned LineNo = 0,
+ unsigned ColumnNo = 0);
+
+ DIBuilder(const DIBuilder &) LLVM_DELETED_FUNCTION;
+ void operator=(const DIBuilder &) LLVM_DELETED_FUNCTION;
+
+ public:
+ explicit DIBuilder(Module &M);
+ enum ComplexAddrKind { OpPlus=1, OpDeref };
+ enum DebugEmissionKind { FullDebug=1, LineTablesOnly };
+
+ /// finalize - Construct any deferred debug info descriptors.
+ void finalize();
+
+ /// createCompileUnit - A CompileUnit provides an anchor for all debugging
+ /// information generated during this instance of compilation.
+ /// @param Lang Source programming language, e.g. dwarf::DW_LANG_C99
+ /// @param File File name
+ /// @param Dir Directory
+ /// @param Producer Identify the producer of debugging information and code.
+ /// Usually this is a compiler version string.
+ /// @param isOptimized A boolean flag which indicates whether optimization
+ /// is ON or not.
+ /// @param Flags This string lists command line options. This string is
+ /// directly embedded in debug info output which may be used
+ /// by a tool analyzing generated debugging information.
+ /// @param RV This indicates runtime version for languages like
+ /// Objective-C.
+ /// @param SplitName The name of the file that we'll split debug info out
+ /// into.
+ DICompileUnit createCompileUnit(unsigned Lang, StringRef File,
+ StringRef Dir, StringRef Producer,
+ bool isOptimized, StringRef Flags,
+ unsigned RV,
+ StringRef SplitName = StringRef(),
+ DebugEmissionKind Kind = FullDebug);
+
+ /// createFile - Create a file descriptor to hold debugging information
+ /// for a file.
+ DIFile createFile(StringRef Filename, StringRef Directory);
+
+ /// createEnumerator - Create a single enumerator value.
+ DIEnumerator createEnumerator(StringRef Name, int64_t Val);
+
+ /// \brief Create a DWARF unspecified type.
+ DIBasicType createUnspecifiedType(StringRef Name);
+
+ /// \brief Create C++11 nullptr type.
+ DIBasicType createNullPtrType();
+
+ /// createBasicType - Create debugging information entry for a basic
+ /// type.
+ /// @param Name Type name.
+ /// @param SizeInBits Size of the type.
+ /// @param AlignInBits Type alignment.
+ /// @param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float.
+ DIBasicType createBasicType(StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding);
+
+ /// createQualifiedType - Create debugging information entry for a qualified
+ /// type, e.g. 'const int'.
+ /// @param Tag Tag identifying type, e.g. dwarf::DW_TAG_volatile_type
+ /// @param FromTy Base Type.
+ DIDerivedType createQualifiedType(unsigned Tag, DIType FromTy);
+
+ /// createPointerType - Create debugging information entry for a pointer.
+ /// @param PointeeTy Type pointed by this pointer.
+ /// @param SizeInBits Size.
+ /// @param AlignInBits Alignment. (optional)
+ /// @param Name Pointer type name. (optional)
+ DIDerivedType
+ createPointerType(DIType PointeeTy, uint64_t SizeInBits,
+ uint64_t AlignInBits = 0, StringRef Name = StringRef());
+
+ /// \brief Create debugging information entry for a pointer to member.
+ /// @param PointeeTy Type pointed to by this pointer.
+ /// @param Class Type of the class whose members this pointer points to.
+ DIDerivedType createMemberPointerType(DIType PointeeTy, DIType Class);
+
+ /// createReferenceType - Create debugging information entry for a c++
+ /// style reference or rvalue reference type.
+ DIDerivedType createReferenceType(unsigned Tag, DIType RTy);
+
+ /// createTypedef - Create debugging information entry for a typedef.
+ /// @param Ty Original type.
+ /// @param Name Typedef name.
+ /// @param File File where this type is defined.
+ /// @param LineNo Line number.
+ /// @param Context The surrounding context for the typedef.
+ DIDerivedType createTypedef(DIType Ty, StringRef Name, DIFile File,
+ unsigned LineNo, DIDescriptor Context);
+
+ /// createFriend - Create debugging information entry for a 'friend'.
+ DIDerivedType createFriend(DIType Ty, DIType FriendTy);
+
+ /// createInheritance - Create debugging information entry to establish
+ /// inheritance relationship between two types.
+ /// @param Ty Original type.
+ /// @param BaseTy Base type. Ty inherits from BaseTy.
+ /// @param BaseOffset Base offset.
+ /// @param Flags Flags to describe inheritance attribute,
+ /// e.g. private
+ DIDerivedType createInheritance(DIType Ty, DIType BaseTy,
+ uint64_t BaseOffset, unsigned Flags);
+
+ /// createMemberType - Create debugging information entry for a member.
+ /// @param Scope Member scope.
+ /// @param Name Member name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Ty Parent type.
+ DIDerivedType
+ createMemberType(DIDescriptor Scope, StringRef Name, DIFile File,
+ unsigned LineNo, uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, DIType Ty);
+
+ /// createStaticMemberType - Create debugging information entry for a
+ /// C++ static data member.
+ /// @param Scope Member scope.
+ /// @param Name Member name.
+ /// @param File File where this member is declared.
+ /// @param LineNo Line number.
+ /// @param Ty Type of the static member.
+ /// @param Flags Flags to encode member attribute, e.g. private.
+ /// @param Val Const initializer of the member.
+ DIDerivedType
+ createStaticMemberType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNo, DIType Ty,
+ unsigned Flags, llvm::Value *Val);
+
+ /// createObjCIVar - Create debugging information entry for Objective-C
+ /// instance variable.
+ /// @param Name Member name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Ty Parent type.
+ /// @param PropertyName Name of the Objective C property associated with
+ /// this ivar.
+ /// @param PropertyGetterName Name of the Objective C property getter
+ /// selector.
+ /// @param PropertySetterName Name of the Objective C property setter
+ /// selector.
+ /// @param PropertyAttributes Objective C property attributes.
+ DIDerivedType createObjCIVar(StringRef Name, DIFile File,
+ unsigned LineNo, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType Ty,
+ StringRef PropertyName = StringRef(),
+ StringRef PropertyGetterName = StringRef(),
+ StringRef PropertySetterName = StringRef(),
+ unsigned PropertyAttributes = 0);
+
+ /// createObjCIVar - Create debugging information entry for Objective-C
+ /// instance variable.
+ /// @param Name Member name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Ty Parent type.
+ /// @param PropertyNode Property associated with this ivar.
+ DIDerivedType createObjCIVar(StringRef Name, DIFile File,
+ unsigned LineNo, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType Ty,
+ MDNode *PropertyNode);
+
+ /// createObjCProperty - Create debugging information entry for Objective-C
+ /// property.
+ /// @param Name Property name.
+ /// @param File File where this property is defined.
+ /// @param LineNumber Line number.
+ /// @param GetterName Name of the Objective C property getter selector.
+ /// @param SetterName Name of the Objective C property setter selector.
+ /// @param PropertyAttributes Objective C property attributes.
+ /// @param Ty Type.
+ DIObjCProperty createObjCProperty(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes,
+ DIType Ty);
+
+ /// createClassType - Create debugging information entry for a class.
+ /// @param Scope Scope in which this class is defined.
+ /// @param Name class name.
+ /// @param File File where this class is defined.
+ /// @param LineNumber Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements class members.
+ /// @param VTableHolder Debug info of the base class that contains vtable
+ /// for this type. This is used in
+ /// DW_AT_containing_type. See DWARF documentation
+ /// for more info.
+ /// @param TemplateParms Template type parameters.
+ /// @param UniqueIdentifier A unique identifier for the class.
+ DICompositeType createClassType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType DerivedFrom, DIArray Elements,
+ DIType VTableHolder = DIType(),
+ MDNode *TemplateParms = 0,
+ StringRef UniqueIdentifier = StringRef());
+
+ /// createStructType - Create debugging information entry for a struct.
+ /// @param Scope Scope in which this struct is defined.
+ /// @param Name Struct name.
+ /// @param File File where this struct is defined.
+ /// @param LineNumber Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements Struct elements.
+ /// @param RunTimeLang Optional parameter, Objective-C runtime version.
+ /// @param UniqueIdentifier A unique identifier for the struct.
+ DICompositeType createStructType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ unsigned Flags, DIType DerivedFrom,
+ DIArray Elements, unsigned RunTimeLang = 0,
+ DIType VTableHolder = DIType(),
+ StringRef UniqueIdentifier = StringRef());
+
+ /// createUnionType - Create debugging information entry for a union.
+ /// @param Scope Scope in which this union is defined.
+ /// @param Name Union name.
+ /// @param File File where this union is defined.
+ /// @param LineNumber Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Elements Union elements.
+ /// @param RunTimeLang Optional parameter, Objective-C runtime version.
+ /// @param UniqueIdentifier A unique identifier for the union.
+ DICompositeType createUnionType(
+ DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags,
+ DIArray Elements, unsigned RunTimeLang = 0,
+ StringRef UniqueIdentifier = StringRef());
+
+ /// createTemplateTypeParameter - Create debugging information for template
+ /// type parameter.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Type parameter name.
+ /// @param Ty Parameter type.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateTypeParameter
+ createTemplateTypeParameter(DIDescriptor Scope, StringRef Name, DIType Ty,
+ MDNode *File = 0, unsigned LineNo = 0,
+ unsigned ColumnNo = 0);
+
+ /// createTemplateValueParameter - Create debugging information for template
+ /// value parameter.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Value parameter name.
+ /// @param Ty Parameter type.
+ /// @param Val Constant parameter value.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateValueParameter
+ createTemplateValueParameter(DIDescriptor Scope, StringRef Name,
+ DIType Ty, Value *Val, MDNode *File = 0,
+ unsigned LineNo = 0, unsigned ColumnNo = 0);
+
+ /// \brief Create debugging information for a template template parameter.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Value parameter name.
+ /// @param Ty Parameter type.
+ /// @param Val The fully qualified name of the template.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateValueParameter
+ createTemplateTemplateParameter(DIDescriptor Scope, StringRef Name,
+ DIType Ty, StringRef Val, MDNode *File = 0,
+ unsigned LineNo = 0, unsigned ColumnNo = 0);
+
+ /// \brief Create debugging information for a template parameter pack.
+ /// @param Scope Scope in which this type is defined.
+ /// @param Name Value parameter name.
+ /// @param Ty Parameter type.
+ /// @param Val An array of types in the pack.
+ /// @param File File where this type parameter is defined.
+ /// @param LineNo Line number.
+ /// @param ColumnNo Column Number.
+ DITemplateValueParameter
+ createTemplateParameterPack(DIDescriptor Scope, StringRef Name,
+ DIType Ty, DIArray Val, MDNode *File = 0,
+ unsigned LineNo = 0, unsigned ColumnNo = 0);
+
+ /// createArrayType - Create debugging information entry for an array.
+ /// @param Size Array size.
+ /// @param AlignInBits Alignment.
+ /// @param Ty Element type.
+ /// @param Subscripts Subscripts.
+ DICompositeType createArrayType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts);
+
+ /// createVectorType - Create debugging information entry for a vector type.
+ /// @param Size Array size.
+ /// @param AlignInBits Alignment.
+ /// @param Ty Element type.
+ /// @param Subscripts Subscripts.
+ DICompositeType createVectorType(uint64_t Size, uint64_t AlignInBits,
+ DIType Ty, DIArray Subscripts);
+
+ /// createEnumerationType - Create debugging information entry for an
+ /// enumeration.
+ /// @param Scope Scope in which this enumeration is defined.
+ /// @param Name Enumeration name.
+ /// @param File File where this enumeration is defined.
+ /// @param LineNumber Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param Elements Enumeration elements.
+ /// @param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
+ /// @param UniqueIdentifier A unique identifier for the enum.
+ DICompositeType createEnumerationType(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNumber, uint64_t SizeInBits,
+ uint64_t AlignInBits, DIArray Elements, DIType UnderlyingType,
+ StringRef UniqueIdentifier = StringRef());
+
+ /// createSubroutineType - Create subroutine type.
+ /// @param File File in which this subroutine is defined.
+ /// @param ParameterTypes An array of subroutine parameter types. This
+ /// includes return type at 0th index.
+ /// @param Flags E.g.: LValueReference.
+ /// These flags are used to emit dwarf attributes.
+ DICompositeType createSubroutineType(DIFile File, DIArray ParameterTypes,
+ unsigned Flags = 0);
+
+ /// createArtificialType - Create a new DIType with "artificial" flag set.
+ DIType createArtificialType(DIType Ty);
+
+ /// createObjectPointerType - Create a new DIType with the "object pointer"
+ /// flag set.
+ DIType createObjectPointerType(DIType Ty);
+
+ /// createForwardDecl - Create a temporary forward-declared type.
+ DICompositeType createForwardDecl(unsigned Tag, StringRef Name,
+ DIDescriptor Scope, DIFile F,
+ unsigned Line, unsigned RuntimeLang = 0,
+ uint64_t SizeInBits = 0,
+ uint64_t AlignInBits = 0,
+ StringRef UniqueIdentifier = StringRef());
+
+ /// retainType - Retain DIType in a module even if it is not referenced
+ /// through debug info anchors.
+ void retainType(DIType T);
+
+ /// createUnspecifiedParameter - Create unspecified type descriptor
+ /// for a subroutine type.
+ DIDescriptor createUnspecifiedParameter();
+
+ /// getOrCreateArray - Get a DIArray, create one if required.
+ DIArray getOrCreateArray(ArrayRef<Value *> Elements);
+
+ /// getOrCreateSubrange - Create a descriptor for a value range. This
+ /// implicitly uniques the values returned.
+ DISubrange getOrCreateSubrange(int64_t Lo, int64_t Count);
+
+ /// createGlobalVariable - Create a new descriptor for the specified global.
+ /// @param Name Name of the variable.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type.
+ /// @param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// @param Val llvm::Value of the variable.
+ DIGlobalVariable
+ createGlobalVariable(StringRef Name, DIFile File, unsigned LineNo,
+ DITypeRef Ty, bool isLocalToUnit, llvm::Value *Val);
+
+ /// \brief Create a new descriptor for the specified global.
+ /// @param Name Name of the variable.
+ /// @param LinkageName Mangled variable name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type.
+ /// @param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// @param Val llvm::Value of the variable.
+ DIGlobalVariable
+ createGlobalVariable(StringRef Name, StringRef LinkageName, DIFile File,
+ unsigned LineNo, DITypeRef Ty, bool isLocalToUnit,
+ llvm::Value *Val);
+
+ /// createStaticVariable - Create a new descriptor for the specified
+ /// variable.
+ /// @param Context Variable scope.
+ /// @param Name Name of the variable.
+ /// @param LinkageName Mangled name of the variable.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type.
+ /// @param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// @param Val llvm::Value of the variable.
+ /// @param Decl Reference to the corresponding declaration.
+ DIGlobalVariable
+ createStaticVariable(DIDescriptor Context, StringRef Name,
+ StringRef LinkageName, DIFile File, unsigned LineNo,
+ DITypeRef Ty, bool isLocalToUnit, llvm::Value *Val,
+ MDNode *Decl = NULL);
+
+
+ /// createLocalVariable - Create a new descriptor for the specified
+ /// local variable.
+ /// @param Tag Dwarf TAG. Usually DW_TAG_auto_variable or
+ /// DW_TAG_arg_variable.
+ /// @param Scope Variable scope.
+ /// @param Name Variable name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type
+ /// @param AlwaysPreserve Boolean. Set to true if debug info for this
+ /// variable should be preserved in optimized build.
+ /// @param Flags Flags, e.g. artificial variable.
+ /// @param ArgNo If this variable is an argument then this argument's
+ /// number. 1 indicates 1st argument.
+ DIVariable createLocalVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name,
+ DIFile File, unsigned LineNo,
+ DITypeRef Ty, bool AlwaysPreserve = false,
+ unsigned Flags = 0,
+ unsigned ArgNo = 0);
+
+
+ /// createComplexVariable - Create a new descriptor for the specified
+ /// variable which has a complex address expression for its address.
+ /// @param Tag Dwarf TAG. Usually DW_TAG_auto_variable or
+ /// DW_TAG_arg_variable.
+ /// @param Scope Variable scope.
+ /// @param Name Variable name.
+ /// @param F File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Variable Type
+ /// @param Addr An array of complex address operations.
+ /// @param ArgNo If this variable is an argument then this argument's
+ /// number. 1 indicates 1st argument.
+ DIVariable createComplexVariable(unsigned Tag, DIDescriptor Scope,
+ StringRef Name, DIFile F, unsigned LineNo,
+ DITypeRef Ty, ArrayRef<Value *> Addr,
+ unsigned ArgNo = 0);
+
+ /// createFunction - Create a new descriptor for the specified subprogram.
+ /// See comments in DISubprogram for descriptions of these fields.
+ /// @param Scope Function scope.
+ /// @param Name Function name.
+ /// @param LinkageName Mangled function name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Function type.
+ /// @param isLocalToUnit True if this function is not externally visible.
+ /// @param isDefinition True if this is a function definition.
+ /// @param ScopeLine Set to the beginning of the scope this starts
+ /// @param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit dwarf attributes.
+ /// @param isOptimized True if optimization is ON.
+ /// @param Fn llvm::Function pointer.
+ /// @param TParam Function template parameters.
+ DISubprogram createFunction(DIDescriptor Scope, StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DICompositeType Ty, bool isLocalToUnit,
+ bool isDefinition,
+ unsigned ScopeLine,
+ unsigned Flags = 0,
+ bool isOptimized = false,
+ Function *Fn = 0,
+ MDNode *TParam = 0,
+ MDNode *Decl = 0);
+
+ /// FIXME: this is added for dragonegg. Once we update dragonegg
+ /// to call the resolve function, this will be removed.
+ DISubprogram createFunction(DIScopeRef Scope, StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DICompositeType Ty, bool isLocalToUnit,
+ bool isDefinition,
+ unsigned ScopeLine,
+ unsigned Flags = 0,
+ bool isOptimized = false,
+ Function *Fn = 0,
+ MDNode *TParam = 0,
+ MDNode *Decl = 0);
+
+ /// createMethod - Create a new descriptor for the specified C++ method.
+ /// See comments in DISubprogram for descriptions of these fields.
+ /// @param Scope Function scope.
+ /// @param Name Function name.
+ /// @param LinkageName Mangled function name.
+ /// @param File File where this variable is defined.
+ /// @param LineNo Line number.
+ /// @param Ty Function type.
+ /// @param isLocalToUnit True if this function is not externally visible.
+ /// @param isDefinition True if this is a function definition.
+ /// @param Virtuality Attributes describing virtualness. e.g. pure
+ /// virtual function.
+ /// @param VTableIndex Index no of this method in virtual table.
+ /// @param VTableHolder Type that holds vtable.
+ /// @param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit dwarf attributes.
+ /// @param isOptimized True if optimization is ON.
+ /// @param Fn llvm::Function pointer.
+ /// @param TParam Function template parameters.
+ DISubprogram createMethod(DIDescriptor Scope, StringRef Name,
+ StringRef LinkageName,
+ DIFile File, unsigned LineNo,
+ DICompositeType Ty, bool isLocalToUnit,
+ bool isDefinition,
+ unsigned Virtuality = 0, unsigned VTableIndex = 0,
+ DIType VTableHolder = DIType(),
+ unsigned Flags = 0,
+ bool isOptimized = false,
+ Function *Fn = 0,
+ MDNode *TParam = 0);
+
+ /// createNameSpace - This creates new descriptor for a namespace
+ /// with the specified parent scope.
+ /// @param Scope Namespace scope
+ /// @param Name Name of this namespace
+ /// @param File Source file
+ /// @param LineNo Line number
+ DINameSpace createNameSpace(DIDescriptor Scope, StringRef Name,
+ DIFile File, unsigned LineNo);
+
+
+ /// createLexicalBlockFile - This creates a descriptor for a lexical
+ /// block with a new file attached. This merely extends the existing
+ /// lexical block as it crosses a file.
+ /// @param Scope Lexical block.
+ /// @param File Source file.
+ DILexicalBlockFile createLexicalBlockFile(DIDescriptor Scope,
+ DIFile File);
+
+ /// createLexicalBlock - This creates a descriptor for a lexical block
+ /// with the specified parent context.
+ /// @param Scope Parent lexical scope.
+ /// @param File Source file.
+ /// @param Line Line number.
+ /// @param Col Column number.
+ /// @param Discriminator DWARF path discriminator value.
+ DILexicalBlock createLexicalBlock(DIDescriptor Scope, DIFile File,
+ unsigned Line, unsigned Col,
+ unsigned Discriminator);
+
+ /// \brief Create a descriptor for an imported module.
+ /// @param Context The scope this module is imported into
+ /// @param NS The namespace being imported here
+ /// @param Line Line number
+ DIImportedEntity createImportedModule(DIScope Context, DINameSpace NS,
+ unsigned Line,
+ StringRef Name = StringRef());
+
+ /// \brief Create a descriptor for an imported module.
+ /// @param Context The scope this module is imported into
+ /// @param NS An aliased namespace
+ /// @param Line Line number
+ DIImportedEntity createImportedModule(DIScope Context, DIImportedEntity NS,
+ unsigned Line, StringRef Name);
+
+ /// \brief Create a descriptor for an imported function.
+ /// @param Context The scope this module is imported into
+ /// @param Decl The declaration (or definition) of a function, type, or
+ /// variable
+ /// @param Line Line number
+ DIImportedEntity createImportedDeclaration(DIScope Context,
+ DIScope Decl,
+ unsigned Line);
+
+ /// insertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+ /// @param Storage llvm::Value of the variable
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertAtEnd Location for the new intrinsic.
+ Instruction *insertDeclare(llvm::Value *Storage, DIVariable VarInfo,
+ BasicBlock *InsertAtEnd);
+
+ /// insertDeclare - Insert a new llvm.dbg.declare intrinsic call.
+ /// @param Storage llvm::Value of the variable
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertBefore Location for the new intrinsic.
+ Instruction *insertDeclare(llvm::Value *Storage, DIVariable VarInfo,
+ Instruction *InsertBefore);
+
+
+ /// insertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+ /// @param Val llvm::Value of the variable
+ /// @param Offset Offset
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertAtEnd Location for the new intrinsic.
+ Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DIVariable VarInfo,
+ BasicBlock *InsertAtEnd);
+
+ /// insertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
+ /// @param Val llvm::Value of the variable
+ /// @param Offset Offset
+ /// @param VarInfo Variable's debug info descriptor.
+ /// @param InsertBefore Location for the new intrinsic.
+ Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DIVariable VarInfo,
+ Instruction *InsertBefore);
+
+ };
+} // end namespace llvm
+
+#endif
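
To give a feel for how these entries fit together, here is a hedged sketch that builds debug info for a hypothetical 'int foo(int)' in a made-up file foo.c; the producer string, paths, and line numbers are illustrative only:

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("di-example", Ctx);
  DIBuilder DIB(M);

  // Every piece of debug info hangs off a compile unit.
  DIB.createCompileUnit(dwarf::DW_LANG_C99, "foo.c", "/tmp",
                        "example-frontend", /*isOptimized=*/false, "", 0);
  DIFile File = DIB.createFile("foo.c", "/tmp");
  DIBasicType IntTy = DIB.createBasicType("int", 32, 32, dwarf::DW_ATE_signed);

  // Subroutine types list the return type at index 0, then the parameters.
  Value *Params[] = {IntTy, IntTy};
  DICompositeType FnTy =
      DIB.createSubroutineType(File, DIB.getOrCreateArray(Params));
  DIB.createFunction(File, "foo", "foo", File, /*LineNo=*/1, FnTy,
                     /*isLocalToUnit=*/false, /*isDefinition=*/true,
                     /*ScopeLine=*/1);

  // Construct any deferred descriptors and attach the named metadata.
  DIB.finalize();
  M.print(outs(), 0);
  return 0;
}
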
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index 10630a2..59dca63 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -34,6 +34,7 @@ class Type;
class IntegerType;
class StructType;
class StructLayout;
+class Triple;
class GlobalVariable;
class LLVMContext;
template<typename T>
@@ -45,8 +46,7 @@ enum AlignTypeEnum {
INTEGER_ALIGN = 'i', ///< Integer type alignment
VECTOR_ALIGN = 'v', ///< Vector type alignment
FLOAT_ALIGN = 'f', ///< Floating point type alignment
- AGGREGATE_ALIGN = 'a', ///< Aggregate alignment
- STACK_ALIGN = 's' ///< Stack objects alignment
+ AGGREGATE_ALIGN = 'a' ///< Aggregate alignment
};
/// Layout alignment element.
@@ -78,38 +78,52 @@ struct LayoutAlignElem {
struct PointerAlignElem {
unsigned ABIAlign; ///< ABI alignment for this type/bitw
unsigned PrefAlign; ///< Pref. alignment for this type/bitw
- uint32_t TypeBitWidth; ///< Type bit width
+ uint32_t TypeByteWidth; ///< Type byte width
uint32_t AddressSpace; ///< Address space for the pointer type
/// Initializer
- static PointerAlignElem get(uint32_t addr_space, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
+ static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth);
/// Equality predicate
bool operator==(const PointerAlignElem &rhs) const;
};
-
-/// DataLayout - This class holds a parsed version of the target data layout
-/// string in a module and provides methods for querying it. The target data
-/// layout string is specified *by the target* - a frontend generating LLVM IR
-/// is required to generate the right target data for the target being codegen'd
-/// to. If some measure of portability is desired, an empty string may be
-/// specified in the module.
-class DataLayout : public ImmutablePass {
+/// This class holds a parsed version of the target data layout string in a
+/// module and provides methods for querying it. The target data layout string
+/// is specified *by the target* - a frontend generating LLVM IR is required to
+/// generate the right target data for the target being codegen'd to.
+class DataLayout {
private:
bool LittleEndian; ///< Defaults to false
unsigned StackNaturalAlign; ///< Stack natural alignment
+ enum ManglingModeT {
+ MM_None,
+ MM_ELF,
+ MM_MachO,
+ MM_WINCOFF,
+ MM_Mips
+ };
+ ManglingModeT ManglingMode;
+
SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
/// Alignments - Where the primitive type alignment data is stored.
///
- /// @sa init().
+ /// @sa reset().
/// @note Could support multiple size pointer alignments, e.g., 32-bit
/// pointers vs. 64-bit pointers by extending LayoutAlignment, but for now,
/// we don't.
SmallVector<LayoutAlignElem, 16> Alignments;
- DenseMap<unsigned, PointerAlignElem> Pointers;
+ typedef SmallVector<PointerAlignElem, 8> PointersTy;
+ PointersTy Pointers;
+
+ PointersTy::const_iterator
+ findPointerLowerBound(uint32_t AddressSpace) const {
+ return const_cast<DataLayout *>(this)->findPointerLowerBound(AddressSpace);
+ }
+
+ PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
/// InvalidAlignmentElem - This member is a signal that a requested alignment
/// type and bit width were not found in the SmallVector.
@@ -129,8 +143,8 @@ private:
bool ABIAlign, Type *Ty) const;
//! Set/initialize pointer alignments
- void setPointerAlignment(uint32_t addr_space, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
+ void setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth);
//! Internal helper method that returns requested alignment for type.
unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
@@ -155,41 +169,38 @@ private:
/// malformed.
void parseSpecifier(StringRef LayoutDescription);
+ // Free all internal data structures.
+ void clear();
+
public:
- /// Default ctor.
- ///
- /// @note This has to exist, because this is a pass, but it should never be
- /// used.
- DataLayout();
-
- /// Constructs a DataLayout from a specification string. See init().
- explicit DataLayout(StringRef LayoutDescription)
- : ImmutablePass(ID) {
- init(LayoutDescription);
+ /// Constructs a DataLayout from a specification string. See reset().
+ explicit DataLayout(StringRef LayoutDescription) : LayoutMap(0) {
+ reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
- DataLayout(const DataLayout &DL) :
- ImmutablePass(ID),
- LittleEndian(DL.isLittleEndian()),
- StackNaturalAlign(DL.StackNaturalAlign),
- LegalIntWidths(DL.LegalIntWidths),
- Alignments(DL.Alignments),
- Pointers(DL.Pointers),
- LayoutMap(0)
- { }
+ DataLayout(const DataLayout &DL) : LayoutMap(0) { *this = DL; }
+
+ DataLayout &operator=(const DataLayout &DL) {
+ clear();
+ LittleEndian = DL.isLittleEndian();
+ StackNaturalAlign = DL.StackNaturalAlign;
+ ManglingMode = DL.ManglingMode;
+ LegalIntWidths = DL.LegalIntWidths;
+ Alignments = DL.Alignments;
+ Pointers = DL.Pointers;
+ return *this;
+ }
- ~DataLayout(); // Not virtual, do not subclass this class
+ bool operator==(const DataLayout &Other) const;
+ bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
- /// DataLayout is an immutable pass, but holds state. This allows the pass
- /// manager to clear its mutable state.
- bool doFinalization(Module &M);
+ ~DataLayout(); // Not virtual, do not subclass this class
- /// Parse a data layout string (with fallback to default values). Ensure that
- /// the data layout pass is registered.
- void init(StringRef LayoutDescription);
+ /// Parse a data layout string (with fallback to default values).
+ void reset(StringRef LayoutDescription);
/// Layout endianness...
bool isLittleEndian() const { return LittleEndian; }
@@ -208,8 +219,8 @@ public:
/// The width is specified in bits.
///
bool isLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (LegalIntWidths[i] == Width)
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (LegalIntWidth == Width)
return true;
return false;
}
@@ -223,13 +234,57 @@ public:
return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
}
+ bool hasMicrosoftFastStdCallMangling() const {
+ return ManglingMode == MM_WINCOFF;
+ }
+
+ bool hasLinkerPrivateGlobalPrefix() const {
+ return ManglingMode == MM_MachO;
+ }
+
+ const char *getLinkerPrivateGlobalPrefix() const {
+ if (ManglingMode == MM_MachO)
+ return "l";
+ return getPrivateGlobalPrefix();
+ }
+
+ char getGlobalPrefix() const {
+ switch (ManglingMode) {
+ case MM_None:
+ case MM_ELF:
+ case MM_Mips:
+ return '\0';
+ case MM_MachO:
+ case MM_WINCOFF:
+ return '_';
+ }
+ llvm_unreachable("invalid mangling mode");
+ }
+
+ const char *getPrivateGlobalPrefix() const {
+ switch (ManglingMode) {
+ case MM_None:
+ return "";
+ case MM_ELF:
+ return ".L";
+ case MM_Mips:
+ return "$";
+ case MM_MachO:
+ case MM_WINCOFF:
+ return "L";
+ }
+ llvm_unreachable("invalid mangling mode");
+ }
+
+ static const char *getManglingComponent(const Triple &T);
+
/// fitsInLegalInteger - This function returns true if the specified type fits
/// in a native integer type supported by the CPU. For example, if the CPU
/// only supports i32 as a native integer type, then i27 fits in a legal
- // integer type but i45 does not.
+ /// integer type but i45 does not.
bool fitsInLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (Width <= LegalIntWidths[i])
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (Width <= LegalIntWidth)
return true;
return false;
}
@@ -237,34 +292,18 @@ public:
/// Layout pointer alignment
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- unsigned getPointerABIAlignment(unsigned AS = 0) const {
- DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
- if (val == Pointers.end()) {
- val = Pointers.find(0);
- }
- return val->second.ABIAlign;
- }
+ unsigned getPointerABIAlignment(unsigned AS = 0) const;
/// Return target's alignment for stack-based pointers
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- unsigned getPointerPrefAlignment(unsigned AS = 0) const {
- DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
- if (val == Pointers.end()) {
- val = Pointers.find(0);
- }
- return val->second.PrefAlign;
- }
+ unsigned getPointerPrefAlignment(unsigned AS = 0) const;
+
/// Layout pointer size
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- unsigned getPointerSize(unsigned AS = 0) const {
- DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
- if (val == Pointers.end()) {
- val = Pointers.find(0);
- }
- return val->second.TypeBitWidth;
- }
+ unsigned getPointerSize(unsigned AS = 0) const;
+
/// Layout pointer size, in bits
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
@@ -344,10 +383,6 @@ public:
/// an integer type of the specified bitwidth.
unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
- /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
- /// for the specified type when it is part of a call frame.
- unsigned getCallFrameTypeAlignment(Type *Ty) const;
-
/// getPrefTypeAlignment - Return the preferred stack/global alignment for
/// the specified type. This is always at least as good as the ABI alignment.
unsigned getPrefTypeAlignment(Type *Ty) const;
@@ -408,6 +443,23 @@ public:
assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
}
+};
+
+class DataLayoutPass : public ImmutablePass {
+ DataLayout DL;
+
+public:
+ /// This has to exist, because this is a pass, but it should never be used.
+ DataLayoutPass();
+ ~DataLayoutPass();
+
+ const DataLayout &getDataLayout() const { return DL; }
+
+ // For use with the C API. C++ code should always use the constructor that
+ // takes a module.
+ explicit DataLayoutPass(const DataLayout &DL);
+
+ explicit DataLayoutPass(const Module *M);
static char ID; // Pass identification, replacement for typeid
};
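
Since DataLayout is now a plain value type separate from DataLayoutPass, it can be constructed, copied, and compared directly. A small sketch, using an illustrative x86-64-style layout string (the exact string is an assumption, not taken from this diff):

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  // "e" = little endian, "m:e" = ELF mangling, "p:64:64" = 64-bit pointers.
  DataLayout DL("e-m:e-p:64:64-i64:64-f80:128-n8:16:32:64-S128");
  DataLayout Copy = DL; // value semantics; no pass manager involved

  outs() << "little endian:        " << DL.isLittleEndian() << "\n";
  outs() << "pointer size (bytes): " << DL.getPointerSize() << "\n";
  outs() << "pointer ABI align:    " << DL.getPointerABIAlignment() << "\n";
  outs() << "private prefix:       " << DL.getPrivateGlobalPrefix() << "\n"; // ".L" for ELF
  outs() << "copies compare equal: " << (Copy == DL) << "\n";
  return 0;
}
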
diff --git a/include/llvm/IR/DebugInfo.h b/include/llvm/IR/DebugInfo.h
new file mode 100644
index 0000000..f7244b8
--- /dev/null
+++ b/include/llvm/IR/DebugInfo.h
@@ -0,0 +1,929 @@
+//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a bunch of datatypes that are useful for creating and
+// walking debug info in LLVM IR form. They essentially provide wrappers around
+// the information in the global variables that's needed when constructing the
+// DWARF information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGINFO_H
+#define LLVM_IR_DEBUGINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Dwarf.h"
+
+namespace llvm {
+class BasicBlock;
+class Constant;
+class Function;
+class GlobalVariable;
+class Module;
+class Type;
+class Value;
+class DbgDeclareInst;
+class DbgValueInst;
+class Instruction;
+class MDNode;
+class MDString;
+class NamedMDNode;
+class LLVMContext;
+class raw_ostream;
+
+class DIFile;
+class DISubprogram;
+class DILexicalBlock;
+class DILexicalBlockFile;
+class DIVariable;
+class DIType;
+class DIScope;
+class DIObjCProperty;
+
+/// Maps from type identifier to the actual MDNode.
+typedef DenseMap<const MDString *, MDNode *> DITypeIdentifierMap;
+
+/// DIDescriptor - A thin wrapper around MDNode to access encoded debug info.
+/// This should not be stored in a container, because the underlying MDNode
+/// may change in certain situations.
+class DIDescriptor {
+ // Befriends DIRef so DIRef can befriend the protected member
+ // function: getFieldAs<DIRef>.
+ template <typename T> friend class DIRef;
+
+public:
+ enum {
+ FlagPrivate = 1 << 0,
+ FlagProtected = 1 << 1,
+ FlagFwdDecl = 1 << 2,
+ FlagAppleBlock = 1 << 3,
+ FlagBlockByrefStruct = 1 << 4,
+ FlagVirtual = 1 << 5,
+ FlagArtificial = 1 << 6,
+ FlagExplicit = 1 << 7,
+ FlagPrototyped = 1 << 8,
+ FlagObjcClassComplete = 1 << 9,
+ FlagObjectPointer = 1 << 10,
+ FlagVector = 1 << 11,
+ FlagStaticMember = 1 << 12,
+ FlagIndirectVariable = 1 << 13,
+ FlagLValueReference = 1 << 14,
+ FlagRValueReference = 1 << 15
+ };
+
+protected:
+ const MDNode *DbgNode;
+
+ StringRef getStringField(unsigned Elt) const;
+ unsigned getUnsignedField(unsigned Elt) const {
+ return (unsigned)getUInt64Field(Elt);
+ }
+ uint64_t getUInt64Field(unsigned Elt) const;
+ int64_t getInt64Field(unsigned Elt) const;
+ DIDescriptor getDescriptorField(unsigned Elt) const;
+
+ template <typename DescTy> DescTy getFieldAs(unsigned Elt) const {
+ return DescTy(getDescriptorField(Elt));
+ }
+
+ GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+ Constant *getConstantField(unsigned Elt) const;
+ Function *getFunctionField(unsigned Elt) const;
+ void replaceFunctionField(unsigned Elt, Function *F);
+
+public:
+ explicit DIDescriptor(const MDNode *N = 0) : DbgNode(N) {}
+
+ bool Verify() const;
+
+ operator MDNode *() const { return const_cast<MDNode *>(DbgNode); }
+ MDNode *operator->() const { return const_cast<MDNode *>(DbgNode); }
+
+ // An explicit operator bool so that we can do testing of DI values
+ // easily.
+ // FIXME: This operator bool isn't actually protecting anything at the
+ // moment due to the conversion operator above making DIDescriptor nodes
+ // implicitly convertible to bool.
+ LLVM_EXPLICIT operator bool() const { return DbgNode != 0; }
+
+ bool operator==(DIDescriptor Other) const { return DbgNode == Other.DbgNode; }
+ bool operator!=(DIDescriptor Other) const { return !operator==(Other); }
+
+ uint16_t getTag() const {
+ return getUnsignedField(0) & ~LLVMDebugVersionMask;
+ }
+
+ bool isDerivedType() const;
+ bool isCompositeType() const;
+ bool isBasicType() const;
+ bool isVariable() const;
+ bool isSubprogram() const;
+ bool isGlobalVariable() const;
+ bool isScope() const;
+ bool isFile() const;
+ bool isCompileUnit() const;
+ bool isNameSpace() const;
+ bool isLexicalBlockFile() const;
+ bool isLexicalBlock() const;
+ bool isSubrange() const;
+ bool isEnumerator() const;
+ bool isType() const;
+ bool isUnspecifiedParameter() const;
+ bool isTemplateTypeParameter() const;
+ bool isTemplateValueParameter() const;
+ bool isObjCProperty() const;
+ bool isImportedEntity() const;
+
+ /// print - print descriptor.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print descriptor to dbgs() with a newline.
+ void dump() const;
+};
+
+/// DISubrange - This is used to represent ranges, for array bounds.
+class DISubrange : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ int64_t getLo() const { return getInt64Field(1); }
+ int64_t getCount() const { return getInt64Field(2); }
+ bool Verify() const;
+};
+
+/// DIArray - This descriptor holds an array of descriptors.
+class DIArray : public DIDescriptor {
+public:
+ explicit DIArray(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ unsigned getNumElements() const;
+ DIDescriptor getElement(unsigned Idx) const {
+ return getDescriptorField(Idx);
+ }
+};
+
+/// DIEnumerator - A wrapper for an enumerator (e.g. X and Y in 'enum {X,Y}').
+/// FIXME: it seems strange that this doesn't have either a reference to the
+/// type/precision or a file/line pair for location info.
+class DIEnumerator : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIEnumerator(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ StringRef getName() const { return getStringField(1); }
+ int64_t getEnumValue() const { return getInt64Field(2); }
+ bool Verify() const;
+};
+
+template <typename T> class DIRef;
+typedef DIRef<DIScope> DIScopeRef;
+typedef DIRef<DIType> DITypeRef;
+
+/// DIScope - A base class for various scopes.
+///
+/// Although, implementation-wise, DIScope is the parent class of most
+/// other DIxxx classes, including DIType and its descendants, most of
+/// DIScope's descendants are not substitutable subtypes of
+/// DIScope. The DIDescriptor::isScope() method is true only for
+/// DIScopes that are scopes in the strict lexical scope sense
+/// (DICompileUnit, DISubprogram, etc.), but not for, e.g., a DIType.
+class DIScope : public DIDescriptor {
+protected:
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIScope(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ /// Gets the parent scope for this scope node or returns a
+ /// default constructed scope.
+ DIScopeRef getContext() const;
+ /// If the scope node has a name, return that, else return an empty string.
+ StringRef getName() const;
+ StringRef getFilename() const;
+ StringRef getDirectory() const;
+
+ /// Generate a reference to this DIScope. Uses the type identifier instead
+ /// of the actual MDNode if possible, to help type uniquing.
+ DIScopeRef getRef() const;
+};
+
+/// Represents reference to a DIDescriptor, abstracts over direct and
+/// identifier-based metadata references.
+template <typename T> class DIRef {
+ template <typename DescTy>
+ friend DescTy DIDescriptor::getFieldAs(unsigned Elt) const;
+ friend DIScopeRef DIScope::getContext() const;
+ friend DIScopeRef DIScope::getRef() const;
+ friend class DIType;
+
+ /// Val can be either a MDNode or a MDString, in the latter,
+ /// MDString specifies the type identifier.
+ const Value *Val;
+ explicit DIRef(const Value *V);
+
+public:
+ T resolve(const DITypeIdentifierMap &Map) const;
+ StringRef getName() const;
+ operator Value *() const { return const_cast<Value *>(Val); }
+};
+
+template <typename T>
+T DIRef<T>::resolve(const DITypeIdentifierMap &Map) const {
+ if (!Val)
+ return T();
+
+ if (const MDNode *MD = dyn_cast<MDNode>(Val))
+ return T(MD);
+
+ const MDString *MS = cast<MDString>(Val);
+ // Find the corresponding MDNode.
+ DITypeIdentifierMap::const_iterator Iter = Map.find(MS);
+ assert(Iter != Map.end() && "Identifier not in the type map?");
+ assert(DIDescriptor(Iter->second).isType() &&
+ "MDNode in DITypeIdentifierMap should be a DIType.");
+ return T(Iter->second);
+}
+
+template <typename T> StringRef DIRef<T>::getName() const {
+ if (!Val)
+ return StringRef();
+
+ if (const MDNode *MD = dyn_cast<MDNode>(Val))
+ return T(MD).getName();
+
+ const MDString *MS = cast<MDString>(Val);
+ return MS->getString();
+}
+
+/// Specialize getFieldAs to handle fields that are references to DIScopes.
+template <> DIScopeRef DIDescriptor::getFieldAs<DIScopeRef>(unsigned Elt) const;
+/// Specialize DIRef constructor for DIScopeRef.
+template <> DIRef<DIScope>::DIRef(const Value *V);
+
+/// Specialize getFieldAs to handle fields that are references to DITypes.
+template <> DITypeRef DIDescriptor::getFieldAs<DITypeRef>(unsigned Elt) const;
+/// Specialize DIRef constructor for DITypeRef.
+template <> DIRef<DIType>::DIRef(const Value *V);
+
+/// DIType - This is a wrapper for a type.
+/// FIXME: Types should be factored much better so that CV qualifiers and
+/// others do not require a huge and empty descriptor full of zeros.
+class DIType : public DIScope {
+protected:
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIType(const MDNode *N = 0) : DIScope(N) {}
+ operator DITypeRef () const {
+ assert(isType() &&
+ "constructing DITypeRef from an MDNode that is not a type");
+ return DITypeRef(&*getRef());
+ }
+
+ /// Verify - Verify that a type descriptor is well formed.
+ bool Verify() const;
+
+ DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(2); }
+ StringRef getName() const { return getStringField(3); }
+ unsigned getLineNumber() const { return getUnsignedField(4); }
+ uint64_t getSizeInBits() const { return getUInt64Field(5); }
+ uint64_t getAlignInBits() const { return getUInt64Field(6); }
+ // FIXME: Offset is only used for DW_TAG_member nodes. Making every type
+ // carry this is just plain insane.
+ uint64_t getOffsetInBits() const { return getUInt64Field(7); }
+ unsigned getFlags() const { return getUnsignedField(8); }
+ bool isPrivate() const { return (getFlags() & FlagPrivate) != 0; }
+ bool isProtected() const { return (getFlags() & FlagProtected) != 0; }
+ bool isForwardDecl() const { return (getFlags() & FlagFwdDecl) != 0; }
+ // isAppleBlockExtension - Return true if this is the Apple Blocks extension.
+ bool isAppleBlockExtension() const {
+ return (getFlags() & FlagAppleBlock) != 0;
+ }
+ bool isBlockByrefStruct() const {
+ return (getFlags() & FlagBlockByrefStruct) != 0;
+ }
+ bool isVirtual() const { return (getFlags() & FlagVirtual) != 0; }
+ bool isArtificial() const { return (getFlags() & FlagArtificial) != 0; }
+ bool isObjectPointer() const { return (getFlags() & FlagObjectPointer) != 0; }
+ bool isObjcClassComplete() const {
+ return (getFlags() & FlagObjcClassComplete) != 0;
+ }
+ bool isVector() const { return (getFlags() & FlagVector) != 0; }
+ bool isStaticMember() const { return (getFlags() & FlagStaticMember) != 0; }
+ bool isLValueReference() const {
+ return (getFlags() & FlagLValueReference) != 0;
+ }
+ bool isRValueReference() const {
+ return (getFlags() & FlagRValueReference) != 0;
+ }
+ bool isValid() const { return DbgNode && isType(); }
+
+ /// replaceAllUsesWith - Replace all uses of debug info referenced by
+ /// this descriptor.
+ void replaceAllUsesWith(DIDescriptor &D);
+ void replaceAllUsesWith(MDNode *D);
+};
+
+/// DIBasicType - A basic type, like 'int' or 'float'.
+class DIBasicType : public DIType {
+public:
+ explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
+
+ unsigned getEncoding() const { return getUnsignedField(9); }
+
+ /// Verify - Verify that a basic type descriptor is well formed.
+ bool Verify() const;
+};
+
+/// DIDerivedType - A simple derived type, like a const qualified type,
+/// a typedef, a pointer or reference, et cetera. Or, a data member of
+/// a class/struct/union.
+class DIDerivedType : public DIType {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIDerivedType(const MDNode *N = 0) : DIType(N) {}
+
+ DITypeRef getTypeDerivedFrom() const { return getFieldAs<DITypeRef>(9); }
+
+ /// getObjCProperty - Return property node, if this ivar is
+ /// associated with one.
+ MDNode *getObjCProperty() const;
+
+ DITypeRef getClassType() const {
+ assert(getTag() == dwarf::DW_TAG_ptr_to_member_type);
+ return getFieldAs<DITypeRef>(10);
+ }
+
+ Constant *getConstant() const {
+ assert((getTag() == dwarf::DW_TAG_member) && isStaticMember());
+ return getConstantField(10);
+ }
+
+ /// Verify - Verify that a derived type descriptor is well formed.
+ bool Verify() const;
+};
+
+/// DICompositeType - This descriptor holds a type that can refer to multiple
+/// other types, like a function or struct.
+/// DICompositeType is derived from DIDerivedType because some
+/// composite types (such as enums) can be derived from basic types.
+// FIXME: Make this derive from DIType directly & just store the
+// base type in a single DIType field.
+class DICompositeType : public DIDerivedType {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DICompositeType(const MDNode *N = 0) : DIDerivedType(N) {}
+
+ DIArray getTypeArray() const { return getFieldAs<DIArray>(10); }
+ void setTypeArray(DIArray Elements, DIArray TParams = DIArray());
+ unsigned getRunTimeLang() const { return getUnsignedField(11); }
+ DITypeRef getContainingType() const { return getFieldAs<DITypeRef>(12); }
+ void setContainingType(DICompositeType ContainingType);
+ DIArray getTemplateParams() const { return getFieldAs<DIArray>(13); }
+ MDString *getIdentifier() const;
+
+ /// Verify - Verify that a composite type descriptor is well formed.
+ bool Verify() const;
+};
+
+/// DIFile - This is a wrapper for a file.
+class DIFile : public DIScope {
+ friend class DIDescriptor;
+
+public:
+ explicit DIFile(const MDNode *N = 0) : DIScope(N) {}
+ MDNode *getFileNode() const;
+ bool Verify() const;
+};
+
+/// DICompileUnit - A wrapper for a compile unit.
+class DICompileUnit : public DIScope {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
+
+ unsigned getLanguage() const { return getUnsignedField(2); }
+ StringRef getProducer() const { return getStringField(3); }
+
+ bool isOptimized() const { return getUnsignedField(4) != 0; }
+ StringRef getFlags() const { return getStringField(5); }
+ unsigned getRunTimeVersion() const { return getUnsignedField(6); }
+
+ DIArray getEnumTypes() const;
+ DIArray getRetainedTypes() const;
+ DIArray getSubprograms() const;
+ DIArray getGlobalVariables() const;
+ DIArray getImportedEntities() const;
+
+ StringRef getSplitDebugFilename() const { return getStringField(12); }
+ unsigned getEmissionKind() const { return getUnsignedField(13); }
+
+ /// Verify - Verify that a compile unit is well formed.
+ bool Verify() const;
+};
+
+/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
+class DISubprogram : public DIScope {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
+
+ DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(2); }
+ StringRef getName() const { return getStringField(3); }
+ StringRef getDisplayName() const { return getStringField(4); }
+ StringRef getLinkageName() const { return getStringField(5); }
+ unsigned getLineNumber() const { return getUnsignedField(6); }
+ DICompositeType getType() const { return getFieldAs<DICompositeType>(7); }
+
+ /// isLocalToUnit - Return true if this subprogram is local to the current
+ /// compile unit, like 'static' in C.
+ unsigned isLocalToUnit() const { return getUnsignedField(8); }
+ unsigned isDefinition() const { return getUnsignedField(9); }
+
+ unsigned getVirtuality() const { return getUnsignedField(10); }
+ unsigned getVirtualIndex() const { return getUnsignedField(11); }
+
+ DITypeRef getContainingType() const { return getFieldAs<DITypeRef>(12); }
+
+ unsigned getFlags() const { return getUnsignedField(13); }
+
+ unsigned isArtificial() const {
+ return (getUnsignedField(13) & FlagArtificial) != 0;
+ }
+ /// isPrivate - Return true if this subprogram has "private"
+ /// access specifier.
+ bool isPrivate() const { return (getUnsignedField(13) & FlagPrivate) != 0; }
+ /// isProtected - Return true if this subprogram has "protected"
+ /// access specifier.
+ bool isProtected() const {
+ return (getUnsignedField(13) & FlagProtected) != 0;
+ }
+ /// isExplicit - Return true if this subprogram is marked as explicit.
+ bool isExplicit() const { return (getUnsignedField(13) & FlagExplicit) != 0; }
+ /// isPrototyped - Return true if this subprogram is prototyped.
+ bool isPrototyped() const {
+ return (getUnsignedField(13) & FlagPrototyped) != 0;
+ }
+
+ /// Return true if this subprogram is a C++11 reference-qualified
+ /// non-static member function (void foo() &).
+ unsigned isLValueReference() const {
+ return (getUnsignedField(13) & FlagLValueReference) != 0;
+ }
+
+ /// Return true if this subprogram is a C++11
+ /// rvalue-reference-qualified non-static member function
+ /// (void foo() &&).
+ unsigned isRValueReference() const {
+ return (getUnsignedField(13) & FlagRValueReference) != 0;
+ }
+
+ unsigned isOptimized() const;
+
+ /// Verify - Verify that a subprogram descriptor is well formed.
+ bool Verify() const;
+
+ /// describes - Return true if this subprogram provides debugging
+ /// information for the function F.
+ bool describes(const Function *F);
+
+ Function *getFunction() const { return getFunctionField(15); }
+ void replaceFunction(Function *F) { replaceFunctionField(15, F); }
+ DIArray getTemplateParams() const { return getFieldAs<DIArray>(16); }
+ DISubprogram getFunctionDeclaration() const {
+ return getFieldAs<DISubprogram>(17);
+ }
+ MDNode *getVariablesNodes() const;
+ DIArray getVariables() const;
+
+  /// getScopeLineNumber - Get the line number of the beginning of the
+  /// function's scope, which is not necessarily the line on which the
+  /// function's name appears.
+ unsigned getScopeLineNumber() const { return getUnsignedField(19); }
+};
+
+/// DILexicalBlock - This is a wrapper for a lexical block.
+class DILexicalBlock : public DIScope {
+public:
+ explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
+ DIScope getContext() const { return getFieldAs<DIScope>(2); }
+ unsigned getLineNumber() const { return getUnsignedField(3); }
+ unsigned getColumnNumber() const { return getUnsignedField(4); }
+ unsigned getDiscriminator() const { return getUnsignedField(5); }
+ bool Verify() const;
+};
+
+/// DILexicalBlockFile - This is a wrapper for a lexical block with
+/// a filename change.
+class DILexicalBlockFile : public DIScope {
+public:
+ explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {}
+ DIScope getContext() const {
+ if (getScope().isSubprogram())
+ return getScope();
+ return getScope().getContext();
+ }
+ unsigned getLineNumber() const { return getScope().getLineNumber(); }
+ unsigned getColumnNumber() const { return getScope().getColumnNumber(); }
+ DILexicalBlock getScope() const { return getFieldAs<DILexicalBlock>(2); }
+ bool Verify() const;
+};
+
+/// DINameSpace - A wrapper for a C++ style name space.
+class DINameSpace : public DIScope {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
+ DIScope getContext() const { return getFieldAs<DIScope>(2); }
+ StringRef getName() const { return getStringField(3); }
+ unsigned getLineNumber() const { return getUnsignedField(4); }
+ bool Verify() const;
+};
+
+/// DIUnspecifiedParameter - This is a wrapper for unspecified parameters.
+class DIUnspecifiedParameter : public DIDescriptor {
+public:
+ explicit DIUnspecifiedParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+ bool Verify() const;
+};
+
+/// DITemplateTypeParameter - This is a wrapper for template type parameter.
+class DITemplateTypeParameter : public DIDescriptor {
+public:
+ explicit DITemplateTypeParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DITypeRef getType() const { return getFieldAs<DITypeRef>(3); }
+ StringRef getFilename() const { return getFieldAs<DIFile>(4).getFilename(); }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(4).getDirectory();
+ }
+ unsigned getLineNumber() const { return getUnsignedField(5); }
+ unsigned getColumnNumber() const { return getUnsignedField(6); }
+ bool Verify() const;
+};
+
+/// DITemplateValueParameter - This is a wrapper for template value parameter.
+class DITemplateValueParameter : public DIDescriptor {
+public:
+ explicit DITemplateValueParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DITypeRef getType() const { return getFieldAs<DITypeRef>(3); }
+ Value *getValue() const;
+ StringRef getFilename() const { return getFieldAs<DIFile>(5).getFilename(); }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(5).getDirectory();
+ }
+ unsigned getLineNumber() const { return getUnsignedField(6); }
+ unsigned getColumnNumber() const { return getUnsignedField(7); }
+ bool Verify() const;
+};
+
+/// DIGlobalVariable - This is a wrapper for a global variable.
+class DIGlobalVariable : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIGlobalVariable(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScope getContext() const { return getFieldAs<DIScope>(2); }
+ StringRef getName() const { return getStringField(3); }
+ StringRef getDisplayName() const { return getStringField(4); }
+ StringRef getLinkageName() const { return getStringField(5); }
+ StringRef getFilename() const { return getFieldAs<DIFile>(6).getFilename(); }
+ StringRef getDirectory() const {
+ return getFieldAs<DIFile>(6).getDirectory();
+ }
+
+ unsigned getLineNumber() const { return getUnsignedField(7); }
+ DITypeRef getType() const { return getFieldAs<DITypeRef>(8); }
+ unsigned isLocalToUnit() const { return getUnsignedField(9); }
+ unsigned isDefinition() const { return getUnsignedField(10); }
+
+ GlobalVariable *getGlobal() const { return getGlobalVariableField(11); }
+ Constant *getConstant() const { return getConstantField(11); }
+ DIDerivedType getStaticDataMemberDeclaration() const {
+ return getFieldAs<DIDerivedType>(12);
+ }
+
+ /// Verify - Verify that a global variable descriptor is well formed.
+ bool Verify() const;
+};
+
+/// DIVariable - This is a wrapper for a variable (e.g. parameter, local,
+/// global etc).
+class DIVariable : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIVariable(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DIFile getFile() const { return getFieldAs<DIFile>(3); }
+ unsigned getLineNumber() const { return (getUnsignedField(4) << 8) >> 8; }
+ unsigned getArgNumber() const {
+ unsigned L = getUnsignedField(4);
+ return L >> 24;
+ }
+ DITypeRef getType() const { return getFieldAs<DITypeRef>(5); }
+
+ /// isArtificial - Return true if this variable is marked as "artificial".
+ bool isArtificial() const {
+ return (getUnsignedField(6) & FlagArtificial) != 0;
+ }
+
+ bool isObjectPointer() const {
+ return (getUnsignedField(6) & FlagObjectPointer) != 0;
+ }
+
+ /// \brief Return true if this variable is represented as a pointer.
+ bool isIndirect() const {
+ return (getUnsignedField(6) & FlagIndirectVariable) != 0;
+ }
+
+ /// getInlinedAt - If this variable is inlined then return inline location.
+ MDNode *getInlinedAt() const;
+
+ /// Verify - Verify that a variable descriptor is well formed.
+ bool Verify() const;
+
+  /// hasComplexAddress - Return true if the variable has a complex address.
+ bool hasComplexAddress() const { return getNumAddrElements() > 0; }
+
+ unsigned getNumAddrElements() const;
+
+ uint64_t getAddrElement(unsigned Idx) const {
+ return getUInt64Field(Idx + 8);
+ }
+
+ /// isBlockByrefVariable - Return true if the variable was declared as
+ /// a "__block" variable (Apple Blocks).
+ bool isBlockByrefVariable(const DITypeIdentifierMap &Map) const {
+ return (getType().resolve(Map)).isBlockByrefStruct();
+ }
+
+  /// isInlinedFnArgument - Return true if this variable provides debugging
+  /// information for an inlined function argument.
+ bool isInlinedFnArgument(const Function *CurFn);
+
+ void printExtendedName(raw_ostream &OS) const;
+};
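Field 4 of a DIVariable packs the argument number into the top 8 bits and the line number into the low 24 bits, which is what the shifts in getLineNumber() and getArgNumber() above undo. A small worked example (the raw value is purely illustrative):

    uint32_t LineAndArg = 0x02000040;        // arg #2 in the top byte, line 64 below
    unsigned Line = (LineAndArg << 8) >> 8;  // == 64, argument byte masked off
    unsigned Arg  = LineAndArg >> 24;        // == 2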
+
+/// DILocation - This object holds location information. This object
+/// is not associated with any DWARF tag.
+class DILocation : public DIDescriptor {
+public:
+ explicit DILocation(const MDNode *N) : DIDescriptor(N) {}
+
+ unsigned getLineNumber() const { return getUnsignedField(0); }
+ unsigned getColumnNumber() const { return getUnsignedField(1); }
+ DIScope getScope() const { return getFieldAs<DIScope>(2); }
+ DILocation getOrigLocation() const { return getFieldAs<DILocation>(3); }
+ StringRef getFilename() const { return getScope().getFilename(); }
+ StringRef getDirectory() const { return getScope().getDirectory(); }
+ bool Verify() const;
+ bool atSameLineAs(const DILocation &Other) const {
+ return (getLineNumber() == Other.getLineNumber() &&
+ getFilename() == Other.getFilename());
+ }
+ /// getDiscriminator - DWARF discriminators are used to distinguish
+ /// identical file locations for instructions that are on different
+ /// basic blocks. If two instructions are inside the same lexical block
+ /// and are in different basic blocks, we create a new lexical block
+  /// with the same location as the original but with a different
+  /// discriminator value (see lib/Transforms/Utils/AddDiscriminators.cpp
+  /// for details).
+ unsigned getDiscriminator() const {
+ // Since discriminators are associated with lexical blocks, make
+ // sure this location is a lexical block before retrieving its
+ // value.
+ return getScope().isLexicalBlock()
+ ? getFieldAs<DILexicalBlock>(2).getDiscriminator()
+ : 0;
+ }
+ unsigned computeNewDiscriminator(LLVMContext &Ctx);
+ DILocation copyWithNewScope(LLVMContext &Ctx, DILexicalBlock NewScope);
+};
+
+class DIObjCProperty : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIObjCProperty(const MDNode *N) : DIDescriptor(N) {}
+
+ StringRef getObjCPropertyName() const { return getStringField(1); }
+ DIFile getFile() const { return getFieldAs<DIFile>(2); }
+ unsigned getLineNumber() const { return getUnsignedField(3); }
+
+ StringRef getObjCPropertyGetterName() const { return getStringField(4); }
+ StringRef getObjCPropertySetterName() const { return getStringField(5); }
+ bool isReadOnlyObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_readonly) != 0;
+ }
+ bool isReadWriteObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_readwrite) != 0;
+ }
+ bool isAssignObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_assign) != 0;
+ }
+ bool isRetainObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_retain) != 0;
+ }
+ bool isCopyObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_copy) != 0;
+ }
+ bool isNonAtomicObjCProperty() const {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_nonatomic) != 0;
+ }
+
+ /// Objective-C doesn't have an ODR, so there is no benefit in storing
+ /// the type as a DITypeRef here.
+ DIType getType() const { return getFieldAs<DIType>(7); }
+
+ /// Verify - Verify that a derived type descriptor is well formed.
+ bool Verify() const;
+};
+
+/// \brief An imported module (C++ using directive or similar).
+class DIImportedEntity : public DIDescriptor {
+ friend class DIDescriptor;
+ void printInternal(raw_ostream &OS) const;
+
+public:
+ explicit DIImportedEntity(const MDNode *N) : DIDescriptor(N) {}
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
+ DIScopeRef getEntity() const { return getFieldAs<DIScopeRef>(2); }
+ unsigned getLineNumber() const { return getUnsignedField(3); }
+ StringRef getName() const { return getStringField(4); }
+ bool Verify() const;
+};
+
+/// getDISubprogram - Find subprogram that is enclosing this scope.
+DISubprogram getDISubprogram(const MDNode *Scope);
+
+/// getDICompositeType - Find underlying composite type.
+DICompositeType getDICompositeType(DIType T);
+
+/// getOrInsertFnSpecificMDNode - Return a NamedMDNode that is suitable
+/// to hold function-specific information.
+NamedMDNode *getOrInsertFnSpecificMDNode(Module &M, DISubprogram SP);
+
+/// getFnSpecificMDNode - Return a NamedMDNode, if available, that is
+/// suitable to hold function-specific information.
+NamedMDNode *getFnSpecificMDNode(const Module &M, DISubprogram SP);
+
+/// createInlinedVariable - Create a new inlined variable based on the
+/// current variable.
+/// @param DV            Current variable.
+/// @param InlinedScope  Location at which the current variable is inlined.
+DIVariable createInlinedVariable(MDNode *DV, MDNode *InlinedScope,
+ LLVMContext &VMContext);
+
+/// cleanseInlinedVariable - Remove inlined scope from the variable.
+DIVariable cleanseInlinedVariable(MDNode *DV, LLVMContext &VMContext);
+
+/// Construct DITypeIdentifierMap by going through retained types of each CU.
+DITypeIdentifierMap generateDITypeIdentifierMap(const NamedMDNode *CU_Nodes);
+
+/// Strip debug info in the module if it exists.
+/// To do this, we remove all calls to the debugger intrinsics and any named
+/// metadata for debugging. We also remove debug locations for instructions.
+/// Return true if module is modified.
+bool StripDebugInfo(Module &M);
+
+/// Return Debug Info Metadata Version by checking module flags.
+unsigned getDebugMetadataVersionFromModule(const Module &M);
+
+/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
+/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
+/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
+/// DbgValueInst and DebugLocs attached to instructions. processModule will go
+/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
+/// used by the CUs.
+class DebugInfoFinder {
+public:
+ DebugInfoFinder() : TypeMapInitialized(false) {}
+
+ /// processModule - Process entire module and collect debug info
+ /// anchors.
+ void processModule(const Module &M);
+
+ /// processDeclare - Process DbgDeclareInst.
+ void processDeclare(const Module &M, const DbgDeclareInst *DDI);
+ /// Process DbgValueInst.
+ void processValue(const Module &M, const DbgValueInst *DVI);
+ /// processLocation - Process DILocation.
+ void processLocation(const Module &M, DILocation Loc);
+
+ /// Clear all lists.
+ void reset();
+
+private:
+ /// Initialize TypeIdentifierMap.
+ void InitializeTypeMap(const Module &M);
+
+ /// processType - Process DIType.
+ void processType(DIType DT);
+
+ /// processSubprogram - Process DISubprogram.
+ void processSubprogram(DISubprogram SP);
+
+ void processScope(DIScope Scope);
+
+ /// addCompileUnit - Add compile unit into CUs.
+ bool addCompileUnit(DICompileUnit CU);
+
+ /// addGlobalVariable - Add global variable into GVs.
+ bool addGlobalVariable(DIGlobalVariable DIG);
+
+ // addSubprogram - Add subprogram into SPs.
+ bool addSubprogram(DISubprogram SP);
+
+ /// addType - Add type into Tys.
+ bool addType(DIType DT);
+
+ bool addScope(DIScope Scope);
+
+public:
+ typedef SmallVectorImpl<DICompileUnit>::const_iterator compile_unit_iterator;
+ typedef SmallVectorImpl<DISubprogram>::const_iterator subprogram_iterator;
+ typedef SmallVectorImpl<DIGlobalVariable>::const_iterator global_variable_iterator;
+ typedef SmallVectorImpl<DIType>::const_iterator type_iterator;
+ typedef SmallVectorImpl<DIScope>::const_iterator scope_iterator;
+
+ iterator_range<compile_unit_iterator> compile_units() const {
+ return iterator_range<compile_unit_iterator>(CUs.begin(), CUs.end());
+ }
+
+ iterator_range<subprogram_iterator> subprograms() const {
+ return iterator_range<subprogram_iterator>(SPs.begin(), SPs.end());
+ }
+
+ iterator_range<global_variable_iterator> global_variables() const {
+ return iterator_range<global_variable_iterator>(GVs.begin(), GVs.end());
+ }
+
+ iterator_range<type_iterator> types() const {
+ return iterator_range<type_iterator>(TYs.begin(), TYs.end());
+ }
+
+ iterator_range<scope_iterator> scopes() const {
+ return iterator_range<scope_iterator>(Scopes.begin(), Scopes.end());
+ }
+
+ unsigned compile_unit_count() const { return CUs.size(); }
+ unsigned global_variable_count() const { return GVs.size(); }
+ unsigned subprogram_count() const { return SPs.size(); }
+ unsigned type_count() const { return TYs.size(); }
+ unsigned scope_count() const { return Scopes.size(); }
+
+private:
+ SmallVector<DICompileUnit, 8> CUs; // Compile Units
+ SmallVector<DISubprogram, 8> SPs; // Subprograms
+  SmallVector<DIGlobalVariable, 8> GVs;  // Global Variables
+ SmallVector<DIType, 8> TYs; // Types
+ SmallVector<DIScope, 8> Scopes; // Scopes
+ SmallPtrSet<MDNode *, 64> NodesSeen;
+ DITypeIdentifierMap TypeIdentifierMap;
+ /// Specify if TypeIdentifierMap is initialized.
+ bool TypeMapInitialized;
+};
+} // end namespace llvm
+
+#endif
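A rough sketch of the intended DebugInfoFinder workflow: process a module once, then walk the collected descriptors through the iterator ranges declared above (M is an llvm::Module assumed to be in scope):

    DebugInfoFinder Finder;
    Finder.processModule(M);

    for (DICompileUnit CU : Finder.compile_units())
      errs() << "CU producer: " << CU.getProducer() << "\n";

    for (DISubprogram SP : Finder.subprograms())
      if (SP.isDefinition())
        errs() << "defined function: " << SP.getName() << "\n";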
diff --git a/include/llvm/IR/DebugLoc.h b/include/llvm/IR/DebugLoc.h
new file mode 100644
index 0000000..50b5d54
--- /dev/null
+++ b/include/llvm/IR/DebugLoc.h
@@ -0,0 +1,120 @@
+//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a number of light weight data structures used
+// to describe and track debug location information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGLOC_H
+#define LLVM_IR_DEBUGLOC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ template <typename T> struct DenseMapInfo;
+ class MDNode;
+ class LLVMContext;
+
+ /// DebugLoc - Debug location id. This is carried by Instruction, SDNode,
+ /// and MachineInstr to compactly encode file/line/scope information for an
+ /// operation.
+ class DebugLoc {
+ friend struct DenseMapInfo<DebugLoc>;
+
+    /// getEmptyKey() - A private helper that returns an unknown location
+    /// that is not equal to the tombstone key or to the default DebugLoc().
+ static DebugLoc getEmptyKey() {
+ DebugLoc DL;
+ DL.LineCol = 1;
+ return DL;
+ }
+
+    /// getTombstoneKey() - A private helper that returns an unknown location
+    /// that is not equal to the empty key or to the default DebugLoc().
+ static DebugLoc getTombstoneKey() {
+ DebugLoc DL;
+ DL.LineCol = 2;
+ return DL;
+ }
+
+    /// LineCol - This 32-bit value encodes the line and column number for the
+    /// location, encoded as 24 bits for the line and 8 bits for the column. A
+    /// value of 0 for either means unknown.
+ uint32_t LineCol;
+
+ /// ScopeIdx - This is an opaque ID# for Scope/InlinedAt information,
+ /// decoded by LLVMContext. 0 is unknown.
+ int ScopeIdx;
+ public:
+ DebugLoc() : LineCol(0), ScopeIdx(0) {} // Defaults to unknown.
+
+ /// get - Get a new DebugLoc that corresponds to the specified line/col
+ /// scope/inline location.
+ static DebugLoc get(unsigned Line, unsigned Col,
+ MDNode *Scope, MDNode *InlinedAt = 0);
+
+ /// getFromDILocation - Translate the DILocation quad into a DebugLoc.
+ static DebugLoc getFromDILocation(MDNode *N);
+
+ /// getFromDILexicalBlock - Translate the DILexicalBlock into a DebugLoc.
+ static DebugLoc getFromDILexicalBlock(MDNode *N);
+
+ /// isUnknown - Return true if this is an unknown location.
+ bool isUnknown() const { return ScopeIdx == 0; }
+
+ unsigned getLine() const {
+ return (LineCol << 8) >> 8; // Mask out column.
+ }
+
+ unsigned getCol() const {
+ return LineCol >> 24;
+ }
+
+ /// getScope - This returns the scope pointer for this DebugLoc, or null if
+ /// invalid.
+ MDNode *getScope(const LLVMContext &Ctx) const;
+
+ /// getInlinedAt - This returns the InlinedAt pointer for this DebugLoc, or
+ /// null if invalid or not present.
+ MDNode *getInlinedAt(const LLVMContext &Ctx) const;
+
+ /// getScopeAndInlinedAt - Return both the Scope and the InlinedAt values.
+ void getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
+ const LLVMContext &Ctx) const;
+
+ /// getScopeNode - Get MDNode for DebugLoc's scope, or null if invalid.
+ MDNode *getScopeNode(const LLVMContext &Ctx) const;
+
+    /// getFnDebugLoc - Walk up the scope chain of the given debug loc and
+    /// find the line number info for the function.
+ DebugLoc getFnDebugLoc(const LLVMContext &Ctx);
+
+ /// getAsMDNode - This method converts the compressed DebugLoc node into a
+ /// DILocation compatible MDNode.
+ MDNode *getAsMDNode(const LLVMContext &Ctx) const;
+
+ bool operator==(const DebugLoc &DL) const {
+ return LineCol == DL.LineCol && ScopeIdx == DL.ScopeIdx;
+ }
+ bool operator!=(const DebugLoc &DL) const { return !(*this == DL); }
+
+ void dump(const LLVMContext &Ctx) const;
+ };
+
+ template <>
+ struct DenseMapInfo<DebugLoc> {
+ static DebugLoc getEmptyKey() { return DebugLoc::getEmptyKey(); }
+ static DebugLoc getTombstoneKey() { return DebugLoc::getTombstoneKey(); }
+ static unsigned getHashValue(const DebugLoc &Key);
+ static bool isEqual(DebugLoc LHS, DebugLoc RHS) { return LHS == RHS; }
+ };
+} // end namespace llvm
+
+#endif /* LLVM_IR_DEBUGLOC_H */
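As a usage sketch, a DebugLoc is usually built from a line/column pair plus a scope MDNode and decoded again later; Scope stands in for a debug-info scope node and Ctx for the owning LLVMContext (both assumed, not defined here):

    DebugLoc Loc = DebugLoc::get(42, 7, Scope);  // line 42, column 7
    if (!Loc.isUnknown()) {
      unsigned Line = Loc.getLine();             // 42
      unsigned Col  = Loc.getCol();              // 7
      MDNode *ScopeNode = Loc.getScope(Ctx);     // the Scope passed to get()
      (void)Line; (void)Col; (void)ScopeNode;
    }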
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index e279e60..71d9973 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -249,7 +249,7 @@ public:
bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
/// isSized - Return true if this is a sized type.
- bool isSized() const;
+ bool isSized(SmallPtrSet<const Type*, 4> *Visited = 0) const;
/// hasName - Return true if this is a named struct that has a non-empty name.
bool hasName() const { return SymbolTableEntry != 0; }
@@ -400,6 +400,26 @@ public:
return VectorType::get(EltTy, VTy->getNumElements());
}
+ /// VectorType::getHalfElementsVectorType - This static method returns
+ /// a VectorType with half as many elements as the input type and the
+ /// same element type.
+ ///
+ static VectorType *getHalfElementsVectorType(VectorType *VTy) {
+ unsigned NumElts = VTy->getNumElements();
+ assert ((NumElts & 1) == 0 &&
+ "Cannot halve vector with odd number of elements.");
+ return VectorType::get(VTy->getElementType(), NumElts/2);
+ }
+
+ /// VectorType::getDoubleElementsVectorType - This static method returns
+ /// a VectorType with twice as many elements as the input type and the
+ /// same element type.
+ ///
+ static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
+ unsigned NumElts = VTy->getNumElements();
+ return VectorType::get(VTy->getElementType(), NumElts*2);
+ }
+
/// isValidElementType - Return true if the specified type is valid as a
/// element type.
static bool isValidElementType(Type *ElemTy);
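A short sketch of the two new VectorType helpers added above, assuming an LLVMContext Ctx is available:

    VectorType *V8I32  = VectorType::get(Type::getInt32Ty(Ctx), 8);      // <8 x i32>
    VectorType *V4I32  = VectorType::getHalfElementsVectorType(V8I32);   // <4 x i32>
    VectorType *V16I32 = VectorType::getDoubleElementsVectorType(V8I32); // <16 x i32>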
diff --git a/include/llvm/IR/DiagnosticInfo.h b/include/llvm/IR/DiagnosticInfo.h
new file mode 100644
index 0000000..49eb1b0
--- /dev/null
+++ b/include/llvm/IR/DiagnosticInfo.h
@@ -0,0 +1,240 @@
+//===- llvm/IR/DiagnosticInfo.h - Diagnostic Declaration --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICINFO_H
+#define LLVM_IR_DIAGNOSTICINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+// Forward declarations.
+class DiagnosticPrinter;
+class Function;
+class Instruction;
+class Twine;
+class Value;
+
+/// \brief Defines the different supported severities of a diagnostic.
+enum DiagnosticSeverity {
+ DS_Error,
+ DS_Warning,
+ DS_Remark,
+ // A note attaches additional information to one of the previous diagnostic
+ // types.
+ DS_Note
+};
+
+/// \brief Defines the different supported kinds of a diagnostic.
+/// This enum should be extended with a new ID for each added concrete subclass.
+enum DiagnosticKind {
+ DK_InlineAsm,
+ DK_StackSize,
+ DK_DebugMetadataVersion,
+ DK_SampleProfile,
+ DK_FirstPluginKind
+};
+
+/// \brief Get the next available kind ID for a plugin diagnostic.
+/// Each time this function is called, it returns a different number.
+/// Therefore, a plugin that wants to "identify" its own classes
+/// with a dynamic identifier just has to use this method to get a new ID
+/// and assign it to each of its classes.
+/// The returned ID will be greater than or equal to DK_FirstPluginKind.
+/// Thus, the plugin identifiers will not conflict with the
+/// DiagnosticKind values.
+int getNextAvailablePluginDiagnosticKind();
+
+/// \brief This is the base abstract class for diagnostic reporting in
+/// the backend.
+/// The print method must be overridden by subclasses to print a
+/// user-friendly message in the client of the backend (let us call it a
+/// frontend).
+class DiagnosticInfo {
+private:
+ /// Kind defines the kind of report this is about.
+ const /* DiagnosticKind */ int Kind;
+ /// Severity gives the severity of the diagnostic.
+ const DiagnosticSeverity Severity;
+
+public:
+ DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
+ : Kind(Kind), Severity(Severity) {}
+
+ virtual ~DiagnosticInfo() {}
+
+ /* DiagnosticKind */ int getKind() const { return Kind; }
+ DiagnosticSeverity getSeverity() const { return Severity; }
+
+  /// Print a user-friendly message using the given \p DP.
+ /// This is the default message that will be printed to the user.
+ /// It is used when the frontend does not directly take advantage
+ /// of the information contained in fields of the subclasses.
+ /// The printed message must not end with '.' nor start with a severity
+ /// keyword.
+ virtual void print(DiagnosticPrinter &DP) const = 0;
+};
+
+/// Diagnostic information for inline asm reporting.
+/// This is basically a message and an optional location.
+class DiagnosticInfoInlineAsm : public DiagnosticInfo {
+private:
+ /// Optional line information. 0 if not set.
+ unsigned LocCookie;
+ /// Message to be reported.
+ const Twine &MsgStr;
+ /// Optional origin of the problem.
+ const Instruction *Instr;
+
+public:
+ /// \p MsgStr is the message to be reported to the frontend.
+  /// This class does not copy \p MsgStr; therefore the reference must remain
+  /// valid for the whole lifetime of the diagnostic.
+ DiagnosticInfoInlineAsm(const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
+ Instr(NULL) {}
+
+ /// \p LocCookie if non-zero gives the line number for this report.
+ /// \p MsgStr gives the message.
+  /// This class does not copy \p MsgStr; therefore the reference must remain
+  /// valid for the whole lifetime of the diagnostic.
+ DiagnosticInfoInlineAsm(unsigned LocCookie, const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie),
+ MsgStr(MsgStr), Instr(NULL) {}
+
+ /// \p Instr gives the original instruction that triggered the diagnostic.
+ /// \p MsgStr gives the message.
+  /// This class does not copy \p MsgStr; therefore the reference must remain
+  /// valid for the whole lifetime of the diagnostic.
+  /// The same applies to \p I.
+ DiagnosticInfoInlineAsm(const Instruction &I, const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error);
+
+ unsigned getLocCookie() const { return LocCookie; }
+ const Twine &getMsgStr() const { return MsgStr; }
+ const Instruction *getInstruction() const { return Instr; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Hand rolled RTTI.
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_InlineAsm;
+ }
+};
+
+/// Diagnostic information for stack size reporting.
+/// This is basically a function and a size.
+class DiagnosticInfoStackSize : public DiagnosticInfo {
+private:
+ /// The function that is concerned by this stack size diagnostic.
+ const Function &Fn;
+ /// The computed stack size.
+ unsigned StackSize;
+
+public:
+  /// \p Fn is the function concerned by this stack size diagnostic.
+  /// \p StackSize is the computed stack size.
+ DiagnosticInfoStackSize(const Function &Fn, unsigned StackSize,
+ DiagnosticSeverity Severity = DS_Warning)
+ : DiagnosticInfo(DK_StackSize, Severity), Fn(Fn), StackSize(StackSize) {}
+
+ const Function &getFunction() const { return Fn; }
+ unsigned getStackSize() const { return StackSize; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Hand rolled RTTI.
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_StackSize;
+ }
+};
+
+/// Diagnostic information for debug metadata version reporting.
+/// This is basically a module and a version.
+class DiagnosticInfoDebugMetadataVersion : public DiagnosticInfo {
+private:
+ /// The module that is concerned by this debug metadata version diagnostic.
+ const Module &M;
+ /// The actual metadata version.
+ unsigned MetadataVersion;
+
+public:
+  /// \p M is the module concerned by this debug metadata version diagnostic.
+  /// \p MetadataVersion is the actual metadata version.
+ DiagnosticInfoDebugMetadataVersion(const Module &M, unsigned MetadataVersion,
+ DiagnosticSeverity Severity = DS_Warning)
+ : DiagnosticInfo(DK_DebugMetadataVersion, Severity), M(M),
+ MetadataVersion(MetadataVersion) {}
+
+ const Module &getModule() const { return M; }
+ unsigned getMetadataVersion() const { return MetadataVersion; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Hand rolled RTTI.
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_DebugMetadataVersion;
+ }
+};
+
+/// Diagnostic information for the sample profiler.
+class DiagnosticInfoSampleProfile : public DiagnosticInfo {
+public:
+ DiagnosticInfoSampleProfile(const char *FileName, unsigned LineNum,
+ const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+ LineNum(LineNum), Msg(Msg) {}
+ DiagnosticInfoSampleProfile(const char *FileName, const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+ LineNum(0), Msg(Msg) {}
+ DiagnosticInfoSampleProfile(const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(NULL),
+ LineNum(0), Msg(Msg) {}
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Hand rolled RTTI.
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_SampleProfile;
+ }
+
+ const char *getFileName() const { return FileName; }
+ unsigned getLineNum() const { return LineNum; }
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Name of the input file associated with this diagnostic.
+ const char *FileName;
+
+  /// Line number where the diagnostic occurred. If 0, no line number will
+ /// be emitted in the message.
+ unsigned LineNum;
+
+ /// Message to report.
+ const Twine &Msg;
+};
+
+} // End namespace llvm
+
+#endif
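The DK_FirstPluginKind machinery above is meant to let out-of-tree clients define their own diagnostics: grab a kind ID once via getNextAvailablePluginDiagnosticKind() and use it for the hand-rolled RTTI. A hedged sketch of such a subclass (all names below are illustrative, not part of LLVM):

    static const int DK_MyPluginDiag = getNextAvailablePluginDiagnosticKind();

    class DiagnosticInfoMyPlugin : public DiagnosticInfo {
      const Twine &Msg;  // not copied; must outlive the diagnostic
    public:
      DiagnosticInfoMyPlugin(const Twine &Msg,
                             DiagnosticSeverity Severity = DS_Warning)
          : DiagnosticInfo(DK_MyPluginDiag, Severity), Msg(Msg) {}

      void print(DiagnosticPrinter &DP) const override { DP << Msg; }

      static bool classof(const DiagnosticInfo *DI) {
        return DI->getKind() == DK_MyPluginDiag;
      }
    };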
diff --git a/include/llvm/IR/DiagnosticPrinter.h b/include/llvm/IR/DiagnosticPrinter.h
new file mode 100644
index 0000000..411c781
--- /dev/null
+++ b/include/llvm/IR/DiagnosticPrinter.h
@@ -0,0 +1,87 @@
+//===- llvm/IR/DiagnosticPrinter.h - Diagnostic Printer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the main interface for printing backend diagnostics.
+//
+// Clients of the backend diagnostics should implement this interface based
+// on their needs.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
+#define LLVM_IR_DIAGNOSTICPRINTER_H
+
+#include <string>
+
+namespace llvm {
+// Forward declarations.
+class Module;
+class raw_ostream;
+class StringRef;
+class Twine;
+class Value;
+
+/// \brief Interface for custom diagnostic printing.
+class DiagnosticPrinter {
+public:
+ virtual ~DiagnosticPrinter() {}
+
+ // Simple types.
+ virtual DiagnosticPrinter &operator<<(char C) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
+ virtual DiagnosticPrinter &operator<<(signed char C) = 0;
+ virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
+ virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
+ virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
+ virtual DiagnosticPrinter &operator<<(long N) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
+ virtual DiagnosticPrinter &operator<<(long long N) = 0;
+ virtual DiagnosticPrinter &operator<<(const void *P) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
+ virtual DiagnosticPrinter &operator<<(int N) = 0;
+ virtual DiagnosticPrinter &operator<<(double N) = 0;
+ virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;
+
+ // IR related types.
+ virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
+ virtual DiagnosticPrinter &operator<<(const Module &M) = 0;
+};
+
+/// \brief Basic diagnostic printer that uses an underlying raw_ostream.
+class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
+protected:
+ raw_ostream &Stream;
+
+public:
+  DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}
+
+ // Simple types.
+ DiagnosticPrinter &operator<<(char C) override;
+ DiagnosticPrinter &operator<<(unsigned char C) override;
+ DiagnosticPrinter &operator<<(signed char C) override;
+ DiagnosticPrinter &operator<<(StringRef Str) override;
+ DiagnosticPrinter &operator<<(const char *Str) override;
+ DiagnosticPrinter &operator<<(const std::string &Str) override;
+ DiagnosticPrinter &operator<<(unsigned long N) override;
+ DiagnosticPrinter &operator<<(long N) override;
+ DiagnosticPrinter &operator<<(unsigned long long N) override;
+ DiagnosticPrinter &operator<<(long long N) override;
+ DiagnosticPrinter &operator<<(const void *P) override;
+ DiagnosticPrinter &operator<<(unsigned int N) override;
+ DiagnosticPrinter &operator<<(int N) override;
+ DiagnosticPrinter &operator<<(double N) override;
+ DiagnosticPrinter &operator<<(const Twine &Str) override;
+
+ // IR related types.
+ DiagnosticPrinter &operator<<(const Value &V) override;
+ DiagnosticPrinter &operator<<(const Module &M) override;
+};
+} // End namespace llvm
+
+#endif
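A minimal sketch of rendering a diagnostic with the raw_ostream-backed printer; DI is any concrete DiagnosticInfo and errs() comes from llvm/Support/raw_ostream.h:

    DiagnosticPrinterRawOStream DP(errs());
    DP << "warning: ";
    DI.print(DP);   // the concrete diagnostic formats itself
    DP << "\n";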
diff --git a/include/llvm/IR/Dominators.h b/include/llvm/IR/Dominators.h
new file mode 100644
index 0000000..86bbe39
--- /dev/null
+++ b/include/llvm/IR/Dominators.h
@@ -0,0 +1,190 @@
+//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominatorTree class, which provides fast and efficient
+// dominance queries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DOMINATORS_H
+#define LLVM_IR_DOMINATORS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+
+EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<BasicBlock>);
+EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>);
+
+#define LLVM_COMMA ,
+EXTERN_TEMPLATE_INSTANTIATION(void Calculate<Function LLVM_COMMA BasicBlock *>(
+ DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT LLVM_COMMA
+ Function &F));
+EXTERN_TEMPLATE_INSTANTIATION(
+ void Calculate<Function LLVM_COMMA Inverse<BasicBlock *> >(
+ DominatorTreeBase<GraphTraits<Inverse<BasicBlock *> >::NodeType> &DT
+ LLVM_COMMA Function &F));
+#undef LLVM_COMMA
+
+typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
+
+class BasicBlockEdge {
+ const BasicBlock *Start;
+ const BasicBlock *End;
+public:
+ BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
+ Start(Start_), End(End_) { }
+ const BasicBlock *getStart() const {
+ return Start;
+ }
+ const BasicBlock *getEnd() const {
+ return End;
+ }
+ bool isSingleEdge() const;
+};
+
+/// \brief Concrete subclass of DominatorTreeBase that is used to compute a
+/// normal dominator tree.
+class DominatorTree : public DominatorTreeBase<BasicBlock> {
+public:
+ typedef DominatorTreeBase<BasicBlock> Base;
+
+ DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
+
+/// \brief Returns *false* if the other dominator tree matches this dominator
+/// tree; otherwise returns *true*.
+ inline bool compare(const DominatorTree &Other) const {
+ const DomTreeNode *R = getRootNode();
+ const DomTreeNode *OtherR = Other.getRootNode();
+
+ if (!R || !OtherR || R->getBlock() != OtherR->getBlock())
+ return true;
+
+ if (Base::compare(Other))
+ return true;
+
+ return false;
+ }
+
+ // Ensure base-class overloads are visible.
+ using Base::dominates;
+
+ /// \brief Return true if Def dominates a use in User.
+ ///
+ /// This performs the special checks necessary if Def and User are in the same
+ /// basic block. Note that Def doesn't dominate a use in Def itself!
+ bool dominates(const Instruction *Def, const Use &U) const;
+ bool dominates(const Instruction *Def, const Instruction *User) const;
+ bool dominates(const Instruction *Def, const BasicBlock *BB) const;
+ bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
+ bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
+
+ inline DomTreeNode *operator[](BasicBlock *BB) const {
+ return getNode(BB);
+ }
+
+ // Ensure base class overloads are visible.
+ using Base::isReachableFromEntry;
+
+ /// \brief Provide an overload for a Use.
+ bool isReachableFromEntry(const Use &U) const;
+
+ /// \brief Verify the correctness of the domtree by re-computing it.
+ ///
+ /// This should only be used for debugging as it aborts the program if the
+ /// verification fails.
+ void verifyDomTree() const;
+};
+
+//===-------------------------------------
+// DominatorTree GraphTraits specializations so the DominatorTree can be
+// iterable by generic graph iterators.
+
+template <> struct GraphTraits<DomTreeNode*> {
+ typedef DomTreeNode NodeType;
+ typedef NodeType::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) {
+ return N;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+
+ typedef df_iterator<DomTreeNode*> nodes_iterator;
+
+ static nodes_iterator nodes_begin(DomTreeNode *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(DomTreeNode *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+template <> struct GraphTraits<DominatorTree*>
+ : public GraphTraits<DomTreeNode*> {
+ static NodeType *getEntryNode(DominatorTree *DT) {
+ return DT->getRootNode();
+ }
+
+ static nodes_iterator nodes_begin(DominatorTree *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(DominatorTree *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+/// \brief Analysis pass which computes a \c DominatorTree.
+class DominatorTreeWrapperPass : public FunctionPass {
+ DominatorTree DT;
+
+public:
+ static char ID;
+
+ DominatorTreeWrapperPass() : FunctionPass(ID) {
+ initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ DominatorTree &getDomTree() { return DT; }
+ const DominatorTree &getDomTree() const { return DT; }
+
+ bool runOnFunction(Function &F) override;
+
+ void verifyAnalysis() const override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ void releaseMemory() override { DT.releaseMemory(); }
+
+ void print(raw_ostream &OS, const Module *M = 0) const override;
+};
+
+} // End llvm namespace
+
+#endif
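Fragments of a hypothetical FunctionPass showing the usual way to consume this analysis: require DominatorTreeWrapperPass, pull out the tree, and ask dominance questions. Def (an Instruction*), U (a Use) and NumDominatedUses are placeholders assumed to come from the pass's own logic:

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.setPreservesAll();
    }

    bool runOnFunction(Function &F) override {
      DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      if (DT.dominates(Def, U))     // does Def dominate this particular use?
        ++NumDominatedUses;         // hypothetical counter
      return false;
    }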
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index bba7ecd..cb43bba 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -18,6 +18,7 @@
#ifndef LLVM_IR_FUNCTION_H
#define LLVM_IR_FUNCTION_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
@@ -335,7 +336,7 @@ public:
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a Function) from the Function Src to this one.
- void copyAttributesFrom(const GlobalValue *Src);
+ void copyAttributesFrom(const GlobalValue *Src) override;
/// deleteBody - This method deletes the body of the function, and converts
/// the linkage to external.
@@ -348,12 +349,12 @@ public:
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
- virtual void removeFromParent();
+ void removeFromParent() override;
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
- virtual void eraseFromParent();
+ void eraseFromParent() override;
/// Get the underlying elements of the Function... the basic block list is
@@ -404,9 +405,9 @@ public:
const BasicBlock &back() const { return BasicBlocks.back(); }
BasicBlock &back() { return BasicBlocks.back(); }
- //===--------------------------------------------------------------------===//
- // Argument iterator forwarding functions
- //
+/// @name Function Argument Iteration
+/// @{
+
arg_iterator arg_begin() {
CheckLazyArguments();
return ArgumentList.begin();
@@ -424,6 +425,16 @@ public:
return ArgumentList.end();
}
+ iterator_range<arg_iterator> args() {
+ return iterator_range<arg_iterator>(arg_begin(), arg_end());
+ }
+
+ iterator_range<const_arg_iterator> args() const {
+ return iterator_range<const_arg_iterator>(arg_begin(), arg_end());
+ }
+
+/// @}
+
size_t arg_size() const;
bool arg_empty() const;
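With the new args() ranges above, argument iteration becomes a plain range-based for loop; a tiny sketch, where F is an llvm::Function:

    for (Argument &Arg : F.args())
      if (!Arg.hasName())
        Arg.setName("arg");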
diff --git a/include/llvm/IR/GVMaterializer.h b/include/llvm/IR/GVMaterializer.h
new file mode 100644
index 0000000..6717bc8
--- /dev/null
+++ b/include/llvm/IR/GVMaterializer.h
@@ -0,0 +1,62 @@
+//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an abstract interface for loading a module from some
+// place. This interface allows incremental or random access loading of
+// functions from the file. This is useful for applications like JIT compilers
+// or interprocedural optimizers that do not need the entire program in memory
+// at the same time.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GVMATERIALIZER_H
+#define LLVM_IR_GVMATERIALIZER_H
+
+#include "llvm/Support/system_error.h"
+
+namespace llvm {
+
+class Function;
+class GlobalValue;
+class Module;
+
+class GVMaterializer {
+protected:
+ GVMaterializer() {}
+
+public:
+ virtual ~GVMaterializer();
+
+ /// isMaterializable - True if GV can be materialized from whatever backing
+ /// store this GVMaterializer uses and has not been materialized yet.
+ virtual bool isMaterializable(const GlobalValue *GV) const = 0;
+
+ /// isDematerializable - True if GV has been materialized and can be
+ /// dematerialized back to whatever backing store this GVMaterializer uses.
+ virtual bool isDematerializable(const GlobalValue *GV) const = 0;
+
+ /// Materialize - make sure the given GlobalValue is fully read.
+ ///
+ virtual error_code Materialize(GlobalValue *GV) = 0;
+
+ /// Dematerialize - If the given GlobalValue is read in, and if the
+ /// GVMaterializer supports it, release the memory for the GV, and set it up
+ /// to be materialized lazily. If the Materializer doesn't support this
+ /// capability, this method is a noop.
+ ///
+ virtual void Dematerialize(GlobalValue *) {}
+
+ /// MaterializeModule - make sure the entire Module has been completely read.
+ ///
+ virtual error_code MaterializeModule(Module *M) = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/IR/GetElementPtrTypeIterator.h b/include/llvm/IR/GetElementPtrTypeIterator.h
new file mode 100644
index 0000000..f2722d6
--- /dev/null
+++ b/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -0,0 +1,113 @@
+//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an iterator for walking through the types indexed by
+// getelementptr instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/User.h"
+
+namespace llvm {
+ template<typename ItTy = User::const_op_iterator>
+ class generic_gep_type_iterator
+ : public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag,
+ Type *, ptrdiff_t> super;
+
+ ItTy OpIt;
+ Type *CurTy;
+ generic_gep_type_iterator() {}
+ public:
+
+ static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
+ generic_gep_type_iterator I;
+ I.CurTy = Ty;
+ I.OpIt = It;
+ return I;
+ }
+ static generic_gep_type_iterator end(ItTy It) {
+ generic_gep_type_iterator I;
+ I.CurTy = 0;
+ I.OpIt = It;
+ return I;
+ }
+
+ bool operator==(const generic_gep_type_iterator& x) const {
+ return OpIt == x.OpIt;
+ }
+ bool operator!=(const generic_gep_type_iterator& x) const {
+ return !operator==(x);
+ }
+
+ Type *operator*() const {
+ return CurTy;
+ }
+
+ Type *getIndexedType() const {
+ CompositeType *CT = cast<CompositeType>(CurTy);
+ return CT->getTypeAtIndex(getOperand());
+ }
+
+ // This is a non-standard operator->. It allows you to call methods on the
+ // current type directly.
+ Type *operator->() const { return operator*(); }
+
+ Value *getOperand() const { return *OpIt; }
+
+ generic_gep_type_iterator& operator++() { // Preincrement
+ if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
+ CurTy = CT->getTypeAtIndex(getOperand());
+ } else {
+ CurTy = 0;
+ }
+ ++OpIt;
+ return *this;
+ }
+
+ generic_gep_type_iterator operator++(int) { // Postincrement
+ generic_gep_type_iterator tmp = *this; ++*this; return tmp;
+ }
+ };
+
+ typedef generic_gep_type_iterator<> gep_type_iterator;
+
+ inline gep_type_iterator gep_type_begin(const User *GEP) {
+ return gep_type_iterator::begin
+ (GEP->getOperand(0)->getType()->getScalarType(), GEP->op_begin()+1);
+ }
+ inline gep_type_iterator gep_type_end(const User *GEP) {
+ return gep_type_iterator::end(GEP->op_end());
+ }
+ inline gep_type_iterator gep_type_begin(const User &GEP) {
+ return gep_type_iterator::begin
+ (GEP.getOperand(0)->getType()->getScalarType(), GEP.op_begin()+1);
+ }
+ inline gep_type_iterator gep_type_end(const User &GEP) {
+ return gep_type_iterator::end(GEP.op_end());
+ }
+
+ template<typename T>
+ inline generic_gep_type_iterator<const T *>
+ gep_type_begin(Type *Op0, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
+ }
+
+ template<typename T>
+ inline generic_gep_type_iterator<const T *>
+ gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::end(A.end());
+ }
+} // end namespace llvm
+
+#endif
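A rough sketch of walking the types indexed by a GEP with the iterator above; GEP is any getelementptr User (for example a GetElementPtrInst*):

    for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
         GTI != E; ++GTI) {
      Type  *IndexedTy = *GTI;              // type traversed at this level
      Value *Idx       = GTI.getOperand();  // matching index operand
      (void)IndexedTy; (void)Idx;
    }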
diff --git a/include/llvm/IR/GlobalAlias.h b/include/llvm/IR/GlobalAlias.h
index fec61a7..2ca481a 100644
--- a/include/llvm/IR/GlobalAlias.h
+++ b/include/llvm/IR/GlobalAlias.h
@@ -49,12 +49,12 @@ public:
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
- virtual void removeFromParent();
+ void removeFromParent() override;
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
- virtual void eraseFromParent();
+ void eraseFromParent() override;
   /// set/getAliasee - These methods retrieve and set the alias target.
void setAliasee(Constant *GV);
@@ -64,23 +64,15 @@ public:
Constant *getAliasee() {
return getOperand(0);
}
- /// getAliasedGlobal() - Aliasee can be either global or bitcast of
- /// global. This method retrives the global for both aliasee flavours.
+
+ /// This method tries to ultimately resolve the alias by going through the
+ /// aliasing chain and trying to find the very last global. Returns NULL if a
+ /// cycle was found.
GlobalValue *getAliasedGlobal();
const GlobalValue *getAliasedGlobal() const {
return const_cast<GlobalAlias *>(this)->getAliasedGlobal();
}
- /// resolveAliasedGlobal() - This method tries to ultimately resolve the alias
- /// by going through the aliasing chain and trying to find the very last
- /// global. Returns NULL if a cycle was found. If stopOnWeak is false, then
- /// the whole chain aliasing chain is traversed, otherwise - only strong
- /// aliases.
- GlobalValue *resolveAliasedGlobal(bool stopOnWeak = true);
- const GlobalValue *resolveAliasedGlobal(bool stopOnWeak = true) const {
- return const_cast<GlobalAlias *>(this)->resolveAliasedGlobal(stopOnWeak);
- }
-
static bool isValidLinkage(LinkageTypes L) {
return isExternalLinkage(L) || isLocalLinkage(L) ||
isWeakLinkage(L) || isLinkOnceLinkage(L);
diff --git a/include/llvm/IR/GlobalValue.h b/include/llvm/IR/GlobalValue.h
index 4f20a31..59c320d 100644
--- a/include/llvm/IR/GlobalValue.h
+++ b/include/llvm/IR/GlobalValue.h
@@ -40,10 +40,6 @@ public:
AppendingLinkage, ///< Special purpose, only applies to global arrays
InternalLinkage, ///< Rename collisions when linking (static functions).
PrivateLinkage, ///< Like Internal, but omit from symbol table.
- LinkerPrivateLinkage, ///< Like Private, but linker removes.
- LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
- DLLImportLinkage, ///< Function to be imported from DLL
- DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
CommonLinkage ///< Tentative definitions.
};
@@ -55,11 +51,19 @@ public:
ProtectedVisibility ///< The GV is protected
};
+ /// @brief Storage classes of global values for PE targets.
+ enum DLLStorageClassTypes {
+ DefaultStorageClass = 0,
+ DLLImportStorageClass = 1, ///< Function to be imported from DLL
+ DLLExportStorageClass = 2 ///< Function to be accessible from DLL.
+ };
+
protected:
GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
LinkageTypes linkage, const Twine &Name)
: Constant(ty, vty, Ops, NumOps), Linkage(linkage),
- Visibility(DefaultVisibility), Alignment(0), UnnamedAddr(0), Parent(0) {
+ Visibility(DefaultVisibility), Alignment(0), UnnamedAddr(0),
+ DllStorageClass(DefaultStorageClass), Parent(0) {
setName(Name);
}
@@ -69,6 +73,7 @@ protected:
unsigned Visibility : 2; // The visibility style of this global
unsigned Alignment : 16; // Alignment of this symbol, must be power of two
unsigned UnnamedAddr : 1; // This value's address is not significant
+ unsigned DllStorageClass : 2; // DLL storage class
Module *Parent; // The containing module.
std::string Section; // Section to emit this into, empty mean default
public:
@@ -91,11 +96,26 @@ public:
return Visibility == ProtectedVisibility;
}
void setVisibility(VisibilityTypes V) { Visibility = V; }
-
+
+ DLLStorageClassTypes getDLLStorageClass() const {
+ return DLLStorageClassTypes(DllStorageClass);
+ }
+ bool hasDLLImportStorageClass() const {
+ return DllStorageClass == DLLImportStorageClass;
+ }
+ bool hasDLLExportStorageClass() const {
+ return DllStorageClass == DLLExportStorageClass;
+ }
+ void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }
+
bool hasSection() const { return !Section.empty(); }
const std::string &getSection() const { return Section; }
- void setSection(StringRef S) { Section = S; }
-
+ void setSection(StringRef S) {
+ assert((getValueID() != Value::GlobalAliasVal || S.empty()) &&
+ "GlobalAlias should not have a section!");
+ Section = S;
+ }
+
/// If the usage is empty (except transitively dead constants), then this
/// global value can be safely deleted since the destructor will
/// delete the dead constants as well.
@@ -136,21 +156,8 @@ public:
static bool isPrivateLinkage(LinkageTypes Linkage) {
return Linkage == PrivateLinkage;
}
- static bool isLinkerPrivateLinkage(LinkageTypes Linkage) {
- return Linkage == LinkerPrivateLinkage;
- }
- static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
- return Linkage == LinkerPrivateWeakLinkage;
- }
static bool isLocalLinkage(LinkageTypes Linkage) {
- return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
- }
- static bool isDLLImportLinkage(LinkageTypes Linkage) {
- return Linkage == DLLImportLinkage;
- }
- static bool isDLLExportLinkage(LinkageTypes Linkage) {
- return Linkage == DLLExportLinkage;
+ return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
}
static bool isExternalWeakLinkage(LinkageTypes Linkage) {
return Linkage == ExternalWeakLinkage;
@@ -169,11 +176,8 @@ public:
/// by something non-equivalent at link time. For example, if a function has
/// weak linkage then the code defining it may be replaced by different code.
static bool mayBeOverridden(LinkageTypes Linkage) {
- return Linkage == WeakAnyLinkage ||
- Linkage == LinkOnceAnyLinkage ||
- Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage;
+ return Linkage == WeakAnyLinkage || Linkage == LinkOnceAnyLinkage ||
+ Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -181,14 +185,10 @@ public:
/// always a mistake: when working at the IR level use mayBeOverridden instead
/// as it knows about ODR semantics.
static bool isWeakForLinker(LinkageTypes Linkage) {
- return Linkage == AvailableExternallyLinkage ||
- Linkage == WeakAnyLinkage ||
- Linkage == WeakODRLinkage ||
- Linkage == LinkOnceAnyLinkage ||
- Linkage == LinkOnceODRLinkage ||
- Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage;
+ return Linkage == AvailableExternallyLinkage || Linkage == WeakAnyLinkage ||
+ Linkage == WeakODRLinkage || Linkage == LinkOnceAnyLinkage ||
+ Linkage == LinkOnceODRLinkage || Linkage == CommonLinkage ||
+ Linkage == ExternalWeakLinkage;
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -204,13 +204,7 @@ public:
bool hasAppendingLinkage() const { return isAppendingLinkage(Linkage); }
bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
- bool hasLinkerPrivateLinkage() const { return isLinkerPrivateLinkage(Linkage); }
- bool hasLinkerPrivateWeakLinkage() const {
- return isLinkerPrivateWeakLinkage(Linkage);
- }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
- bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
- bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
bool hasExternalWeakLinkage() const { return isExternalWeakLinkage(Linkage); }
bool hasCommonLinkage() const { return isCommonLinkage(Linkage); }
@@ -267,7 +261,7 @@ public:
/// @}
/// Override from Constant class.
- virtual void destroyConstant();
+ void destroyConstant() override;
/// isDeclaration - Return true if the primary definition of this global
/// value is outside of the current translation unit.
@@ -286,6 +280,8 @@ public:
inline Module *getParent() { return Parent; }
inline const Module *getParent() const { return Parent; }
+ const DataLayout *getDataLayout() const;
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal ||
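
Illustrative sketch (not part of the diff): dllimport/dllexport are now a storage class kept separately from linkage, so a frontend tags the global in addition to choosing its linkage. The helper names are hypothetical.

#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

// Mark a function as exported from a DLL; its linkage (e.g. external) is left
// untouched because the storage class is orthogonal to it.
static void markDLLExport(Function &F) {
  F.setDLLStorageClass(GlobalValue::DLLExportStorageClass);
}

static bool isImportedFromDLL(const GlobalValue &GV) {
  return GV.hasDLLImportStorageClass();
}
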
diff --git a/include/llvm/IR/GlobalVariable.h b/include/llvm/IR/GlobalVariable.h
index 660092d..a82740f 100644
--- a/include/llvm/IR/GlobalVariable.h
+++ b/include/llvm/IR/GlobalVariable.h
@@ -174,21 +174,21 @@ public:
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a GlobalVariable) from the GlobalVariable Src to this one.
- void copyAttributesFrom(const GlobalValue *Src);
+ void copyAttributesFrom(const GlobalValue *Src) override;
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
- virtual void removeFromParent();
+ void removeFromParent() override;
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
- virtual void eraseFromParent();
+ void eraseFromParent() override;
/// Override Constant's implementation of this method so we can
/// replace constant initializers.
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
+ void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U) override;
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *V) {
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index 8d1432d..79ee7b7 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -19,13 +19,13 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CBindingWrapping.h"
-#include "llvm/Support/ConstantFolder.h"
-#include "llvm/Support/ValueHandle.h"
namespace llvm {
class MDNode;
@@ -282,6 +282,12 @@ public:
return ConstantInt::get(getInt64Ty(), C);
}
+ /// \brief Get a constant N-bit value, zero extended or truncated from
+ /// a 64-bit value.
+ ConstantInt *getIntN(unsigned N, uint64_t C) {
+ return ConstantInt::get(getIntNTy(N), C);
+ }
+
/// \brief Get a constant integer value.
ConstantInt *getInt(const APInt &AI) {
return ConstantInt::get(Context, AI);
@@ -316,6 +322,11 @@ public:
return Type::getInt64Ty(Context);
}
+ /// \brief Fetch the type representing an N-bit integer.
+ IntegerType *getIntNTy(unsigned N) {
+ return Type::getIntNTy(Context, N);
+ }
+
/// \brief Fetch the type representing a 32-bit floating point value.
Type *getFloatTy() {
return Type::getFloatTy(Context);
@@ -638,7 +649,7 @@ public:
bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateSub(LC, RC), Name);
+ return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
HasNUW, HasNSW);
}
@@ -660,7 +671,7 @@ public:
bool HasNUW = false, bool HasNSW = false) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateMul(LC, RC), Name);
+ return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
HasNUW, HasNSW);
}
@@ -832,11 +843,15 @@ public:
}
Value *CreateBinOp(Instruction::BinaryOps Opc,
- Value *LHS, Value *RHS, const Twine &Name = "") {
+ Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
- return Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
+ llvm::Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
+ if (isa<FPMathOperator>(BinOp))
+ BinOp = AddFPMathAttributes(BinOp, FPMathTag, FMF);
+ return Insert(BinOp, Name);
}
Value *CreateNeg(Value *V, const Twine &Name = "",
@@ -915,13 +930,17 @@ public:
return SI;
}
FenceInst *CreateFence(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
- return Insert(new FenceInst(Context, Ordering, SynchScope));
- }
- AtomicCmpXchgInst *CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
- AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
- return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope));
+ SynchronizationScope SynchScope = CrossThread,
+ const Twine &Name = "") {
+ return Insert(new FenceInst(Context, Ordering, SynchScope), Name);
+ }
+ AtomicCmpXchgInst *
+ CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope = CrossThread) {
+ return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
+ FailureOrdering, SynchScope));
}
AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
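
Illustrative sketch (not part of the diff): the new getIntNTy/getIntN helpers parameterise the bit width, so arbitrary-width constants no longer need an explicit IntegerType lookup. The helper name is hypothetical.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Add the constant 42 to a value after normalising it to 24 bits; getIntN
// truncates or zero-extends its 64-bit argument to the requested width.
static Value *addConstant24(IRBuilder<> &B, Value *V) {
  IntegerType *I24 = B.getIntNTy(24);
  Value *Narrowed = B.CreateZExtOrTrunc(V, I24);
  return B.CreateAdd(Narrowed, B.getIntN(24, 42));
}
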
diff --git a/include/llvm/IR/IRPrintingPasses.h b/include/llvm/IR/IRPrintingPasses.h
new file mode 100644
index 0000000..2f78c83
--- /dev/null
+++ b/include/llvm/IR/IRPrintingPasses.h
@@ -0,0 +1,85 @@
+//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines passes to print out IR in various granularities. The
+/// PrintModulePass pass simply prints out the entire module when it is
+/// executed. The PrintFunctionPass class is designed to be pipelined with
+/// other FunctionPass's, and prints out the functions of the module as they
+/// are processed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IR_PRINTING_PASSES_H
+#define LLVM_IR_IR_PRINTING_PASSES_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class Module;
+class ModulePass;
+class PreservedAnalyses;
+class raw_ostream;
+
+/// \brief Create and return a pass that writes the module to the specified
+/// \c raw_ostream.
+ModulePass *createPrintModulePass(raw_ostream &OS,
+ const std::string &Banner = "");
+
+/// \brief Create and return a pass that prints functions to the specified
+/// \c raw_ostream as they are processed.
+FunctionPass *createPrintFunctionPass(raw_ostream &OS,
+ const std::string &Banner = "");
+
+/// \brief Create and return a pass that writes the BB to the specified
+/// \c raw_ostream.
+BasicBlockPass *createPrintBasicBlockPass(raw_ostream &OS,
+ const std::string &Banner = "");
+
+/// \brief Pass for printing a Module as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintModulePass {
+ raw_ostream &OS;
+ std::string Banner;
+
+public:
+ PrintModulePass();
+ PrintModulePass(raw_ostream &OS, const std::string &Banner = "");
+
+ PreservedAnalyses run(Module *M);
+
+ static StringRef name() { return "PrintModulePass"; }
+};
+
+/// \brief Pass for printing a Function as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintFunctionPass {
+ raw_ostream &OS;
+ std::string Banner;
+
+public:
+ PrintFunctionPass();
+ PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");
+
+ PreservedAnalyses run(Function *F);
+
+ static StringRef name() { return "PrintFunctionPass"; }
+};
+
+} // End llvm namespace
+
+#endif
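
Illustrative sketch (not part of the diff): the create*Pass functions feed the legacy pass manager, while PrintModulePass/PrintFunctionPass are the new-pass-manager equivalents. The wrapper function is hypothetical.

#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print the module before and after whatever passes get added in between.
static void runWithPrinting(Module &M) {
  legacy::PassManager PM;
  PM.add(createPrintModulePass(errs(), "; --- before ---"));
  // ... real transformation passes would be added here ...
  PM.add(createPrintModulePass(errs(), "; --- after ---"));
  PM.run(M);
}
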
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index 3398a83..ac19089 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -164,9 +164,6 @@ public:
///Default constructor.
ConstraintInfo();
- /// Copy constructor.
- ConstraintInfo(const ConstraintInfo &other);
-
/// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
@@ -197,7 +194,7 @@ public:
// These are helper methods for dealing with flags in the INLINEASM SDNode
// in the backend.
- enum LLVM_ENUM_INT_TYPE(uint32_t) {
+ enum : uint32_t {
// Fixed operands on an INLINEASM SDNode.
Op_InputChain = 0,
Op_AsmString = 1,
diff --git a/include/llvm/IR/InstIterator.h b/include/llvm/IR/InstIterator.h
new file mode 100644
index 0000000..75e93bd
--- /dev/null
+++ b/include/llvm/IR/InstIterator.h
@@ -0,0 +1,147 @@
+//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of two iterators for iterating over the
+// instructions in a function. This is effectively a wrapper around a two level
+// iterator that can probably be genericized later.
+//
+// Note that this iterator gets invalidated any time that basic blocks or
+// instructions are moved around.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTITERATOR_H
+#define LLVM_IR_INSTITERATOR_H
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+// This class implements inst_begin() & inst_end() for
+// inst_iterator and const_inst_iterator's.
+//
+template <class _BB_t, class _BB_i_t, class _BI_t, class _II_t>
+class InstIterator {
+ typedef _BB_t BBty;
+ typedef _BB_i_t BBIty;
+ typedef _BI_t BIty;
+ typedef _II_t IIty;
+ _BB_t *BBs; // BasicBlocksType
+ _BB_i_t BB; // BasicBlocksType::iterator
+ _BI_t BI; // BasicBlock::iterator
+public:
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef IIty value_type;
+ typedef signed difference_type;
+ typedef IIty* pointer;
+ typedef IIty& reference;
+
+ // Default constructor
+ InstIterator() {}
+
+ // Copy constructor...
+ template<typename A, typename B, typename C, typename D>
+ InstIterator(const InstIterator<A,B,C,D> &II)
+ : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+ template<typename A, typename B, typename C, typename D>
+ InstIterator(InstIterator<A,B,C,D> &II)
+ : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+ template<class M> InstIterator(M &m)
+ : BBs(&m.getBasicBlockList()), BB(BBs->begin()) { // begin ctor
+ if (BB != BBs->end()) {
+ BI = BB->begin();
+ advanceToNextBB();
+ }
+ }
+
+ template<class M> InstIterator(M &m, bool)
+ : BBs(&m.getBasicBlockList()), BB(BBs->end()) { // end ctor
+ }
+
+ // Accessors to get at the underlying iterators...
+ inline BBIty &getBasicBlockIterator() { return BB; }
+ inline BIty &getInstructionIterator() { return BI; }
+
+ inline reference operator*() const { return *BI; }
+ inline pointer operator->() const { return &operator*(); }
+
+ inline bool operator==(const InstIterator &y) const {
+ return BB == y.BB && (BB == BBs->end() || BI == y.BI);
+ }
+ inline bool operator!=(const InstIterator& y) const {
+ return !operator==(y);
+ }
+
+ InstIterator& operator++() {
+ ++BI;
+ advanceToNextBB();
+ return *this;
+ }
+ inline InstIterator operator++(int) {
+ InstIterator tmp = *this; ++*this; return tmp;
+ }
+
+ InstIterator& operator--() {
+ while (BB == BBs->end() || BI == BB->begin()) {
+ --BB;
+ BI = BB->end();
+ }
+ --BI;
+ return *this;
+ }
+ inline InstIterator operator--(int) {
+ InstIterator tmp = *this; --*this; return tmp;
+ }
+
+ inline bool atEnd() const { return BB == BBs->end(); }
+
+private:
+ inline void advanceToNextBB() {
+ // The only way that the II could be broken is if it is now pointing to
+ // the end() of the current BasicBlock and there are successor BBs.
+ while (BI == BB->end()) {
+ ++BB;
+ if (BB == BBs->end()) break;
+ BI = BB->begin();
+ }
+ }
+};
+
+
+typedef InstIterator<iplist<BasicBlock>,
+ Function::iterator, BasicBlock::iterator,
+ Instruction> inst_iterator;
+typedef InstIterator<const iplist<BasicBlock>,
+ Function::const_iterator,
+ BasicBlock::const_iterator,
+ const Instruction> const_inst_iterator;
+
+inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
+inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
+inline const_inst_iterator inst_begin(const Function *F) {
+ return const_inst_iterator(*F);
+}
+inline const_inst_iterator inst_end(const Function *F) {
+ return const_inst_iterator(*F, true);
+}
+inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); }
+inline inst_iterator inst_end(Function &F) { return inst_iterator(F, true); }
+inline const_inst_iterator inst_begin(const Function &F) {
+ return const_inst_iterator(F);
+}
+inline const_inst_iterator inst_end(const Function &F) {
+ return const_inst_iterator(F, true);
+}
+
+} // End llvm namespace
+
+#endif
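
Illustrative sketch (not part of the diff): inst_begin/inst_end flatten the block-then-instruction nesting into a single loop. The helper name is hypothetical.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Count the call instructions in a function without the usual nested loops
// over basic blocks and their instructions.
static unsigned countCalls(const Function &F) {
  unsigned Calls = 0;
  for (const_inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isa<CallInst>(*I))
      ++Calls;
  return Calls;
}
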
diff --git a/include/llvm/IR/InstVisitor.h b/include/llvm/IR/InstVisitor.h
new file mode 100644
index 0000000..1cdcd55
--- /dev/null
+++ b/include/llvm/IR/InstVisitor.h
@@ -0,0 +1,289 @@
+//===- InstVisitor.h - Instruction visitor templates ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_IR_INSTVISITOR_H
+#define LLVM_IR_INSTVISITOR_H
+
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+// We operate on opaque instruction classes, so forward declare all instruction
+// types now...
+//
+#define HANDLE_INST(NUM, OPCODE, CLASS) class CLASS;
+#include "llvm/IR/Instruction.def"
+
+#define DELEGATE(CLASS_TO_VISIT) \
+ return static_cast<SubClass*>(this)-> \
+ visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I))
+
+
+/// @brief Base class for instruction visitors
+///
+/// Instruction visitors are used when you want to perform different actions
+/// for different kinds of instructions without having to use lots of casts
+/// and a big switch statement (in your code, that is).
+///
+/// To define your own visitor, inherit from this class, specifying your
+/// new type for the 'SubClass' template parameter, and "override" visitXXX
+/// functions in your class. I say "override" because this class is defined
+/// in terms of statically resolved overloading, not virtual functions.
+///
+/// For example, here is a visitor that counts the number of alloca
+/// instructions processed:
+///
+/// /// Declare the class. Note that we derive from InstVisitor instantiated
+/// /// with _our new subclasses_ type.
+/// ///
+/// struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
+/// unsigned Count;
+/// CountAllocaVisitor() : Count(0) {}
+///
+/// void visitAllocaInst(AllocaInst &AI) { ++Count; }
+/// };
+///
+/// And this class would be used like this:
+/// CountAllocaVisitor CAV;
+/// CAV.visit(function);
+/// NumAllocas = CAV.Count;
+///
+/// The defined class has 'visit' methods for Instruction, and also for BasicBlock,
+/// Function, and Module, which recursively process all contained instructions.
+///
+/// Note that if you don't implement visitXXX for some instruction type,
+/// the visitXXX method for the instruction's superclass will be invoked. So
+/// if instructions are added in the future, they will be automatically
+/// supported, if you handle one of their superclasses.
+///
+/// The optional second template argument specifies the type that instruction
+/// visitation functions should return. If you specify this, you *MUST* provide
+/// an implementation of visitInstruction, though!
+///
+/// Note that this class is specifically designed as a template to avoid
+/// virtual function call overhead. Defining and using an InstVisitor is just
+/// as efficient as having your own switch statement over the instruction
+/// opcode.
+template<typename SubClass, typename RetTy=void>
+class InstVisitor {
+ //===--------------------------------------------------------------------===//
+ // Interface code - This is the public interface of the InstVisitor that you
+ // use to visit instructions...
+ //
+
+public:
+ // Generic visit method - Allow visitation to all instructions in a range
+ template<class Iterator>
+ void visit(Iterator Start, Iterator End) {
+ while (Start != End)
+ static_cast<SubClass*>(this)->visit(*Start++);
+ }
+
+ // Define visitors for functions and basic blocks...
+ //
+ void visit(Module &M) {
+ static_cast<SubClass*>(this)->visitModule(M);
+ visit(M.begin(), M.end());
+ }
+ void visit(Function &F) {
+ static_cast<SubClass*>(this)->visitFunction(F);
+ visit(F.begin(), F.end());
+ }
+ void visit(BasicBlock &BB) {
+ static_cast<SubClass*>(this)->visitBasicBlock(BB);
+ visit(BB.begin(), BB.end());
+ }
+
+ // Forwarding functions so that the user can visit with pointers AND refs.
+ void visit(Module *M) { visit(*M); }
+ void visit(Function *F) { visit(*F); }
+ void visit(BasicBlock *BB) { visit(*BB); }
+ RetTy visit(Instruction *I) { return visit(*I); }
+
+ // visit - Finally, code to visit an instruction...
+ //
+ RetTy visit(Instruction &I) {
+ switch (I.getOpcode()) {
+ default: llvm_unreachable("Unknown instruction type encountered!");
+ // Build the switch statement using the Instruction.def file...
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ case Instruction::OPCODE: return \
+ static_cast<SubClass*>(this)-> \
+ visit##OPCODE(static_cast<CLASS&>(I));
+#include "llvm/IR/Instruction.def"
+ }
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitation functions... these functions provide default fallbacks in case
+ // the user does not specify what to do for a particular instruction type.
+ // The default behavior is to generalize the instruction type to its subtype
+ // and try visiting the subtype. All of this should be inlined perfectly,
+ // because there are no virtual functions to get in the way.
+ //
+
+ // When visiting a module, function or basic block directly, these methods get
+ // called to indicate when transitioning into a new unit.
+ //
+ void visitModule (Module &M) {}
+ void visitFunction (Function &F) {}
+ void visitBasicBlock(BasicBlock &BB) {}
+
+ // Define instruction specific visitor functions that can be overridden to
+ // handle SPECIFIC instructions. These functions automatically define
+ // visitMul to proxy to visitBinaryOperator for instance in case the user does
+ // not need this generality.
+ //
+ // These functions can also implement fan-out, when a single opcode and
+ // instruction have multiple more specific Instruction subclasses. The Call
+ // instruction currently supports this. We implement that by redirecting that
+ // instruction to a special delegation helper.
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ RetTy visit##OPCODE(CLASS &I) { \
+ if (NUM == Instruction::Call) \
+ return delegateCallInst(I); \
+ else \
+ DELEGATE(CLASS); \
+ }
+#include "llvm/IR/Instruction.def"
+
+ // Specific Instruction type classes... note that all of the casts are
+ // necessary because we use the instruction classes as opaque types...
+ //
+ RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitLoadInst(LoadInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicRMWInst(AtomicRMWInst &I) { DELEGATE(Instruction);}
+ RetTy visitFenceInst(FenceInst &I) { DELEGATE(Instruction);}
+ RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
+ RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction);}
+ RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst);}
+ RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst);}
+ RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst);}
+ RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst);}
+ RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
+ RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction);}
+ RetTy visitVAArgInst(VAArgInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
+ RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
+ RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
+ RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
+ RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
+ RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
+
+ // Handle the special intrinsic instruction classes.
+ RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+
+ // Call and Invoke are slightly different as they delegate first through
+ // a generic CallSite visitor.
+ RetTy visitCallInst(CallInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
+ RetTy visitInvokeInst(InvokeInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
+
+ // Next level propagators: If the user does not overload a specific
+ // instruction type, they can overload one of these to get the whole class
+ // of instructions...
+ //
+ RetTy visitCastInst(CastInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction);}
+ RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction);}
+ RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction);}
+ RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
+
+ // Provide a special visitor for a 'callsite' that visits both calls and
+ // invokes. When unimplemented, properly delegates to either the terminator or
+ // regular instruction visitor.
+ RetTy visitCallSite(CallSite CS) {
+ assert(CS);
+ Instruction &I = *CS.getInstruction();
+ if (CS.isCall())
+ DELEGATE(Instruction);
+
+ assert(CS.isInvoke());
+ DELEGATE(TerminatorInst);
+ }
+
+ // If the user wants a 'default' case, they can choose to override this
+ // function. If this function is not overloaded in the user's subclass, then
+ // this instruction just gets ignored.
+ //
+ // Note that you MUST override this function if your return type is not void.
+ //
+ void visitInstruction(Instruction &I) {} // Ignore unhandled instructions
+
+private:
+ // Special helper function to delegate to CallInst subclass visitors.
+ RetTy delegateCallInst(CallInst &I) {
+ if (const Function *F = I.getCalledFunction()) {
+ switch ((Intrinsic::ID)F->getIntrinsicID()) {
+ default: DELEGATE(IntrinsicInst);
+ case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
+ case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
+ case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memmove: DELEGATE(MemMoveInst);
+ case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::vastart: DELEGATE(VAStartInst);
+ case Intrinsic::vaend: DELEGATE(VAEndInst);
+ case Intrinsic::vacopy: DELEGATE(VACopyInst);
+ case Intrinsic::not_intrinsic: break;
+ }
+ }
+ DELEGATE(CallInst);
+ }
+
+ // An overload that will never actually be called; it is used only from dead
+ // code in the dispatching from opcodes to instruction subclasses.
+ RetTy delegateCallInst(Instruction &I) {
+ llvm_unreachable("delegateCallInst called for non-CallInst");
+ }
+};
+
+#undef DELEGATE
+
+} // End llvm namespace
+
+#endif
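
Illustrative sketch (not part of the diff): a visitor with a non-void return type, which, as the class comment requires, supplies its own visitInstruction fallback. The names are hypothetical.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstVisitor.h"
using namespace llvm;

// Answer a yes/no question per instruction: does it perform a volatile access?
struct VolatileAccessVisitor : InstVisitor<VolatileAccessVisitor, bool> {
  bool visitLoadInst(LoadInst &I) { return I.isVolatile(); }
  bool visitStoreInst(StoreInst &I) { return I.isVolatile(); }
  bool visitInstruction(Instruction &I) { return false; } // required fallback
};

static bool hasVolatileAccess(Function &F) {
  VolatileAccessVisitor V;
  for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
    for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I)
      if (V.visit(*I))
        return true;
  return false;
}
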
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index e12bb03..e1a5130 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -51,7 +51,7 @@ protected:
virtual BasicBlock *getSuccessorV(unsigned idx) const = 0;
virtual unsigned getNumSuccessorsV() const = 0;
virtual void setSuccessorV(unsigned idx, BasicBlock *B) = 0;
- virtual TerminatorInst *clone_impl() const = 0;
+ TerminatorInst *clone_impl() const override = 0;
public:
/// getNumSuccessors - Return the number of successors that this terminator
@@ -143,7 +143,7 @@ protected:
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
- virtual BinaryOperator *clone_impl() const LLVM_OVERRIDE;
+ BinaryOperator *clone_impl() const override;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -385,7 +385,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
/// if (isa<CastInst>(Instr)) { ... }
/// @brief Base class of casting instructions.
class CastInst : public UnaryInstruction {
- virtual void anchor() LLVM_OVERRIDE;
+ void anchor() override;
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
@@ -582,6 +582,11 @@ public:
Type *IntPtrTy ///< Integer type corresponding to pointer
) const;
+ /// @brief Determine if this cast is a no-op cast.
+ bool isNoopCast(
+ const DataLayout *DL ///< DataLayout to get the Int Ptr type from.
+ ) const;
+
/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.
/// @returns 0 if the CastInst pair can't be eliminated, otherwise
@@ -642,7 +647,7 @@ protected:
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd);
- virtual void anchor() LLVM_OVERRIDE; // Out of line virtual method.
+ void anchor() override; // Out of line virtual method.
public:
/// This enumeration lists the possible predicates for CmpInst subclasses.
/// Values in the range 0-31 are reserved for FCmpInst, while values in the
diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h
index 5721d8f..928dc07 100644
--- a/include/llvm/IR/Instruction.h
+++ b/include/llvm/IR/Instruction.h
@@ -15,9 +15,10 @@
#ifndef LLVM_IR_INSTRUCTION_H
#define LLVM_IR_INSTRUCTION_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/User.h"
-#include "llvm/Support/DebugLoc.h"
namespace llvm {
@@ -44,14 +45,16 @@ public:
// Out of line virtual method, so the vtable, etc has a home.
~Instruction();
- /// use_back - Specialize the methods defined in Value, as we know that an
+ /// user_back - Specialize the methods defined in Value, as we know that an
/// instruction can only be used by other instructions.
- Instruction *use_back() { return cast<Instruction>(*use_begin());}
- const Instruction *use_back() const { return cast<Instruction>(*use_begin());}
+ Instruction *user_back() { return cast<Instruction>(*user_begin());}
+ const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
inline const BasicBlock *getParent() const { return Parent; }
inline BasicBlock *getParent() { return Parent; }
+ const DataLayout *getDataLayout() const;
+
/// removeFromParent - This method unlinks 'this' from the containing basic
/// block, but does not delete it.
///
@@ -171,6 +174,21 @@ public:
void setMetadata(unsigned KindID, MDNode *Node);
void setMetadata(StringRef Kind, MDNode *Node);
+ /// \brief Drop unknown metadata.
+ /// Passes are required to drop metadata they don't understand. This is a
+ /// convenience method for passes to do so.
+ void dropUnknownMetadata(ArrayRef<unsigned> KnownIDs);
+ void dropUnknownMetadata() {
+ return dropUnknownMetadata(ArrayRef<unsigned>());
+ }
+ void dropUnknownMetadata(unsigned ID1) {
+ return dropUnknownMetadata(makeArrayRef(ID1));
+ }
+ void dropUnknownMetadata(unsigned ID1, unsigned ID2) {
+ unsigned IDs[] = {ID1, ID2};
+ return dropUnknownMetadata(IDs);
+ }
+
/// setDebugLoc - Set the debug location information for this instruction.
void setDebugLoc(const DebugLoc &Loc) { DbgLoc = Loc; }
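
Illustrative sketch (not part of the diff): a pass that cannot reason about arbitrary metadata keeps only the kinds it understands and drops everything else, as the new dropUnknownMetadata overloads encourage. The helper name is hypothetical.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Keep debug-location and TBAA metadata on the instruction; drop every other
// metadata kind, which this (hypothetical) transform does not understand.
static void stripUnknownMetadata(Instruction &I) {
  unsigned Known[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa};
  I.dropUnknownMetadata(Known);
}
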
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 0843d8f..06d7287 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -17,6 +17,7 @@
#define LLVM_IR_INSTRUCTIONS_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
@@ -57,7 +58,7 @@ enum SynchronizationScope {
///
class AllocaInst : public UnaryInstruction {
protected:
- virtual AllocaInst *clone_impl() const;
+ AllocaInst *clone_impl() const override;
public:
explicit AllocaInst(Type *Ty, Value *ArraySize = 0,
const Twine &Name = "", Instruction *InsertBefore = 0);
@@ -101,7 +102,7 @@ public:
/// by the instruction.
///
unsigned getAlignment() const {
- return (1u << getSubclassDataFromInstruction()) >> 1;
+ return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
}
void setAlignment(unsigned Align);
@@ -110,6 +111,20 @@ public:
/// into the prolog/epilog code, so it is basically free.
bool isStaticAlloca() const;
+ /// \brief Return true if this alloca is used as an inalloca argument to a
+ /// call. Such allocas are never considered static even if they are in the
+ /// entry block.
+ bool isUsedWithInAlloca() const {
+ return getSubclassDataFromInstruction() & 32;
+ }
+
+ /// \brief Specify whether this alloca is used to represent the arguments to
+ /// a call.
+ void setUsedWithInAlloca(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
+ (V ? 32 : 0));
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Alloca);
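
Illustrative sketch (not part of the diff): a frontend that packs call arguments into a single alloca marks it so later passes never treat it as an ordinary static entry-block alloca. The helper name is hypothetical.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Create the argument-pack alloca for an inalloca call and record that use.
static AllocaInst *createArgPackAlloca(IRBuilder<> &B, Type *ArgPackTy) {
  AllocaInst *AI = B.CreateAlloca(ArgPackTy);
  AI->setUsedWithInAlloca(true);
  return AI;
}
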
@@ -136,7 +151,7 @@ private:
class LoadInst : public UnaryInstruction {
void AssertOK();
protected:
- virtual LoadInst *clone_impl() const;
+ LoadInst *clone_impl() const override;
public:
LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -256,7 +271,7 @@ class StoreInst : public Instruction {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void AssertOK();
protected:
- virtual StoreInst *clone_impl() const;
+ StoreInst *clone_impl() const override;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -383,7 +398,7 @@ class FenceInst : public Instruction {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
- virtual FenceInst *clone_impl() const;
+ FenceInst *clone_impl() const override;
public:
// allocate space for exactly zero operands
void *operator new(size_t s) {
@@ -449,19 +464,24 @@ private:
class AtomicCmpXchgInst : public Instruction {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
protected:
- virtual AtomicCmpXchgInst *clone_impl() const;
+ AtomicCmpXchgInst *clone_impl() const override;
public:
// allocate space for exactly three operands
void *operator new(size_t s) {
return User::operator new(s, 3);
}
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
Instruction *InsertBefore = 0);
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
/// isVolatile - Return true if this is a cmpxchg from a volatile memory
@@ -482,13 +502,20 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Set the ordering constraint on this cmpxchg.
- void setOrdering(AtomicOrdering Ordering) {
+ void setSuccessOrdering(AtomicOrdering Ordering) {
assert(Ordering != NotAtomic &&
"CmpXchg instructions can only be atomic.");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
(Ordering << 2));
}
+ void setFailureOrdering(AtomicOrdering Ordering) {
+ assert(Ordering != NotAtomic &&
+ "CmpXchg instructions can only be atomic.");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
+ (Ordering << 5));
+ }
+
/// Specify whether this cmpxchg is atomic and orders other operations with
/// respect to all concurrently executing threads, or only with respect to
/// signal handlers executing in the same thread.
@@ -498,8 +525,13 @@ public:
}
/// Returns the ordering constraint on this cmpxchg.
- AtomicOrdering getOrdering() const {
- return AtomicOrdering(getSubclassDataFromInstruction() >> 2);
+ AtomicOrdering getSuccessOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Returns the ordering constraint on this cmpxchg.
+ AtomicOrdering getFailureOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
}
/// Returns whether this cmpxchg is atomic between threads or only within a
@@ -523,6 +555,28 @@ public:
return getPointerOperand()->getType()->getPointerAddressSpace();
}
+ /// \brief Returns the strongest permitted ordering on failure, given the
+ /// desired ordering on success.
+ ///
+ /// If the comparison in a cmpxchg operation fails, there is no atomic store
+ /// so release semantics cannot be provided. So this function drops explicit
+ /// Release requests from the AtomicOrdering. A SequentiallyConsistent
+ /// operation would remain SequentiallyConsistent.
+ static AtomicOrdering
+ getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
+ switch (SuccessOrdering) {
+ default: llvm_unreachable("invalid cmpxchg success ordering");
+ case Release:
+ case Monotonic:
+ return Monotonic;
+ case AcquireRelease:
+ case Acquire:
+ return Acquire;
+ case SequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicCmpXchg;
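
Illustrative sketch (not part of the diff): cmpxchg now carries separate success and failure orderings, and a frontend can derive the failure ordering from the requested success ordering with the static helper above. The function name is hypothetical.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Emit a compare-exchange whose failure ordering is the strongest one allowed
// for the chosen success ordering (AcquireRelease on success gives Acquire on
// failure, since a failed cmpxchg performs no store to release).
static Value *emitCmpXchg(IRBuilder<> &B, Value *Ptr, Value *Cmp, Value *New) {
  AtomicOrdering Success = AcquireRelease;
  AtomicOrdering Failure =
      AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
  return B.CreateAtomicCmpXchg(Ptr, Cmp, New, Success, Failure);
}
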
@@ -556,7 +610,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
class AtomicRMWInst : public Instruction {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
- virtual AtomicRMWInst *clone_impl() const;
+ AtomicRMWInst *clone_impl() const override;
public:
/// This enumeration lists the possible modifications atomicrmw can make. In
/// the descriptions, 'p' is the pointer to the instruction's memory location,
@@ -721,7 +775,7 @@ class GetElementPtrInst : public Instruction {
unsigned Values, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
- virtual GetElementPtrInst *clone_impl() const;
+ GetElementPtrInst *clone_impl() const override;
public:
static GetElementPtrInst *Create(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
@@ -923,7 +977,7 @@ class ICmpInst: public CmpInst {
protected:
/// \brief Clone an identical ICmpInst
- virtual ICmpInst *clone_impl() const;
+ ICmpInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics.
ICmpInst(
@@ -1055,7 +1109,7 @@ public:
class FCmpInst: public CmpInst {
protected:
/// \brief Clone an identical FCmpInst
- virtual FCmpInst *clone_impl() const;
+ FCmpInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics.
FCmpInst(
@@ -1174,15 +1228,11 @@ class CallInst : public Instruction {
inline CallInst(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd);
- CallInst(Value *F, Value *Actual, const Twine &NameStr,
- Instruction *InsertBefore);
- CallInst(Value *F, Value *Actual, const Twine &NameStr,
- BasicBlock *InsertAtEnd);
explicit CallInst(Value *F, const Twine &NameStr,
Instruction *InsertBefore);
CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
- virtual CallInst *clone_impl() const;
+ CallInst *clone_impl() const override;
public:
static CallInst *Create(Value *Func,
ArrayRef<Value *> Args,
@@ -1245,6 +1295,22 @@ public:
Value *getArgOperand(unsigned i) const { return getOperand(i); }
void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+ /// arg_operands - iteration adapter for range-for loops.
+ iterator_range<op_iterator> arg_operands() {
+ // The last operand in the op list is the callee - it's not one of the args
+ // so we don't want to iterate over it.
+ return iterator_range<op_iterator>(op_begin(), op_end() - 1);
+ }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ iterator_range<const_op_iterator> arg_operands() const {
+ return iterator_range<const_op_iterator>(op_begin(), op_end() - 1);
+ }
+
+ /// \brief Wrappers for getting the \c Use of a call argument.
+ const Use &getArgOperandUse(unsigned i) const { return getOperandUse(i); }
+ Use &getArgOperandUse(unsigned i) { return getOperandUse(i); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
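
Illustrative sketch (not part of the diff): the arg_operands() adapter lets a pass range-for over just the argument operands, skipping the trailing callee operand. The helper name is hypothetical.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Check whether a value is passed directly as an argument of the call; the
// callee operand is deliberately excluded by arg_operands().
static bool isPassedToCall(const CallInst *CI, const Value *V) {
  for (const Value *Arg : CI->arg_operands())
    if (Arg == V)
      return true;
  return false;
}
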
@@ -1450,7 +1516,7 @@ class SelectInst : public Instruction {
setName(NameStr);
}
protected:
- virtual SelectInst *clone_impl() const;
+ SelectInst *clone_impl() const override;
public:
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr = "",
@@ -1505,7 +1571,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
///
class VAArgInst : public UnaryInstruction {
protected:
- virtual VAArgInst *clone_impl() const;
+ VAArgInst *clone_impl() const override;
public:
VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
@@ -1545,7 +1611,7 @@ class ExtractElementInst : public Instruction {
ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
- virtual ExtractElementInst *clone_impl() const;
+ ExtractElementInst *clone_impl() const override;
public:
static ExtractElementInst *Create(Value *Vec, Value *Idx,
@@ -1606,7 +1672,7 @@ class InsertElementInst : public Instruction {
InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
- virtual InsertElementInst *clone_impl() const;
+ InsertElementInst *clone_impl() const override;
public:
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
@@ -1659,7 +1725,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
///
class ShuffleVectorInst : public Instruction {
protected:
- virtual ShuffleVectorInst *clone_impl() const;
+ ShuffleVectorInst *clone_impl() const override;
public:
// allocate space for exactly three operands
@@ -1760,7 +1826,7 @@ class ExtractValueInst : public UnaryInstruction {
return User::operator new(s, 1);
}
protected:
- virtual ExtractValueInst *clone_impl() const;
+ ExtractValueInst *clone_impl() const override;
public:
static ExtractValueInst *Create(Value *Agg,
@@ -1871,7 +1937,7 @@ class InsertValueInst : public Instruction {
InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
- virtual InsertValueInst *clone_impl() const;
+ InsertValueInst *clone_impl() const override;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -2006,7 +2072,7 @@ protected:
// values and pointers to the incoming blocks, all in one allocation.
Use *allocHungoffUses(unsigned) const;
- virtual PHINode *clone_impl() const;
+ PHINode *clone_impl() const override;
public:
/// Constructors - NumReservedValues is a hint for the number of incoming
/// edges that this phi node will have (use 0 if you really have no idea).
@@ -2086,8 +2152,7 @@ public:
/// getIncomingBlock - Return incoming basic block corresponding
/// to value use iterator.
///
- template <typename U>
- BasicBlock *getIncomingBlock(value_use_iterator<U> I) const {
+ BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
return getIncomingBlock(I.getUse());
}
@@ -2198,7 +2263,7 @@ private:
unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
- virtual LandingPadInst *clone_impl() const;
+ LandingPadInst *clone_impl() const override;
public:
/// Constructors - NumReservedClauses is a hint for the number of incoming
/// clauses that this landingpad will have (use 0 if you really have no idea).
@@ -2296,7 +2361,7 @@ private:
ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
protected:
- virtual ReturnInst *clone_impl() const;
+ ReturnInst *clone_impl() const override;
public:
static ReturnInst* Create(LLVMContext &C, Value *retVal = 0,
Instruction *InsertBefore = 0) {
@@ -2329,9 +2394,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
template <>
@@ -2368,7 +2433,7 @@ class BranchInst : public TerminatorInst {
BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
BasicBlock *InsertAtEnd);
protected:
- virtual BranchInst *clone_impl() const;
+ BranchInst *clone_impl() const override;
public:
static BranchInst *Create(BasicBlock *IfTrue, Instruction *InsertBefore = 0) {
return new(1) BranchInst(IfTrue, InsertBefore);
@@ -2428,9 +2493,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
template <>
@@ -2474,7 +2539,7 @@ class SwitchInst : public TerminatorInst {
SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd);
protected:
- virtual SwitchInst *clone_impl() const;
+ SwitchInst *clone_impl() const override;
public:
// -2
@@ -2721,9 +2786,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
template <>
@@ -2766,7 +2831,7 @@ class IndirectBrInst : public TerminatorInst {
/// autoinserts at the end of the specified BasicBlock.
IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
protected:
- virtual IndirectBrInst *clone_impl() const;
+ IndirectBrInst *clone_impl() const override;
public:
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
Instruction *InsertBefore = 0) {
@@ -2819,9 +2884,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
template <>
@@ -2858,7 +2923,7 @@ class InvokeInst : public TerminatorInst {
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
- virtual InvokeInst *clone_impl() const;
+ InvokeInst *clone_impl() const override;
public:
static InvokeInst *Create(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
@@ -2889,6 +2954,20 @@ public:
Value *getArgOperand(unsigned i) const { return getOperand(i); }
void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+ /// arg_operands - iteration adapter for range-for loops.
+ iterator_range<op_iterator> arg_operands() {
+ return iterator_range<op_iterator>(op_begin(), op_end() - 3);
+ }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ iterator_range<const_op_iterator> arg_operands() const {
+ return iterator_range<const_op_iterator>(op_begin(), op_end() - 3);
+ }
+
+ /// \brief Wrappers for getting the \c Use of an invoke argument.
+ const Use &getArgOperandUse(unsigned i) const { return getOperandUse(i); }
+ Use &getArgOperandUse(unsigned i) { return getOperandUse(i); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
@@ -2970,6 +3049,12 @@ public:
addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
}
+ /// \brief Determine if the invoke cannot be duplicated.
+ bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
+ void setCannotDuplicate() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
+ }
+
/// \brief Determine if the call returns a structure through first
/// pointer argument.
bool hasStructRetAttr() const {
@@ -3038,9 +3123,9 @@ public:
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
bool hasFnAttrImpl(Attribute::AttrKind A) const;
@@ -3093,7 +3178,7 @@ class ResumeInst : public TerminatorInst {
explicit ResumeInst(Value *Exn, Instruction *InsertBefore=0);
ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
protected:
- virtual ResumeInst *clone_impl() const;
+ ResumeInst *clone_impl() const override;
public:
static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = 0) {
return new(1) ResumeInst(Exn, InsertBefore);
@@ -3118,9 +3203,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
template <>
@@ -3142,7 +3227,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
class UnreachableInst : public TerminatorInst {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
- virtual UnreachableInst *clone_impl() const;
+ UnreachableInst *clone_impl() const override;
public:
// allocate space for exactly zero operands
@@ -3162,9 +3247,9 @@ public:
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
};
//===----------------------------------------------------------------------===//
@@ -3175,7 +3260,7 @@ private:
class TruncInst : public CastInst {
protected:
/// \brief Clone an identical TruncInst
- virtual TruncInst *clone_impl() const;
+ TruncInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3211,7 +3296,7 @@ public:
class ZExtInst : public CastInst {
protected:
/// \brief Clone an identical ZExtInst
- virtual ZExtInst *clone_impl() const;
+ ZExtInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3247,7 +3332,7 @@ public:
class SExtInst : public CastInst {
protected:
/// \brief Clone an identical SExtInst
- virtual SExtInst *clone_impl() const;
+ SExtInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3283,7 +3368,7 @@ public:
class FPTruncInst : public CastInst {
protected:
/// \brief Clone an identical FPTruncInst
- virtual FPTruncInst *clone_impl() const;
+ FPTruncInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3319,7 +3404,7 @@ public:
class FPExtInst : public CastInst {
protected:
/// \brief Clone an identical FPExtInst
- virtual FPExtInst *clone_impl() const;
+ FPExtInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3355,7 +3440,7 @@ public:
class UIToFPInst : public CastInst {
protected:
/// \brief Clone an identical UIToFPInst
- virtual UIToFPInst *clone_impl() const;
+ UIToFPInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3391,7 +3476,7 @@ public:
class SIToFPInst : public CastInst {
protected:
/// \brief Clone an identical SIToFPInst
- virtual SIToFPInst *clone_impl() const;
+ SIToFPInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3427,7 +3512,7 @@ public:
class FPToUIInst : public CastInst {
protected:
/// \brief Clone an identical FPToUIInst
- virtual FPToUIInst *clone_impl() const;
+ FPToUIInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3463,7 +3548,7 @@ public:
class FPToSIInst : public CastInst {
protected:
/// \brief Clone an identical FPToSIInst
- virtual FPToSIInst *clone_impl() const;
+ FPToSIInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3515,7 +3600,7 @@ public:
);
/// \brief Clone an identical IntToPtrInst
- virtual IntToPtrInst *clone_impl() const;
+ IntToPtrInst *clone_impl() const override;
/// \brief Returns the address space of this instruction's pointer type.
unsigned getAddressSpace() const {
@@ -3539,7 +3624,7 @@ public:
class PtrToIntInst : public CastInst {
protected:
/// \brief Clone an identical PtrToIntInst
- virtual PtrToIntInst *clone_impl() const;
+ PtrToIntInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3587,7 +3672,7 @@ public:
class BitCastInst : public CastInst {
protected:
/// \brief Clone an identical BitCastInst
- virtual BitCastInst *clone_impl() const;
+ BitCastInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
@@ -3624,7 +3709,7 @@ public:
class AddrSpaceCastInst : public CastInst {
protected:
/// \brief Clone an identical AddrSpaceCastInst
- virtual AddrSpaceCastInst *clone_impl() const;
+ AddrSpaceCastInst *clone_impl() const override;
public:
/// \brief Constructor with insert-before-instruction semantics
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 8344c56..e053f78 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -118,8 +118,13 @@ namespace llvm {
class MemIntrinsic : public IntrinsicInst {
public:
Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
+ const Use &getRawDestUse() const { return getArgOperandUse(0); }
+ Use &getRawDestUse() { return getArgOperandUse(0); }
Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
+ const Use &getLengthUse() const { return getArgOperandUse(2); }
+ Use &getLengthUse() { return getArgOperandUse(2); }
+
ConstantInt *getAlignmentCst() const {
return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
}
@@ -192,6 +197,8 @@ namespace llvm {
/// get* - Return the arguments to the instruction.
///
Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+ const Use &getValueUse() const { return getArgOperandUse(1); }
+ Use &getValueUse() { return getArgOperandUse(1); }
void setValue(Value *Val) {
assert(getValue()->getType() == Val->getType() &&
@@ -215,6 +222,8 @@ namespace llvm {
/// get* - Return the arguments to the instruction.
///
Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+ const Use &getRawSourceUse() const { return getArgOperandUse(1); }
+ Use &getRawSourceUse() { return getArgOperandUse(1); }
/// getSource - This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
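The Use-returning accessors added in this section let a transform rewrite an operand of a mem intrinsic in place instead of rebuilding the call. A minimal sketch, assuming MI and NewDest already exist and retargetDest is a hypothetical helper:

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: point a memcpy/memmove/memset at a new destination.
static void retargetDest(MemIntrinsic *MI, Value *NewDest) {
  // getRawDestUse() hands back the Use itself, so the operand can be set directly.
  MI->getRawDestUse().set(NewDest); // assumes NewDest has the expected pointer type
}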
diff --git a/include/llvm/IR/Intrinsics.h b/include/llvm/IR/Intrinsics.h
index 473e525..839bbbd 100644
--- a/include/llvm/IR/Intrinsics.h
+++ b/include/llvm/IR/Intrinsics.h
@@ -79,7 +79,7 @@ namespace Intrinsic {
enum IITDescriptorKind {
Void, VarArg, MMX, Metadata, Half, Float, Double,
Integer, Vector, Pointer, Struct,
- Argument, ExtendVecArgument, TruncVecArgument
+ Argument, ExtendArgument, TruncArgument, HalfVecArgument
} Kind;
union {
@@ -98,13 +98,13 @@ namespace Intrinsic {
AK_AnyPointer
};
unsigned getArgumentNumber() const {
- assert(Kind == Argument || Kind == ExtendVecArgument ||
- Kind == TruncVecArgument);
+ assert(Kind == Argument || Kind == ExtendArgument ||
+ Kind == TruncArgument || Kind == HalfVecArgument);
return Argument_Info >> 2;
}
ArgKind getArgumentKind() const {
- assert(Kind == Argument || Kind == ExtendVecArgument ||
- Kind == TruncVecArgument);
+ assert(Kind == Argument || Kind == ExtendArgument ||
+ Kind == TruncArgument || Kind == HalfVecArgument);
return (ArgKind)(Argument_Info&3);
}
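For reference, the two accessors above imply that Argument_Info packs the argument number into the high bits and the ArgKind into the low two bits. An illustrative sketch of that encoding (the helper names are hypothetical; only the shift and mask mirror the header):

// Pack/unpack Argument_Info: ArgNo in the high bits, ArgKind in bits [1:0].
static unsigned packArgInfo(unsigned ArgNo, unsigned Kind) {
  return (ArgNo << 2) | (Kind & 3);
}
static unsigned unpackArgNo(unsigned Info)   { return Info >> 2; }
static unsigned unpackArgKind(unsigned Info) { return Info & 3; }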
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index ded6cc1..6a48f17 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -69,6 +69,10 @@ class ReadNone<int argNo> : IntrinsicProperty {
def IntrNoReturn : IntrinsicProperty;
+// IntrNoDuplicate - Calls to this intrinsic cannot be duplicated.
+// Parallels the noduplicate attribute on LLVM IR functions.
+def IntrNoDuplicate : IntrinsicProperty;
+
//===----------------------------------------------------------------------===//
// Types used by intrinsics.
//===----------------------------------------------------------------------===//
@@ -102,12 +106,16 @@ class LLVMMatchType<int num>
int Number = num;
}
-// Match the type of another intrinsic parameter that is expected to be
-// an integral vector type, but change the element size to be twice as wide
-// or half as wide as the other type. This is only useful when the intrinsic
-// is overloaded, so the matched type should be declared as iAny.
-class LLVMExtendedElementVectorType<int num> : LLVMMatchType<num>;
-class LLVMTruncatedElementVectorType<int num> : LLVMMatchType<num>;
+// Match the type of another intrinsic parameter that is expected to be based on
+// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
+// be twice as wide or half as wide as the other type. This is only useful when
+// the intrinsic is overloaded, so the matched type should be declared as iAny.
+class LLVMExtendedType<int num> : LLVMMatchType<num>;
+class LLVMTruncatedType<int num> : LLVMMatchType<num>;
+
+// Match the type of another intrinsic parameter that is expected to be a
+// vector type, but halve the element count.
+class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;
def llvm_void_ty : LLVMType<isVoid>;
def llvm_anyint_ty : LLVMType<iAny>;
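A concrete reading of the renamed match classes above (illustrative only): if the overloaded parameter resolves to <4 x i16>, then LLVMExtendedType<0> matches <4 x i32>, LLVMTruncatedType<0> matches <4 x i8>, and LLVMHalfElementsVectorType<0> matches <2 x i16>; if it resolves to the scalar i32, the first two match i64 and i16 respectively.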
@@ -285,10 +293,17 @@ def int_memset : Intrinsic<[],
llvm_i32_ty, llvm_i1_ty],
[IntrReadWriteArgMem, NoCapture<0>]>;
-// These functions do not actually read memory, but they are sensitive to the
-// rounding mode. This needs to be modelled separately; in the meantime
-// declaring them as reading memory is conservatively correct.
-let Properties = [IntrReadMem] in {
+let Properties = [IntrNoMem] in {
+ def int_fma : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>]>;
+ def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>]>;
+
+ // These functions do not read memory, but are sensitive to the
+ // rounding mode. LLVM purposely does not model changes to the FP
+ // environment so they can be treated as readnone.
def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>;
def int_sin : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
@@ -311,16 +326,6 @@ let Properties = [IntrReadMem] in {
def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
}
-let Properties = [IntrNoMem] in {
- def int_fma : Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>]>;
-
- def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>]>;
-}
-
// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
@@ -458,13 +463,14 @@ def int_invariant_end : Intrinsic<[],
//===------------------------ Stackmap Intrinsics -------------------------===//
//
def int_experimental_stackmap : Intrinsic<[],
- [llvm_i32_ty, llvm_i32_ty, llvm_vararg_ty]>;
+ [llvm_i64_ty, llvm_i32_ty, llvm_vararg_ty],
+ [Throws]>;
def int_experimental_patchpoint_void : Intrinsic<[],
- [llvm_i32_ty, llvm_i32_ty,
+ [llvm_i64_ty, llvm_i32_ty,
llvm_ptr_ty, llvm_i32_ty,
llvm_vararg_ty]>;
def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
- [llvm_i32_ty, llvm_i32_ty,
+ [llvm_i64_ty, llvm_i32_ty,
llvm_ptr_ty, llvm_i32_ty,
llvm_vararg_ty]>;
@@ -511,6 +517,11 @@ def int_convertus : Intrinsic<[llvm_anyint_ty],
def int_convertuu : Intrinsic<[llvm_anyint_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+// Clear cache intrinsic. By default it is ignored (i.e. nothing is emitted);
+// it maps to void __clear_cache() on supporting platforms.
+def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
+ [], "llvm.clear_cache">;
+
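A hedged sketch of emitting the new intrinsic from C++, assuming the generated enum name is Intrinsic::clear_cache and emitClearCache is a hypothetical helper; Begin and End are i8* values bounding the code range:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical helper: request instruction-cache maintenance over [Begin, End).
static void emitClearCache(IRBuilder<> &B, Module *M, Value *Begin, Value *End) {
  Value *Fn = Intrinsic::getDeclaration(M, Intrinsic::clear_cache);
  B.CreateCall2(Fn, Begin, End); // targets that need nothing simply drop this call
}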
//===----------------------------------------------------------------------===//
// Target-specific intrinsics
//===----------------------------------------------------------------------===//
@@ -518,6 +529,7 @@ def int_convertuu : Intrinsic<[llvm_anyint_ty],
include "llvm/IR/IntrinsicsPowerPC.td"
include "llvm/IR/IntrinsicsX86.td"
include "llvm/IR/IntrinsicsARM.td"
+include "llvm/IR/IntrinsicsARM64.td"
include "llvm/IR/IntrinsicsAArch64.td"
include "llvm/IR/IntrinsicsXCore.td"
include "llvm/IR/IntrinsicsHexagon.td"
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 68af8c1..61c0e5d 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -36,27 +36,11 @@ def int_aarch64_neon_xtn :
// Vector floating-point convert
def int_aarch64_neon_frintn : Neon_1Arg_Intrinsic;
def int_aarch64_neon_fsqrt : Neon_1Arg_Intrinsic;
-def int_aarch64_neon_fcvtxn :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtns :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtnu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtps :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtpu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtms :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtmu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtas :
+def int_aarch64_neon_vcvtxn :
Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtau :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtzs :
+def int_aarch64_neon_vcvtzs :
Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtzu :
+def int_aarch64_neon_vcvtzu :
Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Vector maxNum (Floating Point)
@@ -84,7 +68,7 @@ class Neon_N3V_Intrinsic
[IntrNoMem]>;
class Neon_N2V_Narrow_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>, llvm_i32_ty],
+ [LLVMExtendedType<0>, llvm_i32_ty],
[IntrNoMem]>;
// Vector rounding shift right by immediate (Signed)
@@ -107,9 +91,6 @@ def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;
class Neon_Across_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-class Neon_2Arg_Across_Float_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-
def int_aarch64_neon_saddlv : Neon_Across_Intrinsic;
def int_aarch64_neon_uaddlv : Neon_Across_Intrinsic;
def int_aarch64_neon_smaxv : Neon_Across_Intrinsic;
@@ -117,30 +98,34 @@ def int_aarch64_neon_umaxv : Neon_Across_Intrinsic;
def int_aarch64_neon_sminv : Neon_Across_Intrinsic;
def int_aarch64_neon_uminv : Neon_Across_Intrinsic;
def int_aarch64_neon_vaddv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxnmv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminnmv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vmaxv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vmaxnmv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminnmv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
// Vector Table Lookup.
def int_aarch64_neon_vtbl1 :
Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+ [llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_neon_vtbl2 :
Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
+ [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
[IntrNoMem]>;
def int_aarch64_neon_vtbl3 :
Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_neon_vtbl4 :
Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
- LLVMMatchType<1>, LLVMMatchType<0>], [IntrNoMem]>;
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
// Vector Table Extension.
// Some elements of the destination vector may not be updated, so the original
@@ -148,23 +133,23 @@ def int_aarch64_neon_vtbl4 :
// arguments after that are the table.
def int_aarch64_neon_vtbx1 :
Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
[IntrNoMem]>;
def int_aarch64_neon_vtbx2 :
Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
- LLVMMatchType<0>], [IntrNoMem]>;
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_neon_vtbx3 :
Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
- LLVMMatchType<1>, LLVMMatchType<0>], [IntrNoMem]>;
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_neon_vtbx4 :
Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
- LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
[IntrNoMem]>;
// Vector Load/store
@@ -233,74 +218,118 @@ def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vpadd :
Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty],[IntrNoMem]>;
def int_aarch64_neon_vpfadd :
- Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfaddq :
- Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Scalar Reduce Pairwise Floating Point Max/Min.
def int_aarch64_neon_vpmax :
- Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpmaxq :
- Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_vpmin :
- Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpminq :
- Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
def int_aarch64_neon_vpfmaxnm :
- Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfmaxnmq :
- Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfminnm :
- Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfminnmq :
- Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Scalar Signed Integer Convert To Floating-point
-def int_aarch64_neon_vcvtf32_s32 :
- Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_s64 :
- Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtint2fps :
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Scalar Unsigned Integer Convert To Floating-point
-def int_aarch64_neon_vcvtf32_u32 :
- Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_u64 :
- Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtint2fpu :
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Convert
+def int_aarch64_neon_fcvtxn :
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtns :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtnu :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtps :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtpu :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtms :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtmu :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtas :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtau :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtzs :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+def int_aarch64_neon_fcvtzu :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Estimate.
+def int_aarch64_neon_vrecpe :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
// Scalar Floating-point Reciprocal Exponent
-def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_vrecpx :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-class Neon_Cmp_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
- [IntrNoMem]>;
+// Scalar Floating-point Reciprocal Square Root Estimate
+def int_aarch64_neon_vrsqrte :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Step
+def int_aarch64_neon_vrecps :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Square Root Step
+def int_aarch64_neon_vrsqrts :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+// Compare with vector operands.
+class Neon_Cmp_Intrinsic :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+// Floating-point compare with scalar operands.
+class Neon_Float_Cmp_Intrinsic :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_anyfloat_ty],
+ [IntrNoMem]>;
// Scalar Compare Equal
def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fceq : Neon_Float_Cmp_Intrinsic;
// Scalar Compare Greater-Than or Equal
def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic;
def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcge : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchs : Neon_Float_Cmp_Intrinsic;
// Scalar Compare Less-Than or Equal
def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fclez : Neon_Float_Cmp_Intrinsic;
// Scalar Compare Less-Than
def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcltz : Neon_Float_Cmp_Intrinsic;
// Scalar Compare Greater-Than
def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic;
def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcgt : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchi : Neon_Float_Cmp_Intrinsic;
// Scalar Compare Bitwise Test Bits
def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic;
// Scalar Floating-point Absolute Compare Greater Than Or Equal
def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic;
-
+def int_aarch64_neon_fcage : Neon_Float_Cmp_Intrinsic;
+
// Scalar Floating-point Absolute Compare Greater Than
def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcagt : Neon_Float_Cmp_Intrinsic;
// Scalar Signed Saturating Accumulated of Unsigned Value
def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic;
@@ -313,7 +342,9 @@ def int_aarch64_neon_vabs :
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
// Scalar Absolute Difference
-def int_aarch64_neon_vabd : Neon_2Arg_Intrinsic;
+def int_aarch64_neon_vabd :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
// Scalar Negate Value
def int_aarch64_neon_vneg :
@@ -325,6 +356,9 @@ def int_aarch64_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;
// Signed Saturating Doubling Multiply-Subtract Long
def int_aarch64_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;
+def int_aarch64_neon_vmull_p64 :
+ Intrinsic<[llvm_v16i8_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
+
class Neon_2Arg_ShiftImm_Intrinsic
: Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -355,34 +389,19 @@ def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;
def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_s32 :
- Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_s64 :
- Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxs2fp_n :
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_u32 :
- Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_u64 :
- Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxu2fp_n :
+ Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_s32_f32 :
- Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_s64_f64 :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxs_n :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_u32_f32 :
- Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_u64_f64 :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
-
-class Neon_SHA_Intrinsic
- : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v1i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxu_n :
+ Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_sha1c : Neon_SHA_Intrinsic;
-def int_aarch64_neon_sha1m : Neon_SHA_Intrinsic;
-def int_aarch64_neon_sha1p : Neon_SHA_Intrinsic;
}
diff --git a/include/llvm/IR/IntrinsicsARM.td b/include/llvm/IR/IntrinsicsARM.td
index 0b50d64..482f98e 100644
--- a/include/llvm/IR/IntrinsicsARM.td
+++ b/include/llvm/IR/IntrinsicsARM.td
@@ -38,12 +38,20 @@ def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
def int_arm_ldrex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
def int_arm_strex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
+def int_arm_ldaex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
+def int_arm_stlex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
def int_arm_clrex : Intrinsic<[]>;
def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
llvm_ptr_ty]>;
def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
+def int_arm_stlexd : Intrinsic<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty]>;
+def int_arm_ldaexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
+
//===----------------------------------------------------------------------===//
// Data barrier instructions
def int_arm_dmb : GCCBuiltin<"__builtin_arm_dmb">, Intrinsic<[], [llvm_i32_ty]>;
@@ -123,20 +131,15 @@ def int_arm_sevl : Intrinsic<[], []>;
class Neon_1Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class Neon_1Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
+ : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
class Neon_2Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem]>;
class Neon_2Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>,
- LLVMExtendedElementVectorType<0>],
+ : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>, LLVMExtendedType<0>],
[IntrNoMem]>;
class Neon_2Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
+ : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
[IntrNoMem]>;
class Neon_3Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
@@ -144,9 +147,7 @@ class Neon_3Arg_Intrinsic
[IntrNoMem]>;
class Neon_3Arg_Long_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
+ [LLVMMatchType<0>, LLVMTruncatedType<0>, LLVMTruncatedType<0>],
[IntrNoMem]>;
class Neon_CvtFxToFP_Intrinsic
: Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -155,6 +156,10 @@ class Neon_CvtFPToFx_Intrinsic
class Neon_CvtFPtoInt_1Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+class Neon_Compare_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+
// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
// Overall, the classes range from 2 to 6 v8i8 arguments.
@@ -224,18 +229,8 @@ def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
// Vector Absolute Compare.
-def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
-def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
-def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
-def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
+def int_arm_neon_vacge : Neon_Compare_Intrinsic;
+def int_arm_neon_vacgt : Neon_Compare_Intrinsic;
// Vector Absolute Differences.
def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
@@ -293,9 +288,6 @@ def int_arm_neon_vpminu : Neon_2Arg_Intrinsic;
// Vector Shift.
def int_arm_neon_vshifts : Neon_2Arg_Intrinsic;
def int_arm_neon_vshiftu : Neon_2Arg_Intrinsic;
-def int_arm_neon_vshiftls : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vshiftlu : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vshiftn : Neon_2Arg_Narrow_Intrinsic;
// Vector Rounding Shift.
def int_arm_neon_vrshifts : Neon_2Arg_Intrinsic;
@@ -472,19 +464,37 @@ def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty],
// Crypto instructions
-def int_arm_neon_aesd : Neon_2Arg_Intrinsic;
-def int_arm_neon_aese : Neon_2Arg_Intrinsic;
-def int_arm_neon_aesimc : Neon_1Arg_Intrinsic;
-def int_arm_neon_aesmc : Neon_1Arg_Intrinsic;
-def int_arm_neon_sha1h : Neon_1Arg_Intrinsic;
-def int_arm_neon_sha1su1 : Neon_2Arg_Intrinsic;
-def int_arm_neon_sha256su0 : Neon_2Arg_Intrinsic;
-def int_arm_neon_sha1c : Neon_3Arg_Intrinsic;
-def int_arm_neon_sha1m : Neon_3Arg_Intrinsic;
-def int_arm_neon_sha1p : Neon_3Arg_Intrinsic;
-def int_arm_neon_sha1su0: Neon_3Arg_Intrinsic;
-def int_arm_neon_sha256h: Neon_3Arg_Intrinsic;
-def int_arm_neon_sha256h2: Neon_3Arg_Intrinsic;
-def int_arm_neon_sha256su1: Neon_3Arg_Intrinsic;
+class AES_1Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty], [IntrNoMem]>;
+class AES_2Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+class SHA_1Arg_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+class SHA_2Arg_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+class SHA_3Arg_i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+class SHA_3Arg_v4i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_arm_neon_aesd : AES_2Arg_Intrinsic;
+def int_arm_neon_aese : AES_2Arg_Intrinsic;
+def int_arm_neon_aesimc : AES_1Arg_Intrinsic;
+def int_arm_neon_aesmc : AES_1Arg_Intrinsic;
+def int_arm_neon_sha1h : SHA_1Arg_Intrinsic;
+def int_arm_neon_sha1su1 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha256su0 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha1c : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1m : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1p : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1su0: SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h: SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h2: SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256su1: SHA_3Arg_v4i32_Intrinsic;
} // end TargetPrefix
diff --git a/include/llvm/IR/IntrinsicsARM64.td b/include/llvm/IR/IntrinsicsARM64.td
new file mode 100644
index 0000000..d7f307e
--- /dev/null
+++ b/include/llvm/IR/IntrinsicsARM64.td
@@ -0,0 +1,628 @@
+//===- IntrinsicsARM64.td - Defines ARM64 intrinsics -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the ARM64-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "arm64" in {
+
+def int_arm64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_arm64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+def int_arm64_clrex : Intrinsic<[]>;
+
+def int_arm64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_arm64_stxp : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_ptr_ty]>;
+
+def int_arm64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON)
+
+let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
+ class AdvSIMD_2Scalar_Float_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_FPToIntRounding_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+ class AdvSIMD_1IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Expand_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1IntArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Int_Across_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Float_Across_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+ class AdvSIMD_2IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Compare_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_FloatCompare_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedType<0>, LLVMExtendedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMExtendedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class AdvSIMD_3VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
+ LLVMMatchType<1>], [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Arithmetic ops
+
+let Properties = [IntrNoMem] in {
+ // Vector Add Across Lanes
+ def int_arm64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Long Add Across Lanes
+ def int_arm64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+
+ // Vector Halving Add
+ def int_arm64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Rounding Halving Add
+ def int_arm64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Add
+ def int_arm64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Add High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_arm64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Add High-Half
+ def int_arm64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Doubling Multiply High
+ def int_arm64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Doubling Multiply High
+ def int_arm64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Polynomial Multiply
+ def int_arm64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Long Multiply
+ def int_arm64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_arm64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_arm64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+
+ // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
+ // it with a v16i8.
+ def int_arm64_neon_pmull64 :
+ Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+ // Vector Extending Multiply
+ def int_arm64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Vector Saturating Doubling Long Multiply
+ def int_arm64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_arm64_neon_sqdmulls_scalar
+ : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ // Vector Halving Subtract
+ def int_arm64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Subtract
+ def int_arm64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Subtract High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_arm64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Subtract High-Half
+ def int_arm64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Compare Absolute Greater-than-or-equal
+ def int_arm64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Compare Absolute Greater-than
+ def int_arm64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Absolute Difference
+ def int_arm64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Scalar Absolute Difference
+ def int_arm64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;
+
+ // Vector Max
+ def int_arm64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Max Across Lanes
+ def int_arm64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_arm64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Min
+ def int_arm64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Min/Max Number
+ def int_arm64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
+ def int_arm64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Vector Min Across Lanes
+ def int_arm64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_arm64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_arm64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Pairwise Add
+ def int_arm64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Long Pairwise Add
+ // FIXME: In theory, we shouldn't need intrinsics for saddlp or
+ // uaddlp, but tblgen's type inference currently can't handle the
+ // pattern fragments this ends up generating.
+ def int_arm64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+ def int_arm64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+ // Folding Maximum
+ def int_arm64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Folding Minimum
+ def int_arm64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_arm64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Reciprocal Estimate/Step
+ def int_arm64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
+ def int_arm64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Reciprocal Exponent
+ def int_arm64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Saturating Shift Left
+ def int_arm64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Rounding Shift Left
+ def int_arm64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Shift Left
+ def int_arm64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Shift Left by Constant
+ def int_arm64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
+ def int_arm64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
+ def int_arm64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Narrowing Shift Right by Constant
+ def int_arm64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_arm64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Shift Right by Constant
+ def int_arm64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Saturating Shift Right by Constant
+ def int_arm64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_arm64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Shift Left
+ def int_arm64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_arm64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Widening Shift Left by Constant
+ def int_arm64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
+ def int_arm64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+ def int_arm64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+
+ // Vector Shift Right by Constant and Insert
+ def int_arm64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Shift Left by Constant and Insert
+ def int_arm64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Saturating Narrow
+ def int_arm64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_arm64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_arm64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+ def int_arm64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Extract and Unsigned Narrow
+ def int_arm64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_arm64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Absolute Value
+ def int_arm64_neon_abs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Absolute Value
+ def int_arm64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Negation
+ def int_arm64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Count Leading Sign Bits
+ def int_arm64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Reciprocal Estimate
+ def int_arm64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
+ def int_arm64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Square Root Estimate
+ def int_arm64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
+ def int_arm64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Bitwise Reverse
+ def int_arm64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Conversions Between Half-Precision and Single-Precision.
+ def int_arm64_neon_vcvtfp2hf
+ : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_arm64_neon_vcvthf2fp
+ : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
+ // Vector Conversions Between Floating-point and Fixed-point.
+ def int_arm64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_arm64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_arm64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+ def int_arm64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+
+ // Vector FP->Int Conversions
+ def int_arm64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_arm64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
+
+  // Vector FP Rounding: only the ties-to-even mode is not represented by a
+  // normal intrinsic.
+ def int_arm64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Scalar FP->Int conversions
+
+ // Vector FP Inexact Narrowing
+ def int_arm64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+ // Scalar FP Inexact Narrowing
+ def int_arm64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+}
+
+let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
+ class AdvSIMD_2Vector2Index_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
+ [IntrNoMem]>;
+}
+
+// Vector element to element moves
+def int_arm64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
+
+let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
+ class AdvSIMD_1Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_1Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+
+ class AdvSIMD_2Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+ class AdvSIMD_2Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+
+ class AdvSIMD_3Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+ class AdvSIMD_3Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+
+ class AdvSIMD_4Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+ class AdvSIMD_4Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<5>]>;
+}
+
+// Memory ops
+
+def int_arm64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_arm64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_arm64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_arm64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_arm64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_arm64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_arm64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_arm64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_arm64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_arm64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
+def int_arm64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
+def int_arm64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;
+
+def int_arm64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
+def int_arm64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
+def int_arm64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_arm64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_arm64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_arm64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_arm64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
+def int_arm64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
+def int_arm64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;
+
+let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
+ class AdvSIMD_Tbl1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_Tbl3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_Tbx1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+}
+def int_arm64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
+def int_arm64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
+def int_arm64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
+def int_arm64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;
+
+def int_arm64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
+def int_arm64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
+def int_arm64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
+def int_arm64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
+
+let TargetPrefix = "arm64" in {
+ class Crypto_AES_DataKey_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ class Crypto_AES_Data_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
+ // (v4i32).
+ class Crypto_SHA_5Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+  // SHA intrinsic taking 1 word of the hash (i32).
+ class Crypto_SHA_1Hash_Intrinsic
+ : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the schedule
+ class Crypto_SHA_8Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 12 words of the schedule
+ class Crypto_SHA_12Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
+ class Crypto_SHA_8Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+}
+
+// AES
+def int_arm64_crypto_aese : Crypto_AES_DataKey_Intrinsic;
+def int_arm64_crypto_aesd : Crypto_AES_DataKey_Intrinsic;
+def int_arm64_crypto_aesmc : Crypto_AES_Data_Intrinsic;
+def int_arm64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
+
+// SHA1
+def int_arm64_crypto_sha1c : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_arm64_crypto_sha1p : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_arm64_crypto_sha1m : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_arm64_crypto_sha1h : Crypto_SHA_1Hash_Intrinsic;
+
+def int_arm64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
+def int_arm64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
+
+// SHA256
+def int_arm64_crypto_sha256h : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_arm64_crypto_sha256h2 : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_arm64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
+def int_arm64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+let TargetPrefix = "arm64" in {
+
+def int_arm64_crc32b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32x : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+}
diff --git a/include/llvm/IR/IntrinsicsMips.td b/include/llvm/IR/IntrinsicsMips.td
index 42c5821..3455761 100644
--- a/include/llvm/IR/IntrinsicsMips.td
+++ b/include/llvm/IR/IntrinsicsMips.td
@@ -26,22 +26,26 @@ let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.".
// Addition/subtraction
def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
- Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [Commutative, IntrNoMem]>;
def int_mips_addu_s_qb : GCCBuiltin<"__builtin_mips_addu_s_qb">,
- Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [Commutative, IntrNoMem]>;
def int_mips_subu_qb : GCCBuiltin<"__builtin_mips_subu_qb">,
- Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_subu_s_qb : GCCBuiltin<"__builtin_mips_subu_s_qb">,
- Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_addq_ph : GCCBuiltin<"__builtin_mips_addq_ph">,
- Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [Commutative, IntrNoMem]>;
def int_mips_addq_s_ph : GCCBuiltin<"__builtin_mips_addq_s_ph">,
- Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [Commutative, IntrNoMem]>;
def int_mips_subq_ph : GCCBuiltin<"__builtin_mips_subq_ph">,
- Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_subq_s_ph : GCCBuiltin<"__builtin_mips_subq_s_ph">,
- Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_madd: GCCBuiltin<"__builtin_mips_madd">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
@@ -833,6 +837,12 @@ def int_mips_div_u_w : GCCBuiltin<"__builtin_msa_div_u_w">,
def int_mips_div_u_d : GCCBuiltin<"__builtin_msa_div_u_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+// This instruction is part of the MSA spec but it does not share the
+// __builtin_msa prefix because it operates on GP registers.
+def int_mips_dlsa : GCCBuiltin<"__builtin_mips_dlsa">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
def int_mips_dotp_s_h : GCCBuiltin<"__builtin_msa_dotp_s_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_dotp_s_w : GCCBuiltin<"__builtin_msa_dotp_s_w">,
@@ -1544,22 +1554,26 @@ def int_mips_shf_w : GCCBuiltin<"__builtin_msa_shf_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_b : GCCBuiltin<"__builtin_msa_sld_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_h : GCCBuiltin<"__builtin_msa_sld_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_w : GCCBuiltin<"__builtin_msa_sld_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_d : GCCBuiltin<"__builtin_msa_sld_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sldi_b : GCCBuiltin<"__builtin_msa_sldi_b">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
def int_mips_sldi_h : GCCBuiltin<"__builtin_msa_sldi_h">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
def int_mips_sldi_w : GCCBuiltin<"__builtin_msa_sldi_w">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
def int_mips_sldi_d : GCCBuiltin<"__builtin_msa_sldi_d">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
def int_mips_sll_b : GCCBuiltin<"__builtin_msa_sll_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
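
Two things change for MIPS above: the new int_mips_dlsa def is an i64 (i64, i64, i32) intrinsic that lives outside the __builtin_msa prefix, and the sld/sldi intrinsics now take an extra vector operand, so existing callers need one more argument. A minimal sketch of emitting dlsa, assuming the usual TableGen naming (Intrinsic::mips_dlsa):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: emit llvm.mips.dlsa(i64 %a, i64 %b, i32 %shift) -> i64, per the
    // signature declared above. The enum name is an assumption.
    static Value *emitDlsa(Module &M, IRBuilder<> &B, Value *A, Value *Bv,
                           Value *Shift) {
      Function *Dlsa = Intrinsic::getDeclaration(&M, Intrinsic::mips_dlsa);
      return B.CreateCall3(Dlsa, A, Bv, Shift);
    }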
diff --git a/include/llvm/IR/IntrinsicsNVVM.td b/include/llvm/IR/IntrinsicsNVVM.td
index a372c22..7f72ce8 100644
--- a/include/llvm/IR/IntrinsicsNVVM.td
+++ b/include/llvm/IR/IntrinsicsNVVM.td
@@ -730,15 +730,15 @@ def llvm_anyi64ptr_ty : LLVMAnyPointerType<llvm_i64_ty>; // (space)i64*
// Bar.Sync
def int_cuda_syncthreads : GCCBuiltin<"__syncthreads">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoDuplicate]>;
def int_nvvm_barrier0 : GCCBuiltin<"__nvvm_bar0">,
- Intrinsic<[], [], []>;
+ Intrinsic<[], [], [IntrNoDuplicate]>;
def int_nvvm_barrier0_popc : GCCBuiltin<"__nvvm_bar0_popc">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
def int_nvvm_barrier0_and : GCCBuiltin<"__nvvm_bar0_and">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
def int_nvvm_barrier0_or : GCCBuiltin<"__nvvm_bar0_or">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
// Membar
def int_nvvm_membar_cta : GCCBuiltin<"__nvvm_membar_cta">,
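
IntrNoDuplicate marks these barrier intrinsics as non-duplicable (the noduplicate attribute), so transforms such as jump threading will not clone the call and every thread still reaches the same textual barrier. A minimal sketch of emitting the barrier, assuming the generated enum Intrinsic::nvvm_barrier0:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: emit llvm.nvvm.barrier0(), which takes no operands and returns
    // void as declared above. The enum name is assumed from int_nvvm_barrier0.
    static void emitBarrier0(Module &M, IRBuilder<> &B) {
      Function *Bar = Intrinsic::getDeclaration(&M, Intrinsic::nvvm_barrier0);
      B.CreateCall(Bar);
    }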
diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index 4c5718f..8f64b5d 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -536,6 +536,8 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[], [], []>;
def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">,
Intrinsic<[], [], []>;
+ def int_x86_sse2_pause : GCCBuiltin<"__builtin_ia32_pause">,
+ Intrinsic<[], [], []>;
}
//===----------------------------------------------------------------------===//
@@ -1246,6 +1248,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_ptestnzc_256 : GCCBuiltin<"__builtin_ia32_ptestnzc256">,
Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ptestm_d_512 : GCCBuiltin<"__builtin_ia32_ptestmd512">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ptestm_q_512 : GCCBuiltin<"__builtin_ia32_ptestmq512">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
}
// Vector extract sign mask
@@ -1313,6 +1321,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty],
[IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_ps_512 : GCCBuiltin<"__builtin_ia32_loadups512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_pd_512 : GCCBuiltin<"__builtin_ia32_loadupd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
@@ -1331,6 +1345,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_maskstoreps256">,
Intrinsic<[], [llvm_ptr_ty,
llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_ps_512 :
+ GCCBuiltin<"__builtin_ia32_storeups512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_pd_512 :
+ GCCBuiltin<"__builtin_ia32_storeupd512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
}
//===----------------------------------------------------------------------===//
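
The AVX-512 masked load/store intrinsics added above take the pointer, a pass-through vector, and an i16/i8 mask with one bit per element; IntrReadArgMem and IntrReadWriteArgMem restrict the modeled memory effects to the pointer argument. A minimal sketch of emitting the 512-bit masked load, assuming the generated enum Intrinsic::x86_avx512_mask_loadu_ps_512:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: emit llvm.x86.avx512.mask.loadu.ps.512(i8* %p, <16 x float> %src,
    // i16 %mask) -> <16 x float>, per the declaration above. Under the usual
    // merge-masking semantics, elements whose mask bit is clear come from %src.
    static Value *emitMaskedLoadPS512(Module &M, IRBuilder<> &B, Value *Ptr,
                                      Value *Src, Value *Mask) {
      Function *Ld =
          Intrinsic::getDeclaration(&M, Intrinsic::x86_avx512_mask_loadu_ps_512);
      return B.CreateCall3(Ld, Ptr, Src, Mask);
    }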
@@ -1386,6 +1408,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v32i8_ty,
llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx512_mask_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
}
// Vector min, max
@@ -1426,6 +1454,30 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_pmins_d : GCCBuiltin<"__builtin_ia32_pminsd256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx512_mask_pmaxu_d_512 : GCCBuiltin<"__builtin_ia32_pmaxud512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_d_512 : GCCBuiltin<"__builtin_ia32_pmaxsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_q_512 : GCCBuiltin<"__builtin_ia32_pmaxuq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_q_512 : GCCBuiltin<"__builtin_ia32_pmaxsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_d_512 : GCCBuiltin<"__builtin_ia32_pminud512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_d_512 : GCCBuiltin<"__builtin_ia32_pminsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_q_512 : GCCBuiltin<"__builtin_ia32_pminuq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_q_512 : GCCBuiltin<"__builtin_ia32_pminsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
}
// Integer shift ops.
@@ -1518,6 +1570,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty], [IntrNoMem]>;
def int_x86_avx2_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pabs_d_512 : GCCBuiltin<"__builtin_ia32_pabsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pabs_q_512 : GCCBuiltin<"__builtin_ia32_pabsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
}
// Horizontal arithmetic ops
@@ -1658,6 +1716,18 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_pbroadcastq_256 :
GCCBuiltin<"__builtin_ia32_pbroadcastq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pbroadcast_d_gpr_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd512_gpr_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pbroadcast_q_gpr_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq512_gpr_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pbroadcast_q_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq512_mem_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
}
// Vector permutation
@@ -1697,6 +1767,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
[IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_d_512 : GCCBuiltin<"__builtin_ia32_loaddqusi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_q_512 : GCCBuiltin<"__builtin_ia32_loaddqudi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
@@ -1715,6 +1791,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_maskstoreq256">,
Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
[IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_d_512 :
+ GCCBuiltin<"__builtin_ia32_storedqusi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_q_512 :
+ GCCBuiltin<"__builtin_ia32_storedqudi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
}
// Variable bit shift ops
@@ -1758,68 +1842,68 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_gather_d_pd : GCCBuiltin<"__builtin_ia32_gatherd_pd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_pd_256 : GCCBuiltin<"__builtin_ia32_gatherd_pd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_pd : GCCBuiltin<"__builtin_ia32_gatherq_pd">,
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_pd_256 : GCCBuiltin<"__builtin_ia32_gatherq_pd256">,
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_ps : GCCBuiltin<"__builtin_ia32_gatherd_ps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_ps_256 : GCCBuiltin<"__builtin_ia32_gatherd_ps256">,
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_ps : GCCBuiltin<"__builtin_ia32_gatherq_ps">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_ps_256 : GCCBuiltin<"__builtin_ia32_gatherq_ps256">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_q : GCCBuiltin<"__builtin_ia32_gatherd_q">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_q_256 : GCCBuiltin<"__builtin_ia32_gatherd_q256">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_q : GCCBuiltin<"__builtin_ia32_gatherq_q">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_q_256 : GCCBuiltin<"__builtin_ia32_gatherq_q256">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_d : GCCBuiltin<"__builtin_ia32_gatherd_d">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_d_d_256 : GCCBuiltin<"__builtin_ia32_gatherd_d256">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_d : GCCBuiltin<"__builtin_ia32_gatherq_d">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx2_gather_q_d_256 : GCCBuiltin<"__builtin_ia32_gatherq_d256">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
}
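
The hunk above retags every AVX2 gather from IntrReadMem to IntrReadArgMem: the calls are now modeled as reading only memory reachable through their pointer argument rather than arbitrary memory, which gives alias analysis more freedom to move them past unrelated stores. A minimal sketch of emitting one gather, with the enum name and operand types assumed from the definition above:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: emit llvm.x86.avx2.gather.d.pd. The caller supplies operands with
    // the types required above: <2 x double> %src, i8* %base, <4 x i32> %index,
    // <2 x double> %mask, i8 %scale.
    static Value *emitGatherDPD(Module &M, IRBuilder<> &B, Value *Src,
                                Value *Base, Value *Index, Value *Mask,
                                Value *Scale) {
      Function *Gather =
          Intrinsic::getDeclaration(&M, Intrinsic::x86_avx2_gather_d_pd);
      Value *Args[] = { Src, Base, Index, Mask, Scale };
      return B.CreateCall(Gather, Args);
    }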
// Misc.
@@ -2592,6 +2676,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
[IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtph2ps_512 : GCCBuiltin<"__builtin_ia32_vcvtph2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtps2ph_512 : GCCBuiltin<"__builtin_ia32_vcvtps2ph512_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
@@ -2641,37 +2731,30 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Mask instructions
// 16-bit mask
- def int_x86_kadd_v16i1 : GCCBuiltin<"__builtin_ia32_kaddw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
- [IntrNoMem]>;
- def int_x86_kand_v16i1 : GCCBuiltin<"__builtin_ia32_kandw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
+ def int_x86_avx512_kand_w : GCCBuiltin<"__builtin_ia32_kandhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_kandn_v16i1 : GCCBuiltin<"__builtin_ia32_kandnw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
+ def int_x86_avx512_kandn_w : GCCBuiltin<"__builtin_ia32_kandnhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_knot_v16i1 : GCCBuiltin<"__builtin_ia32_knotw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty], [IntrNoMem]>;
- def int_x86_kor_v16i1 : GCCBuiltin<"__builtin_ia32_korw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
+ def int_x86_avx512_knot_w : GCCBuiltin<"__builtin_ia32_knothi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_kor_w : GCCBuiltin<"__builtin_ia32_korhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_kxor_v16i1 : GCCBuiltin<"__builtin_ia32_kxorw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
+ def int_x86_avx512_kxor_w : GCCBuiltin<"__builtin_ia32_kxorhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_kxnor_v16i1 : GCCBuiltin<"__builtin_ia32_kxnorw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
+ def int_x86_avx512_kxnor_w : GCCBuiltin<"__builtin_ia32_kxnorhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_mask2int_v16i1 : GCCBuiltin<"__builtin_ia32_mask2intw">,
- Intrinsic<[llvm_i32_ty], [llvm_v16i1_ty], [IntrNoMem]>;
- def int_x86_int2mask_v16i1 : GCCBuiltin<"__builtin_ia32_int2maskw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_x86_kunpck_v16i1 : GCCBuiltin<"__builtin_ia32_kunpckbw">,
- Intrinsic<[llvm_v16i1_ty], [llvm_v8i1_ty, llvm_v8i1_ty],
+ def int_x86_avx512_kunpck_bw : GCCBuiltin<"__builtin_ia32_kunpckhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_kortestz : GCCBuiltin<"__builtin_ia32_kortestz">,
+ def int_x86_avx512_kortestz_w : GCCBuiltin<"__builtin_ia32_kortestzhi">,
Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_kortestc : GCCBuiltin<"__builtin_ia32_kortestc">,
+ def int_x86_avx512_kortestc_w : GCCBuiltin<"__builtin_ia32_kortestchi">,
Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
}
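
The k-register operations above drop the v16i1 vector-of-i1 types in favor of plain i16 values, so mask values are passed around as ordinary integers. A minimal sketch of combining two masks and testing the result, assuming the generated enums Intrinsic::x86_avx512_kand_w and Intrinsic::x86_avx512_kortestz_w:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: %mask = kand.w(%a, %b); %z = kortestz.w(%mask, %mask). Both masks
    // are i16, and kortestz.w returns i32 which, per the usual KORTEST
    // semantics, is nonzero only when the OR of its inputs is zero.
    static Value *emitMaskAndTest(Module &M, IRBuilder<> &B, Value *A,
                                  Value *Bm) {
      Function *KAnd =
          Intrinsic::getDeclaration(&M, Intrinsic::x86_avx512_kand_w);
      Function *KOrTestZ =
          Intrinsic::getDeclaration(&M, Intrinsic::x86_avx512_kortestz_w);
      Value *Mask = B.CreateCall2(KAnd, A, Bm, "mask");
      return B.CreateCall2(KOrTestZ, Mask, Mask, "allzero");
    }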
@@ -2707,20 +2790,55 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_cvtusi642sd : GCCBuiltin<"__builtin_ia32_cvtusi642sd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i64_ty], [IntrNoMem]>;
-
- def int_x86_avx512_vcvtph2ps_512 : GCCBuiltin<"__builtin_ia32_vcvtph2ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty], [IntrNoMem]>;
- def int_x86_avx512_vcvtps2ph_512 : GCCBuiltin<"__builtin_ia32_vcvtps2ph512">,
- Intrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty],
- [IntrNoMem]>;
}
// Vector convert
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_cvt_ps2dq_512 : GCCBuiltin<"__builtin_ia32_cvtps2dq512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty], [IntrNoMem]>;
- def int_x86_avx512_cvtdq2_ps_512 : GCCBuiltin<"__builtin_ia32_cvtdq2ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvttps2dq_512: GCCBuiltin<"__builtin_ia32_cvttps2dq512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvttps2udq_512: GCCBuiltin<"__builtin_ia32_cvttps2udq512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvttpd2dq_512: GCCBuiltin<"__builtin_ia32_cvttpd2dq512_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvttpd2udq_512: GCCBuiltin<"__builtin_ia32_cvttpd2udq512_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_ps_512: GCCBuiltin<"__builtin_ia32_rndscaleps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_pd_512: GCCBuiltin<"__builtin_ia32_rndscalepd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtps2dq_512: GCCBuiltin<"__builtin_ia32_cvtps2dq512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtpd2dq_512: GCCBuiltin<"__builtin_ia32_cvtpd2dq512_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtps2udq_512: GCCBuiltin<"__builtin_ia32_cvtps2udq512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtpd2udq_512: GCCBuiltin<"__builtin_ia32_cvtpd2udq512_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtdq2ps_512 : GCCBuiltin<"__builtin_ia32_cvtdq2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtdq2pd_512 : GCCBuiltin<"__builtin_ia32_cvtdq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8i32_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtudq2ps_512 : GCCBuiltin<"__builtin_ia32_cvtudq2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtudq2pd_512 : GCCBuiltin<"__builtin_ia32_cvtudq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8i32_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cvtpd2ps_512 : GCCBuiltin<"__builtin_ia32_cvtpd2ps512_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f64_ty, llvm_v8f32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
}
// Vector load with broadcast
@@ -2773,44 +2891,18 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_min_ps_512 : GCCBuiltin<"__builtin_ia32_minps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty,
- llvm_v16f32_ty], [IntrNoMem]>;
- def int_x86_avx512_min_pd_512 : GCCBuiltin<"__builtin_ia32_minpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty,
- llvm_v8f64_ty], [IntrNoMem]>;
- def int_x86_avx512_max_ps_512 : GCCBuiltin<"__builtin_ia32_maxps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty,
- llvm_v16f32_ty], [IntrNoMem]>;
- def int_x86_avx512_max_pd_512 : GCCBuiltin<"__builtin_ia32_maxpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty,
- llvm_v8f64_ty], [IntrNoMem]>;
-
- def int_x86_avx512_pmaxu_d : GCCBuiltin<"__builtin_ia32_pmaxud512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pmaxu_q : GCCBuiltin<"__builtin_ia32_pmaxuq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
- def int_x86_avx512_pmaxs_d : GCCBuiltin<"__builtin_ia32_pmaxsd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pmaxs_q : GCCBuiltin<"__builtin_ia32_pmaxsq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
-
- def int_x86_avx512_pminu_d : GCCBuiltin<"__builtin_ia32_pminud512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pminu_q : GCCBuiltin<"__builtin_ia32_pminuq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
- def int_x86_avx512_pmins_d : GCCBuiltin<"__builtin_ia32_pminsd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty], [IntrNoMem]>;
- def int_x86_avx512_pmins_q : GCCBuiltin<"__builtin_ia32_pminsq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_ps_512 : GCCBuiltin<"__builtin_ia32_maxps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_pd_512 : GCCBuiltin<"__builtin_ia32_maxpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_ps_512 : GCCBuiltin<"__builtin_ia32_minps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_pd_512 : GCCBuiltin<"__builtin_ia32_minpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_rndscale_ss : GCCBuiltin<"__builtin_ia32_rndscaless">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
@@ -2825,66 +2917,67 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
- def int_x86_avx512_rndscale_ps_512 : GCCBuiltin<"__builtin_ia32_rndscaleps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty,
- llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_rndscale_pd_512 : GCCBuiltin<"__builtin_ia32_rndscalepd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty,
- llvm_i32_ty], [IntrNoMem]>;
-
def int_x86_avx512_sqrt_pd_512 : GCCBuiltin<"__builtin_ia32_sqrtpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty], [IntrNoMem]>;
def int_x86_avx512_sqrt_ps_512 : GCCBuiltin<"__builtin_ia32_sqrtps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty], [IntrNoMem]>;
- def int_x86_avx512_rcp14_ps_512 : GCCBuiltin<"__builtin_ia32_rcp14ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rcp14_pd_512 : GCCBuiltin<"__builtin_ia32_rcp14pd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rcp14_ss : GCCBuiltin<"__builtin_ia32_rcp14ss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rcp14_sd : GCCBuiltin<"__builtin_ia32_rcp14sd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rsqrt14_ps_512 : GCCBuiltin<"__builtin_ia32_rsqrt14ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rsqrt14_pd_512 : GCCBuiltin<"__builtin_ia32_rsqrt14pd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rsqrt14_ss : GCCBuiltin<"__builtin_ia32_rsqrt14ss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rsqrt14_sd : GCCBuiltin<"__builtin_ia32_rsqrt14sd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_rcp28_ps_512 : GCCBuiltin<"__builtin_ia32_rcp28ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rcp28_pd_512 : GCCBuiltin<"__builtin_ia32_rcp28pd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty],
- [IntrNoMem]>;
- def int_x86_avx512_rcp28_ss : GCCBuiltin<"__builtin_ia32_rcp28ss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ def int_x86_avx512_rsqrt14_ss : GCCBuiltin<"__builtin_ia32_rsqrt14ss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_sd : GCCBuiltin<"__builtin_ia32_rsqrt14sd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rsqrt14_pd_512 : GCCBuiltin<"__builtin_ia32_rsqrt14pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_ps_512 : GCCBuiltin<"__builtin_ia32_rsqrt14ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ss : GCCBuiltin<"__builtin_ia32_rcp14ss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_sd : GCCBuiltin<"__builtin_ia32_rcp14sd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rcp14_pd_512 : GCCBuiltin<"__builtin_ia32_rcp14pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ps_512 : GCCBuiltin<"__builtin_ia32_rcp14ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rcp28_ps : GCCBuiltin<"__builtin_ia32_rcp28ps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp28_pd : GCCBuiltin<"__builtin_ia32_rcp28pd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp28_ss : GCCBuiltin<"__builtin_ia32_rcp28ss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_rcp28_sd : GCCBuiltin<"__builtin_ia32_rcp28sd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+ def int_x86_avx512_rcp28_sd : GCCBuiltin<"__builtin_ia32_rcp28sd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_rsqrt28_ps_512 : GCCBuiltin<"__builtin_ia32_rsqrt28ps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty],
+ def int_x86_avx512_rsqrt28_ps : GCCBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_rsqrt28_pd_512 : GCCBuiltin<"__builtin_ia32_rsqrt28pd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty],
+ def int_x86_avx512_rsqrt28_pd : GCCBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_rsqrt28_ss : GCCBuiltin<"__builtin_ia32_rsqrt28ss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ def int_x86_avx512_rsqrt28_ss : GCCBuiltin<"__builtin_ia32_rsqrt28ss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_avx512_rsqrt28_sd : GCCBuiltin<"__builtin_ia32_rsqrt28sd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+ def int_x86_avx512_rsqrt28_sd : GCCBuiltin<"__builtin_ia32_rsqrt28sd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrNoMem]>;
}
@@ -2909,28 +3002,28 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather_dpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_qpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_qps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqps512">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dpd_512 : GCCBuiltin<"__builtin_ia32_gatherdpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8i32_ty, llvm_ptr_ty,
llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dps_512 : GCCBuiltin<"__builtin_ia32_gatherdps512">,
Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherqpd512">,
Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_i32_ty],
@@ -2938,12 +3031,12 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather_qps_512 : GCCBuiltin<"__builtin_ia32_gatherqps512">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8i64_ty, llvm_ptr_ty,
llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dpq_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpi512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_ptr_ty, llvm_i32_ty],
@@ -2955,7 +3048,7 @@ let TargetPrefix = "x86" in {
def int_x86_avx512_gather_qpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqpi512">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadMem]>;
+ [IntrReadArgMem]>;
def int_x86_avx512_gather_dpq_512 : GCCBuiltin<"__builtin_ia32_gatherdpq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty, llvm_ptr_ty,
@@ -3045,62 +3138,62 @@ let TargetPrefix = "x86" in {
// AVX-512 conflict detection
let TargetPrefix = "x86" in {
- def int_x86_avx512_conflict_d_512 : GCCBuiltin<"__builtin_ia32_conflictd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty],
- []>;
- def int_x86_avx512_conflict_d_mask_512 :
- GCCBuiltin<"__builtin_ia32_mask_conflictd512">,
+ def int_x86_avx512_mask_conflict_d_512 :
+ GCCBuiltin<"__builtin_ia32_vpconflictsi_512_mask">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i1_ty, llvm_v16i32_ty],
- []>;
- def int_x86_avx512_conflict_d_maskz_512:
- GCCBuiltin<"__builtin_ia32_maskz_conflictd512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i1_ty, llvm_v16i32_ty],
+ llvm_v16i32_ty, llvm_i16_ty],
[]>;
-
- def int_x86_avx512_conflict_q_512 : GCCBuiltin<"__builtin_ia32_conflictq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty],
- []>;
- def int_x86_avx512_conflict_q_mask_512 :
- GCCBuiltin<"__builtin_ia32_mask_conflictq512">,
+ def int_x86_avx512_mask_conflict_q_512 :
+ GCCBuiltin<"__builtin_ia32_vpconflictdi_512_mask">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i1_ty, llvm_v8i64_ty],
- []>;
- def int_x86_avx512_conflict_q_maskz_512:
- GCCBuiltin<"__builtin_ia32_maskz_conflictq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i1_ty, llvm_v8i64_ty],
+ llvm_v8i64_ty, llvm_i8_ty],
[]>;
}
// Vector blend
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx512_mskblend_ps_512 : GCCBuiltin<"__builtin_ia32_mskblendps512">,
+ def int_x86_avx512_mask_blend_ps_512 : GCCBuiltin<"__builtin_ia32_blendmps_512_mask">,
Intrinsic<[llvm_v16f32_ty],
- [llvm_v16i1_ty, llvm_v16f32_ty, llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_mskblend_pd_512 : GCCBuiltin<"__builtin_ia32_mskblendpd512">,
+ def int_x86_avx512_mask_blend_pd_512 : GCCBuiltin<"__builtin_ia32_blendmpd_512_mask">,
Intrinsic<[llvm_v8f64_ty],
- [llvm_v8i1_ty, llvm_v8f64_ty, llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
[IntrNoMem]>;
- def int_x86_avx512_mskblend_d_512 : GCCBuiltin<"__builtin_ia32_mskblendd512">,
+ def int_x86_avx512_mask_blend_d_512 : GCCBuiltin<"__builtin_ia32_blendmd_512_mask">,
Intrinsic<[llvm_v16i32_ty],
- [llvm_v16i1_ty, llvm_v16i32_ty, llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_mskblend_q_512 : GCCBuiltin<"__builtin_ia32_mskblendq512">,
+ def int_x86_avx512_mask_blend_q_512 : GCCBuiltin<"__builtin_ia32_blendmq_512_mask">,
Intrinsic<[llvm_v8i64_ty],
- [llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
[IntrNoMem]>;
}
// Misc.
let TargetPrefix = "x86" in {
- def int_x86_avx512_cmpeq_pi_512 : GCCBuiltin<"__builtin_ia32_cmpeqpi512">,
- Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
+ def int_x86_avx512_mask_cmp_ps_512 : GCCBuiltin<"__builtin_ia32_cmpps512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_pd_512 : GCCBuiltin<"__builtin_ia32_cmppd512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pcmpeq_d_512 : GCCBuiltin<"__builtin_ia32_pcmpeqd512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_q_512 : GCCBuiltin<"__builtin_ia32_pcmpeqq512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_d_512 : GCCBuiltin<"__builtin_ia32_pandd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_q_512 : GCCBuiltin<"__builtin_ia32_pandq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty],
[IntrNoMem]>;
- def int_x86_avx512_and_pi : GCCBuiltin<"__builtin_ia32_andpi512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
- [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
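
The blend, compare, and logical intrinsics in the preceding hunks follow the same convention as the rest of the AVX-512 rework: the old v16i1/v8i1 mask operands become i16/i8 scalars, and comparisons return their results as those scalar masks. A minimal sketch of a masked blend, assuming the generated enum Intrinsic::x86_avx512_mask_blend_ps_512:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Sketch: emit llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a,
    // <16 x float> %b, i16 %mask) -> <16 x float>, per the declaration above.
    static Value *emitBlendPS512(Module &M, IRBuilder<> &B, Value *A, Value *Bv,
                                 Value *Mask) {
      Function *Blend =
          Intrinsic::getDeclaration(&M, Intrinsic::x86_avx512_mask_blend_ps_512);
      return B.CreateCall3(Blend, A, Bv, Mask);
    }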
diff --git a/include/llvm/IR/IntrinsicsXCore.td b/include/llvm/IR/IntrinsicsXCore.td
index bf345d4..b614e1e 100644
--- a/include/llvm/IR/IntrinsicsXCore.td
+++ b/include/llvm/IR/IntrinsicsXCore.td
@@ -67,6 +67,8 @@ let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
[NoCapture<0>]>;
def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<0>]>;
+ def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<0>]>;
def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
@@ -78,6 +80,7 @@ let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<0>]>;
def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
[NoCapture<0>, NoCapture<1>]>;
def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index dd379ae..ae4859a 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -15,9 +15,9 @@
#ifndef LLVM_IR_LLVMCONTEXT_H
#define LLVM_IR_LLVMCONTEXT_H
+#include "llvm-c/Core.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Compiler.h"
-#include "llvm-c/Core.h"
namespace llvm {
@@ -27,6 +27,7 @@ class Twine;
class Instruction;
class Module;
class SMDiagnostic;
+class DiagnosticInfo;
template <typename T> class SmallVectorImpl;
/// This is an important class for using LLVM in a threaded context. It
@@ -64,6 +65,11 @@ public:
typedef void (*InlineAsmDiagHandlerTy)(const SMDiagnostic&, void *Context,
unsigned LocCookie);
+ /// Defines the type of a diagnostic handler.
+ /// \see LLVMContext::setDiagnosticHandler.
+ /// \see LLVMContext::diagnose.
+ typedef void (*DiagnosticHandlerTy)(const DiagnosticInfo &DI, void *Context);
+
/// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
/// when problems with inline asm are detected by the backend. The first
/// argument is a function pointer and the second is a context pointer that
@@ -82,6 +88,33 @@ public:
/// setInlineAsmDiagnosticHandler.
void *getInlineAsmDiagnosticContext() const;
+ /// setDiagnosticHandler - This method sets a handler that is invoked
+ /// when the backend needs to report anything to the user. The first
+ /// argument is a function pointer and the second is a context pointer that
+ /// gets passed into the DiagHandler.
+ ///
+ /// LLVMContext doesn't take ownership or interpret either of these
+ /// pointers.
+ void setDiagnosticHandler(DiagnosticHandlerTy DiagHandler,
+ void *DiagContext = 0);
+
+ /// getDiagnosticHandler - Return the diagnostic handler set by
+ /// setDiagnosticHandler.
+ DiagnosticHandlerTy getDiagnosticHandler() const;
+
+ /// getDiagnosticContext - Return the diagnostic context set by
+ /// setDiagnosticContext.
+ void *getDiagnosticContext() const;
+
+ /// diagnose - Report a message to the currently installed diagnostic handler.
+ /// This function returns, in particular in the case of error reporting
+ /// (DI.Severity == RS_Error), so the caller should leave the compilation
+ /// process in a self-consistent state, even though the generated code
+ /// need not be correct.
+ /// The diagnostic message will be implicitly prefixed with a severity
+ /// keyword according to \p DI.getSeverity(), i.e., "error: "
+ /// for RS_Error, "warning: " for RS_Warning, and "note: " for RS_Note.
+ void diagnose(const DiagnosticInfo &DI);
/// emitError - Emit an error message to the currently installed error handler
/// with optional location information. This function returns, so code should
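
A minimal sketch of installing a handler through the new hooks above. The handler signature comes from DiagnosticHandlerTy; DiagnosticPrinterRawOStream is assumed from the DiagnosticPrinter.h header added elsewhere in this change, so treat that spelling as an assumption:

    #include "llvm/IR/DiagnosticInfo.h"
    #include "llvm/IR/DiagnosticPrinter.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Counts diagnostics and prints each one; Context carries caller state.
    static void countingHandler(const DiagnosticInfo &DI, void *Context) {
      unsigned *Count = static_cast<unsigned *>(Context);
      ++*Count;
      DiagnosticPrinterRawOStream DP(errs());
      DI.print(DP);
      errs() << "\n";
    }

    static void installHandler(LLVMContext &Ctx, unsigned &Count) {
      // LLVMContext does not take ownership of either pointer.
      Ctx.setDiagnosticHandler(countingHandler, &Count);
    }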
diff --git a/include/llvm/IR/LeakDetector.h b/include/llvm/IR/LeakDetector.h
new file mode 100644
index 0000000..cb18df8
--- /dev/null
+++ b/include/llvm/IR/LeakDetector.h
@@ -0,0 +1,92 @@
+//===- LeakDetector.h - Provide leak detection ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a class that can be used to provide very simple memory leak
+// checks for an API. Basically LLVM uses this to make sure that Instructions,
+// for example, are deleted when they are supposed to be, and not leaked away.
+//
+// When compiling with NDEBUG (Release build), this class does nothing, thus
+// adding no checking overhead to release builds. Note that this class is
+// implemented in a very simple way, requiring completely manual manipulation
+// and checking for garbage, but this is intentional: users should not be using
+// this API, only other APIs should.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEAKDETECTOR_H
+#define LLVM_IR_LEAKDETECTOR_H
+
+#include <string>
+
+namespace llvm {
+
+class LLVMContext;
+class Value;
+
+struct LeakDetector {
+ /// addGarbageObject - Add a pointer to the internal set of "garbage" object
+ /// pointers. This should be called when objects are created, or if they are
+ /// taken out of an owning collection.
+ ///
+ static void addGarbageObject(void *Object) {
+#ifndef NDEBUG
+ addGarbageObjectImpl(Object);
+#endif
+ }
+
+ /// removeGarbageObject - Remove a pointer from our internal representation of
+ /// our "garbage" objects. This should be called when an object is added to
+ /// an "owning" collection.
+ ///
+ static void removeGarbageObject(void *Object) {
+#ifndef NDEBUG
+ removeGarbageObjectImpl(Object);
+#endif
+ }
+
+ /// checkForGarbage - Traverse the internal representation of garbage
+ /// pointers. If there are any pointers that have been add'ed, but not
+ /// remove'd, big obnoxious warnings about memory leaks are issued.
+ ///
+ /// The specified message will be printed indicating when the check was
+ /// performed.
+ ///
+ static void checkForGarbage(LLVMContext &C, const std::string &Message) {
+#ifndef NDEBUG
+ checkForGarbageImpl(C, Message);
+#endif
+ }
+
+ /// Overload the normal methods to work better with Value*'s because they are
+ /// by far the most common in LLVM. This does not affect the actual
+ /// functioning of this class, it just makes the warning messages nicer.
+ ///
+ static void addGarbageObject(const Value *Object) {
+#ifndef NDEBUG
+ addGarbageObjectImpl(Object);
+#endif
+ }
+ static void removeGarbageObject(const Value *Object) {
+#ifndef NDEBUG
+ removeGarbageObjectImpl(Object);
+#endif
+ }
+
+private:
+ // If we are debugging, the actual implementations will be called...
+ static void addGarbageObjectImpl(const Value *Object);
+ static void removeGarbageObjectImpl(const Value *Object);
+ static void addGarbageObjectImpl(void *Object);
+ static void removeGarbageObjectImpl(void *Object);
+ static void checkForGarbageImpl(LLVMContext &C, const std::string &Message);
+};
+
+} // End llvm namespace
+
+#endif
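
As the header says, these checks compile away under NDEBUG. A minimal sketch of the intended usage pattern from an owning container, using only the API shown above:

    #include "llvm/IR/LeakDetector.h"

    using namespace llvm;

    // Sketch: register a Value while it is "in flight" and unregister it once
    // an owning collection has taken it over.
    void onValueCreated(const Value *V) {
      LeakDetector::addGarbageObject(V);    // created, not yet owned
    }

    void onValueInserted(const Value *V) {
      LeakDetector::removeGarbageObject(V); // now owned by a parent
    }

    void verifyNoLeaks(LLVMContext &C) {
      // Warns about anything added but never removed.
      LeakDetector::checkForGarbage(C, "after running example pass");
    }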
diff --git a/include/llvm/IR/LegacyPassManager.h b/include/llvm/IR/LegacyPassManager.h
index fa1436e..c967a6b 100644
--- a/include/llvm/IR/LegacyPassManager.h
+++ b/include/llvm/IR/LegacyPassManager.h
@@ -55,7 +55,7 @@ public:
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
/// will be destroyed as well, so there is no need to delete the pass. This
/// implies that all passes MUST be allocated with 'new'.
- void add(Pass *P);
+ void add(Pass *P) override;
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
@@ -80,7 +80,7 @@ public:
/// PassManager_X is destroyed, the pass will be destroyed as well, so
/// there is no need to delete the pass.
/// This implies that all passes MUST be allocated with 'new'.
- void add(Pass *P);
+ void add(Pass *P) override;
/// run - Execute all of the passes scheduled for execution. Keep
/// track of whether any of the passes modifies the function, and if
diff --git a/include/llvm/IR/LegacyPassManagers.h b/include/llvm/IR/LegacyPassManagers.h
index d256a3e..5c9dccd 100644
--- a/include/llvm/IR/LegacyPassManagers.h
+++ b/include/llvm/IR/LegacyPassManagers.h
@@ -101,15 +101,15 @@ namespace llvm {
// enums for debugging strings
enum PassDebuggingString {
- EXECUTION_MSG, // "Executing Pass '"
- MODIFICATION_MSG, // "' Made Modification '"
- FREEING_MSG, // " Freeing Pass '"
- ON_BASICBLOCK_MSG, // "' on BasicBlock '" + PassName + "'...\n"
+ EXECUTION_MSG, // "Executing Pass '" + PassName
+ MODIFICATION_MSG, // "Made Modification '" + PassName
+ FREEING_MSG, // " Freeing Pass '" + PassName
+ ON_BASICBLOCK_MSG, // "' on BasicBlock '" + InstructionName + "'...\n"
ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
- ON_REGION_MSG, // " 'on Region ...\n'"
- ON_LOOP_MSG, // " 'on Loop ...\n'"
- ON_CG_MSG // "' on Call Graph ...\n'"
+ ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'"
+ ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'"
+ ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'"
};
/// PassManagerPrettyStackEntry - This is used to print informative information
@@ -127,7 +127,7 @@ public:
: P(p), V(0), M(&m) {} // When P is run on M
/// print - Emit information about this stack frame to OS.
- virtual void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS) const override;
};
@@ -414,7 +414,7 @@ public:
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool runOnFunction(Function &F);
- bool runOnModule(Module &M);
+ bool runOnModule(Module &M) override;
/// cleanup - After running all passes, clean up pass manager cache.
void cleanup();
@@ -426,7 +426,7 @@ public:
/// doInitialization - Run all of the initializers for the function passes.
///
- bool doInitialization(Module &M);
+ bool doInitialization(Module &M) override;
/// doFinalization - Overrides ModulePass doFinalization for global
/// finalization tasks
@@ -435,20 +435,20 @@ public:
/// doFinalization - Run all of the finalizers for the function passes.
///
- bool doFinalization(Module &M);
+ bool doFinalization(Module &M) override;
- virtual PMDataManager *getAsPMDataManager() { return this; }
- virtual Pass *getAsPass() { return this; }
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const {
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
// Print passes managed by this manager
- void dumpPassStructure(unsigned Offset);
+ void dumpPassStructure(unsigned Offset) override;
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Function Pass Manager";
}
@@ -458,7 +458,7 @@ public:
return FP;
}
- virtual PassManagerType getPassManagerType() const {
+ PassManagerType getPassManagerType() const override {
return PMT_FunctionPassManager;
}
};
diff --git a/include/llvm/IR/LegacyPassNameParser.h b/include/llvm/IR/LegacyPassNameParser.h
new file mode 100644
index 0000000..1f6bbbc
--- /dev/null
+++ b/include/llvm/IR/LegacyPassNameParser.h
@@ -0,0 +1,141 @@
+//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PassNameParser and FilteredPassNameParser<> classes,
+// which are used to add command line arguments to a utility for all of the
+// passes that have been registered into the system.
+//
+// The PassNameParser class adds ALL passes linked into the system (that are
+// creatable) as command line arguments to the tool (when instantiated with the
+// appropriate command line option template). The FilteredPassNameParser<>
+// template is used for the same purposes as PassNameParser, except that it only
+// includes passes that have a PassType that are compatible with the filter
+// (which is the template argument).
+//
+// Note that this is part of the legacy pass manager infrastructure and will be
+// (eventually) going away.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
+#define LLVM_IR_LEGACYPASSNAMEPARSER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// PassNameParser class - Make use of the pass registration mechanism to
+// automatically add a command line argument to opt for each pass.
+//
+class PassNameParser : public PassRegistrationListener,
+ public cl::parser<const PassInfo*> {
+ cl::Option *Opt;
+public:
+ PassNameParser() : Opt(0) {}
+ virtual ~PassNameParser();
+
+ void initialize(cl::Option &O) {
+ Opt = &O;
+ cl::parser<const PassInfo*>::initialize(O);
+
+ // Add all of the passes to the map that got initialized before 'this' did.
+ enumeratePasses();
+ }
+
+ // ignorablePassImpl - Can be overridden in subclasses to refine the list of
+ // which passes we want to include.
+ //
+ virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }
+
+ inline bool ignorablePass(const PassInfo *P) const {
+ // Ignore non-selectable and non-constructible passes! Ignore
+ // non-optimizations.
+ return P->getPassArgument() == 0 || *P->getPassArgument() == 0 ||
+ P->getNormalCtor() == 0 || ignorablePassImpl(P);
+ }
+
+ // Implement the PassRegistrationListener callbacks used to populate our map
+ //
+ void passRegistered(const PassInfo *P) override {
+ if (ignorablePass(P) || !Opt) return;
+ if (findOption(P->getPassArgument()) != getNumOptions()) {
+ errs() << "Two passes with the same argument (-"
+ << P->getPassArgument() << ") attempted to be registered!\n";
+ llvm_unreachable(0);
+ }
+ addLiteralOption(P->getPassArgument(), P, P->getPassName());
+ }
+ void passEnumerate(const PassInfo *P) override { passRegistered(P); }
+
+ // printOptionInfo - Print out information about this option. Override the
+ // default implementation to sort the table before we print...
+ void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
+ PassNameParser *PNP = const_cast<PassNameParser*>(this);
+ array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan);
+ cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
+ }
+
+private:
+ // ValLessThan - Provide a sorting comparator for Values elements...
+ static int ValLessThan(const PassNameParser::OptionInfo *VT1,
+ const PassNameParser::OptionInfo *VT2) {
+ return std::strcmp(VT1->Name, VT2->Name);
+ }
+};
+
+///===----------------------------------------------------------------------===//
+/// FilteredPassNameParser class - Make use of the pass registration
+/// mechanism to automatically add a command line argument to opt for
+/// each pass that satisfies a filter criteria. Filter should return
+/// true for passes to be registered as command-line options.
+///
+template<typename Filter>
+class FilteredPassNameParser : public PassNameParser {
+private:
+ Filter filter;
+
+public:
+ bool ignorablePassImpl(const PassInfo *P) const override {
+ return !filter(*P);
+ }
+};
+
+///===----------------------------------------------------------------------===//
+/// PassArgFilter - A filter for use with PassNameFilterParser that only
+/// accepts a Pass whose Arg matches certain strings.
+///
+/// Use like this:
+///
+/// extern const char AllowedPassArgs[] = "-anders_aa -dse";
+///
+/// static cl::list<
+/// const PassInfo*,
+/// bool,
+/// FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > >
+/// PassList(cl::desc("Passes available:"));
+///
+/// Only the -anders_aa and -dse options will be available to the user.
+///
+template<const char *Args>
+class PassArgFilter {
+public:
+ bool operator()(const PassInfo &P) const {
+ return(std::strstr(Args, P.getPassArgument()));
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/IR/MDBuilder.h b/include/llvm/IR/MDBuilder.h
index ce81b54..c07b2bd 100644
--- a/include/llvm/IR/MDBuilder.h
+++ b/include/llvm/IR/MDBuilder.h
@@ -174,11 +174,8 @@ public:
/// given name, an offset and a parent in the TBAA type DAG.
MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
uint64_t Offset = 0) {
- SmallVector<Value *, 4> Ops(3);
- Type *Int64 = IntegerType::get(Context, 64);
- Ops[0] = createString(Name);
- Ops[1] = Parent;
- Ops[2] = ConstantInt::get(Int64, Offset);
+ ConstantInt *Off = ConstantInt::get(Type::getInt64Ty(Context), Offset);
+ Value *Ops[3] = { createString(Name), Parent, Off };
return MDNode::get(Context, Ops);
}
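
A minimal sketch of this method in a small struct-path TBAA setup; createTBAARoot and createTBAAStructTagNode are assumed from the rest of MDBuilder, only createTBAAScalarTypeNode appears in the hunk above:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/MDBuilder.h"

    using namespace llvm;

    // Sketch: tag a load as an "int" access in a simple TBAA tree.
    void tagIntLoad(LLVMContext &Ctx, LoadInst *Load) {
      MDBuilder MDB(Ctx);
      MDNode *Root = MDB.createTBAARoot("example tbaa");          // hypothetical root name
      MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root);  // Offset defaults to 0
      MDNode *Tag = MDB.createTBAAStructTagNode(IntTy, IntTy, 0);
      Load->setMetadata(LLVMContext::MD_tbaa, Tag);
    }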
diff --git a/include/llvm/IR/Mangler.h b/include/llvm/IR/Mangler.h
new file mode 100644
index 0000000..c1ba585
--- /dev/null
+++ b/include/llvm/IR/Mangler.h
@@ -0,0 +1,69 @@
+//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for various backends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_MANGLER_H
+#define LLVM_TARGET_MANGLER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class DataLayout;
+class GlobalValue;
+template <typename T> class SmallVectorImpl;
+class Twine;
+
+class Mangler {
+public:
+ enum ManglerPrefixTy {
+ Default, ///< Emit default string before each symbol.
+ Private, ///< Emit "private" prefix before each symbol.
+ LinkerPrivate ///< Emit "linker private" prefix before each symbol.
+ };
+
+private:
+ const DataLayout *DL;
+
+ /// AnonGlobalIDs - We need to give global values the same name every time
+ /// they are mangled. This keeps track of the number we give to anonymous
+ /// ones.
+ ///
+ mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;
+
+ /// NextAnonGlobalID - This simple counter is used to unique value names.
+ ///
+ mutable unsigned NextAnonGlobalID;
+
+public:
+ Mangler(const DataLayout *DL) : DL(DL), NextAnonGlobalID(1) {}
+
+ /// Print the appropriate prefix and the specified global variable's name.
+ /// If the global variable doesn't have a name, this fills in a unique name
+ /// for the global.
+ void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const;
+ void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const;
+
+ /// Print the appropriate prefix and the specified name as the global variable
+ /// name. GVName must not be empty.
+ void getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+ ManglerPrefixTy PrefixTy = Mangler::Default) const;
+ void getNameWithPrefix(SmallVectorImpl<char> &OutName, const Twine &GVName,
+ ManglerPrefixTy PrefixTy = Mangler::Default) const;
+};
+
+} // End llvm namespace
+
+#endif // LLVM_TARGET_MANGLER_H
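
For reference, a minimal sketch of the new interface in use (not part of the patch); DL and GV stand in for a DataLayout and a GlobalValue owned by the caller:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/IR/Mangler.h"
    #include "llvm/Support/raw_ostream.h"

    // Emit one global's mangled name to the given stream.
    void printMangledName(const llvm::DataLayout &DL,
                          const llvm::GlobalValue *GV, llvm::raw_ostream &OS) {
      llvm::Mangler Mang(&DL);
      llvm::SmallString<64> Name;
      Mang.getNameWithPrefix(Name, GV, /*CannotUsePrivateLabel=*/false);
      OS << Name;
    }
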
diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h
index 9659c2e..d054fbb 100644
--- a/include/llvm/IR/Metadata.h
+++ b/include/llvm/IR/Metadata.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Value.h"
namespace llvm {
@@ -28,7 +29,7 @@ template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits;
-enum LLVMConstants LLVM_ENUM_INT_TYPE(uint32_t) {
+enum LLVMConstants : uint32_t {
DEBUG_METADATA_VERSION = 1 // Current debug info version number.
};
@@ -207,6 +208,42 @@ class NamedMDNode : public ilist_node<NamedMDNode> {
explicit NamedMDNode(const Twine &N);
+ template<class T1, class T2>
+ class op_iterator_impl :
+ public std::iterator<std::bidirectional_iterator_tag, T2> {
+ const NamedMDNode *Node;
+ unsigned Idx;
+ op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) { }
+
+ friend class NamedMDNode;
+
+ public:
+ op_iterator_impl() : Node(0), Idx(0) { }
+
+ bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
+ bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }
+ op_iterator_impl &operator++() {
+ ++Idx;
+ return *this;
+ }
+ op_iterator_impl operator++(int) {
+ op_iterator_impl tmp(*this);
+ operator++();
+ return tmp;
+ }
+ op_iterator_impl &operator--() {
+ --Idx;
+ return *this;
+ }
+ op_iterator_impl operator--(int) {
+ op_iterator_impl tmp(*this);
+ operator--();
+ return tmp;
+ }
+
+ T1 operator*() const { return Node->getOperand(Idx); }
+ };
+
public:
/// eraseFromParent - Drop all references and remove the node from parent
/// module.
@@ -239,6 +276,24 @@ public:
/// dump() - Allow printing of NamedMDNodes from the debugger.
void dump() const;
+
+ // ---------------------------------------------------------------------------
+ // Operand Iterator interface...
+ //
+ typedef op_iterator_impl<MDNode*, MDNode> op_iterator;
+ op_iterator op_begin() { return op_iterator(this, 0); }
+ op_iterator op_end() { return op_iterator(this, getNumOperands()); }
+
+ typedef op_iterator_impl<const MDNode*, MDNode> const_op_iterator;
+ const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
+ const_op_iterator op_end() const { return const_op_iterator(this, getNumOperands()); }
+
+ inline iterator_range<op_iterator> operands() {
+ return iterator_range<op_iterator>(op_begin(), op_end());
+ }
+ inline iterator_range<const_op_iterator> operands() const {
+ return iterator_range<const_op_iterator>(op_begin(), op_end());
+ }
};
} // end llvm namespace
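
For reference, a minimal sketch of the new operand iterators in use (not part of the patch); it walks the standard llvm.module.flags list, but any named metadata works the same way:

    #include "llvm/IR/Module.h"

    // Dump every operand of !llvm.module.flags via the new operands() range.
    void dumpModuleFlags(llvm::Module &M) {
      if (llvm::NamedMDNode *Flags = M.getNamedMetadata("llvm.module.flags"))
        for (llvm::MDNode *Op : Flags->operands())
          Op->dump();
    }
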
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index b30a9a3..f0d4002 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -15,13 +15,15 @@
#ifndef LLVM_IR_MODULE_H
#define LLVM_IR_MODULE_H
-#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/system_error.h"
namespace llvm {
@@ -142,12 +144,6 @@ public:
   /// The named metadata constant iterators.
typedef NamedMDListType::const_iterator const_named_metadata_iterator;
- /// An enumeration for describing the endianess of the target machine.
- enum Endianness { AnyEndianness, LittleEndian, BigEndian };
-
- /// An enumeration for describing the size of a pointer on the target machine.
- enum PointerSize { AnyPointerSize, Pointer32, Pointer64 };
-
/// This enumeration defines the supported behaviors of module flags.
enum ModFlagBehavior {
/// Emits an error if two values disagree, otherwise the resulting value is
@@ -156,7 +152,7 @@ public:
/// Emits a warning if two values disagree. The result value will be the
/// operand for the flag from the first module being linked.
- Warning = 2,
+ Warning = 2,
/// Adds a requirement that another module flag be present and have a
/// specified value after linking is performed. The value must be a metadata
@@ -201,12 +197,20 @@ private:
NamedMDListType NamedMDList; ///< The named metadata in the module
std::string GlobalScopeAsm; ///< Inline Asm at global scope.
ValueSymbolTable *ValSymTab; ///< Symbol table for values
- OwningPtr<GVMaterializer> Materializer; ///< Used to materialize GlobalValues
+ std::unique_ptr<GVMaterializer>
+ Materializer; ///< Used to materialize GlobalValues
std::string ModuleID; ///< Human readable identifier for the module
std::string TargetTriple; ///< Platform target triple Module compiled on
- std::string DataLayout; ///< Target data description
void *NamedMDSymTab; ///< NamedMDNode names.
+ // We need to keep the string because the C API expects us to own the string
+ // representation.
+ // Since we have it, we also use an empty string to represent a module without
+ // a DataLayout. If it has a DataLayout, these variables are in sync and the
+ // string is just a cache of getDataLayout()->getStringRepresentation().
+ std::string DataLayoutStr;
+ DataLayout DL;
+
friend class Constant;
/// @}
@@ -227,23 +231,17 @@ public:
/// @returns the module identifier as a string
const std::string &getModuleIdentifier() const { return ModuleID; }
- /// Get the data layout string for the module's target platform. This encodes
- /// the type sizes and alignments expected by this module.
- /// @returns the data layout as a string
- const std::string &getDataLayout() const { return DataLayout; }
+ /// Get the data layout string for the module's target platform. This is
+ /// equivalent to getDataLayout()->getStringRepresentation().
+ const std::string &getDataLayoutStr() const { return DataLayoutStr; }
+
+ /// Get the data layout for the module's target platform.
+ const DataLayout *getDataLayout() const;
/// Get the target triple which is a string describing the target host.
/// @returns a string containing the target triple.
const std::string &getTargetTriple() const { return TargetTriple; }
- /// Get the target endian information.
- /// @returns Endianess - an enumeration for the endianess of the target
- Endianness getEndianness() const;
-
- /// Get the target pointer size.
- /// @returns PointerSize - an enumeration for the size of the target's pointer
- PointerSize getPointerSize() const;
-
/// Get the global data context.
/// @returns LLVMContext - a container for LLVM's global information
LLVMContext &getContext() const { return Context; }
@@ -260,7 +258,8 @@ public:
void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
/// Set the data layout
- void setDataLayout(StringRef DL) { DataLayout = DL; }
+ void setDataLayout(StringRef Desc);
+ void setDataLayout(const DataLayout *Other);
/// Set the target triple.
void setTargetTriple(StringRef T) { TargetTriple = T; }
@@ -464,18 +463,13 @@ public:
/// materialized lazily. If !isDematerializable(), this method is a noop.
void Dematerialize(GlobalValue *GV);
- /// MaterializeAll - Make sure all GlobalValues in this Module are fully read.
- /// If the module is corrupt, this returns true and fills in the optional
- /// string with information about the problem. If successful, this returns
- /// false.
- bool MaterializeAll(std::string *ErrInfo = 0);
+ /// Make sure all GlobalValues in this Module are fully read.
+ error_code materializeAll();
- /// MaterializeAllPermanently - Make sure all GlobalValues in this Module are
- /// fully read and clear the Materializer. If the module is corrupt, this
- /// returns true, fills in the optional string with information about the
- /// problem, and DOES NOT clear the old Materializer. If successful, this
- /// returns false.
- bool MaterializeAllPermanently(std::string *ErrInfo = 0);
+ /// Make sure all GlobalValues in this Module are fully read and clear the
+ /// Materializer. If the module is corrupt, this DOES NOT clear the old
+ /// Materializer.
+ error_code materializeAllPermanently();
/// @}
/// @name Direct access to the globals list, functions list, and symbol table
@@ -524,6 +518,13 @@ public:
const_global_iterator global_end () const { return GlobalList.end(); }
bool global_empty() const { return GlobalList.empty(); }
+ iterator_range<global_iterator> globals() {
+ return iterator_range<global_iterator>(global_begin(), global_end());
+ }
+ iterator_range<const_global_iterator> globals() const {
+ return iterator_range<const_global_iterator>(global_begin(), global_end());
+ }
+
/// @}
/// @name Function Iteration
/// @{
@@ -546,6 +547,12 @@ public:
size_t alias_size () const { return AliasList.size(); }
bool alias_empty() const { return AliasList.empty(); }
+ iterator_range<alias_iterator> aliases() {
+ return iterator_range<alias_iterator>(alias_begin(), alias_end());
+ }
+ iterator_range<const_alias_iterator> aliases() const {
+ return iterator_range<const_alias_iterator>(alias_begin(), alias_end());
+ }
/// @}
/// @name Named Metadata Iteration
@@ -564,6 +571,14 @@ public:
size_t named_metadata_size() const { return NamedMDList.size(); }
bool named_metadata_empty() const { return NamedMDList.empty(); }
+ iterator_range<named_metadata_iterator> named_metadata() {
+ return iterator_range<named_metadata_iterator>(named_metadata_begin(),
+ named_metadata_end());
+ }
+ iterator_range<const_named_metadata_iterator> named_metadata() const {
+ return iterator_range<const_named_metadata_iterator>(named_metadata_begin(),
+ named_metadata_end());
+ }
/// @}
/// @name Utility functions for printing and dumping Module objects
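
For reference, a minimal sketch that exercises the new error_code-based materialization and the range accessors (not part of the patch); the function name and the counting rule are illustrative:

    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    // Materialize the module, then count globals that carry an initializer.
    bool summarize(llvm::Module &M) {
      if (llvm::error_code EC = M.materializeAllPermanently()) {
        llvm::errs() << "materialization failed: " << EC.message() << "\n";
        return false;
      }
      unsigned NumDefined = 0;
      for (llvm::GlobalVariable &GV : M.globals())
        if (GV.hasInitializer())
          ++NumDefined;
      llvm::errs() << NumDefined << " globals with initializers\n";
      return true;
    }
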
diff --git a/include/llvm/IR/NoFolder.h b/include/llvm/IR/NoFolder.h
new file mode 100644
index 0000000..a9cdfc3
--- /dev/null
+++ b/include/llvm/IR/NoFolder.h
@@ -0,0 +1,298 @@
+//===- NoFolder.h - Constant folding helper ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NoFolder class, a helper for IRBuilder. It provides
+// IRBuilder with a set of methods for creating unfolded constants. This is
+// useful for learners trying to understand how LLVM IR works, and who don't
+// want details to be hidden by the constant folder. For general constant
+// creation and folding, use ConstantExpr and the routines in
+// llvm/Analysis/ConstantFolding.h.
+//
+// Note: since it is not actually possible to create unfolded constants, this
+// class returns instructions rather than constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_NOFOLDER_H
+#define LLVM_IR_NOFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+
+/// NoFolder - Create "constants" (actually, instructions) with no folding.
+class NoFolder {
+public:
+ explicit NoFolder() {}
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWAdd(LHS, RHS);
+ }
+ Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWAdd(LHS, RHS);
+ }
+ Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFAdd(LHS, RHS);
+ }
+ Instruction *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWSub(LHS, RHS);
+ }
+ Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWSub(LHS, RHS);
+ }
+ Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFSub(LHS, RHS);
+ }
+ Instruction *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWMul(LHS, RHS);
+ }
+ Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWMul(LHS, RHS);
+ }
+ Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFMul(LHS, RHS);
+ }
+ Instruction *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateUDiv(LHS, RHS);
+ return BinaryOperator::CreateExactUDiv(LHS, RHS);
+ }
+ Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateExactUDiv(LHS, RHS);
+ }
+ Instruction *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateSDiv(LHS, RHS);
+ return BinaryOperator::CreateExactSDiv(LHS, RHS);
+ }
+ Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateExactSDiv(LHS, RHS);
+ }
+ Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFDiv(LHS, RHS);
+ }
+ Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateURem(LHS, RHS);
+ }
+ Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateSRem(LHS, RHS);
+ }
+ Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFRem(LHS, RHS);
+ }
+ Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
+ bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateLShr(LHS, RHS);
+ return BinaryOperator::CreateExactLShr(LHS, RHS);
+ }
+ Instruction *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateAShr(LHS, RHS);
+ return BinaryOperator::CreateExactAShr(LHS, RHS);
+ }
+ Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateAnd(LHS, RHS);
+ }
+ Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateOr(LHS, RHS);
+ }
+ Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateXor(LHS, RHS);
+ }
+
+ Instruction *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::Create(Opc, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateNeg(C);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWNeg(Constant *C) const {
+ return BinaryOperator::CreateNSWNeg(C);
+ }
+ Instruction *CreateNUWNeg(Constant *C) const {
+ return BinaryOperator::CreateNUWNeg(C);
+ }
+ Instruction *CreateFNeg(Constant *C) const {
+ return BinaryOperator::CreateFNeg(C);
+ }
+ Instruction *CreateNot(Constant *C) const {
+ return BinaryOperator::CreateNot(C);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateGetElementPtr(Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(C, IdxList);
+ }
+ Constant *CreateGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(C, Idx);
+ }
+ Instruction *CreateGetElementPtr(Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return GetElementPtrInst::Create(C, IdxList);
+ }
+
+ Constant *CreateInBoundsGetElementPtr(Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(C, IdxList);
+ }
+ Constant *CreateInBoundsGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(C, Idx);
+ }
+ Instruction *CreateInBoundsGetElementPtr(Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return GetElementPtrInst::CreateInBounds(C, IdxList);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const {
+ return CastInst::Create(Op, C, DestTy);
+ }
+ Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreatePointerCast(C, DestTy);
+ }
+ Instruction *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const {
+ return CastInst::CreateIntegerCast(C, DestTy, isSigned);
+ }
+ Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateFPCast(C, DestTy);
+ }
+
+ Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateZExtOrBitCast(C, DestTy);
+ }
+ Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateSExtOrBitCast(C, DestTy);
+ }
+
+ Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateTruncOrBitCast(C, DestTy);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateICmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
+ return new ICmpInst(P, LHS, RHS);
+ }
+ Instruction *CreateFCmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
+ return new FCmpInst(P, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateSelect(Constant *C,
+ Constant *True, Constant *False) const {
+ return SelectInst::Create(C, True, False);
+ }
+
+ Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ return ExtractElementInst::Create(Vec, Idx);
+ }
+
+ Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
+ return InsertElementInst::Create(Vec, NewElt, Idx);
+ }
+
+ Instruction *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
+ return new ShuffleVectorInst(V1, V2, Mask);
+ }
+
+ Instruction *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const {
+ return ExtractValueInst::Create(Agg, IdxList);
+ }
+
+ Instruction *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const {
+ return InsertValueInst::Create(Agg, Val, IdxList);
+ }
+};
+
+}
+
+#endif
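
For reference, a minimal sketch of NoFolder plugged into IRBuilder (not part of the patch); with this folder the builder emits a real add instruction even when both operands are constants:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/NoFolder.h"

    // Append an unfolded 'add i32 1, 2' to the given block.
    llvm::Value *emitUnfoldedAdd(llvm::BasicBlock *BB) {
      llvm::LLVMContext &Ctx = BB->getContext();
      llvm::IRBuilder<true, llvm::NoFolder> Builder(BB);
      llvm::Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1);
      llvm::Value *Two = llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 2);
      return Builder.CreateAdd(One, Two, "sum");
    }
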
diff --git a/include/llvm/IR/Operator.h b/include/llvm/IR/Operator.h
index 5b9bee7..888cabf 100644
--- a/include/llvm/IR/Operator.h
+++ b/include/llvm/IR/Operator.h
@@ -18,9 +18,9 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
namespace llvm {
@@ -210,6 +210,10 @@ public:
setNoSignedZeros();
setAllowReciprocal();
}
+
+ void operator&=(const FastMathFlags &OtherFlags) {
+ Flags &= OtherFlags.Flags;
+ }
};
@@ -473,6 +477,36 @@ public:
};
+class PtrToIntOperator
+ : public ConcreteOperator<Operator, Instruction::PtrToInt> {
+ friend class PtrToInt;
+ friend class ConstantExpr;
+
+public:
+ Value *getPointerOperand() {
+ return getOperand(0);
+ }
+ const Value *getPointerOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getPointerOperandIndex() {
+ return 0U; // get index for modifying correct operand
+ }
+
+ /// getPointerOperandType - Method to return the pointer operand as a
+ /// PointerType.
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
+ }
+
+ /// getPointerAddressSpace - Method to return the address space of the
+ /// pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+ }
+};
+
+
} // End llvm namespace
#endif
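
For reference, a minimal sketch of the new PtrToIntOperator in use (not part of the patch); because it is an Operator, one dyn_cast covers both the ptrtoint instruction and the equivalent constant expression:

    #include "llvm/IR/Operator.h"

    // Return the source pointer's address space, or 0 if V is not a ptrtoint.
    unsigned ptrToIntSourceAddrSpace(llvm::Value *V) {
      if (llvm::PtrToIntOperator *PTI = llvm::dyn_cast<llvm::PtrToIntOperator>(V))
        return PTI->getPointerAddressSpace();
      return 0;
    }
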
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index 833547a..c6c530c 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -35,12 +35,17 @@
///
//===----------------------------------------------------------------------===//
+#ifndef LLVM_IR_PASS_MANAGER_H
+#define LLVM_IR_PASS_MANAGER_H
+
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/polymorphic_ptr.h"
-#include "llvm/Support/type_traits.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/type_traits.h"
#include <list>
+#include <memory>
#include <vector>
namespace llvm {
@@ -48,317 +53,747 @@ namespace llvm {
class Module;
class Function;
+/// \brief An abstract set of preserved analyses following a transformation pass
+/// run.
+///
+/// When a transformation pass is run, it can return a set of analyses whose
+/// results were preserved by that transformation. The default set is "none",
+/// and preserving analyses must be done explicitly.
+///
+/// There is also an explicit all state which can be used (for example) when
+/// the IR is not mutated at all.
+class PreservedAnalyses {
+public:
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PreservedAnalyses() {}
+ PreservedAnalyses(const PreservedAnalyses &Arg)
+ : PreservedPassIDs(Arg.PreservedPassIDs) {}
+ PreservedAnalyses(PreservedAnalyses &&Arg)
+ : PreservedPassIDs(std::move(Arg.PreservedPassIDs)) {}
+ friend void swap(PreservedAnalyses &LHS, PreservedAnalyses &RHS) {
+ using std::swap;
+ swap(LHS.PreservedPassIDs, RHS.PreservedPassIDs);
+ }
+ PreservedAnalyses &operator=(PreservedAnalyses RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Convenience factory function for the empty preserved set.
+ static PreservedAnalyses none() { return PreservedAnalyses(); }
+
+ /// \brief Construct a special preserved set that preserves all passes.
+ static PreservedAnalyses all() {
+ PreservedAnalyses PA;
+ PA.PreservedPassIDs.insert((void *)AllPassesID);
+ return PA;
+ }
+
+ /// \brief Mark a particular pass as preserved, adding it to the set.
+ template <typename PassT> void preserve() {
+ if (!areAllPreserved())
+ PreservedPassIDs.insert(PassT::ID());
+ }
+
+ /// \brief Intersect this set with another in place.
+ ///
+ /// This is a mutating operation on this preserved set, removing all
+ /// preserved passes which are not also preserved in the argument.
+ void intersect(const PreservedAnalyses &Arg) {
+ if (Arg.areAllPreserved())
+ return;
+ if (areAllPreserved()) {
+ PreservedPassIDs = Arg.PreservedPassIDs;
+ return;
+ }
+ for (SmallPtrSet<void *, 2>::const_iterator I = PreservedPassIDs.begin(),
+ E = PreservedPassIDs.end();
+ I != E; ++I)
+ if (!Arg.PreservedPassIDs.count(*I))
+ PreservedPassIDs.erase(*I);
+ }
+
+ /// \brief Intersect this set with a temporary other set in place.
+ ///
+ /// This is a mutating operation on this preserved set, removing all
+ /// preserved passes which are not also preserved in the argument.
+ void intersect(PreservedAnalyses &&Arg) {
+ if (Arg.areAllPreserved())
+ return;
+ if (areAllPreserved()) {
+ PreservedPassIDs = std::move(Arg.PreservedPassIDs);
+ return;
+ }
+ for (SmallPtrSet<void *, 2>::const_iterator I = PreservedPassIDs.begin(),
+ E = PreservedPassIDs.end();
+ I != E; ++I)
+ if (!Arg.PreservedPassIDs.count(*I))
+ PreservedPassIDs.erase(*I);
+ }
+
+ /// \brief Query whether a pass is marked as preserved by this set.
+ template <typename PassT> bool preserved() const {
+ return preserved(PassT::ID());
+ }
+
+ /// \brief Query whether an abstract pass ID is marked as preserved by this
+ /// set.
+ bool preserved(void *PassID) const {
+ return PreservedPassIDs.count((void *)AllPassesID) ||
+ PreservedPassIDs.count(PassID);
+ }
+
+private:
+ // Note that this must not be -1 or -2 as those are already used by the
+ // SmallPtrSet.
+ static const uintptr_t AllPassesID = (intptr_t)(-3);
+
+ bool areAllPreserved() const {
+ return PreservedPassIDs.count((void *)AllPassesID);
+ }
+
+ SmallPtrSet<void *, 2> PreservedPassIDs;
+};
+
/// \brief Implementation details of the pass manager interfaces.
namespace detail {
/// \brief Template for the abstract base class used to dispatch
/// polymorphically over pass objects.
-template <typename T> struct PassConcept {
+template <typename IRUnitT, typename AnalysisManagerT> struct PassConcept {
// Boiler plate necessary for the container of derived classes.
virtual ~PassConcept() {}
- virtual PassConcept *clone() = 0;
/// \brief The polymorphic API which runs the pass over a given IR entity.
- virtual bool run(T Arg) = 0;
+ ///
+  /// Note that the actual pass object can omit the analysis manager argument
+  /// if desired. Also note that the analysis manager may be null if there is
+  /// no analysis manager in the pass pipeline.
+ virtual PreservedAnalyses run(IRUnitT IR, AnalysisManagerT *AM) = 0;
+
+ /// \brief Polymorphic method to access the name of a pass.
+ virtual StringRef name() = 0;
+};
+
+/// \brief SFINAE metafunction for computing whether \c PassT has a run method
+/// accepting an \c AnalysisManagerT.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT,
+ typename ResultT>
+class PassRunAcceptsAnalysisManager {
+ typedef char SmallType;
+ struct BigType {
+ char a, b;
+ };
+
+ template <typename T, ResultT (T::*)(IRUnitT, AnalysisManagerT *)>
+ struct Checker;
+
+ template <typename T> static SmallType f(Checker<T, &T::run> *);
+ template <typename T> static BigType f(...);
+
+public:
+ enum { Value = sizeof(f<PassT>(0)) == sizeof(SmallType) };
};
/// \brief A template wrapper used to implement the polymorphic API.
///
-/// Can be instantiated for any object which provides a \c run method
-/// accepting a \c T. It requires the pass to be a copyable
-/// object.
-template <typename T, typename PassT> struct PassModel : PassConcept<T> {
- PassModel(PassT Pass) : Pass(llvm_move(Pass)) {}
- virtual PassModel *clone() { return new PassModel(Pass); }
- virtual bool run(T Arg) { return Pass.run(Arg); }
+/// Can be instantiated for any object which provides a \c run method accepting
+/// an \c IRUnitT. It requires the pass to be a copyable object. When the
+/// \c run method also accepts an \c AnalysisManagerT*, we pass it along.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT,
+ bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
+ IRUnitT, AnalysisManagerT, PassT, PreservedAnalyses>::Value>
+struct PassModel;
+
+/// \brief Specialization of \c PassModel for passes that accept an analysis
+/// manager.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT>
+struct PassModel<IRUnitT, AnalysisManagerT, PassT, true>
+ : PassConcept<IRUnitT, AnalysisManagerT> {
+ explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
+ PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(PassModel &LHS, PassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ PassModel &operator=(PassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ PreservedAnalyses run(IRUnitT IR, AnalysisManagerT *AM) override {
+ return Pass.run(IR, AM);
+ }
+ StringRef name() override { return PassT::name(); }
PassT Pass;
};
-}
+/// \brief Specialization of \c PassModel for passes that do not accept an
+/// analysis manager.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT>
+struct PassModel<IRUnitT, AnalysisManagerT, PassT, false>
+ : PassConcept<IRUnitT, AnalysisManagerT> {
+ explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
+ PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(PassModel &LHS, PassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ PassModel &operator=(PassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ PreservedAnalyses run(IRUnitT IR, AnalysisManagerT *AM) override {
+ return Pass.run(IR);
+ }
+ StringRef name() override { return PassT::name(); }
+ PassT Pass;
+};
-class AnalysisManager;
+/// \brief Abstract concept of an analysis result.
+///
+/// This concept is parameterized over the IR unit that this result pertains
+/// to.
+template <typename IRUnitT> struct AnalysisResultConcept {
+ virtual ~AnalysisResultConcept() {}
+
+ /// \brief Method to try and mark a result as invalid.
+ ///
+ /// When the outer analysis manager detects a change in some underlying
+ /// unit of the IR, it will call this method on all of the results cached.
+ ///
+ /// This method also receives a set of preserved analyses which can be used
+ /// to avoid invalidation because the pass which changed the underlying IR
+ /// took care to update or preserve the analysis result in some way.
+ ///
+ /// \returns true if the result is indeed invalid (the default).
+ virtual bool invalidate(IRUnitT IR, const PreservedAnalyses &PA) = 0;
+};
+
+/// \brief SFINAE metafunction for computing whether \c ResultT provides an
+/// \c invalidate member function.
+template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
+ typedef char SmallType;
+ struct BigType {
+ char a, b;
+ };
+
+ template <typename T, bool (T::*)(IRUnitT, const PreservedAnalyses &)>
+ struct Checker;
+
+ template <typename T> static SmallType f(Checker<T, &T::invalidate> *);
+ template <typename T> static BigType f(...);
+
+public:
+ enum { Value = sizeof(f<ResultT>(0)) == sizeof(SmallType) };
+};
+
+/// \brief Wrapper to model the analysis result concept.
+///
+/// By default, this will implement the invalidate method with a trivial
+/// implementation so that the actual analysis result doesn't need to provide
+/// an invalidation handler. It is only selected when the invalidation handler
+/// is not part of the ResultT's interface.
+template <typename IRUnitT, typename PassT, typename ResultT,
+ bool HasInvalidateHandler =
+ ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
+struct AnalysisResultModel;
+
+/// \brief Specialization of \c AnalysisResultModel which provides the default
+/// invalidate functionality.
+template <typename IRUnitT, typename PassT, typename ResultT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, false>
+ : AnalysisResultConcept<IRUnitT> {
+ explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+ AnalysisResultModel(AnalysisResultModel &&Arg)
+ : Result(std::move(Arg.Result)) {}
+ friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+ using std::swap;
+ swap(LHS.Result, RHS.Result);
+ }
+ AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief The model bases invalidation solely on being in the preserved set.
+ //
+ // FIXME: We should actually use two different concepts for analysis results
+ // rather than two different models, and avoid the indirect function call for
+ // ones that use the trivial behavior.
+ bool invalidate(IRUnitT, const PreservedAnalyses &PA) override {
+ return !PA.preserved(PassT::ID());
+ }
+
+ ResultT Result;
+};
+
+/// \brief Specialization of \c AnalysisResultModel which delegates invalidate
+/// handling to \c ResultT.
+template <typename IRUnitT, typename PassT, typename ResultT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, true>
+ : AnalysisResultConcept<IRUnitT> {
+ explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+ AnalysisResultModel(AnalysisResultModel &&Arg)
+ : Result(std::move(Arg.Result)) {}
+ friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+ using std::swap;
+ swap(LHS.Result, RHS.Result);
+ }
+ AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief The model delegates to the \c ResultT method.
+ bool invalidate(IRUnitT IR, const PreservedAnalyses &PA) override {
+ return Result.invalidate(IR, PA);
+ }
+
+ ResultT Result;
+};
+
+/// \brief Abstract concept of an analysis pass.
+///
+/// This concept is parameterized over the IR unit that it can run over and
+/// produce an analysis result.
+template <typename IRUnitT, typename AnalysisManagerT>
+struct AnalysisPassConcept {
+ virtual ~AnalysisPassConcept() {}
+
+ /// \brief Method to run this analysis over a unit of IR.
+ /// \returns A unique_ptr to the analysis result object to be queried by
+ /// users.
+ virtual std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT IR, AnalysisManagerT *AM) = 0;
+};
+
+/// \brief Wrapper to model the analysis pass concept.
+///
+/// Can wrap any type which implements a suitable \c run method. The method
+/// must accept the IRUnitT as an argument and produce an object which can be
+/// wrapped in a \c AnalysisResultModel.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT,
+ bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
+ IRUnitT, AnalysisManagerT, PassT, typename PassT::Result>::Value>
+struct AnalysisPassModel;
+
+/// \brief Specialization of \c AnalysisPassModel which passes an
+/// \c AnalysisManager to PassT's run method.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT>
+struct AnalysisPassModel<IRUnitT, AnalysisManagerT, PassT, true>
+ : AnalysisPassConcept<IRUnitT, AnalysisManagerT> {
+ explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
+ AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ AnalysisPassModel &operator=(AnalysisPassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ // FIXME: Replace PassT::Result with type traits when we use C++11.
+ typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+
+ /// \brief The model delegates to the \c PassT::run method.
+ ///
+ /// The return is wrapped in an \c AnalysisResultModel.
+ std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT IR, AnalysisManagerT *AM) override {
+ return make_unique<ResultModelT>(Pass.run(IR, AM));
+ }
+
+ PassT Pass;
+};
+
+/// \brief Specialization of \c AnalysisPassModel which does not pass an
+/// \c AnalysisManager to PassT's run method.
+template <typename IRUnitT, typename AnalysisManagerT, typename PassT>
+struct AnalysisPassModel<IRUnitT, AnalysisManagerT, PassT, false>
+ : AnalysisPassConcept<IRUnitT, AnalysisManagerT> {
+ explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
+ AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ AnalysisPassModel &operator=(AnalysisPassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ // FIXME: Replace PassT::Result with type traits when we use C++11.
+ typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+
+ /// \brief The model delegates to the \c PassT::run method.
+ ///
+ /// The return is wrapped in an \c AnalysisResultModel.
+ std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT IR, AnalysisManagerT *) override {
+ return make_unique<ResultModelT>(Pass.run(IR));
+ }
+
+ PassT Pass;
+};
+
+} // End namespace detail
+
+class ModuleAnalysisManager;
class ModulePassManager {
public:
- ModulePassManager(Module *M, AnalysisManager *AM = 0) : M(M), AM(AM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModulePassManager() {}
+ ModulePassManager(ModulePassManager &&Arg) : Passes(std::move(Arg.Passes)) {}
+ ModulePassManager &operator=(ModulePassManager &&RHS) {
+ Passes = std::move(RHS.Passes);
+ return *this;
+ }
+
+ /// \brief Run all of the module passes in this module pass manager over
+ /// a module.
+ ///
+ /// This method should only be called for a single module as there is the
+ /// expectation that the lifetime of a pass is bounded to that of a module.
+ PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM = 0);
template <typename ModulePassT> void addPass(ModulePassT Pass) {
- Passes.push_back(new ModulePassModel<ModulePassT>(llvm_move(Pass)));
+ Passes.emplace_back(new ModulePassModel<ModulePassT>(std::move(Pass)));
}
- void run();
+ static StringRef name() { return "ModulePassManager"; }
private:
// Pull in the concept type and model template specialized for modules.
- typedef detail::PassConcept<Module *> ModulePassConcept;
+ typedef detail::PassConcept<Module *, ModuleAnalysisManager>
+ ModulePassConcept;
template <typename PassT>
- struct ModulePassModel : detail::PassModel<Module *, PassT> {
- ModulePassModel(PassT Pass) : detail::PassModel<Module *, PassT>(Pass) {}
+ struct ModulePassModel
+ : detail::PassModel<Module *, ModuleAnalysisManager, PassT> {
+ ModulePassModel(PassT Pass)
+ : detail::PassModel<Module *, ModuleAnalysisManager, PassT>(
+ std::move(Pass)) {}
};
- Module *M;
- AnalysisManager *AM;
- std::vector<polymorphic_ptr<ModulePassConcept> > Passes;
+ ModulePassManager(const ModulePassManager &) LLVM_DELETED_FUNCTION;
+ ModulePassManager &operator=(const ModulePassManager &) LLVM_DELETED_FUNCTION;
+
+ std::vector<std::unique_ptr<ModulePassConcept>> Passes;
};
+class FunctionAnalysisManager;
+
class FunctionPassManager {
public:
- FunctionPassManager(AnalysisManager *AM = 0) : AM(AM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionPassManager() {}
+ FunctionPassManager(FunctionPassManager &&Arg)
+ : Passes(std::move(Arg.Passes)) {}
+ FunctionPassManager &operator=(FunctionPassManager &&RHS) {
+ Passes = std::move(RHS.Passes);
+ return *this;
+ }
template <typename FunctionPassT> void addPass(FunctionPassT Pass) {
- Passes.push_back(new FunctionPassModel<FunctionPassT>(llvm_move(Pass)));
+ Passes.emplace_back(new FunctionPassModel<FunctionPassT>(std::move(Pass)));
}
- bool run(Module *M);
+ PreservedAnalyses run(Function *F, FunctionAnalysisManager *AM = 0);
+
+ static StringRef name() { return "FunctionPassManager"; }
private:
// Pull in the concept type and model template specialized for functions.
- typedef detail::PassConcept<Function *> FunctionPassConcept;
+ typedef detail::PassConcept<Function *, FunctionAnalysisManager>
+ FunctionPassConcept;
template <typename PassT>
- struct FunctionPassModel : detail::PassModel<Function *, PassT> {
+ struct FunctionPassModel
+ : detail::PassModel<Function *, FunctionAnalysisManager, PassT> {
FunctionPassModel(PassT Pass)
- : detail::PassModel<Function *, PassT>(Pass) {}
+ : detail::PassModel<Function *, FunctionAnalysisManager, PassT>(
+ std::move(Pass)) {}
};
- AnalysisManager *AM;
- std::vector<polymorphic_ptr<FunctionPassConcept> > Passes;
+ FunctionPassManager(const FunctionPassManager &) LLVM_DELETED_FUNCTION;
+ FunctionPassManager &
+ operator=(const FunctionPassManager &) LLVM_DELETED_FUNCTION;
+
+ std::vector<std::unique_ptr<FunctionPassConcept>> Passes;
};
+namespace detail {
-/// \brief An analysis manager to coordinate and cache analyses run over
-/// a module.
+/// \brief A CRTP base used to implement analysis managers.
///
-/// The analysis manager is typically used by passes in a pass pipeline
-/// (consisting potentially of several individual pass managers) over a module
-/// of IR. It provides registration of available analyses, declaring
-/// requirements on support for specific analyses, running of an specific
-/// analysis over a specific unit of IR to compute an analysis result, and
-/// caching of the analysis results to reuse them across multiple passes.
+/// This class template serves as the boilerplate of an analysis manager. Any
+/// analysis manager can be implemented on top of this base class. Any
+/// implementation will be required to provide specific hooks:
///
-/// It is the responsibility of callers to use the invalidation API to
-/// invalidate analysis results when the IR they correspond to changes. The
-/// \c ModulePassManager and \c FunctionPassManager do this automatically.
-class AnalysisManager {
-public:
- AnalysisManager(Module *M) : M(M) {}
+/// - getResultImpl
+/// - getCachedResultImpl
+/// - invalidateImpl
+///
+/// The details of the call pattern are within.
+template <typename DerivedT, typename IRUnitT> class AnalysisManagerBase {
+ DerivedT *derived_this() { return static_cast<DerivedT *>(this); }
+ const DerivedT *derived_this() const {
+ return static_cast<const DerivedT *>(this);
+ }
+
+ AnalysisManagerBase(const AnalysisManagerBase &) LLVM_DELETED_FUNCTION;
+ AnalysisManagerBase &
+ operator=(const AnalysisManagerBase &) LLVM_DELETED_FUNCTION;
+
+protected:
+ typedef detail::AnalysisResultConcept<IRUnitT> ResultConceptT;
+ typedef detail::AnalysisPassConcept<IRUnitT, DerivedT> PassConceptT;
+
+ // FIXME: Provide template aliases for the models when we're using C++11 in
+ // a mode supporting them.
+
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisManagerBase() {}
+ AnalysisManagerBase(AnalysisManagerBase &&Arg)
+ : AnalysisPasses(std::move(Arg.AnalysisPasses)) {}
+ AnalysisManagerBase &operator=(AnalysisManagerBase &&RHS) {
+ AnalysisPasses = std::move(RHS.AnalysisPasses);
+ return *this;
+ }
+public:
/// \brief Get the result of an analysis pass for this module.
///
/// If there is not a valid cached result in the manager already, this will
/// re-run the analysis to produce a valid result.
- ///
- /// The module passed in must be the same module as the analysis manager was
- /// constructed around.
- template <typename PassT>
- const typename PassT::Result &getResult(Module *M) {
- assert(ModuleAnalysisPasses.count(PassT::ID()) &&
+ template <typename PassT> typename PassT::Result &getResult(IRUnitT IR) {
+ assert(AnalysisPasses.count(PassT::ID()) &&
"This analysis pass was not registered prior to being queried");
- const AnalysisResultConcept<Module> &ResultConcept =
- getResultImpl(PassT::ID(), M);
- typedef AnalysisResultModel<Module, typename PassT::Result> ResultModelT;
- return static_cast<const ResultModelT &>(ResultConcept).Result;
+ ResultConceptT &ResultConcept =
+ derived_this()->getResultImpl(PassT::ID(), IR);
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+ return static_cast<ResultModelT &>(ResultConcept).Result;
}
- /// \brief Get the result of an analysis pass for a function.
+  /// \brief Get the cached result of an analysis pass for this IR unit.
///
- /// If there is not a valid cached result in the manager already, this will
- /// re-run the analysis to produce a valid result.
+ /// This method never runs the analysis.
+ ///
+ /// \returns null if there is no cached result.
template <typename PassT>
- const typename PassT::Result &getResult(Function *F) {
- assert(FunctionAnalysisPasses.count(PassT::ID()) &&
+ typename PassT::Result *getCachedResult(IRUnitT IR) const {
+ assert(AnalysisPasses.count(PassT::ID()) &&
"This analysis pass was not registered prior to being queried");
- const AnalysisResultConcept<Function> &ResultConcept =
- getResultImpl(PassT::ID(), F);
- typedef AnalysisResultModel<Function, typename PassT::Result> ResultModelT;
- return static_cast<const ResultModelT &>(ResultConcept).Result;
+ ResultConceptT *ResultConcept =
+ derived_this()->getCachedResultImpl(PassT::ID(), IR);
+ if (!ResultConcept)
+ return 0;
+
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+ return &static_cast<ResultModelT *>(ResultConcept)->Result;
}
/// \brief Register an analysis pass with the manager.
///
- /// This provides an initialized and set-up analysis pass to the
- /// analysis
- /// manager. Whomever is setting up analysis passes must use this to
- /// populate
+ /// This provides an initialized and set-up analysis pass to the analysis
+ /// manager. Whomever is setting up analysis passes must use this to populate
/// the manager with all of the analysis passes available.
- template <typename PassT> void registerAnalysisPass(PassT Pass) {
- registerAnalysisPassImpl<PassT>(llvm_move(Pass));
+ template <typename PassT> void registerPass(PassT Pass) {
+ assert(!AnalysisPasses.count(PassT::ID()) &&
+ "Registered the same analysis pass twice!");
+ typedef detail::AnalysisPassModel<IRUnitT, DerivedT, PassT> PassModelT;
+ AnalysisPasses[PassT::ID()].reset(new PassModelT(std::move(Pass)));
}
/// \brief Invalidate a specific analysis pass for an IR module.
///
/// Note that the analysis result can disregard invalidation.
template <typename PassT> void invalidate(Module *M) {
- invalidateImpl(PassT::ID(), M);
+ assert(AnalysisPasses.count(PassT::ID()) &&
+ "This analysis pass was not registered prior to being invalidated");
+ derived_this()->invalidateImpl(PassT::ID(), M);
}
- /// \brief Invalidate a specific analysis pass for an IR function.
+ /// \brief Invalidate analyses cached for an IR unit.
///
- /// Note that the analysis result can disregard invalidation.
- template <typename PassT> void invalidate(Function *F) {
- invalidateImpl(PassT::ID(), F);
+ /// Walk through all of the analyses pertaining to this unit of IR and
+ /// invalidate them unless they are preserved by the PreservedAnalyses set.
+ void invalidate(IRUnitT IR, const PreservedAnalyses &PA) {
+ derived_this()->invalidateImpl(IR, PA);
}
- /// \brief Invalidate analyses cached for an IR Module.
- ///
- /// Note that specific analysis results can disregard invalidation by
- /// overriding their invalidate method.
- ///
- /// The module must be the module this analysis manager was constructed
- /// around.
- void invalidateAll(Module *M);
+protected:
+ /// \brief Lookup a registered analysis pass.
+ PassConceptT &lookupPass(void *PassID) {
+ typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(PassID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
+ }
- /// \brief Invalidate analyses cached for an IR Function.
- ///
- /// Note that specific analysis results can disregard invalidation by
- /// overriding the invalidate method.
- void invalidateAll(Function *F);
+ /// \brief Lookup a registered analysis pass.
+ const PassConceptT &lookupPass(void *PassID) const {
+ typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(PassID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
+ }
private:
- /// \brief Abstract concept of an analysis result.
- ///
- /// This concept is parameterized over the IR unit that this result pertains
- /// to.
- template <typename IRUnitT> struct AnalysisResultConcept {
- virtual ~AnalysisResultConcept() {}
- virtual AnalysisResultConcept *clone() = 0;
-
- /// \brief Method to try and mark a result as invalid.
- ///
- /// When the outer \c AnalysisManager detects a change in some underlying
- /// unit of the IR, it will call this method on all of the results cached.
- ///
- /// \returns true if the result should indeed be invalidated (the default).
- virtual bool invalidate(IRUnitT *IR) = 0;
- };
-
- /// \brief Wrapper to model the analysis result concept.
- ///
- /// Can wrap any type which implements a suitable invalidate member and model
- /// the AnalysisResultConcept for the AnalysisManager.
- template <typename IRUnitT, typename ResultT>
- struct AnalysisResultModel : AnalysisResultConcept<IRUnitT> {
- AnalysisResultModel(ResultT Result) : Result(llvm_move(Result)) {}
- virtual AnalysisResultModel *clone() {
- return new AnalysisResultModel(Result);
- }
-
- /// \brief The model delegates to the \c ResultT method.
- virtual bool invalidate(IRUnitT *IR) { return Result.invalidate(IR); }
-
- ResultT Result;
- };
-
- /// \brief Abstract concept of an analysis pass.
- ///
- /// This concept is parameterized over the IR unit that it can run over and
- /// produce an analysis result.
- template <typename IRUnitT> struct AnalysisPassConcept {
- virtual ~AnalysisPassConcept() {}
- virtual AnalysisPassConcept *clone() = 0;
-
- /// \brief Method to run this analysis over a unit of IR.
- /// \returns The analysis result object to be queried by users, the caller
- /// takes ownership.
- virtual AnalysisResultConcept<IRUnitT> *run(IRUnitT *IR) = 0;
- };
-
- /// \brief Wrapper to model the analysis pass concept.
- ///
- /// Can wrap any type which implements a suitable \c run method. The method
- /// must accept the IRUnitT as an argument and produce an object which can be
- /// wrapped in a \c AnalysisResultModel.
- template <typename PassT>
- struct AnalysisPassModel : AnalysisPassConcept<typename PassT::IRUnitT> {
- AnalysisPassModel(PassT Pass) : Pass(llvm_move(Pass)) {}
- virtual AnalysisPassModel *clone() { return new AnalysisPassModel(Pass); }
+ /// \brief Map type from module analysis pass ID to pass concept pointer.
+ typedef DenseMap<void *, std::unique_ptr<PassConceptT>> AnalysisPassMapT;
- // FIXME: Replace PassT::IRUnitT with type traits when we use C++11.
- typedef typename PassT::IRUnitT IRUnitT;
+ /// \brief Collection of module analysis passes, indexed by ID.
+ AnalysisPassMapT AnalysisPasses;
+};
- // FIXME: Replace PassT::Result with type traits when we use C++11.
- typedef AnalysisResultModel<IRUnitT, typename PassT::Result> ResultModelT;
+} // End namespace detail
- /// \brief The model delegates to the \c PassT::run method.
- ///
- /// The return is wrapped in an \c AnalysisResultModel.
- virtual ResultModelT *run(IRUnitT *IR) {
- return new ResultModelT(Pass.run(IR));
- }
+/// \brief A module analysis pass manager with lazy running and caching of
+/// results.
+class ModuleAnalysisManager
+ : public detail::AnalysisManagerBase<ModuleAnalysisManager, Module *> {
+ friend class detail::AnalysisManagerBase<ModuleAnalysisManager, Module *>;
+ typedef detail::AnalysisManagerBase<ModuleAnalysisManager, Module *> BaseT;
+ typedef BaseT::ResultConceptT ResultConceptT;
+ typedef BaseT::PassConceptT PassConceptT;
- PassT Pass;
- };
+public:
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleAnalysisManager() {}
+ ModuleAnalysisManager(ModuleAnalysisManager &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))),
+ ModuleAnalysisResults(std::move(Arg.ModuleAnalysisResults)) {}
+ ModuleAnalysisManager &operator=(ModuleAnalysisManager &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ ModuleAnalysisResults = std::move(RHS.ModuleAnalysisResults);
+ return *this;
+ }
+private:
+ ModuleAnalysisManager(const ModuleAnalysisManager &) LLVM_DELETED_FUNCTION;
+ ModuleAnalysisManager &
+ operator=(const ModuleAnalysisManager &) LLVM_DELETED_FUNCTION;
/// \brief Get a module pass result, running the pass if necessary.
- const AnalysisResultConcept<Module> &getResultImpl(void *PassID, Module *M);
+ ResultConceptT &getResultImpl(void *PassID, Module *M);
- /// \brief Get a function pass result, running the pass if necessary.
- const AnalysisResultConcept<Function> &getResultImpl(void *PassID,
- Function *F);
+ /// \brief Get a cached module pass result or return null.
+ ResultConceptT *getCachedResultImpl(void *PassID, Module *M) const;
/// \brief Invalidate a module pass result.
void invalidateImpl(void *PassID, Module *M);
- /// \brief Invalidate a function pass result.
- void invalidateImpl(void *PassID, Function *F);
+ /// \brief Invalidate results across a module.
+ void invalidateImpl(Module *M, const PreservedAnalyses &PA);
+ /// \brief Map type from module analysis pass ID to pass result concept
+ /// pointer.
+ typedef DenseMap<void *,
+ std::unique_ptr<detail::AnalysisResultConcept<Module *>>>
+ ModuleAnalysisResultMapT;
- /// \brief Module pass specific implementation of registration.
- template <typename PassT>
- typename enable_if<is_same<typename PassT::IRUnitT, Module> >::type
- registerAnalysisPassImpl(PassT Pass) {
- assert(!ModuleAnalysisPasses.count(PassT::ID()) &&
- "Registered the same analysis pass twice!");
- ModuleAnalysisPasses[PassT::ID()] =
- new AnalysisPassModel<PassT>(llvm_move(Pass));
- }
+ /// \brief Cache of computed module analysis results for this module.
+ ModuleAnalysisResultMapT ModuleAnalysisResults;
+};
- /// \brief Function pass specific implementation of registration.
- template <typename PassT>
- typename enable_if<is_same<typename PassT::IRUnitT, Function> >::type
- registerAnalysisPassImpl(PassT Pass) {
- assert(!FunctionAnalysisPasses.count(PassT::ID()) &&
- "Registered the same analysis pass twice!");
- FunctionAnalysisPasses[PassT::ID()] =
- new AnalysisPassModel<PassT>(llvm_move(Pass));
- }
+/// \brief A function analysis manager to coordinate and cache analyses run over
+/// the functions of a module.
+class FunctionAnalysisManager
+ : public detail::AnalysisManagerBase<FunctionAnalysisManager, Function *> {
+ friend class detail::AnalysisManagerBase<FunctionAnalysisManager, Function *>;
+ typedef detail::AnalysisManagerBase<FunctionAnalysisManager, Function *>
+ BaseT;
+ typedef BaseT::ResultConceptT ResultConceptT;
+ typedef BaseT::PassConceptT PassConceptT;
+public:
+ // Most public APIs are inherited from the CRTP base class.
+
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionAnalysisManager() {}
+ FunctionAnalysisManager(FunctionAnalysisManager &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))),
+ FunctionAnalysisResults(std::move(Arg.FunctionAnalysisResults)) {}
+ FunctionAnalysisManager &operator=(FunctionAnalysisManager &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ FunctionAnalysisResults = std::move(RHS.FunctionAnalysisResults);
+ return *this;
+ }
- /// \brief Map type from module analysis pass ID to pass concept pointer.
- typedef DenseMap<void *, polymorphic_ptr<AnalysisPassConcept<Module> > >
- ModuleAnalysisPassMapT;
+ /// \brief Returns true if the analysis manager has an empty results cache.
+ bool empty() const;
- /// \brief Collection of module analysis passes, indexed by ID.
- ModuleAnalysisPassMapT ModuleAnalysisPasses;
+ /// \brief Clear the function analysis result cache.
+ ///
+ /// This routine allows cleaning up when the set of functions itself has
+  /// potentially changed, and thus we can't even look up a result and
+  /// invalidate it directly. Notably, this does *not* call the invalidate
+ /// functions as there is nothing to be done for them.
+ void clear();
- /// \brief Map type from module analysis pass ID to pass result concept pointer.
- typedef DenseMap<void *, polymorphic_ptr<AnalysisResultConcept<Module> > >
- ModuleAnalysisResultMapT;
+private:
+ FunctionAnalysisManager(const FunctionAnalysisManager &)
+ LLVM_DELETED_FUNCTION;
+ FunctionAnalysisManager &
+ operator=(const FunctionAnalysisManager &) LLVM_DELETED_FUNCTION;
- /// \brief Cache of computed module analysis results for this module.
- ModuleAnalysisResultMapT ModuleAnalysisResults;
+ /// \brief Get a function pass result, running the pass if necessary.
+ ResultConceptT &getResultImpl(void *PassID, Function *F);
+ /// \brief Get a cached function pass result or return null.
+ ResultConceptT *getCachedResultImpl(void *PassID, Function *F) const;
- /// \brief Map type from function analysis pass ID to pass concept pointer.
- typedef DenseMap<void *, polymorphic_ptr<AnalysisPassConcept<Function> > >
- FunctionAnalysisPassMapT;
+ /// \brief Invalidate a function pass result.
+ void invalidateImpl(void *PassID, Function *F);
- /// \brief Collection of function analysis passes, indexed by ID.
- FunctionAnalysisPassMapT FunctionAnalysisPasses;
+  /// \brief Invalidate the results for a function.
+ void invalidateImpl(Function *F, const PreservedAnalyses &PA);
/// \brief List of function analysis pass IDs and associated concept pointers.
///
/// Requires iterators to be valid across appending new entries and arbitrary
/// erases. Provides both the pass ID and concept pointer such that it is
/// half of a bijection and provides storage for the actual result concept.
- typedef std::list<
- std::pair<void *, polymorphic_ptr<AnalysisResultConcept<Function> > > >
- FunctionAnalysisResultListT;
+ typedef std::list<std::pair<
+ void *, std::unique_ptr<detail::AnalysisResultConcept<Function *>>>>
+ FunctionAnalysisResultListT;
/// \brief Map type from function pointer to our custom list type.
- typedef DenseMap<Function *, FunctionAnalysisResultListT> FunctionAnalysisResultListMapT;
+ typedef DenseMap<Function *, FunctionAnalysisResultListT>
+ FunctionAnalysisResultListMapT;
/// \brief Map from function to a list of function analysis results.
///
@@ -370,14 +805,230 @@ private:
/// iterator into a particular result list.
typedef DenseMap<std::pair<void *, Function *>,
FunctionAnalysisResultListT::iterator>
- FunctionAnalysisResultMapT;
+ FunctionAnalysisResultMapT;
/// \brief Map from an analysis ID and function to a particular cached
/// analysis result.
FunctionAnalysisResultMapT FunctionAnalysisResults;
+};
+
+/// \brief A module analysis which acts as a proxy for a function analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the module analysis
+/// manager and module pass manager to a function analysis manager. You should
+/// never use a function analysis manager from within (transitively) a module
+/// pass manager unless your parent module pass has received a proxy result
+/// object for it.
+class FunctionAnalysisManagerModuleProxy {
+public:
+ class Result;
+
+ static void *ID() { return (void *)&PassID; }
+
+ explicit FunctionAnalysisManagerModuleProxy(FunctionAnalysisManager &FAM)
+ : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionAnalysisManagerModuleProxy(
+ const FunctionAnalysisManagerModuleProxy &Arg)
+ : FAM(Arg.FAM) {}
+ FunctionAnalysisManagerModuleProxy(FunctionAnalysisManagerModuleProxy &&Arg)
+ : FAM(std::move(Arg.FAM)) {}
+ FunctionAnalysisManagerModuleProxy &
+ operator=(FunctionAnalysisManagerModuleProxy RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work, it is primarily used to insert our
+ /// proxy result object into the module analysis cache so that we can proxy
+ /// invalidation to the function analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the function analysis manager prior to
+ /// this analysis being requested.
+ Result run(Module *M);
+
+private:
+ static char PassID;
+
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief The result proxy object for the
+/// \c FunctionAnalysisManagerModuleProxy.
+///
+/// See its documentation for more information.
+class FunctionAnalysisManagerModuleProxy::Result {
+public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ Result(const Result &Arg) : FAM(Arg.FAM) {}
+ Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c FunctionAnalysisManager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ /// \brief Handler for invalidation of the module.
+ ///
+ /// If this analysis itself is preserved, then we assume that the set of \c
+ /// Function objects in the \c Module hasn't changed and thus we don't need
+ /// to invalidate *all* cached data associated with a \c Function* in the \c
+ /// FunctionAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c FunctionAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(Module *M, const PreservedAnalyses &PA);
+
+private:
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief A function analysis which acts as a proxy for a module analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent module analysis manager to
+/// function passes. Only the const interface of the module analysis manager is
+/// provided to indicate that once inside of a function analysis pass you
+/// cannot request a module analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalyses set.
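+///
+/// A rough usage sketch, for illustration only (\c MyFunctionPass and
+/// \c SomeModuleAnalysis are placeholder names, not part of this header):
+/// \code
+///   PreservedAnalyses MyFunctionPass::run(Function *F,
+///                                         FunctionAnalysisManager *AM) {
+///     const ModuleAnalysisManager &MAM =
+///         AM->getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
+///     // Only previously computed (cached) module results are available here;
+///     // this never triggers a module analysis to run.
+///     if (SomeModuleAnalysis::Result *R =
+///             MAM.getCachedResult<SomeModuleAnalysis>(F->getParent()))
+///       use(*R);
+///     ...
+///   }
+/// \endcode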
+class ModuleAnalysisManagerFunctionProxy {
+public:
+ /// \brief Result proxy object for \c ModuleAnalysisManagerFunctionProxy.
+ class Result {
+ public:
+ explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : MAM(Arg.MAM) {}
+ Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ const ModuleAnalysisManager &getManager() const { return *MAM; }
+
+ /// \brief Handle invalidation by ignoring it, this pass is immutable.
+ bool invalidate(Function *) { return false; }
+
+ private:
+ const ModuleAnalysisManager *MAM;
+ };
- /// \brief Module handle for the \c AnalysisManager.
- Module *M;
+ static void *ID() { return (void *)&PassID; }
+
+ ModuleAnalysisManagerFunctionProxy(const ModuleAnalysisManager &MAM)
+ : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleAnalysisManagerFunctionProxy(
+ const ModuleAnalysisManagerFunctionProxy &Arg)
+ : MAM(Arg.MAM) {}
+ ModuleAnalysisManagerFunctionProxy(ModuleAnalysisManagerFunctionProxy &&Arg)
+ : MAM(std::move(Arg.MAM)) {}
+ ModuleAnalysisManagerFunctionProxy &
+ operator=(ModuleAnalysisManagerFunctionProxy RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+  /// Nothing to see here; it just forwards the \c MAM reference into the
+ /// result.
+ Result run(Function *) { return Result(*MAM); }
+
+private:
+ static char PassID;
+
+ const ModuleAnalysisManager *MAM;
};
+/// \brief Trivial adaptor that maps from a module to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a ModulePassManager. Note that if this pass is constructed with a pointer
+/// to a \c ModuleAnalysisManager it will run the
+/// \c FunctionAnalysisManagerModuleProxy analysis prior to running the function
+/// pass over the module to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename FunctionPassT> class ModuleToFunctionPassAdaptor {
+public:
+ explicit ModuleToFunctionPassAdaptor(FunctionPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleToFunctionPassAdaptor(const ModuleToFunctionPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ ModuleToFunctionPassAdaptor(ModuleToFunctionPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+  friend void swap(ModuleToFunctionPassAdaptor &LHS,
+                   ModuleToFunctionPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ ModuleToFunctionPassAdaptor &operator=(ModuleToFunctionPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the function pass across every function in the module.
+ PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM) {
+ FunctionAnalysisManager *FAM = 0;
+ if (AM)
+ // Setup the function analysis manager from its proxy.
+ FAM = &AM->getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) {
+ PreservedAnalyses PassPA = Pass.run(I, FAM);
+
+ // We know that the function pass couldn't have invalidated any other
+ // function's analyses (that's the contract of a function pass), so
+ // directly handle the function analysis manager's invalidation here.
+ if (FAM)
+ FAM->invalidate(I, PassPA);
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of function analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the function analysis manager
+ // incrementally above.
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "ModuleToFunctionPassAdaptor"; }
+
+private:
+ FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+ModuleToFunctionPassAdaptor<FunctionPassT>
+createModuleToFunctionPassAdaptor(FunctionPassT Pass) {
+  return ModuleToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
}
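+
+// An illustrative composition sketch (not an excerpt from the original
+// header). It assumes a ModulePassManager with an addPass method, as defined
+// earlier in this file, and uses a placeholder function pass type
+// MyFunctionPass:
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToFunctionPassAdaptor(MyFunctionPass()));
+//   // When MPM runs over a module, MyFunctionPass runs over each function,
+//   // and the FunctionAnalysisManagerModuleProxy keeps the function analysis
+//   // manager's cached results consistent.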
+
+}
+
+#endif
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
new file mode 100644
index 0000000..2efb294
--- /dev/null
+++ b/include/llvm/IR/PatternMatch.h
@@ -0,0 +1,1211 @@
+//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a simple and efficient mechanism for performing general
+// tree-based pattern matches on the LLVM IR. The power of these routines is
+// that it allows you to write concise patterns that are expressive and easy to
+// understand. The other major advantage of this is that it allows you to
+// trivially capture/bind elements in the pattern to variables. For example,
+// you can do something like this:
+//
+// Value *Exp = ...
+// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
+// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
+// m_And(m_Value(Y), m_ConstantInt(C2))))) {
+// ... Pattern is matched and variables are bound ...
+// }
+//
+// This is primarily useful to things like the instruction combiner, but can
+// also be useful for static analysis tools or code generators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PATTERNMATCH_H
+#define LLVM_IR_PATTERNMATCH_H
+
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+namespace PatternMatch {
+
+template<typename Val, typename Pattern>
+bool match(Val *V, const Pattern &P) {
+ return const_cast<Pattern&>(P).match(V);
+}
+
+
+template<typename SubPattern_t>
+struct OneUse_match {
+ SubPattern_t SubPattern;
+
+ OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ return V->hasOneUse() && SubPattern.match(V);
+ }
+};
+
+template<typename T>
+inline OneUse_match<T> m_OneUse(const T &SubPattern) { return SubPattern; }
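+
+// A minimal illustrative sketch (assumes a Value *V in scope; not an excerpt
+// from the original header): restrict a match to sub-expressions with a
+// single use, e.g. when a fold would otherwise duplicate work.
+//
+//   Value *X, *Y;
+//   if (match(V, m_Add(m_OneUse(m_Mul(m_Value(X), m_Value(Y))), m_Value())))
+//     // The multiply feeding this add has no other users, so it can be
+//     // rewritten together with the add without duplicating the multiply.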
+
+
+template<typename Class>
+struct class_match {
+ template<typename ITy>
+ bool match(ITy *V) { return isa<Class>(V); }
+};
+
+/// m_Value() - Match an arbitrary value and ignore it.
+inline class_match<Value> m_Value() { return class_match<Value>(); }
+/// m_ConstantInt() - Match an arbitrary ConstantInt and ignore it.
+inline class_match<ConstantInt> m_ConstantInt() {
+ return class_match<ConstantInt>();
+}
+/// m_Undef() - Match an arbitrary undef constant.
+inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
+
+inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
+
+/// Matching combinators
+template<typename LTy, typename RTy>
+struct match_combine_or {
+ LTy L;
+ RTy R;
+
+ match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) { }
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (L.match(V))
+ return true;
+ if (R.match(V))
+ return true;
+ return false;
+ }
+};
+
+template<typename LTy, typename RTy>
+struct match_combine_and {
+ LTy L;
+ RTy R;
+
+ match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) { }
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (L.match(V))
+ if (R.match(V))
+ return true;
+ return false;
+ }
+};
+
+/// Combine two pattern matchers matching L || R
+template<typename LTy, typename RTy>
+inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
+ return match_combine_or<LTy, RTy>(L, R);
+}
+
+/// Combine two pattern matchers matching L && R
+template<typename LTy, typename RTy>
+inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
+ return match_combine_and<LTy, RTy>(L, R);
+}
+
+struct match_zero {
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const Constant *C = dyn_cast<Constant>(V))
+ return C->isNullValue();
+ return false;
+ }
+};
+
+/// m_Zero() - Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers.
+inline match_zero m_Zero() { return match_zero(); }
+
+struct match_neg_zero {
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const Constant *C = dyn_cast<Constant>(V))
+ return C->isNegativeZeroValue();
+ return false;
+ }
+};
+
+/// m_NegZero() - Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers. For
+/// floating point constants, this will match negative zero but not positive
+/// zero.
+inline match_neg_zero m_NegZero() { return match_neg_zero(); }
+
+/// m_AnyZero() - Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers. For
+/// floating point constants, this will match negative zero and positive zero.
+inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() {
+ return m_CombineOr(m_Zero(), m_NegZero());
+}
+
+struct apint_match {
+ const APInt *&Res;
+ apint_match(const APInt *&R) : Res(R) {}
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (V->getType()->isVectorTy())
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ return false;
+ }
+};
+
+/// m_APInt - Match a ConstantInt or splatted ConstantVector, binding the
+/// specified pointer to the contained APInt.
+inline apint_match m_APInt(const APInt *&Res) { return Res; }
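+
+// For illustration (assumes a Value *V in scope): m_APInt also sees through
+// splat vector constants, so one pattern covers scalar and vector IR alike.
+//
+//   Value *X;
+//   const APInt *C;
+//   if (match(V, m_Add(m_Value(X), m_APInt(C))))
+//     // V is 'add X, C' where C is a ConstantInt or a splat vector constant;
+//     // *C holds the splatted integer value.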
+
+
+template<int64_t Val>
+struct constantint_match {
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ const APInt &CIV = CI->getValue();
+ if (Val >= 0)
+ return CIV == static_cast<uint64_t>(Val);
+ // If Val is negative, and CI is shorter than it, truncate to the right
+ // number of bits. If it is larger, then we have to sign extend. Just
+ // compare their negated values.
+ return -CIV == -Val;
+ }
+ return false;
+ }
+};
+
+/// m_ConstantInt<int64_t> - Match a ConstantInt with a specific value.
+template<int64_t Val>
+inline constantint_match<Val> m_ConstantInt() {
+ return constantint_match<Val>();
+}
+
+/// cst_pred_ty - This helper class is used to match scalar and vector constants
+/// that satisfy a specified predicate.
+template<typename Predicate>
+struct cst_pred_ty : public Predicate {
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ return this->isValue(CI->getValue());
+ if (V->getType()->isVectorTy())
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (const ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+ return this->isValue(CI->getValue());
+ return false;
+ }
+};
+
+/// api_pred_ty - This helper class is used to match scalar and vector constants
+/// that satisfy a specified predicate, and bind them to an APInt.
+template<typename Predicate>
+struct api_pred_ty : public Predicate {
+ const APInt *&Res;
+ api_pred_ty(const APInt *&R) : Res(R) {}
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (V->getType()->isVectorTy())
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+
+ return false;
+ }
+};
+
+
+struct is_one {
+ bool isValue(const APInt &C) { return C == 1; }
+};
+
+/// m_One() - Match an integer 1 or a vector with all elements equal to 1.
+inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
+inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }
+
+struct is_all_ones {
+ bool isValue(const APInt &C) { return C.isAllOnesValue(); }
+};
+
+/// m_AllOnes() - Match an integer or vector with all bits set to true.
+inline cst_pred_ty<is_all_ones> m_AllOnes() {return cst_pred_ty<is_all_ones>();}
+inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
+
+struct is_sign_bit {
+ bool isValue(const APInt &C) { return C.isSignBit(); }
+};
+
+/// m_SignBit() - Match an integer or vector with only the sign bit(s) set.
+inline cst_pred_ty<is_sign_bit> m_SignBit() {return cst_pred_ty<is_sign_bit>();}
+inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; }
+
+struct is_power2 {
+ bool isValue(const APInt &C) { return C.isPowerOf2(); }
+};
+
+/// m_Power2() - Match an integer or vector power of 2.
+inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
+inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }
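+
+// For illustration (assumes a Value *V in scope): the predicate matchers
+// apply to scalars and splat vectors alike.
+//
+//   Value *X;
+//   if (match(V, m_Mul(m_Value(X), m_Power2())))
+//     // V multiplies X by a (scalar or splat) power-of-two constant, so the
+//     // multiply could be turned into a shift.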
+
+template<typename Class>
+struct bind_ty {
+ Class *&VR;
+ bind_ty(Class *&V) : VR(V) {}
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (Class *CV = dyn_cast<Class>(V)) {
+ VR = CV;
+ return true;
+ }
+ return false;
+ }
+};
+
+/// m_Value - Match a value, capturing it if we match.
+inline bind_ty<Value> m_Value(Value *&V) { return V; }
+
+/// m_ConstantInt - Match a ConstantInt, capturing the value if we match.
+inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
+
+/// m_Constant - Match a Constant, capturing the value if we match.
+inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
+
+/// m_ConstantFP - Match a ConstantFP, capturing the value if we match.
+inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
+
+/// specificval_ty - Match a specified Value*.
+struct specificval_ty {
+ const Value *Val;
+ specificval_ty(const Value *V) : Val(V) {}
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ return V == Val;
+ }
+};
+
+/// m_Specific - Match if we have a specific specified value.
+inline specificval_ty m_Specific(const Value *V) { return V; }
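+
+// For illustration (assumes Value *V and a previously captured Value *X in
+// scope):
+//
+//   Value *Y;
+//   if (match(V, m_And(m_Specific(X), m_Value(Y))))
+//     // V is 'and X, Y' where the first operand is exactly the X we already
+//     // had, and Y captures the other operand.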
+
+/// Match a specified floating point value or vector of all elements of that
+/// value.
+struct specific_fpval {
+ double Val;
+ specific_fpval(double V) : Val(V) {}
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
+ return CFP->isExactlyValue(Val);
+ if (V->getType()->isVectorTy())
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
+ return CFP->isExactlyValue(Val);
+ return false;
+ }
+};
+
+/// Match a specific floating point value or vector with all elements equal to
+/// the value.
+inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
+
+/// Match a float 1.0 or vector with all elements equal to 1.0.
+inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
+
+struct bind_const_intval_ty {
+ uint64_t &VR;
+ bind_const_intval_ty(uint64_t &V) : VR(V) {}
+
+ template<typename ITy>
+ bool match(ITy *V) {
+ if (ConstantInt *CV = dyn_cast<ConstantInt>(V))
+ if (CV->getBitWidth() <= 64) {
+ VR = CV->getZExtValue();
+ return true;
+ }
+ return false;
+ }
+};
+
+/// m_ConstantInt - Match a ConstantInt and bind to its value. This does not
+/// match ConstantInts wider than 64-bits.
+inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
+
+//===----------------------------------------------------------------------===//
+// Matchers for specific binary operators.
+//
+
+template<typename LHS_t, typename RHS_t, unsigned Opcode>
+struct BinaryOp_match {
+ LHS_t L;
+ RHS_t R;
+
+ BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opcode) {
+ BinaryOperator *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
+ }
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) &&
+ R.match(CE->getOperand(1));
+ return false;
+ }
+};
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Add>
+m_Add(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FAdd>
+m_FAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Sub>
+m_Sub(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FSub>
+m_FSub(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Mul>
+m_Mul(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FMul>
+m_FMul(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::UDiv>
+m_UDiv(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SDiv>
+m_SDiv(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FDiv>
+m_FDiv(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::URem>
+m_URem(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SRem>
+m_SRem(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FRem>
+m_FRem(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::And>
+m_And(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Or>
+m_Or(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Xor>
+m_Xor(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Shl>
+m_Shl(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::LShr>
+m_LShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::AShr>
+m_AShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
+}
+
+template<typename LHS_t, typename RHS_t, unsigned Opcode, unsigned WrapFlags = 0>
+struct OverflowingBinaryOp_match {
+ LHS_t L;
+ RHS_t R;
+
+ OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (OverflowingBinaryOperator *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
+ if (Op->getOpcode() != Opcode)
+ return false;
+ if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
+ !Op->hasNoUnsignedWrap())
+ return false;
+ if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
+ !Op->hasNoSignedWrap())
+ return false;
+ return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
+ }
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWAdd(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWSub(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWMul(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWShl(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWAdd(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWSub(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWMul(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWShl(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
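+
+// For illustration (assumes a Value *V in scope): the flag-checking matchers
+// only succeed when the corresponding wrap flag is present.
+//
+//   Value *X, *Y;
+//   if (match(V, m_NSWAdd(m_Value(X), m_Value(Y))))
+//     // V is 'add nsw X, Y', so a transform may assume the signed addition
+//     // does not wrap.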
+
+//===----------------------------------------------------------------------===//
+// Class that matches two different binary ops.
+//
+template<typename LHS_t, typename RHS_t, unsigned Opc1, unsigned Opc2>
+struct BinOp2_match {
+ LHS_t L;
+ RHS_t R;
+
+ BinOp2_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opc1 ||
+ V->getValueID() == Value::InstructionVal + Opc2) {
+ BinaryOperator *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
+ }
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ return (CE->getOpcode() == Opc1 || CE->getOpcode() == Opc2) &&
+ L.match(CE->getOperand(0)) && R.match(CE->getOperand(1));
+ return false;
+ }
+};
+
+/// m_Shr - Matches LShr or AShr.
+template<typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>
+m_Shr(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>(L, R);
+}
+
+/// m_LogicalShift - Matches LShr or Shl.
+template<typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>
+m_LogicalShift(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>(L, R);
+}
+
+/// m_IDiv - Matches UDiv and SDiv.
+template<typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>
+m_IDiv(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Class that matches exact binary ops.
+//
+template<typename SubPattern_t>
+struct Exact_match {
+ SubPattern_t SubPattern;
+
+ Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V))
+ return PEO->isExact() && SubPattern.match(V);
+ return false;
+ }
+};
+
+template<typename T>
+inline Exact_match<T> m_Exact(const T &SubPattern) { return SubPattern; }
+
+//===----------------------------------------------------------------------===//
+// Matchers for CmpInst classes
+//
+
+template<typename LHS_t, typename RHS_t, typename Class, typename PredicateTy>
+struct CmpClass_match {
+ PredicateTy &Predicate;
+ LHS_t L;
+ RHS_t R;
+
+ CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
+ : Predicate(Pred), L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (Class *I = dyn_cast<Class>(V))
+ if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
+ Predicate = I->getPredicate();
+ return true;
+ }
+ return false;
+ }
+};
+
+template<typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
+m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+ return CmpClass_match<LHS, RHS,
+ ICmpInst, ICmpInst::Predicate>(Pred, L, R);
+}
+
+template<typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
+m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+ return CmpClass_match<LHS, RHS,
+ FCmpInst, FCmpInst::Predicate>(Pred, L, R);
+}
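+
+// For illustration (assumes a Value *V in scope): the compare matchers bind
+// the predicate as well as the operands.
+//
+//   ICmpInst::Predicate Pred;
+//   Value *X, *Y;
+//   if (match(V, m_ICmp(Pred, m_Value(X), m_Value(Y))))
+//     // V is an integer compare of X and Y; Pred holds its predicate, e.g.
+//     // ICmpInst::ICMP_SGT.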
+
+//===----------------------------------------------------------------------===//
+// Matchers for SelectInst classes
+//
+
+template<typename Cond_t, typename LHS_t, typename RHS_t>
+struct SelectClass_match {
+ Cond_t C;
+ LHS_t L;
+ RHS_t R;
+
+ SelectClass_match(const Cond_t &Cond, const LHS_t &LHS,
+ const RHS_t &RHS)
+ : C(Cond), L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (SelectInst *I = dyn_cast<SelectInst>(V))
+ return C.match(I->getOperand(0)) &&
+ L.match(I->getOperand(1)) &&
+ R.match(I->getOperand(2));
+ return false;
+ }
+};
+
+template<typename Cond, typename LHS, typename RHS>
+inline SelectClass_match<Cond, LHS, RHS>
+m_Select(const Cond &C, const LHS &L, const RHS &R) {
+ return SelectClass_match<Cond, LHS, RHS>(C, L, R);
+}
+
+/// m_SelectCst - This matches a select of two constants, e.g.:
+/// m_SelectCst<-1, 0>(m_Value(V))
+template<int64_t L, int64_t R, typename Cond>
+inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R> >
+m_SelectCst(const Cond &C) {
+ return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
+}
+
+
+//===----------------------------------------------------------------------===//
+// Matchers for CastInst classes
+//
+
+template<typename Op_t, unsigned Opcode>
+struct CastClass_match {
+ Op_t Op;
+
+ CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (Operator *O = dyn_cast<Operator>(V))
+ return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
+ return false;
+ }
+};
+
+/// m_BitCast
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::BitCast>
+m_BitCast(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::BitCast>(Op);
+}
+
+/// m_PtrToInt
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::PtrToInt>
+m_PtrToInt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
+}
+
+/// m_Trunc
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::Trunc>
+m_Trunc(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::Trunc>(Op);
+}
+
+/// m_SExt
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::SExt>
+m_SExt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::SExt>(Op);
+}
+
+/// m_ZExt
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::ZExt>
+m_ZExt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::ZExt>(Op);
+}
+
+/// m_UIToFP
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::UIToFP>
+m_UIToFP(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::UIToFP>(Op);
+}
+
+/// m_SIToFP
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::SIToFP>
+m_SIToFP(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::SIToFP>(Op);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for unary operators
+//
+
+template<typename LHS_t>
+struct not_match {
+ LHS_t L;
+
+ not_match(const LHS_t &LHS) : L(LHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Xor)
+ return matchIfNot(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+private:
+ bool matchIfNot(Value *LHS, Value *RHS) {
+ return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
+ // FIXME: Remove CV.
+ isa<ConstantVector>(RHS)) &&
+ cast<Constant>(RHS)->isAllOnesValue() &&
+ L.match(LHS);
+ }
+};
+
+template<typename LHS>
+inline not_match<LHS> m_Not(const LHS &L) { return L; }
+
+
+template<typename LHS_t>
+struct neg_match {
+ LHS_t L;
+
+ neg_match(const LHS_t &LHS) : L(LHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Sub)
+ return matchIfNeg(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+private:
+ bool matchIfNeg(Value *LHS, Value *RHS) {
+ return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
+ isa<ConstantAggregateZero>(LHS)) &&
+ L.match(RHS);
+ }
+};
+
+/// m_Neg - Match an integer negate.
+template<typename LHS>
+inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
+
+
+template<typename LHS_t>
+struct fneg_match {
+ LHS_t L;
+
+ fneg_match(const LHS_t &LHS) : L(LHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::FSub)
+ return matchIfFNeg(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+private:
+ bool matchIfFNeg(Value *LHS, Value *RHS) {
+ if (ConstantFP *C = dyn_cast<ConstantFP>(LHS))
+ return C->isNegativeZeroValue() && L.match(RHS);
+ return false;
+ }
+};
+
+/// m_FNeg - Match a floating point negate.
+template<typename LHS>
+inline fneg_match<LHS> m_FNeg(const LHS &L) { return L; }
+
+
+//===----------------------------------------------------------------------===//
+// Matchers for control flow.
+//
+
+struct br_match {
+ BasicBlock *&Succ;
+ br_match(BasicBlock *&Succ)
+ : Succ(Succ) {
+ }
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (BranchInst *BI = dyn_cast<BranchInst>(V))
+ if (BI->isUnconditional()) {
+ Succ = BI->getSuccessor(0);
+ return true;
+ }
+ return false;
+ }
+};
+
+inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
+
+template<typename Cond_t>
+struct brc_match {
+ Cond_t Cond;
+ BasicBlock *&T, *&F;
+ brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
+ : Cond(C), T(t), F(f) {
+ }
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (BranchInst *BI = dyn_cast<BranchInst>(V))
+ if (BI->isConditional() && Cond.match(BI->getCondition())) {
+ T = BI->getSuccessor(0);
+ F = BI->getSuccessor(1);
+ return true;
+ }
+ return false;
+ }
+};
+
+template<typename Cond_t>
+inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
+ return brc_match<Cond_t>(C, T, F);
+}
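+
+// For illustration (assumes a BasicBlock *BB in scope): branch matchers are
+// typically applied to a block's terminator.
+//
+//   BasicBlock *T, *F;
+//   ICmpInst::Predicate Pred;
+//   Value *X, *Y;
+//   if (match(BB->getTerminator(),
+//             m_Br(m_ICmp(Pred, m_Value(X), m_Value(Y)), T, F)))
+//     // BB ends in 'br (icmp Pred X, Y), label %T, label %F'.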
+
+
+//===----------------------------------------------------------------------===//
+// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x,y).
+//
+
+template<typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t>
+struct MaxMin_match {
+ LHS_t L;
+ RHS_t R;
+
+ MaxMin_match(const LHS_t &LHS, const RHS_t &RHS)
+ : L(LHS), R(RHS) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
+ SelectInst *SI = dyn_cast<SelectInst>(V);
+ if (!SI)
+ return false;
+ CmpInst_t *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
+ if (!Cmp)
+ return false;
+ // At this point we have a select conditioned on a comparison. Check that
+ // it is the values returned by the select that are being compared.
+ Value *TrueVal = SI->getTrueValue();
+ Value *FalseVal = SI->getFalseValue();
+ Value *LHS = Cmp->getOperand(0);
+ Value *RHS = Cmp->getOperand(1);
+ if ((TrueVal != LHS || FalseVal != RHS) &&
+ (TrueVal != RHS || FalseVal != LHS))
+ return false;
+ typename CmpInst_t::Predicate Pred = LHS == TrueVal ?
+ Cmp->getPredicate() : Cmp->getSwappedPredicate();
+ // Does "(x pred y) ? x : y" represent the desired max/min operation?
+ if (!Pred_t::match(Pred))
+ return false;
+ // It does! Bind the operands.
+ return L.match(LHS) && R.match(RHS);
+ }
+};
+
+/// smax_pred_ty - Helper class for identifying signed max predicates.
+struct smax_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
+ }
+};
+
+/// smin_pred_ty - Helper class for identifying signed min predicates.
+struct smin_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
+ }
+};
+
+/// umax_pred_ty - Helper class for identifying unsigned max predicates.
+struct umax_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
+ }
+};
+
+/// umin_pred_ty - Helper class for identifying unsigned min predicates.
+struct umin_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
+ }
+};
+
+/// ofmax_pred_ty - Helper class for identifying ordered max predicates.
+struct ofmax_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
+ }
+};
+
+/// ofmin_pred_ty - Helper class for identifying ordered min predicates.
+struct ofmin_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
+ }
+};
+
+/// ufmax_pred_ty - Helper class for identifying unordered max predicates.
+struct ufmax_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
+ }
+};
+
+/// ufmin_pred_ty - Helper class for identifying unordered min predicates.
+struct ufmin_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
+ }
+};
+
+template<typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>
+m_SMax(const LHS &L, const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>
+m_SMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>
+m_UMax(const LHS &L, const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
+}
+
+template<typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>
+m_UMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
+}
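+
+// For illustration (assumes a Value *V in scope): these match the canonical
+// compare+select idiom rather than any intrinsic.
+//
+//   Value *X, *Y;
+//   if (match(V, m_SMax(m_Value(X), m_Value(Y))))
+//     // V computes the signed maximum of X and Y, e.g. as
+//     // 'select (icmp sgt X, Y), X, Y' or the equivalent swapped form.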
+
+/// \brief Match an 'ordered' floating point maximum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate.
+///
+/// max(L, R) iff L and R are not NaN
+/// m_OrdFMax(L, R) = R iff L or R are NaN
+template<typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>
+m_OrdFMax(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
+}
+
+/// \brief Match an 'ordered' floating point minimum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
+///
+/// min(L, R) iff L and R are not NaN
+/// m_OrdFMin(L, R) = R iff L or R are NaN
+template<typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>
+m_OrdFMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
+}
+
+/// \brief Match an 'unordered' floating point maximum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
+///
+/// max(L, R) iff L and R are not NaN
+/// m_UnordFMax(L, R) = L iff L or R are NaN
+template<typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
+m_UnordFMax(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
+}
+
+/// \brief Match an 'unordered' floating point minimum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
+///
+/// min(L, R) iff L and R are not NaN
+/// m_UnordFMin(L, R) = L iff L or R are NaN
+template<typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
+m_UnordFMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
+}
+
+template<typename Opnd_t>
+struct Argument_match {
+ unsigned OpI;
+ Opnd_t Val;
+ Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) { }
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ CallSite CS(V);
+ return CS.isCall() && Val.match(CS.getArgument(OpI));
+ }
+};
+
+/// Match an argument
+template<unsigned OpI, typename Opnd_t>
+inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
+ return Argument_match<Opnd_t>(OpI, Op);
+}
+
+/// Intrinsic matchers.
+struct IntrinsicID_match {
+ unsigned ID;
+ IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) { }
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(V);
+ return II && II->getIntrinsicID() == ID;
+ }
+};
+
+/// Intrinsic matches are combinations of ID matchers, and argument
+/// matchers. Higher arity matchers are defined recursively in terms of and-ing
+/// them with lower arity matchers. Here are some convenient typedefs for up to
+/// several arguments, and more can be added as needed.
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void, typename T10 = void> struct m_Intrinsic_Ty;
+template <typename T0>
+struct m_Intrinsic_Ty<T0> {
+ typedef match_combine_and<IntrinsicID_match, Argument_match<T0> > Ty;
+};
+template <typename T0, typename T1>
+struct m_Intrinsic_Ty<T0, T1> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0>::Ty,
+ Argument_match<T1> > Ty;
+};
+template <typename T0, typename T1, typename T2>
+struct m_Intrinsic_Ty<T0, T1, T2> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
+ Argument_match<T2> > Ty;
+};
+template <typename T0, typename T1, typename T2, typename T3>
+struct m_Intrinsic_Ty<T0, T1, T2, T3> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
+ Argument_match<T3> > Ty;
+};
+
+/// Match intrinsic calls like this:
+/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
+template <Intrinsic::ID IntrID>
+inline IntrinsicID_match
+m_Intrinsic() { return IntrinsicID_match(IntrID); }
+
+template<Intrinsic::ID IntrID, typename T0>
+inline typename m_Intrinsic_Ty<T0>::Ty
+m_Intrinsic(const T0 &Op0) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
+}
+
+template<Intrinsic::ID IntrID, typename T0, typename T1>
+inline typename m_Intrinsic_Ty<T0, T1>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
+}
+
+template<Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
+inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
+}
+
+template<Intrinsic::ID IntrID, typename T0, typename T1, typename T2, typename T3>
+inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
+}
+
+// Helper intrinsic matching specializations
+template<typename Opnd0>
+inline typename m_Intrinsic_Ty<Opnd0>::Ty
+m_BSwap(const Opnd0 &Op0) {
+ return m_Intrinsic<Intrinsic::bswap>(Op0);
+}
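+
+// For illustration (assumes a Value *V in scope):
+//
+//   Value *X;
+//   if (match(V, m_BSwap(m_Value(X))))
+//     // V is a call to the llvm.bswap intrinsic with X as its argument.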
+
+} // end namespace PatternMatch
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/IR/PredIteratorCache.h b/include/llvm/IR/PredIteratorCache.h
new file mode 100644
index 0000000..bf18dfe
--- /dev/null
+++ b/include/llvm/IR/PredIteratorCache.h
@@ -0,0 +1,70 @@
+//===- PredIteratorCache.h - pred_iterator Cache ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PredIteratorCache class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/Support/Allocator.h"
+
+#ifndef LLVM_IR_PREDITERATORCACHE_H
+#define LLVM_IR_PREDITERATORCACHE_H
+
+namespace llvm {
+
+ /// PredIteratorCache - This class is an extremely trivial cache for
+ /// predecessor iterator queries. This is useful for code that repeatedly
+ /// wants the predecessor list for the same blocks.
+ class PredIteratorCache {
+ /// BlockToPredsMap - Pointer to null-terminated list.
+ DenseMap<BasicBlock*, BasicBlock**> BlockToPredsMap;
+ DenseMap<BasicBlock*, unsigned> BlockToPredCountMap;
+
+ /// Memory - This is the space that holds cached preds.
+ BumpPtrAllocator Memory;
+ public:
+
+ /// GetPreds - Get a cached list for the null-terminated predecessor list of
+ /// the specified block. This can be used in a loop like this:
+ /// for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI)
+ /// use(*PI);
+ /// instead of:
+ /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ BasicBlock **GetPreds(BasicBlock *BB) {
+ BasicBlock **&Entry = BlockToPredsMap[BB];
+ if (Entry) return Entry;
+
+ SmallVector<BasicBlock*, 32> PredCache(pred_begin(BB), pred_end(BB));
+ PredCache.push_back(0); // null terminator.
+
+ BlockToPredCountMap[BB] = PredCache.size()-1;
+
+ Entry = Memory.Allocate<BasicBlock*>(PredCache.size());
+ std::copy(PredCache.begin(), PredCache.end(), Entry);
+ return Entry;
+ }
+
+ unsigned GetNumPreds(BasicBlock *BB) {
+ GetPreds(BB);
+ return BlockToPredCountMap[BB];
+ }
+
+ /// clear - Remove all information.
+ void clear() {
+ BlockToPredsMap.clear();
+ BlockToPredCountMap.clear();
+ Memory.Reset();
+ }
+ };
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index 3cfb84e..742a0d3 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -15,12 +15,13 @@
#ifndef LLVM_IR_TYPE_H
#define LLVM_IR_TYPE_H
+#include "llvm-c/Core.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/Support/Casting.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm-c/Core.h"
namespace llvm {
@@ -70,11 +71,7 @@ public:
StructTyID, ///< 12: Structures
ArrayTyID, ///< 13: Arrays
PointerTyID, ///< 14: Pointers
- VectorTyID, ///< 15: SIMD 'packed' format, or other vector type
-
- NumTypeIDs, // Must remain as last defined ID
- LastPrimitiveTyID = X86_MMXTyID,
- FirstDerivedTyID = IntegerTyID
+ VectorTyID ///< 15: SIMD 'packed' format, or other vector type
};
private:
@@ -239,12 +236,6 @@ public:
/// elements or all its elements are empty.
bool isEmptyTy() const;
- /// Here are some useful little methods to query what type derived types are
- /// Note that all other types can just compare to see if this == Type::xxxTy;
- ///
- bool isPrimitiveType() const { return getTypeID() <= LastPrimitiveTyID; }
- bool isDerivedType() const { return getTypeID() >= FirstDerivedTyID; }
-
/// isFirstClassType - Return true if the type is "first class", meaning it
/// is a valid type for a Value.
///
@@ -257,9 +248,8 @@ public:
/// and array types.
///
bool isSingleValueType() const {
- return (getTypeID() != VoidTyID && isPrimitiveType()) ||
- getTypeID() == IntegerTyID || getTypeID() == PointerTyID ||
- getTypeID() == VectorTyID;
+ return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
+ isPointerTy() || isVectorTy();
}
/// isAggregateType - Return true if the type is an aggregate type. This
@@ -275,7 +265,7 @@ public:
/// get the actual size for a particular target, it is reasonable to use the
/// DataLayout subsystem to do this.
///
- bool isSized() const {
+ bool isSized(SmallPtrSet<const Type*, 4> *Visited = 0) const {
// If it's a primitive, it is always sized.
if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
getTypeID() == PointerTyID ||
@@ -287,7 +277,7 @@ public:
getTypeID() != VectorTyID)
return false;
// Otherwise we have to try harder to decide.
- return isSizedDerivedType();
+ return isSizedDerivedType(Visited);
}
/// getPrimitiveSizeInBits - Return the basic size of this type if it is a
@@ -300,12 +290,12 @@ public:
/// instance of the type is stored to memory. The DataLayout class provides
/// additional query functions to provide this information.
///
- unsigned getPrimitiveSizeInBits() const;
+ unsigned getPrimitiveSizeInBits() const LLVM_READONLY;
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
- unsigned getScalarSizeInBits();
+ unsigned getScalarSizeInBits() const LLVM_READONLY;
/// getFPMantissaWidth - Return the width of the mantissa of this type. This
/// is only valid on floating point types. If the FP type does not
@@ -314,8 +304,8 @@ public:
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return 'this'.
- const Type *getScalarType() const;
- Type *getScalarType();
+ const Type *getScalarType() const LLVM_READONLY;
+ Type *getScalarType() LLVM_READONLY;
//===--------------------------------------------------------------------===//
// Type Iteration support.
@@ -429,7 +419,7 @@ private:
/// isSizedDerivedType - Derived types like structures and arrays are sized
/// iff all of the members of the type are sized as well. Since asking for
/// their size is relatively uncommon, move this operation out of line.
- bool isSizedDerivedType() const;
+ bool isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited = 0) const;
};
// Printing of types.
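
The isSized() change above threads an optional SmallPtrSet of visited types down into isSizedDerivedType(), so one set can be shared across nested queries. A minimal sketch of a caller under that assumption (the helper name below is illustrative, not part of the patch):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    // Hypothetical helper: allocate one visited set up front and let the whole
    // recursive isSized() walk over a struct/array type graph reuse it.
    static bool hasKnownSize(const Type *Ty) {
      SmallPtrSet<const Type *, 4> Visited;
      return Ty->isSized(&Visited);
    }
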
diff --git a/include/llvm/IR/Use.h b/include/llvm/IR/Use.h
index 12cd150..340572a 100644
--- a/include/llvm/IR/Use.h
+++ b/include/llvm/IR/Use.h
@@ -6,29 +6,29 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This defines the Use class. The Use class represents the operand of an
-// instruction or some other User instance which refers to a Value. The Use
-// class keeps the "use list" of the referenced value up to date.
-//
-// Pointer tagging is used to efficiently find the User corresponding
-// to a Use without having to store a User pointer in every Use. A
-// User is preceded in memory by all the Uses corresponding to its
-// operands, and the low bits of one of the fields (Prev) of the Use
-// class are used to encode offsets to be able to find that User given
-// a pointer to any Use. For details, see:
-//
-// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
-//
+/// \file
+///
+/// This defines the Use class. The Use class represents the operand of an
+/// instruction or some other User instance which refers to a Value. The Use
+/// class keeps the "use list" of the referenced value up to date.
+///
+/// Pointer tagging is used to efficiently find the User corresponding to a Use
+/// without having to store a User pointer in every Use. A User is preceded in
+/// memory by all the Uses corresponding to its operands, and the low bits of
+/// one of the fields (Prev) of the Use class are used to encode offsets to be
+/// able to find that User given a pointer to any Use. For details, see:
+///
+/// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_USE_H
#define LLVM_IR_USE_H
+#include "llvm-c/Core.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Compiler.h"
-#include "llvm-c/Core.h"
#include <cstddef>
#include <iterator>
@@ -37,64 +37,67 @@ namespace llvm {
class Value;
class User;
class Use;
-template<typename>
-struct simplify_type;
+template <typename> struct simplify_type;
// Use** is only 4-byte aligned.
-template<>
-class PointerLikeTypeTraits<Use**> {
+template <> class PointerLikeTypeTraits<Use **> {
public:
- static inline void *getAsVoidPointer(Use** P) { return P; }
+ static inline void *getAsVoidPointer(Use **P) { return P; }
static inline Use **getFromVoidPointer(void *P) {
- return static_cast<Use**>(P);
+ return static_cast<Use **>(P);
}
enum { NumLowBitsAvailable = 2 };
};
-//===----------------------------------------------------------------------===//
-// Use Class
-//===----------------------------------------------------------------------===//
-
-/// Use is here to make keeping the "use" list of a Value up-to-date really
-/// easy.
+/// \brief A Use represents the edge between a Value definition and its users.
+///
+/// This is notionally a two-dimensional linked list. It supports traversing
+/// all of the uses for a particular value definition. It also supports jumping
+/// directly to the used value when we arrive from the User's operands, and
+/// jumping directly to the User when we arrive from the Value's uses.
+///
+/// The pointer to the used Value is explicit, and the pointer to the User is
+/// implicit. The implicit pointer is found via a waymarking algorithm
+/// described in the programmer's manual:
+///
+/// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+///
+/// This is essentially the single most memory intensive object in LLVM because
+/// of the number of uses in the system. At the same time, the constant time
+/// operations it allows are essential to many optimizations having reasonable
+/// time complexity.
class Use {
public:
- /// swap - provide a fast substitute to std::swap<Use>
+ /// \brief Provide a fast substitute to std::swap<Use>
/// that also works with less standard-compliant compilers
void swap(Use &RHS);
// A type for the word following an array of hung-off Uses in memory, which is
// a pointer back to their User with the bottom bit set.
- typedef PointerIntPair<User*, 1, unsigned> UserRef;
+ typedef PointerIntPair<User *, 1, unsigned> UserRef;
private:
Use(const Use &U) LLVM_DELETED_FUNCTION;
/// Destructor - Only for zap()
~Use() {
- if (Val) removeFromList();
+ if (Val)
+ removeFromList();
}
- enum PrevPtrTag { zeroDigitTag
- , oneDigitTag
- , stopTag
- , fullStopTag };
+ enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };
/// Constructor
- Use(PrevPtrTag tag) : Val(0) {
- Prev.setInt(tag);
- }
+ Use(PrevPtrTag tag) : Val(0) { Prev.setInt(tag); }
public:
- /// Normally Use will just implicitly convert to a Value* that it holds.
- operator Value*() const { return Val; }
-
- /// If implicit conversion to Value* doesn't work, the get() method returns
- /// the Value*.
+ operator Value *() const { return Val; }
Value *get() const { return Val; }
-
- /// getUser - This returns the User that contains this Use. For an
- /// instruction operand, for example, this will return the instruction.
+
+ /// \brief Returns the User that contains this Use.
+ ///
+ /// For an instruction operand, for example, this will return the
+ /// instruction.
User *getUser() const;
inline void set(Value *Val);
@@ -108,116 +111,63 @@ public:
return *this;
}
- Value *operator->() { return Val; }
+ Value *operator->() { return Val; }
const Value *operator->() const { return Val; }
Use *getNext() const { return Next; }
-
- /// initTags - initialize the waymarking tags on an array of Uses, so that
- /// getUser() can find the User from any of those Uses.
+ /// \brief Return the operand # of this use in its User.
+ unsigned getOperandNo() const;
+
+ /// \brief Initializes the waymarking tags on an array of Uses.
+ ///
+ /// This sets up the array of Uses such that getUser() can find the User from
+ /// any of those Uses.
static Use *initTags(Use *Start, Use *Stop);
- /// zap - This is used to destroy Use operands when the number of operands of
+ /// \brief Destroys Use operands when the number of operands of
/// a User changes.
static void zap(Use *Start, const Use *Stop, bool del = false);
private:
- const Use* getImpliedUser() const;
-
+ const Use *getImpliedUser() const;
+
Value *Val;
Use *Next;
- PointerIntPair<Use**, 2, PrevPtrTag> Prev;
+ PointerIntPair<Use **, 2, PrevPtrTag> Prev;
- void setPrev(Use **NewPrev) {
- Prev.setPointer(NewPrev);
- }
+ void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
void addToList(Use **List) {
Next = *List;
- if (Next) Next->setPrev(&Next);
+ if (Next)
+ Next->setPrev(&Next);
setPrev(List);
*List = this;
}
void removeFromList() {
Use **StrippedPrev = Prev.getPointer();
*StrippedPrev = Next;
- if (Next) Next->setPrev(StrippedPrev);
+ if (Next)
+ Next->setPrev(StrippedPrev);
}
friend class Value;
};
-// simplify_type - Allow clients to treat uses just like values when using
-// casting operators.
-template<> struct simplify_type<Use> {
- typedef Value* SimpleType;
- static SimpleType getSimplifiedValue(Use &Val) {
- return Val.get();
- }
+/// \brief Allow clients to treat uses just like values when using
+/// casting operators.
+template <> struct simplify_type<Use> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(Use &Val) { return Val.get(); }
};
-template<> struct simplify_type<const Use> {
- typedef /*const*/ Value* SimpleType;
- static SimpleType getSimplifiedValue(const Use &Val) {
- return Val.get();
- }
-};
-
-
-
-template<typename UserTy> // UserTy == 'User' or 'const User'
-class value_use_iterator : public std::iterator<std::forward_iterator_tag,
- UserTy*, ptrdiff_t> {
- typedef std::iterator<std::forward_iterator_tag, UserTy*, ptrdiff_t> super;
- typedef value_use_iterator<UserTy> _Self;
-
- Use *U;
- explicit value_use_iterator(Use *u) : U(u) {}
- friend class Value;
-public:
- typedef typename super::reference reference;
- typedef typename super::pointer pointer;
-
- value_use_iterator() {}
-
- bool operator==(const _Self &x) const {
- return U == x.U;
- }
- bool operator!=(const _Self &x) const {
- return !operator==(x);
- }
-
- /// atEnd - return true if this iterator is equal to use_end() on the value.
- bool atEnd() const { return U == 0; }
-
- // Iterator traversal: forward iteration only
- _Self &operator++() { // Preincrement
- assert(U && "Cannot increment end iterator!");
- U = U->getNext();
- return *this;
- }
- _Self operator++(int) { // Postincrement
- _Self tmp = *this; ++*this; return tmp;
- }
-
- // Retrieve a pointer to the current User.
- UserTy *operator*() const {
- assert(U && "Cannot dereference end iterator!");
- return U->getUser();
- }
-
- UserTy *operator->() const { return operator*(); }
-
- Use &getUse() const { return *U; }
-
- /// getOperandNo - Return the operand # of this use in its User. Defined in
- /// User.h
- ///
- unsigned getOperandNo() const;
+template <> struct simplify_type<const Use> {
+ typedef /*const*/ Value *SimpleType;
+ static SimpleType getSimplifiedValue(const Use &Val) { return Val.get(); }
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)
-} // End llvm namespace
+}
#endif
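
With getOperandNo() moved from the old value_use_iterator onto Use itself, the operand position can be read straight off a Use edge. A minimal sketch (the helper is illustrative; the uses() range comes from the Value.h changes later in this patch):

    #include "llvm/IR/Use.h"
    #include "llvm/IR/User.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Hypothetical helper: walk V's use list as Use edges; the owning User and
    // the operand slot both come directly from the Use.
    static void visitUses(Value *V) {
      for (Use &U : V->uses()) {
        User *Owner = U.getUser();        // e.g. the instruction that uses V
        unsigned OpNo = U.getOperandNo(); // which operand slot holds V
        (void)Owner;
        (void)OpNo;
      }
    }
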
diff --git a/include/llvm/IR/User.h b/include/llvm/IR/User.h
index 505bdeb..061bc91 100644
--- a/include/llvm/IR/User.h
+++ b/include/llvm/IR/User.h
@@ -19,6 +19,7 @@
#ifndef LLVM_IR_USER_H
#define LLVM_IR_USER_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
@@ -112,11 +113,19 @@ public:
//
typedef Use* op_iterator;
typedef const Use* const_op_iterator;
+ typedef iterator_range<op_iterator> op_range;
+ typedef iterator_range<const_op_iterator> const_op_range;
inline op_iterator op_begin() { return OperandList; }
inline const_op_iterator op_begin() const { return OperandList; }
inline op_iterator op_end() { return OperandList+NumOperands; }
inline const_op_iterator op_end() const { return OperandList+NumOperands; }
+ inline op_range operands() {
+ return op_range(op_begin(), op_end());
+ }
+ inline const_op_range operands() const {
+ return const_op_range(op_begin(), op_end());
+ }
/// Convenience iterator for directly iterating over the Values in the
/// OperandList
@@ -156,6 +165,9 @@ public:
inline value_op_iterator value_op_end() {
return value_op_iterator(op_end());
}
+ inline iterator_range<value_op_iterator> operand_values() {
+ return iterator_range<value_op_iterator>(value_op_begin(), value_op_end());
+ }
// dropAllReferences() - This function is in charge of "letting go" of all
// objects that this User refers to. This allows one to
@@ -166,8 +178,8 @@ public:
// delete.
//
void dropAllReferences() {
- for (op_iterator i = op_begin(), e = op_end(); i != e; ++i)
- i->set(0);
+ for (Use &U : operands())
+ U.set(0);
}
/// replaceUsesOfWith - Replaces all references to the "From" definition with
@@ -194,12 +206,6 @@ template<> struct simplify_type<User::const_op_iterator> {
}
};
-// value_use_iterator::getOperandNo - Requires the definition of the User class.
-template<typename UserTy>
-unsigned value_use_iterator<UserTy>::getOperandNo() const {
- return U - U->getUser()->op_begin();
-}
-
} // End llvm namespace
#endif
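
The new operands() and operand_values() ranges let clients write range-based for loops instead of spelling out op_iterator, as dropAllReferences() now does above. A small sketch (the function is illustrative, not from the patch):

    #include "llvm/IR/User.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Hypothetical helper: operand_values() yields Value* directly, so there is
    // no manual dereference of an op_iterator.
    static bool referencesValue(User *U, const Value *V) {
      for (Value *Op : U->operand_values())
        if (Op == V)
          return true;
      return false;
    }
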
diff --git a/include/llvm/IR/Value.h b/include/llvm/IR/Value.h
index e1361fe..d5b9f11 100644
--- a/include/llvm/IR/Value.h
+++ b/include/llvm/IR/Value.h
@@ -14,11 +14,12 @@
#ifndef LLVM_IR_VALUE_H
#define LLVM_IR_VALUE_H
+#include "llvm-c/Core.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Use.h"
-#include "llvm/Support/Casting.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
-#include "llvm-c/Core.h"
namespace llvm {
@@ -36,6 +37,7 @@ class InlineAsm;
class Instruction;
class LLVMContext;
class MDNode;
+class Module;
class StringRef;
class Twine;
class Type;
@@ -60,7 +62,7 @@ typedef StringMapEntry<Value*> ValueName;
/// Every value has a "use list" that keeps track of which other Values are
/// using this Value. A Value can also have an arbitrary number of ValueHandle
/// objects that watch it and listen to RAUW and Destroy events. See
-/// llvm/Support/ValueHandle.h for details.
+/// llvm/IR/ValueHandle.h for details.
///
/// @brief LLVM Value Representation
class Value {
@@ -74,6 +76,96 @@ protected:
unsigned char SubclassOptionalData : 7;
private:
+ template <typename UseT> // UseT == 'Use' or 'const Use'
+ class use_iterator_impl
+ : public std::iterator<std::forward_iterator_tag, UseT *, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag, UseT *, ptrdiff_t> super;
+
+ UseT *U;
+ explicit use_iterator_impl(UseT *u) : U(u) {}
+ friend class Value;
+
+ public:
+ typedef typename super::reference reference;
+ typedef typename super::pointer pointer;
+
+ use_iterator_impl() : U() {}
+
+ bool operator==(const use_iterator_impl &x) const { return U == x.U; }
+ bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
+
+ use_iterator_impl &operator++() { // Preincrement
+ assert(U && "Cannot increment end iterator!");
+ U = U->getNext();
+ return *this;
+ }
+ use_iterator_impl operator++(int) { // Postincrement
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ UseT &operator*() const {
+ assert(U && "Cannot dereference end iterator!");
+ return *U;
+ }
+
+ UseT *operator->() const { return &operator*(); }
+
+ operator use_iterator_impl<const UseT>() const {
+ return use_iterator_impl<const UseT>(U);
+ }
+ };
+
+ template <typename UserTy> // UserTy == 'User' or 'const User'
+ class user_iterator_impl
+ : public std::iterator<std::forward_iterator_tag, UserTy *, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag, UserTy *, ptrdiff_t> super;
+
+ use_iterator_impl<Use> UI;
+ explicit user_iterator_impl(Use *U) : UI(U) {}
+ friend class Value;
+
+ public:
+ typedef typename super::reference reference;
+ typedef typename super::pointer pointer;
+
+ user_iterator_impl() {}
+
+ bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
+ bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
+
+ /// \brief Returns true if this iterator is equal to user_end() on the value.
+ bool atEnd() const { return *this == user_iterator_impl(); }
+
+ user_iterator_impl &operator++() { // Preincrement
+ ++UI;
+ return *this;
+ }
+ user_iterator_impl operator++(int) { // Postincrement
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // Retrieve a pointer to the current User.
+ UserTy *operator*() const {
+ return UI->getUser();
+ }
+
+ UserTy *operator->() const { return operator*(); }
+
+ operator user_iterator_impl<const UserTy>() const {
+ return user_iterator_impl<const UserTy>(*UI);
+ }
+
+ Use &getUse() const { return *UI; }
+
+ /// \brief Return the operand # of this use in its User.
+ /// FIXME: Replace all callers with a direct call to Use::getOperandNo.
+ unsigned getOperandNo() const { return UI->getOperandNo(); }
+ };
+
/// SubclassData - This member is defined by this class, but is not used for
/// anything. Subclasses can use it to hold whatever state they find useful.
/// This field is initialized to zero by the ctor.
@@ -106,6 +198,13 @@ public:
///
void print(raw_ostream &O, AssemblyAnnotationWriter *AAW = 0) const;
+ /// \brief Print the name of this Value out to the specified raw_ostream.
+ /// This is useful when you just want to print 'int %reg126', not the
+ /// instruction that generated it. If you specify a Module for context, then
+  /// even constants get pretty-printed; for example, the type of a null
+  /// pointer is printed symbolically.
+  void printAsOperand(raw_ostream &O, bool PrintType = true,
+                      const Module *M = 0) const;
+
/// All values are typed, get the type of this value.
///
Type *getType() const { return VTy; }
@@ -143,16 +242,35 @@ public:
//----------------------------------------------------------------------
// Methods for handling the chain of uses of this Value.
//
- typedef value_use_iterator<User> use_iterator;
- typedef value_use_iterator<const User> const_use_iterator;
-
bool use_empty() const { return UseList == 0; }
+
+ typedef use_iterator_impl<Use> use_iterator;
+ typedef use_iterator_impl<const Use> const_use_iterator;
use_iterator use_begin() { return use_iterator(UseList); }
const_use_iterator use_begin() const { return const_use_iterator(UseList); }
- use_iterator use_end() { return use_iterator(0); }
- const_use_iterator use_end() const { return const_use_iterator(0); }
- User *use_back() { return *use_begin(); }
- const User *use_back() const { return *use_begin(); }
+ use_iterator use_end() { return use_iterator(); }
+ const_use_iterator use_end() const { return const_use_iterator(); }
+ iterator_range<use_iterator> uses() {
+ return iterator_range<use_iterator>(use_begin(), use_end());
+ }
+ iterator_range<const_use_iterator> uses() const {
+ return iterator_range<const_use_iterator>(use_begin(), use_end());
+ }
+
+ typedef user_iterator_impl<User> user_iterator;
+ typedef user_iterator_impl<const User> const_user_iterator;
+ user_iterator user_begin() { return user_iterator(UseList); }
+ const_user_iterator user_begin() const { return const_user_iterator(UseList); }
+ user_iterator user_end() { return user_iterator(); }
+ const_user_iterator user_end() const { return const_user_iterator(); }
+ User *user_back() { return *user_begin(); }
+ const User *user_back() const { return *user_begin(); }
+ iterator_range<user_iterator> users() {
+ return iterator_range<user_iterator>(user_begin(), user_end());
+ }
+ iterator_range<const_user_iterator> users() const {
+ return iterator_range<const_user_iterator>(user_begin(), user_end());
+ }
/// hasOneUse - Return true if there is exactly one user of this value. This
/// is specialized because it is a common request and does not require
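
The split into use_iterator and user_iterator means code that used to dereference use_begin() for a User now iterates users(), while uses() walks the Use edges themselves. A minimal sketch of the new style (the helper is illustrative only):

    #include "llvm/IR/User.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Hypothetical helper: users() yields one entry per use edge, so a User
    // that references V through several operands is visited several times.
    static unsigned countUseEdges(const Value *V) {
      unsigned N = 0;
      for (const User *U : V->users()) {
        (void)U;
        ++N;
      }
      return N;
    }
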
diff --git a/include/llvm/IR/ValueHandle.h b/include/llvm/IR/ValueHandle.h
new file mode 100644
index 0000000..9b5e11a
--- /dev/null
+++ b/include/llvm/IR/ValueHandle.h
@@ -0,0 +1,380 @@
+//===- ValueHandle.h - Value Smart Pointer classes --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ValueHandle class and its sub-classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEHANDLE_H
+#define LLVM_IR_VALUEHANDLE_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+class ValueHandleBase;
+template<typename From> struct simplify_type;
+
+// ValueHandleBase** is only 4-byte aligned.
+template<>
+class PointerLikeTypeTraits<ValueHandleBase**> {
+public:
+ static inline void *getAsVoidPointer(ValueHandleBase** P) { return P; }
+ static inline ValueHandleBase **getFromVoidPointer(void *P) {
+ return static_cast<ValueHandleBase**>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+
+/// ValueHandleBase - This is the common base class of value handles.
+/// ValueHandle's are smart pointers to Value's that have special behavior when
+/// the value is deleted or ReplaceAllUsesWith'd. See the specific handles
+/// below for details.
+///
+class ValueHandleBase {
+ friend class Value;
+protected:
+  /// HandleBaseKind - This indicates what subclass the handle actually is.
+ /// This is to avoid having a vtable for the light-weight handle pointers. The
+ /// fully general Callback version does have a vtable.
+ enum HandleBaseKind {
+ Assert,
+ Callback,
+ Tracking,
+ Weak
+ };
+
+private:
+ PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
+ ValueHandleBase *Next;
+
+ // A subclass may want to store some information along with the value
+ // pointer. Allow them to do this by making the value pointer a pointer-int
+ // pair. The 'setValPtrInt' and 'getValPtrInt' methods below give them this
+ // access.
+ PointerIntPair<Value*, 2> VP;
+
+ ValueHandleBase(const ValueHandleBase&) LLVM_DELETED_FUNCTION;
+public:
+ explicit ValueHandleBase(HandleBaseKind Kind)
+ : PrevPair(0, Kind), Next(0), VP(0, 0) {}
+ ValueHandleBase(HandleBaseKind Kind, Value *V)
+ : PrevPair(0, Kind), Next(0), VP(V, 0) {
+ if (isValid(VP.getPointer()))
+ AddToUseList();
+ }
+ ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
+ : PrevPair(0, Kind), Next(0), VP(RHS.VP) {
+ if (isValid(VP.getPointer()))
+ AddToExistingUseList(RHS.getPrevPtr());
+ }
+ ~ValueHandleBase() {
+ if (isValid(VP.getPointer()))
+ RemoveFromUseList();
+ }
+
+ Value *operator=(Value *RHS) {
+ if (VP.getPointer() == RHS) return RHS;
+ if (isValid(VP.getPointer())) RemoveFromUseList();
+ VP.setPointer(RHS);
+ if (isValid(VP.getPointer())) AddToUseList();
+ return RHS;
+ }
+
+ Value *operator=(const ValueHandleBase &RHS) {
+ if (VP.getPointer() == RHS.VP.getPointer()) return RHS.VP.getPointer();
+ if (isValid(VP.getPointer())) RemoveFromUseList();
+ VP.setPointer(RHS.VP.getPointer());
+ if (isValid(VP.getPointer())) AddToExistingUseList(RHS.getPrevPtr());
+ return VP.getPointer();
+ }
+
+ Value *operator->() const { return getValPtr(); }
+ Value &operator*() const { return *getValPtr(); }
+
+protected:
+ Value *getValPtr() const { return VP.getPointer(); }
+
+ void setValPtrInt(unsigned K) { VP.setInt(K); }
+ unsigned getValPtrInt() const { return VP.getInt(); }
+
+ static bool isValid(Value *V) {
+ return V &&
+ V != DenseMapInfo<Value *>::getEmptyKey() &&
+ V != DenseMapInfo<Value *>::getTombstoneKey();
+ }
+
+public:
+ // Callbacks made from Value.
+ static void ValueIsDeleted(Value *V);
+ static void ValueIsRAUWd(Value *Old, Value *New);
+
+private:
+ // Internal implementation details.
+ ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
+ HandleBaseKind getKind() const { return PrevPair.getInt(); }
+ void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); }
+
+ /// AddToExistingUseList - Add this ValueHandle to the use list for VP, where
+ /// List is the address of either the head of the list or a Next node within
+ /// the existing use list.
+ void AddToExistingUseList(ValueHandleBase **List);
+
+ /// AddToExistingUseListAfter - Add this ValueHandle to the use list after
+ /// Node.
+ void AddToExistingUseListAfter(ValueHandleBase *Node);
+
+ /// AddToUseList - Add this ValueHandle to the use list for VP.
+ void AddToUseList();
+ /// RemoveFromUseList - Remove this ValueHandle from its current use list.
+ void RemoveFromUseList();
+};
+
+/// WeakVH - This is a value handle that tries hard to point to a Value, even
+/// across RAUW operations, but will null itself out if the value is destroyed.
+/// This is useful for advisory sorts of information, but should not be used as
+/// the key of a map (since the map would have to rearrange itself when the
+/// pointer changes).
+class WeakVH : public ValueHandleBase {
+public:
+ WeakVH() : ValueHandleBase(Weak) {}
+ WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
+ WeakVH(const WeakVH &RHS)
+ : ValueHandleBase(Weak, RHS) {}
+
+ Value *operator=(Value *RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+ Value *operator=(const ValueHandleBase &RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+
+ operator Value*() const {
+ return getValPtr();
+ }
+};
+
+// Specialize simplify_type to allow WeakVH to participate in
+// dyn_cast, isa, etc.
+template<> struct simplify_type<WeakVH> {
+ typedef Value* SimpleType;
+ static SimpleType getSimplifiedValue(WeakVH &WVH) {
+ return WVH;
+ }
+};
+
+/// AssertingVH - This is a Value Handle that points to a value and asserts out
+/// if the value is destroyed while the handle is still live. This is very
+/// useful for catching dangling pointer bugs and other things which can be
+/// non-obvious. One particularly useful place to use this is as the Key of a
+/// map. Dangling pointer bugs often lead to really subtle bugs that only occur
+/// if another object happens to get allocated to the same address as the old
+/// one. Using an AssertingVH ensures that an assert is triggered as soon as
+/// the bad delete occurs.
+///
+/// Note that an AssertingVH handle does *not* follow values across RAUW
+/// operations. This means that RAUW's need to explicitly update the
+/// AssertingVH's as it moves. This is required because in non-assert mode this
+/// class turns into a trivial wrapper around a pointer.
+template <typename ValueTy>
+class AssertingVH
+#ifndef NDEBUG
+ : public ValueHandleBase
+#endif
+ {
+
+#ifndef NDEBUG
+ ValueTy *getValPtr() const {
+ return static_cast<ValueTy*>(ValueHandleBase::getValPtr());
+ }
+ void setValPtr(ValueTy *P) {
+ ValueHandleBase::operator=(GetAsValue(P));
+ }
+#else
+ ValueTy *ThePtr;
+ ValueTy *getValPtr() const { return ThePtr; }
+ void setValPtr(ValueTy *P) { ThePtr = P; }
+#endif
+
+ // Convert a ValueTy*, which may be const, to the type the base
+ // class expects.
+ static Value *GetAsValue(Value *V) { return V; }
+ static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+public:
+#ifndef NDEBUG
+ AssertingVH() : ValueHandleBase(Assert) {}
+ AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {}
+ AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {}
+#else
+ AssertingVH() : ThePtr(0) {}
+ AssertingVH(ValueTy *P) : ThePtr(P) {}
+#endif
+
+ operator ValueTy*() const {
+ return getValPtr();
+ }
+
+ ValueTy *operator=(ValueTy *RHS) {
+ setValPtr(RHS);
+ return getValPtr();
+ }
+ ValueTy *operator=(const AssertingVH<ValueTy> &RHS) {
+ setValPtr(RHS.getValPtr());
+ return getValPtr();
+ }
+
+ ValueTy *operator->() const { return getValPtr(); }
+ ValueTy &operator*() const { return *getValPtr(); }
+};
+
+// Specialize DenseMapInfo to allow AssertingVH to participate in DenseMap.
+template<typename T>
+struct DenseMapInfo<AssertingVH<T> > {
+ typedef DenseMapInfo<T*> PointerInfo;
+ static inline AssertingVH<T> getEmptyKey() {
+ return AssertingVH<T>(PointerInfo::getEmptyKey());
+ }
+ static inline T* getTombstoneKey() {
+ return AssertingVH<T>(PointerInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const AssertingVH<T> &Val) {
+ return PointerInfo::getHashValue(Val);
+ }
+ static bool isEqual(const AssertingVH<T> &LHS, const AssertingVH<T> &RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <typename T>
+struct isPodLike<AssertingVH<T> > {
+#ifdef NDEBUG
+ static const bool value = true;
+#else
+ static const bool value = false;
+#endif
+};
+
+
+/// TrackingVH - This is a value handle that tracks a Value (or Value subclass),
+/// even across RAUW operations.
+///
+/// TrackingVH is designed for situations where a client needs to hold a handle
+/// to a Value (or subclass) across some operations which may move that value,
+/// but should never destroy it or replace it with some unacceptable type.
+///
+/// It is an error to do anything with a TrackingVH whose value has been
+/// destroyed, except to destruct it.
+///
+/// It is an error to attempt to replace a value with one of a type which is
+/// incompatible with any of its outstanding TrackingVHs.
+template<typename ValueTy>
+class TrackingVH : public ValueHandleBase {
+ void CheckValidity() const {
+ Value *VP = ValueHandleBase::getValPtr();
+
+ // Null is always ok.
+ if (!VP) return;
+
+ // Check that this value is valid (i.e., it hasn't been deleted). We
+ // explicitly delay this check until access to avoid requiring clients to be
+ // unnecessarily careful w.r.t. destruction.
+ assert(ValueHandleBase::isValid(VP) && "Tracked Value was deleted!");
+
+ // Check that the value is a member of the correct subclass. We would like
+ // to check this property on assignment for better debugging, but we don't
+ // want to require a virtual interface on this VH. Instead we allow RAUW to
+ // replace this value with a value of an invalid type, and check it here.
+ assert(isa<ValueTy>(VP) &&
+ "Tracked Value was replaced by one with an invalid type!");
+ }
+
+ ValueTy *getValPtr() const {
+ CheckValidity();
+ return (ValueTy*)ValueHandleBase::getValPtr();
+ }
+ void setValPtr(ValueTy *P) {
+ CheckValidity();
+ ValueHandleBase::operator=(GetAsValue(P));
+ }
+
+ // Convert a ValueTy*, which may be const, to the type the base
+ // class expects.
+ static Value *GetAsValue(Value *V) { return V; }
+ static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+public:
+ TrackingVH() : ValueHandleBase(Tracking) {}
+ TrackingVH(ValueTy *P) : ValueHandleBase(Tracking, GetAsValue(P)) {}
+ TrackingVH(const TrackingVH &RHS) : ValueHandleBase(Tracking, RHS) {}
+
+ operator ValueTy*() const {
+ return getValPtr();
+ }
+
+ ValueTy *operator=(ValueTy *RHS) {
+ setValPtr(RHS);
+ return getValPtr();
+ }
+ ValueTy *operator=(const TrackingVH<ValueTy> &RHS) {
+ setValPtr(RHS.getValPtr());
+ return getValPtr();
+ }
+
+ ValueTy *operator->() const { return getValPtr(); }
+ ValueTy &operator*() const { return *getValPtr(); }
+};
+
+/// CallbackVH - This is a value handle that allows subclasses to define
+/// callbacks that run when the underlying Value has RAUW called on it or is
+/// destroyed. This class can be used as the key of a map, as long as the user
+/// takes it out of the map before calling setValPtr() (since the map has to
+/// rearrange itself when the pointer changes). Unlike ValueHandleBase, this
+/// class has a vtable and a virtual destructor.
+class CallbackVH : public ValueHandleBase {
+ virtual void anchor();
+protected:
+ CallbackVH(const CallbackVH &RHS)
+ : ValueHandleBase(Callback, RHS) {}
+
+ virtual ~CallbackVH() {}
+
+ void setValPtr(Value *P) {
+ ValueHandleBase::operator=(P);
+ }
+
+public:
+ CallbackVH() : ValueHandleBase(Callback) {}
+ CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}
+
+ operator Value*() const {
+ return getValPtr();
+ }
+
+ /// Called when this->getValPtr() is destroyed, inside ~Value(), so you may
+ /// call any non-virtual Value method on getValPtr(), but no subclass methods.
+ /// If WeakVH were implemented as a CallbackVH, it would use this method to
+ /// call setValPtr(NULL). AssertingVH would use this method to cause an
+ /// assertion failure.
+ ///
+ /// All implementations must remove the reference from this object to the
+ /// Value that's being destroyed.
+ virtual void deleted() { setValPtr(NULL); }
+
+ /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
+ /// _before_ any of the uses have actually been replaced. If WeakVH were
+ /// implemented as a CallbackVH, it would use this method to call
+ /// setValPtr(new_value). AssertingVH would do nothing in this method.
+ virtual void allUsesReplacedWith(Value *) {}
+};
+
+} // End llvm namespace
+
+#endif
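
The handle classes moved here from llvm/Support differ mainly in what happens on deletion and RAUW: WeakVH nulls itself, AssertingVH asserts, TrackingVH follows the value, and CallbackVH defers to a subclass. A minimal sketch of WeakVH as a self-invalidating cache slot (the struct is hypothetical):

    #include "llvm/IR/ValueHandle.h"

    using namespace llvm;

    // Hypothetical cache entry: the WeakVH follows its value across RAUW and
    // becomes null once the value is destroyed, so staleness is detectable.
    struct CachedValue {
      WeakVH Handle;

      bool isAlive() const {
        Value *V = Handle; // implicit conversion; null after the value dies
        return V != 0;
      }
    };
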
diff --git a/include/llvm/IR/ValueMap.h b/include/llvm/IR/ValueMap.h
new file mode 100644
index 0000000..42da529
--- /dev/null
+++ b/include/llvm/IR/ValueMap.h
@@ -0,0 +1,377 @@
+//===- ValueMap.h - Safe map from Values to data ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ValueMap class. ValueMap maps Value* or any subclass
+// to an arbitrary other type. It provides the DenseMap interface but updates
+// itself to remain safe when keys are RAUWed or deleted. By default, when a
+// key is RAUWed from V1 to V2, the old mapping V1->target is removed, and a new
+// mapping V2->target is added. If V2 already existed, its old target is
+// overwritten. When a key is deleted, its mapping is removed.
+//
+// You can override a ValueMap's Config parameter to control exactly what
+// happens on RAUW and destruction and to get called back on each event. It's
+// legal to call back into the ValueMap from a Config's callbacks. Config
+// parameters should inherit from ValueMapConfig<KeyT> to get default
+// implementations of all the methods ValueMap uses. See ValueMapConfig for
+// documentation of the functions you can override.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEMAP_H
+#define LLVM_IR_VALUEMAP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/type_traits.h"
+#include <iterator>
+
+namespace llvm {
+
+template<typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH;
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator;
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator;
+
+/// This class defines the default behavior for configurable aspects of
+/// ValueMap<>. User Configs should inherit from this class to be as compatible
+/// as possible with future versions of ValueMap.
+template<typename KeyT>
+struct ValueMapConfig {
+ /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
+ /// false, the ValueMap will leave the original mapping in place.
+ enum { FollowRAUW = true };
+
+ // All methods will be called with a first argument of type ExtraData. The
+ // default implementations in this class take a templated first argument so
+ // that users' subclasses can use any type they want without having to
+ // override all the defaults.
+ struct ExtraData {};
+
+ template<typename ExtraDataT>
+ static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
+ template<typename ExtraDataT>
+ static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
+
+ /// Returns a mutex that should be acquired around any changes to the map.
+ /// This is only acquired from the CallbackVH (and held around calls to onRAUW
+ /// and onDelete) and not inside other ValueMap methods. NULL means that no
+ /// mutex is necessary.
+ template<typename ExtraDataT>
+ static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
+};
+
+/// See the file comment.
+template<typename KeyT, typename ValueT,
+         typename Config = ValueMapConfig<KeyT> >
+class ValueMap {
+ friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> ValueMapCVH;
+ typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH> > MapT;
+ typedef typename Config::ExtraData ExtraData;
+ MapT Map;
+ ExtraData Data;
+ ValueMap(const ValueMap&) LLVM_DELETED_FUNCTION;
+ ValueMap& operator=(const ValueMap&) LLVM_DELETED_FUNCTION;
+public:
+ typedef KeyT key_type;
+ typedef ValueT mapped_type;
+ typedef std::pair<KeyT, ValueT> value_type;
+
+ explicit ValueMap(unsigned NumInitBuckets = 64)
+ : Map(NumInitBuckets), Data() {}
+ explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
+ : Map(NumInitBuckets), Data(Data) {}
+
+ ~ValueMap() {}
+
+ typedef ValueMapIterator<MapT, KeyT> iterator;
+ typedef ValueMapConstIterator<MapT, KeyT> const_iterator;
+ inline iterator begin() { return iterator(Map.begin()); }
+ inline iterator end() { return iterator(Map.end()); }
+ inline const_iterator begin() const { return const_iterator(Map.begin()); }
+ inline const_iterator end() const { return const_iterator(Map.end()); }
+
+ bool empty() const { return Map.empty(); }
+ unsigned size() const { return Map.size(); }
+
+  /// Grow the map so that it has at least Size buckets. Does not shrink.
+ void resize(size_t Size) { Map.resize(Size); }
+
+ void clear() { Map.clear(); }
+
+ /// count - Return true if the specified key is in the map.
+ bool count(const KeyT &Val) const {
+ return Map.find_as(Val) != Map.end();
+ }
+
+ iterator find(const KeyT &Val) {
+ return iterator(Map.find_as(Val));
+ }
+ const_iterator find(const KeyT &Val) const {
+ return const_iterator(Map.find_as(Val));
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Val) const {
+ typename MapT::const_iterator I = Map.find_as(Val);
+ return I != Map.end() ? I->second : ValueT();
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+    std::pair<typename MapT::iterator, bool> map_result =
+ Map.insert(std::make_pair(Wrap(KV.first), KV.second));
+ return std::make_pair(iterator(map_result.first), map_result.second);
+ }
+
+ /// insert - Range insertion of pairs.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+
+ bool erase(const KeyT &Val) {
+ typename MapT::iterator I = Map.find_as(Val);
+ if (I == Map.end())
+ return false;
+
+ Map.erase(I);
+ return true;
+ }
+ void erase(iterator I) {
+ return Map.erase(I.base());
+ }
+
+ value_type& FindAndConstruct(const KeyT &Key) {
+ return Map.FindAndConstruct(Wrap(Key));
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ return Map[Wrap(Key)];
+ }
+
+ /// isPointerIntoBucketsArray - Return true if the specified pointer points
+ /// somewhere into the ValueMap's array of buckets (i.e. either to a key or
+ /// value in the ValueMap).
+ bool isPointerIntoBucketsArray(const void *Ptr) const {
+ return Map.isPointerIntoBucketsArray(Ptr);
+ }
+
+ /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+ /// array. In conjunction with the previous method, this can be used to
+ /// determine whether an insertion caused the ValueMap to reallocate.
+ const void *getPointerIntoBucketsArray() const {
+ return Map.getPointerIntoBucketsArray();
+ }
+
+private:
+ // Takes a key being looked up in the map and wraps it into a
+ // ValueMapCallbackVH, the actual key type of the map. We use a helper
+ // function because ValueMapCVH is constructed with a second parameter.
+ ValueMapCVH Wrap(KeyT key) const {
+ // The only way the resulting CallbackVH could try to modify *this (making
+ // the const_cast incorrect) is if it gets inserted into the map. But then
+ // this function must have been called from a non-const method, making the
+ // const_cast ok.
+ return ValueMapCVH(key, const_cast<ValueMap*>(this));
+ }
+};
+
+// This CallbackVH updates its ValueMap when the contained Value changes,
+// according to the user's preferences expressed through the Config object.
+template<typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH : public CallbackVH {
+ friend class ValueMap<KeyT, ValueT, Config>;
+ friend struct DenseMapInfo<ValueMapCallbackVH>;
+ typedef ValueMap<KeyT, ValueT, Config> ValueMapT;
+ typedef typename std::remove_pointer<KeyT>::type KeySansPointerT;
+
+ ValueMapT *Map;
+
+ ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
+ : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
+ Map(Map) {}
+
+public:
+ KeyT Unwrap() const { return cast_or_null<KeySansPointerT>(getValPtr()); }
+
+ void deleted() override {
+ // Make a copy that won't get changed even when *this is destroyed.
+ ValueMapCallbackVH Copy(*this);
+ sys::Mutex *M = Config::getMutex(Copy.Map->Data);
+ if (M)
+ M->acquire();
+ Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
+ Copy.Map->Map.erase(Copy); // Definitely destroys *this.
+ if (M)
+ M->release();
+ }
+ void allUsesReplacedWith(Value *new_key) override {
+ assert(isa<KeySansPointerT>(new_key) &&
+ "Invalid RAUW on key of ValueMap<>");
+ // Make a copy that won't get changed even when *this is destroyed.
+ ValueMapCallbackVH Copy(*this);
+ sys::Mutex *M = Config::getMutex(Copy.Map->Data);
+ if (M)
+ M->acquire();
+
+ KeyT typed_new_key = cast<KeySansPointerT>(new_key);
+ // Can destroy *this:
+ Config::onRAUW(Copy.Map->Data, Copy.Unwrap(), typed_new_key);
+ if (Config::FollowRAUW) {
+ typename ValueMapT::MapT::iterator I = Copy.Map->Map.find(Copy);
+ // I could == Copy.Map->Map.end() if the onRAUW callback already
+ // removed the old mapping.
+ if (I != Copy.Map->Map.end()) {
+ ValueT Target(I->second);
+ Copy.Map->Map.erase(I); // Definitely destroys *this.
+ Copy.Map->insert(std::make_pair(typed_new_key, Target));
+ }
+ }
+ if (M)
+ M->release();
+ }
+};
+
+template<typename KeyT, typename ValueT, typename Config>
+struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> VH;
+ typedef DenseMapInfo<KeyT> PointerInfo;
+
+ static inline VH getEmptyKey() {
+ return VH(PointerInfo::getEmptyKey(), NULL);
+ }
+ static inline VH getTombstoneKey() {
+ return VH(PointerInfo::getTombstoneKey(), NULL);
+ }
+ static unsigned getHashValue(const VH &Val) {
+ return PointerInfo::getHashValue(Val.Unwrap());
+ }
+ static unsigned getHashValue(const KeyT &Val) {
+ return PointerInfo::getHashValue(Val);
+ }
+ static bool isEqual(const VH &LHS, const VH &RHS) {
+ return LHS == RHS;
+ }
+ static bool isEqual(const KeyT &LHS, const VH &RHS) {
+ return LHS == RHS.getValPtr();
+ }
+};
+
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator :
+ public std::iterator<std::forward_iterator_tag,
+ std::pair<KeyT, typename DenseMapT::mapped_type>,
+ ptrdiff_t> {
+ typedef typename DenseMapT::iterator BaseT;
+ typedef typename DenseMapT::mapped_type ValueT;
+ BaseT I;
+public:
+ ValueMapIterator() : I() {}
+
+ ValueMapIterator(BaseT I) : I(I) {}
+
+ BaseT base() const { return I; }
+
+ struct ValueTypeProxy {
+ const KeyT first;
+ ValueT& second;
+ ValueTypeProxy *operator->() { return this; }
+ operator std::pair<KeyT, ValueT>() const {
+ return std::make_pair(first, second);
+ }
+ };
+
+ ValueTypeProxy operator*() const {
+ ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+ return Result;
+ }
+
+ ValueTypeProxy operator->() const {
+ return operator*();
+ }
+
+ bool operator==(const ValueMapIterator &RHS) const {
+ return I == RHS.I;
+ }
+ bool operator!=(const ValueMapIterator &RHS) const {
+ return I != RHS.I;
+ }
+
+ inline ValueMapIterator& operator++() { // Preincrement
+ ++I;
+ return *this;
+ }
+ ValueMapIterator operator++(int) { // Postincrement
+ ValueMapIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator :
+ public std::iterator<std::forward_iterator_tag,
+ std::pair<KeyT, typename DenseMapT::mapped_type>,
+ ptrdiff_t> {
+ typedef typename DenseMapT::const_iterator BaseT;
+ typedef typename DenseMapT::mapped_type ValueT;
+ BaseT I;
+public:
+ ValueMapConstIterator() : I() {}
+ ValueMapConstIterator(BaseT I) : I(I) {}
+ ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
+ : I(Other.base()) {}
+
+ BaseT base() const { return I; }
+
+ struct ValueTypeProxy {
+ const KeyT first;
+ const ValueT& second;
+ ValueTypeProxy *operator->() { return this; }
+ operator std::pair<KeyT, ValueT>() const {
+ return std::make_pair(first, second);
+ }
+ };
+
+ ValueTypeProxy operator*() const {
+ ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+ return Result;
+ }
+
+ ValueTypeProxy operator->() const {
+ return operator*();
+ }
+
+ bool operator==(const ValueMapConstIterator &RHS) const {
+ return I == RHS.I;
+ }
+ bool operator!=(const ValueMapConstIterator &RHS) const {
+ return I != RHS.I;
+ }
+
+ inline ValueMapConstIterator& operator++() { // Preincrement
+ ++I;
+ return *this;
+ }
+ ValueMapConstIterator operator++(int) { // Postincrement
+ ValueMapConstIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+} // end namespace llvm
+
+#endif
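
A minimal sketch of the behavior the file comment describes, where an entry follows its key across RAUW and disappears when the key is deleted (the helpers are illustrative, not part of the patch):

    #include "llvm/IR/Value.h"
    #include "llvm/IR/ValueMap.h"

    using namespace llvm;

    // Hypothetical numbering table: the key is wrapped in a ValueMapCallbackVH,
    // so the mapping moves with V on RAUW and is erased when V is deleted.
    static void rememberId(ValueMap<Value *, unsigned> &Ids, Value *V,
                           unsigned Id) {
      Ids[V] = Id;
    }

    static unsigned lookupId(const ValueMap<Value *, unsigned> &Ids, Value *V) {
      return Ids.lookup(V); // default-constructed 0 if no live entry for V
    }
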
diff --git a/include/llvm/IR/Verifier.h b/include/llvm/IR/Verifier.h
new file mode 100644
index 0000000..9a2f402
--- /dev/null
+++ b/include/llvm/IR/Verifier.h
@@ -0,0 +1,75 @@
+//===- Verifier.h - LLVM IR Verifier ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function verifier interface, which can be used for some
+// sanity checking of input to the system, and for checking that transformations
+// haven't done something bad.
+//
+// Note that this does not provide full 'Java-style' security and verification;
+// it just tries to ensure that the code is well formed.
+//
+// To see what specifically is checked, look at the top of Verifier.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VERIFIER_H
+#define LLVM_IR_VERIFIER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class Function;
+class FunctionPass;
+class Module;
+class PreservedAnalyses;
+class raw_ostream;
+
+/// \brief Check a function for errors; useful when debugging a pass.
+///
+/// If there are no errors, the function returns false. If an error is found,
+/// a message describing the error is written to OS (if non-null) and true is
+/// returned.
+bool verifyFunction(const Function &F, raw_ostream *OS = 0);
+
+/// \brief Check a module for errors.
+///
+/// If there are no errors, the function returns false. If an error is found,
+/// a message describing the error is written to OS (if non-null) and true is
+/// returned.
+bool verifyModule(const Module &M, raw_ostream *OS = 0);
+
+/// \brief Create a verifier pass.
+///
+/// Check a module or function for validity. This is essentially a pass wrapped
+/// around the above verifyFunction and verifyModule routines and
+/// functionality. When the pass detects a verification error, it is always
+/// printed to stderr, and by default such errors are fatal. You can override
+/// that by passing \c false to \p FatalErrors.
+///
+/// Note that this creates a pass suitable for the legacy pass manager. It has
+/// nothing to do with \c VerifierPass.
+FunctionPass *createVerifierPass(bool FatalErrors = true);
+
+class VerifierPass {
+ bool FatalErrors;
+
+public:
+ explicit VerifierPass(bool FatalErrors = true) : FatalErrors(FatalErrors) {}
+
+ PreservedAnalyses run(Module *M);
+ PreservedAnalyses run(Function *F);
+
+ static StringRef name() { return "VerifierPass"; }
+};
+
+} // End llvm namespace
+
+#endif
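
Both free functions use an inverted return sense, true meaning an error was found. A quick sketch of a post-transformation check (the helper is illustrative; errs() is just one possible stream):

    #include "llvm/IR/Module.h"
    #include "llvm/IR/Verifier.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Hypothetical sanity check after running a transformation: returns true
    // and writes a description of the problem to stderr if the module is
    // malformed.
    static bool moduleIsBroken(const Module &M) {
      return verifyModule(M, &errs());
    }
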