diff options
Diffstat (limited to 'include')
194 files changed, 8929 insertions, 2537 deletions
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h index 3ffaddd..e0195e0 100644 --- a/include/llvm-c/Core.h +++ b/include/llvm-c/Core.h @@ -119,6 +119,11 @@ typedef enum { LLVMReturnsTwice = 1 << 29, LLVMUWTable = 1 << 30, LLVMNonLazyBind = 1 << 31 + + // FIXME: This attribute is currently not included in the C API as + // a temporary measure until the API/ABI impact to the C API is understood + // and the path forward agreed upon. + //LLVMAddressSafety = 1ULL << 32 } LLVMAttribute; typedef enum { @@ -195,14 +200,13 @@ typedef enum { /* Exception Handling Operators */ LLVMResume = 58, - LLVMLandingPad = 59, - LLVMUnwind = 60 - + LLVMLandingPad = 59 } LLVMOpcode; typedef enum { LLVMVoidTypeKind, /**< type with no size */ + LLVMHalfTypeKind, /**< 16 bit floating point type */ LLVMFloatTypeKind, /**< 32 bit floating point type */ LLVMDoubleTypeKind, /**< 64 bit floating point type */ LLVMX86_FP80TypeKind, /**< 80 bit floating point type (X87) */ @@ -382,12 +386,14 @@ LLVMTypeRef LLVMIntType(unsigned NumBits); unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy); /* Operations on real types */ +LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C); LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C); LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C); LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C); LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C); LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C); +LLVMTypeRef LLVMHalfType(void); LLVMTypeRef LLVMFloatType(void); LLVMTypeRef LLVMDoubleType(void); LLVMTypeRef LLVMX86FP80Type(void); @@ -473,8 +479,6 @@ LLVMTypeRef LLVMX86MMXType(void); macro(IntrinsicInst) \ macro(DbgInfoIntrinsic) \ macro(DbgDeclareInst) \ - macro(EHExceptionInst) \ - macro(EHSelectorInst) \ macro(MemIntrinsic) \ macro(MemCpyInst) \ macro(MemMoveInst) \ @@ -564,6 +568,8 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count); const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len); unsigned 
LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name); void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest); +void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name, + LLVMValueRef Val); /* Operations on scalar constants */ LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N, diff --git a/include/llvm-c/Initialization.h b/include/llvm-c/Initialization.h index 3b59abb..cbe60df 100644 --- a/include/llvm-c/Initialization.h +++ b/include/llvm-c/Initialization.h @@ -25,6 +25,7 @@ extern "C" { void LLVMInitializeCore(LLVMPassRegistryRef R); void LLVMInitializeTransformUtils(LLVMPassRegistryRef R); void LLVMInitializeScalarOpts(LLVMPassRegistryRef R); +void LLVMInitializeVectorization(LLVMPassRegistryRef R); void LLVMInitializeInstCombine(LLVMPassRegistryRef R); void LLVMInitializeIPO(LLVMPassRegistryRef R); void LLVMInitializeInstrumentation(LLVMPassRegistryRef R); diff --git a/include/llvm-c/Transforms/Vectorize.h b/include/llvm-c/Transforms/Vectorize.h new file mode 100644 index 0000000..ce92eaa --- /dev/null +++ b/include/llvm-c/Transforms/Vectorize.h @@ -0,0 +1,37 @@ +/*===---------------------------Vectorize.h --------------------- -*- C -*-===*\ +|*===----------- Vectorization Transformation Library C Interface ---------===*| +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This header declares the C interface to libLLVMVectorize.a, which *| +|* implements various vectorization transformations of the LLVM IR. *| +|* *| +|* Many exotic languages can interoperate with C code but have a harder time *| +|* with C++ due to name mangling. So in addition to C, this interface enables *| +|* tools written in such languages. 
*| +|* *| +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_C_TRANSFORMS_VECTORIZE_H +#define LLVM_C_TRANSFORMS_VECTORIZE_H + +#include "llvm-c/Core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** See llvm::createBBVectorizePass function. */ +void LLVMAddBBVectorizePass(LLVMPassManagerRef PM); + +#ifdef __cplusplus +} +#endif /* defined(__cplusplus) */ + +#endif + diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h index d2566a4..d40727d 100644 --- a/include/llvm/ADT/APFloat.h +++ b/include/llvm/ADT/APFloat.h @@ -328,8 +328,16 @@ namespace llvm { APFloat& operator=(const APFloat &); - /* Return an arbitrary integer value usable for hashing. */ - uint32_t getHashValue() const; + /// \brief Overload to compute a hash code for an APFloat value. + /// + /// Note that the use of hash codes for floating point values is in general + /// frought with peril. Equality is hard to define for these values. For + /// example, should negative and positive zero hash to different codes? Are + /// they equal or not? This hash value implementation specifically + /// emphasizes producing different codes for different inputs in order to + /// be used in canonicalization and memoization. As such, equality is + /// bitwiseIsEqual, and 0 != -0. + friend hash_code hash_value(const APFloat &Arg); /// Converts this value into a decimal string. /// diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h index 78119bc..b08564c 100644 --- a/include/llvm/ADT/APInt.h +++ b/include/llvm/ADT/APInt.h @@ -23,11 +23,12 @@ #include <string> namespace llvm { - class Serializer; class Deserializer; class FoldingSetNodeID; - class raw_ostream; + class Serializer; class StringRef; + class hash_code; + class raw_ostream; template<typename T> class SmallVectorImpl; @@ -502,10 +503,8 @@ public: return getAllOnesValue(numBits).lshr(numBits - loBitsSet); } - /// The hash value is computed as the sum of the words and the bit width. 
- /// @returns A hash value computed from the sum of the APInt words. - /// @brief Get a hash value based on this APInt - uint64_t getHashValue() const; + /// \brief Overload to compute a hash_code for an APInt value. + friend hash_code hash_value(const APInt &Arg); /// This function returns a pointer to the internal storage of the APInt. /// This is useful for writing out the APInt in binary form without any @@ -1056,6 +1055,16 @@ public: /// @brief Zero extend or truncate to width APInt zextOrTrunc(unsigned width) const; + /// Make this APInt have the bit width given by \p width. The value is sign + /// extended, or left alone to make it that width. + /// @brief Sign extend or truncate to width + APInt sextOrSelf(unsigned width) const; + + /// Make this APInt have the bit width given by \p width. The value is zero + /// extended, or left alone to make it that width. + /// @brief Zero extend or truncate to width + APInt zextOrSelf(unsigned width) const; + /// @} /// @name Bit Manipulation Operators /// @{ diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h index 33a8c65..f4c8e55 100644 --- a/include/llvm/ADT/ArrayRef.h +++ b/include/llvm/ADT/ArrayRef.h @@ -14,8 +14,7 @@ #include <vector> namespace llvm { - class APInt; - + /// ArrayRef - Represent a constant reference to an array (0 or more elements /// consecutively in memory), i.e. a start pointer and a length. It allows /// various APIs to take consecutive elements easily and conveniently. @@ -33,33 +32,33 @@ namespace llvm { typedef const T *iterator; typedef const T *const_iterator; typedef size_t size_type; - + private: /// The start of the array, in an external buffer. const T *Data; - + /// The number of elements. size_type Length; - + public: /// @name Constructors /// @{ - + /// Construct an empty ArrayRef. /*implicit*/ ArrayRef() : Data(0), Length(0) {} - + /// Construct an ArrayRef from a single element. 
/*implicit*/ ArrayRef(const T &OneElt) : Data(&OneElt), Length(1) {} - + /// Construct an ArrayRef from a pointer and length. /*implicit*/ ArrayRef(const T *data, size_t length) : Data(data), Length(length) {} - + /// Construct an ArrayRef from a range. ArrayRef(const T *begin, const T *end) : Data(begin), Length(end - begin) {} - + /// Construct an ArrayRef from a SmallVector. /*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec) : Data(Vec.data()), Length(Vec.size()) {} @@ -67,39 +66,39 @@ namespace llvm { /// Construct an ArrayRef from a std::vector. /*implicit*/ ArrayRef(const std::vector<T> &Vec) : Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {} - + /// Construct an ArrayRef from a C array. template <size_t N> /*implicit*/ ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {} - + /// @} /// @name Simple Operations /// @{ iterator begin() const { return Data; } iterator end() const { return Data + Length; } - + /// empty - Check if the array is empty. bool empty() const { return Length == 0; } - + const T *data() const { return Data; } - + /// size - Get the array size. size_t size() const { return Length; } - + /// front - Get the first element. const T &front() const { assert(!empty()); return Data[0]; } - + /// back - Get the last element. const T &back() const { assert(!empty()); return Data[Length-1]; } - + /// equals - Check for element-wise equality. bool equals(ArrayRef RHS) const { if (Length != RHS.Length) @@ -111,18 +110,18 @@ namespace llvm { } /// slice(n) - Chop off the first N elements of the array. - ArrayRef<T> slice(unsigned N) { + ArrayRef<T> slice(unsigned N) const { assert(N <= size() && "Invalid specifier"); return ArrayRef<T>(data()+N, size()-N); } /// slice(n, m) - Chop off the first N elements of the array, and keep M /// elements in the array. 
- ArrayRef<T> slice(unsigned N, unsigned M) { + ArrayRef<T> slice(unsigned N, unsigned M) const { assert(N+M <= size() && "Invalid specifier"); return ArrayRef<T>(data()+N, M); } - + /// @} /// @name Operator Overloads /// @{ @@ -130,22 +129,104 @@ namespace llvm { assert(Index < Length && "Invalid index!"); return Data[Index]; } - + /// @} /// @name Expensive Operations /// @{ std::vector<T> vec() const { return std::vector<T>(Data, Data+Length); } - + /// @} /// @name Conversion operators /// @{ operator std::vector<T>() const { return std::vector<T>(Data, Data+Length); } + + /// @} + }; + + /// MutableArrayRef - Represent a mutable reference to an array (0 or more + /// elements consecutively in memory), i.e. a start pointer and a length. It + /// allows various APIs to take and modify consecutive elements easily and + /// conveniently. + /// + /// This class does not own the underlying data, it is expected to be used in + /// situations where the data resides in some other buffer, whose lifetime + /// extends past that of the MutableArrayRef. For this reason, it is not in + /// general safe to store a MutableArrayRef. + /// + /// This is intended to be trivially copyable, so it should be passed by + /// value. + template<typename T> + class MutableArrayRef : public ArrayRef<T> { + public: + typedef T *iterator; + + /// Construct an empty ArrayRef. + /*implicit*/ MutableArrayRef() : ArrayRef<T>() {} + + /// Construct an MutableArrayRef from a single element. + /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {} + + /// Construct an MutableArrayRef from a pointer and length. + /*implicit*/ MutableArrayRef(T *data, size_t length) + : ArrayRef<T>(data, length) {} + + /// Construct an MutableArrayRef from a range. + MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {} + + /// Construct an MutableArrayRef from a SmallVector. 
+ /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec) + : ArrayRef<T>(Vec) {} + + /// Construct a MutableArrayRef from a std::vector. + /*implicit*/ MutableArrayRef(std::vector<T> &Vec) + : ArrayRef<T>(Vec) {} + + /// Construct an MutableArrayRef from a C array. + template <size_t N> + /*implicit*/ MutableArrayRef(T (&Arr)[N]) + : ArrayRef<T>(Arr) {} + + T *data() const { return const_cast<T*>(ArrayRef<T>::data()); } + + iterator begin() const { return data(); } + iterator end() const { return data() + this->size(); } + + /// front - Get the first element. + T &front() const { + assert(!this->empty()); + return data()[0]; + } + + /// back - Get the last element. + T &back() const { + assert(!this->empty()); + return data()[this->size()-1]; + } + + /// slice(n) - Chop off the first N elements of the array. + MutableArrayRef<T> slice(unsigned N) const { + assert(N <= this->size() && "Invalid specifier"); + return MutableArrayRef<T>(data()+N, this->size()-N); + } + + /// slice(n, m) - Chop off the first N elements of the array, and keep M + /// elements in the array. 
+ MutableArrayRef<T> slice(unsigned N, unsigned M) const { + assert(N+M <= this->size() && "Invalid specifier"); + return MutableArrayRef<T>(data()+N, M); + } /// @} + /// @name Operator Overloads + /// @{ + T &operator[](size_t Index) const { + assert(Index < this->size() && "Invalid index!"); + return data()[Index]; + } }; /// @name ArrayRef Convenience constructors @@ -215,5 +296,5 @@ namespace llvm { static const bool value = true; }; } - + #endif diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h index ac1cf0c..7e0b5ba 100644 --- a/include/llvm/ADT/BitVector.h +++ b/include/llvm/ADT/BitVector.h @@ -14,12 +14,12 @@ #ifndef LLVM_ADT_BITVECTOR_H #define LLVM_ADT_BITVECTOR_H +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cassert> #include <climits> #include <cstdlib> -#include <cstring> namespace llvm { @@ -116,7 +116,7 @@ public: else if (sizeof(BitWord) == 8) NumBits += CountPopulation_64(Bits[i]); else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); return NumBits; } @@ -146,10 +146,9 @@ public: if (Bits[i] != 0) { if (sizeof(BitWord) == 4) return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return -1; } @@ -170,10 +169,9 @@ public: if (Copy != 0) { if (sizeof(BitWord) == 4) return WordPos * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Copy); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } // Check subsequent words. 
@@ -181,10 +179,9 @@ public: if (Bits[i] != 0) { if (sizeof(BitWord) == 4) return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return -1; } @@ -318,6 +315,16 @@ public: return *this; } + // reset - Reset bits that are set in RHS. Same as *this &= ~RHS. + BitVector &reset(const BitVector &RHS) { + unsigned ThisWords = NumBitWords(size()); + unsigned RHSWords = NumBitWords(RHS.size()); + unsigned i; + for (i = 0; i != std::min(ThisWords, RHSWords); ++i) + Bits[i] &= ~RHS.Bits[i]; + return *this; + } + BitVector &operator|=(const BitVector &RHS) { if (size() < RHS.size()) resize(RHS.size()); @@ -365,6 +372,42 @@ public: std::swap(Capacity, RHS.Capacity); } + //===--------------------------------------------------------------------===// + // Portable bit mask operations. + //===--------------------------------------------------------------------===// + // + // These methods all operate on arrays of uint32_t, each holding 32 bits. The + // fixed word size makes it easier to work with literal bit vector constants + // in portable code. + // + // The LSB in each word is the lowest numbered bit. The size of a portable + // bit mask is always a whole multiple of 32 bits. If no bit mask size is + // given, the bit mask is assumed to cover the entire BitVector. + + /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize. + /// This computes "*this |= Mask". + void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { + applyMask<true, false>(Mask, MaskWords); + } + + /// clearBitsInMask - Clear any bits in this vector that are set in Mask. + /// Don't resize. This computes "*this &= ~Mask". 
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { + applyMask<false, false>(Mask, MaskWords); + } + + /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask. + /// Don't resize. This computes "*this |= ~Mask". + void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { + applyMask<true, true>(Mask, MaskWords); + } + + /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask. + /// Don't resize. This computes "*this &= Mask". + void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { + applyMask<false, true>(Mask, MaskWords); + } + private: unsigned NumBitWords(unsigned S) const { return (S + BITWORD_SIZE-1) / BITWORD_SIZE; @@ -400,6 +443,33 @@ private: void init_words(BitWord *B, unsigned NumWords, bool t) { memset(B, 0 - (int)t, NumWords*sizeof(BitWord)); } + + template<bool AddBits, bool InvertMask> + void applyMask(const uint32_t *Mask, unsigned MaskWords) { + assert(BITWORD_SIZE % 32 == 0 && "Unsupported BitWord size."); + MaskWords = std::min(MaskWords, (size() + 31) / 32); + const unsigned Scale = BITWORD_SIZE / 32; + unsigned i; + for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) { + BitWord BW = Bits[i]; + // This inner loop should unroll completely when BITWORD_SIZE > 32. 
+ for (unsigned b = 0; b != BITWORD_SIZE; b += 32) { + uint32_t M = *Mask++; + if (InvertMask) M = ~M; + if (AddBits) BW |= BitWord(M) << b; + else BW &= ~(BitWord(M) << b); + } + Bits[i] = BW; + } + for (unsigned b = 0; MaskWords; b += 32, --MaskWords) { + uint32_t M = *Mask++; + if (InvertMask) M = ~M; + if (AddBits) Bits[i] |= BitWord(M) << b; + else Bits[i] &= ~(BitWord(M) << b); + } + if (AddBits) + clear_unused_bits(); + } }; inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) { diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h index e70cacf..672147d 100644 --- a/include/llvm/ADT/DenseMap.h +++ b/include/llvm/ADT/DenseMap.h @@ -86,13 +86,13 @@ public: return empty() ? end() : iterator(Buckets, Buckets+NumBuckets); } inline iterator end() { - return iterator(Buckets+NumBuckets, Buckets+NumBuckets); + return iterator(Buckets+NumBuckets, Buckets+NumBuckets, true); } inline const_iterator begin() const { return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets); } inline const_iterator end() const { - return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets); + return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets, true); } bool empty() const { return NumEntries == 0; } @@ -137,13 +137,33 @@ public: iterator find(const KeyT &Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return iterator(TheBucket, Buckets+NumBuckets); + return iterator(TheBucket, Buckets+NumBuckets, true); return end(); } const_iterator find(const KeyT &Val) const { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return const_iterator(TheBucket, Buckets+NumBuckets); + return const_iterator(TheBucket, Buckets+NumBuckets, true); + return end(); + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. 
+ template<class LookupKeyT> + iterator find_as(const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return iterator(TheBucket, Buckets+NumBuckets, true); + return end(); + } + template<class LookupKeyT> + const_iterator find_as(const LookupKeyT &Val) const { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, Buckets+NumBuckets, true); return end(); } @@ -162,13 +182,12 @@ public: std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) { BucketT *TheBucket; if (LookupBucketFor(KV.first, TheBucket)) - return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), + return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true), false); // Already in map. // Otherwise, insert the new element. TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); - return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), - true); + return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true), true); } /// insert - Range insertion of pairs. @@ -310,6 +329,10 @@ private: static unsigned getHashValue(const KeyT &Val) { return KeyInfoT::getHashValue(Val); } + template<typename LookupKeyT> + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); } @@ -321,7 +344,8 @@ private: /// FoundBucket. If the bucket contains the key and a value, this returns /// true, otherwise it returns a bucket with an empty marker or tombstone and /// returns false. - bool LookupBucketFor(const KeyT &Val, BucketT *&FoundBucket) const { + template<typename LookupKeyT> + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) const { unsigned BucketNo = getHashValue(Val); unsigned ProbeAmt = 1; BucketT *BucketsPtr = Buckets; @@ -342,7 +366,7 @@ private: while (1) { BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1)); // Found Val's bucket? If so, return it. 
- if (KeyInfoT::isEqual(ThisBucket->first, Val)) { + if (KeyInfoT::isEqual(Val, ThisBucket->first)) { FoundBucket = ThisBucket; return true; } @@ -495,8 +519,9 @@ private: public: DenseMapIterator() : Ptr(0), End(0) {} - DenseMapIterator(pointer Pos, pointer E) : Ptr(Pos), End(E) { - AdvancePastEmptyBuckets(); + DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) + : Ptr(Pos), End(E) { + if (!NoAdvance) AdvancePastEmptyBuckets(); } // If IsConst is true this is a converting constructor from iterator to diff --git a/include/llvm/ADT/DenseMapInfo.h b/include/llvm/ADT/DenseMapInfo.h index df4084e..1559a35 100644 --- a/include/llvm/ADT/DenseMapInfo.h +++ b/include/llvm/ADT/DenseMapInfo.h @@ -59,7 +59,7 @@ template<> struct DenseMapInfo<char> { // Provide DenseMapInfo for unsigned ints. template<> struct DenseMapInfo<unsigned> { - static inline unsigned getEmptyKey() { return ~0; } + static inline unsigned getEmptyKey() { return ~0U; } static inline unsigned getTombstoneKey() { return ~0U - 1; } static unsigned getHashValue(const unsigned& Val) { return Val * 37U; } static bool isEqual(const unsigned& LHS, const unsigned& RHS) { diff --git a/include/llvm/ADT/Hashing.h b/include/llvm/ADT/Hashing.h new file mode 100644 index 0000000..06f4ce2 --- /dev/null +++ b/include/llvm/ADT/Hashing.h @@ -0,0 +1,764 @@ +//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the newly proposed standard C++ interfaces for hashing +// arbitrary data and building hash functions for user-defined types. This +// interface was originally proposed in N3333[1] and is currently under review +// for inclusion in a future TR and/or standard. 
+// +// The primary interfaces provide are comprised of one type and three functions: +// +// -- 'hash_code' class is an opaque type representing the hash code for some +// data. It is the intended product of hashing, and can be used to implement +// hash tables, checksumming, and other common uses of hashes. It is not an +// integer type (although it can be converted to one) because it is risky +// to assume much about the internals of a hash_code. In particular, each +// execution of the program has a high probability of producing a different +// hash_code for a given input. Thus their values are not stable to save or +// persist, and should only be used during the execution for the +// construction of hashing datastructures. +// +// -- 'hash_value' is a function designed to be overloaded for each +// user-defined type which wishes to be used within a hashing context. It +// should be overloaded within the user-defined type's namespace and found +// via ADL. Overloads for primitive types are provided by this library. +// +// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid +// programmers in easily and intuitively combining a set of data into +// a single hash_code for their object. They should only logically be used +// within the implementation of a 'hash_value' routine or similar context. +// +// Note that 'hash_combine_range' contains very special logic for hashing +// a contiguous array of integers or pointers. This logic is *extremely* fast, +// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were +// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys +// under 32-bytes. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_HASHING_H +#define LLVM_ADT_HASHING_H + +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/Host.h" +#include "llvm/Support/SwapByteOrder.h" +#include "llvm/Support/type_traits.h" +#include <algorithm> +#include <cassert> +#include <cstring> +#include <iterator> +#include <utility> + +// Allow detecting C++11 feature availability when building with Clang without +// breaking other compilers. +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +namespace llvm { + +/// \brief An opaque object representing a hash code. +/// +/// This object represents the result of hashing some entity. It is intended to +/// be used to implement hashtables or other hashing-based data structures. +/// While it wraps and exposes a numeric value, this value should not be +/// trusted to be stable or predictable across processes or executions. +/// +/// In order to obtain the hash_code for an object 'x': +/// \code +/// using llvm::hash_value; +/// llvm::hash_code code = hash_value(x); +/// \endcode +/// +/// Also note that there are two numerical values which are reserved, and the +/// implementation ensures will never be produced for real hash_codes. These +/// can be used as sentinels within hashing data structures. +class hash_code { + size_t value; + +public: + /// \brief Default construct a hash_code. + /// Note that this leaves the value uninitialized. + hash_code() {} + + /// \brief Form a hash code directly from a numerical value. + hash_code(size_t value) : value(value) {} + + /// \brief Convert the hash code to its numerical value for use. 
+ /*explicit*/ operator size_t() const { return value; } + + friend bool operator==(const hash_code &lhs, const hash_code &rhs) { + return lhs.value == rhs.value; + } + friend bool operator!=(const hash_code &lhs, const hash_code &rhs) { + return lhs.value != rhs.value; + } + + /// \brief Allow a hash_code to be directly run through hash_value. + friend size_t hash_value(const hash_code &code) { return code.value; } +}; + +/// \brief Compute a hash_code for any integer value. +/// +/// Note that this function is intended to compute the same hash_code for +/// a particular value without regard to the pre-promotion type. This is in +/// contrast to hash_combine which may produce different hash_codes for +/// differing argument types even if they would implicit promote to a common +/// type without changing the value. +template <typename T> +typename enable_if<is_integral<T>, hash_code>::type hash_value(T value); + +/// \brief Compute a hash_code for a pointer's address. +/// +/// N.B.: This hashes the *address*. Not the value and not the type. +template <typename T> hash_code hash_value(const T *ptr); + +/// \brief Compute a hash_code for a pair of objects. +template <typename T, typename U> +hash_code hash_value(const std::pair<T, U> &arg); + +/// \brief Compute a hash_code for a standard string. +template <typename T> +hash_code hash_value(const std::basic_string<T> &arg); + + +/// \brief Override the execution seed with a fixed value. +/// +/// This hashing library uses a per-execution seed designed to change on each +/// run with high probability in order to ensure that the hash codes are not +/// attackable and to ensure that output which is intended to be stable does +/// not rely on the particulars of the hash codes produced. +/// +/// That said, there are use cases where it is important to be able to +/// reproduce *exactly* a specific behavior. To that end, we provide a function +/// which will forcibly set the seed to a fixed value. 
This must be done at the +/// start of the program, before any hashes are computed. Also, it cannot be +/// undone. This makes it thread-hostile and very hard to use outside of +/// immediately on start of a simple program designed for reproducible +/// behavior. +void set_fixed_execution_hash_seed(size_t fixed_value); + + +// All of the implementation details of actually computing the various hash +// code values are held within this namespace. These routines are included in +// the header file mainly to allow inlining and constant propagation. +namespace hashing { +namespace detail { + +inline uint64_t fetch64(const char *p) { + uint64_t result; + memcpy(&result, p, sizeof(result)); + if (sys::isBigEndianHost()) + return sys::SwapByteOrder(result); + return result; +} + +inline uint32_t fetch32(const char *p) { + uint32_t result; + memcpy(&result, p, sizeof(result)); + if (sys::isBigEndianHost()) + return sys::SwapByteOrder(result); + return result; +} + +/// Some primes between 2^63 and 2^64 for various uses. +static const uint64_t k0 = 0xc3a5c85c97cb3127ULL; +static const uint64_t k1 = 0xb492b66fbe98f273ULL; +static const uint64_t k2 = 0x9ae16a3b2f90404fULL; +static const uint64_t k3 = 0xc949d7c7509e6557ULL; + +/// \brief Bitwise right rotate. +/// Normally this will compile to a single instruction, especially if the +/// shift is a manifest constant. +inline uint64_t rotate(uint64_t val, size_t shift) { + // Avoid shifting by 64: doing so yields an undefined result. + return shift == 0 ? val : ((val >> shift) | (val << (64 - shift))); +} + +inline uint64_t shift_mix(uint64_t val) { + return val ^ (val >> 47); +} + +inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) { + // Murmur-inspired hashing. 
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL; + uint64_t a = (low ^ high) * kMul; + a ^= (a >> 47); + uint64_t b = (high ^ a) * kMul; + b ^= (b >> 47); + b *= kMul; + return b; +} + +inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) { + uint8_t a = s[0]; + uint8_t b = s[len >> 1]; + uint8_t c = s[len - 1]; + uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8); + uint32_t z = len + (static_cast<uint32_t>(c) << 2); + return shift_mix(y * k2 ^ z * k3 ^ seed) * k2; +} + +inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch32(s); + return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4)); +} + +inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch64(s); + uint64_t b = fetch64(s + len - 8); + return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b; +} + +inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t a = fetch64(s) * k1; + uint64_t b = fetch64(s + 8); + uint64_t c = fetch64(s + len - 8) * k2; + uint64_t d = fetch64(s + len - 16) * k0; + return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d, + a + rotate(b ^ k3, 20) - c + len + seed); +} + +inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) { + uint64_t z = fetch64(s + 24); + uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0; + uint64_t b = rotate(a + z, 52); + uint64_t c = rotate(a, 37); + a += fetch64(s + 8); + c += rotate(a, 7); + a += fetch64(s + 16); + uint64_t vf = a + z; + uint64_t vs = b + rotate(a, 31) + c; + a = fetch64(s + 16) + fetch64(s + len - 32); + z = fetch64(s + len - 8); + b = rotate(a + z, 52); + c = rotate(a, 37); + a += fetch64(s + len - 24); + c += rotate(a, 7); + a += fetch64(s + len - 16); + uint64_t wf = a + z; + uint64_t ws = b + rotate(a, 31) + c; + uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0); + return shift_mix((seed ^ (r * k0)) + vs) * k2; +} + +inline 
uint64_t hash_short(const char *s, size_t length, uint64_t seed) { + if (length >= 4 && length <= 8) + return hash_4to8_bytes(s, length, seed); + if (length > 8 && length <= 16) + return hash_9to16_bytes(s, length, seed); + if (length > 16 && length <= 32) + return hash_17to32_bytes(s, length, seed); + if (length > 32) + return hash_33to64_bytes(s, length, seed); + if (length != 0) + return hash_1to3_bytes(s, length, seed); + + return k2 ^ seed; +} + +/// \brief The intermediate state used during hashing. +/// Currently, the algorithm for computing hash codes is based on CityHash and +/// keeps 56 bytes of arbitrary state. +struct hash_state { + uint64_t h0, h1, h2, h3, h4, h5, h6; + uint64_t seed; + + /// \brief Create a new hash_state structure and initialize it based on the + /// seed and the first 64-byte chunk. + /// This effectively performs the initial mix. + static hash_state create(const char *s, uint64_t seed) { + hash_state state = { + 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49), + seed * k1, shift_mix(seed), 0, seed }; + state.h6 = hash_16_bytes(state.h4, state.h5); + state.mix(s); + return state; + } + + /// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a' + /// and 'b', including whatever is already in 'a' and 'b'. + static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) { + a += fetch64(s); + uint64_t c = fetch64(s + 24); + b = rotate(b + a + c, 21); + uint64_t d = a; + a += fetch64(s + 8) + fetch64(s + 16); + b += rotate(a, 44) + d; + a += c; + } + + /// \brief Mix in a 64-byte buffer of data. + /// We mix all 64 bytes even when the chunk length is smaller, but we + /// record the actual length. 
+ void mix(const char *s) { + h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1; + h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1; + h0 ^= h6; + h1 += h3 + fetch64(s + 40); + h2 = rotate(h2 + h5, 33) * k1; + h3 = h4 * k1; + h4 = h0 + h5; + mix_32_bytes(s, h3, h4); + h5 = h2 + h6; + h6 = h1 + fetch64(s + 16); + mix_32_bytes(s + 32, h5, h6); + std::swap(h2, h0); + } + + /// \brief Compute the final 64-bit hash code value based on the current + /// state and the length of bytes hashed. + uint64_t finalize(size_t length) { + return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2, + hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0); + } +}; + + +/// \brief A global, fixed seed-override variable. +/// +/// This variable can be set using the \see llvm::set_fixed_execution_seed +/// function. See that function for details. Do not, under any circumstances, +/// set or read this variable. +extern size_t fixed_seed_override; + +inline size_t get_execution_seed() { + // FIXME: This needs to be a per-execution seed. This is just a placeholder + // implementation. Switching to a per-execution seed is likely to flush out + // instability bugs and so will happen as its own commit. + // + // However, if there is a fixed seed override set the first time this is + // called, return that instead of the per-execution seed. + const uint64_t seed_prime = 0xff51afd7ed558ccdULL; + static size_t seed = fixed_seed_override ? fixed_seed_override + : (size_t)seed_prime; + return seed; +} + + +/// \brief Trait to indicate whether a type's bits can be hashed directly. +/// +/// A type trait which is true if we want to combine values for hashing by +/// reading the underlying data. It is false if values of this type must +/// first be passed to hash_value, and the resulting hash_codes combined. 
+// +// FIXME: We want to replace is_integral and is_pointer here with a predicate +// which asserts that comparing the underlying storage of two values of the +// type for equality is equivalent to comparing the two values for equality. +// For all the platforms we care about, this holds for integers and pointers, +// but there are platforms where it doesn't and we would like to support +// user-defined types which happen to satisfy this property. +template <typename T> struct is_hashable_data + : integral_constant<bool, ((is_integral<T>::value || is_pointer<T>::value) && + 64 % sizeof(T) == 0)> {}; + +// Special case std::pair to detect when both types are viable and when there +// is no alignment-derived padding in the pair. This is a bit of a lie because +// std::pair isn't truly POD, but it's close enough in all reasonable +// implementations for our use case of hashing the underlying data. +template <typename T, typename U> struct is_hashable_data<std::pair<T, U> > + : integral_constant<bool, (is_hashable_data<T>::value && + is_hashable_data<U>::value && + (sizeof(T) + sizeof(U)) == + sizeof(std::pair<T, U>))> {}; + +/// \brief Helper to get the hashable data representation for a type. +/// This variant is enabled when the type itself can be used. +template <typename T> +typename enable_if<is_hashable_data<T>, T>::type +get_hashable_data(const T &value) { + return value; +} +/// \brief Helper to get the hashable data representation for a type. +/// This variant is enabled when we must first call hash_value and use the +/// result as our data. +template <typename T> +typename enable_if_c<!is_hashable_data<T>::value, size_t>::type +get_hashable_data(const T &value) { + using ::llvm::hash_value; + return hash_value(value); +} + +/// \brief Helper to store data from a value into a buffer and advance the +/// pointer into that buffer. 
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+                       size_t offset = 0) {
+  size_t store_size = sizeof(value) - offset;
+  if (buffer_ptr + store_size > buffer_end)
+    return false;
+  const char *value_data = reinterpret_cast<const char *>(&value);
+  memcpy(buffer_ptr, value_data + offset, store_size);
+  buffer_ptr += store_size;
+  return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+  typedef typename std::iterator_traits<InputIteratorT>::value_type ValueT;
+  const size_t seed = get_execution_seed();
+  char buffer[64], *buffer_ptr = buffer;
+  char *const buffer_end = buffer_ptr + array_lengthof(buffer);
+  while (first != last && store_and_advance(buffer_ptr, buffer_end,
+                                            get_hashable_data(*first)))
+    ++first;
+  if (first == last)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+  assert(buffer_ptr == buffer_end);
+
+  hash_state state = state.create(buffer, seed);
+  size_t length = 64;
+  while (first != last) {
+    // Fill up the buffer. We don't clear it, which re-mixes the last round
+    // when only a partial 64-byte chunk is left.
+ buffer_ptr = buffer; + while (first != last && store_and_advance(buffer_ptr, buffer_end, + get_hashable_data(*first))) + ++first; + + // Rotate the buffer if we did a partial fill in order to simulate doing + // a mix of the last 64-bytes. That is how the algorithm works when we + // have a contiguous byte sequence, and we want to emulate that here. + std::rotate(buffer, buffer_ptr, buffer_end); + + // Mix this chunk into the current state. + state.mix(buffer); + length += buffer_ptr - buffer; + }; + + return state.finalize(length); +} + +/// \brief Implement the combining of integral values into a hash_code. +/// +/// This overload is selected when the value type of the iterator is integral +/// and when the input iterator is actually a pointer. Rather than computing +/// a hash_code for each object and then combining them, this (as an +/// optimization) directly combines the integers. Also, because the integers +/// are stored in contiguous memory, this routine avoids copying each value +/// and directly reads from the underlying memory. +template <typename ValueT> +typename enable_if<is_hashable_data<ValueT>, hash_code>::type +hash_combine_range_impl(const ValueT *first, const ValueT *last) { + const size_t seed = get_execution_seed(); + const char *s_begin = reinterpret_cast<const char *>(first); + const char *s_end = reinterpret_cast<const char *>(last); + const size_t length = std::distance(s_begin, s_end); + if (length <= 64) + return hash_short(s_begin, length, seed); + + const char *s_aligned_end = s_begin + (length & ~63); + hash_state state = state.create(s_begin, seed); + s_begin += 64; + while (s_begin != s_aligned_end) { + state.mix(s_begin); + s_begin += 64; + } + if (length & 63) + state.mix(s_end - 64); + + return state.finalize(length); +} + +} // namespace detail +} // namespace hashing + + +/// \brief Compute a hash_code for a sequence of values. +/// +/// This hashes a sequence of values. 
It produces the same hash_code as +/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences +/// and is significantly faster given pointers and types which can be hashed as +/// a sequence of bytes. +template <typename InputIteratorT> +hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) { + return ::llvm::hashing::detail::hash_combine_range_impl(first, last); +} + + +// Implementation details for hash_combine. +namespace hashing { +namespace detail { + +/// \brief Helper class to manage the recursive combining of hash_combine +/// arguments. +/// +/// This class exists to manage the state and various calls involved in the +/// recursive combining of arguments used in hash_combine. It is particularly +/// useful at minimizing the code in the recursive calls to ease the pain +/// caused by a lack of variadic functions. +class hash_combine_recursive_helper { + const size_t seed; + char buffer[64]; + char *const buffer_end; + char *buffer_ptr; + size_t length; + hash_state state; + +public: + /// \brief Construct a recursive hash combining helper. + /// + /// This sets up the state for a recursive hash combine, including getting + /// the seed and buffer setup. + hash_combine_recursive_helper() + : seed(get_execution_seed()), + buffer_end(buffer + array_lengthof(buffer)), + buffer_ptr(buffer), + length(0) {} + + /// \brief Combine one chunk of data into the current in-flight hash. + /// + /// This merges one chunk of data into the hash. First it tries to buffer + /// the data. If the buffer is full, it hashes the buffer into its + /// hash_state, empties it, and then merges the new chunk in. This also + /// handles cases where the data straddles the end of the buffer. + template <typename T> void combine_data(T data) { + if (!store_and_advance(buffer_ptr, buffer_end, data)) { + // Check for skew which prevents the buffer from being packed, and do + // a partial store into the buffer to fill it. 
This is only a concern + // with the variadic combine because that formation can have varying + // argument types. + size_t partial_store_size = buffer_end - buffer_ptr; + memcpy(buffer_ptr, &data, partial_store_size); + + // If the store fails, our buffer is full and ready to hash. We have to + // either initialize the hash state (on the first full buffer) or mix + // this buffer into the existing hash state. Length tracks the *hashed* + // length, not the buffered length. + if (length == 0) { + state = state.create(buffer, seed); + length = 64; + } else { + // Mix this chunk into the current state and bump length up by 64. + state.mix(buffer); + length += 64; + } + // Reset the buffer_ptr to the head of the buffer for the next chunk of + // data. + buffer_ptr = buffer; + + // Try again to store into the buffer -- this cannot fail as we only + // store types smaller than the buffer. + if (!store_and_advance(buffer_ptr, buffer_end, data, + partial_store_size)) + abort(); + } + } + +#if defined(__has_feature) && __has_feature(__cxx_variadic_templates__) + + /// \brief Recursive, variadic combining method. + /// + /// This function recurses through each argument, combining that argument + /// into a single hash. + template <typename T, typename ...Ts> + hash_code combine(const T &arg, const Ts &...args) { + combine_data( get_hashable_data(arg)); + + // Recurse to the next argument. + return combine(args...); + } + +#else + // Manually expanded recursive combining methods. See variadic above for + // documentation. 
+ + template <typename T1, typename T2, typename T3, typename T4, typename T5, + typename T6> + hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3, + const T4 &arg4, const T5 &arg5, const T6 &arg6) { + combine_data(get_hashable_data(arg1)); + return combine(arg2, arg3, arg4, arg5, arg6); + } + template <typename T1, typename T2, typename T3, typename T4, typename T5> + hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3, + const T4 &arg4, const T5 &arg5) { + combine_data(get_hashable_data(arg1)); + return combine(arg2, arg3, arg4, arg5); + } + template <typename T1, typename T2, typename T3, typename T4> + hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3, + const T4 &arg4) { + combine_data(get_hashable_data(arg1)); + return combine(arg2, arg3, arg4); + } + template <typename T1, typename T2, typename T3> + hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) { + combine_data(get_hashable_data(arg1)); + return combine(arg2, arg3); + } + template <typename T1, typename T2> + hash_code combine(const T1 &arg1, const T2 &arg2) { + combine_data(get_hashable_data(arg1)); + return combine(arg2); + } + template <typename T1> + hash_code combine(const T1 &arg1) { + combine_data(get_hashable_data(arg1)); + return combine(); + } + +#endif + + /// \brief Base case for recursive, variadic combining. + /// + /// The base case when combining arguments recursively is reached when all + /// arguments have been handled. It flushes the remaining buffer and + /// constructs a hash_code. + hash_code combine() { + // Check whether the entire set of values fit in the buffer. If so, we'll + // use the optimized short hashing routine and skip state entirely. + if (length == 0) + return hash_short(buffer, buffer_ptr - buffer, seed); + + // Mix the final buffer, rotating it if we did a partial fill in order to + // simulate doing a mix of the last 64-bytes. 
That is how the algorithm + // works when we have a contiguous byte sequence, and we want to emulate + // that here. + std::rotate(buffer, buffer_ptr, buffer_end); + + // Mix this chunk into the current state. + state.mix(buffer); + length += buffer_ptr - buffer; + + return state.finalize(length); + } +}; + +} // namespace detail +} // namespace hashing + + +#if __has_feature(__cxx_variadic_templates__) + +/// \brief Combine values into a single hash_code. +/// +/// This routine accepts a varying number of arguments of any type. It will +/// attempt to combine them into a single hash_code. For user-defined types it +/// attempts to call a \see hash_value overload (via ADL) for the type. For +/// integer and pointer types it directly combines their data into the +/// resulting hash_code. +/// +/// The result is suitable for returning from a user's hash_value +/// *implementation* for their user-defined type. Consumers of a type should +/// *not* call this routine, they should instead call 'hash_value'. +template <typename ...Ts> hash_code hash_combine(const Ts &...args) { + // Recursively hash each argument using a helper class. + ::llvm::hashing::detail::hash_combine_recursive_helper helper; + return helper.combine(args...); +} + +#else + +// What follows are manually exploded overloads for each argument width. See +// the above variadic definition for documentation and specification. 
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4, const T5 &arg5, const T6 &arg6) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1, arg2, arg3, arg4, arg5, arg6);
+}
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4, const T5 &arg5) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1, arg2, arg3, arg4, arg5);
+}
+template <typename T1, typename T2, typename T3, typename T4>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+                       const T4 &arg4) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1, arg2, arg3, arg4);
+}
+template <typename T1, typename T2, typename T3>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1, arg2, arg3);
+}
+template <typename T1, typename T2>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1, arg2);
+}
+template <typename T1>
+hash_code hash_combine(const T1 &arg1) {
+  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+  return helper.combine(arg1);
+}
+
+#endif
+
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) { + // Similar to hash_4to8_bytes but using a seed instead of length. + const uint64_t seed = get_execution_seed(); + const char *s = reinterpret_cast<const char *>(&value); + const uint64_t a = fetch32(s); + return hash_16_bytes(seed + (a << 3), fetch32(s + 4)); +} + +} // namespace detail +} // namespace hashing + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template <typename T> +typename enable_if<is_integral<T>, hash_code>::type hash_value(T value) { + return ::llvm::hashing::detail::hash_integer_value(value); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template <typename T> hash_code hash_value(const T *ptr) { + return ::llvm::hashing::detail::hash_integer_value( + reinterpret_cast<uintptr_t>(ptr)); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. +template <typename T, typename U> +hash_code hash_value(const std::pair<T, U> &arg) { + return hash_combine(arg.first, arg.second); +} + +// Declared and documented above, but defined here so that any of the hashing +// infrastructure is available. 
+template <typename T> +hash_code hash_value(const std::basic_string<T> &arg) { + return hash_combine_range(arg.begin(), arg.end()); +} + +} // namespace llvm + +#endif diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h index d597a7c..2926462 100644 --- a/include/llvm/ADT/ImmutableSet.h +++ b/include/llvm/ADT/ImmutableSet.h @@ -18,6 +18,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include <cassert> #include <functional> #include <vector> @@ -686,7 +687,7 @@ public: stack.back() |= VisitedRight; break; default: - assert(false && "Unreachable."); + llvm_unreachable("Unreachable."); } } @@ -722,7 +723,7 @@ public: skipToParent(); break; default: - assert(false && "Unreachable."); + llvm_unreachable("Unreachable."); } return *this; } @@ -747,7 +748,7 @@ public: stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight); break; default: - assert(false && "Unreachable."); + llvm_unreachable("Unreachable."); } return *this; } diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h index 1230e8f..83e225c 100644 --- a/include/llvm/ADT/IntervalMap.h +++ b/include/llvm/ADT/IntervalMap.h @@ -739,7 +739,7 @@ public: // A Path is used by iterators to represent a position in a B+-tree, and the // path to get there from the root. // -// The Path class also constains the tree navigation code that doesn't have to +// The Path class also contains the tree navigation code that doesn't have to // be templatized. 
// //===----------------------------------------------------------------------===// diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h index 106daf4..3a1a3f4 100644 --- a/include/llvm/ADT/IntrusiveRefCntPtr.h +++ b/include/llvm/ADT/IntrusiveRefCntPtr.h @@ -65,6 +65,7 @@ namespace llvm { //===----------------------------------------------------------------------===// class RefCountedBaseVPTR { mutable unsigned ref_cnt; + virtual void anchor(); protected: RefCountedBaseVPTR() : ref_cnt(0) {} @@ -79,9 +80,15 @@ namespace llvm { } template <typename T> - friend class IntrusiveRefCntPtr; + friend struct IntrusiveRefCntPtrInfo; }; + + template <typename T> struct IntrusiveRefCntPtrInfo { + static void retain(T *obj) { obj->Retain(); } + static void release(T *obj) { obj->Release(); } + }; + //===----------------------------------------------------------------------===// /// IntrusiveRefCntPtr - A template class that implements a "smart pointer" /// that assumes the wrapped object has a reference count associated @@ -108,7 +115,7 @@ namespace llvm { explicit IntrusiveRefCntPtr() : Obj(0) {} - explicit IntrusiveRefCntPtr(T* obj) : Obj(obj) { + IntrusiveRefCntPtr(T* obj) : Obj(obj) { retain(); } @@ -167,8 +174,8 @@ namespace llvm { } private: - void retain() { if (Obj) Obj->Retain(); } - void release() { if (Obj) Obj->Release(); } + void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); } + void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); } void replace(T* S) { this_type(S).swap(*this); diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h index abe2067..965f0de 100644 --- a/include/llvm/ADT/SetVector.h +++ b/include/llvm/ADT/SetVector.h @@ -144,6 +144,12 @@ public: set_.erase(back()); vector_.pop_back(); } + + T pop_back_val() { + T Ret = back(); + pop_back(); + return Ret; + } bool operator==(const SetVector &that) const { return vector_ == that.vector_; diff --git 
a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h index b15b3ee..a3469a1 100644 --- a/include/llvm/ADT/SmallBitVector.h +++ b/include/llvm/ADT/SmallBitVector.h @@ -175,7 +175,7 @@ public: return CountPopulation_32(Bits); if (sizeof(uintptr_t) * CHAR_BIT == 64) return CountPopulation_64(Bits); - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return getPointer()->count(); } @@ -212,7 +212,7 @@ public: return CountTrailingZeros_32(Bits); if (sizeof(uintptr_t) * CHAR_BIT == 64) return CountTrailingZeros_64(Bits); - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return getPointer()->find_first(); } @@ -230,7 +230,7 @@ public: return CountTrailingZeros_32(Bits); if (sizeof(uintptr_t) * CHAR_BIT == 64) return CountTrailingZeros_64(Bits); - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return getPointer()->find_next(Prev); } diff --git a/include/llvm/ADT/SmallSet.h b/include/llvm/ADT/SmallSet.h index d03f1be..cd117f5 100644 --- a/include/llvm/ADT/SmallSet.h +++ b/include/llvm/ADT/SmallSet.h @@ -27,13 +27,13 @@ namespace llvm { /// /// Note that this set does not provide a way to iterate over members in the /// set. -template <typename T, unsigned N> +template <typename T, unsigned N, typename C = std::less<T> > class SmallSet { /// Use a SmallVector to hold the elements here (even though it will never /// reach its 'large' stage) to avoid calling the default ctors of elements /// we will never use. 
SmallVector<T, N> Vector; - std::set<T> Set; + std::set<T, C> Set; typedef typename SmallVector<T, N>::const_iterator VIterator; typedef typename SmallVector<T, N>::iterator mutable_iterator; public: diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h index da26416..199783b 100644 --- a/include/llvm/ADT/SmallString.h +++ b/include/llvm/ADT/SmallString.h @@ -24,21 +24,244 @@ namespace llvm { template<unsigned InternalLen> class SmallString : public SmallVector<char, InternalLen> { public: - // Default ctor - Initialize to empty. + /// Default ctor - Initialize to empty. SmallString() {} - // Initialize from a StringRef. + /// Initialize from a StringRef. SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {} - // Initialize with a range. + /// Initialize with a range. template<typename ItTy> SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {} - // Copy ctor. + /// Copy ctor. SmallString(const SmallString &RHS) : SmallVector<char, InternalLen>(RHS) {} + // Note that in order to add new overloads for append & assign, we have to + // duplicate the inherited versions so as not to inadvertently hide them. 
+ + /// @} + /// @name String Assignment + /// @{ + + /// Assign from a repeated element + void assign(unsigned NumElts, char Elt) { + this->SmallVectorImpl<char>::assign(NumElts, Elt); + } + + /// Assign from an iterator pair + template<typename in_iter> + void assign(in_iter S, in_iter E) { + this->clear(); + SmallVectorImpl<char>::append(S, E); + } + + /// Assign from a StringRef + void assign(StringRef RHS) { + this->clear(); + SmallVectorImpl<char>::append(RHS.begin(), RHS.end()); + } + + /// Assign from a SmallVector + void assign(const SmallVectorImpl<char> &RHS) { + this->clear(); + SmallVectorImpl<char>::append(RHS.begin(), RHS.end()); + } + + /// @} + /// @name String Concatenation + /// @{ + + /// Append from an iterator pair + template<typename in_iter> + void append(in_iter S, in_iter E) { + SmallVectorImpl<char>::append(S, E); + } + + /// Append from a StringRef + void append(StringRef RHS) { + SmallVectorImpl<char>::append(RHS.begin(), RHS.end()); + } + + /// Append from a SmallVector + void append(const SmallVectorImpl<char> &RHS) { + SmallVectorImpl<char>::append(RHS.begin(), RHS.end()); + } + + /// @} + /// @name String Comparison + /// @{ + + /// equals - Check for string equality, this is more efficient than + /// compare() when the relative ordering of inequal strings isn't needed. + bool equals(StringRef RHS) const { + return str().equals(RHS); + } + + /// equals_lower - Check for string equality, ignoring case. + bool equals_lower(StringRef RHS) const { + return str().equals_lower(RHS); + } + + /// compare - Compare two strings; the result is -1, 0, or 1 if this string + /// is lexicographically less than, equal to, or greater than the \arg RHS. + int compare(StringRef RHS) const { + return str().compare(RHS); + } + + /// compare_lower - Compare two strings, ignoring case. 
+ int compare_lower(StringRef RHS) const { + return str().compare_lower(RHS); + } + + /// compare_numeric - Compare two strings, treating sequences of digits as + /// numbers. + int compare_numeric(StringRef RHS) const { + return str().compare_numeric(RHS); + } + + /// @} + /// @name String Predicates + /// @{ + + /// startswith - Check if this string starts with the given \arg Prefix. + bool startswith(StringRef Prefix) const { + return str().startswith(Prefix); + } + + /// endswith - Check if this string ends with the given \arg Suffix. + bool endswith(StringRef Suffix) const { + return str().endswith(Suffix); + } + + /// @} + /// @name String Searching + /// @{ + + /// find - Search for the first character \arg C in the string. + /// + /// \return - The index of the first occurrence of \arg C, or npos if not + /// found. + size_t find(char C, size_t From = 0) const { + return str().find(C, From); + } + + /// find - Search for the first string \arg Str in the string. + /// + /// \return - The index of the first occurrence of \arg Str, or npos if not + /// found. + size_t find(StringRef Str, size_t From = 0) const { + return str().find(Str, From); + } + + /// rfind - Search for the last character \arg C in the string. + /// + /// \return - The index of the last occurrence of \arg C, or npos if not + /// found. + size_t rfind(char C, size_t From = StringRef::npos) const { + return str().rfind(C, From); + } + + /// rfind - Search for the last string \arg Str in the string. + /// + /// \return - The index of the last occurrence of \arg Str, or npos if not + /// found. + size_t rfind(StringRef Str) const { + return str().rfind(Str); + } + + /// find_first_of - Find the first character in the string that is \arg C, + /// or npos if not found. Same as find. + size_t find_first_of(char C, size_t From = 0) const { + return str().find_first_of(C, From); + } + + /// find_first_of - Find the first character in the string that is in \arg + /// Chars, or npos if not found. 
+ /// + /// Note: O(size() + Chars.size()) + size_t find_first_of(StringRef Chars, size_t From = 0) const { + return str().find_first_of(Chars, From); + } + + /// find_first_not_of - Find the first character in the string that is not + /// \arg C or npos if not found. + size_t find_first_not_of(char C, size_t From = 0) const { + return str().find_first_not_of(C, From); + } + + /// find_first_not_of - Find the first character in the string that is not + /// in the string \arg Chars, or npos if not found. + /// + /// Note: O(size() + Chars.size()) + size_t find_first_not_of(StringRef Chars, size_t From = 0) const { + return str().find_first_not_of(Chars, From); + } + + /// find_last_of - Find the last character in the string that is \arg C, or + /// npos if not found. + size_t find_last_of(char C, size_t From = StringRef::npos) const { + return str().find_last_of(C, From); + } + + /// find_last_of - Find the last character in the string that is in \arg C, + /// or npos if not found. + /// + /// Note: O(size() + Chars.size()) + size_t find_last_of( + StringRef Chars, size_t From = StringRef::npos) const { + return str().find_last_of(Chars, From); + } + + /// @} + /// @name Helpful Algorithms + /// @{ + + /// count - Return the number of occurrences of \arg C in the string. + size_t count(char C) const { + return str().count(C); + } + + /// count - Return the number of non-overlapped occurrences of \arg Str in + /// the string. + size_t count(StringRef Str) const { + return str().count(Str); + } + + /// @} + /// @name Substring Operations + /// @{ + + /// substr - Return a reference to the substring from [Start, Start + N). + /// + /// \param Start - The index of the starting character in the substring; if + /// the index is npos or greater than the length of the string then the + /// empty substring will be returned. + /// + /// \param N - The number of characters to included in the substring. 
If N + /// exceeds the number of characters remaining in the string, the string + /// suffix (starting with \arg Start) will be returned. + StringRef substr(size_t Start, size_t N = StringRef::npos) const { + return str().substr(Start, N); + } + + /// slice - Return a reference to the substring from [Start, End). + /// + /// \param Start - The index of the starting character in the substring; if + /// the index is npos or greater than the length of the string then the + /// empty substring will be returned. + /// + /// \param End - The index following the last character to include in the + /// substring. If this is npos, or less than \arg Start, or exceeds the + /// number of characters remaining in the string, the string suffix + /// (starting with \arg Start) will be returned. + StringRef slice(size_t Start, size_t End) const { + return str().slice(Start, End); + } // Extra methods. + + /// Explicit conversion to StringRef StringRef str() const { return StringRef(this->begin(), this->size()); } // TODO: Make this const, if it's safe... @@ -48,7 +271,7 @@ public: return this->data(); } - // Implicit conversion to StringRef. + /// Implicit conversion to StringRef. operator StringRef() const { return str(); } // Extra operators. diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h index 1c42f29..d01cf32 100644 --- a/include/llvm/ADT/SmallVector.h +++ b/include/llvm/ADT/SmallVector.h @@ -100,10 +100,10 @@ public: template <typename T> class SmallVectorTemplateCommon : public SmallVectorBase { protected: - void setEnd(T *P) { this->EndX = P; } -public: SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {} + void setEnd(T *P) { this->EndX = P; } +public: typedef size_t size_type; typedef ptrdiff_t difference_type; typedef T value_type; @@ -174,7 +174,7 @@ public: /// implementations that are designed to work with non-POD-like T's. 
template <typename T, bool isPodLike> class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> { -public: +protected: SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} static void destroy_range(T *S, T *E) { @@ -194,6 +194,23 @@ public: /// grow - double the size of the allocated memory, guaranteeing space for at /// least one more element or MinSize if specified. void grow(size_t MinSize = 0); + +public: + void push_back(const T &Elt) { + if (this->EndX < this->CapacityX) { + Retry: + new (this->end()) T(Elt); + this->setEnd(this->end()+1); + return; + } + this->grow(); + goto Retry; + } + + void pop_back() { + this->setEnd(this->end()-1); + this->end()->~T(); + } }; // Define this out-of-line to dissuade the C++ compiler from inlining it. @@ -226,7 +243,7 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) { /// implementations that are designed to work with POD-like T's. template <typename T> class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> { -public: +protected: SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {} // No need to do a destroy loop for POD's. @@ -255,6 +272,21 @@ public: void grow(size_t MinSize = 0) { this->grow_pod(MinSize*sizeof(T), sizeof(T)); } +public: + void push_back(const T &Elt) { + if (this->EndX < this->CapacityX) { + Retry: + *this->end() = Elt; + this->setEnd(this->end()+1); + return; + } + this->grow(); + goto Retry; + } + + void pop_back() { + this->setEnd(this->end()-1); + } }; @@ -270,11 +302,13 @@ public: typedef typename SuperClass::iterator iterator; typedef typename SuperClass::size_type size_type; +protected: // Default ctor - Initialize to empty. explicit SmallVectorImpl(unsigned N) : SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) { } +public: ~SmallVectorImpl() { // Destroy the constructed elements in the vector. 
this->destroy_range(this->begin(), this->end()); @@ -297,7 +331,7 @@ public: } else if (N > this->size()) { if (this->capacity() < N) this->grow(N); - this->construct_range(this->end(), this->begin()+N, T()); + std::uninitialized_fill(this->end(), this->begin()+N, T()); this->setEnd(this->begin()+N); } } @@ -309,7 +343,7 @@ public: } else if (N > this->size()) { if (this->capacity() < N) this->grow(N); - construct_range(this->end(), this->begin()+N, NV); + std::uninitialized_fill(this->end(), this->begin()+N, NV); this->setEnd(this->begin()+N); } } @@ -319,25 +353,9 @@ public: this->grow(N); } - void push_back(const T &Elt) { - if (this->EndX < this->CapacityX) { - Retry: - new (this->end()) T(Elt); - this->setEnd(this->end()+1); - return; - } - this->grow(); - goto Retry; - } - - void pop_back() { - this->setEnd(this->end()-1); - this->end()->~T(); - } - T pop_back_val() { T Result = this->back(); - pop_back(); + this->pop_back(); return Result; } @@ -376,7 +394,7 @@ public: if (this->capacity() < NumElts) this->grow(NumElts); this->setEnd(this->begin()+NumElts); - construct_range(this->begin(), this->end(), Elt); + std::uninitialized_fill(this->begin(), this->end(), Elt); } iterator erase(iterator I) { @@ -384,7 +402,7 @@ public: // Shift all elts down one. std::copy(I+1, this->end(), I); // Drop the last elt. - pop_back(); + this->pop_back(); return(N); } @@ -400,7 +418,7 @@ public: iterator insert(iterator I, const T &Elt) { if (I == this->end()) { // Important special case for empty vector. 
- push_back(Elt); + this->push_back(Elt); return this->end()-1; } @@ -554,12 +572,6 @@ public: assert(N <= this->capacity()); this->setEnd(this->begin() + N); } - -private: - static void construct_range(T *S, T *E, const T &Elt) { - for (; S != E; ++S) - new (S) T(Elt); - } }; @@ -686,9 +698,7 @@ public: explicit SmallVector(unsigned Size, const T &Value = T()) : SmallVectorImpl<T>(NumTsAvailable) { - this->reserve(Size); - while (Size--) - this->push_back(Value); + this->assign(Size, Value); } template<typename ItTy> @@ -718,9 +728,7 @@ public: explicit SmallVector(unsigned Size, const T &Value = T()) : SmallVectorImpl<T>(0) { - this->reserve(Size); - while (Size--) - this->push_back(Value); + this->assign(Size, Value); } template<typename ItTy> diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h index d977136..7c824ee 100644 --- a/include/llvm/ADT/SparseBitVector.h +++ b/include/llvm/ADT/SparseBitVector.h @@ -18,11 +18,11 @@ #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include <cassert> #include <climits> -#include <cstring> namespace llvm { @@ -128,7 +128,7 @@ public: else if (sizeof(BitWord) == 8) NumBits += CountPopulation_64(Bits[i]); else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); return NumBits; } @@ -138,13 +138,11 @@ public: if (Bits[i] != 0) { if (sizeof(BitWord) == 4) return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } - assert(0 && "Illegal empty element"); - return 0; // Not reached + llvm_unreachable("Illegal empty element"); } /// find_next - Returns the index of the next set bit starting from the @@ -165,10 +163,9 @@ public: if 
(Copy != 0) { if (sizeof(BitWord) == 4) return WordPos * BITWORD_SIZE + CountTrailingZeros_32(Copy); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } // Check subsequent words. @@ -176,10 +173,9 @@ public: if (Bits[i] != 0) { if (sizeof(BitWord) == 4) return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]); - else if (sizeof(BitWord) == 8) + if (sizeof(BitWord) == 8) return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]); - else - assert(0 && "Unsupported!"); + llvm_unreachable("Unsupported!"); } return -1; } diff --git a/include/llvm/ADT/SparseSet.h b/include/llvm/ADT/SparseSet.h new file mode 100644 index 0000000..923c6a5 --- /dev/null +++ b/include/llvm/ADT/SparseSet.h @@ -0,0 +1,268 @@ +//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the SparseSet class derived from the version described in +// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters +// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993. +// +// A sparse set holds a small number of objects identified by integer keys from +// a moderately sized universe. The sparse set uses more memory than other +// containers in order to provide faster operations. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SPARSESET_H +#define LLVM_ADT_SPARSESET_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/DataTypes.h" +#include <limits> + +namespace llvm { + +/// SparseSetFunctor - Objects in a SparseSet are identified by small integer +/// keys. 
A functor object is used to compute the key of an object. The +/// functor's operator() must return an unsigned smaller than the universe. +/// +/// The default functor implementation forwards to a getSparseSetKey() method +/// on the object. It is intended for sparse sets holding ad-hoc structs. +/// +template<typename ValueT> +struct SparseSetFunctor { + unsigned operator()(const ValueT &Val) { + return Val.getSparseSetKey(); + } +}; + +/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for +/// SparseSet<unsigned>. +/// +template<> struct SparseSetFunctor<unsigned> { + unsigned operator()(unsigned Val) { return Val; } +}; + +/// SparseSet - Fast set implementation for objects that can be identified by +/// small unsigned keys. +/// +/// SparseSet allocates memory proportional to the size of the key universe, so +/// it is not recommended for building composite data structures. It is useful +/// for algorithms that require a single set with fast operations. +/// +/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast +/// clear() and iteration as fast as a vector. The find(), insert(), and +/// erase() operations are all constant time, and typically faster than a hash +/// table. The iteration order doesn't depend on numerical key values, it only +/// depends on the order of insert() and erase() operations. When no elements +/// have been erased, the iteration order is the insertion order. +/// +/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but +/// offers constant-time clear() and size() operations as well as fast +/// iteration independent on the size of the universe. +/// +/// SparseSet contains a dense vector holding all the objects and a sparse +/// array holding indexes into the dense vector. Most of the memory is used by +/// the sparse array which is the size of the key universe. The SparseT +/// template parameter provides a space/speed tradeoff for sets holding many +/// elements. 
+/// +/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse +/// array uses 4 x Universe bytes. +/// +/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache +/// lines, but the sparse array is 4x smaller. N is the number of elements in +/// the set. +/// +/// For sets that may grow to thousands of elements, SparseT should be set to +/// uint16_t or uint32_t. +/// +/// @param ValueT The type of objects in the set. +/// @param SparseT An unsigned integer type. See above. +/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT. +/// +template<typename ValueT, + typename SparseT = uint8_t, + typename KeyFunctorT = SparseSetFunctor<ValueT> > +class SparseSet { + typedef SmallVector<ValueT, 8> DenseT; + DenseT Dense; + SparseT *Sparse; + unsigned Universe; + KeyFunctorT KeyOf; + + // Disable copy construction and assignment. + // This data structure is not meant to be used that way. + SparseSet(const SparseSet&); // DO NOT IMPLEMENT. + SparseSet &operator=(const SparseSet&); // DO NOT IMPLEMENT. + +public: + typedef ValueT value_type; + typedef ValueT &reference; + typedef const ValueT &const_reference; + typedef ValueT *pointer; + typedef const ValueT *const_pointer; + + SparseSet() : Sparse(0), Universe(0) {} + ~SparseSet() { free(Sparse); } + + /// setUniverse - Set the universe size which determines the largest key the + /// set can hold. The universe must be sized before any elements can be + /// added. + /// + /// @param U Universe size. All object keys must be less than U. + /// + void setUniverse(unsigned U) { + // It's not hard to resize the universe on a non-empty set, but it doesn't + // seem like a likely use case, so we can add that code when we need it. + assert(empty() && "Can only resize universe on an empty map"); + // Hysteresis prevents needless reallocations. 
+ if (U >= Universe/4 && U <= Universe) + return; + free(Sparse); + // The Sparse array doesn't actually need to be initialized, so malloc + // would be enough here, but that will cause tools like valgrind to + // complain about branching on uninitialized data. + Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT))); + Universe = U; + } + + // Import trivial vector stuff from DenseT. + typedef typename DenseT::iterator iterator; + typedef typename DenseT::const_iterator const_iterator; + + const_iterator begin() const { return Dense.begin(); } + const_iterator end() const { return Dense.end(); } + iterator begin() { return Dense.begin(); } + iterator end() { return Dense.end(); } + + /// empty - Returns true if the set is empty. + /// + /// This is not the same as BitVector::empty(). + /// + bool empty() const { return Dense.empty(); } + + /// size - Returns the number of elements in the set. + /// + /// This is not the same as BitVector::size() which returns the size of the + /// universe. + /// + unsigned size() const { return Dense.size(); } + + /// clear - Clears the set. This is a very fast constant time operation. + /// + void clear() { + // Sparse does not need to be cleared, see find(). + Dense.clear(); + } + + /// find - Find an element by its key. + /// + /// @param Key A valid key to find. + /// @returns An iterator to the element identified by key, or end(). + /// + iterator find(unsigned Key) { + assert(Key < Universe && "Key out of range"); + assert(std::numeric_limits<SparseT>::is_integer && + !std::numeric_limits<SparseT>::is_signed && + "SparseT must be an unsigned integer type"); + const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u; + for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) { + const unsigned FoundKey = KeyOf(Dense[i]); + assert(FoundKey < Universe && "Invalid key in set. Did object mutate?"); + if (Key == FoundKey) + return begin() + i; + // Stride is 0 when SparseT >= unsigned. 
We don't need to loop. + if (!Stride) + break; + } + return end(); + } + + const_iterator find(unsigned Key) const { + return const_cast<SparseSet*>(this)->find(Key); + } + + /// count - Returns true if this set contains an element identified by Key. + /// + bool count(unsigned Key) const { + return find(Key) != end(); + } + + /// insert - Attempts to insert a new element. + /// + /// If Val is successfully inserted, return (I, true), where I is an iterator + /// pointing to the newly inserted element. + /// + /// If the set already contains an element with the same key as Val, return + /// (I, false), where I is an iterator pointing to the existing element. + /// + /// Insertion invalidates all iterators. + /// + std::pair<iterator, bool> insert(const ValueT &Val) { + unsigned Key = KeyOf(Val); + iterator I = find(Key); + if (I != end()) + return std::make_pair(I, false); + Sparse[Key] = size(); + Dense.push_back(Val); + return std::make_pair(end() - 1, true); + } + + /// array subscript - If an element already exists with this key, return it. + /// Otherwise, automatically construct a new value from Key, insert it, + /// and return the newly inserted element. + ValueT &operator[](unsigned Key) { + return *insert(ValueT(Key)).first; + } + + /// erase - Erases an existing element identified by a valid iterator. + /// + /// This invalidates all iterators, but erase() returns an iterator pointing + /// to the next element. This makes it possible to erase selected elements + /// while iterating over the set: + /// + /// for (SparseSet::iterator I = Set.begin(); I != Set.end();) + /// if (test(*I)) + /// I = Set.erase(I); + /// else + /// ++I; + /// + /// Note that end() changes when elements are erased, unlike std::list. + /// + iterator erase(iterator I) { + assert(unsigned(I - begin()) < size() && "Invalid iterator"); + if (I != end() - 1) { + *I = Dense.back(); + unsigned BackKey = KeyOf(Dense.back()); + assert(BackKey < Universe && "Invalid key in set. 
Did object mutate?"); + Sparse[BackKey] = I - begin(); + } + // This depends on SmallVector::pop_back() not invalidating iterators. + // std::vector::pop_back() doesn't give that guarantee. + Dense.pop_back(); + return I; + } + + /// erase - Erases an element identified by Key, if it exists. + /// + /// @param Key The key identifying the element to erase. + /// @returns True when an element was erased, false if no element was found. + /// + bool erase(unsigned Key) { + iterator I = find(Key); + if (I == end()) + return false; + erase(I); + return true; + } + +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h index 3507787..097418e 100644 --- a/include/llvm/ADT/StringMap.h +++ b/include/llvm/ADT/StringMap.h @@ -51,20 +51,11 @@ public: /// StringMapImpl - This is the base class of StringMap that is shared among /// all of its instantiations. class StringMapImpl { -public: - /// ItemBucket - The hash table consists of an array of these. If Item is - /// non-null, this is an extant entry, otherwise, it is a hole. - struct ItemBucket { - /// FullHashValue - This remembers the full hash value of the key for - /// easy scanning. - unsigned FullHashValue; - - /// Item - This is a pointer to the actual item object. - StringMapEntryBase *Item; - }; - protected: - ItemBucket *TheTable; + // Array of NumBuckets pointers to entries, null pointers are holes. + // TheTable[NumBuckets] contains a sentinel value for easy iteration. Follwed + // by an array of the actual hash values as unsigned integers. 
+ StringMapEntryBase **TheTable; unsigned NumBuckets; unsigned NumItems; unsigned NumTombstones; @@ -238,8 +229,9 @@ public: template<typename ValueTy, typename AllocatorTy = MallocAllocator> class StringMap : public StringMapImpl { AllocatorTy Allocator; - typedef StringMapEntry<ValueTy> MapEntryTy; public: + typedef StringMapEntry<ValueTy> MapEntryTy; + StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {} explicit StringMap(unsigned InitialSize) : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {} @@ -289,13 +281,13 @@ public: iterator find(StringRef Key) { int Bucket = FindKey(Key); if (Bucket == -1) return end(); - return iterator(TheTable+Bucket); + return iterator(TheTable+Bucket, true); } const_iterator find(StringRef Key) const { int Bucket = FindKey(Key); if (Bucket == -1) return end(); - return const_iterator(TheTable+Bucket); + return const_iterator(TheTable+Bucket, true); } /// lookup - Return the entry for the specified key, or a default @@ -320,13 +312,13 @@ public: /// insert it and return true. bool insert(MapEntryTy *KeyValue) { unsigned BucketNo = LookupBucketFor(KeyValue->getKey()); - ItemBucket &Bucket = TheTable[BucketNo]; - if (Bucket.Item && Bucket.Item != getTombstoneVal()) + StringMapEntryBase *&Bucket = TheTable[BucketNo]; + if (Bucket && Bucket != getTombstoneVal()) return false; // Already exists in map. - if (Bucket.Item == getTombstoneVal()) + if (Bucket == getTombstoneVal()) --NumTombstones; - Bucket.Item = KeyValue; + Bucket = KeyValue; ++NumItems; assert(NumItems + NumTombstones <= NumBuckets); @@ -340,10 +332,11 @@ public: // Zap all values, resetting the keys back to non-present (not tombstone), // which is safe because we're removing all elements. 
- for (ItemBucket *I = TheTable, *E = TheTable+NumBuckets; I != E; ++I) { - if (I->Item && I->Item != getTombstoneVal()) { - static_cast<MapEntryTy*>(I->Item)->Destroy(Allocator); - I->Item = 0; + for (unsigned I = 0, E = NumBuckets; I != E; ++I) { + StringMapEntryBase *&Bucket = TheTable[I]; + if (Bucket && Bucket != getTombstoneVal()) { + static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator); + Bucket = 0; } } @@ -357,21 +350,21 @@ public: template <typename InitTy> MapEntryTy &GetOrCreateValue(StringRef Key, InitTy Val) { unsigned BucketNo = LookupBucketFor(Key); - ItemBucket &Bucket = TheTable[BucketNo]; - if (Bucket.Item && Bucket.Item != getTombstoneVal()) - return *static_cast<MapEntryTy*>(Bucket.Item); + StringMapEntryBase *&Bucket = TheTable[BucketNo]; + if (Bucket && Bucket != getTombstoneVal()) + return *static_cast<MapEntryTy*>(Bucket); MapEntryTy *NewItem = MapEntryTy::Create(Key.begin(), Key.end(), Allocator, Val); - if (Bucket.Item == getTombstoneVal()) + if (Bucket == getTombstoneVal()) --NumTombstones; ++NumItems; assert(NumItems + NumTombstones <= NumBuckets); // Fill in the bucket for the hash table. The FullHashValue was already // filled in by LookupBucketFor. 
- Bucket.Item = NewItem; + Bucket = NewItem; RehashTable(); return *NewItem; @@ -410,21 +403,21 @@ public: template<typename ValueTy> class StringMapConstIterator { protected: - StringMapImpl::ItemBucket *Ptr; + StringMapEntryBase **Ptr; public: typedef StringMapEntry<ValueTy> value_type; - explicit StringMapConstIterator(StringMapImpl::ItemBucket *Bucket, + explicit StringMapConstIterator(StringMapEntryBase **Bucket, bool NoAdvance = false) : Ptr(Bucket) { if (!NoAdvance) AdvancePastEmptyBuckets(); } const value_type &operator*() const { - return *static_cast<StringMapEntry<ValueTy>*>(Ptr->Item); + return *static_cast<StringMapEntry<ValueTy>*>(*Ptr); } const value_type *operator->() const { - return static_cast<StringMapEntry<ValueTy>*>(Ptr->Item); + return static_cast<StringMapEntry<ValueTy>*>(*Ptr); } bool operator==(const StringMapConstIterator &RHS) const { @@ -445,7 +438,7 @@ public: private: void AdvancePastEmptyBuckets() { - while (Ptr->Item == 0 || Ptr->Item == StringMapImpl::getTombstoneVal()) + while (*Ptr == 0 || *Ptr == StringMapImpl::getTombstoneVal()) ++Ptr; } }; @@ -453,15 +446,15 @@ private: template<typename ValueTy> class StringMapIterator : public StringMapConstIterator<ValueTy> { public: - explicit StringMapIterator(StringMapImpl::ItemBucket *Bucket, + explicit StringMapIterator(StringMapEntryBase **Bucket, bool NoAdvance = false) : StringMapConstIterator<ValueTy>(Bucket, NoAdvance) { } StringMapEntry<ValueTy> &operator*() const { - return *static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item); + return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr); } StringMapEntry<ValueTy> *operator->() const { - return static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item); + return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr); } }; diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h index 6c33806..a2bb7fe 100644 --- a/include/llvm/ADT/StringRef.h +++ b/include/llvm/ADT/StringRef.h @@ -19,6 +19,7 @@ namespace llvm { 
template<typename T> class SmallVectorImpl; class APInt; + class hash_code; /// StringRef - Represent a constant reference to a string, i.e. a character /// array and a length, which need not be null terminated. @@ -353,6 +354,20 @@ namespace llvm { Start = min(Start, Length); return StringRef(Data + Start, min(N, Length - Start)); } + + /// drop_front - Return a StringRef equal to 'this' but with the first + /// elements dropped. + StringRef drop_front(unsigned N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return substr(N); + } + + /// drop_back - Return a StringRef equal to 'this' but with the last + /// elements dropped. + StringRef drop_back(unsigned N = 1) const { + assert(size() >= N && "Dropping more elements than exist"); + return substr(0, size()-N); + } /// slice - Return a reference to the substring from [Start, End). /// @@ -476,6 +491,9 @@ namespace llvm { /// @} + /// \brief Compute a hash_code for a StringRef. + hash_code hash_value(StringRef S); + // StringRefs can be treated like a POD type. 
template <typename T> struct isPodLike; template <> struct isPodLike<StringRef> { static const bool value = true; }; diff --git a/include/llvm/ADT/Trie.h b/include/llvm/ADT/Trie.h index 6b150c8..845af01 100644 --- a/include/llvm/ADT/Trie.h +++ b/include/llvm/ADT/Trie.h @@ -220,8 +220,7 @@ bool Trie<Payload>::addString(const std::string& s, const Payload& data) { assert(0 && "FIXME!"); return false; case Node::DontMatch: - assert(0 && "Impossible!"); - return false; + llvm_unreachable("Impossible!"); case Node::LabelIsPrefix: s1 = s1.substr(nNode->label().length()); cNode = nNode; @@ -258,8 +257,7 @@ const Payload& Trie<Payload>::lookup(const std::string& s) const { case Node::StringIsPrefix: return Empty; case Node::DontMatch: - assert(0 && "Impossible!"); - return Empty; + llvm_unreachable("Impossible!"); case Node::LabelIsPrefix: s1 = s1.substr(nNode->label().length()); cNode = nNode; diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h index 4739fb5..98f0b62 100644 --- a/include/llvm/ADT/Triple.h +++ b/include/llvm/ADT/Triple.h @@ -47,7 +47,7 @@ public: cellspu, // CellSPU: spu, cellspu hexagon, // Hexagon: hexagon mips, // MIPS: mips, mipsallegrex - mipsel, // MIPSEL: mipsel, mipsallegrexel, psp + mipsel, // MIPSEL: mipsel, mipsallegrexel mips64, // MIPS64: mips64 mips64el,// MIPS64EL: mips64el msp430, // MSP430: msp430 @@ -64,9 +64,7 @@ public: ptx32, // PTX: ptx (32-bit) ptx64, // PTX: ptx (64-bit) le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten) - amdil, // amdil: amd IL - - InvalidArch + amdil // amdil: amd IL }; enum VendorType { UnknownVendor, @@ -91,7 +89,6 @@ public: MinGW32, // i*86-pc-mingw32, *-w64-mingw32 NetBSD, OpenBSD, - Psp, Solaris, Win32, Haiku, @@ -104,48 +101,39 @@ public: GNU, GNUEABI, + GNUEABIHF, EABI, - MachO + MachO, + ANDROIDEABI }; private: std::string Data; - /// The parsed arch type (or InvalidArch if uninitialized). - mutable ArchType Arch; + /// The parsed arch type. 
+ ArchType Arch; /// The parsed vendor type. - mutable VendorType Vendor; + VendorType Vendor; /// The parsed OS type. - mutable OSType OS; + OSType OS; /// The parsed Environment type. - mutable EnvironmentType Environment; - - bool isInitialized() const { return Arch != InvalidArch; } - static ArchType ParseArch(StringRef ArchName); - static VendorType ParseVendor(StringRef VendorName); - static OSType ParseOS(StringRef OSName); - static EnvironmentType ParseEnvironment(StringRef EnvironmentName); - void Parse() const; + EnvironmentType Environment; public: /// @name Constructors /// @{ - Triple() : Data(), Arch(InvalidArch) {} - explicit Triple(const Twine &Str) : Data(Str.str()), Arch(InvalidArch) {} - Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr) - : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr).str()), - Arch(InvalidArch) { - } + /// \brief Default constructor is the same as an empty string and leaves all + /// triple fields unknown. + Triple() : Data(), Arch(), Vendor(), OS(), Environment() {} + explicit Triple(const Twine &Str); + Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr); Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr, - const Twine &EnvironmentStr) - : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr + Twine('-') + - EnvironmentStr).str()), Arch(InvalidArch) { - } + const Twine &EnvironmentStr); /// @} /// @name Normalization @@ -162,22 +150,13 @@ public: /// @{ /// getArch - Get the parsed architecture type of this triple. - ArchType getArch() const { - if (!isInitialized()) Parse(); - return Arch; - } + ArchType getArch() const { return Arch; } /// getVendor - Get the parsed vendor type of this triple. - VendorType getVendor() const { - if (!isInitialized()) Parse(); - return Vendor; - } + VendorType getVendor() const { return Vendor; } /// getOS - Get the parsed operating system type of this triple. 
- OSType getOS() const { - if (!isInitialized()) Parse(); - return OS; - } + OSType getOS() const { return OS; } /// hasEnvironment - Does this triple have the optional environment /// (fourth) component? @@ -186,11 +165,31 @@ public: } /// getEnvironment - Get the parsed environment type of this triple. - EnvironmentType getEnvironment() const { - if (!isInitialized()) Parse(); - return Environment; + EnvironmentType getEnvironment() const { return Environment; } + + /// getOSVersion - Parse the version number from the OS name component of the + /// triple, if present. + /// + /// For example, "fooos1.2.3" would return (1, 2, 3). + /// + /// If an entry is not defined, it will be returned as 0. + void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; + + /// getOSMajorVersion - Return just the major version number, this is + /// specialized because it is a common query. + unsigned getOSMajorVersion() const { + unsigned Maj, Min, Micro; + getOSVersion(Maj, Min, Micro); + return Maj; } + /// getMacOSXVersion - Parse the version number as with getOSVersion and then + /// translate generic "darwin" versions to the corresponding OS X versions. + /// This may also be called with IOS triples but the OS X version number is + /// just set to a constant 10.4.0 in that case. Returns true if successful. + bool getMacOSXVersion(unsigned &Major, unsigned &Minor, + unsigned &Micro) const; + /// @} /// @name Direct Component Access /// @{ @@ -219,21 +218,28 @@ public: /// if the environment component is present). StringRef getOSAndEnvironmentName() const; - /// getOSVersion - Parse the version number from the OS name component of the - /// triple, if present. + /// @} + /// @name Convenience Predicates + /// @{ + + /// \brief Test whether the architecture is 64-bit /// - /// For example, "fooos1.2.3" would return (1, 2, 3). + /// Note that this tests for 64-bit pointer width, and nothing else. 
Note + /// that we intentionally expose only three predicates, 64-bit, 32-bit, and + /// 16-bit. The inner details of pointer width for particular architectures + /// is not summed up in the triple, and so only a coarse grained predicate + /// system is provided. + bool isArch64Bit() const; + + /// \brief Test whether the architecture is 32-bit /// - /// If an entry is not defined, it will be returned as 0. - void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; + /// Note that this tests for 32-bit pointer width, and nothing else. + bool isArch32Bit() const; - /// getOSMajorVersion - Return just the major version number, this is - /// specialized because it is a common query. - unsigned getOSMajorVersion() const { - unsigned Maj, Min, Micro; - getOSVersion(Maj, Min, Micro); - return Maj; - } + /// \brief Test whether the architecture is 16-bit + /// + /// Note that this tests for 16-bit pointer width, and nothing else. + bool isArch16Bit() const; /// isOSVersionLT - Helper function for doing comparisons against version /// numbers included in the target triple. @@ -252,6 +258,22 @@ public: return false; } + /// isMacOSXVersionLT - Comparison function for checking OS X version + /// compatibility, which handles supporting skewed version numbering schemes + /// used by the "darwin" triples. + unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0, + unsigned Micro = 0) const { + assert(isMacOSX() && "Not an OS X triple!"); + + // If this is OS X, expect a sane version number. + if (getOS() == Triple::MacOSX) + return isOSVersionLT(Major, Minor, Micro); + + // Otherwise, compare to the "Darwin" number. + assert(Major == 10 && "Unexpected major version"); + return isOSVersionLT(Minor + 4, Micro, 0); + } + /// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both /// "darwin" and "osx" as OS X triples. 
bool isMacOSX() const { @@ -263,26 +285,30 @@ public: return isMacOSX() || getOS() == Triple::IOS; } + /// \brief Tests for either Cygwin or MinGW OS + bool isOSCygMing() const { + return getOS() == Triple::Cygwin || getOS() == Triple::MinGW32; + } + /// isOSWindows - Is this a "Windows" OS. bool isOSWindows() const { - return getOS() == Triple::Win32 || getOS() == Triple::Cygwin || - getOS() == Triple::MinGW32; + return getOS() == Triple::Win32 || isOSCygMing(); } - /// isMacOSXVersionLT - Comparison function for checking OS X version - /// compatibility, which handles supporting skewed version numbering schemes - /// used by the "darwin" triples. - unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0, - unsigned Micro = 0) const { - assert(isMacOSX() && "Not an OS X triple!"); + /// \brief Tests whether the OS uses the ELF binary format. + bool isOSBinFormatELF() const { + return !isOSDarwin() && !isOSWindows(); + } - // If this is OS X, expect a sane version number. - if (getOS() == Triple::MacOSX) - return isOSVersionLT(Major, Minor, Micro); + /// \brief Tests whether the OS uses the COFF binary format. + bool isOSBinFormatCOFF() const { + return isOSWindows(); + } - // Otherwise, compare to the "Darwin" number. - assert(Major == 10 && "Unexpected major version"); - return isOSVersionLT(Minor + 4, Micro, 0); + /// \brief Tests whether the environment is MachO. + // FIXME: Should this be an OSBinFormat predicate? + bool isEnvironmentMachO() const { + return getEnvironment() == Triple::MachO || isOSDarwin(); } /// @} @@ -333,6 +359,26 @@ public: const char *getArchNameForAssembler(); /// @} + /// @name Helpers to build variants of a particular triple. + /// @{ + + /// \brief Form a triple with a 32-bit variant of the current architecture. + /// + /// This can be used to move across "families" of architectures where useful. + /// + /// \returns A new triple with a 32-bit architecture or an unknown + /// architecture if no such variant can be found. 
+ llvm::Triple get32BitArchVariant() const; + + /// \brief Form a triple with a 64-bit variant of the current architecture. + /// + /// This can be used to move across "families" of architectures where useful. + /// + /// \returns A new triple with a 64-bit architecture or an unknown + /// architecture if no such variant can be found. + llvm::Triple get64BitArchVariant() const; + + /// @} /// @name Static helpers for IDs. /// @{ diff --git a/include/llvm/ADT/Twine.h b/include/llvm/ADT/Twine.h index 3a60cab..9101df8 100644 --- a/include/llvm/ADT/Twine.h +++ b/include/llvm/ADT/Twine.h @@ -12,6 +12,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include <cassert> #include <string> @@ -425,7 +426,7 @@ namespace llvm { StringRef getSingleStringRef() const { assert(isSingleStringRef() &&"This cannot be had as a single stringref!"); switch (getLHSKind()) { - default: assert(0 && "Out of sync with isSingleStringRef"); + default: llvm_unreachable("Out of sync with isSingleStringRef"); case EmptyKind: return StringRef(); case CStringKind: return StringRef(LHS.cString); case StdStringKind: return StringRef(*LHS.stdString); diff --git a/include/llvm/ADT/VariadicFunction.h b/include/llvm/ADT/VariadicFunction.h new file mode 100644 index 0000000..a9a0dc6 --- /dev/null +++ b/include/llvm/ADT/VariadicFunction.h @@ -0,0 +1,331 @@ +//===--- VariadicFunctions.h - Variadic Functions ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements compile-time type-safe variadic functions. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_VARIADIC_FUNCTION_H +#define LLVM_ADT_VARIADIC_FUNCTION_H + +#include "llvm/ADT/ArrayRef.h" + +namespace llvm { + +// Define macros to aid in expanding a comma separated series with the index of +// the series pasted onto the last token. +#define LLVM_COMMA_JOIN1(x) x ## 0 +#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1 +#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2 +#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3 +#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4 +#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5 +#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6 +#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7 +#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8 +#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9 +#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10 +#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11 +#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12 +#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13 +#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14 +#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15 +#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16 +#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17 +#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18 +#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19 +#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20 +#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21 +#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22 +#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23 +#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24 +#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25 +#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26 +#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27 +#define 
LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28 +#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29 +#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30 +#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31 + +/// \brief Class which can simulate a type-safe variadic function. +/// +/// The VariadicFunction class template makes it easy to define +/// type-safe variadic functions where all arguments have the same +/// type. +/// +/// Suppose we need a variadic function like this: +/// +/// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N); +/// +/// Instead of many overloads of Foo(), we only need to define a helper +/// function that takes an array of arguments: +/// +/// ResultT FooImpl(ArrayRef<const ArgT *> Args) { +/// // 'Args[i]' is a pointer to the i-th argument passed to Foo(). +/// ... +/// } +/// +/// and then define Foo() like this: +/// +/// const VariadicFunction<ResultT, ArgT, FooImpl> Foo; +/// +/// VariadicFunction takes care of defining the overloads of Foo(). +/// +/// Actually, Foo is a function object (i.e. functor) instead of a plain +/// function. This object is stateless and its constructor/destructor +/// does nothing, so it's safe to create global objects and call Foo(...) at +/// any time. +/// +/// Sometimes we need a variadic function to have some fixed leading +/// arguments whose types may be different from that of the optional +/// arguments. For example: +/// +/// bool FullMatch(const StringRef &S, const RE &Regex, +/// const ArgT &A_0, ..., const ArgT &A_N); +/// +/// VariadicFunctionN is for such cases, where N is the number of fixed +/// arguments. It is like VariadicFunction, except that it takes N more +/// template arguments for the types of the fixed arguments: +/// +/// bool FullMatchImpl(const StringRef &S, const RE &Regex, +/// ArrayRef<const ArgT *> Args) { ... 
} +/// const VariadicFunction2<bool, const StringRef&, +/// const RE&, ArgT, FullMatchImpl> +/// FullMatch; +/// +/// Currently VariadicFunction and friends support up-to 3 +/// fixed leading arguments and up-to 32 optional arguments. +template <typename ResultT, typename ArgT, + ResultT (*Func)(ArrayRef<const ArgT *>)> +struct VariadicFunction { + ResultT operator()() const { + return Func(ArrayRef<const ArgT *>()); + } + +#define LLVM_DEFINE_OVERLOAD(N) \ + ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ + const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ + return Func(makeArrayRef(Args)); \ + } + LLVM_DEFINE_OVERLOAD(1) + LLVM_DEFINE_OVERLOAD(2) + LLVM_DEFINE_OVERLOAD(3) + LLVM_DEFINE_OVERLOAD(4) + LLVM_DEFINE_OVERLOAD(5) + LLVM_DEFINE_OVERLOAD(6) + LLVM_DEFINE_OVERLOAD(7) + LLVM_DEFINE_OVERLOAD(8) + LLVM_DEFINE_OVERLOAD(9) + LLVM_DEFINE_OVERLOAD(10) + LLVM_DEFINE_OVERLOAD(11) + LLVM_DEFINE_OVERLOAD(12) + LLVM_DEFINE_OVERLOAD(13) + LLVM_DEFINE_OVERLOAD(14) + LLVM_DEFINE_OVERLOAD(15) + LLVM_DEFINE_OVERLOAD(16) + LLVM_DEFINE_OVERLOAD(17) + LLVM_DEFINE_OVERLOAD(18) + LLVM_DEFINE_OVERLOAD(19) + LLVM_DEFINE_OVERLOAD(20) + LLVM_DEFINE_OVERLOAD(21) + LLVM_DEFINE_OVERLOAD(22) + LLVM_DEFINE_OVERLOAD(23) + LLVM_DEFINE_OVERLOAD(24) + LLVM_DEFINE_OVERLOAD(25) + LLVM_DEFINE_OVERLOAD(26) + LLVM_DEFINE_OVERLOAD(27) + LLVM_DEFINE_OVERLOAD(28) + LLVM_DEFINE_OVERLOAD(29) + LLVM_DEFINE_OVERLOAD(30) + LLVM_DEFINE_OVERLOAD(31) + LLVM_DEFINE_OVERLOAD(32) +#undef LLVM_DEFINE_OVERLOAD +}; + +template <typename ResultT, typename Param0T, typename ArgT, + ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)> +struct VariadicFunction1 { + ResultT operator()(Param0T P0) const { + return Func(P0, ArrayRef<const ArgT *>()); + } + +#define LLVM_DEFINE_OVERLOAD(N) \ + ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ + const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ + return Func(P0, makeArrayRef(Args)); \ + } + 
LLVM_DEFINE_OVERLOAD(1) + LLVM_DEFINE_OVERLOAD(2) + LLVM_DEFINE_OVERLOAD(3) + LLVM_DEFINE_OVERLOAD(4) + LLVM_DEFINE_OVERLOAD(5) + LLVM_DEFINE_OVERLOAD(6) + LLVM_DEFINE_OVERLOAD(7) + LLVM_DEFINE_OVERLOAD(8) + LLVM_DEFINE_OVERLOAD(9) + LLVM_DEFINE_OVERLOAD(10) + LLVM_DEFINE_OVERLOAD(11) + LLVM_DEFINE_OVERLOAD(12) + LLVM_DEFINE_OVERLOAD(13) + LLVM_DEFINE_OVERLOAD(14) + LLVM_DEFINE_OVERLOAD(15) + LLVM_DEFINE_OVERLOAD(16) + LLVM_DEFINE_OVERLOAD(17) + LLVM_DEFINE_OVERLOAD(18) + LLVM_DEFINE_OVERLOAD(19) + LLVM_DEFINE_OVERLOAD(20) + LLVM_DEFINE_OVERLOAD(21) + LLVM_DEFINE_OVERLOAD(22) + LLVM_DEFINE_OVERLOAD(23) + LLVM_DEFINE_OVERLOAD(24) + LLVM_DEFINE_OVERLOAD(25) + LLVM_DEFINE_OVERLOAD(26) + LLVM_DEFINE_OVERLOAD(27) + LLVM_DEFINE_OVERLOAD(28) + LLVM_DEFINE_OVERLOAD(29) + LLVM_DEFINE_OVERLOAD(30) + LLVM_DEFINE_OVERLOAD(31) + LLVM_DEFINE_OVERLOAD(32) +#undef LLVM_DEFINE_OVERLOAD +}; + +template <typename ResultT, typename Param0T, typename Param1T, typename ArgT, + ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)> +struct VariadicFunction2 { + ResultT operator()(Param0T P0, Param1T P1) const { + return Func(P0, P1, ArrayRef<const ArgT *>()); + } + +#define LLVM_DEFINE_OVERLOAD(N) \ + ResultT operator()(Param0T P0, Param1T P1, \ + LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ + const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ + return Func(P0, P1, makeArrayRef(Args)); \ + } + LLVM_DEFINE_OVERLOAD(1) + LLVM_DEFINE_OVERLOAD(2) + LLVM_DEFINE_OVERLOAD(3) + LLVM_DEFINE_OVERLOAD(4) + LLVM_DEFINE_OVERLOAD(5) + LLVM_DEFINE_OVERLOAD(6) + LLVM_DEFINE_OVERLOAD(7) + LLVM_DEFINE_OVERLOAD(8) + LLVM_DEFINE_OVERLOAD(9) + LLVM_DEFINE_OVERLOAD(10) + LLVM_DEFINE_OVERLOAD(11) + LLVM_DEFINE_OVERLOAD(12) + LLVM_DEFINE_OVERLOAD(13) + LLVM_DEFINE_OVERLOAD(14) + LLVM_DEFINE_OVERLOAD(15) + LLVM_DEFINE_OVERLOAD(16) + LLVM_DEFINE_OVERLOAD(17) + LLVM_DEFINE_OVERLOAD(18) + LLVM_DEFINE_OVERLOAD(19) + LLVM_DEFINE_OVERLOAD(20) + LLVM_DEFINE_OVERLOAD(21) + LLVM_DEFINE_OVERLOAD(22) + 
LLVM_DEFINE_OVERLOAD(23) + LLVM_DEFINE_OVERLOAD(24) + LLVM_DEFINE_OVERLOAD(25) + LLVM_DEFINE_OVERLOAD(26) + LLVM_DEFINE_OVERLOAD(27) + LLVM_DEFINE_OVERLOAD(28) + LLVM_DEFINE_OVERLOAD(29) + LLVM_DEFINE_OVERLOAD(30) + LLVM_DEFINE_OVERLOAD(31) + LLVM_DEFINE_OVERLOAD(32) +#undef LLVM_DEFINE_OVERLOAD +}; + +template <typename ResultT, typename Param0T, typename Param1T, + typename Param2T, typename ArgT, + ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)> +struct VariadicFunction3 { + ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const { + return Func(P0, P1, P2, ArrayRef<const ArgT *>()); + } + +#define LLVM_DEFINE_OVERLOAD(N) \ + ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \ + LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ + const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ + return Func(P0, P1, P2, makeArrayRef(Args)); \ + } + LLVM_DEFINE_OVERLOAD(1) + LLVM_DEFINE_OVERLOAD(2) + LLVM_DEFINE_OVERLOAD(3) + LLVM_DEFINE_OVERLOAD(4) + LLVM_DEFINE_OVERLOAD(5) + LLVM_DEFINE_OVERLOAD(6) + LLVM_DEFINE_OVERLOAD(7) + LLVM_DEFINE_OVERLOAD(8) + LLVM_DEFINE_OVERLOAD(9) + LLVM_DEFINE_OVERLOAD(10) + LLVM_DEFINE_OVERLOAD(11) + LLVM_DEFINE_OVERLOAD(12) + LLVM_DEFINE_OVERLOAD(13) + LLVM_DEFINE_OVERLOAD(14) + LLVM_DEFINE_OVERLOAD(15) + LLVM_DEFINE_OVERLOAD(16) + LLVM_DEFINE_OVERLOAD(17) + LLVM_DEFINE_OVERLOAD(18) + LLVM_DEFINE_OVERLOAD(19) + LLVM_DEFINE_OVERLOAD(20) + LLVM_DEFINE_OVERLOAD(21) + LLVM_DEFINE_OVERLOAD(22) + LLVM_DEFINE_OVERLOAD(23) + LLVM_DEFINE_OVERLOAD(24) + LLVM_DEFINE_OVERLOAD(25) + LLVM_DEFINE_OVERLOAD(26) + LLVM_DEFINE_OVERLOAD(27) + LLVM_DEFINE_OVERLOAD(28) + LLVM_DEFINE_OVERLOAD(29) + LLVM_DEFINE_OVERLOAD(30) + LLVM_DEFINE_OVERLOAD(31) + LLVM_DEFINE_OVERLOAD(32) +#undef LLVM_DEFINE_OVERLOAD +}; + +// Cleanup the macro namespace. 
+#undef LLVM_COMMA_JOIN1 +#undef LLVM_COMMA_JOIN2 +#undef LLVM_COMMA_JOIN3 +#undef LLVM_COMMA_JOIN4 +#undef LLVM_COMMA_JOIN5 +#undef LLVM_COMMA_JOIN6 +#undef LLVM_COMMA_JOIN7 +#undef LLVM_COMMA_JOIN8 +#undef LLVM_COMMA_JOIN9 +#undef LLVM_COMMA_JOIN10 +#undef LLVM_COMMA_JOIN11 +#undef LLVM_COMMA_JOIN12 +#undef LLVM_COMMA_JOIN13 +#undef LLVM_COMMA_JOIN14 +#undef LLVM_COMMA_JOIN15 +#undef LLVM_COMMA_JOIN16 +#undef LLVM_COMMA_JOIN17 +#undef LLVM_COMMA_JOIN18 +#undef LLVM_COMMA_JOIN19 +#undef LLVM_COMMA_JOIN20 +#undef LLVM_COMMA_JOIN21 +#undef LLVM_COMMA_JOIN22 +#undef LLVM_COMMA_JOIN23 +#undef LLVM_COMMA_JOIN24 +#undef LLVM_COMMA_JOIN25 +#undef LLVM_COMMA_JOIN26 +#undef LLVM_COMMA_JOIN27 +#undef LLVM_COMMA_JOIN28 +#undef LLVM_COMMA_JOIN29 +#undef LLVM_COMMA_JOIN30 +#undef LLVM_COMMA_JOIN31 +#undef LLVM_COMMA_JOIN32 + +} // end namespace llvm + +#endif // LLVM_ADT_VARIADIC_FUNCTION_H diff --git a/include/llvm/ADT/VectorExtras.h b/include/llvm/ADT/VectorExtras.h deleted file mode 100644 index e05f585..0000000 --- a/include/llvm/ADT/VectorExtras.h +++ /dev/null @@ -1,41 +0,0 @@ -//===-- llvm/ADT/VectorExtras.h - Helpers for std::vector -------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains helper functions which are useful for working with the -// std::vector class. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_ADT_VECTOREXTRAS_H -#define LLVM_ADT_VECTOREXTRAS_H - -#include <cstdarg> -#include <vector> - -namespace llvm { - -/// make_vector - Helper function which is useful for building temporary vectors -/// to pass into type construction of CallInst ctors. This turns a null -/// terminated list of pointers (or other value types) into a real live vector. 
-/// -template<typename T> -inline std::vector<T> make_vector(T A, ...) { - va_list Args; - va_start(Args, A); - std::vector<T> Result; - Result.push_back(A); - while (T Val = va_arg(Args, T)) - Result.push_back(Val); - va_end(Args); - return Result; -} - -} // End llvm namespace - -#endif diff --git a/include/llvm/ADT/edit_distance.h b/include/llvm/ADT/edit_distance.h new file mode 100644 index 0000000..f77ef13 --- /dev/null +++ b/include/llvm/ADT/edit_distance.h @@ -0,0 +1,102 @@ +//===-- llvm/ADT/edit_distance.h - Array edit distance function --- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a Levenshtein distance function that works for any two +// sequences, with each element of each sequence being analogous to a character +// in a string. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_EDIT_DISTANCE_H +#define LLVM_ADT_EDIT_DISTANCE_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/OwningPtr.h" +#include <algorithm> + +namespace llvm { + +/// \brief Determine the edit distance between two sequences. +/// +/// \param FromArray the first sequence to compare. +/// +/// \param ToArray the second sequence to compare. +/// +/// \param AllowReplacements whether to allow element replacements (change one +/// element into another) as a single operation, rather than as two operations +/// (an insertion and a removal). +/// +/// \param MaxEditDistance If non-zero, the maximum edit distance that this +/// routine is allowed to compute. If the edit distance will exceed that +/// maximum, returns \c MaxEditDistance+1. 
+/// +/// \returns the minimum number of element insertions, removals, or (if +/// \p AllowReplacements is \c true) replacements needed to transform one of +/// the given sequences into the other. If zero, the sequences are identical. +template<typename T> +unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray, + bool AllowReplacements = true, + unsigned MaxEditDistance = 0) { + // The algorithm implemented below is the "classic" + // dynamic-programming algorithm for computing the Levenshtein + // distance, which is described here: + // + // http://en.wikipedia.org/wiki/Levenshtein_distance + // + // Although the algorithm is typically described using an m x n + // array, only two rows are used at a time, so this implementation + // just keeps two separate vectors for those two rows. + typename ArrayRef<T>::size_type m = FromArray.size(); + typename ArrayRef<T>::size_type n = ToArray.size(); + + const unsigned SmallBufferSize = 64; + unsigned SmallBuffer[SmallBufferSize]; + llvm::OwningArrayPtr<unsigned> Allocated; + unsigned *Previous = SmallBuffer; + if (2*(n + 1) > SmallBufferSize) { + Previous = new unsigned [2*(n+1)]; + Allocated.reset(Previous); + } + unsigned *Current = Previous + (n + 1); + + for (unsigned i = 0; i <= n; ++i) + Previous[i] = i; + + for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) { + Current[0] = y; + unsigned BestThisRow = Current[0]; + + for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) { + if (AllowReplacements) { + Current[x] = std::min( + Previous[x-1] + (FromArray[y-1] == ToArray[x-1] ? 
0u : 1u), + std::min(Current[x-1], Previous[x])+1); + } + else { + if (FromArray[y-1] == ToArray[x-1]) Current[x] = Previous[x-1]; + else Current[x] = std::min(Current[x-1], Previous[x]) + 1; + } + BestThisRow = std::min(BestThisRow, Current[x]); + } + + if (MaxEditDistance && BestThisRow > MaxEditDistance) + return MaxEditDistance + 1; + + unsigned *tmp = Current; + Current = Previous; + Previous = tmp; + } + + unsigned Result = Previous[n]; + return Result; +} + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h index 7c9bdd9..b823f71 100644 --- a/include/llvm/Analysis/AliasAnalysis.h +++ b/include/llvm/Analysis/AliasAnalysis.h @@ -568,6 +568,11 @@ bool isNoAliasCall(const Value *V); /// bool isIdentifiedObject(const Value *V); +/// isKnownNonNull - Return true if this pointer couldn't possibly be null by +/// its definition. This returns true for allocas, non-extern-weak globals and +/// byval arguments. +bool isKnownNonNull(const Value *V); + } // End llvm namespace #endif diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h index c4ebe40..95626d6 100644 --- a/include/llvm/Analysis/AliasSetTracker.h +++ b/include/llvm/Analysis/AliasSetTracker.h @@ -264,6 +264,7 @@ private: } void setVolatile() { Volatile = true; } +public: /// aliasesPointer - Return true if the specified pointer "may" (or must) /// alias one of the members in the set. 
/// diff --git a/include/llvm/Analysis/BlockFrequencyImpl.h b/include/llvm/Analysis/BlockFrequencyImpl.h index a33cb1f..6f2ccfb 100644 --- a/include/llvm/Analysis/BlockFrequencyImpl.h +++ b/include/llvm/Analysis/BlockFrequencyImpl.h @@ -40,7 +40,7 @@ class MachineBlockFrequencyInfo; template<class BlockT, class FunctionT, class BlockProbInfoT> class BlockFrequencyImpl { - DenseMap<BlockT *, BlockFrequency> Freqs; + DenseMap<const BlockT *, BlockFrequency> Freqs; BlockProbInfoT *BPI; @@ -308,8 +308,9 @@ class BlockFrequencyImpl { public: /// getBlockFreq - Return block frequency. Return 0 if we don't have it. - BlockFrequency getBlockFreq(BlockT *BB) const { - typename DenseMap<BlockT *, BlockFrequency>::const_iterator I = Freqs.find(BB); + BlockFrequency getBlockFreq(const BlockT *BB) const { + typename DenseMap<const BlockT *, BlockFrequency>::const_iterator + I = Freqs.find(BB); if (I != Freqs.end()) return I->second; return 0; diff --git a/include/llvm/Analysis/BlockFrequencyInfo.h b/include/llvm/Analysis/BlockFrequencyInfo.h index 9e698a9..fcab906 100644 --- a/include/llvm/Analysis/BlockFrequencyInfo.h +++ b/include/llvm/Analysis/BlockFrequencyInfo.h @@ -47,7 +47,7 @@ public: /// that we should not rely on the value itself, but only on the comparison to /// the other block frequencies. We do this to avoid using of floating points. 
/// - BlockFrequency getBlockFreq(BasicBlock *BB) const; + BlockFrequency getBlockFreq(const BasicBlock *BB) const; }; } diff --git a/include/llvm/Analysis/CFGPrinter.h b/include/llvm/Analysis/CFGPrinter.h index 8f7ebcb..2cde838 100644 --- a/include/llvm/Analysis/CFGPrinter.h +++ b/include/llvm/Analysis/CFGPrinter.h @@ -95,7 +95,8 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits { std::string Str; raw_string_ostream OS(Str); - OS << SI->getCaseValue(SuccNo)->getValue(); + unsigned Case = SI->resolveCaseIndex(SuccNo); + OS << SI->getCaseValue(Case)->getValue(); return OS.str(); } return ""; diff --git a/include/llvm/Analysis/CaptureTracking.h b/include/llvm/Analysis/CaptureTracking.h index 01eca60..9b5e842 100644 --- a/include/llvm/Analysis/CaptureTracking.h +++ b/include/llvm/Analysis/CaptureTracking.h @@ -17,8 +17,6 @@ #include "llvm/Constants.h" #include "llvm/Instructions.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/ADT/SmallSet.h" -#include "llvm/ADT/SmallVector.h" #include "llvm/Support/CallSite.h" namespace llvm { @@ -50,10 +48,10 @@ namespace llvm { /// U->getUser() is always an Instruction. virtual bool shouldExplore(Use *U) = 0; - /// captured - The instruction I captured the pointer. Return true to - /// stop the traversal or false to continue looking for more capturing - /// instructions. - virtual bool captured(Instruction *I) = 0; + /// captured - Information about the pointer was captured by the user of + /// use U. Return true to stop the traversal or false to continue looking + /// for more capturing instructions. + virtual bool captured(Use *U) = 0; }; /// PointerMayBeCaptured - Visit the value and the values derived from it and diff --git a/include/llvm/Analysis/CodeMetrics.h b/include/llvm/Analysis/CodeMetrics.h index 5f78021..6d34781 100644 --- a/include/llvm/Analysis/CodeMetrics.h +++ b/include/llvm/Analysis/CodeMetrics.h @@ -31,8 +31,9 @@ namespace llvm { /// caller. 
// bool NeverInline; - // True if this function contains a call to setjmp or _setjmp - bool callsSetJmp; + // True if this function contains a call to setjmp or other functions + // with attribute "returns twice" without having the attribute itself. + bool exposesReturnsTwice; // True if this function calls itself bool isRecursive; @@ -66,7 +67,7 @@ namespace llvm { /// NumRets - Keep track of how many Ret instructions the block contains. unsigned NumRets; - CodeMetrics() : callsSetJmp(false), isRecursive(false), + CodeMetrics() : exposesReturnsTwice(false), isRecursive(false), containsIndirectBr(false), usesDynamicAlloca(false), NumInsts(0), NumBlocks(0), NumCalls(0), NumInlineCandidates(0), NumVectorInsts(0), diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h index 67bc2b3..2fdef5f 100644 --- a/include/llvm/Analysis/ConstantFolding.h +++ b/include/llvm/Analysis/ConstantFolding.h @@ -81,7 +81,14 @@ Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0); /// getelementptr constantexpr, return the constant value being addressed by the /// constant expression, or null if something is funny and we can't decide. Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE); - + +/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr +/// indices (with an *implied* zero pointer index that is not in the list), +/// return the constant value being addressed by a virtual load, or null if +/// something is funny and we can't decide. +Constant *ConstantFoldLoadThroughGEPIndices(Constant *C, + ArrayRef<Constant*> Indices); + /// canConstantFoldCallTo - Return true if its even possible to fold a call to /// the specified function. 
bool canConstantFoldCallTo(const Function *F); diff --git a/include/llvm/Analysis/DIBuilder.h b/include/llvm/Analysis/DIBuilder.h index ee24226..5190f0a 100644 --- a/include/llvm/Analysis/DIBuilder.h +++ b/include/llvm/Analysis/DIBuilder.h @@ -42,6 +42,7 @@ namespace llvm { class DISubprogram; class DITemplateTypeParameter; class DITemplateValueParameter; + class DIObjCProperty; class DIBuilder { private: @@ -190,6 +191,33 @@ namespace llvm { StringRef PropertySetterName = StringRef(), unsigned PropertyAttributes = 0); + /// createObjCIVar - Create debugging information entry for Objective-C + /// instance variable. + /// @param Name Member name. + /// @param File File where this member is defined. + /// @param LineNo Line number. + /// @param SizeInBits Member size. + /// @param AlignInBits Member alignment. + /// @param OffsetInBits Member offset. + /// @param Flags Flags to encode member attribute, e.g. private + /// @param Ty Parent type. + /// @param Property Property associated with this ivar. + DIType createObjCIVar(StringRef Name, DIFile File, + unsigned LineNo, uint64_t SizeInBits, + uint64_t AlignInBits, uint64_t OffsetInBits, + unsigned Flags, DIType Ty, + MDNode *PropertyNode); + + /// createObjCProperty - Create debugging information entry for Objective-C + /// property. + /// @param Name Property name. + /// @param GetterName Name of the Objective C property getter selector. + /// @param SetterName Name of the Objective C property setter selector. + /// @param PropertyAttributes Objective C property attributes. + DIObjCProperty createObjCProperty(StringRef Name, StringRef GetterName, + StringRef SetterName, + unsigned PropertyAttributes); + /// createClassType - Create debugging information entry for a class. /// @param Scope Scope in which this class is defined. /// @param Name class name. 
@@ -313,6 +341,10 @@ namespace llvm { DIType createTemporaryType(); DIType createTemporaryType(DIFile F); + /// createForwardDecl - Create a temporary forward-declared type. + DIType createForwardDecl(unsigned Tag, StringRef Name, DIFile F, + unsigned Line, unsigned RuntimeLang = 0); + /// retainType - Retain DIType in a module even if it is not referenced /// through debug info anchors. void retainType(DIType T); @@ -470,7 +502,7 @@ namespace llvm { /// @param Scope Lexical block. /// @param File Source file. DILexicalBlockFile createLexicalBlockFile(DIDescriptor Scope, - DIFile File); + DIFile File); /// createLexicalBlock - This creates a descriptor for a lexical block /// with the specified parent context. diff --git a/include/llvm/Analysis/DebugInfo.h b/include/llvm/Analysis/DebugInfo.h index c4489cf..1bbe8df 100644 --- a/include/llvm/Analysis/DebugInfo.h +++ b/include/llvm/Analysis/DebugInfo.h @@ -43,6 +43,7 @@ namespace llvm { class DILexicalBlockFile; class DIVariable; class DIType; + class DIObjCProperty; /// DIDescriptor - A thin wraper around MDNode to access encoded debug info. /// This should not be stored in a container, because underly MDNode may @@ -128,6 +129,7 @@ namespace llvm { bool isUnspecifiedParameter() const; bool isTemplateTypeParameter() const; bool isTemplateValueParameter() const; + bool isObjCProperty() const; }; /// DISubrange - This is used to represent ranges, for array bounds. @@ -153,6 +155,7 @@ namespace llvm { /// DIScope - A base class for various scopes. class DIScope : public DIDescriptor { + virtual void anchor(); public: explicit DIScope(const MDNode *N = 0) : DIDescriptor (N) {} virtual ~DIScope() {} @@ -163,6 +166,7 @@ namespace llvm { /// DICompileUnit - A wrapper for a compile unit. class DICompileUnit : public DIScope { + virtual void anchor(); public: explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {} @@ -202,6 +206,7 @@ namespace llvm { /// DIFile - This is a wrapper for a file. 
class DIFile : public DIScope { + virtual void anchor(); public: explicit DIFile(const MDNode *N = 0) : DIScope(N) { if (DbgNode && !isFile()) @@ -230,7 +235,7 @@ namespace llvm { /// FIXME: Types should be factored much better so that CV qualifiers and /// others do not require a huge and empty descriptor full of zeros. class DIType : public DIScope { - public: + virtual void anchor(); protected: // This ctor is used when the Tag has already been validated by a derived // ctor. @@ -240,7 +245,6 @@ namespace llvm { /// Verify - Verify that a type descriptor is well formed. bool Verify() const; - public: explicit DIType(const MDNode *N); explicit DIType() {} virtual ~DIType() {} @@ -320,6 +324,7 @@ namespace llvm { /// DIBasicType - A basic type, like 'int' or 'float'. class DIBasicType : public DIType { + virtual void anchor(); public: explicit DIBasicType(const MDNode *N = 0) : DIType(N) {} @@ -338,6 +343,7 @@ namespace llvm { /// DIDerivedType - A simple derived type, like a const qualified type, /// a typedef, a pointer or reference, etc. class DIDerivedType : public DIType { + virtual void anchor(); protected: explicit DIDerivedType(const MDNode *N, bool, bool) : DIType(N, true, true) {} @@ -351,29 +357,45 @@ namespace llvm { /// return base type size. uint64_t getOriginalTypeSize() const; - StringRef getObjCPropertyName() const { return getStringField(10); } + /// getObjCProperty - Return property node, if this ivar is + /// associated with one. 
+ MDNode *getObjCProperty() const; + + StringRef getObjCPropertyName() const { + if (getVersion() > LLVMDebugVersion11) + return StringRef(); + return getStringField(10); + } StringRef getObjCPropertyGetterName() const { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return getStringField(11); } StringRef getObjCPropertySetterName() const { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return getStringField(12); } bool isReadOnlyObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_readonly) != 0; } bool isReadWriteObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_readwrite) != 0; } bool isAssignObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_assign) != 0; } bool isRetainObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_retain) != 0; } bool isCopyObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_copy) != 0; } bool isNonAtomicObjCProperty() { + assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request"); return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_nonatomic) != 0; } @@ -391,6 +413,7 @@ namespace llvm { /// other types, like a function or struct. /// FIXME: Why is this a DIDerivedType?? class DICompositeType : public DIDerivedType { + virtual void anchor(); public: explicit DICompositeType(const MDNode *N = 0) : DIDerivedType(N, true, true) { @@ -454,6 +477,7 @@ namespace llvm { /// DISubprogram - This is a wrapper for a subprogram (e.g. a function). 
class DISubprogram : public DIScope { + virtual void anchor(); public: explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {} @@ -687,6 +711,7 @@ namespace llvm { /// DILexicalBlock - This is a wrapper for a lexical block. class DILexicalBlock : public DIScope { + virtual void anchor(); public: explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {} DIScope getContext() const { return getFieldAs<DIScope>(1); } @@ -705,6 +730,7 @@ namespace llvm { /// DILexicalBlockFile - This is a wrapper for a lexical block with /// a filename change. class DILexicalBlockFile : public DIScope { + virtual void anchor(); public: explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {} DIScope getContext() const { return getScope().getContext(); } @@ -724,6 +750,7 @@ namespace llvm { /// DINameSpace - A wrapper for a C++ style name space. class DINameSpace : public DIScope { + virtual void anchor(); public: explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {} DIScope getContext() const { return getFieldAs<DIScope>(1); } @@ -760,6 +787,46 @@ namespace llvm { bool Verify() const; }; + class DIObjCProperty : public DIDescriptor { + public: + explicit DIObjCProperty(const MDNode *N) : DIDescriptor(N) { } + + StringRef getObjCPropertyName() const { return getStringField(1); } + StringRef getObjCPropertyGetterName() const { + return getStringField(2); + } + StringRef getObjCPropertySetterName() const { + return getStringField(3); + } + bool isReadOnlyObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_readonly) != 0; + } + bool isReadWriteObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_readwrite) != 0; + } + bool isAssignObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_assign) != 0; + } + bool isRetainObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_retain) != 0; + } + bool isCopyObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_copy) != 0; + } + 
bool isNonAtomicObjCProperty() { + return (getUnsignedField(4) & dwarf::DW_APPLE_PROPERTY_nonatomic) != 0; + } + + /// Verify - Verify that a derived type descriptor is well formed. + bool Verify() const; + + /// print - print derived type. + void print(raw_ostream &OS) const; + + /// dump - print derived type to dbgs() with a newline. + void dump() const; + }; + /// getDISubprogram - Find subprogram that is enclosing this scope. DISubprogram getDISubprogram(const MDNode *Scope); diff --git a/include/llvm/Analysis/DominanceFrontier.h b/include/llvm/Analysis/DominanceFrontier.h index d7f74af..a2e0675 100644 --- a/include/llvm/Analysis/DominanceFrontier.h +++ b/include/llvm/Analysis/DominanceFrontier.h @@ -154,6 +154,7 @@ public: /// used to compute a forward dominator frontiers. /// class DominanceFrontier : public DominanceFrontierBase { + virtual void anchor(); public: static char ID; // Pass ID, replacement for typeid DominanceFrontier() : diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h index 15db2d1..c384925 100644 --- a/include/llvm/Analysis/Dominators.h +++ b/include/llvm/Analysis/Dominators.h @@ -321,8 +321,7 @@ public: /// block. This is the same as using operator[] on this class. /// inline DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const { - typename DomTreeNodeMapType::const_iterator I = DomTreeNodes.find(BB); - return I != DomTreeNodes.end() ? I->second : 0; + return DomTreeNodes.lookup(BB); } /// getRootNode - This returns the entry node for the CFG of the function. If @@ -623,9 +622,8 @@ protected: } DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) { - typename DomTreeNodeMapType::iterator I = this->DomTreeNodes.find(BB); - if (I != this->DomTreeNodes.end() && I->second) - return I->second; + if (DomTreeNodeBase<NodeT> *Node = getNode(BB)) + return Node; // Haven't calculated this node yet? Get or calculate the node for the // immediate dominator. 
@@ -641,8 +639,7 @@ protected: } inline NodeT *getIDom(NodeT *BB) const { - typename DenseMap<NodeT*, NodeT*>::const_iterator I = IDoms.find(BB); - return I != IDoms.end() ? I->second : 0; + return IDoms.lookup(BB); } inline void addRoot(NodeT* BB) { @@ -752,9 +749,11 @@ public: return DT->dominates(A, B); } - // dominates - Return true if A dominates B. This performs the - // special checks necessary if A and B are in the same basic block. - bool dominates(const Instruction *A, const Instruction *B) const; + // dominates - Return true if Def dominates a use in User. This performs + // the special checks necessary if Def and User are in the same basic block. + // Note that Def doesn't dominate a use in Def itself! + bool dominates(const Instruction *Def, const Instruction *User) const; + bool dominates(const Instruction *Def, const BasicBlock *BB) const; bool properlyDominates(const DomTreeNode *A, const DomTreeNode *B) const { return DT->properlyDominates(A, B); @@ -817,7 +816,7 @@ public: DT->splitBlock(NewBB); } - bool isReachableFromEntry(const BasicBlock* A) { + bool isReachableFromEntry(const BasicBlock* A) const { return DT->isReachableFromEntry(A); } diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h index 2fb607c..2a3bb9b 100644 --- a/include/llvm/Analysis/IVUsers.h +++ b/include/llvm/Analysis/IVUsers.h @@ -166,6 +166,10 @@ public: const_iterator end() const { return IVUses.end(); } bool empty() const { return IVUses.empty(); } + bool isIVUserOrOperand(Instruction *Inst) const { + return Processed.count(Inst); + } + void print(raw_ostream &OS, const Module* = 0) const; /// dump - This method is used for debugging. 
diff --git a/include/llvm/Analysis/IntervalIterator.h b/include/llvm/Analysis/IntervalIterator.h index 82b3294..0968c74 100644 --- a/include/llvm/Analysis/IntervalIterator.h +++ b/include/llvm/Analysis/IntervalIterator.h @@ -101,14 +101,14 @@ public: IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) { OrigContainer = M; if (!ProcessInterval(&M->front())) { - assert(0 && "ProcessInterval should never fail for first interval!"); + llvm_unreachable("ProcessInterval should never fail for first interval!"); } } IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) { OrigContainer = &IP; if (!ProcessInterval(IP.getRootInterval())) { - assert(0 && "ProcessInterval should never fail for first interval!"); + llvm_unreachable("ProcessInterval should never fail for first interval!"); } } diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h index 8717352..f807d48 100644 --- a/include/llvm/Analysis/LoopInfo.h +++ b/include/llvm/Analysis/LoopInfo.h @@ -655,9 +655,7 @@ public: /// block is in no loop (for example the entry node), null is returned. /// LoopT *getLoopFor(const BlockT *BB) const { - typename DenseMap<BlockT *, LoopT *>::const_iterator I= - BBMap.find(const_cast<BlockT*>(BB)); - return I != BBMap.end() ? I->second : 0; + return BBMap.lookup(const_cast<BlockT*>(BB)); } /// operator[] - same as getLoopFor... @@ -696,9 +694,7 @@ public: /// the loop hierarchy tree. void changeLoopFor(BlockT *BB, LoopT *L) { if (!L) { - typename DenseMap<BlockT *, LoopT *>::iterator I = BBMap.find(BB); - if (I != BBMap.end()) - BBMap.erase(I); + BBMap.erase(BB); return; } BBMap[BB] = L; @@ -755,7 +751,7 @@ public: } LoopT *ConsiderForLoop(BlockT *BB, DominatorTreeBase<BlockT> &DT) { - if (BBMap.find(BB) != BBMap.end()) return 0;// Haven't processed this node? + if (BBMap.count(BB)) return 0; // Haven't processed this node? 
std::vector<BlockT *> TodoStack; diff --git a/include/llvm/Analysis/ProfileInfo.h b/include/llvm/Analysis/ProfileInfo.h index 300a027..6c2e273 100644 --- a/include/llvm/Analysis/ProfileInfo.h +++ b/include/llvm/Analysis/ProfileInfo.h @@ -22,6 +22,7 @@ #define LLVM_ANALYSIS_PROFILEINFO_H #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" #include <cassert> @@ -85,13 +86,11 @@ namespace llvm { // getFunction() - Returns the Function for an Edge, checking for validity. static const FType* getFunction(Edge e) { - if (e.first) { + if (e.first) return e.first->getParent(); - } else if (e.second) { + if (e.second) return e.second->getParent(); - } - assert(0 && "Invalid ProfileInfo::Edge"); - return (const FType*)0; + llvm_unreachable("Invalid ProfileInfo::Edge"); } // getEdge() - Creates an Edge from two BasicBlocks. diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index 8661787..727bf1b 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -119,6 +119,10 @@ namespace llvm { /// bool isAllOnesValue() const; + /// isNonConstantNegative - Return true if the specified scev is negated, + /// but not a constant. + bool isNonConstantNegative() const; + /// print - Print out the internal representation of this scalar to the /// specified stream. This should really only be used for debugging /// purposes. @@ -726,16 +730,21 @@ namespace llvm { const SCEV *LHS, const SCEV *RHS); /// getSmallConstantTripCount - Returns the maximum trip count of this loop - /// as a normal unsigned value, if possible. Returns 0 if the trip count is - /// unknown or not constant. - unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitBlock); + /// as a normal unsigned value. Returns 0 if the trip count is unknown or + /// not constant. This "trip count" assumes that control exits via + /// ExitingBlock. 
More precisely, it is the number of times that control may + /// reach ExitingBlock before taking the branch. For loops with multiple + /// exits, it may not be the number times that the loop header executes if + /// the loop exits prematurely via another branch. + unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock); /// getSmallConstantTripMultiple - Returns the largest constant divisor of /// the trip count of this loop as a normal unsigned value, if /// possible. This means that the actual trip count is always a multiple of /// the returned value (don't forget the trip count could very well be zero - /// as well!). - unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitBlock); + /// as well!). As explained in the comments for getSmallConstantTripCount, + /// this assumes that control exits the loop via ExitingBlock. + unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock); // getExitCount - Get the expression for the number of loop iterations for // which this loop is guaranteed not to exit via ExitingBlock. Otherwise diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h index cd7e7f1..c22fc3a 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpander.h +++ b/include/llvm/Analysis/ScalarEvolutionExpander.h @@ -22,6 +22,8 @@ #include <set> namespace llvm { + class TargetLowering; + /// SCEVExpander - This class uses information about analyze scalars to /// rewrite expressions in canonical form. /// @@ -58,6 +60,9 @@ namespace llvm { /// insert the IV increment at this position. Instruction *IVIncInsertPos; + /// Phis that complete an IV chain. Reuse + std::set<AssertingVH<PHINode> > ChainedPhis; + /// CanonicalMode - When true, expressions are expanded in "canonical" /// form. In particular, addrecs are expanded as arithmetic based on /// a canonical induction variable. 
When false, expression are expanded @@ -100,6 +105,7 @@ namespace llvm { InsertedExpressions.clear(); InsertedValues.clear(); InsertedPostIncValues.clear(); + ChainedPhis.clear(); } /// getOrInsertCanonicalInductionVariable - This method returns the @@ -108,14 +114,18 @@ namespace llvm { /// starts at zero and steps by one on each iteration. PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty); - /// hoistStep - Utility for hoisting an IV increment. - static bool hoistStep(Instruction *IncV, Instruction *InsertPos, - const DominatorTree *DT); + /// getIVIncOperand - Return the induction variable increment's IV operand. + Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos, + bool allowScale); + + /// hoistIVInc - Utility for hoisting an IV increment. + bool hoistIVInc(Instruction *IncV, Instruction *InsertPos); /// replaceCongruentIVs - replace congruent phis with their most canonical /// representative. Return the number of phis eliminated. unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, - SmallVectorImpl<WeakVH> &DeadInsts); + SmallVectorImpl<WeakVH> &DeadInsts, + const TargetLowering *TLI = NULL); /// expandCodeFor - Insert code to directly compute the specified SCEV /// expression into the program. The inserted code is inserted into the @@ -161,6 +171,16 @@ namespace llvm { void clearInsertPoint() { Builder.ClearInsertionPoint(); } + + /// isInsertedInstruction - Return true if the specified instruction was + /// inserted by the code rewriter. If so, the client should not modify the + /// instruction. + bool isInsertedInstruction(Instruction *I) const { + return InsertedValues.count(I) || InsertedPostIncValues.count(I); + } + + void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); } + private: LLVMContext &getContext() const { return SE.getContext(); } @@ -195,13 +215,6 @@ namespace llvm { /// result will be expanded to have that type, with a cast if necessary. 
Value *expandCodeFor(const SCEV *SH, Type *Ty = 0); - /// isInsertedInstruction - Return true if the specified instruction was - /// inserted by the code rewriter. If so, the client should not modify the - /// instruction. - bool isInsertedInstruction(Instruction *I) const { - return InsertedValues.count(I) || InsertedPostIncValues.count(I); - } - /// getRelevantLoop - Determine the most "relevant" loop for the given SCEV. const Loop *getRelevantLoop(const SCEV *); diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h index b6f0ae5..47b3710 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpressions.h +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -491,7 +491,6 @@ namespace llvm { RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) { llvm_unreachable("Invalid use of SCEVCouldNotCompute!"); - return RetVal(); } }; } diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h index 85c659c..dfd774b 100644 --- a/include/llvm/Analysis/ValueTracking.h +++ b/include/llvm/Analysis/ValueTracking.h @@ -17,14 +17,13 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/DataTypes.h" -#include <string> namespace llvm { - template <typename T> class SmallVectorImpl; class Value; class Instruction; class APInt; class TargetData; + class StringRef; /// ComputeMaskedBits - Determine which of the bits specified in Mask are /// known to be either zero or one and return them in the KnownZero/KnownOne @@ -125,16 +124,15 @@ namespace llvm { return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD); } - /// GetConstantStringInfo - This function computes the length of a + /// getConstantStringInfo - This function computes the length of a /// null-terminated C string pointed to by V. If successful, it returns true - /// and returns the string in Str. If unsuccessful, it returns false. 
If - /// StopAtNul is set to true (the default), the returned string is truncated - /// by a nul character in the global. If StopAtNul is false, the nul - /// character is included in the result string. - bool GetConstantStringInfo(const Value *V, std::string &Str, - uint64_t Offset = 0, - bool StopAtNul = true); - + /// and returns the string in Str. If unsuccessful, it returns false. This + /// does not include the trailing nul character by default. If TrimAtNul is + /// set to false, then this returns any trailing nul characters as well as any + /// other characters that come after it. + bool getConstantStringInfo(const Value *V, StringRef &Str, + uint64_t Offset = 0, bool TrimAtNul = true); + /// GetStringLength - If we can compute the length of the string pointed to by /// the specified pointer, return 'len+1'. If we can't, return 0. uint64_t GetStringLength(Value *V); @@ -174,7 +172,7 @@ namespace llvm { /// the correct dominance relationships for the operands and users hold. /// However, this method can return true for instructions that read memory; /// for such instructions, moving them may change the resulting value. - bool isSafeToSpeculativelyExecute(const Instruction *Inst, + bool isSafeToSpeculativelyExecute(const Value *V, const TargetData *TD = 0); } // end namespace llvm diff --git a/include/llvm/Argument.h b/include/llvm/Argument.h index cd74882..e66075c 100644 --- a/include/llvm/Argument.h +++ b/include/llvm/Argument.h @@ -30,6 +30,7 @@ template<typename ValueSubClass, typename ItemParentClass> /// the function was called with. 
/// @brief LLVM Argument representation class Argument : public Value, public ilist_node<Argument> { + virtual void anchor(); Function *Parent; friend class SymbolTableListTraits<Argument, Function>; diff --git a/include/llvm/Attributes.h b/include/llvm/Attributes.h index 2d7b33b..0099f17 100644 --- a/include/llvm/Attributes.h +++ b/include/llvm/Attributes.h @@ -22,8 +22,66 @@ namespace llvm { class Type; +namespace Attribute { +/// We use this proxy POD type to allow constructing Attributes constants +/// using initializer lists. Do not use this class directly. +struct AttrConst { + uint64_t v; + AttrConst operator | (const AttrConst Attrs) const { + AttrConst Res = {v | Attrs.v}; + return Res; + } + AttrConst operator ~ () const { + AttrConst Res = {~v}; + return Res; + } +}; +} // namespace Attribute + + /// Attributes - A bitset of attributes. -typedef unsigned Attributes; +class Attributes { + public: + Attributes() : Bits(0) { } + explicit Attributes(uint64_t Val) : Bits(Val) { } + /*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { } + Attributes(const Attributes &Attrs) : Bits(Attrs.Bits) { } + // This is a "safe bool() operator". + operator const void *() const { return Bits ? 
this : 0; } + bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; } + Attributes &operator = (const Attributes &Attrs) { + Bits = Attrs.Bits; + return *this; + } + bool operator == (const Attributes &Attrs) const { + return Bits == Attrs.Bits; + } + bool operator != (const Attributes &Attrs) const { + return Bits != Attrs.Bits; + } + Attributes operator | (const Attributes &Attrs) const { + return Attributes(Bits | Attrs.Bits); + } + Attributes operator & (const Attributes &Attrs) const { + return Attributes(Bits & Attrs.Bits); + } + Attributes operator ^ (const Attributes &Attrs) const { + return Attributes(Bits ^ Attrs.Bits); + } + Attributes &operator |= (const Attributes &Attrs) { + Bits |= Attrs.Bits; + return *this; + } + Attributes &operator &= (const Attributes &Attrs) { + Bits &= Attrs.Bits; + return *this; + } + Attributes operator ~ () const { return Attributes(~Bits); } + uint64_t Raw() const { return Bits; } + private: + // Currently, we need less than 64 bits. + uint64_t Bits; +}; namespace Attribute { @@ -33,44 +91,55 @@ namespace Attribute { /// results or the function itself. /// @brief Function attributes. 
-const Attributes None = 0; ///< No attributes have been set -const Attributes ZExt = 1<<0; ///< Zero extended before/after call -const Attributes SExt = 1<<1; ///< Sign extended before/after call -const Attributes NoReturn = 1<<2; ///< Mark the function as not returning -const Attributes InReg = 1<<3; ///< Force argument to be passed in register -const Attributes StructRet = 1<<4; ///< Hidden pointer to structure to return -const Attributes NoUnwind = 1<<5; ///< Function doesn't unwind stack -const Attributes NoAlias = 1<<6; ///< Considered to not alias after call -const Attributes ByVal = 1<<7; ///< Pass structure by value -const Attributes Nest = 1<<8; ///< Nested function static chain -const Attributes ReadNone = 1<<9; ///< Function does not access memory -const Attributes ReadOnly = 1<<10; ///< Function only reads from memory -const Attributes NoInline = 1<<11; ///< inline=never -const Attributes AlwaysInline = 1<<12; ///< inline=always -const Attributes OptimizeForSize = 1<<13; ///< opt_size -const Attributes StackProtect = 1<<14; ///< Stack protection. -const Attributes StackProtectReq = 1<<15; ///< Stack protection required. -const Attributes Alignment = 31<<16; ///< Alignment of parameter (5 bits) +// We declare AttrConst objects that will be used throughout the code +// and also raw uint64_t objects with _i suffix to be used below for other +// constant declarations. This is done to avoid static CTORs and at the same +// time to keep type-safety of Attributes. 
+#define DECLARE_LLVM_ATTRIBUTE(name, value) \ + const uint64_t name##_i = value; \ + const AttrConst name = {value}; + +DECLARE_LLVM_ATTRIBUTE(None,0) ///< No attributes have been set +DECLARE_LLVM_ATTRIBUTE(ZExt,1<<0) ///< Zero extended before/after call +DECLARE_LLVM_ATTRIBUTE(SExt,1<<1) ///< Sign extended before/after call +DECLARE_LLVM_ATTRIBUTE(NoReturn,1<<2) ///< Mark the function as not returning +DECLARE_LLVM_ATTRIBUTE(InReg,1<<3) ///< Force argument to be passed in register +DECLARE_LLVM_ATTRIBUTE(StructRet,1<<4) ///< Hidden pointer to structure to return +DECLARE_LLVM_ATTRIBUTE(NoUnwind,1<<5) ///< Function doesn't unwind stack +DECLARE_LLVM_ATTRIBUTE(NoAlias,1<<6) ///< Considered to not alias after call +DECLARE_LLVM_ATTRIBUTE(ByVal,1<<7) ///< Pass structure by value +DECLARE_LLVM_ATTRIBUTE(Nest,1<<8) ///< Nested function static chain +DECLARE_LLVM_ATTRIBUTE(ReadNone,1<<9) ///< Function does not access memory +DECLARE_LLVM_ATTRIBUTE(ReadOnly,1<<10) ///< Function only reads from memory +DECLARE_LLVM_ATTRIBUTE(NoInline,1<<11) ///< inline=never +DECLARE_LLVM_ATTRIBUTE(AlwaysInline,1<<12) ///< inline=always +DECLARE_LLVM_ATTRIBUTE(OptimizeForSize,1<<13) ///< opt_size +DECLARE_LLVM_ATTRIBUTE(StackProtect,1<<14) ///< Stack protection. +DECLARE_LLVM_ATTRIBUTE(StackProtectReq,1<<15) ///< Stack protection required. +DECLARE_LLVM_ATTRIBUTE(Alignment,31<<16) ///< Alignment of parameter (5 bits) // stored as log2 of alignment with +1 bias // 0 means unaligned different from align 1 -const Attributes NoCapture = 1<<21; ///< Function creates no aliases of pointer -const Attributes NoRedZone = 1<<22; /// disable redzone -const Attributes NoImplicitFloat = 1<<23; /// disable implicit floating point - /// instructions. 
-const Attributes Naked = 1<<24; ///< Naked function -const Attributes InlineHint = 1<<25; ///< source said inlining was - ///desirable -const Attributes StackAlignment = 7<<26; ///< Alignment of stack for - ///function (3 bits) stored as log2 - ///of alignment with +1 bias - ///0 means unaligned (different from - ///alignstack(1)) -const Attributes ReturnsTwice = 1<<29; ///< Function can return twice -const Attributes UWTable = 1<<30; ///< Function must be in a unwind - ///table -const Attributes NonLazyBind = 1U<<31; ///< Function is called early and/or - /// often, so lazy binding isn't - /// worthwhile. +DECLARE_LLVM_ATTRIBUTE(NoCapture,1<<21) ///< Function creates no aliases of pointer +DECLARE_LLVM_ATTRIBUTE(NoRedZone,1<<22) /// disable redzone +DECLARE_LLVM_ATTRIBUTE(NoImplicitFloat,1<<23) /// disable implicit floating point + /// instructions. +DECLARE_LLVM_ATTRIBUTE(Naked,1<<24) ///< Naked function +DECLARE_LLVM_ATTRIBUTE(InlineHint,1<<25) ///< source said inlining was + ///desirable +DECLARE_LLVM_ATTRIBUTE(StackAlignment,7<<26) ///< Alignment of stack for + ///function (3 bits) stored as log2 + ///of alignment with +1 bias + ///0 means unaligned (different from + ///alignstack= {1)) +DECLARE_LLVM_ATTRIBUTE(ReturnsTwice,1<<29) ///< Function can return twice +DECLARE_LLVM_ATTRIBUTE(UWTable,1<<30) ///< Function must be in a unwind + ///table +DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or + /// often, so lazy binding isn't + /// worthwhile. +DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on. + +#undef DECLARE_LLVM_ATTRIBUTE /// Note that uwtable is about the ABI or the user mandating an entry in the /// unwind table. The nounwind attribute is about an exception passing by the @@ -85,24 +154,26 @@ const Attributes NonLazyBind = 1U<<31; ///< Function is called early and/or /// uwtable + nounwind = Needs an entry because the ABI says so. 
/// @brief Attributes that only apply to function parameters. -const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture; +const AttrConst ParameterOnly = {ByVal_i | Nest_i | + StructRet_i | NoCapture_i}; /// @brief Attributes that may be applied to the function itself. These cannot /// be used on return values or function parameters. -const Attributes FunctionOnly = NoReturn | NoUnwind | ReadNone | ReadOnly | - NoInline | AlwaysInline | OptimizeForSize | StackProtect | StackProtectReq | - NoRedZone | NoImplicitFloat | Naked | InlineHint | StackAlignment | - UWTable | NonLazyBind | ReturnsTwice; +const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i | + ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i | + StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i | + Naked_i | InlineHint_i | StackAlignment_i | + UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i}; /// @brief Parameter attributes that do not apply to vararg call arguments. -const Attributes VarArgsIncompatible = StructRet; +const AttrConst VarArgsIncompatible = {StructRet_i}; /// @brief Attributes that are mutually incompatible. -const Attributes MutuallyIncompatible[4] = { - ByVal | InReg | Nest | StructRet, - ZExt | SExt, - ReadNone | ReadOnly, - NoInline | AlwaysInline +const AttrConst MutuallyIncompatible[4] = { + {ByVal_i | InReg_i | Nest_i | StructRet_i}, + {ZExt_i | SExt_i}, + {ReadNone_i | ReadOnly_i}, + {NoInline_i | AlwaysInline_i} }; /// @brief Which attributes cannot be applied to a type. @@ -113,20 +184,20 @@ Attributes typeIncompatible(Type *Ty); inline Attributes constructAlignmentFromInt(unsigned i) { // Default alignment, allow the target to define how to align it. 
if (i == 0) - return 0; + return None; assert(isPowerOf2_32(i) && "Alignment must be a power of two."); assert(i <= 0x40000000 && "Alignment too large."); - return (Log2_32(i)+1) << 16; + return Attributes((Log2_32(i)+1) << 16); } /// This returns the alignment field of an attribute as a byte alignment value. inline unsigned getAlignmentFromAttrs(Attributes A) { Attributes Align = A & Attribute::Alignment; - if (Align == 0) + if (!Align) return 0; - return 1U << ((Align >> 16) - 1); + return 1U << ((Align.Raw() >> 16) - 1); } /// This turns an int stack alignment (which must be a power of 2) into @@ -134,21 +205,21 @@ inline unsigned getAlignmentFromAttrs(Attributes A) { inline Attributes constructStackAlignmentFromInt(unsigned i) { // Default alignment, allow the target to define how to align it. if (i == 0) - return 0; + return None; assert(isPowerOf2_32(i) && "Alignment must be a power of two."); assert(i <= 0x100 && "Alignment too large."); - return (Log2_32(i)+1) << 26; + return Attributes((Log2_32(i)+1) << 26); } /// This returns the stack alignment field of an attribute as a byte alignment /// value. inline unsigned getStackAlignmentFromAttrs(Attributes A) { Attributes StackAlign = A & Attribute::StackAlignment; - if (StackAlign == 0) + if (!StackAlign) return 0; - return 1U << ((StackAlign >> 26) - 1); + return 1U << ((StackAlign.Raw() >> 26) - 1); } @@ -242,7 +313,7 @@ public: /// paramHasAttr - Return true if the specified parameter index has the /// specified attribute set. 
bool paramHasAttr(unsigned Idx, Attributes Attr) const { - return (getAttributes(Idx) & Attr) != 0; + return getAttributes(Idx) & Attr; } /// getParamAlignment - Return the alignment for the specified function diff --git a/include/llvm/BasicBlock.h b/include/llvm/BasicBlock.h index da4f921..d2aa167 100644 --- a/include/llvm/BasicBlock.h +++ b/include/llvm/BasicBlock.h @@ -268,6 +268,7 @@ public: /// getLandingPadInst() - Return the landingpad instruction associated with /// the landing pad. LandingPadInst *getLandingPadInst(); + const LandingPadInst *getLandingPadInst() const; private: /// AdjustBlockAddressRefCount - BasicBlock stores the number of BlockAddress diff --git a/include/llvm/Bitcode/Archive.h b/include/llvm/Bitcode/Archive.h index f89a86c..86c44c7 100644 --- a/include/llvm/Bitcode/Archive.h +++ b/include/llvm/Bitcode/Archive.h @@ -394,7 +394,7 @@ class Archive { /// @brief Look up multiple symbols in the archive. bool findModulesDefiningSymbols( std::set<std::string>& symbols, ///< Symbols to be sought - std::set<Module*>& modules, ///< The modules matching \p symbols + SmallVectorImpl<Module*>& modules, ///< The modules matching \p symbols std::string* ErrMessage ///< Error msg storage, if non-zero ); diff --git a/include/llvm/Bitcode/BitCodes.h b/include/llvm/Bitcode/BitCodes.h index 449dc35..28e1ab1 100644 --- a/include/llvm/Bitcode/BitCodes.h +++ b/include/llvm/Bitcode/BitCodes.h @@ -20,6 +20,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include <cassert> namespace llvm { @@ -114,7 +115,6 @@ public: bool hasEncodingData() const { return hasEncodingData(getEncoding()); } static bool hasEncodingData(Encoding E) { switch (E) { - default: assert(0 && "Unknown encoding"); case Fixed: case VBR: return true; @@ -123,6 +123,7 @@ public: case Blob: return false; } + llvm_unreachable("Invalid encoding"); } /// isChar6 - Return true if this character is legal in the Char6 encoding. 
@@ -139,8 +140,7 @@ public: if (C >= '0' && C <= '9') return C-'0'+26+26; if (C == '.') return 62; if (C == '_') return 63; - assert(0 && "Not a value Char6 character!"); - return 0; + llvm_unreachable("Not a value Char6 character!"); } static char DecodeChar6(unsigned V) { @@ -150,17 +150,18 @@ public: if (V < 26+26+10) return V-26-26+'0'; if (V == 62) return '.'; if (V == 63) return '_'; - assert(0 && "Not a value Char6 character!"); - return ' '; + llvm_unreachable("Not a value Char6 character!"); } }; +template <> struct isPodLike<BitCodeAbbrevOp> { static const bool value=true; }; + /// BitCodeAbbrev - This class represents an abbreviation record. An /// abbreviation allows a complex record that has redundancy to be stored in a /// specialized format instead of the fully-general, fully-vbr, format. class BitCodeAbbrev { - SmallVector<BitCodeAbbrevOp, 8> OperandList; + SmallVector<BitCodeAbbrevOp, 32> OperandList; unsigned char RefCount; // Number of things using this. ~BitCodeAbbrev() {} public: diff --git a/include/llvm/Bitcode/BitstreamReader.h b/include/llvm/Bitcode/BitstreamReader.h index 0437f53..6586829 100644 --- a/include/llvm/Bitcode/BitstreamReader.h +++ b/include/llvm/Bitcode/BitstreamReader.h @@ -15,7 +15,10 @@ #ifndef BITSTREAM_READER_H #define BITSTREAM_READER_H +#include "llvm/ADT/OwningPtr.h" #include "llvm/Bitcode/BitCodes.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/StreamableMemoryObject.h" #include <climits> #include <string> #include <vector> @@ -36,9 +39,7 @@ public: std::vector<std::pair<unsigned, std::string> > RecordNames; }; private: - /// FirstChar/LastChar - This remembers the first and last bytes of the - /// stream. - const unsigned char *FirstChar, *LastChar; + OwningPtr<StreamableMemoryObject> BitcodeBytes; std::vector<BlockInfo> BlockInfoRecords; @@ -47,10 +48,10 @@ private: /// uses this. 
bool IgnoreBlockInfoNames; - BitstreamReader(const BitstreamReader&); // NOT IMPLEMENTED - void operator=(const BitstreamReader&); // NOT IMPLEMENTED + BitstreamReader(const BitstreamReader&); // DO NOT IMPLEMENT + void operator=(const BitstreamReader&); // DO NOT IMPLEMENT public: - BitstreamReader() : FirstChar(0), LastChar(0), IgnoreBlockInfoNames(true) { + BitstreamReader() : IgnoreBlockInfoNames(true) { } BitstreamReader(const unsigned char *Start, const unsigned char *End) { @@ -58,12 +59,17 @@ public: init(Start, End); } + BitstreamReader(StreamableMemoryObject *bytes) { + BitcodeBytes.reset(bytes); + } + void init(const unsigned char *Start, const unsigned char *End) { - FirstChar = Start; - LastChar = End; assert(((End-Start) & 3) == 0 &&"Bitcode stream not a multiple of 4 bytes"); + BitcodeBytes.reset(getNonStreamedMemoryObject(Start, End)); } + StreamableMemoryObject &getBitcodeBytes() { return *BitcodeBytes; } + ~BitstreamReader() { // Free the BlockInfoRecords. while (!BlockInfoRecords.empty()) { @@ -75,9 +81,6 @@ public: BlockInfoRecords.pop_back(); } } - - const unsigned char *getFirstChar() const { return FirstChar; } - const unsigned char *getLastChar() const { return LastChar; } /// CollectBlockInfoNames - This is called by clients that want block/record /// name information. @@ -122,7 +125,7 @@ public: class BitstreamCursor { friend class Deserializer; BitstreamReader *BitStream; - const unsigned char *NextChar; + size_t NextChar; /// CurWord - This is the current data we have pulled from the stream but have /// not returned to the client. 
@@ -156,8 +159,7 @@ public: } explicit BitstreamCursor(BitstreamReader &R) : BitStream(&R) { - NextChar = R.getFirstChar(); - assert(NextChar && "Bitstream not initialized yet"); + NextChar = 0; CurWord = 0; BitsInCurWord = 0; CurCodeSize = 2; @@ -167,8 +169,7 @@ public: freeState(); BitStream = &R; - NextChar = R.getFirstChar(); - assert(NextChar && "Bitstream not initialized yet"); + NextChar = 0; CurWord = 0; BitsInCurWord = 0; CurCodeSize = 2; @@ -225,13 +226,39 @@ public: /// GetAbbrevIDWidth - Return the number of bits used to encode an abbrev #. unsigned GetAbbrevIDWidth() const { return CurCodeSize; } - bool AtEndOfStream() const { - return NextChar == BitStream->getLastChar() && BitsInCurWord == 0; + bool isEndPos(size_t pos) { + return BitStream->getBitcodeBytes().isObjectEnd(static_cast<uint64_t>(pos)); + } + + bool canSkipToPos(size_t pos) const { + // pos can be skipped to if it is a valid address or one byte past the end. + return pos == 0 || BitStream->getBitcodeBytes().isValidAddress( + static_cast<uint64_t>(pos - 1)); + } + + unsigned char getByte(size_t pos) { + uint8_t byte = -1; + BitStream->getBitcodeBytes().readByte(pos, &byte); + return byte; + } + + uint32_t getWord(size_t pos) { + uint8_t buf[sizeof(uint32_t)]; + memset(buf, 0xFF, sizeof(buf)); + BitStream->getBitcodeBytes().readBytes(pos, + sizeof(buf), + buf, + NULL); + return *reinterpret_cast<support::ulittle32_t *>(buf); + } + + bool AtEndOfStream() { + return isEndPos(NextChar) && BitsInCurWord == 0; } /// GetCurrentBitNo - Return the bit # of the bit we are reading. 
uint64_t GetCurrentBitNo() const { - return (NextChar-BitStream->getFirstChar())*CHAR_BIT - BitsInCurWord; + return NextChar*CHAR_BIT - BitsInCurWord; } BitstreamReader *getBitStreamReader() { @@ -246,12 +273,10 @@ public: void JumpToBit(uint64_t BitNo) { uintptr_t ByteNo = uintptr_t(BitNo/8) & ~3; uintptr_t WordBitNo = uintptr_t(BitNo) & 31; - assert(ByteNo <= (uintptr_t)(BitStream->getLastChar()- - BitStream->getFirstChar()) && - "Invalid location"); + assert(canSkipToPos(ByteNo) && "Invalid location"); // Move the cursor to the right word. - NextChar = BitStream->getFirstChar()+ByteNo; + NextChar = ByteNo; BitsInCurWord = 0; CurWord = 0; @@ -272,7 +297,7 @@ public: } // If we run out of data, stop at the end of the stream. - if (NextChar == BitStream->getLastChar()) { + if (isEndPos(NextChar)) { CurWord = 0; BitsInCurWord = 0; return 0; @@ -281,8 +306,7 @@ public: unsigned R = CurWord; // Read the next word from the stream. - CurWord = (NextChar[0] << 0) | (NextChar[1] << 8) | - (NextChar[2] << 16) | (NextChar[3] << 24); + CurWord = getWord(NextChar); NextChar += 4; // Extract NumBits-BitsInCurWord from what we just read. @@ -376,9 +400,8 @@ public: // Check that the block wasn't partially defined, and that the offset isn't // bogus. - const unsigned char *const SkipTo = NextChar + NumWords*4; - if (AtEndOfStream() || SkipTo > BitStream->getLastChar() || - SkipTo < BitStream->getFirstChar()) + size_t SkipTo = NextChar + NumWords*4; + if (AtEndOfStream() || !canSkipToPos(SkipTo)) return true; NextChar = SkipTo; @@ -409,8 +432,7 @@ public: if (NumWordsP) *NumWordsP = NumWords; // Validate that this block is sane. 
- if (CurCodeSize == 0 || AtEndOfStream() || - NextChar+NumWords*4 > BitStream->getLastChar()) + if (CurCodeSize == 0 || AtEndOfStream()) return true; return false; @@ -455,10 +477,10 @@ private: void ReadAbbreviatedField(const BitCodeAbbrevOp &Op, SmallVectorImpl<uint64_t> &Vals) { assert(!Op.isLiteral() && "Use ReadAbbreviatedLiteral for literals!"); - + // Decode the value as we are commanded. switch (Op.getEncoding()) { - default: assert(0 && "Unknown encoding!"); + default: llvm_unreachable("Unknown encoding!"); case BitCodeAbbrevOp::Fixed: Vals.push_back(Read((unsigned)Op.getEncodingData())); break; @@ -512,24 +534,25 @@ public: SkipToWord(); // 32-bit alignment // Figure out where the end of this blob will be including tail padding. - const unsigned char *NewEnd = NextChar+((NumElts+3)&~3); + size_t NewEnd = NextChar+((NumElts+3)&~3); // If this would read off the end of the bitcode file, just set the // record to empty and return. - if (NewEnd > BitStream->getLastChar()) { + if (!canSkipToPos(NewEnd)) { Vals.append(NumElts, 0); - NextChar = BitStream->getLastChar(); + NextChar = BitStream->getBitcodeBytes().getExtent(); break; } // Otherwise, read the number of bytes. If we can return a reference to // the data, do so to avoid copying it. if (BlobStart) { - *BlobStart = (const char*)NextChar; + *BlobStart = (const char*)BitStream->getBitcodeBytes().getPointer( + NextChar, NumElts); *BlobLen = NumElts; } else { for (; NumElts; ++NextChar, --NumElts) - Vals.push_back(*NextChar); + Vals.push_back(getByte(NextChar)); } // Skip over tail padding. 
NextChar = NewEnd; diff --git a/include/llvm/Bitcode/BitstreamWriter.h b/include/llvm/Bitcode/BitstreamWriter.h index bfb3a4e..42c68aa 100644 --- a/include/llvm/Bitcode/BitstreamWriter.h +++ b/include/llvm/Bitcode/BitstreamWriter.h @@ -16,13 +16,14 @@ #define BITSTREAM_WRITER_H #include "llvm/ADT/StringRef.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/Bitcode/BitCodes.h" #include <vector> namespace llvm { class BitstreamWriter { - std::vector<unsigned char> &Out; + SmallVectorImpl<char> &Out; /// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use. unsigned CurBit; @@ -59,8 +60,40 @@ class BitstreamWriter { }; std::vector<BlockInfo> BlockInfoRecords; + // BackpatchWord - Backpatch a 32-bit word in the output with the specified + // value. + void BackpatchWord(unsigned ByteNo, unsigned NewWord) { + Out[ByteNo++] = (unsigned char)(NewWord >> 0); + Out[ByteNo++] = (unsigned char)(NewWord >> 8); + Out[ByteNo++] = (unsigned char)(NewWord >> 16); + Out[ByteNo ] = (unsigned char)(NewWord >> 24); + } + + void WriteByte(unsigned char Value) { + Out.push_back(Value); + } + + void WriteWord(unsigned Value) { + unsigned char Bytes[4] = { + (unsigned char)(Value >> 0), + (unsigned char)(Value >> 8), + (unsigned char)(Value >> 16), + (unsigned char)(Value >> 24) }; + Out.append(&Bytes[0], &Bytes[4]); + } + + unsigned GetBufferOffset() const { + return Out.size(); + } + + unsigned GetWordIndex() const { + unsigned Offset = GetBufferOffset(); + assert((Offset & 3) == 0 && "Not 32-bit aligned"); + return Offset / 4; + } + public: - explicit BitstreamWriter(std::vector<unsigned char> &O) + explicit BitstreamWriter(SmallVectorImpl<char> &O) : Out(O), CurBit(0), CurValue(0), CurCodeSize(2) {} ~BitstreamWriter() { @@ -78,10 +111,8 @@ public: } } - std::vector<unsigned char> &getBuffer() { return Out; } - /// \brief Retrieve the current position in the stream, in bits. 
- uint64_t GetCurrentBitNo() const { return Out.size() * 8 + CurBit; } + uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; } //===--------------------------------------------------------------------===// // Basic Primitives for emitting bits to the stream. @@ -97,11 +128,7 @@ public: } // Add the current word. - unsigned V = CurValue; - Out.push_back((unsigned char)(V >> 0)); - Out.push_back((unsigned char)(V >> 8)); - Out.push_back((unsigned char)(V >> 16)); - Out.push_back((unsigned char)(V >> 24)); + WriteWord(CurValue); if (CurBit) CurValue = Val >> (32-CurBit); @@ -121,11 +148,7 @@ public: void FlushToWord() { if (CurBit) { - unsigned V = CurValue; - Out.push_back((unsigned char)(V >> 0)); - Out.push_back((unsigned char)(V >> 8)); - Out.push_back((unsigned char)(V >> 16)); - Out.push_back((unsigned char)(V >> 24)); + WriteWord(CurValue); CurBit = 0; CurValue = 0; } @@ -164,15 +187,6 @@ public: Emit(Val, CurCodeSize); } - // BackpatchWord - Backpatch a 32-bit word in the output with the specified - // value. - void BackpatchWord(unsigned ByteNo, unsigned NewWord) { - Out[ByteNo++] = (unsigned char)(NewWord >> 0); - Out[ByteNo++] = (unsigned char)(NewWord >> 8); - Out[ByteNo++] = (unsigned char)(NewWord >> 16); - Out[ByteNo ] = (unsigned char)(NewWord >> 24); - } - //===--------------------------------------------------------------------===// // Block Manipulation //===--------------------------------------------------------------------===// @@ -199,7 +213,7 @@ public: EmitVBR(CodeLen, bitc::CodeLenWidth); FlushToWord(); - unsigned BlockSizeWordLoc = static_cast<unsigned>(Out.size()); + unsigned BlockSizeWordIndex = GetWordIndex(); unsigned OldCodeSize = CurCodeSize; // Emit a placeholder, which will be replaced when the block is popped. @@ -209,7 +223,7 @@ public: // Push the outer block's abbrev set onto the stack, start out with an // empty abbrev set. 
- BlockScope.push_back(Block(OldCodeSize, BlockSizeWordLoc/4)); + BlockScope.push_back(Block(OldCodeSize, BlockSizeWordIndex)); BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); // If there is a blockinfo for this BlockID, add all the predefined abbrevs @@ -239,7 +253,7 @@ public: FlushToWord(); // Compute the size of the block, in words, not counting the size field. - unsigned SizeInWords= static_cast<unsigned>(Out.size())/4-B.StartSizeWord-1; + unsigned SizeInWords = GetWordIndex() - B.StartSizeWord - 1; unsigned ByteNo = B.StartSizeWord*4; // Update the block size field in the header of this sub-block. @@ -275,7 +289,7 @@ private: // Encode the value as we are commanded. switch (Op.getEncoding()) { - default: assert(0 && "Unknown encoding!"); + default: llvm_unreachable("Unknown encoding!"); case BitCodeAbbrevOp::Fixed: if (Op.getEncodingData()) Emit((unsigned)V, (unsigned)Op.getEncodingData()); @@ -355,25 +369,24 @@ private: // Flush to a 32-bit alignment boundary. FlushToWord(); - assert((Out.size() & 3) == 0 && "Not 32-bit aligned"); // Emit each field as a literal byte. if (BlobData) { for (unsigned i = 0; i != BlobLen; ++i) - Out.push_back((unsigned char)BlobData[i]); + WriteByte((unsigned char)BlobData[i]); // Know that blob data is consumed for assertion below. BlobData = 0; } else { for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) { assert(Vals[RecordIdx] < 256 && "Value too large to emit as blob"); - Out.push_back((unsigned char)Vals[RecordIdx]); + WriteByte((unsigned char)Vals[RecordIdx]); } } + // Align end to 32-bits. - while (Out.size() & 3) - Out.push_back(0); - + while (GetBufferOffset() & 3) + WriteByte(0); } else { // Single scalar field. 
assert(RecordIdx < Vals.size() && "Invalid abbrev/record"); EmitAbbreviatedField(Op, Vals[RecordIdx]); diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h index 48d1695..a8c34cb 100644 --- a/include/llvm/Bitcode/LLVMBitCodes.h +++ b/include/llvm/Bitcode/LLVMBitCodes.h @@ -94,7 +94,7 @@ namespace bitc { TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty, // paramty x N] - // Code #10 is unused. + TYPE_CODE_HALF = 10, // HALF TYPE_CODE_ARRAY = 11, // ARRAY: [numelts, eltty] TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty] @@ -164,7 +164,8 @@ namespace bitc { CST_CODE_INLINEASM = 18, // INLINEASM: [sideeffect,asmstr,conststr] CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval] CST_CODE_CE_INBOUNDS_GEP = 20,// INBOUNDS_GEP: [n x operands] - CST_CODE_BLOCKADDRESS = 21 // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#] + CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#] + CST_CODE_DATA = 22 // DATA: [n x elements] }; /// CastOpcodes - These are values used in the bitcode files to encode which @@ -271,7 +272,7 @@ namespace bitc { FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#] FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, op0, op1, ...] FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...] - FUNC_CODE_INST_UNWIND = 14, // UNWIND + // 14 is unused. FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE FUNC_CODE_INST_PHI = 16, // PHI: [ty, val0,bb0, ...] 
diff --git a/include/llvm/Bitcode/ReaderWriter.h b/include/llvm/Bitcode/ReaderWriter.h index fa754c0..cc2b473 100644 --- a/include/llvm/Bitcode/ReaderWriter.h +++ b/include/llvm/Bitcode/ReaderWriter.h @@ -17,35 +17,45 @@ #include <string> namespace llvm { - class Module; - class MemoryBuffer; - class ModulePass; class BitstreamWriter; + class MemoryBuffer; + class DataStreamer; class LLVMContext; + class Module; + class ModulePass; class raw_ostream; - + /// getLazyBitcodeModule - Read the header of the specified bitcode buffer /// and prepare for lazy deserialization of function bodies. If successful, /// this takes ownership of 'buffer' and returns a non-null pointer. On /// error, this returns null, *does not* take ownership of Buffer, and fills /// in *ErrMsg with an error description if ErrMsg is non-null. Module *getLazyBitcodeModule(MemoryBuffer *Buffer, - LLVMContext& Context, + LLVMContext &Context, std::string *ErrMsg = 0); + /// getStreamedBitcodeModule - Read the header of the specified stream + /// and prepare for lazy deserialization and streaming of function bodies. + /// On error, this returns null, and fills in *ErrMsg with an error + /// description if ErrMsg is non-null. + Module *getStreamedBitcodeModule(const std::string &name, + DataStreamer *streamer, + LLVMContext &Context, + std::string *ErrMsg = 0); + /// getBitcodeTargetTriple - Read the header of the specified bitcode /// buffer and extract just the triple information. If successful, /// this returns a string and *does not* take ownership /// of 'buffer'. On error, this returns "", and fills in *ErrMsg /// if ErrMsg is non-null. std::string getBitcodeTargetTriple(MemoryBuffer *Buffer, - LLVMContext& Context, + LLVMContext &Context, std::string *ErrMsg = 0); /// ParseBitcodeFile - Read the specified bitcode file, returning the module. /// If an error occurs, this returns null and fills in *ErrMsg if it is /// non-null. This method *never* takes ownership of Buffer. 
- Module *ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context, + Module *ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext &Context, std::string *ErrMsg = 0); /// WriteBitcodeToFile - Write the specified module to the specified @@ -53,15 +63,11 @@ namespace llvm { /// should be in "binary" mode. void WriteBitcodeToFile(const Module *M, raw_ostream &Out); - /// WriteBitcodeToStream - Write the specified module to the specified - /// raw output stream. - void WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream); - /// createBitcodeWriterPass - Create and return a pass that writes the module /// to the specified ostream. ModulePass *createBitcodeWriterPass(raw_ostream &Str); - - + + /// isBitcodeWrapper - Return true if the given bytes are the magic bytes /// for an LLVM IR bitcode wrapper. /// @@ -109,21 +115,24 @@ namespace llvm { /// uint32_t BitcodeSize; // Size of traditional bitcode file. /// ... potentially other gunk ... /// }; - /// + /// /// This function is called when we find a file with a matching magic number. /// In this case, skip down to the subsection of the file that is actually a /// BC file. - static inline bool SkipBitcodeWrapperHeader(unsigned char *&BufPtr, - unsigned char *&BufEnd) { + /// If 'VerifyBufferSize' is true, check that the buffer is large enough to + /// contain the whole bitcode file. + static inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr, + const unsigned char *&BufEnd, + bool VerifyBufferSize) { enum { KnownHeaderSize = 4*4, // Size of header we read. OffsetField = 2*4, // Offset in bytes to Offset field. SizeField = 3*4 // Offset in bytes to Size field. }; - + // Must contain the header! 
if (BufEnd-BufPtr < KnownHeaderSize) return true; - + unsigned Offset = ( BufPtr[OffsetField ] | (BufPtr[OffsetField+1] << 8) | (BufPtr[OffsetField+2] << 16) | @@ -132,9 +141,9 @@ namespace llvm { (BufPtr[SizeField +1] << 8) | (BufPtr[SizeField +2] << 16) | (BufPtr[SizeField +3] << 24)); - + // Verify that Offset+Size fits in the file. - if (Offset+Size > unsigned(BufEnd-BufPtr)) + if (VerifyBufferSize && Offset+Size > unsigned(BufEnd-BufPtr)) return true; BufPtr += Offset; BufEnd = BufPtr+Size; diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h index f0d3423..56a87f1 100644 --- a/include/llvm/CodeGen/AsmPrinter.h +++ b/include/llvm/CodeGen/AsmPrinter.h @@ -18,16 +18,12 @@ #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" namespace llvm { class BlockAddress; class GCStrategy; class Constant; - class ConstantArray; - class ConstantFP; - class ConstantInt; - class ConstantStruct; - class ConstantVector; class GCMetadataPrinter; class GlobalValue; class GlobalVariable; @@ -37,14 +33,11 @@ namespace llvm { class MachineLocation; class MachineLoopInfo; class MachineLoop; - class MachineConstantPool; - class MachineConstantPoolEntry; class MachineConstantPoolValue; class MachineJumpTableInfo; class MachineModuleInfo; class MachineMove; class MCAsmInfo; - class MCInst; class MCContext; class MCSection; class MCStreamer; @@ -56,8 +49,6 @@ namespace llvm { class TargetLoweringObjectFile; class TargetData; class TargetMachine; - class Twine; - class Type; /// AsmPrinter - This class is intended to be used as a driving class for all /// asm writers. @@ -97,6 +88,11 @@ namespace llvm { /// MCSymbol *CurrentFnSym; + /// The symbol used to represent the start of the current function for the + /// purpose of calculating its size (e.g. using the .size directive). By + /// default, this is equal to CurrentFnSym. 
+ MCSymbol *CurrentFnSymForSize; + private: // GCMetadataPrinters - The garbage collection metadata printer table. void *GCMetadataPrinters; // Really a DenseMap. @@ -261,13 +257,20 @@ namespace llvm { /// EmitInstruction - Targets should implement this to emit instructions. virtual void EmitInstruction(const MachineInstr *) { - assert(0 && "EmitInstruction not implemented"); + llvm_unreachable("EmitInstruction not implemented"); } virtual void EmitFunctionEntryLabel(); virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV); + /// EmitXXStructor - Targets can override this to change how global + /// constants that are part of a C++ static/global constructor list are + /// emitted. + virtual void EmitXXStructor(const Constant *CV) { + EmitGlobalConstant(CV); + } + /// isBlockOnlyReachableByFallthough - Return true if the basic block has /// exactly one predecessor and the control transfer mechanism between /// the predecessor and this block is a fall-through. @@ -471,7 +474,7 @@ namespace llvm { const MachineBasicBlock *MBB, unsigned uid) const; void EmitLLVMUsedList(const Constant *List); - void EmitXXStructorList(const Constant *List); + void EmitXXStructorList(const Constant *List, bool isCtor); GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C); }; } diff --git a/include/llvm/CodeGen/BinaryObject.h b/include/llvm/CodeGen/BinaryObject.h deleted file mode 100644 index 8c1431f..0000000 --- a/include/llvm/CodeGen/BinaryObject.h +++ /dev/null @@ -1,353 +0,0 @@ -//===-- llvm/CodeGen/BinaryObject.h - Binary Object. -----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines a Binary Object Aka. "blob" for holding data from code -// generators, ready for data to the object module code writters. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CODEGEN_BINARYOBJECT_H -#define LLVM_CODEGEN_BINARYOBJECT_H - -#include "llvm/CodeGen/MachineRelocation.h" -#include "llvm/Support/DataTypes.h" - -#include <string> -#include <vector> - -namespace llvm { - -typedef std::vector<uint8_t> BinaryData; - -class BinaryObject { -protected: - std::string Name; - bool IsLittleEndian; - bool Is64Bit; - BinaryData Data; - std::vector<MachineRelocation> Relocations; - -public: - /// Constructors and destructor - BinaryObject() {} - - BinaryObject(bool isLittleEndian, bool is64Bit) - : IsLittleEndian(isLittleEndian), Is64Bit(is64Bit) {} - - BinaryObject(const std::string &name, bool isLittleEndian, bool is64Bit) - : Name(name), IsLittleEndian(isLittleEndian), Is64Bit(is64Bit) {} - - ~BinaryObject() {} - - /// getName - get name of BinaryObject - inline std::string getName() const { return Name; } - - /// get size of binary data - size_t size() const { - return Data.size(); - } - - /// get binary data - BinaryData& getData() { - return Data; - } - - /// get machine relocations - const std::vector<MachineRelocation>& getRelocations() const { - return Relocations; - } - - /// hasRelocations - Return true if 'Relocations' is not empty - bool hasRelocations() const { - return !Relocations.empty(); - } - - /// emitZeros - This callback is invoked to emit a arbitrary number - /// of zero bytes to the data stream. - inline void emitZeros(unsigned Size) { - for (unsigned i=0; i < Size; ++i) - emitByte(0); - } - - /// emitByte - This callback is invoked when a byte needs to be - /// written to the data stream. - inline void emitByte(uint8_t B) { - Data.push_back(B); - } - - /// emitWord16 - This callback is invoked when a 16-bit word needs to be - /// written to the data stream in correct endian format and correct size. 
- inline void emitWord16(uint16_t W) { - if (IsLittleEndian) - emitWord16LE(W); - else - emitWord16BE(W); - } - - /// emitWord16LE - This callback is invoked when a 16-bit word needs to be - /// written to the data stream in correct endian format and correct size. - inline void emitWord16LE(uint16_t W) { - Data.push_back((uint8_t)(W >> 0)); - Data.push_back((uint8_t)(W >> 8)); - } - - /// emitWord16BE - This callback is invoked when a 16-bit word needs to be - /// written to the data stream in correct endian format and correct size. - inline void emitWord16BE(uint16_t W) { - Data.push_back((uint8_t)(W >> 8)); - Data.push_back((uint8_t)(W >> 0)); - } - - /// emitWord - This callback is invoked when a word needs to be - /// written to the data stream in correct endian format and correct size. - inline void emitWord(uint64_t W) { - if (!Is64Bit) - emitWord32(W); - else - emitWord64(W); - } - - /// emitWord32 - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in correct endian format. - inline void emitWord32(uint32_t W) { - if (IsLittleEndian) - emitWordLE(W); - else - emitWordBE(W); - } - - /// emitWord64 - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in correct endian format. - inline void emitWord64(uint64_t W) { - if (IsLittleEndian) - emitDWordLE(W); - else - emitDWordBE(W); - } - - /// emitWord64 - This callback is invoked when a x86_fp80 needs to be - /// written to the data stream in correct endian format. - inline void emitWordFP80(const uint64_t *W, unsigned PadSize) { - if (IsLittleEndian) { - emitWord64(W[0]); - emitWord16(W[1]); - } else { - emitWord16(W[1]); - emitWord64(W[0]); - } - emitZeros(PadSize); - } - - /// emitWordLE - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in little-endian format. 
- inline void emitWordLE(uint32_t W) { - Data.push_back((uint8_t)(W >> 0)); - Data.push_back((uint8_t)(W >> 8)); - Data.push_back((uint8_t)(W >> 16)); - Data.push_back((uint8_t)(W >> 24)); - } - - /// emitWordBE - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in big-endian format. - /// - inline void emitWordBE(uint32_t W) { - Data.push_back((uint8_t)(W >> 24)); - Data.push_back((uint8_t)(W >> 16)); - Data.push_back((uint8_t)(W >> 8)); - Data.push_back((uint8_t)(W >> 0)); - } - - /// emitDWordLE - This callback is invoked when a 64-bit word needs to be - /// written to the data stream in little-endian format. - inline void emitDWordLE(uint64_t W) { - Data.push_back((uint8_t)(W >> 0)); - Data.push_back((uint8_t)(W >> 8)); - Data.push_back((uint8_t)(W >> 16)); - Data.push_back((uint8_t)(W >> 24)); - Data.push_back((uint8_t)(W >> 32)); - Data.push_back((uint8_t)(W >> 40)); - Data.push_back((uint8_t)(W >> 48)); - Data.push_back((uint8_t)(W >> 56)); - } - - /// emitDWordBE - This callback is invoked when a 64-bit word needs to be - /// written to the data stream in big-endian format. - inline void emitDWordBE(uint64_t W) { - Data.push_back((uint8_t)(W >> 56)); - Data.push_back((uint8_t)(W >> 48)); - Data.push_back((uint8_t)(W >> 40)); - Data.push_back((uint8_t)(W >> 32)); - Data.push_back((uint8_t)(W >> 24)); - Data.push_back((uint8_t)(W >> 16)); - Data.push_back((uint8_t)(W >> 8)); - Data.push_back((uint8_t)(W >> 0)); - } - - /// fixByte - This callback is invoked when a byte needs to be - /// fixup the buffer. - inline void fixByte(uint8_t B, uint32_t offset) { - Data[offset] = B; - } - - /// fixWord16 - This callback is invoked when a 16-bit word needs to - /// fixup the data stream in correct endian format. 
- inline void fixWord16(uint16_t W, uint32_t offset) { - if (IsLittleEndian) - fixWord16LE(W, offset); - else - fixWord16BE(W, offset); - } - - /// emitWord16LE - This callback is invoked when a 16-bit word needs to - /// fixup the data stream in little endian format. - inline void fixWord16LE(uint16_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 0); - Data[++offset] = (uint8_t)(W >> 8); - } - - /// fixWord16BE - This callback is invoked when a 16-bit word needs to - /// fixup data stream in big endian format. - inline void fixWord16BE(uint16_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 8); - Data[++offset] = (uint8_t)(W >> 0); - } - - /// emitWord - This callback is invoked when a word needs to - /// fixup the data in correct endian format and correct size. - inline void fixWord(uint64_t W, uint32_t offset) { - if (!Is64Bit) - fixWord32(W, offset); - else - fixWord64(W, offset); - } - - /// fixWord32 - This callback is invoked when a 32-bit word needs to - /// fixup the data in correct endian format. - inline void fixWord32(uint32_t W, uint32_t offset) { - if (IsLittleEndian) - fixWord32LE(W, offset); - else - fixWord32BE(W, offset); - } - - /// fixWord32LE - This callback is invoked when a 32-bit word needs to - /// fixup the data in little endian format. - inline void fixWord32LE(uint32_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 0); - Data[++offset] = (uint8_t)(W >> 8); - Data[++offset] = (uint8_t)(W >> 16); - Data[++offset] = (uint8_t)(W >> 24); - } - - /// fixWord32BE - This callback is invoked when a 32-bit word needs to - /// fixup the data in big endian format. - inline void fixWord32BE(uint32_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 24); - Data[++offset] = (uint8_t)(W >> 16); - Data[++offset] = (uint8_t)(W >> 8); - Data[++offset] = (uint8_t)(W >> 0); - } - - /// fixWord64 - This callback is invoked when a 64-bit word needs to - /// fixup the data in correct endian format. 
- inline void fixWord64(uint64_t W, uint32_t offset) { - if (IsLittleEndian) - fixWord64LE(W, offset); - else - fixWord64BE(W, offset); - } - - /// fixWord64BE - This callback is invoked when a 64-bit word needs to - /// fixup the data in little endian format. - inline void fixWord64LE(uint64_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 0); - Data[++offset] = (uint8_t)(W >> 8); - Data[++offset] = (uint8_t)(W >> 16); - Data[++offset] = (uint8_t)(W >> 24); - Data[++offset] = (uint8_t)(W >> 32); - Data[++offset] = (uint8_t)(W >> 40); - Data[++offset] = (uint8_t)(W >> 48); - Data[++offset] = (uint8_t)(W >> 56); - } - - /// fixWord64BE - This callback is invoked when a 64-bit word needs to - /// fixup the data in big endian format. - inline void fixWord64BE(uint64_t W, uint32_t offset) { - Data[offset] = (uint8_t)(W >> 56); - Data[++offset] = (uint8_t)(W >> 48); - Data[++offset] = (uint8_t)(W >> 40); - Data[++offset] = (uint8_t)(W >> 32); - Data[++offset] = (uint8_t)(W >> 24); - Data[++offset] = (uint8_t)(W >> 16); - Data[++offset] = (uint8_t)(W >> 8); - Data[++offset] = (uint8_t)(W >> 0); - } - - /// emitAlignment - Pad the data to the specified alignment. - void emitAlignment(unsigned Alignment, uint8_t fill = 0) { - if (Alignment <= 1) return; - unsigned PadSize = -Data.size() & (Alignment-1); - for (unsigned i = 0; i<PadSize; ++i) - Data.push_back(fill); - } - - /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be - /// written to the data stream. - void emitULEB128Bytes(uint64_t Value) { - do { - uint8_t Byte = (uint8_t)(Value & 0x7f); - Value >>= 7; - if (Value) Byte |= 0x80; - emitByte(Byte); - } while (Value); - } - - /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be - /// written to the data stream. 
- void emitSLEB128Bytes(int64_t Value) { - int Sign = Value >> (8 * sizeof(Value) - 1); - bool IsMore; - - do { - uint8_t Byte = (uint8_t)(Value & 0x7f); - Value >>= 7; - IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0; - if (IsMore) Byte |= 0x80; - emitByte(Byte); - } while (IsMore); - } - - /// emitString - This callback is invoked when a String needs to be - /// written to the data stream. - void emitString(const std::string &String) { - for (unsigned i = 0, N = static_cast<unsigned>(String.size()); i<N; ++i) { - unsigned char C = String[i]; - emitByte(C); - } - emitByte(0); - } - - /// getCurrentPCOffset - Return the offset from the start of the emitted - /// buffer that we are currently writing to. - uintptr_t getCurrentPCOffset() const { - return Data.size(); - } - - /// addRelocation - Whenever a relocatable address is needed, it should be - /// noted with this interface. - void addRelocation(const MachineRelocation& relocation) { - Relocations.push_back(relocation); - } - -}; - -} // end namespace llvm - -#endif - diff --git a/include/llvm/CodeGen/DFAPacketizer.h b/include/llvm/CodeGen/DFAPacketizer.h index 0a11e0c..0694caa 100644 --- a/include/llvm/CodeGen/DFAPacketizer.h +++ b/include/llvm/CodeGen/DFAPacketizer.h @@ -26,13 +26,17 @@ #ifndef LLVM_CODEGEN_DFAPACKETIZER_H #define LLVM_CODEGEN_DFAPACKETIZER_H +#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/ADT/DenseMap.h" namespace llvm { class MCInstrDesc; class MachineInstr; +class MachineLoopInfo; +class MachineDominatorTree; class InstrItineraryData; +class SUnit; class DFAPacketizer { private: @@ -73,6 +77,70 @@ public: // instruction and change the current state to reflect that change. void reserveResources(llvm::MachineInstr *MI); }; + +// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The +// packetizer works on machine basic blocks. For each instruction I in BB, the +// packetizer consults the DFA to see if machine resources are available to +// execute I. 
If so, the packetizer checks if I depends on any instruction J in +// the current packet. If no dependency is found, I is added to current packet +// and machine resource is marked as taken. If any dependency is found, a target +// API call is made to prune the dependence. +class VLIWPacketizerList { + const TargetMachine &TM; + const MachineFunction &MF; + const TargetInstrInfo *TII; + + // Encapsulate data types not exposed to the target interface. + void *SchedulerImpl; + +protected: + // Vector of instructions assigned to the current packet. + std::vector<MachineInstr*> CurrentPacketMIs; + // DFA resource tracker. + DFAPacketizer *ResourceTracker; + // Scheduling units. + std::vector<SUnit> SUnits; + +public: + VLIWPacketizerList( + MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT, + bool IsPostRA); + + virtual ~VLIWPacketizerList(); + + // PacketizeMIs - Implement this API in the backend to bundle instructions. + void PacketizeMIs(MachineBasicBlock *MBB, + MachineBasicBlock::iterator BeginItr, + MachineBasicBlock::iterator EndItr); + + // getResourceTracker - return ResourceTracker + DFAPacketizer *getResourceTracker() {return ResourceTracker;} + + // addToPacket - Add MI to the current packet. + void addToPacket(MachineInstr *MI); + + // endPacket - End the current packet. + void endPacket(MachineBasicBlock *MBB, MachineInstr *I); + + // ignorePseudoInstruction - Ignore bundling of pseudo instructions. + bool ignorePseudoInstruction(MachineInstr *I, MachineBasicBlock *MBB); + + // isSoloInstruction - return true if instruction I must end previous + // packet. + bool isSoloInstruction(MachineInstr *I); + + // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ + // together. + virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { + return false; + } + + // isLegalToPruneDependencies - Is it legal to prune dependece between SUI + // and SUJ. 
+ virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { + return false; + } +}; } #endif diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h index 09dac85..8cf22ec 100644 --- a/include/llvm/CodeGen/FunctionLoweringInfo.h +++ b/include/llvm/CodeGen/FunctionLoweringInfo.h @@ -21,10 +21,8 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/IndexedMap.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" -#ifndef NDEBUG -#include "llvm/ADT/SmallSet.h" -#endif #include "llvm/Analysis/BranchProbabilityInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/CodeGen/ISDOpcodes.h" @@ -98,8 +96,8 @@ public: MachineBasicBlock::iterator InsertPt; #ifndef NDEBUG - SmallSet<const Instruction *, 8> CatchInfoLost; - SmallSet<const Instruction *, 8> CatchInfoFound; + SmallPtrSet<const Instruction *, 8> CatchInfoLost; + SmallPtrSet<const Instruction *, 8> CatchInfoFound; #endif struct LiveOutInfo { @@ -112,7 +110,7 @@ public: /// VisitedBBs - The set of basic blocks visited thus far by instruction /// selection. - DenseSet<const BasicBlock*> VisitedBBs; + SmallPtrSet<const BasicBlock*, 4> VisitedBBs; /// PHINodesToUpdate - A list of phi instructions whose operand list will /// be updated after processing the current basic block. @@ -202,7 +200,7 @@ public: /// setArgumentFrameIndex - Record frame index for the byval /// argument. void setArgumentFrameIndex(const Argument *A, int FI); - + /// getArgumentFrameIndex - Get frame index for the byval argument. int getArgumentFrameIndex(const Argument *A); @@ -211,16 +209,18 @@ private: IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo; }; +/// ComputeUsesVAFloatArgument - Determine if any floating-point values are +/// being passed to this variadic function, and set the MachineModuleInfo's +/// usesVAFloatArgument flag if so. 
This flag is used to emit an undefined +/// reference to _fltused on Windows, which will link in MSVCRT's +/// floating-point support. +void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI); + /// AddCatchInfo - Extract the personality and type infos from an eh.selector /// call, and add them to the specified machine basic block. void AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI, MachineBasicBlock *MBB); -/// CopyCatchInfo - Copy catch information from SuccBB (or one of its -/// successors) to LPad. -void CopyCatchInfo(const BasicBlock *SuccBB, const BasicBlock *LPad, - MachineModuleInfo *MMI, FunctionLoweringInfo &FLI); - /// AddLandingPadInfo - Extract the exception handling information from the /// landingpad instruction and add them to the specified machine module info. void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI, diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h index cb990ed..ab8ab5d 100644 --- a/include/llvm/CodeGen/ISDOpcodes.h +++ b/include/llvm/CodeGen/ISDOpcodes.h @@ -57,7 +57,7 @@ namespace ISD { AssertSext, AssertZext, // Various leaf nodes. - BasicBlock, VALUETYPE, CONDCODE, Register, + BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask, Constant, ConstantFP, GlobalAddress, GlobalTLSAddress, FrameIndex, JumpTable, ConstantPool, ExternalSymbol, BlockAddress, @@ -323,6 +323,9 @@ namespace ISD { // and #2), returning a vector result. All vectors have the same length. // Much like the scalar select and setcc, each bit in the condition selects // whether the corresponding result element is taken from op #1 or op #2. + // At first, the VSELECT condition is of vXi1 type. Later, targets may change + // the condition type in order to match the VSELECT node using a a pattern. + // The condition follows the BooleanContent format of the target. 
VSELECT, // Select with condition operator - This selects between a true value and diff --git a/include/llvm/CodeGen/JITCodeEmitter.h b/include/llvm/CodeGen/JITCodeEmitter.h index 88e22d6..89f00e9 100644 --- a/include/llvm/CodeGen/JITCodeEmitter.h +++ b/include/llvm/CodeGen/JITCodeEmitter.h @@ -51,6 +51,7 @@ class Function; /// occurred, more memory is allocated, and we reemit the code into it. /// class JITCodeEmitter : public MachineCodeEmitter { + virtual void anchor(); public: virtual ~JITCodeEmitter() {} diff --git a/include/llvm/CodeGen/LexicalScopes.h b/include/llvm/CodeGen/LexicalScopes.h index 0271c5d..6cb5c9e 100644 --- a/include/llvm/CodeGen/LexicalScopes.h +++ b/include/llvm/CodeGen/LexicalScopes.h @@ -153,6 +153,7 @@ private: /// LexicalScope - This class is used to track scope information. /// class LexicalScope { + virtual void anchor(); public: LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A) diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h index a379a30..46dd004 100644 --- a/include/llvm/CodeGen/LinkAllCodegenComponents.h +++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h @@ -31,8 +31,6 @@ namespace { if (std::getenv("bar") != (char*) -1) return; - (void) llvm::createDeadMachineInstructionElimPass(); - (void) llvm::createFastRegisterAllocator(); (void) llvm::createBasicRegisterAllocator(); (void) llvm::createGreedyRegisterAllocator(); @@ -40,12 +38,13 @@ namespace { llvm::linkOcamlGC(); llvm::linkShadowStackGC(); - + (void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default); (void) llvm::createSourceListDAGScheduler(NULL,llvm::CodeGenOpt::Default); (void) llvm::createHybridListDAGScheduler(NULL,llvm::CodeGenOpt::Default); (void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default); (void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default); + (void) llvm::createVLIWDAGScheduler(NULL, llvm::CodeGenOpt::Default); } } 
ForceCodegenLinking; // Force link by creating a global definition. diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h index f690243..a6008ab 100644 --- a/include/llvm/CodeGen/LiveInterval.h +++ b/include/llvm/CodeGen/LiveInterval.h @@ -43,12 +43,10 @@ namespace llvm { private: enum { HAS_PHI_KILL = 1, - REDEF_BY_EC = 1 << 1, - IS_PHI_DEF = 1 << 2, - IS_UNUSED = 1 << 3 + IS_PHI_DEF = 1 << 1, + IS_UNUSED = 1 << 2 }; - MachineInstr *copy; unsigned char flags; public: @@ -57,23 +55,22 @@ namespace llvm { /// The ID number of this value. unsigned id; - /// The index of the defining instruction (if isDefAccurate() returns true). + /// The index of the defining instruction. SlotIndex def; /// VNInfo constructor. - VNInfo(unsigned i, SlotIndex d, MachineInstr *c) - : copy(c), flags(0), id(i), def(d) + VNInfo(unsigned i, SlotIndex d) + : flags(0), id(i), def(d) { } /// VNInfo construtor, copies values from orig, except for the value number. VNInfo(unsigned i, const VNInfo &orig) - : copy(orig.copy), flags(orig.flags), id(i), def(orig.def) + : flags(orig.flags), id(i), def(orig.def) { } /// Copy from the parameter into this VNInfo. void copyFrom(VNInfo &src) { flags = src.flags; - copy = src.copy; def = src.def; } @@ -86,19 +83,6 @@ namespace llvm { flags = (flags | VNI->flags) & ~IS_UNUSED; } - /// For a register interval, if this VN was definied by a copy instr - /// getCopy() returns a pointer to it, otherwise returns 0. - /// For a stack interval the behaviour of this method is undefined. - MachineInstr* getCopy() const { return copy; } - /// For a register interval, set the copy member. - /// This method should not be called on stack intervals as it may lead to - /// undefined behavior. - void setCopy(MachineInstr *c) { copy = c; } - - /// isDefByCopy - Return true when this value was defined by a copy-like - /// instruction as determined by MachineInstr::isCopyLike. 
- bool isDefByCopy() const { return copy != 0; } - /// Returns true if one or more kills are PHI nodes. /// Obsolete, do not use! bool hasPHIKill() const { return flags & HAS_PHI_KILL; } @@ -110,17 +94,6 @@ namespace llvm { flags &= ~HAS_PHI_KILL; } - /// Returns true if this value is re-defined by an early clobber somewhere - /// during the live range. - bool hasRedefByEC() const { return flags & REDEF_BY_EC; } - /// Set the "redef by early clobber" flag on this value. - void setHasRedefByEC(bool hasRedef) { - if (hasRedef) - flags |= REDEF_BY_EC; - else - flags &= ~REDEF_BY_EC; - } - /// Returns true if this value is defined by a PHI instruction (or was, /// PHI instrucions may have been eliminated). bool isPHIDef() const { return flags & IS_PHI_DEF; } @@ -294,10 +267,9 @@ namespace llvm { /// getNextValue - Create a new value number and return it. MIIdx specifies /// the instruction that defines the value number. - VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI, - VNInfo::Allocator &VNInfoAllocator) { + VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) { VNInfo *VNI = - new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI); + new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def); valnos.push_back(VNI); return VNI; } @@ -405,6 +377,14 @@ namespace llvm { return I == end() ? 0 : &*I; } + const LiveRange *getLiveRangeBefore(SlotIndex Idx) const { + return getLiveRangeContaining(Idx.getPrevSlot()); + } + + LiveRange *getLiveRangeBefore(SlotIndex Idx) { + return getLiveRangeContaining(Idx.getPrevSlot()); + } + /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL. 
VNInfo *getVNInfoAt(SlotIndex Idx) const { const_iterator I = FindLiveRangeContaining(Idx); diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h index 30537b4..76201c9 100644 --- a/include/llvm/CodeGen/LiveIntervalAnalysis.h +++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h @@ -63,8 +63,34 @@ namespace llvm { /// allocatableRegs_ - A bit vector of allocatable registers. BitVector allocatableRegs_; - /// CloneMIs - A list of clones as result of re-materialization. - std::vector<MachineInstr*> CloneMIs; + /// reservedRegs_ - A bit vector of reserved registers. + BitVector reservedRegs_; + + /// RegMaskSlots - Sorted list of instructions with register mask operands. + /// Always use the 'r' slot, RegMasks are normal clobbers, not early + /// clobbers. + SmallVector<SlotIndex, 8> RegMaskSlots; + + /// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a + /// pointer to the corresponding register mask. This pointer can be + /// recomputed as: + /// + /// MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]); + /// unsigned OpNum = findRegMaskOperand(MI); + /// RegMaskBits[N] = MI->getOperand(OpNum).getRegMask(); + /// + /// This is kept in a separate vector partly because some standard + /// libraries don't support lower_bound() with mixed objects, partly to + /// improve locality when searching in RegMaskSlots. + /// Also see the comment in LiveInterval::find(). + SmallVector<const uint32_t*, 8> RegMaskBits; + + /// For each basic block number, keep (begin, size) pairs indexing into the + /// RegMaskSlots and RegMaskBits arrays. + /// Note that basic block numbers may not be layout contiguous, that's why + /// we can't just keep track of the first register mask in each basic + /// block. 
+ SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks; public: static char ID; // Pass identification, replacement for typeid @@ -105,6 +131,12 @@ namespace llvm { return allocatableRegs_.test(reg); } + /// isReserved - is the physical register reg reserved in the current + /// function + bool isReserved(unsigned reg) const { + return reservedRegs_.test(reg); + } + /// getScaledIntervalSize - get the size of an interval in "units," /// where every function is composed of one thousand units. This /// measure scales properly with empty index slots in the function. @@ -164,14 +196,6 @@ namespace llvm { return indexes_; } - SlotIndex getZeroIndex() const { - return indexes_->getZeroIndex(); - } - - SlotIndex getInvalidIndex() const { - return indexes_->getInvalidIndex(); - } - /// isNotInMIMap - returns true if the specified machine instr has been /// removed or was never entered in the map. bool isNotInMIMap(const MachineInstr* Instr) const { @@ -203,21 +227,11 @@ namespace llvm { return li.liveAt(getMBBStartIdx(mbb)); } - LiveRange* findEnteringRange(LiveInterval &li, - const MachineBasicBlock *mbb) { - return li.getLiveRangeContaining(getMBBStartIdx(mbb)); - } - bool isLiveOutOfMBB(const LiveInterval &li, const MachineBasicBlock *mbb) const { return li.liveAt(getMBBEndIdx(mbb).getPrevSlot()); } - LiveRange* findExitingRange(LiveInterval &li, - const MachineBasicBlock *mbb) { - return li.getLiveRangeContaining(getMBBEndIdx(mbb).getPrevSlot()); - } - MachineBasicBlock* getMBBFromIndex(SlotIndex index) const { return indexes_->getMBBFromIndex(index); } @@ -234,19 +248,11 @@ namespace llvm { indexes_->replaceMachineInstrInMaps(MI, NewMI); } - void InsertMBBInMaps(MachineBasicBlock *MBB) { - indexes_->insertMBBInMaps(MBB); - } - bool findLiveInMBBs(SlotIndex Start, SlotIndex End, SmallVectorImpl<MachineBasicBlock*> &MBBs) const { return indexes_->findLiveInMBBs(Start, End, MBBs); } - void renumber() { - indexes_->renumberIndexes(); - } - VNInfo::Allocator& 
getVNInfoAllocator() { return VNInfoAllocator; } virtual void getAnalysisUsage(AnalysisUsage &AU) const; @@ -265,20 +271,72 @@ namespace llvm { const SmallVectorImpl<LiveInterval*> *SpillIs, bool &isLoad); - /// intervalIsInOneMBB - Returns true if the specified interval is entirely - /// within a single basic block. - bool intervalIsInOneMBB(const LiveInterval &li) const; - - /// getLastSplitPoint - Return the last possible insertion point in mbb for - /// spilling and splitting code. This is the first terminator, or the call - /// instruction if li is live into a landing pad successor. - MachineBasicBlock::iterator getLastSplitPoint(const LiveInterval &li, - MachineBasicBlock *mbb) const; + /// intervalIsInOneMBB - If LI is confined to a single basic block, return + /// a pointer to that block. If LI is live in to or out of any block, + /// return NULL. + MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const; /// addKillFlags - Add kill flags to any instruction that kills a virtual /// register. void addKillFlags(); + /// handleMove - call this method to notify LiveIntervals that + /// instruction 'mi' has been moved within a basic block. This will update + /// the live intervals for all operands of mi. Moves between basic blocks + /// are not supported. + void handleMove(MachineInstr* MI); + + /// moveIntoBundle - Update intervals for operands of MI so that they + /// begin/end on the SlotIndex for BundleStart. + /// + /// Requires MI and BundleStart to have SlotIndexes, and assumes + /// existing liveness is accurate. BundleStart should be the first + /// instruction in the Bundle. + void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart); + + // Register mask functions. + // + // Machine instructions may use a register mask operand to indicate that a + // large number of registers are clobbered by the instruction. This is + // typically used for calls. 
+ // + // For compile time performance reasons, these clobbers are not recorded in + // the live intervals for individual physical registers. Instead, + // LiveIntervalAnalysis maintains a sorted list of instructions with + // register mask operands. + + /// getRegMaskSlots - Returns a sorted array of slot indices of all + /// instructions with register mask operands. + ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; } + + /// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all + /// instructions with register mask operands in the basic block numbered + /// MBBNum. + ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const { + std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum]; + return getRegMaskSlots().slice(P.first, P.second); + } + + /// getRegMaskBits() - Returns an array of register mask pointers + /// corresponding to getRegMaskSlots(). + ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; } + + /// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding + /// to getRegMaskSlotsInBlock(MBBNum). + ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const { + std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum]; + return getRegMaskBits().slice(P.first, P.second); + } + + /// checkRegMaskInterference - Test if LI is live across any register mask + /// instructions, and compute a bit mask of physical registers that are not + /// clobbered by any of them. + /// + /// Returns false if LI doesn't cross any register mask instructions. In + /// that case, the bit vector is not filled in. + bool checkRegMaskInterference(LiveInterval &LI, + BitVector &UsableRegs); + private: /// computeIntervals - Compute live intervals. 
void computeIntervals(); @@ -310,13 +368,12 @@ namespace llvm { void handlePhysicalRegisterDef(MachineBasicBlock* mbb, MachineBasicBlock::iterator mi, SlotIndex MIIdx, MachineOperand& MO, - LiveInterval &interval, - MachineInstr *CopyMI); + LiveInterval &interval); /// handleLiveInRegister - Create interval for a livein register. void handleLiveInRegister(MachineBasicBlock* mbb, SlotIndex MIIdx, - LiveInterval &interval, bool isAlias = false); + LiveInterval &interval); /// getReMatImplicitUse - If the remat definition MI has one (for now, we /// only allow one) virtual register operand, then its uses are implicitly @@ -342,6 +399,8 @@ namespace llvm { void printInstrs(raw_ostream &O) const; void dumpInstrs() const; + + class HMEditor; }; } // End llvm namespace diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h index 7ba901f..d4bb409 100644 --- a/include/llvm/CodeGen/LiveVariables.h +++ b/include/llvm/CodeGen/LiveVariables.h @@ -85,17 +85,11 @@ public: /// SparseBitVector<> AliveBlocks; - /// NumUses - Number of uses of this register across the entire function. - /// - unsigned NumUses; - /// Kills - List of MachineInstruction's which are the last use of this /// virtual register (kill it) in their basic block. /// std::vector<MachineInstr*> Kills; - VarInfo() : NumUses(0) {} - /// removeKill - Delete a kill corresponding to the specified /// machine instruction. Returns true if there was a kill /// corresponding to this instruction, false otherwise. @@ -166,6 +160,9 @@ private: // Intermediate data structures /// the last use of the whole register. bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI); + /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask. 
+ void HandleRegMask(const MachineOperand&); + void HandlePhysRegUse(unsigned Reg, MachineInstr *MI); void HandlePhysRegDef(unsigned Reg, MachineInstr *MI, SmallVector<unsigned, 4> &Defs); diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h index 10a32d3..576ce94 100644 --- a/include/llvm/CodeGen/MachineBasicBlock.h +++ b/include/llvm/CodeGen/MachineBasicBlock.h @@ -77,6 +77,7 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> { /// (disable optimization). std::vector<uint32_t> Weights; typedef std::vector<uint32_t>::iterator weight_iterator; + typedef std::vector<uint32_t>::const_iterator const_weight_iterator; /// LiveIns - Keep track of the physical registers that are livein of /// the basicblock. @@ -165,7 +166,7 @@ public: bool operator!=(const bundle_iterator &x) const { return !operator==(x); } - + // Increment and decrement operators... bundle_iterator &operator--() { // predecrement - Back up do { @@ -196,7 +197,7 @@ public: IterTy getInstrIterator() const { return MII; - } + } }; typedef Instructions::iterator instr_iterator; @@ -359,7 +360,7 @@ public: const MachineBasicBlock *getLandingPadSuccessor() const; // Code Layout methods. - + /// moveBefore/moveAfter - move 'this' block before or after the specified /// block. This only moves the block, it does not modify the CFG or adjust /// potential fall-throughs at the end of the block. @@ -406,7 +407,7 @@ public: /// in transferSuccessors, and update PHI operands in the successor blocks /// which refer to fromMBB to refer to this. void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB); - + /// isSuccessor - Return true if the specified MBB is a successor of this /// block. bool isSuccessor(const MachineBasicBlock *MBB) const; @@ -424,7 +425,7 @@ public: /// branch to do so (e.g., a table jump). True is a conservative answer. 
bool canFallThrough(); - /// Returns a pointer to the first instructon in this block that is not a + /// Returns a pointer to the first instructon in this block that is not a /// PHINode instruction. When adding instruction to the beginning of the /// basic block, they should be added before the returned value, not before /// the first instruction, which might be PHI. @@ -470,8 +471,8 @@ public: instr_iterator insert(instr_iterator I, MachineInstr *M) { return Insts.insert(I, M); } - instr_iterator insertAfter(instr_iterator I, MachineInstr *M) { - return Insts.insertAfter(I, M); + instr_iterator insertAfter(instr_iterator I, MachineInstr *M) { + return Insts.insertAfter(I, M); } template<typename IT> @@ -481,8 +482,8 @@ public: iterator insert(iterator I, MachineInstr *M) { return Insts.insert(I.getInstrIterator(), M); } - iterator insertAfter(iterator I, MachineInstr *M) { - return Insts.insertAfter(I.getInstrIterator(), M); + iterator insertAfter(iterator I, MachineInstr *M) { + return Insts.insertAfter(I.getInstrIterator(), M); } /// erase - Remove the specified element or range from the instruction list. @@ -513,7 +514,7 @@ public: /// instruction is a bundle this function will remove all the bundled /// instructions as well. It is up to the caller to keep a list of the /// bundled instructions and re-insert them if desired. This function is - /// *not recommended* for manipulating instructions with bundled. Use + /// *not recommended* for manipulating instructions with bundles. Use /// splice instead. MachineInstr *remove(MachineInstr *I); void clear() { @@ -543,7 +544,7 @@ public: /// removeFromParent - This method unlinks 'this' from the containing /// function, and returns it, but does not delete it. MachineBasicBlock *removeFromParent(); - + /// eraseFromParent - This method unlinks 'this' from the containing /// function and deletes it. 
void eraseFromParent(); @@ -589,13 +590,14 @@ private: /// getWeightIterator - Return weight iterator corresponding to the I /// successor iterator. weight_iterator getWeightIterator(succ_iterator I); + const_weight_iterator getWeightIterator(const_succ_iterator I) const; friend class MachineBranchProbabilityInfo; /// getSuccWeight - Return weight of the edge from this block to MBB. This /// method should NOT be called directly, but by using getEdgeWeight method /// from MachineBranchProbabilityInfo class. - uint32_t getSuccWeight(MachineBasicBlock *succ); + uint32_t getSuccWeight(const MachineBasicBlock *succ) const; // Methods used to maintain doubly linked list of blocks... diff --git a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h index 3d9d196..a9c7bf7 100644 --- a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h +++ b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h @@ -48,7 +48,7 @@ public: /// that we should not rely on the value itself, but only on the comparison to /// the other block frequencies. We do this to avoid using of floating points. /// - BlockFrequency getBlockFreq(MachineBasicBlock *MBB) const; + BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const; }; } diff --git a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h index e768874..af4db7d 100644 --- a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h +++ b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h @@ -25,6 +25,7 @@ class raw_ostream; class MachineBasicBlock; class MachineBranchProbabilityInfo : public ImmutablePass { + virtual void anchor(); // Default weight value. Used when we don't have information about the edge. // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of @@ -48,12 +49,13 @@ public: // Return edge weight. If we don't have any informations about it - return // DEFAULT_WEIGHT. 
- uint32_t getEdgeWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst) const; + uint32_t getEdgeWeight(const MachineBasicBlock *Src, + const MachineBasicBlock *Dst) const; // Get sum of the block successors' weights, potentially scaling them to fit // within 32-bits. If scaling is required, sets Scale based on the necessary // adjustment. Any edge weights used with the sum should be divided by Scale. - uint32_t getSumForBlock(MachineBasicBlock *MBB, uint32_t &Scale) const; + uint32_t getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const; // A 'Hot' edge is an edge which probability is >= 80%. bool isEdgeHot(MachineBasicBlock *Src, MachineBasicBlock *Dst) const; diff --git a/include/llvm/CodeGen/MachineCodeEmitter.h b/include/llvm/CodeGen/MachineCodeEmitter.h index 428aada..86e8f27 100644 --- a/include/llvm/CodeGen/MachineCodeEmitter.h +++ b/include/llvm/CodeGen/MachineCodeEmitter.h @@ -20,6 +20,8 @@ #include "llvm/Support/DataTypes.h" #include "llvm/Support/DebugLoc.h" +#include <string> + namespace llvm { class MachineBasicBlock; @@ -49,6 +51,7 @@ class MCSymbol; /// occurred, more memory is allocated, and we reemit the code into it. /// class MachineCodeEmitter { + virtual void anchor(); protected: /// BufferBegin/BufferEnd - Pointers to the start and end of the memory /// allocated for this code buffer. diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h index 29f4f44..d6d65a2 100644 --- a/include/llvm/CodeGen/MachineConstantPool.h +++ b/include/llvm/CodeGen/MachineConstantPool.h @@ -34,6 +34,7 @@ class raw_ostream; /// Abstract base class for all machine specific constantpool value subclasses. 
/// class MachineConstantPoolValue { + virtual void anchor(); Type *Ty; public: diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h index ab944a2..82a4ac8 100644 --- a/include/llvm/CodeGen/MachineDominators.h +++ b/include/llvm/CodeGen/MachineDominators.h @@ -84,7 +84,8 @@ public: // Loop through the basic block until we find A or B. MachineBasicBlock::iterator I = BBA->begin(); - for (; &*I != A && &*I != B; ++I) /*empty*/; + for (; &*I != A && &*I != B; ++I) + /*empty*/ ; //if(!DT.IsPostDominators) { // A dominates B if it is found first in the basic block. diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h index 3a4568a..fd4cac8 100644 --- a/include/llvm/CodeGen/MachineFunction.h +++ b/include/llvm/CodeGen/MachineFunction.h @@ -120,10 +120,12 @@ class MachineFunction { /// Alignment - The alignment of the function. unsigned Alignment; - /// CallsSetJmp - True if the function calls setjmp or sigsetjmp. This is used - /// to limit optimizations which cannot reason about the control flow of - /// setjmp. - bool CallsSetJmp; + /// ExposesReturnsTwice - True if the function calls setjmp or related + /// functions with attribute "returns twice", but doesn't have + /// the attribute itself. + /// This is used to limit optimizations which cannot reason + /// about the control flow of such functions. + bool ExposesReturnsTwice; MachineFunction(const MachineFunction &); // DO NOT IMPLEMENT void operator=(const MachineFunction&); // DO NOT IMPLEMENT @@ -192,15 +194,17 @@ public: if (Alignment < A) Alignment = A; } - /// callsSetJmp - Returns true if the function calls setjmp or sigsetjmp. - bool callsSetJmp() const { - return CallsSetJmp; + /// exposesReturnsTwice - Returns true if the function calls setjmp or + /// any other similar functions with attribute "returns twice" without + /// having the attribute itself. 
+ bool exposesReturnsTwice() const { + return ExposesReturnsTwice; } - /// setCallsSetJmp - Set a flag that indicates if there's a call to setjmp or - /// sigsetjmp. - void setCallsSetJmp(bool B) { - CallsSetJmp = B; + /// setCallsSetJmp - Set a flag that indicates if there's a call to + /// a "returns twice" function. + void setExposesReturnsTwice(bool B) { + ExposesReturnsTwice = B; } /// getInfo - Keep track of various per-function pieces of information for diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h index 904f1a6..7f29d31 100644 --- a/include/llvm/CodeGen/MachineInstr.h +++ b/include/llvm/CodeGen/MachineInstr.h @@ -19,6 +19,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Target/TargetOpcodes.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/ADT/STLExtras.h" @@ -189,11 +190,11 @@ public: /// ---------------- /// | /// ---------------- - /// | MI * | + /// | MI * | /// ---------------- /// | /// ---------------- - /// | MI * | + /// | MI * | /// ---------------- /// In this case, the first MI starts a bundle but is not inside a bundle, the /// next 2 MIs are considered "inside" the bundle. @@ -208,11 +209,11 @@ public: /// ---------------- /// | /// ---------------- - /// | MI * | + /// | MI * | /// ---------------- /// | /// ---------------- - /// | MI * | + /// | MI * | /// ---------------- /// The first instruction has the special opcode "BUNDLE". It's not "inside" /// a bundle, but the next three MIs are. @@ -229,6 +230,10 @@ public: clearFlag(InsideBundle); } + /// isBundled - Return true if this instruction part of a bundle. This is true + /// if either itself or its following instruction is marked "InsideBundle". + bool isBundled() const; + /// getDebugLoc - Returns the debug location id of this MachineInstr. 
/// DebugLoc getDebugLoc() const { return debugLoc; } @@ -302,9 +307,6 @@ public: /// The first argument is the property being queried. /// The second argument indicates whether the query should look inside /// instruction bundles. - /// If the third argument is true, than the query can return true when *any* - /// of the bundled instructions has the queried property. If it's false, then - /// this can return true iff *all* of the instructions have the property. bool hasProperty(unsigned Flag, QueryType Type = AnyInBundle) const; /// isVariadic - Return true if this instruction can have a variable number of @@ -700,6 +702,7 @@ public: /// that are not dead are skipped. If Overlap is true, then it also looks for /// defs that merely overlap the specified register. If TargetRegisterInfo is /// non-null, then it also checks if there is a def of a super-register. + /// This may also return a register mask operand when Overlap is true. int findRegisterDefOperandIdx(unsigned Reg, bool isDead = false, bool Overlap = false, const TargetRegisterInfo *TRI = NULL) const; @@ -744,7 +747,7 @@ public: /// isRegTiedToUseOperand - Given the index of a register def operand, /// check if the register def is tied to a source operand, due to either /// two-address elimination or inline assembly constraints. Returns the - /// first tied use operand index by reference is UseOpIdx is not null. + /// first tied use operand index by reference if UseOpIdx is not null. bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const; /// isRegTiedToDefOperand - Return true if the use operand of the specified @@ -776,6 +779,10 @@ public: const TargetRegisterInfo *RegInfo, bool AddIfNotFound = false); + /// clearRegisterKills - Clear all kill flags affecting Reg. If RegInfo is + /// provided, this includes super-register kills. + void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo); + /// addRegisterDead - We have determined MI defined a register without a use. 
/// Look for the operand that defines it and mark it as IsDead. If /// AddIfNotFound is true, add a implicit operand if it's not found. Returns @@ -790,7 +797,10 @@ public: /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as /// dead except those in the UsedRegs list. - void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs, + /// + /// On instructions with register mask operands, also add implicit-def + /// operands for all registers in UsedRegs. + void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs, const TargetRegisterInfo &TRI); /// isSafeToMove - Return true if it is safe to move this instruction. If diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h index 8025642..99849a6 100644 --- a/include/llvm/CodeGen/MachineInstrBuilder.h +++ b/include/llvm/CodeGen/MachineInstrBuilder.h @@ -34,6 +34,7 @@ namespace RegState { Undef = 0x20, EarlyClobber = 0x40, Debug = 0x80, + DefineNoRead = Define | Undef, ImplicitDefine = Implicit | Define, ImplicitKill = Implicit | Kill }; @@ -124,6 +125,11 @@ public: return *this; } + const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const { + MI->addOperand(MachineOperand::CreateRegMask(Mask)); + return *this; + } + const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const { MI->addMemOperand(*MI->getParent()->getParent(), MMO); return *this; diff --git a/include/llvm/CodeGen/MachineInstrBundle.h b/include/llvm/CodeGen/MachineInstrBundle.h index 8c150e6..0fb4969 100644 --- a/include/llvm/CodeGen/MachineInstrBundle.h +++ b/include/llvm/CodeGen/MachineInstrBundle.h @@ -19,16 +19,185 @@ namespace llvm { -/// FinalizeBundle - Finalize a machine instruction bundle which includes -/// a sequence of instructions starting from FirstMI to LastMI (inclusive). +/// finalizeBundle - Finalize a machine instruction bundle which includes +/// a sequence of instructions starting from FirstMI to LastMI (exclusive). 
/// This routine adds a BUNDLE instruction to represent the bundle, it adds /// IsInternalRead markers to MachineOperands which are defined inside the /// bundle, and it copies externally visible defs and uses to the BUNDLE /// instruction. -void FinalizeBundle(MachineBasicBlock &MBB, +void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI); +/// finalizeBundle - Same functionality as the previous finalizeBundle except +/// the last instruction in the bundle is not provided as an input. This is +/// used in cases where bundles are pre-determined by marking instructions +/// with 'InsideBundle' marker. It returns the MBB instruction iterator that +/// points to the end of the bundle. +MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB, + MachineBasicBlock::instr_iterator FirstMI); + +/// finalizeBundles - Finalize instruction bundles in the specified +/// MachineFunction. Return true if any bundles are finalized. +bool finalizeBundles(MachineFunction &MF); + +/// getBundleStart - Returns the first instruction in the bundle containing MI. +/// +static inline MachineInstr *getBundleStart(MachineInstr *MI) { + MachineBasicBlock::instr_iterator I = MI; + while (I->isInsideBundle()) + --I; + return I; +} + +static inline const MachineInstr *getBundleStart(const MachineInstr *MI) { + MachineBasicBlock::const_instr_iterator I = MI; + while (I->isInsideBundle()) + --I; + return I; +} + +//===----------------------------------------------------------------------===// +// MachineOperand iterator +// + +/// MachineOperandIteratorBase - Iterator that can visit all operands on a +/// MachineInstr, or all operands on a bundle of MachineInstrs. This class is +/// not intended to be used directly, use one of the sub-classes instead. +/// +/// Intended use: +/// +/// for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) { +/// if (!MIO->isReg()) +/// continue; +/// ... 
+/// } +/// +class MachineOperandIteratorBase { + MachineBasicBlock::instr_iterator InstrI, InstrE; + MachineInstr::mop_iterator OpI, OpE; + + // If the operands on InstrI are exhausted, advance InstrI to the next + // bundled instruction with operands. + void advance() { + while (OpI == OpE) { + // Don't advance off the basic block, or into a new bundle. + if (++InstrI == InstrE || !InstrI->isInsideBundle()) + break; + OpI = InstrI->operands_begin(); + OpE = InstrI->operands_end(); + } + } + +protected: + /// MachineOperandIteratorBase - Create an iterator that visits all operands + /// on MI, or all operands on every instruction in the bundle containing MI. + /// + /// @param MI The instruction to examine. + /// @param WholeBundle When true, visit all operands on the entire bundle. + /// + explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) { + if (WholeBundle) { + InstrI = getBundleStart(MI); + InstrE = MI->getParent()->instr_end(); + } else { + InstrI = InstrE = MI; + ++InstrE; + } + OpI = InstrI->operands_begin(); + OpE = InstrI->operands_end(); + if (WholeBundle) + advance(); + } + + MachineOperand &deref() const { return *OpI; } + +public: + /// isValid - Returns true until all the operands have been visited. + bool isValid() const { return OpI != OpE; } + + /// Preincrement. Move to the next operand. + void operator++() { + assert(isValid() && "Cannot advance MIOperands beyond the last operand"); + ++OpI; + advance(); + } + + /// getOperandNo - Returns the number of the current operand relative to its + /// instruction. + /// + unsigned getOperandNo() const { + return OpI - InstrI->operands_begin(); + } + + /// RegInfo - Information about a virtual register used by a set of operands. + /// + struct RegInfo { + /// Reads - One of the operands read the virtual register. This does not + /// include <undef> or <internal> use operands, see MO::readsReg(). + bool Reads; + + /// Writes - One of the operands writes the virtual register. 
+ bool Writes; + + /// Tied - Uses and defs must use the same register. This can be because of + /// a two-address constraint, or there may be a partial redefinition of a + /// sub-register. + bool Tied; + }; + + /// analyzeVirtReg - Analyze how the current instruction or bundle uses a + /// virtual register. This function should not be called after operator++(), + /// it expects a fresh iterator. + /// + /// @param Reg The virtual register to analyze. + /// @param Ops When set, this vector will receive an (MI, OpNum) entry for + /// each operand referring to Reg. + /// @returns A filled-in RegInfo struct. + RegInfo analyzeVirtReg(unsigned Reg, + SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0); +}; + +/// MIOperands - Iterate over operands of a single instruction. +/// +class MIOperands : public MachineOperandIteratorBase { +public: + MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {} + MachineOperand &operator* () const { return deref(); } + MachineOperand *operator->() const { return &deref(); } +}; + +/// ConstMIOperands - Iterate over operands of a single const instruction. +/// +class ConstMIOperands : public MachineOperandIteratorBase { +public: + ConstMIOperands(const MachineInstr *MI) + : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {} + const MachineOperand &operator* () const { return deref(); } + const MachineOperand *operator->() const { return &deref(); } +}; + +/// MIBundleOperands - Iterate over all operands in a bundle of machine +/// instructions. +/// +class MIBundleOperands : public MachineOperandIteratorBase { +public: + MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {} + MachineOperand &operator* () const { return deref(); } + MachineOperand *operator->() const { return &deref(); } +}; + +/// ConstMIBundleOperands - Iterate over all operands in a const bundle of +/// machine instructions. 
+/// +class ConstMIBundleOperands : public MachineOperandIteratorBase { +public: + ConstMIBundleOperands(const MachineInstr *MI) + : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {} + const MachineOperand &operator* () const { return deref(); } + const MachineOperand *operator->() const { return &deref(); } +}; + } // End llvm namespace #endif diff --git a/include/llvm/CodeGen/MachineJumpTableInfo.h b/include/llvm/CodeGen/MachineJumpTableInfo.h index 6264349..6bd6682 100644 --- a/include/llvm/CodeGen/MachineJumpTableInfo.h +++ b/include/llvm/CodeGen/MachineJumpTableInfo.h @@ -47,7 +47,12 @@ public: /// EK_BlockAddress - Each entry is a plain address of block, e.g.: /// .word LBB123 EK_BlockAddress, - + + /// EK_GPRel64BlockAddress - Each entry is an address of block, encoded + /// with a relocation as gp-relative, e.g.: + /// .gpdword LBB123 + EK_GPRel64BlockAddress, + /// EK_GPRel32BlockAddress - Each entry is an address of block, encoded /// with a relocation as gp-relative, e.g.: /// .gprel32 LBB123 diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h index 2bf7f17..6b88d4a 100644 --- a/include/llvm/CodeGen/MachineModuleInfo.h +++ b/include/llvm/CodeGen/MachineModuleInfo.h @@ -161,10 +161,10 @@ class MachineModuleInfo : public ImmutablePass { /// in this module. bool DbgInfoAvailable; - /// CallsExternalVAFunctionWithFloatingPointArguments - True if this module - /// calls VarArg function with floating point arguments. This is used to emit - /// an undefined reference to fltused on Windows targets. - bool CallsExternalVAFunctionWithFloatingPointArguments; + /// UsesVAFloatArgument - True if this module calls VarArg function with + /// floating-point arguments. This is used to emit an undefined reference + /// to _fltused on Windows targets. 
+ bool UsesVAFloatArgument; public: static char ID; // Pass identification, replacement for typeid @@ -223,12 +223,12 @@ public: bool callsUnwindInit() const { return CallsUnwindInit; } void setCallsUnwindInit(bool b) { CallsUnwindInit = b; } - bool callsExternalVAFunctionWithFloatingPointArguments() const { - return CallsExternalVAFunctionWithFloatingPointArguments; + bool usesVAFloatArgument() const { + return UsesVAFloatArgument; } - void setCallsExternalVAFunctionWithFloatingPointArguments(bool b) { - CallsExternalVAFunctionWithFloatingPointArguments = b; + void setUsesVAFloatArgument(bool b) { + UsesVAFloatArgument = b; } /// getFrameMoves - Returns a reference to a list of moves done in the current diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h index 2a977de..d244dd9 100644 --- a/include/llvm/CodeGen/MachineOperand.h +++ b/include/llvm/CodeGen/MachineOperand.h @@ -48,6 +48,7 @@ public: MO_ExternalSymbol, ///< Name of external global symbol MO_GlobalAddress, ///< Address of a global value MO_BlockAddress, ///< Address of a basic block + MO_RegisterMask, ///< Mask of preserved registers. MO_Metadata, ///< Metadata reference (for debug info) MO_MCSymbol ///< MCSymbol reference (for debug/eh info) }; @@ -141,6 +142,7 @@ private: const ConstantFP *CFP; // For MO_FPImmediate. const ConstantInt *CI; // For MO_CImmediate. Integers > 64bit. int64_t ImmVal; // For MO_Immediate. + const uint32_t *RegMask; // For MO_RegisterMask. const MDNode *MD; // For MO_Metadata. MCSymbol *Sym; // For MO_MCSymbol @@ -220,10 +222,13 @@ public: bool isSymbol() const { return OpKind == MO_ExternalSymbol; } /// isBlockAddress - Tests if this is a MO_BlockAddress operand. bool isBlockAddress() const { return OpKind == MO_BlockAddress; } + /// isRegMask - Tests if this is a MO_RegisterMask operand. + bool isRegMask() const { return OpKind == MO_RegisterMask; } /// isMetadata - Tests if this is a MO_Metadata operand. 
bool isMetadata() const { return OpKind == MO_Metadata; } bool isMCSymbol() const { return OpKind == MO_MCSymbol; } + //===--------------------------------------------------------------------===// // Accessors for Register Operands //===--------------------------------------------------------------------===// @@ -436,6 +441,28 @@ public: return Contents.OffsetedInfo.Val.SymbolName; } + /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg. + /// It is sometimes necessary to detach the register mask pointer from its + /// machine operand. This static method can be used for such detached bit + /// mask pointers. + static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) { + // See TargetRegisterInfo.h. + assert(PhysReg < (1u << 30) && "Not a physical register"); + return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32)); + } + + /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg. + bool clobbersPhysReg(unsigned PhysReg) const { + return clobbersPhysReg(getRegMask(), PhysReg); + } + + /// getRegMask - Returns a bit mask of registers preserved by this RegMask + /// operand. + const uint32_t *getRegMask() const { + assert(isRegMask() && "Wrong MachineOperand accessor"); + return Contents.RegMask; + } + const MDNode *getMetadata() const { assert(isMetadata() && "Wrong MachineOperand accessor"); return Contents.MD; @@ -582,6 +609,24 @@ public: Op.setTargetFlags(TargetFlags); return Op; } + /// CreateRegMask - Creates a register mask operand referencing Mask. The + /// operand does not take ownership of the memory referenced by Mask, it must + /// remain valid for the lifetime of the operand. + /// + /// A RegMask operand represents a set of non-clobbered physical registers on + /// an instruction that clobbers many registers, typically a call. 
The bit + /// mask has a bit set for each physreg that is preserved by this + /// instruction, as described in the documentation for + /// TargetRegisterInfo::getCallPreservedMask(). + /// + /// Any physreg with a 0 bit in the mask is clobbered by the instruction. + /// + static MachineOperand CreateRegMask(const uint32_t *Mask) { + assert(Mask && "Missing register mask"); + MachineOperand Op(MachineOperand::MO_RegisterMask); + Op.Contents.RegMask = Mask; + return Op; + } static MachineOperand CreateMetadata(const MDNode *Meta) { MachineOperand Op(MachineOperand::MO_Metadata); Op.Contents.MD = Meta; diff --git a/include/llvm/CodeGen/MachinePassRegistry.h b/include/llvm/CodeGen/MachinePassRegistry.h index 6ee2e90..c41e8e2 100644 --- a/include/llvm/CodeGen/MachinePassRegistry.h +++ b/include/llvm/CodeGen/MachinePassRegistry.h @@ -33,6 +33,7 @@ typedef void *(*MachinePassCtor)(); /// //===----------------------------------------------------------------------===// class MachinePassRegistryListener { + virtual void anchor(); public: MachinePassRegistryListener() {} virtual ~MachinePassRegistryListener() {} diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h index bd674d2..69ce588 100644 --- a/include/llvm/CodeGen/MachineRegisterInfo.h +++ b/include/llvm/CodeGen/MachineRegisterInfo.h @@ -15,12 +15,13 @@ #define LLVM_CODEGEN_MACHINEREGISTERINFO_H #include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/CodeGen/MachineInstrBundle.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/IndexedMap.h" #include <vector> namespace llvm { - + /// MachineRegisterInfo - Keep track of information for virtual and physical /// registers, including vreg register classes, use/def chains for registers, /// etc. @@ -46,18 +47,32 @@ class MachineRegisterInfo { /// the allocator should prefer the physical register allocated to the virtual /// register of the hint. 
IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints; - + /// PhysRegUseDefLists - This is an array of the head of the use/def list for /// physical registers. - MachineOperand **PhysRegUseDefLists; - + MachineOperand **PhysRegUseDefLists; + /// UsedPhysRegs - This is a bit vector that is computed and set by the /// register allocator, and must be kept up to date by passes that run after /// register allocation (though most don't modify this). This is used /// so that the code generator knows which callee save registers to save and /// for other target specific uses. + /// This vector only has bits set for registers explicitly used, not their + /// aliases. BitVector UsedPhysRegs; - + + /// UsedPhysRegMask - Additional used physregs, but including aliases. + BitVector UsedPhysRegMask; + + /// ReservedRegs - This is a bit vector of reserved registers. The target + /// may change its mind about which registers should be reserved. This + /// vector is the frozen set of reserved registers when register allocation + /// started. + BitVector ReservedRegs; + + /// AllocatableRegs - From TRI->getAllocatableSet. + mutable BitVector AllocatableRegs; + /// LiveIns/LiveOuts - Keep track of the physical registers that are /// livein/liveout of the function. Live in values are typically arguments in /// registers, live out values are typically return values in registers. @@ -65,7 +80,7 @@ class MachineRegisterInfo { /// stored in the second element. std::vector<std::pair<unsigned, unsigned> > LiveIns; std::vector<unsigned> LiveOuts; - + MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT public: @@ -141,7 +156,7 @@ public: return use_iterator(getRegUseDefListHead(RegNo)); } static use_iterator use_end() { return use_iterator(0); } - + /// use_empty - Return true if there are no instructions using the specified /// register. 
bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); } @@ -157,7 +172,7 @@ public: return use_nodbg_iterator(getRegUseDefListHead(RegNo)); } static use_nodbg_iterator use_nodbg_end() { return use_nodbg_iterator(0); } - + /// use_nodbg_empty - Return true if there are no non-Debug instructions /// using the specified register. bool use_nodbg_empty(unsigned RegNo) const { @@ -180,7 +195,7 @@ public: /// That function will return NULL if the virtual registers have incompatible /// constraints. void replaceRegWith(unsigned FromReg, unsigned ToReg); - + /// getRegUseDefListHead - Return the head pointer for the register use/def /// list for the specified virtual or physical register. MachineOperand *&getRegUseDefListHead(unsigned RegNo) { @@ -188,7 +203,7 @@ public: return VRegInfo[RegNo].second; return PhysRegUseDefLists[RegNo]; } - + MachineOperand *getRegUseDefListHead(unsigned RegNo) const { if (TargetRegisterInfo::isVirtualRegister(RegNo)) return VRegInfo[RegNo].second; @@ -205,15 +220,20 @@ public: /// optimization passes which extend register lifetimes and need only /// preserve conservative kill flag information. void clearKillFlags(unsigned Reg) const; - + #ifndef NDEBUG void dumpUses(unsigned RegNo) const; #endif - + + /// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant + /// throughout the function. It is safe to move instructions that read such + /// a physreg. + bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const; + //===--------------------------------------------------------------------===// // Virtual Register Info //===--------------------------------------------------------------------===// - + /// getRegClass - Return the register class of the specified virtual register. 
/// const TargetRegisterClass *getRegClass(unsigned Reg) const { @@ -254,6 +274,9 @@ public: /// unsigned getNumVirtRegs() const { return VRegInfo.size(); } + /// clearVirtRegs - Remove all virtual registers (after physreg assignment). + void clearVirtRegs(); + /// setRegAllocationHint - Specify a register allocation hint for the /// specified virtual register. void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) { @@ -279,38 +302,87 @@ public: //===--------------------------------------------------------------------===// // Physical Register Use Info //===--------------------------------------------------------------------===// - + /// isPhysRegUsed - Return true if the specified register is used in this /// function. This only works after register allocation. - bool isPhysRegUsed(unsigned Reg) const { return UsedPhysRegs[Reg]; } - + bool isPhysRegUsed(unsigned Reg) const { + return UsedPhysRegs.test(Reg) || UsedPhysRegMask.test(Reg); + } + + /// isPhysRegOrOverlapUsed - Return true if Reg or any overlapping register + /// is used in this function. + bool isPhysRegOrOverlapUsed(unsigned Reg) const { + if (UsedPhysRegMask.test(Reg)) + return true; + for (const uint16_t *AI = TRI->getOverlaps(Reg); *AI; ++AI) + if (UsedPhysRegs.test(*AI)) + return true; + return false; + } + /// setPhysRegUsed - Mark the specified register used in this function. /// This should only be called during and after register allocation. - void setPhysRegUsed(unsigned Reg) { UsedPhysRegs[Reg] = true; } + void setPhysRegUsed(unsigned Reg) { UsedPhysRegs.set(Reg); } /// addPhysRegsUsed - Mark the specified registers used in this function. /// This should only be called during and after register allocation. void addPhysRegsUsed(const BitVector &Regs) { UsedPhysRegs |= Regs; } + /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used. + /// This corresponds to the bit mask attached to register mask operands. 
+ void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) { + UsedPhysRegMask.setBitsNotInMask(RegMask); + } + /// setPhysRegUnused - Mark the specified register unused in this function. /// This should only be called during and after register allocation. - void setPhysRegUnused(unsigned Reg) { UsedPhysRegs[Reg] = false; } + void setPhysRegUnused(unsigned Reg) { + UsedPhysRegs.reset(Reg); + UsedPhysRegMask.reset(Reg); + } + + + //===--------------------------------------------------------------------===// + // Reserved Register Info + //===--------------------------------------------------------------------===// + // + // The set of reserved registers must be invariant during register + // allocation. For example, the target cannot suddenly decide it needs a + // frame pointer when the register allocator has already used the frame + // pointer register for something else. + // + // These methods can be used by target hooks like hasFP() to avoid changing + // the reserved register set during register allocation. + + /// freezeReservedRegs - Called by the register allocator to freeze the set + /// of reserved registers before allocation begins. + void freezeReservedRegs(const MachineFunction&); + + /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called + /// to ensure the set of reserved registers stays constant. + bool reservedRegsFrozen() const { + return !ReservedRegs.empty(); + } + + /// canReserveReg - Returns true if PhysReg can be used as a reserved + /// register. Any register can be reserved before freezeReservedRegs() is + /// called. + bool canReserveReg(unsigned PhysReg) const { + return !reservedRegsFrozen() || ReservedRegs.test(PhysReg); + } - /// closePhysRegsUsed - Expand UsedPhysRegs to its transitive closure over - /// subregisters. That means that if R is used, so are all subregisters. 
- void closePhysRegsUsed(const TargetRegisterInfo&); //===--------------------------------------------------------------------===// // LiveIn/LiveOut Management //===--------------------------------------------------------------------===// - + /// addLiveIn/Out - Add the specified register as a live in/out. Note that it /// is an error to add the same register to the same set more than once. void addLiveIn(unsigned Reg, unsigned vreg = 0) { LiveIns.push_back(std::make_pair(Reg, vreg)); } void addLiveOut(unsigned Reg) { LiveOuts.push_back(Reg); } - + // Iteration support for live in/out sets. These sets are kept in sorted // order by their register number. typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator @@ -342,7 +414,7 @@ public: private: void HandleVRegListReallocation(); - + public: /// defusechain_iterator - This class provides iterator support for machine /// operands in the function that use or define a specific register. If @@ -370,31 +442,31 @@ public: MachineInstr, ptrdiff_t>::reference reference; typedef std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t>::pointer pointer; - + defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {} defusechain_iterator() : Op(0) {} - + bool operator==(const defusechain_iterator &x) const { return Op == x.Op; } bool operator!=(const defusechain_iterator &x) const { return !operator==(x); } - + /// atEnd - return true if this iterator is equal to reg_end() on the value. bool atEnd() const { return Op == 0; } - + // Iterator traversal: forward iteration only defusechain_iterator &operator++() { // Preincrement assert(Op && "Cannot increment end iterator!"); Op = Op->getNextOperandForReg(); - + // If this is an operand we don't care about, skip it. 
- while (Op && ((!ReturnUses && Op->isUse()) || + while (Op && ((!ReturnUses && Op->isUse()) || (!ReturnDefs && Op->isDef()) || (SkipDebug && Op->isDebug()))) Op = Op->getNextOperandForReg(); - + return *this; } defusechain_iterator operator++(int) { // Postincrement @@ -412,30 +484,38 @@ public: return MI; } + MachineInstr *skipBundle() { + if (!Op) return 0; + MachineInstr *MI = getBundleStart(Op->getParent()); + do ++*this; + while (Op && getBundleStart(Op->getParent()) == MI); + return MI; + } + MachineOperand &getOperand() const { assert(Op && "Cannot dereference end iterator!"); return *Op; } - + /// getOperandNo - Return the operand # of this MachineOperand in its /// MachineInstr. unsigned getOperandNo() const { assert(Op && "Cannot dereference end iterator!"); return Op - &Op->getParent()->getOperand(0); } - + // Retrieve a reference to the current operand. MachineInstr &operator*() const { assert(Op && "Cannot dereference end iterator!"); return *Op->getParent(); } - + MachineInstr *operator->() const { assert(Op && "Cannot dereference end iterator!"); return Op->getParent(); } }; - + }; } // End llvm namespace diff --git a/include/llvm/CodeGen/ObjectCodeEmitter.h b/include/llvm/CodeGen/ObjectCodeEmitter.h deleted file mode 100644 index d46628c..0000000 --- a/include/llvm/CodeGen/ObjectCodeEmitter.h +++ /dev/null @@ -1,171 +0,0 @@ -//===-- llvm/CodeGen/ObjectCodeEmitter.h - Object Code Emitter -*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Generalized Object Code Emitter, works with ObjectModule and BinaryObject. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CODEGEN_OBJECTCODEEMITTER_H -#define LLVM_CODEGEN_OBJECTCODEEMITTER_H - -#include "llvm/CodeGen/MachineCodeEmitter.h" - -namespace llvm { - -class BinaryObject; -class MachineBasicBlock; -class MachineCodeEmitter; -class MachineFunction; -class MachineConstantPool; -class MachineJumpTableInfo; -class MachineModuleInfo; - -class ObjectCodeEmitter : public MachineCodeEmitter { -protected: - - /// Binary Object (Section or Segment) we are emitting to. - BinaryObject *BO; - - /// MBBLocations - This vector is a mapping from MBB ID's to their address. - /// It is filled in by the StartMachineBasicBlock callback and queried by - /// the getMachineBasicBlockAddress callback. - std::vector<uintptr_t> MBBLocations; - - /// LabelLocations - This vector is a mapping from Label ID's to their - /// address. - std::vector<uintptr_t> LabelLocations; - - /// CPLocations - This is a map of constant pool indices to offsets from the - /// start of the section for that constant pool index. - std::vector<uintptr_t> CPLocations; - - /// CPSections - This is a map of constant pool indices to the Section - /// containing the constant pool entry for that index. - std::vector<uintptr_t> CPSections; - - /// JTLocations - This is a map of jump table indices to offsets from the - /// start of the section for that jump table index. - std::vector<uintptr_t> JTLocations; - -public: - ObjectCodeEmitter(); - ObjectCodeEmitter(BinaryObject *bo); - virtual ~ObjectCodeEmitter(); - - /// setBinaryObject - set the BinaryObject we are writting to - void setBinaryObject(BinaryObject *bo); - - /// emitByte - This callback is invoked when a byte needs to be - /// written to the data stream, without buffer overflow testing. - void emitByte(uint8_t B); - - /// emitWordLE - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in little-endian format. 
- void emitWordLE(uint32_t W); - - /// emitWordBE - This callback is invoked when a 32-bit word needs to be - /// written to the data stream in big-endian format. - void emitWordBE(uint32_t W); - - /// emitDWordLE - This callback is invoked when a 64-bit word needs to be - /// written to the data stream in little-endian format. - void emitDWordLE(uint64_t W); - - /// emitDWordBE - This callback is invoked when a 64-bit word needs to be - /// written to the data stream in big-endian format. - void emitDWordBE(uint64_t W); - - /// emitAlignment - Move the CurBufferPtr pointer up to the specified - /// alignment (saturated to BufferEnd of course). - void emitAlignment(unsigned Alignment = 0, uint8_t fill = 0); - - /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be - /// written to the data stream. - void emitULEB128Bytes(uint64_t Value); - - /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be - /// written to the data stream. - void emitSLEB128Bytes(uint64_t Value); - - /// emitString - This callback is invoked when a String needs to be - /// written to the data stream. - void emitString(const std::string &String); - - /// getCurrentPCValue - This returns the address that the next emitted byte - /// will be output to. - uintptr_t getCurrentPCValue() const; - - /// getCurrentPCOffset - Return the offset from the start of the emitted - /// buffer that we are currently writing to. - uintptr_t getCurrentPCOffset() const; - - /// addRelocation - Whenever a relocatable address is needed, it should be - /// noted with this interface. - void addRelocation(const MachineRelocation& relocation); - - /// earlyResolveAddresses - True if the code emitter can use symbol addresses - /// during code emission time. The JIT is capable of doing this because it - /// creates jump tables or constant pools in memory on the fly while the - /// object code emitters rely on a linker to have real addresses and should - /// use relocations instead. 
- bool earlyResolveAddresses() const { return false; } - - /// startFunction - This callback is invoked when the specified function is - /// about to be code generated. This initializes the BufferBegin/End/Ptr - /// fields. - virtual void startFunction(MachineFunction &F) = 0; - - /// finishFunction - This callback is invoked when the specified function has - /// finished code generation. If a buffer overflow has occurred, this method - /// returns true (the callee is required to try again), otherwise it returns - /// false. - virtual bool finishFunction(MachineFunction &F) = 0; - - /// StartMachineBasicBlock - This should be called by the target when a new - /// basic block is about to be emitted. This way the MCE knows where the - /// start of the block is, and can implement getMachineBasicBlockAddress. - virtual void StartMachineBasicBlock(MachineBasicBlock *MBB); - - /// getMachineBasicBlockAddress - Return the address of the specified - /// MachineBasicBlock, only usable after the label for the MBB has been - /// emitted. - virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const; - - /// emitJumpTables - Emit all the jump tables for a given jump table info - /// record to the appropriate section. - virtual void emitJumpTables(MachineJumpTableInfo *MJTI) = 0; - - /// getJumpTableEntryAddress - Return the address of the jump table with index - /// 'Index' in the function that last called initJumpTableInfo. - virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const; - - /// emitConstantPool - For each constant pool entry, figure out which section - /// the constant should live in, allocate space for it, and emit it to the - /// Section data buffer. - virtual void emitConstantPool(MachineConstantPool *MCP) = 0; - - /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in - /// the constant pool that was last emitted with the emitConstantPool method. 
- virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const; - - /// getConstantPoolEntrySection - Return the section of the 'Index' entry in - /// the constant pool that was last emitted with the emitConstantPool method. - virtual uintptr_t getConstantPoolEntrySection(unsigned Index) const; - - /// Specifies the MachineModuleInfo object. This is used for exception handling - /// purposes. - virtual void setModuleInfo(MachineModuleInfo* Info) = 0; - // to be implemented or depreciated with MachineModuleInfo - -}; // end class ObjectCodeEmitter - -} // end namespace llvm - -#endif - diff --git a/include/llvm/CodeGen/PBQP/HeuristicBase.h b/include/llvm/CodeGen/PBQP/HeuristicBase.h index 791c227..3fee18c 100644 --- a/include/llvm/CodeGen/PBQP/HeuristicBase.h +++ b/include/llvm/CodeGen/PBQP/HeuristicBase.h @@ -157,7 +157,7 @@ namespace PBQP { case 0: s.applyR0(nItr); break; case 1: s.applyR1(nItr); break; case 2: s.applyR2(nItr); break; - default: assert(false && + default: llvm_unreachable( "Optimal reductions of degree > 2 nodes is invalid."); } @@ -186,7 +186,7 @@ namespace PBQP { /// \brief Add a node to the heuristic reduce list. /// @param nItr Node iterator to add to the heuristic reduce list. void addToHeuristicList(Graph::NodeItr nItr) { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Heuristically reduce one of the nodes in the heuristic @@ -194,25 +194,25 @@ namespace PBQP { /// @return True if a reduction takes place, false if the heuristic reduce /// list is empty. void heuristicReduce() { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Prepare a change in the costs on the given edge. /// @param eItr Edge iterator. 
void preUpdateEdgeCosts(Graph::EdgeItr eItr) { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Handle the change in the costs on the given edge. /// @param eItr Edge iterator. void postUpdateEdgeCostts(Graph::EdgeItr eItr) { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Handle the addition of a new edge into the PBQP graph. /// @param eItr Edge iterator for the added edge. void handleAddEdge(Graph::EdgeItr eItr) { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Handle disconnection of an edge from a node. @@ -223,7 +223,7 @@ namespace PBQP { /// method allows for the effect to be computed only for the remaining /// node in the graph. void handleRemoveEdge(Graph::EdgeItr eItr, Graph::NodeItr nItr) { - assert(false && "Must be implemented in derived class."); + llvm_unreachable("Must be implemented in derived class."); } /// \brief Clean up any structures used by HeuristicBase. diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index b4f6cac..8ce339d 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -15,6 +15,7 @@ #ifndef LLVM_CODEGEN_PASSES_H #define LLVM_CODEGEN_PASSES_H +#include "llvm/Pass.h" #include "llvm/Target/TargetMachine.h" #include <string> @@ -26,7 +27,213 @@ namespace llvm { class TargetLowering; class TargetRegisterClass; class raw_ostream; +} +namespace llvm { + +extern char &NoPassID; // Allow targets to choose not to run a pass. + +class PassConfigImpl; + +/// Target-Independent Code Generator Pass Configuration Options. +/// +/// This is an ImmutablePass solely for the purpose of exposing CodeGen options +/// to the internals of other CodeGen passes. +class TargetPassConfig : public ImmutablePass { +public: + /// Pseudo Pass IDs. 
These are defined within TargetPassConfig because they + /// are unregistered pass IDs. They are only useful for use with + /// TargetPassConfig APIs to identify multiple occurrences of the same pass. + /// + + /// EarlyTailDuplicate - A clone of the TailDuplicate pass that runs early + /// during codegen, on SSA form. + static char EarlyTailDuplicateID; + + /// PostRAMachineLICM - A clone of the LICM pass that runs during late machine + /// optimization after regalloc. + static char PostRAMachineLICMID; + +protected: + TargetMachine *TM; + PassManagerBase &PM; + PassConfigImpl *Impl; // Internal data structures + bool Initialized; // Flagged after all passes are configured. + + // Target Pass Options + // Targets provide a default setting, user flags override. + // + bool DisableVerify; + + /// Default setting for -enable-tail-merge on this target. + bool EnableTailMerge; + +public: + TargetPassConfig(TargetMachine *tm, PassManagerBase &pm); + // Dummy constructor. + TargetPassConfig(); + + virtual ~TargetPassConfig(); + + static char ID; + + /// Get the right type of TargetMachine for this target. + template<typename TMC> TMC &getTM() const { + return *static_cast<TMC*>(TM); + } + + const TargetLowering *getTargetLowering() const { + return TM->getTargetLowering(); + } + + // + void setInitialized() { Initialized = true; } + + CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); } + + void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); } + + bool getEnableTailMerge() const { return EnableTailMerge; } + void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); } + + /// Allow the target to override a specific pass without overriding the pass + /// pipeline. When passes are added to the standard pipeline at the + /// point where StandardID is expected, add TargetID in its place. + void substitutePass(char &StandardID, char &TargetID); + + /// Allow the target to disable a specific standard pass.
+ void disablePass(char &ID) { substitutePass(ID, NoPassID); } + + /// Return the pass substituted for StandardID by the target. + /// If no substitution exists, return StandardID. + AnalysisID getPassSubstitution(AnalysisID StandardID) const; + + /// Return true if the optimized regalloc pipeline is enabled. + bool getOptimizeRegAlloc() const; + + /// Add common target configurable passes that perform LLVM IR to IR + /// transforms following machine independent optimization. + virtual void addIRPasses(); + + /// Add common passes that perform LLVM IR to IR transforms in preparation for + /// instruction selection. + virtual void addISelPrepare(); + + /// addInstSelector - This method should install an instruction selector pass, + /// which converts from LLVM code to machine instructions. + virtual bool addInstSelector() { + return true; + } + + /// Add the complete, standard set of LLVM CodeGen passes. + /// Fully developed targets will not generally override this. + virtual void addMachinePasses(); + +protected: + // Helper to verify the analysis is really immutable. + void setOpt(bool &Opt, bool Val); + + /// Methods with trivial inline returns are convenient points in the common + /// codegen pass pipeline where targets may insert passes. Methods with + /// out-of-line standard implementations are major CodeGen stages called by + /// addMachinePasses. Some targets may override major stages when inserting + /// passes is insufficient, but maintaining overridden stages is more work. + /// + + /// addPreISel - This method should add any "last minute" LLVM->LLVM + /// passes (which are run just before instruction selector). + virtual bool addPreISel() { + return true; + } + + /// addMachineSSAOptimization - Add standard passes that optimize machine + /// instructions in SSA form. + virtual void addMachineSSAOptimization(); + + /// addPreRegAlloc - This method may be implemented by targets that want to + /// run passes immediately before register allocation.
This should return + /// true if -print-machineinstrs should print after these passes. + virtual bool addPreRegAlloc() { + return false; + } + + /// createTargetRegisterAllocator - Create the register allocator pass for + /// this target at the current optimization level. + virtual FunctionPass *createTargetRegisterAllocator(bool Optimized); + + /// addFastRegAlloc - Add the minimum set of target-independent passes that + /// are required for fast register allocation. + virtual void addFastRegAlloc(FunctionPass *RegAllocPass); + + /// addOptimizedRegAlloc - Add passes related to register allocation. + /// LLVMTargetMachine provides standard regalloc passes for most targets. + virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass); + + /// addFinalizeRegAlloc - This method may be implemented by targets that want + /// to run passes within the regalloc pipeline, immediately after the register + /// allocation pass itself. These passes run as soon as virtual registers + /// have been rewritten to physical registers but before any other postRA + /// optimization happens. Targets that have marked instructions for bundling + /// must have finalized those bundles by the time these passes have run, + /// because subsequent passes are not guaranteed to be bundle-aware. + virtual bool addFinalizeRegAlloc() { + return false; + } + + /// addPostRegAlloc - This method may be implemented by targets that want to + /// run passes after register allocation pass pipeline but before + /// prolog-epilog insertion. This should return true if -print-machineinstrs + /// should print after these passes. + virtual bool addPostRegAlloc() { + return false; + } + + /// Add passes that optimize machine instructions after register allocation. + virtual void addMachineLateOptimization(); + + /// addPreSched2 - This method may be implemented by targets that want to + /// run passes after prolog-epilog insertion and before the second instruction + /// scheduling pass.
This should return true if -print-machineinstrs should + /// print after these passes. + virtual bool addPreSched2() { + return false; + } + + /// Add standard basic block placement passes. + virtual void addBlockPlacement(); + + /// addPreEmitPass - This pass may be implemented by targets that want to run + /// passes immediately before machine code is emitted. This should return + /// true if -print-machineinstrs should print out the code after the passes. + virtual bool addPreEmitPass() { + return false; + } + + /// Utilities for targets to add passes to the pass manager. + /// + + /// Add a CodeGen pass at this point in the pipeline after checking overrides. + /// Return the pass that was added, or NoPassID. + AnalysisID addPass(char &ID); + + /// addMachinePasses helper to create the target-selected or overriden + /// regalloc pass. + FunctionPass *createRegAllocPass(bool Optimized); + + /// printNoVerify - Add a pass to dump the machine function, if debugging is + /// enabled. + /// + void printNoVerify(const char *Banner) const; + + /// printAndVerify - Add a pass to dump then verify the machine function, if + /// those steps are enabled. + /// + void printAndVerify(const char *Banner) const; +}; +} // namespace llvm + +/// List of target independent CodeGen pass IDs. +namespace llvm { /// createUnreachableBlockEliminationPass - The LLVM code generator does not /// work well with unreachable basic blocks (what live ranges make sense for a /// block that cannot be reached?). As such, a code generator should either @@ -41,31 +248,29 @@ namespace llvm { createMachineFunctionPrinterPass(raw_ostream &OS, const std::string &Banner =""); - /// MachineLoopInfo pass - This pass is a loop analysis pass. - /// + /// MachineLoopInfo - This pass is a loop analysis pass. extern char &MachineLoopInfoID; - /// MachineLoopRanges pass - This pass is an on-demand loop coverage - /// analysis pass. 
- /// + /// MachineLoopRanges - This pass is an on-demand loop coverage analysis. extern char &MachineLoopRangesID; - /// MachineDominators pass - This pass is a machine dominators analysis pass. - /// + /// MachineDominators - This pass is a machine dominators analysis pass. extern char &MachineDominatorsID; /// EdgeBundles analysis - Bundle machine CFG edges. - /// extern char &EdgeBundlesID; - /// PHIElimination pass - This pass eliminates machine instruction PHI nodes + /// LiveVariables pass - This pass computes the set of blocks in which each + /// variable is live and sets machine operand kill flags. + extern char &LiveVariablesID; + + /// PHIElimination - This pass eliminates machine instruction PHI nodes /// by inserting copy instructions. This destroys SSA information, but is the /// desired input for some register allocators. This pass is "required" by /// these register allocator like this: AU.addRequiredID(PHIEliminationID); - /// extern char &PHIEliminationID; - /// StrongPHIElimination pass - This pass eliminates machine instruction PHI + /// StrongPHIElimination - This pass eliminates machine instruction PHI /// nodes by inserting copy instructions. This destroys SSA information, but /// is the desired input for some register allocators. This pass is /// "required" by these register allocator like this: @@ -76,32 +281,30 @@ namespace llvm { /// LiveStacks pass. An analysis keeping track of the liveness of stack slots. extern char &LiveStacksID; - /// TwoAddressInstruction pass - This pass reduces two-address instructions to + /// TwoAddressInstruction - This pass reduces two-address instructions to /// use two operands. This destroys SSA information but it is desired by /// register allocators. extern char &TwoAddressInstructionPassID; - /// RegisteCoalescer pass - This pass merges live ranges to eliminate copies. - extern char &RegisterCoalescerPassID; + /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
+ extern char &ProcessImplicitDefsID; + + /// RegisterCoalescer - This pass merges live ranges to eliminate copies. + extern char &RegisterCoalescerID; + + /// MachineScheduler - This pass schedules machine instructions. + extern char &MachineSchedulerID; /// SpillPlacement analysis. Suggest optimal placement of spill code between /// basic blocks. - /// extern char &SpillPlacementID; - /// UnreachableMachineBlockElimination pass - This pass removes unreachable + /// UnreachableMachineBlockElimination - This pass removes unreachable /// machine basic blocks. extern char &UnreachableMachineBlockElimID; - /// DeadMachineInstructionElim pass - This pass removes dead machine - /// instructions. - /// - FunctionPass *createDeadMachineInstructionElimPass(); - - /// Creates a register allocator as the user specified on the command line, or - /// picks one that matches OptLevel. - /// - FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel); + /// DeadMachineInstructionElim - This pass removes dead machine instructions. + extern char &DeadMachineInstructionElimID; /// FastRegisterAllocation Pass - This pass register allocates as fast as /// possible. It is best suited for debug code where live ranges are short. @@ -123,55 +326,54 @@ namespace llvm { /// FunctionPass *createDefaultPBQPRegisterAllocator(); - /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code, + /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code, /// and eliminates abstract frame references. - /// - FunctionPass *createPrologEpilogCodeInserter(); + extern char &PrologEpilogCodeInserterID; - /// ExpandPostRAPseudos Pass - This pass expands pseudo instructions after + /// ExpandPostRAPseudos - This pass expands pseudo instructions after /// register allocation. - /// - FunctionPass *createExpandPostRAPseudosPass(); + extern char &ExpandPostRAPseudosID; /// createPostRAScheduler - This pass performs post register allocation /// scheduling. 
- FunctionPass *createPostRAScheduler(CodeGenOpt::Level OptLevel); + extern char &PostRASchedulerID; - /// BranchFolding Pass - This pass performs machine code CFG based + /// BranchFolding - This pass performs machine code CFG based /// optimizations to delete branches to branches, eliminate branches to /// successor blocks (creating fall throughs), and eliminating branches over /// branches. - FunctionPass *createBranchFoldingPass(bool DefaultEnableTailMerge); + extern char &BranchFolderPassID; - /// TailDuplicate Pass - Duplicate blocks with unconditional branches + /// TailDuplicate - Duplicate blocks with unconditional branches /// into tails of their predecessors. - FunctionPass *createTailDuplicatePass(bool PreRegAlloc = false); + extern char &TailDuplicateID; - /// IfConverter Pass - This pass performs machine code if conversion. - FunctionPass *createIfConverterPass(); + /// IfConverter - This pass performs machine code if conversion. + extern char &IfConverterID; - /// MachineBlockPlacement Pass - This pass places basic blocks based on branch + /// MachineBlockPlacement - This pass places basic blocks based on branch /// probabilities. - FunctionPass *createMachineBlockPlacementPass(); + extern char &MachineBlockPlacementID; - /// MachineBlockPlacementStats Pass - This pass collects statistics about the + /// MachineBlockPlacementStats - This pass collects statistics about the /// basic block placement using branch probabilities and block frequency /// information. - FunctionPass *createMachineBlockPlacementStatsPass(); + extern char &MachineBlockPlacementStatsID; - /// Code Placement Pass - This pass optimize code placement and aligns loop + /// Code Placement - This pass optimize code placement and aligns loop /// headers to target specific alignment boundary. 
- FunctionPass *createCodePlacementOptPass(); + extern char &CodePlacementOptID; - /// IntrinsicLowering Pass - Performs target-independent LLVM IR - /// transformations for highly portable strategies. + /// GCLowering Pass - Performs target-independent LLVM IR transformations for + /// highly portable strategies. + /// FunctionPass *createGCLoweringPass(); - /// MachineCodeAnalysis Pass - Target-independent pass to mark safe points in - /// machine code. Must be added very late during code generation, just prior - /// to output, and importantly after all CFG transformations (such as branch - /// folding). - FunctionPass *createGCMachineCodeAnalysisPass(); + /// GCMachineCodeAnalysis - Target-independent pass to mark safe points + /// in machine code. Must be added very late during code generation, just + /// prior to output, and importantly after all CFG transformations (such as + /// branch folding). + extern char &GCMachineCodeAnalysisID; /// Deleter Pass - Releases GC metadata. /// @@ -181,34 +383,37 @@ namespace llvm { /// FunctionPass *createGCInfoPrinter(raw_ostream &OS); - /// createMachineCSEPass - This pass performs global CSE on machine - /// instructions. - FunctionPass *createMachineCSEPass(); + /// MachineCSE - This pass performs global CSE on machine instructions. + extern char &MachineCSEID; - /// createMachineLICMPass - This pass performs LICM on machine instructions. - /// - FunctionPass *createMachineLICMPass(bool PreRegAlloc = true); + /// MachineLICM - This pass performs LICM on machine instructions. + extern char &MachineLICMID; - /// createMachineSinkingPass - This pass performs sinking on machine - /// instructions. - FunctionPass *createMachineSinkingPass(); + /// MachineSinking - This pass performs sinking on machine instructions. + extern char &MachineSinkingID; - /// createPeepholeOptimizerPass - This pass performs peephole optimizations - + /// MachineCopyPropagation - This pass performs copy propagation on + /// machine instructions. 
+ extern char &MachineCopyPropagationID; + + /// PeepholeOptimizer - This pass performs peephole optimizations - /// like extension and comparison eliminations. - FunctionPass *createPeepholeOptimizerPass(); + extern char &PeepholeOptimizerID; - /// createOptimizePHIsPass - This pass optimizes machine instruction PHIs + /// OptimizePHIs - This pass optimizes machine instruction PHIs /// to take advantage of opportunities created during DAG legalization. - FunctionPass *createOptimizePHIsPass(); + extern char &OptimizePHIsID; - /// createStackSlotColoringPass - This pass performs stack slot coloring. - FunctionPass *createStackSlotColoringPass(bool); + /// StackSlotColoring - This pass performs stack slot coloring. + extern char &StackSlotColoringID; /// createStackProtectorPass - This pass adds stack protectors to functions. + /// FunctionPass *createStackProtectorPass(const TargetLowering *tli); /// createMachineVerifierPass - This pass verifies cenerated machine code /// instructions for correctness. + /// FunctionPass *createMachineVerifierPass(const char *Banner = 0); /// createDwarfEHPass - This pass mulches exception handling code into a form @@ -217,18 +422,17 @@ namespace llvm { /// createSjLjEHPass - This pass adapts exception handling code to use /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow. + /// FunctionPass *createSjLjEHPass(const TargetLowering *tli); - /// createLocalStackSlotAllocationPass - This pass assigns local frame - /// indices to stack slots relative to one another and allocates - /// base registers to access them when it is estimated by the target to - /// be out of range of normal frame pointer or stack pointer index - /// addressing. 
- FunctionPass *createLocalStackSlotAllocationPass(); + /// LocalStackSlotAllocation - This pass assigns local frame indices to stack + /// slots relative to one another and allocates base registers to access them + /// when it is estimated by the target to be out of range of normal frame + /// pointer or stack pointer index addressing. + extern char &LocalStackSlotAllocationID; - /// createExpandISelPseudosPass - This pass expands pseudo-instructions. - /// - FunctionPass *createExpandISelPseudosPass(); + /// ExpandISelPseudos - This pass expands pseudo-instructions. + extern char &ExpandISelPseudosID; /// createExecutionDependencyFixPass - This pass fixes execution time /// problems with dependent instructions, such as switching execution @@ -238,9 +442,12 @@ namespace llvm { /// FunctionPass *createExecutionDependencyFixPass(const TargetRegisterClass *RC); - /// createUnpackMachineBundles - This pass unpack machine instruction bundles. - /// - FunctionPass *createUnpackMachineBundlesPass(); + /// UnpackMachineBundles - This pass unpack machine instruction bundles. + extern char &UnpackMachineBundlesID; + + /// FinalizeMachineBundles - This pass finalize machine instruction + /// bundles (created earlier, e.g. during pre-RA scheduling). + extern char &FinalizeMachineBundlesID; } // End llvm namespace diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h index 26b6773..3986a8d 100644 --- a/include/llvm/CodeGen/RegisterScavenging.h +++ b/include/llvm/CodeGen/RegisterScavenging.h @@ -68,6 +68,10 @@ class RegScavenger { /// available, unset means the register is currently being used. BitVector RegsAvailable; + // These BitVectors are only used internally to forward(). They are members + // to avoid frequent reallocations. + BitVector KillRegs, DefRegs; + public: RegScavenger() : MBB(NULL), NumPhysRegs(0), Tracking(false), @@ -130,8 +134,9 @@ private: /// isUsed / isUnused - Test if a register is currently being used. 
/// - bool isUsed(unsigned Reg) const { return !RegsAvailable.test(Reg); } - bool isUnused(unsigned Reg) const { return RegsAvailable.test(Reg); } + bool isUsed(unsigned Reg) const { + return !RegsAvailable.test(Reg) || ReservedRegs.test(Reg); + } /// isAliasUsed - Is Reg or an alias currently in use? bool isAliasUsed(unsigned Reg) const; @@ -139,7 +144,7 @@ private: /// setUsed / setUnused - Mark the state of one or a number of registers. /// void setUsed(BitVector &Regs) { - RegsAvailable &= ~Regs; + RegsAvailable.reset(Regs); } void setUnused(BitVector &Regs) { RegsAvailable |= Regs; @@ -148,9 +153,6 @@ private: /// Add Reg and all its sub-registers to BV. void addRegWithSubRegs(BitVector &BV, unsigned Reg); - /// Add Reg and its aliases to BV. - void addRegWithAliases(BitVector &BV, unsigned Reg); - /// findSurvivorReg - Return the candidate register that is unused for the /// longest after StartMI. UseMI is set to the instruction where the search /// stopped. diff --git a/include/llvm/CodeGen/ResourcePriorityQueue.h b/include/llvm/CodeGen/ResourcePriorityQueue.h new file mode 100644 index 0000000..fa7011d --- /dev/null +++ b/include/llvm/CodeGen/ResourcePriorityQueue.h @@ -0,0 +1,142 @@ +//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ResourcePriorityQueue class, which is a +// SchedulingPriorityQueue that schedules using DFA state to +// reduce the length of the critical path through the basic block +// on VLIW platforms. 
+// +//===----------------------------------------------------------------------===// + +#ifndef RESOURCE_PRIORITY_QUEUE_H +#define RESOURCE_PRIORITY_QUEUE_H + +#include "llvm/CodeGen/DFAPacketizer.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/ScheduleDAG.h" +#include "llvm/MC/MCInstrItineraries.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetRegisterInfo.h" + +namespace llvm { + class ResourcePriorityQueue; + + /// Sorting functions for the Available queue. + struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> { + ResourcePriorityQueue *PQ; + explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {} + + bool operator()(const SUnit* left, const SUnit* right) const; + }; + + class ResourcePriorityQueue : public SchedulingPriorityQueue { + /// SUnits - The SUnits for the current graph. + std::vector<SUnit> *SUnits; + + /// NumNodesSolelyBlocking - This vector contains, for every node in the + /// Queue, the number of nodes that the node is the sole unscheduled + /// predecessor for. This is used as a tie-breaker heuristic for better + /// mobility. + std::vector<unsigned> NumNodesSolelyBlocking; + + /// Queue - The queue. + std::vector<SUnit*> Queue; + + /// RegPressure - Tracking current reg pressure per register class. + /// + std::vector<unsigned> RegPressure; + + /// RegLimit - Tracking the number of allocatable registers per register + /// class. + std::vector<unsigned> RegLimit; + + resource_sort Picker; + const TargetRegisterInfo *TRI; + const TargetLowering *TLI; + const TargetInstrInfo *TII; + const InstrItineraryData* InstrItins; + /// ResourcesModel - Represents VLIW state. + /// Not limited to VLIW targets per se, but assumes + /// definition of DFA by a target. + DFAPacketizer *ResourcesModel; + + /// Resource model - packet/bundle model. Purely + /// internal at the time. + std::vector<SUnit*> Packet; + + /// Heuristics for estimating register pressure.
+ unsigned ParallelLiveRanges; + signed HorizontalVerticalBalance; + + public: + ResourcePriorityQueue(SelectionDAGISel *IS); + + ~ResourcePriorityQueue() { + delete ResourcesModel; + } + + bool isBottomUp() const { return false; } + + void initNodes(std::vector<SUnit> &sunits); + + void addNode(const SUnit *SU) { + NumNodesSolelyBlocking.resize(SUnits->size(), 0); + } + + void updateNode(const SUnit *SU) {} + + void releaseState() { + SUnits = 0; + } + + unsigned getLatency(unsigned NodeNum) const { + assert(NodeNum < (*SUnits).size()); + return (*SUnits)[NodeNum].getHeight(); + } + + unsigned getNumSolelyBlockNodes(unsigned NodeNum) const { + assert(NodeNum < NumNodesSolelyBlocking.size()); + return NumNodesSolelyBlocking[NodeNum]; + } + + /// Single cost function reflecting benefit of scheduling SU + /// in the current cycle. + signed SUSchedulingCost (SUnit *SU); + + /// InitNumRegDefsLeft - Determine the # of regs defined by this node. + /// + void initNumRegDefsLeft(SUnit *SU); + void updateNumRegDefsLeft(SUnit *SU); + signed regPressureDelta(SUnit *SU, bool RawPressure = false); + signed rawRegPressureDelta (SUnit *SU, unsigned RCId); + + bool empty() const { return Queue.empty(); } + + virtual void push(SUnit *U); + + virtual SUnit *pop(); + + virtual void remove(SUnit *SU); + + virtual void dump(ScheduleDAG* DAG) const; + + /// ScheduledNode - Main resource tracking point. 
+ void ScheduledNode(SUnit *Node); + bool isResourceAvailable(SUnit *SU); + void reserveResources(SUnit *SU); + +private: + void adjustPriorityOfUnscheduledPreds(SUnit *SU); + SUnit *getSingleUnscheduledPred(SUnit *SU); + unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId); + unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId); + }; +} + +#endif diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h index 1bbc6c5..72eadd9 100644 --- a/include/llvm/CodeGen/ScheduleDAG.h +++ b/include/llvm/CodeGen/ScheduleDAG.h @@ -16,7 +16,7 @@ #define LLVM_CODEGEN_SCHEDULEDAG_H #include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetLowering.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/GraphTraits.h" @@ -129,8 +129,7 @@ namespace llvm { Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias && Contents.Order.isArtificial == Other.Contents.Order.isArtificial; } - assert(0 && "Invalid dependency kind!"); - return false; + llvm_unreachable("Invalid dependency kind!"); } bool operator!=(const SDep &Other) const { @@ -232,6 +231,7 @@ namespace llvm { public: SUnit *OrigNode; // If not this, the node from which // this node was cloned. + // (SD scheduling only) // Preds/Succs - The SUnits before/after us in the graph. SmallVector<SDep, 4> Preds; // All sunit predecessors. @@ -427,6 +427,7 @@ namespace llvm { /// implementation to decide. /// class SchedulingPriorityQueue { + virtual void anchor(); unsigned CurCycle; bool HasReadyFilter; public: diff --git a/include/llvm/CodeGen/SchedulerRegistry.h b/include/llvm/CodeGen/SchedulerRegistry.h index 3af6fcf..a582b0c 100644 --- a/include/llvm/CodeGen/SchedulerRegistry.h +++ b/include/llvm/CodeGen/SchedulerRegistry.h @@ -42,7 +42,7 @@ public: : MachinePassRegistryNode(N, D, (MachinePassCtor)C) { Registry.Add(this); } ~RegisterScheduler() { Registry.Remove(this); } - + // Accessors. 
// @@ -92,6 +92,11 @@ ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS, ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel); +/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates top down +/// DFA driven list scheduler with clustering heuristic to control +/// register pressure. +ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); /// createDefaultScheduler - This creates an instruction scheduler appropriate /// for the target. ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS, diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index f637ea2..f921ca2 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -51,7 +51,7 @@ public: static void noteHead(SDNode*, SDNode*) {} static void deleteNode(SDNode *) { - assert(0 && "ilist_traits<SDNode> shouldn't see a deleteNode call!"); + llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!"); } private: static void createNode(const SDNode &); @@ -394,6 +394,7 @@ public: unsigned char TargetFlags = 0); SDValue getValueType(EVT); SDValue getRegister(unsigned Reg, EVT VT); + SDValue getRegisterMask(const uint32_t *RegMask); SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label); SDValue getBlockAddress(const BlockAddress *BA, EVT VT, bool isTarget = false, unsigned char TargetFlags = 0); diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h index 3c5c5df..ee3f231 100644 --- a/include/llvm/CodeGen/SelectionDAGISel.h +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -181,6 +181,7 @@ protected: /// ISelUpdater - helper class to handle updates of the /// instruction selection graph. 
class ISelUpdater : public SelectionDAG::DAGUpdateListener { + virtual void anchor(); SelectionDAG::allnodes_iterator &ISelPosition; public: explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp) @@ -239,8 +240,7 @@ public: /// succeeds or false if it fails. The number is a private implementation /// detail to the code tblgen produces. virtual bool CheckPatternPredicate(unsigned PredNo) const { - assert(0 && "Tblgen should generate the implementation of this!"); - return 0; + llvm_unreachable("Tblgen should generate the implementation of this!"); } /// CheckNodePredicate - This function is generated by tblgen in the target. @@ -248,20 +248,17 @@ public: /// false if it fails. The number is a private implementation /// detail to the code tblgen produces. virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const { - assert(0 && "Tblgen should generate the implementation of this!"); - return 0; + llvm_unreachable("Tblgen should generate the implementation of this!"); } virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N, unsigned PatternNo, SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) { - assert(0 && "Tblgen should generate the implementation of this!"); - return false; + llvm_unreachable("Tblgen should generate the implementation of this!"); } virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) { - assert(0 && "Tblgen should generate this!"); - return SDValue(); + llvm_unreachable("Tblgen should generate this!"); } SDNode *SelectCodeCommon(SDNode *NodeToMatch, diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 547dacb..da3ea2a 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -1114,11 +1114,9 @@ protected: } public: - void getMask(SmallVectorImpl<int> &M) const { + ArrayRef<int> getMask() const { EVT VT = getValueType(0); - M.clear(); - for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) - 
M.push_back(Mask[i]); + return makeArrayRef(Mask, VT.getVectorNumElements()); } int getMaskElt(unsigned Idx) const { assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"); @@ -1435,6 +1433,23 @@ public: } }; +class RegisterMaskSDNode : public SDNode { + // The memory for RegMask is not owned by the node. + const uint32_t *RegMask; + friend class SelectionDAG; + RegisterMaskSDNode(const uint32_t *mask) + : SDNode(ISD::RegisterMask, DebugLoc(), getSDVTList(MVT::Untyped)), + RegMask(mask) {} +public: + + const uint32_t *getRegMask() const { return RegMask; } + + static bool classof(const RegisterMaskSDNode *) { return true; } + static bool classof(const SDNode *N) { + return N->getOpcode() == ISD::RegisterMask; + } +}; + class BlockAddressSDNode : public SDNode { const BlockAddress *BA; unsigned char TargetFlags; diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h index cb2baa6..d868cb8 100644 --- a/include/llvm/CodeGen/SlotIndexes.h +++ b/include/llvm/CodeGen/SlotIndexes.h @@ -19,7 +19,7 @@ #ifndef LLVM_CODEGEN_SLOTINDEXES_H #define LLVM_CODEGEN_SLOTINDEXES_H -#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineInstrBundle.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/ADT/PointerIntPair.h" @@ -208,6 +208,12 @@ namespace llvm { return A.lie.getPointer() == B.lie.getPointer(); } + /// isEarlierInstr - Return true if A refers to an instruction earlier than + /// B. This is equivalent to A < B && !isSameInstr(A, B). + static bool isEarlierInstr(SlotIndex A, SlotIndex B) { + return A.entry().getIndex() < B.entry().getIndex(); + } + /// Return the distance from this index to the given one. int distance(SlotIndex other) const { return other.getIndex() - getIndex(); @@ -466,11 +472,6 @@ namespace llvm { return SlotIndex(back(), 0); } - /// Returns the invalid index marker for this analysis. 
- SlotIndex getInvalidIndex() { - return getZeroIndex(); - } - /// Returns the distance between the highest and lowest indexes allocated /// so far. unsigned getIndexesLength() const { @@ -488,12 +489,13 @@ namespace llvm { /// Returns true if the given machine instr is mapped to an index, /// otherwise returns false. bool hasIndex(const MachineInstr *instr) const { - return (mi2iMap.find(instr) != mi2iMap.end()); + return mi2iMap.count(instr); } /// Returns the base index for the given instruction. - SlotIndex getInstructionIndex(const MachineInstr *instr) const { - Mi2IndexMap::const_iterator itr = mi2iMap.find(instr); + SlotIndex getInstructionIndex(const MachineInstr *MI) const { + // Instructions inside a bundle have the same number as the bundle itself. + Mi2IndexMap::const_iterator itr = mi2iMap.find(getBundleStart(MI)); assert(itr != mi2iMap.end() && "Instruction not found in maps."); return itr->second; } @@ -647,6 +649,8 @@ namespace llvm { /// instructions, create the new index after the null indexes instead of /// before them. SlotIndex insertMachineInstrInMaps(MachineInstr *mi, bool Late = false) { + assert(!mi->isInsideBundle() && + "Instructions inside bundles should use bundle start's slot."); assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed."); // Numbering DBG_VALUE instructions could cause code generation to be // affected by debug information. 
diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h index ca40ccf..5a42136 100644 --- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h +++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h @@ -15,9 +15,9 @@ #ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H #define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H -#include "llvm/ADT/StringRef.h" #include "llvm/MC/SectionKind.h" #include "llvm/Target/TargetLoweringObjectFile.h" +#include "llvm/ADT/StringRef.h" namespace llvm { class MachineModuleInfo; @@ -65,6 +65,11 @@ public: virtual MCSymbol * getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang, MachineModuleInfo *MMI) const; + + virtual const MCSection * + getStaticCtorSection(unsigned Priority = 65535) const; + virtual const MCSection * + getStaticDtorSection(unsigned Priority = 65535) const; }; @@ -73,6 +78,12 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile { public: virtual ~TargetLoweringObjectFileMachO() {} + /// emitModuleFlags - Emit the module flags that specify the garbage + /// collection information. 
+ virtual void emitModuleFlags(MCStreamer &Streamer, + ArrayRef<Module::ModuleFlagEntry> ModuleFlags, + Mangler *Mang, const TargetMachine &TM) const; + virtual const MCSection * SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler *Mang, const TargetMachine &TM) const; diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h index 7a7080f..76c2357 100644 --- a/include/llvm/CodeGen/ValueTypes.h +++ b/include/llvm/CodeGen/ValueTypes.h @@ -16,10 +16,11 @@ #ifndef LLVM_CODEGEN_VALUETYPES_H #define LLVM_CODEGEN_VALUETYPES_H -#include <cassert> -#include <string> #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include <cassert> +#include <string> namespace llvm { class Type; @@ -45,49 +46,56 @@ namespace llvm { FIRST_INTEGER_VALUETYPE = i1, LAST_INTEGER_VALUETYPE = i128, - f32 = 7, // This is a 32 bit floating point value - f64 = 8, // This is a 64 bit floating point value - f80 = 9, // This is a 80 bit floating point value - f128 = 10, // This is a 128 bit floating point value - ppcf128 = 11, // This is a PPC 128-bit floating point value - - v2i8 = 12, // 2 x i8 - v4i8 = 13, // 4 x i8 - v8i8 = 14, // 8 x i8 - v16i8 = 15, // 16 x i8 - v32i8 = 16, // 32 x i8 - v2i16 = 17, // 2 x i16 - v4i16 = 18, // 4 x i16 - v8i16 = 19, // 8 x i16 - v16i16 = 20, // 16 x i16 - v2i32 = 21, // 2 x i32 - v4i32 = 22, // 4 x i32 - v8i32 = 23, // 8 x i32 - v1i64 = 24, // 1 x i64 - v2i64 = 25, // 2 x i64 - v4i64 = 26, // 4 x i64 - v8i64 = 27, // 8 x i64 - - v2f32 = 28, // 2 x f32 - v4f32 = 29, // 4 x f32 - v8f32 = 30, // 8 x f32 - v2f64 = 31, // 2 x f64 - v4f64 = 32, // 4 x f64 + f16 = 7, // This is a 16 bit floating point value + f32 = 8, // This is a 32 bit floating point value + f64 = 9, // This is a 64 bit floating point value + f80 = 10, // This is a 80 bit floating point value + f128 = 11, // This is a 128 bit floating point value + ppcf128 = 12, // This is a PPC 128-bit 
floating point value + + FIRST_FP_VALUETYPE = f16, + LAST_FP_VALUETYPE = ppcf128, + + v2i8 = 13, // 2 x i8 + v4i8 = 14, // 4 x i8 + v8i8 = 15, // 8 x i8 + v16i8 = 16, // 16 x i8 + v32i8 = 17, // 32 x i8 + v2i16 = 18, // 2 x i16 + v4i16 = 19, // 4 x i16 + v8i16 = 20, // 8 x i16 + v16i16 = 21, // 16 x i16 + v2i32 = 22, // 2 x i32 + v4i32 = 23, // 4 x i32 + v8i32 = 24, // 8 x i32 + v1i64 = 25, // 1 x i64 + v2i64 = 26, // 2 x i64 + v4i64 = 27, // 4 x i64 + v8i64 = 28, // 8 x i64 + + v2f16 = 29, // 2 x f16 + v2f32 = 30, // 2 x f32 + v4f32 = 31, // 4 x f32 + v8f32 = 32, // 8 x f32 + v2f64 = 33, // 2 x f64 + v4f64 = 34, // 4 x f64 FIRST_VECTOR_VALUETYPE = v2i8, LAST_VECTOR_VALUETYPE = v4f64, + FIRST_FP_VECTOR_VALUETYPE = v2f16, + LAST_FP_VECTOR_VALUETYPE = v4f64, - x86mmx = 33, // This is an X86 MMX value + x86mmx = 35, // This is an X86 MMX value - Glue = 34, // This glues nodes together during pre-RA sched + Glue = 36, // This glues nodes together during pre-RA sched - isVoid = 35, // This has no value + isVoid = 37, // This has no value - Untyped = 36, // This value takes a register, but has + Untyped = 38, // This value takes a register, but has // unspecified type. The register class // will be determined by the opcode. - LAST_VALUETYPE = 37, // This always remains at the end of the list. + LAST_VALUETYPE = 39, // This always remains at the end of the list. // This is the current maximum for LAST_VALUETYPE. // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors @@ -143,8 +151,10 @@ namespace llvm { /// isFloatingPoint - Return true if this is a FP, or a vector FP type. 
bool isFloatingPoint() const { - return ((SimpleTy >= MVT::f32 && SimpleTy <= MVT::ppcf128) || - (SimpleTy >= MVT::v2f32 && SimpleTy <= MVT::v4f64)); + return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE && + SimpleTy <= MVT::LAST_FP_VALUETYPE) || + (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE && + SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE)); } /// isInteger - Return true if this is an integer, or a vector integer type. @@ -203,6 +213,7 @@ namespace llvm { case v2i64: case v4i64: case v8i64: return i64; + case v2f16: return f16; case v2f32: case v4f32: case v8f32: return f32; @@ -233,6 +244,7 @@ namespace llvm { case v2i16: case v2i32: case v2i64: + case v2f16: case v2f32: case v2f64: return 2; case v1i64: return 1; @@ -242,21 +254,23 @@ namespace llvm { unsigned getSizeInBits() const { switch (SimpleTy) { case iPTR: - assert(0 && "Value type size is target-dependent. Ask TLI."); + llvm_unreachable("Value type size is target-dependent. Ask TLI."); case iPTRAny: case iAny: case fAny: - assert(0 && "Value type is overloaded."); + llvm_unreachable("Value type is overloaded."); default: - assert(0 && "getSizeInBits called on extended MVT."); + llvm_unreachable("getSizeInBits called on extended MVT."); case i1 : return 1; case i8 : return 8; case i16 : + case f16: case v2i8: return 16; case f32 : case i32 : case v4i8: - case v2i16: return 32; + case v2i16: + case v2f16: return 32; case x86mmx: case f64 : case i64 : @@ -300,7 +314,9 @@ namespace llvm { static MVT getFloatingPointVT(unsigned BitWidth) { switch (BitWidth) { default: - assert(false && "Bad bit width!"); + llvm_unreachable("Bad bit width!"); + case 16: + return MVT::f16; case 32: return MVT::f32; case 64: @@ -359,6 +375,9 @@ namespace llvm { if (NumElements == 4) return MVT::v4i64; if (NumElements == 8) return MVT::v8i64; break; + case MVT::f16: + if (NumElements == 2) return MVT::v2f16; + break; case MVT::f32: if (NumElements == 2) return MVT::v2f32; if (NumElements == 4) return MVT::v4f32; @@ -424,20 +443,6 @@ 
namespace llvm { return getExtendedVectorVT(Context, VT, NumElements); } - /// getIntVectorWithNumElements - Return any integer vector type that has - /// the specified number of elements. - static EVT getIntVectorWithNumElements(LLVMContext &C, unsigned NumElts) { - switch (NumElts) { - default: return getVectorVT(C, MVT::i8, NumElts); - case 1: return MVT::v1i64; - case 2: return MVT::v2i32; - case 4: return MVT::v4i16; - case 8: return MVT::v8i8; - case 16: return MVT::v16i8; - } - return MVT::INVALID_SIMPLE_VALUE_TYPE; - } - /// changeVectorElementTypeToInteger - Return a vector with the same number /// of elements as this vector, but with the element type converted to an /// integer type with the same bitwidth. diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td index 0cfb634..6c22690 100644 --- a/include/llvm/CodeGen/ValueTypes.td +++ b/include/llvm/CodeGen/ValueTypes.td @@ -26,39 +26,41 @@ def i16 : ValueType<16 , 3>; // 16-bit integer value def i32 : ValueType<32 , 4>; // 32-bit integer value def i64 : ValueType<64 , 5>; // 64-bit integer value def i128 : ValueType<128, 6>; // 128-bit integer value -def f32 : ValueType<32 , 7>; // 32-bit floating point value -def f64 : ValueType<64 , 8>; // 64-bit floating point value -def f80 : ValueType<80 , 9>; // 80-bit floating point value -def f128 : ValueType<128, 10>; // 128-bit floating point value -def ppcf128: ValueType<128, 11>; // PPC 128-bit floating point value +def f16 : ValueType<16 , 7>; // 32-bit floating point value +def f32 : ValueType<32 , 8>; // 32-bit floating point value +def f64 : ValueType<64 , 9>; // 64-bit floating point value +def f80 : ValueType<80 , 10>; // 80-bit floating point value +def f128 : ValueType<128, 11>; // 128-bit floating point value +def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value -def v2i8 : ValueType<16 , 12>; // 2 x i8 vector value -def v4i8 : ValueType<32 , 13>; // 4 x i8 vector value -def v8i8 : ValueType<64 , 14>; // 8 
x i8 vector value -def v16i8 : ValueType<128, 15>; // 16 x i8 vector value -def v32i8 : ValueType<256, 16>; // 32 x i8 vector value -def v2i16 : ValueType<32 , 17>; // 2 x i16 vector value -def v4i16 : ValueType<64 , 18>; // 4 x i16 vector value -def v8i16 : ValueType<128, 19>; // 8 x i16 vector value -def v16i16 : ValueType<256, 20>; // 16 x i16 vector value -def v2i32 : ValueType<64 , 21>; // 2 x i32 vector value -def v4i32 : ValueType<128, 22>; // 4 x i32 vector value -def v8i32 : ValueType<256, 23>; // 8 x i32 vector value -def v1i64 : ValueType<64 , 24>; // 1 x i64 vector value -def v2i64 : ValueType<128, 25>; // 2 x i64 vector value -def v4i64 : ValueType<256, 26>; // 4 x i64 vector value -def v8i64 : ValueType<512, 27>; // 8 x i64 vector value +def v2i8 : ValueType<16 , 13>; // 2 x i8 vector value +def v4i8 : ValueType<32 , 14>; // 4 x i8 vector value +def v8i8 : ValueType<64 , 15>; // 8 x i8 vector value +def v16i8 : ValueType<128, 16>; // 16 x i8 vector value +def v32i8 : ValueType<256, 17>; // 32 x i8 vector value +def v2i16 : ValueType<32 , 18>; // 2 x i16 vector value +def v4i16 : ValueType<64 , 19>; // 4 x i16 vector value +def v8i16 : ValueType<128, 20>; // 8 x i16 vector value +def v16i16 : ValueType<256, 21>; // 16 x i16 vector value +def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value +def v4i32 : ValueType<128, 23>; // 4 x i32 vector value +def v8i32 : ValueType<256, 24>; // 8 x i32 vector value +def v1i64 : ValueType<64 , 25>; // 1 x i64 vector value +def v2i64 : ValueType<128, 26>; // 2 x i64 vector value +def v4i64 : ValueType<256, 27>; // 4 x i64 vector value +def v8i64 : ValueType<512, 28>; // 8 x i64 vector value -def v2f32 : ValueType<64 , 28>; // 2 x f32 vector value -def v4f32 : ValueType<128, 29>; // 4 x f32 vector value -def v8f32 : ValueType<256, 30>; // 8 x f32 vector value -def v2f64 : ValueType<128, 31>; // 2 x f64 vector value -def v4f64 : ValueType<256, 32>; // 4 x f64 vector value +def v2f16 : ValueType<32 , 29>; // 2 x f16 
vector value +def v2f32 : ValueType<64 , 30>; // 2 x f32 vector value +def v4f32 : ValueType<128, 31>; // 4 x f32 vector value +def v8f32 : ValueType<256, 32>; // 8 x f32 vector value +def v2f64 : ValueType<128, 33>; // 2 x f64 vector value +def v4f64 : ValueType<256, 34>; // 4 x f64 vector value -def x86mmx : ValueType<64 , 33>; // X86 MMX value -def FlagVT : ValueType<0 , 34>; // Pre-RA sched glue -def isVoid : ValueType<0 , 35>; // Produces no value -def untyped: ValueType<8 , 36>; // Produces an untyped value +def x86mmx : ValueType<64 , 35>; // X86 MMX value +def FlagVT : ValueType<0 , 36>; // Pre-RA sched glue +def isVoid : ValueType<0 , 37>; // Produces no value +def untyped: ValueType<8 , 38>; // Produces an untyped value def MetadataVT: ValueType<0, 250>; // Metadata diff --git a/include/llvm/Config/config.h.cmake b/include/llvm/Config/config.h.cmake index b4356b1..c475014 100644 --- a/include/llvm/Config/config.h.cmake +++ b/include/llvm/Config/config.h.cmake @@ -11,17 +11,8 @@ /* Relative directory for resource files */ #define CLANG_RESOURCE_DIR "${CLANG_RESOURCE_DIR}" -/* 32 bit multilib directory. */ -#define CXX_INCLUDE_32BIT_DIR "${CXX_INCLUDE_32BIT_DIR}" - -/* 64 bit multilib directory. */ -#define CXX_INCLUDE_64BIT_DIR "${CXX_INCLUDE_64BIT_DIR}" - -/* Arch the libstdc++ headers. */ -#define CXX_INCLUDE_ARCH "${CXX_INCLUDE_ARCH}" - -/* Directory with the libstdc++ headers. */ -#define CXX_INCLUDE_ROOT "${CXX_INCLUDE_ROOT}" +/* Directory wherelibstdc++ is installed. 
*/ +#define GCC_INSTALL_PREFIX "${GCC_INSTALL_PREFIX}" /* Directories clang will search for headers */ #define C_INCLUDE_DIRS "${C_INCLUDE_DIRS}" @@ -626,6 +617,12 @@ /* Installation prefix directory */ #cmakedefine LLVM_PREFIX "${LLVM_PREFIX}" +/* Major version of the LLVM API */ +#cmakedefine LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR} + +/* Minor version of the LLVM API */ +#cmakedefine LLVM_VERSION_MINOR ${LLVM_VERSION_MINOR} + /* Define if the OS needs help to load dependent libraries for dlopen(). */ #cmakedefine LTDL_DLOPEN_DEPLIBS ${LTDL_DLOPEN_DEPLIBS} diff --git a/include/llvm/Config/config.h.in b/include/llvm/Config/config.h.in index 30afa58..1a996a2 100644 --- a/include/llvm/Config/config.h.in +++ b/include/llvm/Config/config.h.in @@ -9,18 +9,6 @@ /* Relative directory for resource files */ #undef CLANG_RESOURCE_DIR -/* 32 bit multilib directory. */ -#undef CXX_INCLUDE_32BIT_DIR - -/* 64 bit multilib directory. */ -#undef CXX_INCLUDE_64BIT_DIR - -/* Arch the libstdc++ headers. */ -#undef CXX_INCLUDE_ARCH - -/* Directory with the libstdc++ headers. */ -#undef CXX_INCLUDE_ROOT - /* Directories clang will search for headers */ #undef C_INCLUDE_DIRS @@ -33,6 +21,9 @@ /* Define if timestamp information (e.g., __DATE___) is allowed */ #undef ENABLE_TIMESTAMPS +/* Directory where gcc is installed. */ +#undef GCC_INSTALL_PREFIX + /* Define to 1 if you have the `argz_append' function. */ #undef HAVE_ARGZ_APPEND @@ -624,6 +615,12 @@ /* Installation prefix directory */ #undef LLVM_PREFIX +/* Major version of the LLVM API */ +#undef LLVM_VERSION_MAJOR + +/* Minor version of the LLVM API */ +#undef LLVM_VERSION_MINOR + /* Define if the OS needs help to load dependent libraries for dlopen(). 
*/ #undef LTDL_DLOPEN_DEPLIBS diff --git a/include/llvm/Config/llvm-config.h.cmake b/include/llvm/Config/llvm-config.h.cmake index 0c5b542..3a55e6b 100644 --- a/include/llvm/Config/llvm-config.h.cmake +++ b/include/llvm/Config/llvm-config.h.cmake @@ -106,4 +106,10 @@ /* Installation prefix directory */ #cmakedefine LLVM_PREFIX "${LLVM_PREFIX}" +/* Major version of the LLVM API */ +#cmakedefine LLVM_VERSION_MAJOR ${LLVM_VERSION_MAJOR} + +/* Minor version of the LLVM API */ +#cmakedefine LLVM_VERSION_MINOR ${LLVM_VERSION_MINOR} + #endif diff --git a/include/llvm/Config/llvm-config.h.in b/include/llvm/Config/llvm-config.h.in index 30a935b..2613366 100644 --- a/include/llvm/Config/llvm-config.h.in +++ b/include/llvm/Config/llvm-config.h.in @@ -106,4 +106,10 @@ /* Installation prefix directory */ #undef LLVM_PREFIX +/* Major version of the LLVM API */ +#undef LLVM_VERSION_MAJOR + +/* Minor version of the LLVM API */ +#undef LLVM_VERSION_MINOR + #endif diff --git a/include/llvm/Constant.h b/include/llvm/Constant.h index ecc1fe7..13acdc6 100644 --- a/include/llvm/Constant.h +++ b/include/llvm/Constant.h @@ -41,6 +41,7 @@ namespace llvm { class Constant : public User { void operator=(const Constant &); // Do not implement Constant(const Constant &); // Do not implement + virtual void anchor(); protected: Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps) @@ -90,12 +91,13 @@ public: /// FIXME: This really should not be in VMCore. PossibleRelocationsTy getRelocationInfo() const; - /// getVectorElements - This method, which is only valid on constant of vector - /// type, returns the elements of the vector in the specified smallvector. - /// This handles breaking down a vector undef into undef elements, etc. For - /// constant exprs and other cases we can't handle, we return an empty vector. 
- void getVectorElements(SmallVectorImpl<Constant*> &Elts) const; - + /// getAggregateElement - For aggregates (struct/array/vector) return the + /// constant that corresponds to the specified element if possible, or null if + /// not. This can return null if the element index is a ConstantExpr, or if + /// 'this' is a constant expr. + Constant *getAggregateElement(unsigned Elt) const; + Constant *getAggregateElement(Constant *Elt) const; + /// destroyConstant - Called if some element of this constant is no longer /// valid. At this point only other constants may be on the use_list for this /// constant. Any constants on our Use list must also be destroy'd. The @@ -103,7 +105,7 @@ public: /// available cached constants. Implementations should call /// destroyConstantImpl as the last thing they do, to destroy all users and /// delete this. - virtual void destroyConstant() { assert(0 && "Not reached!"); } + virtual void destroyConstant() { llvm_unreachable("Not reached!"); } //// Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Constant *) { return true; } @@ -129,11 +131,12 @@ public: // to be here to avoid link errors. assert(getNumOperands() == 0 && "replaceUsesOfWithOnConstant must be " "implemented for all constants that have operands!"); - assert(0 && "Constants that do not have operands cannot be using 'From'!"); + llvm_unreachable("Constants that do not have operands cannot be using " + "'From'!"); } - + static Constant *getNullValue(Type* Ty); - + /// @returns the value for an integer constant of the given type that has all /// its bits set to true. 
/// @brief Get the all ones value diff --git a/include/llvm/Constants.h b/include/llvm/Constants.h index 41e451f..0abe17d 100644 --- a/include/llvm/Constants.h +++ b/include/llvm/Constants.h @@ -34,10 +34,13 @@ class IntegerType; class StructType; class PointerType; class VectorType; +class SequentialType; template<class ConstantClass, class TypeClass, class ValType> struct ConstantCreator; template<class ConstantClass, class TypeClass> +struct ConstantArrayCreator; +template<class ConstantClass, class TypeClass> struct ConvertConstantType; //===----------------------------------------------------------------------===// @@ -45,6 +48,7 @@ struct ConvertConstantType; /// represents both boolean and integral constants. /// @brief Class for constant integers. class ConstantInt : public Constant { + virtual void anchor(); void *operator new(size_t, unsigned); // DO NOT IMPLEMENT ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT ConstantInt(IntegerType *Ty, const APInt& V); @@ -229,6 +233,7 @@ public: /// class ConstantFP : public Constant { APFloat Val; + virtual void anchor(); void *operator new(size_t, unsigned);// DO NOT IMPLEMENT ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT friend class LLVMContextImpl; @@ -296,7 +301,6 @@ public: /// ConstantAggregateZero - All zero aggregate value /// class ConstantAggregateZero : public Constant { - friend struct ConstantCreator<ConstantAggregateZero, Type, char>; void *operator new(size_t, unsigned); // DO NOT IMPLEMENT ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT protected: @@ -308,10 +312,26 @@ protected: return User::operator new(s, 0); } public: - static ConstantAggregateZero* get(Type *Ty); + static ConstantAggregateZero *get(Type *Ty); virtual void destroyConstant(); + /// getSequentialElement - If this CAZ has array or vector type, return a zero + /// with the right element type. 
+ Constant *getSequentialElement() const; + + /// getStructElement - If this CAZ has struct type, return a zero with the + /// right element type for the specified element. + Constant *getStructElement(unsigned Elt) const; + + /// getElementValue - Return a zero of the right value for the specified GEP + /// index. + Constant *getElementValue(Constant *C) const; + + /// getElementValue - Return a zero of the right value for the specified GEP + /// index. + Constant *getElementValue(unsigned Idx) const; + /// Methods for support type inquiry through isa, cast, and dyn_cast: /// static bool classof(const ConstantAggregateZero *) { return true; } @@ -325,8 +345,7 @@ public: /// ConstantArray - Constant Array Declarations /// class ConstantArray : public Constant { - friend struct ConstantCreator<ConstantArray, ArrayType, - std::vector<Constant*> >; + friend struct ConstantArrayCreator<ConstantArray, ArrayType>; ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT protected: ConstantArray(ArrayType *T, ArrayRef<Constant *> Val); @@ -334,15 +353,6 @@ public: // ConstantArray accessors static Constant *get(ArrayType *T, ArrayRef<Constant*> V); - /// This method constructs a ConstantArray and initializes it with a text - /// string. The default behavior (AddNull==true) causes a null terminator to - /// be placed at the end of the array. This effectively increases the length - /// of the array by one (you've been warned). However, in some situations - /// this is not desired so if AddNull==false then the string is copied without - /// null termination. - static Constant *get(LLVMContext &Context, StringRef Initializer, - bool AddNull = true); - /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); @@ -353,28 +363,6 @@ public: return reinterpret_cast<ArrayType*>(Value::getType()); } - /// isString - This method returns true if the array is an array of i8 and - /// the elements of the array are all ConstantInt's. 
- bool isString() const; - - /// isCString - This method returns true if the array is a string (see - /// @verbatim - /// isString) and it ends in a null byte \0 and does not contains any other - /// @endverbatim - /// null bytes except its terminator. - bool isCString() const; - - /// getAsString - If this array is isString(), then this method converts the - /// array to an std::string and returns it. Otherwise, it asserts out. - /// - std::string getAsString() const; - - /// getAsCString - If this array is isCString(), then this method converts the - /// array (without the trailing null byte) to an std::string and returns it. - /// Otherwise, it asserts out. - /// - std::string getAsCString() const; - virtual void destroyConstant(); virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); @@ -396,8 +384,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantArray, Constant) // ConstantStruct - Constant Struct Declarations // class ConstantStruct : public Constant { - friend struct ConstantCreator<ConstantStruct, StructType, - std::vector<Constant*> >; + friend struct ConstantArrayCreator<ConstantStruct, StructType>; ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT protected: ConstantStruct(StructType *T, ArrayRef<Constant *> Val); @@ -457,8 +444,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantStruct, Constant) /// ConstantVector - Constant Vector Declarations /// class ConstantVector : public Constant { - friend struct ConstantCreator<ConstantVector, VectorType, - std::vector<Constant*> >; + friend struct ConstantArrayCreator<ConstantVector, VectorType>; ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT protected: ConstantVector(VectorType *T, ArrayRef<Constant *> Val); @@ -466,6 +452,10 @@ public: // ConstantVector accessors static Constant *get(ArrayRef<Constant*> V); + /// getSplat - Return a ConstantVector with the specified constant in each + /// element. 
+ static Constant *getSplat(unsigned NumElts, Constant *Elt); + /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant); @@ -501,7 +491,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantVector, Constant) /// ConstantPointerNull - a constant pointer value that points to null /// class ConstantPointerNull : public Constant { - friend struct ConstantCreator<ConstantPointerNull, PointerType, char>; void *operator new(size_t, unsigned); // DO NOT IMPLEMENT ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT protected: @@ -533,6 +522,240 @@ public: return V->getValueID() == ConstantPointerNullVal; } }; + +//===----------------------------------------------------------------------===// +/// ConstantDataSequential - A vector or array constant whose element type is a +/// simple 1/2/4/8-byte integer or float/double, and whose elements are just +/// simple data values (i.e. ConstantInt/ConstantFP). This Constant node has no +/// operands because it stores all of the elements of the constant as densely +/// packed data, instead of as Value*'s. +/// +/// This is the common base class of ConstantDataArray and ConstantDataVector. +/// +class ConstantDataSequential : public Constant { + friend class LLVMContextImpl; + /// DataElements - A pointer to the bytes underlying this constant (which is + /// owned by the uniquing StringMap). + const char *DataElements; + + /// Next - This forms a link list of ConstantDataSequential nodes that have + /// the same value but different type. For example, 0,0,0,1 could be a 4 + /// element array of i8, or a 1-element array of i32. They'll both end up in + /// the same StringMap bucket, linked up. 
+ ConstantDataSequential *Next; + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantDataSequential(const ConstantDataSequential &); // DO NOT IMPLEMENT +protected: + explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data) + : Constant(ty, VT, 0, 0), DataElements(Data), Next(0) {} + ~ConstantDataSequential() { delete Next; } + + static Constant *getImpl(StringRef Bytes, Type *Ty); + +protected: + // allocate space for exactly zero operands. + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + + /// isElementTypeCompatible - Return true if a ConstantDataSequential can be + /// formed with a vector or array of the specified element type. + /// ConstantDataArray only works with normal float and int types that are + /// stored densely in memory, not with things like i42 or x86_f80. + static bool isElementTypeCompatible(const Type *Ty); + + /// getElementAsInteger - If this is a sequential container of integers (of + /// any size), return the specified element in the low bits of a uint64_t. + uint64_t getElementAsInteger(unsigned i) const; + + /// getElementAsAPFloat - If this is a sequential container of floating point + /// type, return the specified element as an APFloat. + APFloat getElementAsAPFloat(unsigned i) const; + + /// getElementAsFloat - If this is an sequential container of floats, return + /// the specified element as a float. + float getElementAsFloat(unsigned i) const; + + /// getElementAsDouble - If this is an sequential container of doubles, return + /// the specified element as a double. + double getElementAsDouble(unsigned i) const; + + /// getElementAsConstant - Return a Constant for a specified index's element. + /// Note that this has to compute a new constant to return, so it isn't as + /// efficient as getElementAsInteger/Float/Double. 
+ Constant *getElementAsConstant(unsigned i) const; + + /// getType - Specialize the getType() method to always return a + /// SequentialType, which reduces the amount of casting needed in parts of the + /// compiler. + inline SequentialType *getType() const { + return reinterpret_cast<SequentialType*>(Value::getType()); + } + + /// getElementType - Return the element type of the array/vector. + Type *getElementType() const; + + /// getNumElements - Return the number of elements in the array or vector. + unsigned getNumElements() const; + + /// getElementByteSize - Return the size (in bytes) of each element in the + /// array/vector. The size of the elements is known to be a multiple of one + /// byte. + uint64_t getElementByteSize() const; + + + /// isString - This method returns true if this is an array of i8. + bool isString() const; + + /// isCString - This method returns true if the array "isString", ends with a + /// nul byte, and does not contain any other nul bytes. + bool isCString() const; + + /// getAsString - If this array is isString(), then this method returns the + /// array as a StringRef. Otherwise, it asserts out. + /// + StringRef getAsString() const { + assert(isString() && "Not a string"); + return getRawDataValues(); + } + + /// getAsCString - If this array is isCString(), then this method returns the + /// array (without the trailing null byte) as a StringRef. Otherwise, it + /// asserts out. + /// + StringRef getAsCString() const { + assert(isCString() && "Isn't a C string"); + StringRef Str = getAsString(); + return Str.substr(0, Str.size()-1); + } + + /// getRawDataValues - Return the raw, underlying, bytes of this data. Note + /// that this is an extremely tricky thing to work with, as it exposes the + /// host endianness of the data elements. 
+ StringRef getRawDataValues() const; + + virtual void destroyConstant(); + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + /// + static bool classof(const ConstantDataSequential *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantDataArrayVal || + V->getValueID() == ConstantDataVectorVal; + } +private: + const char *getElementPointer(unsigned Elt) const; +}; + +//===----------------------------------------------------------------------===// +/// ConstantDataArray - An array constant whose element type is a simple +/// 1/2/4/8-byte integer or float/double, and whose elements are just simple +/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no +/// operands because it stores all of the elements of the constant as densely +/// packed data, instead of as Value*'s. +class ConstantDataArray : public ConstantDataSequential { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantDataArray(const ConstantDataArray &); // DO NOT IMPLEMENT + virtual void anchor(); + friend class ConstantDataSequential; + explicit ConstantDataArray(Type *ty, const char *Data) + : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {} +protected: + // allocate space for exactly zero operands. + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + + /// get() constructors - Return a constant with array type with an element + /// count and element type matching the ArrayRef passed in. Note that this + /// can return a ConstantAggregateZero object. 
+ static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<float> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<double> Elts); + + /// getString - This method constructs a CDS and initializes it with a text + /// string. The default behavior (AddNull==true) causes a null terminator to + /// be placed at the end of the array (increasing the length of the string by + /// one more than the StringRef would normally indicate). Pass AddNull=false + /// to disable this behavior. + static Constant *getString(LLVMContext &Context, StringRef Initializer, + bool AddNull = true); + + /// getType - Specialize the getType() method to always return an ArrayType, + /// which reduces the amount of casting needed in parts of the compiler. + /// + inline ArrayType *getType() const { + return reinterpret_cast<ArrayType*>(Value::getType()); + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + /// + static bool classof(const ConstantDataArray *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantDataArrayVal; + } +}; + +//===----------------------------------------------------------------------===// +/// ConstantDataVector - A vector constant whose element type is a simple +/// 1/2/4/8-byte integer or float/double, and whose elements are just simple +/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no +/// operands because it stores all of the elements of the constant as densely +/// packed data, instead of as Value*'s. 
+class ConstantDataVector : public ConstantDataSequential { + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + ConstantDataVector(const ConstantDataVector &); // DO NOT IMPLEMENT + virtual void anchor(); + friend class ConstantDataSequential; + explicit ConstantDataVector(Type *ty, const char *Data) + : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {} +protected: + // allocate space for exactly zero operands. + void *operator new(size_t s) { + return User::operator new(s, 0); + } +public: + + /// get() constructors - Return a constant with vector type with an element + /// count and element type matching the ArrayRef passed in. Note that this + /// can return a ConstantAggregateZero object. + static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<float> Elts); + static Constant *get(LLVMContext &Context, ArrayRef<double> Elts); + + /// getSplat - Return a ConstantVector with the specified constant in each + /// element. The specified constant has to be of a compatible type (i8/i16/ + /// i32/i64/float/double) and must be a ConstantFP or ConstantInt. + static Constant *getSplat(unsigned NumElts, Constant *Elt); + + /// getSplatValue - If this is a splat constant, meaning that all of the + /// elements have the same value, return that value. Otherwise return NULL. + Constant *getSplatValue() const; + + /// getType - Specialize the getType() method to always return a VectorType, + /// which reduces the amount of casting needed in parts of the compiler. 
+ /// + inline VectorType *getType() const { + return reinterpret_cast<VectorType*>(Value::getType()); + } + + /// Methods for support type inquiry through isa, cast, and dyn_cast: + /// + static bool classof(const ConstantDataVector *) { return true; } + static bool classof(const Value *V) { + return V->getValueID() == ConstantDataVectorVal; + } +}; + + /// BlockAddress - The address of a basic block. /// @@ -891,7 +1114,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant) /// LangRef.html#undefvalues for details. /// class UndefValue : public Constant { - friend struct ConstantCreator<UndefValue, Type, char>; void *operator new(size_t, unsigned); // DO NOT IMPLEMENT UndefValue(const UndefValue &); // DO NOT IMPLEMENT protected: @@ -907,6 +1129,22 @@ public: /// static UndefValue *get(Type *T); + /// getSequentialElement - If this Undef has array or vector type, return an + /// undef with the right element type. + UndefValue *getSequentialElement() const; + + /// getStructElement - If this undef has struct type, return an undef with the + /// right element type for the specified element. + UndefValue *getStructElement(unsigned Elt) const; + + /// getElementValue - Return an undef of the right value for the specified GEP + /// index. + UndefValue *getElementValue(Constant *C) const; + + /// getElementValue - Return an undef of the right value for the specified GEP + /// index. 
+ UndefValue *getElementValue(unsigned Idx) const; + virtual void destroyConstant(); /// Methods for support type inquiry through isa, cast, and dyn_cast: diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h index e89fd2e..9f56ed1 100644 --- a/include/llvm/ExecutionEngine/ExecutionEngine.h +++ b/include/llvm/ExecutionEngine/ExecutionEngine.h @@ -15,18 +15,19 @@ #ifndef LLVM_EXECUTION_ENGINE_H #define LLVM_EXECUTION_ENGINE_H -#include <vector> -#include <map> -#include <string> #include "llvm/MC/MCCodeGenInfo.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/ValueMap.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/Mutex.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" +#include <vector> +#include <map> +#include <string> namespace llvm { @@ -228,6 +229,26 @@ public: virtual GenericValue runFunction(Function *F, const std::vector<GenericValue> &ArgValues) = 0; + /// getPointerToNamedFunction - This method returns the address of the + /// specified function by using the dlsym function call. As such it is only + /// useful for resolving library symbols, not code generated symbols. + /// + /// If AbortOnFailure is false and no function with the given name is + /// found, this function silently returns a null pointer. Otherwise, + /// it prints a message to stderr and aborts. + /// + virtual void *getPointerToNamedFunction(const std::string &Name, + bool AbortOnFailure = true) = 0; + + /// mapSectionAddress - map a section to its target address space value. + /// Map the address of a JIT section as returned from the memory manager + /// to the address in the target process as the running code will see it. + /// This is the address which will be used for relocation resolution. 
+ virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) { + llvm_unreachable("Re-mapping of section addresses not supported with this " + "EE!"); + } + /// runStaticConstructorsDestructors - This method is used to execute all of /// the static constructors or destructors for a program. /// diff --git a/include/llvm/ExecutionEngine/JITMemoryManager.h b/include/llvm/ExecutionEngine/JITMemoryManager.h index a63f0da..8eb7590 100644 --- a/include/llvm/ExecutionEngine/JITMemoryManager.h +++ b/include/llvm/ExecutionEngine/JITMemoryManager.h @@ -101,6 +101,22 @@ public: virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart, uint8_t *FunctionEnd) = 0; + /// allocateCodeSection - Allocate a memory block of (at least) the given + /// size suitable for executable code. The SectionID is a unique identifier + /// assigned by the JIT and passed through to the memory manager for + /// the instance class to use if it needs to communicate to the JIT about + /// a given section after the fact. + virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID) = 0; + + /// allocateDataSection - Allocate a memory block of (at least) the given + /// size suitable for data. The SectionID is a unique identifier + /// assigned by the JIT and passed through to the memory manager for + /// the instance class to use if it needs to communicate to the JIT about + /// a given section after the fact. + virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID) = 0; + /// allocateSpace - Allocate a memory block of the given size. This method /// cannot be called between calls to startFunctionBody and endFunctionBody. 
virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0; diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h index 724b9f0..8ad316b 100644 --- a/include/llvm/ExecutionEngine/RuntimeDyld.h +++ b/include/llvm/ExecutionEngine/RuntimeDyld.h @@ -35,6 +35,16 @@ public: RTDyldMemoryManager() {} virtual ~RTDyldMemoryManager(); + /// allocateCodeSection - Allocate a memory block of (at least) the given + /// size suitable for executable code. + virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID) = 0; + + /// allocateDataSection - Allocate a memory block of (at least) the given + /// size suitable for data. + virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID) = 0; + // Allocate ActualSize bytes, or more, for the named function. Return // a pointer to the allocated memory and update Size to reflect how much // memory was acutally allocated. @@ -54,6 +64,10 @@ class RuntimeDyld { // interface. RuntimeDyldImpl *Dyld; RTDyldMemoryManager *MM; +protected: + // Change the address associated with a section when resolving relocations. + // Any relocations already associated with the symbol will be re-resolved. + void reassignSectionAddress(unsigned SectionID, uint64_t Addr); public: RuntimeDyld(RTDyldMemoryManager*); ~RuntimeDyld(); @@ -65,9 +79,13 @@ public: void *getSymbolAddress(StringRef Name); // Resolve the relocations for all symbols we currently know about. void resolveRelocations(); - // Change the address associated with a symbol when resolving relocations. - // Any relocations already associated with the symbol will be re-resolved. - void reassignSymbolAddress(StringRef Name, uint8_t *Addr); + + /// mapSectionAddress - map a section to its target address space value. + /// Map the address of a JIT section as returned from the memory manager + /// to the address in the target process as the running code will see it. 
+ /// This is the address which will be used for relocation resolution. + void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress); + StringRef getErrorString(); }; diff --git a/include/llvm/GlobalValue.h b/include/llvm/GlobalValue.h index 63dc4ab..81a11a4 100644 --- a/include/llvm/GlobalValue.h +++ b/include/llvm/GlobalValue.h @@ -59,19 +59,18 @@ public: protected: GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps, LinkageTypes linkage, const Twine &Name) - : Constant(ty, vty, Ops, NumOps), Parent(0), - Linkage(linkage), Visibility(DefaultVisibility), Alignment(0), - UnnamedAddr(0) { + : Constant(ty, vty, Ops, NumOps), Linkage(linkage), + Visibility(DefaultVisibility), Alignment(0), UnnamedAddr(0), Parent(0) { setName(Name); } - Module *Parent; // Note: VC++ treats enums as signed, so an extra bit is required to prevent // Linkage and Visibility from turning into negative values. LinkageTypes Linkage : 5; // The linkage of this global unsigned Visibility : 2; // The visibility style of this global unsigned Alignment : 16; // Alignment of this symbol, must be power of two unsigned UnnamedAddr : 1; // This value's address is not significant + Module *Parent; // The containing module. std::string Section; // Section to emit this into, empty mean default public: ~GlobalValue() { diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index ce87f16..33d2043 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -31,6 +31,10 @@ void initializeTransformUtils(PassRegistry&); /// ScalarOpts library. void initializeScalarOpts(PassRegistry&); +/// initializeVectorization - Initialize all passes linked into the +/// Vectorize library. +void initializeVectorization(PassRegistry&); + /// initializeInstCombine - Initialize all passes linked into the /// ScalarOpts library. 
void initializeInstCombine(PassRegistry&); @@ -67,6 +71,7 @@ void initializeBasicCallGraphPass(PassRegistry&); void initializeBlockExtractorPassPass(PassRegistry&); void initializeBlockFrequencyInfoPass(PassRegistry&); void initializeBlockPlacementPass(PassRegistry&); +void initializeBranchFolderPassPass(PassRegistry&); void initializeBranchProbabilityInfoPass(PassRegistry&); void initializeBreakCriticalEdgesPass(PassRegistry&); void initializeCFGOnlyPrinterPass(PassRegistry&); @@ -77,8 +82,10 @@ void initializeCFGViewerPass(PassRegistry&); void initializeCalculateSpillWeightsPass(PassRegistry&); void initializeCallGraphAnalysisGroup(PassRegistry&); void initializeCodeGenPreparePass(PassRegistry&); +void initializeCodePlacementOptPass(PassRegistry&); void initializeConstantMergePass(PassRegistry&); void initializeConstantPropagationPass(PassRegistry&); +void initializeMachineCopyPropagationPass(PassRegistry&); void initializeCorrelatedValuePropagationPass(PassRegistry&); void initializeDAEPass(PassRegistry&); void initializeDAHPass(PassRegistry&); @@ -94,13 +101,17 @@ void initializeDominanceFrontierPass(PassRegistry&); void initializeDominatorTreePass(PassRegistry&); void initializeEdgeBundlesPass(PassRegistry&); void initializeEdgeProfilerPass(PassRegistry&); +void initializeExpandPostRAPass(PassRegistry&); void initializePathProfilerPass(PassRegistry&); void initializeGCOVProfilerPass(PassRegistry&); void initializeAddressSanitizerPass(PassRegistry&); +void initializeThreadSanitizerPass(PassRegistry&); void initializeEarlyCSEPass(PassRegistry&); void initializeExpandISelPseudosPass(PassRegistry&); void initializeFindUsedTypesPass(PassRegistry&); void initializeFunctionAttrsPass(PassRegistry&); +void initializeGCInfoDeleterPass(PassRegistry&); +void initializeGCMachineCodeAnalysisPass(PassRegistry&); void initializeGCModuleInfoPass(PassRegistry&); void initializeGVNPass(PassRegistry&); void initializeGlobalDCEPass(PassRegistry&); @@ -128,6 +139,7 @@ void 
initializeLiveStacksPass(PassRegistry&); void initializeLiveVariablesPass(PassRegistry&); void initializeLoaderPassPass(PassRegistry&); void initializePathProfileLoaderPassPass(PassRegistry&); +void initializeLocalStackSlotPassPass(PassRegistry&); void initializeLoopDeletionPass(PassRegistry&); void initializeLoopDependenceAnalysisPass(PassRegistry&); void initializeLoopExtractorPass(PassRegistry&); @@ -155,6 +167,7 @@ void initializeMachineLICMPass(PassRegistry&); void initializeMachineLoopInfoPass(PassRegistry&); void initializeMachineLoopRangesPass(PassRegistry&); void initializeMachineModuleInfoPass(PassRegistry&); +void initializeMachineSchedulerPass(PassRegistry&); void initializeMachineSinkingPass(PassRegistry&); void initializeMachineVerifierPassPass(PassRegistry&); void initializeMemCpyOptPass(PassRegistry&); @@ -166,6 +179,7 @@ void initializeNoAAPass(PassRegistry&); void initializeNoProfileInfoPass(PassRegistry&); void initializeNoPathProfileInfoPass(PassRegistry&); void initializeObjCARCAliasAnalysisPass(PassRegistry&); +void initializeObjCARCAPElimPass(PassRegistry&); void initializeObjCARCExpandPass(PassRegistry&); void initializeObjCARCContractPass(PassRegistry&); void initializeObjCARCOptPass(PassRegistry&); @@ -180,6 +194,7 @@ void initializePostDomOnlyViewerPass(PassRegistry&); void initializePostDomPrinterPass(PassRegistry&); void initializePostDomViewerPass(PassRegistry&); void initializePostDominatorTreePass(PassRegistry&); +void initializePostRASchedulerPass(PassRegistry&); void initializePreVerifierPass(PassRegistry&); void initializePrintDbgInfoPass(PassRegistry&); void initializePrintFunctionPassPass(PassRegistry&); @@ -221,6 +236,8 @@ void initializeStripNonDebugSymbolsPass(PassRegistry&); void initializeStripSymbolsPass(PassRegistry&); void initializeStrongPHIEliminationPass(PassRegistry&); void initializeTailCallElimPass(PassRegistry&); +void initializeTailDuplicatePassPass(PassRegistry&); +void 
initializeTargetPassConfigPass(PassRegistry&); void initializeTargetDataPass(PassRegistry&); void initializeTargetLibraryInfoPass(PassRegistry&); void initializeTwoAddressInstructionPassPass(PassRegistry&); @@ -232,7 +249,8 @@ void initializeVerifierPass(PassRegistry&); void initializeVirtRegMapPass(PassRegistry&); void initializeInstSimplifierPass(PassRegistry&); void initializeUnpackMachineBundlesPass(PassRegistry&); - +void initializeFinalizeMachineBundlesPass(PassRegistry&); +void initializeBBVectorizePass(PassRegistry&); } #endif diff --git a/include/llvm/Instruction.def b/include/llvm/Instruction.def index d36e4be..e59a052 100644 --- a/include/llvm/Instruction.def +++ b/include/llvm/Instruction.def @@ -99,81 +99,80 @@ HANDLE_TERM_INST ( 2, Br , BranchInst) HANDLE_TERM_INST ( 3, Switch , SwitchInst) HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst) HANDLE_TERM_INST ( 5, Invoke , InvokeInst) -HANDLE_TERM_INST ( 6, Unwind , UnwindInst) -HANDLE_TERM_INST ( 7, Resume , ResumeInst) -HANDLE_TERM_INST ( 8, Unreachable, UnreachableInst) - LAST_TERM_INST ( 8) +HANDLE_TERM_INST ( 6, Resume , ResumeInst) +HANDLE_TERM_INST ( 7, Unreachable, UnreachableInst) + LAST_TERM_INST ( 7) // Standard binary operators... 
- FIRST_BINARY_INST( 9) -HANDLE_BINARY_INST( 9, Add , BinaryOperator) -HANDLE_BINARY_INST(10, FAdd , BinaryOperator) -HANDLE_BINARY_INST(11, Sub , BinaryOperator) -HANDLE_BINARY_INST(12, FSub , BinaryOperator) -HANDLE_BINARY_INST(13, Mul , BinaryOperator) -HANDLE_BINARY_INST(14, FMul , BinaryOperator) -HANDLE_BINARY_INST(15, UDiv , BinaryOperator) -HANDLE_BINARY_INST(16, SDiv , BinaryOperator) -HANDLE_BINARY_INST(17, FDiv , BinaryOperator) -HANDLE_BINARY_INST(18, URem , BinaryOperator) -HANDLE_BINARY_INST(19, SRem , BinaryOperator) -HANDLE_BINARY_INST(20, FRem , BinaryOperator) + FIRST_BINARY_INST( 8) +HANDLE_BINARY_INST( 8, Add , BinaryOperator) +HANDLE_BINARY_INST( 9, FAdd , BinaryOperator) +HANDLE_BINARY_INST(10, Sub , BinaryOperator) +HANDLE_BINARY_INST(11, FSub , BinaryOperator) +HANDLE_BINARY_INST(12, Mul , BinaryOperator) +HANDLE_BINARY_INST(13, FMul , BinaryOperator) +HANDLE_BINARY_INST(14, UDiv , BinaryOperator) +HANDLE_BINARY_INST(15, SDiv , BinaryOperator) +HANDLE_BINARY_INST(16, FDiv , BinaryOperator) +HANDLE_BINARY_INST(17, URem , BinaryOperator) +HANDLE_BINARY_INST(18, SRem , BinaryOperator) +HANDLE_BINARY_INST(19, FRem , BinaryOperator) // Logical operators (integer operands) -HANDLE_BINARY_INST(21, Shl , BinaryOperator) // Shift left (logical) -HANDLE_BINARY_INST(22, LShr , BinaryOperator) // Shift right (logical) -HANDLE_BINARY_INST(23, AShr , BinaryOperator) // Shift right (arithmetic) -HANDLE_BINARY_INST(24, And , BinaryOperator) -HANDLE_BINARY_INST(25, Or , BinaryOperator) -HANDLE_BINARY_INST(26, Xor , BinaryOperator) - LAST_BINARY_INST(26) +HANDLE_BINARY_INST(20, Shl , BinaryOperator) // Shift left (logical) +HANDLE_BINARY_INST(21, LShr , BinaryOperator) // Shift right (logical) +HANDLE_BINARY_INST(22, AShr , BinaryOperator) // Shift right (arithmetic) +HANDLE_BINARY_INST(23, And , BinaryOperator) +HANDLE_BINARY_INST(24, Or , BinaryOperator) +HANDLE_BINARY_INST(25, Xor , BinaryOperator) + LAST_BINARY_INST(25) // Memory operators... 
- FIRST_MEMORY_INST(27) -HANDLE_MEMORY_INST(27, Alloca, AllocaInst) // Stack management -HANDLE_MEMORY_INST(28, Load , LoadInst ) // Memory manipulation instrs -HANDLE_MEMORY_INST(29, Store , StoreInst ) -HANDLE_MEMORY_INST(30, GetElementPtr, GetElementPtrInst) -HANDLE_MEMORY_INST(31, Fence , FenceInst ) -HANDLE_MEMORY_INST(32, AtomicCmpXchg , AtomicCmpXchgInst ) -HANDLE_MEMORY_INST(33, AtomicRMW , AtomicRMWInst ) - LAST_MEMORY_INST(33) + FIRST_MEMORY_INST(26) +HANDLE_MEMORY_INST(26, Alloca, AllocaInst) // Stack management +HANDLE_MEMORY_INST(27, Load , LoadInst ) // Memory manipulation instrs +HANDLE_MEMORY_INST(28, Store , StoreInst ) +HANDLE_MEMORY_INST(29, GetElementPtr, GetElementPtrInst) +HANDLE_MEMORY_INST(30, Fence , FenceInst ) +HANDLE_MEMORY_INST(31, AtomicCmpXchg , AtomicCmpXchgInst ) +HANDLE_MEMORY_INST(32, AtomicRMW , AtomicRMWInst ) + LAST_MEMORY_INST(32) // Cast operators ... // NOTE: The order matters here because CastInst::isEliminableCastPair // NOTE: (see Instructions.cpp) encodes a table based on this ordering. 
- FIRST_CAST_INST(34) -HANDLE_CAST_INST(34, Trunc , TruncInst ) // Truncate integers -HANDLE_CAST_INST(35, ZExt , ZExtInst ) // Zero extend integers -HANDLE_CAST_INST(36, SExt , SExtInst ) // Sign extend integers -HANDLE_CAST_INST(37, FPToUI , FPToUIInst ) // floating point -> UInt -HANDLE_CAST_INST(38, FPToSI , FPToSIInst ) // floating point -> SInt -HANDLE_CAST_INST(39, UIToFP , UIToFPInst ) // UInt -> floating point -HANDLE_CAST_INST(40, SIToFP , SIToFPInst ) // SInt -> floating point -HANDLE_CAST_INST(41, FPTrunc , FPTruncInst ) // Truncate floating point -HANDLE_CAST_INST(42, FPExt , FPExtInst ) // Extend floating point -HANDLE_CAST_INST(43, PtrToInt, PtrToIntInst) // Pointer -> Integer -HANDLE_CAST_INST(44, IntToPtr, IntToPtrInst) // Integer -> Pointer -HANDLE_CAST_INST(45, BitCast , BitCastInst ) // Type cast - LAST_CAST_INST(45) + FIRST_CAST_INST(33) +HANDLE_CAST_INST(33, Trunc , TruncInst ) // Truncate integers +HANDLE_CAST_INST(34, ZExt , ZExtInst ) // Zero extend integers +HANDLE_CAST_INST(35, SExt , SExtInst ) // Sign extend integers +HANDLE_CAST_INST(36, FPToUI , FPToUIInst ) // floating point -> UInt +HANDLE_CAST_INST(37, FPToSI , FPToSIInst ) // floating point -> SInt +HANDLE_CAST_INST(38, UIToFP , UIToFPInst ) // UInt -> floating point +HANDLE_CAST_INST(39, SIToFP , SIToFPInst ) // SInt -> floating point +HANDLE_CAST_INST(40, FPTrunc , FPTruncInst ) // Truncate floating point +HANDLE_CAST_INST(41, FPExt , FPExtInst ) // Extend floating point +HANDLE_CAST_INST(42, PtrToInt, PtrToIntInst) // Pointer -> Integer +HANDLE_CAST_INST(43, IntToPtr, IntToPtrInst) // Integer -> Pointer +HANDLE_CAST_INST(44, BitCast , BitCastInst ) // Type cast + LAST_CAST_INST(44) // Other operators... - FIRST_OTHER_INST(46) -HANDLE_OTHER_INST(46, ICmp , ICmpInst ) // Integer comparison instruction -HANDLE_OTHER_INST(47, FCmp , FCmpInst ) // Floating point comparison instr. 
-HANDLE_OTHER_INST(48, PHI , PHINode ) // PHI node instruction -HANDLE_OTHER_INST(49, Call , CallInst ) // Call a function -HANDLE_OTHER_INST(50, Select , SelectInst ) // select instruction -HANDLE_OTHER_INST(51, UserOp1, Instruction) // May be used internally in a pass -HANDLE_OTHER_INST(52, UserOp2, Instruction) // Internal to passes only -HANDLE_OTHER_INST(53, VAArg , VAArgInst ) // vaarg instruction -HANDLE_OTHER_INST(54, ExtractElement, ExtractElementInst)// extract from vector -HANDLE_OTHER_INST(55, InsertElement, InsertElementInst) // insert into vector -HANDLE_OTHER_INST(56, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. -HANDLE_OTHER_INST(57, ExtractValue, ExtractValueInst)// extract from aggregate -HANDLE_OTHER_INST(58, InsertValue, InsertValueInst) // insert into aggregate -HANDLE_OTHER_INST(59, LandingPad, LandingPadInst) // Landing pad instruction. - LAST_OTHER_INST(59) + FIRST_OTHER_INST(45) +HANDLE_OTHER_INST(45, ICmp , ICmpInst ) // Integer comparison instruction +HANDLE_OTHER_INST(46, FCmp , FCmpInst ) // Floating point comparison instr. +HANDLE_OTHER_INST(47, PHI , PHINode ) // PHI node instruction +HANDLE_OTHER_INST(48, Call , CallInst ) // Call a function +HANDLE_OTHER_INST(49, Select , SelectInst ) // select instruction +HANDLE_OTHER_INST(50, UserOp1, Instruction) // May be used internally in a pass +HANDLE_OTHER_INST(51, UserOp2, Instruction) // Internal to passes only +HANDLE_OTHER_INST(52, VAArg , VAArgInst ) // vaarg instruction +HANDLE_OTHER_INST(53, ExtractElement, ExtractElementInst)// extract from vector +HANDLE_OTHER_INST(54, InsertElement, InsertElementInst) // insert into vector +HANDLE_OTHER_INST(55, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. +HANDLE_OTHER_INST(56, ExtractValue, ExtractValueInst)// extract from aggregate +HANDLE_OTHER_INST(57, InsertValue, InsertValueInst) // insert into aggregate +HANDLE_OTHER_INST(58, LandingPad, LandingPadInst) // Landing pad instruction. 
+ LAST_OTHER_INST(58) #undef FIRST_TERM_INST #undef HANDLE_TERM_INST diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index e87950e..52fa8d7 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -24,6 +24,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ErrorHandling.h" #include <iterator> +#include <limits.h> namespace llvm { @@ -1670,10 +1671,33 @@ public: /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + Constant *getMask() const { + return reinterpret_cast<Constant*>(getOperand(2)); + } + /// getMaskValue - Return the index from the shuffle mask for the specified /// output result. This is either -1 if the element is undef or a number less /// than 2*numelements. - int getMaskValue(unsigned i) const; + static int getMaskValue(Constant *Mask, unsigned i); + + int getMaskValue(unsigned i) const { + return getMaskValue(getMask(), i); + } + + /// getShuffleMask - Return the full mask for this instruction, where each + /// element is the element number and undef's are returned as -1. 
+ static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result); + + void getShuffleMask(SmallVectorImpl<int> &Result) const { + return getShuffleMask(getMask(), Result); + } + + SmallVector<int, 16> getShuffleMask() const { + SmallVector<int, 16> Mask; + getShuffleMask(Mask); + return Mask; + } + // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const ShuffleVectorInst *) { return true; } @@ -2444,6 +2468,9 @@ class SwitchInst : public TerminatorInst { protected: virtual SwitchInst *clone_impl() const; public: + + enum { ErrorIndex = UINT_MAX }; + static SwitchInst *Create(Value *Value, BasicBlock *Default, unsigned NumCases, Instruction *InsertBefore = 0) { return new SwitchInst(Value, Default, NumCases, InsertBefore); @@ -2465,34 +2492,62 @@ public: return cast<BasicBlock>(getOperand(1)); } - /// getNumCases - return the number of 'cases' in this switch instruction. - /// Note that case #0 is always the default case. + void setDefaultDest(BasicBlock *DefaultCase) { + setOperand(1, reinterpret_cast<Value*>(DefaultCase)); + } + + /// getNumCases - return the number of 'cases' in this switch instruction, + /// except the default case unsigned getNumCases() const { - return getNumOperands()/2; + return getNumOperands()/2 - 1; } - /// getCaseValue - Return the specified case value. Note that case #0, the - /// default destination, does not have a case value. + /// getCaseValue - Return the specified case value. Note that case #0, means + /// first case, not a default case. ConstantInt *getCaseValue(unsigned i) { - assert(i && i < getNumCases() && "Illegal case value to get!"); - return getSuccessorValue(i); + assert(i < getNumCases() && "Illegal case value to get!"); + return reinterpret_cast<ConstantInt*>(getOperand(2 + i*2)); } - /// getCaseValue - Return the specified case value. Note that case #0, the - /// default destination, does not have a case value. 
+ /// getCaseValue - Return the specified case value. Note that case #0, means + /// first case, not a default case. const ConstantInt *getCaseValue(unsigned i) const { - assert(i && i < getNumCases() && "Illegal case value to get!"); - return getSuccessorValue(i); + assert(i < getNumCases() && "Illegal case value to get!"); + return reinterpret_cast<const ConstantInt*>(getOperand(2 + i*2)); + } + + // setCaseValue - Updates the value associated with the specified + // case. + void setCaseValue(unsigned i, ConstantInt *CaseValue) { + assert(i < getNumCases() && "Case index # out of range!"); + setOperand(2 + i*2, reinterpret_cast<Value*>(CaseValue)); + } /// findCaseValue - Search all of the case values for the specified constant. /// If it is explicitly handled, return the case number of it, otherwise - /// return 0 to indicate that it is handled by the default handler. + /// return ErrorIndex to indicate that it is handled by the default handler. unsigned findCaseValue(const ConstantInt *C) const { - for (unsigned i = 1, e = getNumCases(); i != e; ++i) + for (unsigned i = 0, e = getNumCases(); i != e; ++i) if (getCaseValue(i) == C) return i; - return 0; + return ErrorIndex; + } + + /// resolveSuccessorIndex - Converts case index to index of its successor + /// index in TerminatorInst successors collection. + /// If CaseIndex == ErrorIndex, "default" successor will be returned then. + unsigned resolveSuccessorIndex(unsigned CaseIndex) const { + assert((CaseIndex == ErrorIndex || CaseIndex < getNumCases()) && + "Case index # out of range!"); + return CaseIndex != ErrorIndex ? CaseIndex + 1 : 0; + } + + /// resolveCaseIndex - Converts index of successor in TerminatorInst + /// collection to index of case that corresponds to this successor. + unsigned resolveCaseIndex(unsigned SuccessorIndex) const { + assert(SuccessorIndex < getNumSuccessors() && + "Successor index # out of range!"); + return SuccessorIndex != 0 ? 
SuccessorIndex - 1 : ErrorIndex; } /// findCaseDest - Finds the unique case value for a given successor. Returns @@ -2501,8 +2556,8 @@ public: if (BB == getDefaultDest()) return NULL; ConstantInt *CI = NULL; - for (unsigned i = 1, e = getNumCases(); i != e; ++i) { - if (getSuccessor(i) == BB) { + for (unsigned i = 0, e = getNumCases(); i != e; ++i) { + if (getSuccessor(i + 1) == BB) { if (CI) return NULL; // Multiple cases lead to BB. else CI = getCaseValue(i); } @@ -2514,9 +2569,8 @@ public: /// void addCase(ConstantInt *OnVal, BasicBlock *Dest); - /// removeCase - This method removes the specified successor from the switch - /// instruction. Note that this cannot be used to remove the default - /// destination (successor #0). Also note that this operation may reorder the + /// removeCase - This method removes the specified case and its successor + /// from the switch instruction. Note that this operation may reorder the /// remaining cases at index idx and above. /// void removeCase(unsigned idx); @@ -2531,6 +2585,22 @@ public: setOperand(idx*2+1, (Value*)NewSucc); } + /// Resolves successor for idx-th case. + /// Use getCaseSuccessor instead of TerminatorInst::getSuccessor, + /// since internal SwitchInst organization of operands/successors is + /// hidden and may be changed in any moment. + BasicBlock *getCaseSuccessor(unsigned idx) const { + return getSuccessor(resolveSuccessorIndex(idx)); + } + + /// Set new successor for idx-th case. + /// Use setCaseSuccessor instead of TerminatorInst::setSuccessor, + /// since internal SwitchInst organization of operands/successors is + /// hidden and may be changed in any moment. + void setCaseSuccessor(unsigned idx, BasicBlock *NewSucc) { + setSuccessor(resolveSuccessorIndex(idx), NewSucc); + } + // getSuccessorValue - Return the value associated with the specified // successor. 
ConstantInt *getSuccessorValue(unsigned idx) const { @@ -2903,42 +2973,6 @@ InvokeInst::InvokeInst(Value *Func, DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value) //===----------------------------------------------------------------------===// -// UnwindInst Class -//===----------------------------------------------------------------------===// - -//===--------------------------------------------------------------------------- -/// UnwindInst - Immediately exit the current function, unwinding the stack -/// until an invoke instruction is found. -/// -class UnwindInst : public TerminatorInst { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT -protected: - virtual UnwindInst *clone_impl() const; -public: - // allocate space for exactly zero operands - void *operator new(size_t s) { - return User::operator new(s, 0); - } - explicit UnwindInst(LLVMContext &C, Instruction *InsertBefore = 0); - explicit UnwindInst(LLVMContext &C, BasicBlock *InsertAtEnd); - - unsigned getNumSuccessors() const { return 0; } - - // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const UnwindInst *) { return true; } - static inline bool classof(const Instruction *I) { - return I->getOpcode() == Instruction::Unwind; - } - static inline bool classof(const Value *V) { - return isa<Instruction>(V) && classof(cast<Instruction>(V)); - } -private: - virtual BasicBlock *getSuccessorV(unsigned idx) const; - virtual unsigned getNumSuccessorsV() const; - virtual void setSuccessorV(unsigned idx, BasicBlock *B); -}; - -//===----------------------------------------------------------------------===// // ResumeInst Class //===----------------------------------------------------------------------===// diff --git a/include/llvm/IntrinsicInst.h b/include/llvm/IntrinsicInst.h index 4286201..1cebdd2 100644 --- a/include/llvm/IntrinsicInst.h +++ b/include/llvm/IntrinsicInst.h @@ -277,34 +277,6 @@ namespace llvm { } }; - /// EHExceptionInst - This 
represents the llvm.eh.exception instruction. - /// - class EHExceptionInst : public IntrinsicInst { - public: - // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const EHExceptionInst *) { return true; } - static inline bool classof(const IntrinsicInst *I) { - return I->getIntrinsicID() == Intrinsic::eh_exception; - } - static inline bool classof(const Value *V) { - return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); - } - }; - - /// EHSelectorInst - This represents the llvm.eh.selector instruction. - /// - class EHSelectorInst : public IntrinsicInst { - public: - // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const EHSelectorInst *) { return true; } - static inline bool classof(const IntrinsicInst *I) { - return I->getIntrinsicID() == Intrinsic::eh_selector; - } - static inline bool classof(const Value *V) { - return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); - } - }; - } #endif diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td index 5f31862..069f907 100644 --- a/include/llvm/Intrinsics.td +++ b/include/llvm/Intrinsics.td @@ -304,10 +304,6 @@ let Properties = [IntrNoMem] in { //===------------------ Exception Handling Intrinsics----------------------===// // -def int_eh_exception : Intrinsic<[llvm_ptr_ty], [], [IntrReadMem]>; -def int_eh_selector : Intrinsic<[llvm_i32_ty], - [llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty]>; -def int_eh_resume : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [Throws]>; // The result of eh.typeid.for depends on the enclosing function, but inside a // given function it is 'const' and may be CSE'd etc. diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td index 2d5d9ff..f4abba9 100644 --- a/include/llvm/IntrinsicsX86.td +++ b/include/llvm/IntrinsicsX86.td @@ -145,10 +145,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
// Comparison ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". - def int_x86_sse_cmp_ss : + def int_x86_sse_cmp_ss : GCCBuiltin<"__builtin_ia32_cmpss">, Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_sse_cmp_ps : + def int_x86_sse_cmp_ps : GCCBuiltin<"__builtin_ia32_cmpps">, Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>; def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">, @@ -281,10 +281,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // FP comparison ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". - def int_x86_sse2_cmp_sd : + def int_x86_sse2_cmp_sd : GCCBuiltin<"__builtin_ia32_cmpsd">, Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_sse2_cmp_pd : + def int_x86_sse2_cmp_pd : GCCBuiltin<"__builtin_ia32_cmppd">, Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>; def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">, @@ -452,28 +452,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". llvm_i32_ty], [IntrNoMem]>; } -// Integer comparison ops -let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
- def int_x86_sse2_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb128">, - Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, - llvm_v16i8_ty], [IntrNoMem, Commutative]>; - def int_x86_sse2_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw128">, - Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, - llvm_v8i16_ty], [IntrNoMem, Commutative]>; - def int_x86_sse2_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd128">, - Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, - llvm_v4i32_ty], [IntrNoMem, Commutative]>; - def int_x86_sse2_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb128">, - Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, - llvm_v16i8_ty], [IntrNoMem]>; - def int_x86_sse2_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw128">, - Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, - llvm_v8i16_ty], [IntrNoMem]>; - def int_x86_sse2_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd128">, - Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, - llvm_v4i32_ty], [IntrNoMem]>; -} - // Conversion ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse2_cvtdq2pd : GCCBuiltin<"__builtin_ia32_cvtdq2pd">, @@ -792,12 +770,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // Vector compare, min, max let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". - def int_x86_sse41_pcmpeqq : GCCBuiltin<"__builtin_ia32_pcmpeqq">, - Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], - [IntrNoMem, Commutative]>; - def int_x86_sse42_pcmpgtq : GCCBuiltin<"__builtin_ia32_pcmpgtq">, - Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], - [IntrNoMem]>; def int_x86_sse41_pmaxsb : GCCBuiltin<"__builtin_ia32_pmaxsb128">, Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem, Commutative]>; @@ -1120,17 +1092,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx_vpermil_pd : GCCBuiltin<"__builtin_ia32_vpermilpd">, + def int_x86_avx_vpermil_pd : Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx_vpermil_ps : GCCBuiltin<"__builtin_ia32_vpermilps">, + def int_x86_avx_vpermil_ps : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx_vpermil_pd_256 : GCCBuiltin<"__builtin_ia32_vpermilpd256">, + def int_x86_avx_vpermil_pd_256 : Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>; - def int_x86_avx_vpermil_ps_256 : GCCBuiltin<"__builtin_ia32_vpermilps256">, + def int_x86_avx_vpermil_ps_256 : Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>; } @@ -1300,12 +1272,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // SIMD load ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". - def int_x86_avx_loadu_pd_256 : GCCBuiltin<"__builtin_ia32_loadupd256">, - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>; - def int_x86_avx_loadu_ps_256 : GCCBuiltin<"__builtin_ia32_loadups256">, - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>; - def int_x86_avx_loadu_dq_256 : GCCBuiltin<"__builtin_ia32_loaddqu256">, - Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>; def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">, Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>; } @@ -1521,34 +1487,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". llvm_i32_ty], [IntrNoMem]>; } -// Integer comparison ops -let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
- def int_x86_avx2_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb256">, - Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty], - [IntrNoMem, Commutative]>; - def int_x86_avx2_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw256">, - Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty], - [IntrNoMem, Commutative]>; - def int_x86_avx2_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd256">, - Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty], - [IntrNoMem, Commutative]>; - def int_x86_avx2_pcmpeq_q : GCCBuiltin<"__builtin_ia32_pcmpeqq256">, - Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty], - [IntrNoMem, Commutative]>; - def int_x86_avx2_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb256">, - Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty], - [IntrNoMem]>; - def int_x86_avx2_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw256">, - Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty], - [IntrNoMem]>; - def int_x86_avx2_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd256">, - Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty], - [IntrNoMem]>; - def int_x86_avx2_pcmpgt_q : GCCBuiltin<"__builtin_ia32_pcmpgtq256">, - Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty], - [IntrNoMem]>; -} - // Pack ops. let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_avx2_packsswb : GCCBuiltin<"__builtin_ia32_packsswb256">, @@ -1960,6 +1898,475 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
} //===----------------------------------------------------------------------===// +// XOP + + def int_x86_xop_vpermil2pd : GCCBuiltin<"__builtin_ia32_vpermil2pd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, + llvm_v2f64_ty, llvm_i8_ty], + [IntrNoMem]>; + + def int_x86_xop_vpermil2pd_256 : + GCCBuiltin<"__builtin_ia32_vpermil2pd256">, + Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, + llvm_v4f64_ty, llvm_i8_ty], + [IntrNoMem]>; + + def int_x86_xop_vpermil2ps : GCCBuiltin<"__builtin_ia32_vpermil2ps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, + llvm_v4f32_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpermil2ps_256 : + GCCBuiltin<"__builtin_ia32_vpermil2ps256">, + Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, + llvm_v8f32_ty, llvm_i8_ty], + [IntrNoMem]>; + + def int_x86_xop_vfrcz_pd : + GCCBuiltin<"__builtin_ia32_vfrczpd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>; + def int_x86_xop_vfrcz_ps : + GCCBuiltin<"__builtin_ia32_vfrczps">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; + def int_x86_xop_vfrcz_sd : + GCCBuiltin<"__builtin_ia32_vfrczsd">, + Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], + [IntrNoMem]>; + def int_x86_xop_vfrcz_ss : + GCCBuiltin<"__builtin_ia32_vfrczss">, + Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; + def int_x86_xop_vfrcz_pd_256 : + GCCBuiltin<"__builtin_ia32_vfrczpd256">, + Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>; + def int_x86_xop_vfrcz_ps_256 : + GCCBuiltin<"__builtin_ia32_vfrczps256">, + Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>; + def int_x86_xop_vpcmov : + GCCBuiltin<"__builtin_ia32_vpcmov">, + Intrinsic<[llvm_v2i64_ty], + [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcmov_256 : + GCCBuiltin<"__builtin_ia32_vpcmov_256">, + Intrinsic<[llvm_v4i64_ty], + [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty], + [IntrNoMem]>; + def 
int_x86_xop_vpcomeqb : + GCCBuiltin<"__builtin_ia32_vpcomeqb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomeqw : + GCCBuiltin<"__builtin_ia32_vpcomeqw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomeqd : + GCCBuiltin<"__builtin_ia32_vpcomeqd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomeqq : + GCCBuiltin<"__builtin_ia32_vpcomeqq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomequb : + GCCBuiltin<"__builtin_ia32_vpcomequb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomequd : + GCCBuiltin<"__builtin_ia32_vpcomequd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomequq : + GCCBuiltin<"__builtin_ia32_vpcomequq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomequw : + GCCBuiltin<"__builtin_ia32_vpcomequw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseb : + GCCBuiltin<"__builtin_ia32_vpcomfalseb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalsed : + GCCBuiltin<"__builtin_ia32_vpcomfalsed">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseq : + GCCBuiltin<"__builtin_ia32_vpcomfalseq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseub : + GCCBuiltin<"__builtin_ia32_vpcomfalseub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseud : + GCCBuiltin<"__builtin_ia32_vpcomfalseud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseuq : + 
GCCBuiltin<"__builtin_ia32_vpcomfalseuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalseuw : + GCCBuiltin<"__builtin_ia32_vpcomfalseuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomfalsew : + GCCBuiltin<"__builtin_ia32_vpcomfalsew">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeb : + GCCBuiltin<"__builtin_ia32_vpcomgeb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomged : + GCCBuiltin<"__builtin_ia32_vpcomged">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeq : + GCCBuiltin<"__builtin_ia32_vpcomgeq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeub : + GCCBuiltin<"__builtin_ia32_vpcomgeub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeud : + GCCBuiltin<"__builtin_ia32_vpcomgeud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeuq : + GCCBuiltin<"__builtin_ia32_vpcomgeuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgeuw : + GCCBuiltin<"__builtin_ia32_vpcomgeuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgew : + GCCBuiltin<"__builtin_ia32_vpcomgew">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtb : + GCCBuiltin<"__builtin_ia32_vpcomgtb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtd : + GCCBuiltin<"__builtin_ia32_vpcomgtd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtq : + GCCBuiltin<"__builtin_ia32_vpcomgtq">, + 
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtub : + GCCBuiltin<"__builtin_ia32_vpcomgtub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtud : + GCCBuiltin<"__builtin_ia32_vpcomgtud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtuq : + GCCBuiltin<"__builtin_ia32_vpcomgtuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtuw : + GCCBuiltin<"__builtin_ia32_vpcomgtuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomgtw : + GCCBuiltin<"__builtin_ia32_vpcomgtw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleb : + GCCBuiltin<"__builtin_ia32_vpcomleb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomled : + GCCBuiltin<"__builtin_ia32_vpcomled">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleq : + GCCBuiltin<"__builtin_ia32_vpcomleq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleub : + GCCBuiltin<"__builtin_ia32_vpcomleub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleud : + GCCBuiltin<"__builtin_ia32_vpcomleud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleuq : + GCCBuiltin<"__builtin_ia32_vpcomleuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomleuw : + GCCBuiltin<"__builtin_ia32_vpcomleuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomlew : + GCCBuiltin<"__builtin_ia32_vpcomlew">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + 
[IntrNoMem]>; + def int_x86_xop_vpcomltb : + GCCBuiltin<"__builtin_ia32_vpcomltb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltd : + GCCBuiltin<"__builtin_ia32_vpcomltd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltq : + GCCBuiltin<"__builtin_ia32_vpcomltq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltub : + GCCBuiltin<"__builtin_ia32_vpcomltub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltud : + GCCBuiltin<"__builtin_ia32_vpcomltud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltuq : + GCCBuiltin<"__builtin_ia32_vpcomltuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltuw : + GCCBuiltin<"__builtin_ia32_vpcomltuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomltw : + GCCBuiltin<"__builtin_ia32_vpcomltw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneb : + GCCBuiltin<"__builtin_ia32_vpcomneb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomned : + GCCBuiltin<"__builtin_ia32_vpcomned">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneq : + GCCBuiltin<"__builtin_ia32_vpcomneq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneub : + GCCBuiltin<"__builtin_ia32_vpcomneub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneud : + GCCBuiltin<"__builtin_ia32_vpcomneud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneuq : + 
GCCBuiltin<"__builtin_ia32_vpcomneuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomneuw : + GCCBuiltin<"__builtin_ia32_vpcomneuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomnew : + GCCBuiltin<"__builtin_ia32_vpcomnew">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueb : + GCCBuiltin<"__builtin_ia32_vpcomtrueb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrued : + GCCBuiltin<"__builtin_ia32_vpcomtrued">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueq : + GCCBuiltin<"__builtin_ia32_vpcomtrueq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueub : + GCCBuiltin<"__builtin_ia32_vpcomtrueub">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueud : + GCCBuiltin<"__builtin_ia32_vpcomtrueud">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueuq : + GCCBuiltin<"__builtin_ia32_vpcomtrueuq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtrueuw : + GCCBuiltin<"__builtin_ia32_vpcomtrueuw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpcomtruew : + GCCBuiltin<"__builtin_ia32_vpcomtruew">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vphaddbd : + GCCBuiltin<"__builtin_ia32_vphaddbd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphaddbq : + GCCBuiltin<"__builtin_ia32_vphaddbq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphaddbw : + GCCBuiltin<"__builtin_ia32_vphaddbw">, + Intrinsic<[llvm_v8i16_ty], 
[llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphadddq : + GCCBuiltin<"__builtin_ia32_vphadddq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_xop_vphaddubd : + GCCBuiltin<"__builtin_ia32_vphaddubd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphaddubq : + GCCBuiltin<"__builtin_ia32_vphaddubq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphaddubw : + GCCBuiltin<"__builtin_ia32_vphaddubw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphaddudq : + GCCBuiltin<"__builtin_ia32_vphaddudq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_xop_vphadduwd : + GCCBuiltin<"__builtin_ia32_vphadduwd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_xop_vphadduwq : + GCCBuiltin<"__builtin_ia32_vphadduwq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_xop_vphaddwd : + GCCBuiltin<"__builtin_ia32_vphaddwd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_xop_vphaddwq : + GCCBuiltin<"__builtin_ia32_vphaddwq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_xop_vphsubbw : + GCCBuiltin<"__builtin_ia32_vphsubbw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>; + def int_x86_xop_vphsubdq : + GCCBuiltin<"__builtin_ia32_vphsubdq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>; + def int_x86_xop_vphsubwd : + GCCBuiltin<"__builtin_ia32_vphsubwd">, + Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>; + def int_x86_xop_vpmacsdd : + GCCBuiltin<"__builtin_ia32_vpmacsdd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacsdqh : + GCCBuiltin<"__builtin_ia32_vpmacsdqh">, + Intrinsic<[llvm_v2i64_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacsdql : + 
GCCBuiltin<"__builtin_ia32_vpmacsdql">, + Intrinsic<[llvm_v2i64_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacssdd : + GCCBuiltin<"__builtin_ia32_vpmacssdd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacssdqh : + GCCBuiltin<"__builtin_ia32_vpmacssdqh">, + Intrinsic<[llvm_v2i64_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacssdql : + GCCBuiltin<"__builtin_ia32_vpmacssdql">, + Intrinsic<[llvm_v2i64_ty], + [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacsswd : + GCCBuiltin<"__builtin_ia32_vpmacsswd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacssww : + GCCBuiltin<"__builtin_ia32_vpmacssww">, + Intrinsic<[llvm_v8i16_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacswd : + GCCBuiltin<"__builtin_ia32_vpmacswd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpmacsww : + GCCBuiltin<"__builtin_ia32_vpmacsww">, + Intrinsic<[llvm_v8i16_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpmadcsswd : + GCCBuiltin<"__builtin_ia32_vpmadcsswd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpmadcswd : + GCCBuiltin<"__builtin_ia32_vpmadcswd">, + Intrinsic<[llvm_v4i32_ty], + [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpperm : + GCCBuiltin<"__builtin_ia32_vpperm">, + Intrinsic<[llvm_v16i8_ty], + [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vprotb : + GCCBuiltin<"__builtin_ia32_vprotb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vprotd : + GCCBuiltin<"__builtin_ia32_vprotd">, 
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vprotq : + GCCBuiltin<"__builtin_ia32_vprotq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vprotw : + GCCBuiltin<"__builtin_ia32_vprotw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpshab : + GCCBuiltin<"__builtin_ia32_vpshab">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpshad : + GCCBuiltin<"__builtin_ia32_vpshad">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpshaq : + GCCBuiltin<"__builtin_ia32_vpshaq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpshaw : + GCCBuiltin<"__builtin_ia32_vpshaw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + def int_x86_xop_vpshlb : + GCCBuiltin<"__builtin_ia32_vpshlb">, + Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], + [IntrNoMem]>; + def int_x86_xop_vpshld : + GCCBuiltin<"__builtin_ia32_vpshld">, + Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], + [IntrNoMem]>; + def int_x86_xop_vpshlq : + GCCBuiltin<"__builtin_ia32_vpshlq">, + Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], + [IntrNoMem]>; + def int_x86_xop_vpshlw : + GCCBuiltin<"__builtin_ia32_vpshlw">, + Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], + [IntrNoMem]>; + +//===----------------------------------------------------------------------===// // MMX // Empty MMX state op. 
diff --git a/include/llvm/LLVMContext.h b/include/llvm/LLVMContext.h index 1c50630..47d2eaa 100644 --- a/include/llvm/LLVMContext.h +++ b/include/llvm/LLVMContext.h @@ -19,6 +19,7 @@ namespace llvm { class LLVMContextImpl; class StringRef; +class Twine; class Instruction; class Module; class SMDiagnostic; @@ -80,9 +81,9 @@ public: /// be prepared to drop the erroneous construct on the floor and "not crash". /// The generated code need not be correct. The error message will be /// implicitly prefixed with "error: " and should not end with a ".". - void emitError(unsigned LocCookie, StringRef ErrorStr); - void emitError(const Instruction *I, StringRef ErrorStr); - void emitError(StringRef ErrorStr); + void emitError(unsigned LocCookie, const Twine &ErrorStr); + void emitError(const Instruction *I, const Twine &ErrorStr); + void emitError(const Twine &ErrorStr); private: // DO NOT IMPLEMENT diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 8b5d0d4..2258d45 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -31,6 +31,7 @@ #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Vectorize.h" #include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h" #include <cstdlib> @@ -97,6 +98,7 @@ namespace { (void) llvm::createNoAAPass(); (void) llvm::createNoProfileInfoPass(); (void) llvm::createObjCARCAliasAnalysisPass(); + (void) llvm::createObjCARCAPElimPass(); (void) llvm::createObjCARCExpandPass(); (void) llvm::createObjCARCContractPass(); (void) llvm::createObjCARCOptPass(); @@ -150,6 +152,7 @@ namespace { (void) llvm::createCorrelatedValuePropagationPass(); (void) llvm::createMemDepPrinter(); (void) llvm::createInstructionSimplifierPass(); + (void) llvm::createBBVectorizePass(); (void)new llvm::IntervalPartition(); (void)new llvm::FindUsedTypes(); diff --git a/include/llvm/MC/MCAsmBackend.h b/include/llvm/MC/MCAsmBackend.h index 
b841ddb..641ded5 100644 --- a/include/llvm/MC/MCAsmBackend.h +++ b/include/llvm/MC/MCAsmBackend.h @@ -14,15 +14,19 @@ #include "llvm/MC/MCFixup.h" #include "llvm/MC/MCFixupKindInfo.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" namespace llvm { class MCAsmLayout; +class MCAssembler; class MCELFObjectTargetWriter; class MCFixup; +class MCFragment; class MCInst; class MCInstFragment; class MCObjectWriter; class MCSection; +class MCValue; template<typename T> class SmallVectorImpl; class raw_ostream; @@ -46,8 +50,8 @@ public: /// createELFObjectTargetWriter - Create a new ELFObjectTargetWriter to enable /// non-standard ELFObjectWriters. virtual MCELFObjectTargetWriter *createELFObjectTargetWriter() const { - assert(0 && "createELFObjectTargetWriter is not supported by asm backend"); - return 0; + llvm_unreachable("createELFObjectTargetWriter is not supported by asm " + "backend"); } /// hasReliableSymbolDifference - Check whether this target implements @@ -87,12 +91,21 @@ public: /// getFixupKindInfo - Get information on a fixup kind. virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const; + /// processFixupValue - Target hook to adjust the literal value of a fixup + /// if necessary. IsResolved signals whether the caller believes a relocation + /// is needed; the target can modify the value. The default does nothing. + virtual void processFixupValue(const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFixup &Fixup, const MCFragment *DF, + MCValue &Target, uint64_t &Value, + bool &IsResolved) {} + /// @} - /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided + /// applyFixup - Apply the \arg Value for given \arg Fixup into the provided /// data fragment, at the offset specified by the fixup and following the /// fixup kind as appropriate. 
- virtual void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, uint64_t Value) const = 0; /// @} @@ -100,11 +113,11 @@ public: /// @name Target Relaxation Interfaces /// @{ - /// MayNeedRelaxation - Check whether the given instruction may need + /// mayNeedRelaxation - Check whether the given instruction may need /// relaxation. /// /// \param Inst - The instruction to test. - virtual bool MayNeedRelaxation(const MCInst &Inst) const = 0; + virtual bool mayNeedRelaxation(const MCInst &Inst) const = 0; /// fixupNeedsRelaxation - Target specific predicate for whether a given /// fixup requires the associated instruction to be relaxed. @@ -119,20 +132,20 @@ public: /// \param Inst - The instruction to relax, which may be the same as the /// output. /// \parm Res [output] - On return, the relaxed instruction. - virtual void RelaxInstruction(const MCInst &Inst, MCInst &Res) const = 0; + virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0; /// @} - /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given + /// writeNopData - Write an (optimal) nop sequence of Count bytes to the given /// output. If the target cannot generate such a sequence, it should return an /// error. /// /// \return - True on success. - virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0; + virtual bool writeNopData(uint64_t Count, MCObjectWriter *OW) const = 0; - /// HandleAssemblerFlag - Handle any target-specific assembler flags. + /// handleAssemblerFlag - Handle any target-specific assembler flags. /// By default, do nothing. 
- virtual void HandleAssemblerFlag(MCAssemblerFlag Flag) {} + virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {} }; } // End llvm namespace diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h index 5accabc..095ca14 100644 --- a/include/llvm/MC/MCAsmInfo.h +++ b/include/llvm/MC/MCAsmInfo.h @@ -43,7 +43,7 @@ namespace llvm { //===------------------------------------------------------------------===// // Properties to be set by the target writer, used to configure asm printer. // - + /// PointerSize - Pointer size in bytes. /// Default is 4. unsigned PointerSize; @@ -180,6 +180,11 @@ namespace llvm { const char *JT32Begin; // Defaults to "$a." bool SupportsDataRegions; + /// GPRel64Directive - if non-null, a directive that is used to emit a word + /// which should be relocated as a 64-bit GP-relative offset, e.g. .gpdword + /// on Mips. + const char *GPRel64Directive; // Defaults to NULL. + /// GPRel32Directive - if non-null, a directive that is used to emit a word /// which should be relocated as a 32-bit GP-relative offset, e.g. .gpword /// on Mips or .gprel32 on Alpha. @@ -376,6 +381,7 @@ namespace llvm { const char *getData64bitsDirective(unsigned AS = 0) const { return AS == 0 ? Data64bitsDirective : getDataASDirective(64, AS); } + const char *getGPRel64Directive() const { return GPRel64Directive; } const char *getGPRel32Directive() const { return GPRel32Directive; } /// [Code|Data]Begin label name accessors. 
@@ -546,7 +552,7 @@ namespace llvm { ExceptionsType == ExceptionHandling::ARM || ExceptionsType == ExceptionHandling::Win64); } - bool doesDwarfUsesInlineInfoSection() const { + bool doesDwarfUseInlineInfoSection() const { return DwarfUsesInlineInfoSection; } const char *getDwarfSectionOffsetDirective() const { @@ -555,7 +561,7 @@ namespace llvm { bool doesDwarfRequireRelocationForSectionOffset() const { return DwarfRequiresRelocationForSectionOffset; } - bool doesDwarfUsesLabelOffsetForRanges() const { + bool doesDwarfUseLabelOffsetForRanges() const { return DwarfUsesLabelOffsetForRanges; } bool doesDwarfUseRelocationsForStringPool() const { diff --git a/include/llvm/MC/MCAsmInfoCOFF.h b/include/llvm/MC/MCAsmInfoCOFF.h index ba699d7..0ff3e12 100644 --- a/include/llvm/MC/MCAsmInfoCOFF.h +++ b/include/llvm/MC/MCAsmInfoCOFF.h @@ -14,17 +14,19 @@ namespace llvm { class MCAsmInfoCOFF : public MCAsmInfo { + virtual void anchor(); protected: explicit MCAsmInfoCOFF(); - }; class MCAsmInfoMicrosoft : public MCAsmInfoCOFF { + virtual void anchor(); protected: explicit MCAsmInfoMicrosoft(); }; class MCAsmInfoGNUCOFF : public MCAsmInfoCOFF { + virtual void anchor(); protected: explicit MCAsmInfoGNUCOFF(); }; diff --git a/include/llvm/MC/MCAsmInfoDarwin.h b/include/llvm/MC/MCAsmInfoDarwin.h index 1f6c499..af552de 100644 --- a/include/llvm/MC/MCAsmInfoDarwin.h +++ b/include/llvm/MC/MCAsmInfoDarwin.h @@ -18,7 +18,9 @@ #include "llvm/MC/MCAsmInfo.h" namespace llvm { - struct MCAsmInfoDarwin : public MCAsmInfo { + class MCAsmInfoDarwin : public MCAsmInfo { + virtual void anchor(); + public: explicit MCAsmInfoDarwin(); }; } diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h index 687dd0c..2566241 100644 --- a/include/llvm/MC/MCAssembler.h +++ b/include/llvm/MC/MCAssembler.h @@ -106,6 +106,7 @@ public: }; class MCDataFragment : public MCFragment { + virtual void anchor(); SmallString<32> Contents; /// Fixups - The list of fixups in this fragment. 
@@ -160,6 +161,8 @@ public: // object with just the MCInst and a code size, then we should just change // MCDataFragment to have an optional MCInst at its end. class MCInstFragment : public MCFragment { + virtual void anchor(); + /// Inst - The instruction this is a fragment for. MCInst Inst; @@ -215,6 +218,8 @@ public: }; class MCAlignFragment : public MCFragment { + virtual void anchor(); + /// Alignment - The alignment to ensure, in bytes. unsigned Alignment; @@ -263,6 +268,8 @@ public: }; class MCFillFragment : public MCFragment { + virtual void anchor(); + /// Value - Value to use for filling bytes. int64_t Value; @@ -300,6 +307,8 @@ public: }; class MCOrgFragment : public MCFragment { + virtual void anchor(); + /// Offset - The offset this fragment should start at. const MCExpr *Offset; @@ -327,6 +336,8 @@ public: }; class MCLEBFragment : public MCFragment { + virtual void anchor(); + /// Value - The value this fragment should contain. const MCExpr *Value; @@ -358,6 +369,8 @@ public: }; class MCDwarfLineAddrFragment : public MCFragment { + virtual void anchor(); + /// LineDelta - the value of the difference between the two line numbers /// between two .loc dwarf directives. int64_t LineDelta; @@ -393,6 +406,8 @@ public: }; class MCDwarfCallFrameFragment : public MCFragment { + virtual void anchor(); + /// AddrDelta - The expression for the difference of the two symbols that /// make up the address delta between two .cfi_* dwarf directives. const MCExpr *AddrDelta; diff --git a/include/llvm/MC/MCCodeGenInfo.h b/include/llvm/MC/MCCodeGenInfo.h index e40a052..d1765e1 100644 --- a/include/llvm/MC/MCCodeGenInfo.h +++ b/include/llvm/MC/MCCodeGenInfo.h @@ -20,7 +20,7 @@ namespace llvm { class MCCodeGenInfo { - /// RelocationModel - Relocation model: statcic, pic, etc. + /// RelocationModel - Relocation model: static, pic, etc. 
/// Reloc::Model RelocationModel; diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h index 455b45e..8b4e296 100644 --- a/include/llvm/MC/MCContext.h +++ b/include/llvm/MC/MCContext.h @@ -15,6 +15,8 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringMap.h" #include "llvm/Support/Allocator.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/SMLoc.h" #include "llvm/Support/raw_ostream.h" #include <vector> // FIXME: Shouldn't be needed. @@ -43,6 +45,8 @@ namespace llvm { public: typedef StringMap<MCSymbol*, BumpPtrAllocator&> SymbolTable; private: + /// The SourceMgr for this object, if any. + const SourceMgr *SrcMgr; /// The MCAsmInfo for this target. const MCAsmInfo &MAI; @@ -111,9 +115,9 @@ namespace llvm { /// Symbols created for the start and end of this section. MCSymbol *GenDwarfSectionStartSym, *GenDwarfSectionEndSym; - /// The information gathered from labels that will have dwarf subprogram + /// The information gathered from labels that will have dwarf label /// entries when generating dwarf assembly source files. - std::vector<const MCGenDwarfSubprogramEntry *> MCGenDwarfSubprogramEntries; + std::vector<const MCGenDwarfLabelEntry *> MCGenDwarfLabelEntries; /// The string to embed in the debug information for the compile unit, if /// non-empty. 
@@ -137,9 +141,11 @@ namespace llvm { public: explicit MCContext(const MCAsmInfo &MAI, const MCRegisterInfo &MRI, - const MCObjectFileInfo *MOFI); + const MCObjectFileInfo *MOFI, const SourceMgr *Mgr = 0); ~MCContext(); + const SourceMgr *getSourceManager() const { return SrcMgr; } + const MCAsmInfo &getAsmInfo() const { return MAI; } const MCRegisterInfo &getRegisterInfo() const { return MRI; } @@ -287,12 +293,12 @@ namespace llvm { void setGenDwarfSectionEndSym(MCSymbol *Sym) { GenDwarfSectionEndSym = Sym; } - const std::vector<const MCGenDwarfSubprogramEntry *> - &getMCGenDwarfSubprogramEntries() const { - return MCGenDwarfSubprogramEntries; + const std::vector<const MCGenDwarfLabelEntry *> + &getMCGenDwarfLabelEntries() const { + return MCGenDwarfLabelEntries; } - void addMCGenDwarfSubprogramEntry(const MCGenDwarfSubprogramEntry *E) { - MCGenDwarfSubprogramEntries.push_back(E); + void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry *E) { + MCGenDwarfLabelEntries.push_back(E); } void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; } @@ -315,6 +321,11 @@ namespace llvm { } void Deallocate(void *Ptr) { } + + // Unrecoverable error has occured. Display the best diagnostic we can + // and bail via exit(1). For now, most MC backend errors are unrecoverable. + // FIXME: We should really do something about that. + LLVM_ATTRIBUTE_NORETURN void FatalError(SMLoc L, const Twine &Msg); }; } // end namespace llvm diff --git a/include/llvm/MC/MCDisassembler.h b/include/llvm/MC/MCDisassembler.h index 454277d..4b5fbec 100644 --- a/include/llvm/MC/MCDisassembler.h +++ b/include/llvm/MC/MCDisassembler.h @@ -90,7 +90,7 @@ public: /// @return - An array of instruction information, with one entry for /// each MCInst opcode this disassembler returns. /// NULL if there is no info for this target. 
- virtual EDInstInfo *getEDInfo() const { return (EDInstInfo*)0; } + virtual const EDInstInfo *getEDInfo() const { return (EDInstInfo*)0; } private: // diff --git a/include/llvm/MC/MCDwarf.h b/include/llvm/MC/MCDwarf.h index 9c77218..a0fc9e5 100644 --- a/include/llvm/MC/MCDwarf.h +++ b/include/llvm/MC/MCDwarf.h @@ -212,7 +212,7 @@ namespace llvm { // // This emits the Dwarf file and the line tables. // - static void Emit(MCStreamer *MCOS); + static const MCSymbol *Emit(MCStreamer *MCOS); }; class MCDwarfLineAddr { @@ -235,12 +235,12 @@ namespace llvm { // When generating dwarf for assembly source files this emits the Dwarf // sections. // - static void Emit(MCStreamer *MCOS); + static void Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol); }; // When generating dwarf for assembly source files this is the info that is - // needed to be gathered for each symbol that will have a dwarf2_subprogram. - class MCGenDwarfSubprogramEntry { + // needed to be gathered for each symbol that will have a dwarf label. + class MCGenDwarfLabelEntry { private: // Name of the symbol without a leading underbar, if any. StringRef Name; @@ -248,14 +248,12 @@ namespace llvm { unsigned FileNumber; // The line number this symbol is at. unsigned LineNumber; - // The low_pc for the dwarf2_subprogram is taken from this symbol. The - // high_pc is taken from the next symbol's value or the end of the section - // for the last symbol + // The low_pc for the dwarf label is taken from this symbol. 
MCSymbol *Label; public: - MCGenDwarfSubprogramEntry(StringRef name, unsigned fileNumber, - unsigned lineNumber, MCSymbol *label) : + MCGenDwarfLabelEntry(StringRef name, unsigned fileNumber, + unsigned lineNumber, MCSymbol *label) : Name(name), FileNumber(fileNumber), LineNumber(lineNumber), Label(label){} StringRef getName() const { return Name; } @@ -271,21 +269,23 @@ namespace llvm { class MCCFIInstruction { public: - enum OpType { SameValue, Remember, Restore, Move, RelMove }; + enum OpType { SameValue, RememberState, RestoreState, Move, RelMove, Escape, + Restore}; private: OpType Operation; MCSymbol *Label; // Move to & from location. MachineLocation Destination; MachineLocation Source; + std::vector<char> Values; public: MCCFIInstruction(OpType Op, MCSymbol *L) : Operation(Op), Label(L) { - assert(Op == Remember || Op == Restore); + assert(Op == RememberState || Op == RestoreState); } MCCFIInstruction(OpType Op, MCSymbol *L, unsigned Register) : Operation(Op), Label(L), Destination(Register) { - assert(Op == SameValue); + assert(Op == SameValue || Op == Restore); } MCCFIInstruction(MCSymbol *L, const MachineLocation &D, const MachineLocation &S) @@ -296,16 +296,24 @@ namespace llvm { : Operation(Op), Label(L), Destination(D), Source(S) { assert(Op == RelMove); } + MCCFIInstruction(OpType Op, MCSymbol *L, StringRef Vals) + : Operation(Op), Label(L), Values(Vals.begin(), Vals.end()) { + assert(Op == Escape); + } OpType getOperation() const { return Operation; } MCSymbol *getLabel() const { return Label; } const MachineLocation &getDestination() const { return Destination; } const MachineLocation &getSource() const { return Source; } + const StringRef getValues() const { + return StringRef(&Values[0], Values.size()); + } }; struct MCDwarfFrameInfo { MCDwarfFrameInfo() : Begin(0), End(0), Personality(0), Lsda(0), Function(0), Instructions(), PersonalityEncoding(), - LsdaEncoding(0), CompactUnwindEncoding(0) {} + LsdaEncoding(0), CompactUnwindEncoding(0), + 
IsSignalFrame(false) {} MCSymbol *Begin; MCSymbol *End; const MCSymbol *Personality; @@ -315,6 +323,7 @@ namespace llvm { unsigned PersonalityEncoding; unsigned LsdaEncoding; uint32_t CompactUnwindEncoding; + bool IsSignalFrame; }; class MCDwarfFrameEmitter { diff --git a/include/llvm/MC/MCELFObjectWriter.h b/include/llvm/MC/MCELFObjectWriter.h index 3c150dc..6e9f5d8 100644 --- a/include/llvm/MC/MCELFObjectWriter.h +++ b/include/llvm/MC/MCELFObjectWriter.h @@ -12,26 +12,53 @@ #include "llvm/MC/MCObjectWriter.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ELF.h" namespace llvm { class MCELFObjectTargetWriter { - const Triple::OSType OSType; + const uint8_t OSABI; const uint16_t EMachine; const unsigned HasRelocationAddend : 1; const unsigned Is64Bit : 1; + protected: - MCELFObjectTargetWriter(bool Is64Bit_, Triple::OSType OSType_, + + MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_, uint16_t EMachine_, bool HasRelocationAddend_); public: - virtual ~MCELFObjectTargetWriter(); + static uint8_t getOSABI(Triple::OSType OSType) { + switch (OSType) { + case Triple::FreeBSD: + return ELF::ELFOSABI_FREEBSD; + case Triple::Linux: + return ELF::ELFOSABI_LINUX; + default: + return ELF::ELFOSABI_NONE; + } + } + + virtual ~MCELFObjectTargetWriter() {} + + virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, + bool IsPCRel, bool IsRelocWithSymbol, + int64_t Addend) const = 0; + virtual unsigned getEFlags() const; + virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm, + const MCValue &Target, + const MCFragment &F, + const MCFixup &Fixup, + bool IsPCRel) const; + virtual void adjustFixupOffset(const MCFixup &Fixup, + uint64_t &RelocOffset); + /// @name Accessors /// @{ - Triple::OSType getOSType() { return OSType; } + uint8_t getOSABI() { return OSABI; } uint16_t getEMachine() { return EMachine; } bool hasRelocationAddend() { return HasRelocationAddend; } - bool is64Bit() { return Is64Bit; } + bool is64Bit() const { return 
Is64Bit; } /// @} }; diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h index e7f5b6a..2ae1e84 100644 --- a/include/llvm/MC/MCExpr.h +++ b/include/llvm/MC/MCExpr.h @@ -162,6 +162,7 @@ public: VK_TPOFF, VK_DTPOFF, VK_TLVP, // Mach-O thread local variable relocation + VK_SECREL, // FIXME: We'd really like to use the generic Kinds listed above for these. VK_ARM_PLT, // ARM-style PLT references. i.e., (PLT) instead of @PLT VK_ARM_TLSGD, // ditto for TLSGD, GOT, GOTOFF, TPOFF and GOTTPOFF @@ -169,6 +170,7 @@ public: VK_ARM_GOTOFF, VK_ARM_TPOFF, VK_ARM_GOTTPOFF, + VK_ARM_TARGET1, VK_PPC_TOC, VK_PPC_DARWIN_HA16, // ha16(symbol) @@ -176,7 +178,6 @@ public: VK_PPC_GAS_HA16, // symbol@ha VK_PPC_GAS_LO16, // symbol@l - VK_Mips_None, VK_Mips_GPREL, VK_Mips_GOT_CALL, VK_Mips_GOT16, @@ -205,7 +206,9 @@ private: const VariantKind Kind; explicit MCSymbolRefExpr(const MCSymbol *_Symbol, VariantKind _Kind) - : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol), Kind(_Kind) {} + : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol), Kind(_Kind) { + assert(Symbol); + } public: /// @name Construction diff --git a/include/llvm/MC/MCFixup.h b/include/llvm/MC/MCFixup.h index 7404270..16e9eb7 100644 --- a/include/llvm/MC/MCFixup.h +++ b/include/llvm/MC/MCFixup.h @@ -11,6 +11,8 @@ #define LLVM_MC_MCFIXUP_H #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SMLoc.h" #include <cassert> namespace llvm { @@ -30,6 +32,10 @@ enum MCFixupKind { FK_GPRel_2, ///< A two-byte gp relative fixup. FK_GPRel_4, ///< A four-byte gp relative fixup. FK_GPRel_8, ///< A eight-byte gp relative fixup. + FK_SecRel_1, ///< A one-byte section relative fixup. + FK_SecRel_2, ///< A two-byte section relative fixup. + FK_SecRel_4, ///< A four-byte section relative fixup. + FK_SecRel_8, ///< A eight-byte section relative fixup. FirstTargetFixupKind = 128, @@ -65,14 +71,17 @@ class MCFixup { /// determine how the operand value should be encoded into the instruction. 
unsigned Kind; + /// The source location which gave rise to the fixup, if any. + SMLoc Loc; public: static MCFixup Create(uint32_t Offset, const MCExpr *Value, - MCFixupKind Kind) { + MCFixupKind Kind, SMLoc Loc = SMLoc()) { assert(unsigned(Kind) < MaxTargetFixupKind && "Kind out of range!"); MCFixup FI; FI.Value = Value; FI.Offset = Offset; FI.Kind = unsigned(Kind); + FI.Loc = Loc; return FI; } @@ -87,13 +96,15 @@ public: /// size. It is an error to pass an unsupported size. static MCFixupKind getKindForSize(unsigned Size, bool isPCRel) { switch (Size) { - default: assert(0 && "Invalid generic fixup size!"); + default: llvm_unreachable("Invalid generic fixup size!"); case 1: return isPCRel ? FK_PCRel_1 : FK_Data_1; case 2: return isPCRel ? FK_PCRel_2 : FK_Data_2; case 4: return isPCRel ? FK_PCRel_4 : FK_Data_4; case 8: return isPCRel ? FK_PCRel_8 : FK_Data_8; } } + + SMLoc getLoc() const { return Loc; } }; } // End llvm namespace diff --git a/include/llvm/MC/MCInst.h b/include/llvm/MC/MCInst.h index a5e4632..397a37d 100644 --- a/include/llvm/MC/MCInst.h +++ b/include/llvm/MC/MCInst.h @@ -19,12 +19,14 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/SMLoc.h" namespace llvm { class raw_ostream; class MCAsmInfo; class MCInstPrinter; class MCExpr; +class MCInst; /// MCOperand - Instances of this class represent operands of the MCInst class. /// This is a simple discriminated union. @@ -34,7 +36,8 @@ class MCOperand { kRegister, ///< Register operand. kImmediate, ///< Immediate operand. kFPImmediate, ///< Floating-point immediate operand. - kExpr ///< Relocatable immediate operand. + kExpr, ///< Relocatable immediate operand. + kInst ///< Sub-instruction operand. 
}; unsigned char Kind; @@ -43,6 +46,7 @@ class MCOperand { int64_t ImmVal; double FPImmVal; const MCExpr *ExprVal; + const MCInst *InstVal; }; public: @@ -53,6 +57,7 @@ public: bool isImm() const { return Kind == kImmediate; } bool isFPImm() const { return Kind == kFPImmediate; } bool isExpr() const { return Kind == kExpr; } + bool isInst() const { return Kind == kInst; } /// getReg - Returns the register number. unsigned getReg() const { @@ -94,6 +99,15 @@ public: ExprVal = Val; } + const MCInst *getInst() const { + assert(isInst() && "This is not a sub-instruction"); + return InstVal; + } + void setInst(const MCInst *Val) { + assert(isInst() && "This is not a sub-instruction"); + InstVal = Val; + } + static MCOperand CreateReg(unsigned Reg) { MCOperand Op; Op.Kind = kRegister; @@ -118,6 +132,12 @@ public: Op.ExprVal = Val; return Op; } + static MCOperand CreateInst(const MCInst *Val) { + MCOperand Op; + Op.Kind = kInst; + Op.InstVal = Val; + return Op; + } void print(raw_ostream &OS, const MCAsmInfo *MAI) const; void dump() const; @@ -129,14 +149,17 @@ template <> struct isPodLike<MCOperand> { static const bool value = true; }; /// instruction. 
class MCInst { unsigned Opcode; + SMLoc Loc; SmallVector<MCOperand, 8> Operands; public: MCInst() : Opcode(0) {} void setOpcode(unsigned Op) { Opcode = Op; } - unsigned getOpcode() const { return Opcode; } + void setLoc(SMLoc loc) { Loc = loc; } + SMLoc getLoc() const { return Loc; } + const MCOperand &getOperand(unsigned i) const { return Operands[i]; } MCOperand &getOperand(unsigned i) { return Operands[i]; } unsigned getNumOperands() const { return Operands.size(); } diff --git a/include/llvm/MC/MCInstPrinter.h b/include/llvm/MC/MCInstPrinter.h index 01ad2d3..4d2adfa 100644 --- a/include/llvm/MC/MCInstPrinter.h +++ b/include/llvm/MC/MCInstPrinter.h @@ -14,6 +14,7 @@ namespace llvm { class MCInst; class raw_ostream; class MCAsmInfo; +class MCRegisterInfo; class StringRef; /// MCInstPrinter - This is an instance of a target assembly language printer @@ -25,6 +26,7 @@ protected: /// assembly emission is disable. raw_ostream *CommentStream; const MCAsmInfo &MAI; + const MCRegisterInfo &MRI; /// The current set of available features. unsigned AvailableFeatures; @@ -32,8 +34,8 @@ protected: /// Utility function for printing annotations. void printAnnotation(raw_ostream &OS, StringRef Annot); public: - MCInstPrinter(const MCAsmInfo &mai) - : CommentStream(0), MAI(mai), AvailableFeatures(0) {} + MCInstPrinter(const MCAsmInfo &mai, const MCRegisterInfo &mri) + : CommentStream(0), MAI(mai), MRI(mri), AvailableFeatures(0) {} virtual ~MCInstPrinter(); diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h index 6d71cf5..ca497b8 100644 --- a/include/llvm/MC/MCInstrDesc.h +++ b/include/llvm/MC/MCInstrDesc.h @@ -137,7 +137,6 @@ public: unsigned short NumDefs; // Num of args that are definitions unsigned short SchedClass; // enum identifying instr sched class unsigned short Size; // Number of bytes in encoding. 
- const char * Name; // Name of the instruction record in td file unsigned Flags; // Flags identifying machine instr class uint64_t TSFlags; // Target Specific Flag values const unsigned *ImplicitUses; // Registers implicitly read by this instr @@ -161,12 +160,6 @@ public: return Opcode; } - /// getName - Return the name of the record in the .td file for this - /// instruction, for example "ADD8ri". - const char *getName() const { - return Name; - } - /// getNumOperands - Return the number of declared MachineOperands for this /// MachineInstruction. Note that variadic (isVariadic() returns true) /// instructions may have additional operands at the end of the list, and note diff --git a/include/llvm/MC/MCInstrInfo.h b/include/llvm/MC/MCInstrInfo.h index a63e5fa..1d3a36c 100644 --- a/include/llvm/MC/MCInstrInfo.h +++ b/include/llvm/MC/MCInstrInfo.h @@ -24,14 +24,19 @@ namespace llvm { /// MCInstrInfo - Interface to description of machine instruction set /// class MCInstrInfo { - const MCInstrDesc *Desc; // Raw array to allow static init'n - unsigned NumOpcodes; // Number of entries in the desc array + const MCInstrDesc *Desc; // Raw array to allow static init'n + const unsigned *InstrNameIndices; // Array for name indices in InstrNameData + const char *InstrNameData; // Instruction name string pool + unsigned NumOpcodes; // Number of entries in the desc array public: /// InitMCInstrInfo - Initialize MCInstrInfo, called by TableGen /// auto-generated routines. *DO NOT USE*. - void InitMCInstrInfo(const MCInstrDesc *D, unsigned NO) { + void InitMCInstrInfo(const MCInstrDesc *D, const unsigned *NI, const char *ND, + unsigned NO) { Desc = D; + InstrNameIndices = NI; + InstrNameData = ND; NumOpcodes = NO; } @@ -44,6 +49,12 @@ public: assert(Opcode < NumOpcodes && "Invalid opcode!"); return Desc[Opcode]; } + + /// getName - Returns the name for the instructions with the given opcode. 
+ const char *getName(unsigned Opcode) const { + assert(Opcode < NumOpcodes && "Invalid opcode!"); + return &InstrNameData[InstrNameIndices[Opcode]]; + } }; } // End llvm namespace diff --git a/include/llvm/MC/MCObjectFileInfo.h b/include/llvm/MC/MCObjectFileInfo.h index d91b11b..5d5801f 100644 --- a/include/llvm/MC/MCObjectFileInfo.h +++ b/include/llvm/MC/MCObjectFileInfo.h @@ -22,10 +22,6 @@ namespace llvm { class MCContext; class MCSection; class Triple; - - namespace Structors { - enum OutputOrder { None, PriorityOrder, ReversePriorityOrder }; - } class MCObjectFileInfo { protected: @@ -51,6 +47,8 @@ protected: unsigned FDEEncoding; unsigned FDECFIEncoding; unsigned TTypeEncoding; + // Section flags for eh_frame + unsigned EHSectionFlags; /// TextSection - Section directive for standard text. /// @@ -113,7 +111,7 @@ protected: const MCSection *TLSExtraDataSection; /// TLSDataSection - Section directive for Thread Local data. - /// ELF and MachO only. + /// ELF, MachO and COFF. const MCSection *TLSDataSection; // Defaults to ".tdata". /// TLSBSSSection - Section directive for Thread Local uninitialized data. @@ -167,11 +165,6 @@ protected: const MCSection *DrectveSection; const MCSection *PDataSection; const MCSection *XDataSection; - - /// StructorOutputOrder - Whether the static ctor/dtor list should be output - /// in no particular order, in order of increasing priority or the reverse: - /// in order of decreasing priority (the default). - Structors::OutputOrder StructorOutputOrder; // Default is reverse order. 
public: void InitMCObjectFileInfo(StringRef TT, Reloc::Model RM, CodeModel::Model CM, @@ -197,8 +190,6 @@ public: const MCSection *getTextSection() const { return TextSection; } const MCSection *getDataSection() const { return DataSection; } const MCSection *getBSSSection() const { return BSSSection; } - const MCSection *getStaticCtorSection() const { return StaticCtorSection; } - const MCSection *getStaticDtorSection() const { return StaticDtorSection; } const MCSection *getLSDASection() const { return LSDASection; } const MCSection *getCompactUnwindSection() const{ return CompactUnwindSection; @@ -300,10 +291,6 @@ public: return EHFrameSection; } - Structors::OutputOrder getStructorOutputOrder() const { - return StructorOutputOrder; - } - private: enum Environment { IsMachO, IsELF, IsCOFF }; Environment Env; diff --git a/include/llvm/MC/MCObjectStreamer.h b/include/llvm/MC/MCObjectStreamer.h index 01d254a..a69075d 100644 --- a/include/llvm/MC/MCObjectStreamer.h +++ b/include/llvm/MC/MCObjectStreamer.h @@ -34,6 +34,8 @@ class MCObjectStreamer : public MCStreamer { MCSectionData *CurSectionData; virtual void EmitInstToData(const MCInst &Inst) = 0; + virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame); + virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame); protected: MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB, @@ -70,7 +72,7 @@ public: virtual void ChangeSection(const MCSection *Section); virtual void EmitInstruction(const MCInst &Inst); virtual void EmitInstToFragment(const MCInst &Inst); - virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value); + virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value); virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel, const MCSymbol *Label, @@ -78,7 +80,7 @@ public: virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel, const MCSymbol *Label); virtual void EmitGPRel32Value(const MCExpr *Value); - virtual void Finish(); + 
virtual void FinishImpl(); /// @} }; diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h index 9bd565f..f6a566a 100644 --- a/include/llvm/MC/MCObjectWriter.h +++ b/include/llvm/MC/MCObjectWriter.h @@ -192,8 +192,6 @@ public: unsigned Padding = 0); }; -MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit); - } // End llvm namespace #endif diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h index 039deaa..793c709 100644 --- a/include/llvm/MC/MCParser/MCAsmParser.h +++ b/include/llvm/MC/MCParser/MCAsmParser.h @@ -64,6 +64,9 @@ public: MCTargetAsmParser &getTargetParser() const { return *TargetParser; } void setTargetParser(MCTargetAsmParser &P); + virtual unsigned getAssemblerDialect() { return 0;} + virtual void setAssemblerDialect(unsigned i) { } + bool getShowParsedOperands() const { return ShowParsedOperands; } void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; } diff --git a/include/llvm/MC/MCRegisterInfo.h b/include/llvm/MC/MCRegisterInfo.h index 6f314aa..762558d 100644 --- a/include/llvm/MC/MCRegisterInfo.h +++ b/include/llvm/MC/MCRegisterInfo.h @@ -17,6 +17,7 @@ #define LLVM_MC_MCREGISTERINFO_H #include "llvm/ADT/DenseMap.h" +#include "llvm/Support/ErrorHandling.h" #include <cassert> namespace llvm { @@ -24,28 +25,18 @@ namespace llvm { /// MCRegisterClass - Base class of TargetRegisterClass. 
class MCRegisterClass { public: - typedef const unsigned* iterator; - typedef const unsigned* const_iterator; -private: - unsigned ID; + typedef const uint16_t* iterator; + typedef const uint16_t* const_iterator; + const char *Name; - const unsigned RegSize, Alignment; // Size & Alignment of register in bytes - const int CopyCost; + const iterator RegsBegin; + const uint8_t *const RegSet; + const uint8_t RegsSize; + const uint8_t RegSetSize; + const uint8_t ID; + const uint8_t RegSize, Alignment; // Size & Alignment of register in bytes + const int8_t CopyCost; const bool Allocatable; - const iterator RegsBegin, RegsEnd; - const unsigned char *const RegSet; - const unsigned RegSetSize; -public: - MCRegisterClass(unsigned id, const char *name, - unsigned RS, unsigned Al, int CC, bool Allocable, - iterator RB, iterator RE, const unsigned char *Bits, - unsigned NumBytes) - : ID(id), Name(name), RegSize(RS), Alignment(Al), CopyCost(CC), - Allocatable(Allocable), RegsBegin(RB), RegsEnd(RE), RegSet(Bits), - RegSetSize(NumBytes) { - for (iterator i = RegsBegin; i != RegsEnd; ++i) - assert(contains(*i) && "Bit field corrupted."); - } /// getID() - Return the register class ID number. /// @@ -58,11 +49,11 @@ public: /// begin/end - Return all of the registers in this class. /// iterator begin() const { return RegsBegin; } - iterator end() const { return RegsEnd; } + iterator end() const { return RegsBegin + RegsSize; } /// getNumRegs - Return the number of registers in this class. /// - unsigned getNumRegs() const { return (unsigned)(RegsEnd-RegsBegin); } + unsigned getNumRegs() const { return RegsSize; } /// getRegister - Return the specified register in the class. /// @@ -115,10 +106,10 @@ public: /// of AX. 
/// struct MCRegisterDesc { - const char *Name; // Printable name for the reg (for debugging) - const unsigned *Overlaps; // Overlapping registers, described above - const unsigned *SubRegs; // Sub-register set, described above - const unsigned *SuperRegs; // Super-register set, described above + const char *Name; // Printable name for the reg (for debugging) + uint16_t Overlaps; // Overlapping registers, described above + uint16_t SubRegs; // Sub-register set, described above + uint16_t SuperRegs; // Super-register set, described above }; /// MCRegisterInfo base class - We assume that the target defines a static @@ -142,6 +133,12 @@ private: unsigned RAReg; // Return address register const MCRegisterClass *Classes; // Pointer to the regclass array unsigned NumClasses; // Number of entries in the array + const uint16_t *Overlaps; // Pointer to the overlaps array + const uint16_t *SubRegs; // Pointer to the subregs array + const uint16_t *SuperRegs; // Pointer to the superregs array + const uint16_t *SubRegIndices; // Pointer to the subreg lookup + // array. + unsigned NumSubRegIndices; // Number of subreg indices. DenseMap<unsigned, int> L2DwarfRegs; // LLVM to Dwarf regs mapping DenseMap<unsigned, int> EHL2DwarfRegs; // LLVM to Dwarf regs mapping EH DenseMap<unsigned, unsigned> Dwarf2LRegs; // Dwarf to LLVM regs mapping @@ -152,12 +149,21 @@ public: /// InitMCRegisterInfo - Initialize MCRegisterInfo, called by TableGen /// auto-generated routines. *DO NOT USE*. 
void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA, - const MCRegisterClass *C, unsigned NC) { + const MCRegisterClass *C, unsigned NC, + const uint16_t *O, const uint16_t *Sub, + const uint16_t *Super, + const uint16_t *SubIndices, + unsigned NumIndices) { Desc = D; NumRegs = NR; RAReg = RA; Classes = C; + Overlaps = O; + SubRegs = Sub; + SuperRegs = Super; NumClasses = NC; + SubRegIndices = SubIndices; + NumSubRegIndices = NumIndices; } /// mapLLVMRegToDwarfReg - Used to initialize LLVM register to Dwarf @@ -212,9 +218,9 @@ public: /// register, or a null list of there are none. The list returned is zero /// terminated. /// - const unsigned *getAliasSet(unsigned RegNo) const { + const uint16_t *getAliasSet(unsigned RegNo) const { // The Overlaps set always begins with Reg itself. - return get(RegNo).Overlaps + 1; + return Overlaps + get(RegNo).Overlaps + 1; } /// getOverlaps - Return a list of registers that overlap Reg, including @@ -222,8 +228,8 @@ public: /// list. /// These are exactly the registers in { x | regsOverlap(x, Reg) }. /// - const unsigned *getOverlaps(unsigned RegNo) const { - return get(RegNo).Overlaps; + const uint16_t *getOverlaps(unsigned RegNo) const { + return Overlaps + get(RegNo).Overlaps; } /// getSubRegisters - Return the list of registers that are sub-registers of @@ -231,8 +237,35 @@ public: /// returned is zero terminated and sorted according to super-sub register /// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH. /// - const unsigned *getSubRegisters(unsigned RegNo) const { - return get(RegNo).SubRegs; + const uint16_t *getSubRegisters(unsigned RegNo) const { + return SubRegs + get(RegNo).SubRegs; + } + + /// getSubReg - Returns the physical register number of sub-register "Index" + /// for physical register RegNo. Return zero if the sub-register does not + /// exist. 
+ unsigned getSubReg(unsigned Reg, unsigned Idx) const { + return *(SubRegIndices + (Reg - 1) * NumSubRegIndices + Idx - 1); + } + + /// getMatchingSuperReg - Return a super-register of the specified register + /// Reg so its sub-register of index SubIdx is Reg. + unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, + const MCRegisterClass *RC) const { + for (const uint16_t *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs) + if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR)) + return SR; + return 0; + } + + /// getSubRegIndex - For a given register pair, return the sub-register index + /// if the second register is a sub-register of the first. Return zero + /// otherwise. + unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const { + for (unsigned I = 1; I <= NumSubRegIndices; ++I) + if (getSubReg(RegNo, I) == SubRegNo) + return I; + return 0; } /// getSuperRegisters - Return the list of registers that are super-registers @@ -240,8 +273,8 @@ public: /// returned is zero terminated and sorted according to super-sub register /// relations. e.g. X86::AL's super-register list is AX, EAX, RAX. /// - const unsigned *getSuperRegisters(unsigned RegNo) const { - return get(RegNo).SuperRegs; + const uint16_t *getSuperRegisters(unsigned RegNo) const { + return SuperRegs + get(RegNo).SuperRegs; } /// getName - Return the human-readable symbolic target-specific name for the @@ -273,8 +306,7 @@ public: const DenseMap<unsigned, unsigned> &M = isEH ? 
EHDwarf2LRegs : Dwarf2LRegs; const DenseMap<unsigned, unsigned>::const_iterator I = M.find(RegNum); if (I == M.end()) { - assert(0 && "Invalid RegNum"); - return -1; + llvm_unreachable("Invalid RegNum"); } return I->second; } diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h index 1c1fe02..9932306 100644 --- a/include/llvm/MC/MCStreamer.h +++ b/include/llvm/MC/MCStreamer.h @@ -94,6 +94,10 @@ namespace llvm { const MCExpr *ForceExpAbs(const MCExpr* Expr); + void RecordProcStart(MCDwarfFrameInfo &Frame); + virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame); + void RecordProcEnd(MCDwarfFrameInfo &Frame); + virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame); void EmitFrames(bool usingCFI); MCWin64EHUnwindInfo *getCurrentW64UnwindInfo(){return CurrentW64UnwindInfo;} @@ -334,6 +338,11 @@ namespace llvm { /// EndCOFFSymbolDef - Marks the end of the symbol definition. virtual void EndCOFFSymbolDef() = 0; + /// EmitCOFFSecRel32 - Emits a COFF section relative relocation. + /// + /// @param Symbol - Symbol the section relative realocation should point to. + virtual void EmitCOFFSecRel32(MCSymbol const *Symbol); + /// EmitELFSize - Emit an ELF .size directive. /// /// This corresponds to an assembler statement such as: @@ -432,6 +441,13 @@ namespace llvm { void EmitSymbolValue(const MCSymbol *Sym, unsigned Size, unsigned AddrSpace = 0); + /// EmitGPRel64Value - Emit the expression @p Value into the output as a + /// gprel64 (64-bit GP relative) value. + /// + /// This is used to implement assembler directives such as .gpdword on + /// targets that support them. + virtual void EmitGPRel64Value(const MCExpr *Value); + /// EmitGPRel32Value - Emit the expression @p Value into the output as a /// gprel32 (32-bit GP relative) value. /// @@ -494,7 +510,8 @@ namespace llvm { /// @param Offset - The offset to reach. This may be an expression, but the /// expression must be associated with the current section. 
/// @param Value - The value to use when filling bytes. - virtual void EmitValueToOffset(const MCExpr *Offset, + /// @return false on success, true if the offset was invalid. + virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value = 0) = 0; /// @} @@ -531,8 +548,8 @@ namespace llvm { virtual void EmitCompactUnwindEncoding(uint32_t CompactUnwindEncoding); virtual void EmitCFISections(bool EH, bool Debug); - virtual void EmitCFIStartProc(); - virtual void EmitCFIEndProc(); + void EmitCFIStartProc(); + void EmitCFIEndProc(); virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset); virtual void EmitCFIDefCfaOffset(int64_t Offset); virtual void EmitCFIDefCfaRegister(int64_t Register); @@ -542,8 +559,11 @@ namespace llvm { virtual void EmitCFIRememberState(); virtual void EmitCFIRestoreState(); virtual void EmitCFISameValue(int64_t Register); + virtual void EmitCFIRestore(int64_t Register); virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset); virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment); + virtual void EmitCFIEscape(StringRef Values); + virtual void EmitCFISignalFrame(); virtual void EmitWin64EHStartProc(const MCSymbol *Symbol); virtual void EmitWin64EHEndProc(); @@ -583,8 +603,10 @@ namespace llvm { virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList, bool isVector); + /// FinishImpl - Streamer specific finalization. + virtual void FinishImpl() = 0; /// Finish - Finish emission of machine code. - virtual void Finish() = 0; + void Finish(); }; /// createNullStreamer - Create a dummy machine code streamer, which does @@ -644,12 +666,6 @@ namespace llvm { raw_ostream &OS, MCCodeEmitter *CE, bool RelaxAll, bool NoExecStack); - /// createLoggingStreamer - Create a machine code streamer which just logs the - /// API calls and then dispatches to another streamer. - /// - /// The new streamer takes ownership of the \arg Child. 
- MCStreamer *createLoggingStreamer(MCStreamer *Child, raw_ostream &OS); - /// createPureStreamer - Create a machine code streamer which will generate /// "pure" MC object files, for use with MC-JIT and testing tools. /// diff --git a/include/llvm/MC/MCWinCOFFObjectWriter.h b/include/llvm/MC/MCWinCOFFObjectWriter.h new file mode 100644 index 0000000..7a0b1ff --- /dev/null +++ b/include/llvm/MC/MCWinCOFFObjectWriter.h @@ -0,0 +1,36 @@ +//===-- llvm/MC/MCWinCOFFObjectWriter.h - Win COFF Object Writer *- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_MC_MCWINCOFFOBJECTWRITER_H +#define LLVM_MC_MCWINCOFFOBJECTWRITER_H + +namespace llvm { + class MCWinCOFFObjectTargetWriter { + const unsigned Machine; + + protected: + MCWinCOFFObjectTargetWriter(unsigned Machine_); + + public: + virtual ~MCWinCOFFObjectTargetWriter() {} + + unsigned getMachine() const { return Machine; } + virtual unsigned getRelocType(unsigned FixupKind) const = 0; + }; + + /// \brief Construct a new Win COFF writer instance. + /// + /// \param MOTW - The target specific WinCOFF writer subclass. + /// \param OS - The stream to write to. + /// \returns The constructed object writer. + MCObjectWriter *createWinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW, + raw_ostream &OS); +} // End llvm namespace + +#endif diff --git a/include/llvm/Metadata.h b/include/llvm/Metadata.h index 59b4b26..29458ef 100644 --- a/include/llvm/Metadata.h +++ b/include/llvm/Metadata.h @@ -36,6 +36,7 @@ template<typename ValueSubClass, typename ItemParentClass> /// These are used to efficiently contain a byte sequence for metadata. /// MDString is always unnamed. 
class MDString : public Value { + virtual void anchor(); MDString(const MDString &); // DO NOT IMPLEMENT StringRef Str; @@ -134,6 +135,9 @@ public: /// deleteTemporary - Deallocate a node created by getTemporary. The /// node must not have any users. static void deleteTemporary(MDNode *N); + + /// replaceOperandWith - Replace a specific operand. + void replaceOperandWith(unsigned i, Value *NewVal); /// getOperand - Return specified operand. Value *getOperand(unsigned i) const; diff --git a/include/llvm/Module.h b/include/llvm/Module.h index 8ce5ec4..4ca8fde 100644 --- a/include/llvm/Module.h +++ b/include/llvm/Module.h @@ -154,6 +154,39 @@ public: /// An enumeration for describing the size of a pointer on the target machine. enum PointerSize { AnyPointerSize, Pointer32, Pointer64 }; + /// An enumeration for the supported behaviors of module flags. The following + /// module flags behavior values are supported: + /// + /// Value Behavior + /// ----- -------- + /// 1 Error + /// Emits an error if two values disagree. + /// + /// 2 Warning + /// Emits a warning if two values disagree. + /// + /// 3 Require + /// Emits an error when the specified value is not present + /// or doesn't have the specified value. It is an error for + /// two (or more) llvm.module.flags with the same ID to have + /// the Require behavior but different values. There may be + /// multiple Require flags per ID. + /// + /// 4 Override + /// Uses the specified value if the two values disagree. It + /// is an error for two (or more) llvm.module.flags with the + /// same ID to have the Override behavior but different + /// values. 
+ enum ModFlagBehavior { Error = 1, Warning = 2, Require = 3, Override = 4 }; + + struct ModuleFlagEntry { + ModFlagBehavior Behavior; + MDString *Key; + Value *Val; + ModuleFlagEntry(ModFlagBehavior B, MDString *K, Value *V) + : Behavior(B), Key(K), Val(V) {} + }; + /// @} /// @name Member Variables /// @{ @@ -373,6 +406,30 @@ public: void eraseNamedMetadata(NamedMDNode *NMD); /// @} +/// @name Module Flags Accessors +/// @{ + + /// getModuleFlagsMetadata - Returns the module flags in the provided vector. + void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const; + + /// getModuleFlagsMetadata - Returns the NamedMDNode in the module that + /// represents module-level flags. This method returns null if there are no + /// module-level flags. + NamedMDNode *getModuleFlagsMetadata() const; + + /// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module + /// that represents module-level flags. If module-level flags aren't found, + /// it creates the named metadata that contains them. + NamedMDNode *getOrInsertModuleFlagsMetadata(); + + /// addModuleFlag - Add a module-level flag to the module-level flags + /// metadata. It will create the module-level flags named metadata if it + /// doesn't already exist. 
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Value *Val); + void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val); + void addModuleFlag(MDNode *Node); + +/// @} /// @name Materialization /// @{ diff --git a/include/llvm/Object/Archive.h b/include/llvm/Object/Archive.h index 9e1369a..e6df856 100644 --- a/include/llvm/Object/Archive.h +++ b/include/llvm/Object/Archive.h @@ -22,6 +22,7 @@ namespace llvm { namespace object { class Archive : public Binary { + virtual void anchor(); public: class Child { const Archive *Parent; diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h index 73bde8e..4f90187 100644 --- a/include/llvm/Object/COFF.h +++ b/include/llvm/Object/COFF.h @@ -109,11 +109,8 @@ protected: virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const; virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const; virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const; - virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const; - virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const; - virtual error_code isSymbolWeak(DataRefImpl Symb, bool &Res) const; + virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const; virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; - virtual error_code isSymbolAbsolute(DataRefImpl Symb, bool &Res) const; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const; @@ -148,16 +145,26 @@ protected: virtual error_code getRelocationValueString(DataRefImpl Rel, SmallVectorImpl<char> &Result) const; + virtual error_code getLibraryNext(DataRefImpl LibData, + LibraryRef &Result) const; + virtual error_code getLibraryPath(DataRefImpl LibData, + StringRef &Result) const; + public: COFFObjectFile(MemoryBuffer *Object, error_code &ec); virtual symbol_iterator begin_symbols() const; virtual symbol_iterator end_symbols() const; + virtual symbol_iterator 
begin_dynamic_symbols() const; + virtual symbol_iterator end_dynamic_symbols() const; + virtual library_iterator begin_libraries_needed() const; + virtual library_iterator end_libraries_needed() const; virtual section_iterator begin_sections() const; virtual section_iterator end_sections() const; virtual uint8_t getBytesInAddress() const; virtual StringRef getFileFormatName() const; virtual unsigned getArch() const; + virtual StringRef getLoadName() const; error_code getHeader(const coff_file_header *&Res) const; error_code getSection(int32_t index, const coff_section *&Res) const; diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h new file mode 100644 index 0000000..e746b0a --- /dev/null +++ b/include/llvm/Object/ELF.h @@ -0,0 +1,1827 @@ +//===- ELF.h - ELF object file implementation -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the ELFObjectFile template class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_OBJECT_ELF_H +#define LLVM_OBJECT_ELF_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/Triple.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Object/ObjectFile.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ELF.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <limits> +#include <utility> + +namespace llvm { +namespace object { + +// Templates to choose Elf_Addr and Elf_Off depending on is64Bits. 
+template<support::endianness target_endianness> +struct ELFDataTypeTypedefHelperCommon { + typedef support::detail::packed_endian_specific_integral + <uint16_t, target_endianness, support::aligned> Elf_Half; + typedef support::detail::packed_endian_specific_integral + <uint32_t, target_endianness, support::aligned> Elf_Word; + typedef support::detail::packed_endian_specific_integral + <int32_t, target_endianness, support::aligned> Elf_Sword; + typedef support::detail::packed_endian_specific_integral + <uint64_t, target_endianness, support::aligned> Elf_Xword; + typedef support::detail::packed_endian_specific_integral + <int64_t, target_endianness, support::aligned> Elf_Sxword; +}; + +template<support::endianness target_endianness, bool is64Bits> +struct ELFDataTypeTypedefHelper; + +/// ELF 32bit types. +template<support::endianness target_endianness> +struct ELFDataTypeTypedefHelper<target_endianness, false> + : ELFDataTypeTypedefHelperCommon<target_endianness> { + typedef uint32_t value_type; + typedef support::detail::packed_endian_specific_integral + <value_type, target_endianness, support::aligned> Elf_Addr; + typedef support::detail::packed_endian_specific_integral + <value_type, target_endianness, support::aligned> Elf_Off; +}; + +/// ELF 64bit types. +template<support::endianness target_endianness> +struct ELFDataTypeTypedefHelper<target_endianness, true> + : ELFDataTypeTypedefHelperCommon<target_endianness>{ + typedef uint64_t value_type; + typedef support::detail::packed_endian_specific_integral + <value_type, target_endianness, support::aligned> Elf_Addr; + typedef support::detail::packed_endian_specific_integral + <value_type, target_endianness, support::aligned> Elf_Off; +}; + +// I really don't like doing this, but the alternative is copypasta. 
+#define LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Addr Elf_Addr; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Off Elf_Off; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Half Elf_Half; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Word Elf_Word; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sword Elf_Sword; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Xword Elf_Xword; \ +typedef typename \ + ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sxword Elf_Sxword; + + // Section header. +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Shdr_Base; + +template<support::endianness target_endianness> +struct Elf_Shdr_Base<target_endianness, false> { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Word sh_name; // Section name (index into string table) + Elf_Word sh_type; // Section type (SHT_*) + Elf_Word sh_flags; // Section flags (SHF_*) + Elf_Addr sh_addr; // Address where section is to be loaded + Elf_Off sh_offset; // File offset of section data, in bytes + Elf_Word sh_size; // Size of section, in bytes + Elf_Word sh_link; // Section type-specific header table index link + Elf_Word sh_info; // Section type-specific extra information + Elf_Word sh_addralign;// Section address alignment + Elf_Word sh_entsize; // Size of records contained within the section +}; + +template<support::endianness target_endianness> +struct Elf_Shdr_Base<target_endianness, true> { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + Elf_Word sh_name; // Section name (index into string table) + Elf_Word sh_type; // Section type (SHT_*) + Elf_Xword sh_flags; // Section flags (SHF_*) + Elf_Addr sh_addr; // Address where section is to be loaded + Elf_Off sh_offset; // File offset of 
section data, in bytes + Elf_Xword sh_size; // Size of section, in bytes + Elf_Word sh_link; // Section type-specific header table index link + Elf_Word sh_info; // Section type-specific extra information + Elf_Xword sh_addralign;// Section address alignment + Elf_Xword sh_entsize; // Size of records contained within the section +}; + +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Shdr_Impl : Elf_Shdr_Base<target_endianness, is64Bits> { + using Elf_Shdr_Base<target_endianness, is64Bits>::sh_entsize; + using Elf_Shdr_Base<target_endianness, is64Bits>::sh_size; + + /// @brief Get the number of entities this section contains if it has any. + unsigned getEntityCount() const { + if (sh_entsize == 0) + return 0; + return sh_size / sh_entsize; + } +}; + +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Sym_Base; + +template<support::endianness target_endianness> +struct Elf_Sym_Base<target_endianness, false> { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Word st_name; // Symbol name (index into string table) + Elf_Addr st_value; // Value or address associated with the symbol + Elf_Word st_size; // Size of the symbol + unsigned char st_info; // Symbol's type and binding attributes + unsigned char st_other; // Must be zero; reserved + Elf_Half st_shndx; // Which section (header table index) it's defined in +}; + +template<support::endianness target_endianness> +struct Elf_Sym_Base<target_endianness, true> { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + Elf_Word st_name; // Symbol name (index into string table) + unsigned char st_info; // Symbol's type and binding attributes + unsigned char st_other; // Must be zero; reserved + Elf_Half st_shndx; // Which section (header table index) it's defined in + Elf_Addr st_value; // Value or address associated with the symbol + Elf_Xword st_size; // Size of the symbol +}; + +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Sym_Impl : 
Elf_Sym_Base<target_endianness, is64Bits> { + using Elf_Sym_Base<target_endianness, is64Bits>::st_info; + + // These accessors and mutators correspond to the ELF32_ST_BIND, + // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification: + unsigned char getBinding() const { return st_info >> 4; } + unsigned char getType() const { return st_info & 0x0f; } + void setBinding(unsigned char b) { setBindingAndType(b, getType()); } + void setType(unsigned char t) { setBindingAndType(getBinding(), t); } + void setBindingAndType(unsigned char b, unsigned char t) { + st_info = (b << 4) + (t & 0x0f); + } +}; + +// Elf_Dyn: Entry in the dynamic table +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Dyn_Base; + +template<support::endianness target_endianness> +struct Elf_Dyn_Base<target_endianness, false> { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Sword d_tag; + union { + Elf_Word d_val; + Elf_Addr d_ptr; + } d_un; +}; + +template<support::endianness target_endianness> +struct Elf_Dyn_Base<target_endianness, true> { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + Elf_Sxword d_tag; + union { + Elf_Xword d_val; + Elf_Addr d_ptr; + } d_un; +}; + +template<support::endianness target_endianness, bool is64Bits> +struct Elf_Dyn_Impl : Elf_Dyn_Base<target_endianness, is64Bits> { + using Elf_Dyn_Base<target_endianness, is64Bits>::d_tag; + using Elf_Dyn_Base<target_endianness, is64Bits>::d_un; + int64_t getTag() const { return d_tag; } + uint64_t getVal() const { return d_un.d_val; } + uint64_t getPtr() const { return d_un.ptr; } +}; + +template<support::endianness target_endianness, bool is64Bits> +class ELFObjectFile; + +// DynRefImpl: Reference to an entry in the dynamic table +// This is an ELF-specific interface. 
+template<support::endianness target_endianness, bool is64Bits> +class DynRefImpl { + typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn; + typedef ELFObjectFile<target_endianness, is64Bits> OwningType; + + DataRefImpl DynPimpl; + const OwningType *OwningObject; + +public: + DynRefImpl() : OwningObject(NULL) { + std::memset(&DynPimpl, 0, sizeof(DynPimpl)); + } + + DynRefImpl(DataRefImpl DynP, const OwningType *Owner); + + bool operator==(const DynRefImpl &Other) const; + bool operator <(const DynRefImpl &Other) const; + + error_code getNext(DynRefImpl &Result) const; + int64_t getTag() const; + uint64_t getVal() const; + uint64_t getPtr() const; + + DataRefImpl getRawDataRefImpl() const; +}; + +// Elf_Rel: Elf Relocation +template<support::endianness target_endianness, bool is64Bits, bool isRela> +struct Elf_Rel_Base; + +template<support::endianness target_endianness> +struct Elf_Rel_Base<target_endianness, false, false> { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf_Word r_info; // Symbol table index and type of relocation to apply +}; + +template<support::endianness target_endianness> +struct Elf_Rel_Base<target_endianness, true, false> { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + Elf_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf_Xword r_info; // Symbol table index and type of relocation to apply +}; + +template<support::endianness target_endianness> +struct Elf_Rel_Base<target_endianness, false, true> { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf_Word r_info; // Symbol table index and type of relocation to apply + Elf_Sword r_addend; // Compute value for relocatable field by adding this +}; + +template<support::endianness target_endianness> +struct Elf_Rel_Base<target_endianness, true, true> { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + 
Elf_Addr r_offset; // Location (file byte offset, or program virtual addr) + Elf_Xword r_info; // Symbol table index and type of relocation to apply + Elf_Sxword r_addend; // Compute value for relocatable field by adding this. +}; + +template<support::endianness target_endianness, bool is64Bits, bool isRela> +struct Elf_Rel_Impl; + +template<support::endianness target_endianness, bool isRela> +struct Elf_Rel_Impl<target_endianness, true, isRela> + : Elf_Rel_Base<target_endianness, true, isRela> { + using Elf_Rel_Base<target_endianness, true, isRela>::r_info; + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + + // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE, + // and ELF64_R_INFO macros defined in the ELF specification: + uint64_t getSymbol() const { return (r_info >> 32); } + unsigned char getType() const { + return (unsigned char) (r_info & 0xffffffffL); + } + void setSymbol(uint64_t s) { setSymbolAndType(s, getType()); } + void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); } + void setSymbolAndType(uint64_t s, unsigned char t) { + r_info = (s << 32) + (t&0xffffffffL); + } +}; + +template<support::endianness target_endianness, bool isRela> +struct Elf_Rel_Impl<target_endianness, false, isRela> + : Elf_Rel_Base<target_endianness, false, isRela> { + using Elf_Rel_Base<target_endianness, false, isRela>::r_info; + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + + // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE, + // and ELF32_R_INFO macros defined in the ELF specification: + uint32_t getSymbol() const { return (r_info >> 8); } + unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); } + void setSymbol(uint32_t s) { setSymbolAndType(s, getType()); } + void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); } + void setSymbolAndType(uint32_t s, unsigned char t) { + r_info = (s << 8) + t; + } +}; + + +template<support::endianness target_endianness, bool is64Bits> +class 
ELFObjectFile : public ObjectFile { + LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) + + typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr; + typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym; + typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn; + typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel; + typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela; + typedef DynRefImpl<target_endianness, is64Bits> DynRef; + typedef content_iterator<DynRef> dyn_iterator; + +protected: + struct Elf_Ehdr { + unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes + Elf_Half e_type; // Type of file (see ET_*) + Elf_Half e_machine; // Required architecture for this file (see EM_*) + Elf_Word e_version; // Must be equal to 1 + Elf_Addr e_entry; // Address to jump to in order to start program + Elf_Off e_phoff; // Program header table's file offset, in bytes + Elf_Off e_shoff; // Section header table's file offset, in bytes + Elf_Word e_flags; // Processor-specific flags + Elf_Half e_ehsize; // Size of ELF header, in bytes + Elf_Half e_phentsize;// Size of an entry in the program header table + Elf_Half e_phnum; // Number of entries in the program header table + Elf_Half e_shentsize;// Size of an entry in the section header table + Elf_Half e_shnum; // Number of entries in the section header table + Elf_Half e_shstrndx; // Section header table index of section name + // string table + bool checkMagic() const { + return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0; + } + unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; } + unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; } + }; + // This flag is used for classof, to distinguish ELFObjectFile from + // its subclass. If more subclasses will be created, this flag will + // have to become an enum. 
+ bool isDyldELFObject; + +private: + typedef SmallVector<const Elf_Shdr*, 1> Sections_t; + typedef DenseMap<unsigned, unsigned> IndexMap_t; + typedef DenseMap<const Elf_Shdr*, SmallVector<uint32_t, 1> > RelocMap_t; + + const Elf_Ehdr *Header; + const Elf_Shdr *SectionHeaderTable; + const Elf_Shdr *dot_shstrtab_sec; // Section header string table. + const Elf_Shdr *dot_strtab_sec; // Symbol header string table. + const Elf_Shdr *dot_dynstr_sec; // Dynamic symbol string table. + Sections_t SymbolTableSections; + IndexMap_t SymbolTableSectionsIndexMap; + DenseMap<const Elf_Sym*, ELF::Elf64_Word> ExtendedSymbolTable; + + const Elf_Shdr *dot_dynamic_sec; // .dynamic + // Pointer to SONAME entry in dynamic string table + // This is set the first time getLoadName is called. + mutable const char *dt_soname; + + /// @brief Map sections to an array of relocation sections that reference + /// them sorted by section index. + RelocMap_t SectionRelocMap; + + /// @brief Get the relocation section that contains \a Rel. + const Elf_Shdr *getRelSection(DataRefImpl Rel) const { + return getSection(Rel.w.b); + } + + bool isRelocationHasAddend(DataRefImpl Rel) const; + template<typename T> + const T *getEntry(uint16_t Section, uint32_t Entry) const; + template<typename T> + const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const; + const Elf_Shdr *getSection(DataRefImpl index) const; + const Elf_Shdr *getSection(uint32_t index) const; + const Elf_Rel *getRel(DataRefImpl Rel) const; + const Elf_Rela *getRela(DataRefImpl Rela) const; + const char *getString(uint32_t section, uint32_t offset) const; + const char *getString(const Elf_Shdr *section, uint32_t offset) const; + error_code getSymbolName(const Elf_Shdr *section, + const Elf_Sym *Symb, + StringRef &Res) const; + void VerifyStrTab(const Elf_Shdr *sh) const; + +protected: + const Elf_Sym *getSymbol(DataRefImpl Symb) const; // FIXME: Should be private? 
+ void validateSymbol(DataRefImpl Symb) const; + +public: + const Elf_Dyn *getDyn(DataRefImpl DynData) const; + +protected: + virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const; + virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const; + virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const; + virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const; + virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const; + virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const; + virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const; + virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; + virtual error_code getSymbolSection(DataRefImpl Symb, + section_iterator &Res) const; + + friend class DynRefImpl<target_endianness, is64Bits>; + virtual error_code getDynNext(DataRefImpl DynData, DynRef &Result) const; + + virtual error_code getLibraryNext(DataRefImpl Data, LibraryRef &Result) const; + virtual error_code getLibraryPath(DataRefImpl Data, StringRef &Res) const; + + virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const; + virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const; + virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const; + virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const; + virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res) const; + virtual error_code getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const; + virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const; + virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const; + virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const; + virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, + bool &Result) const; + virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const; + virtual 
relocation_iterator getSectionRelEnd(DataRefImpl Sec) const; + + virtual error_code getRelocationNext(DataRefImpl Rel, + RelocationRef &Res) const; + virtual error_code getRelocationAddress(DataRefImpl Rel, + uint64_t &Res) const; + virtual error_code getRelocationOffset(DataRefImpl Rel, + uint64_t &Res) const; + virtual error_code getRelocationSymbol(DataRefImpl Rel, + SymbolRef &Res) const; + virtual error_code getRelocationType(DataRefImpl Rel, + uint64_t &Res) const; + virtual error_code getRelocationTypeName(DataRefImpl Rel, + SmallVectorImpl<char> &Result) const; + virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel, + int64_t &Res) const; + virtual error_code getRelocationValueString(DataRefImpl Rel, + SmallVectorImpl<char> &Result) const; + +public: + ELFObjectFile(MemoryBuffer *Object, error_code &ec); + virtual symbol_iterator begin_symbols() const; + virtual symbol_iterator end_symbols() const; + + virtual symbol_iterator begin_dynamic_symbols() const; + virtual symbol_iterator end_dynamic_symbols() const; + + virtual section_iterator begin_sections() const; + virtual section_iterator end_sections() const; + + virtual library_iterator begin_libraries_needed() const; + virtual library_iterator end_libraries_needed() const; + + virtual dyn_iterator begin_dynamic_table() const; + virtual dyn_iterator end_dynamic_table() const; + + virtual uint8_t getBytesInAddress() const; + virtual StringRef getFileFormatName() const; + virtual unsigned getArch() const; + virtual StringRef getLoadName() const; + + uint64_t getNumSections() const; + uint64_t getStringTableIndex() const; + ELF::Elf64_Word getSymbolTableIndex(const Elf_Sym *symb) const; + const Elf_Shdr *getSection(const Elf_Sym *symb) const; + + // Methods for type inquiry through isa, cast, and dyn_cast + bool isDyldType() const { return isDyldELFObject; } + static inline bool classof(const Binary *v) { + return v->getType() == Binary::isELF; + } + static inline bool classof(const ELFObjectFile 
*v) { return true; } +}; + +template<support::endianness target_endianness, bool is64Bits> +void ELFObjectFile<target_endianness, is64Bits> + ::validateSymbol(DataRefImpl Symb) const { + const Elf_Sym *symb = getSymbol(Symb); + const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b]; + // FIXME: We really need to do proper error handling in the case of an invalid + // input file. Because we don't use exceptions, I think we'll just pass + // an error object around. + if (!( symb + && SymbolTableSection + && symb >= (const Elf_Sym*)(base() + + SymbolTableSection->sh_offset) + && symb < (const Elf_Sym*)(base() + + SymbolTableSection->sh_offset + + SymbolTableSection->sh_size))) + // FIXME: Proper error handling. + report_fatal_error("Symb must point to a valid symbol!"); +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolNext(DataRefImpl Symb, + SymbolRef &Result) const { + validateSymbol(Symb); + const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b]; + + ++Symb.d.a; + // Check to see if we are at the end of this symbol table. + if (Symb.d.a >= SymbolTableSection->getEntityCount()) { + // We are at the end. If there are other symbol tables, jump to them. + // If the symbol table is .dynsym, we are iterating dynamic symbols, + // and there is only one table of these. + if (Symb.d.b != 0) { + ++Symb.d.b; + Symb.d.a = 1; // The 0th symbol in ELF is fake. + } + // Otherwise return the terminator. 
+ if (Symb.d.b == 0 || Symb.d.b >= SymbolTableSections.size()) { + Symb.d.a = std::numeric_limits<uint32_t>::max(); + Symb.d.b = std::numeric_limits<uint32_t>::max(); + } + } + + Result = SymbolRef(Symb, this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolName(DataRefImpl Symb, + StringRef &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + return getSymbolName(SymbolTableSections[Symb.d.b], symb, Result); +} + +template<support::endianness target_endianness, bool is64Bits> +ELF::Elf64_Word ELFObjectFile<target_endianness, is64Bits> + ::getSymbolTableIndex(const Elf_Sym *symb) const { + if (symb->st_shndx == ELF::SHN_XINDEX) + return ExtendedSymbolTable.lookup(symb); + return symb->st_shndx; +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr * +ELFObjectFile<target_endianness, is64Bits> + ::getSection(const Elf_Sym *symb) const { + if (symb->st_shndx == ELF::SHN_XINDEX) + return getSection(ExtendedSymbolTable.lookup(symb)); + if (symb->st_shndx >= ELF::SHN_LORESERVE) + return 0; + return getSection(symb->st_shndx); +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolFileOffset(DataRefImpl Symb, + uint64_t &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + const Elf_Shdr *Section; + switch (getSymbolTableIndex(symb)) { + case ELF::SHN_COMMON: + // Unintialized symbols have no offset in the object file + case ELF::SHN_UNDEF: + Result = UnknownAddressOrSize; + return object_error::success; + case ELF::SHN_ABS: + Result = symb->st_value; + return object_error::success; + default: Section = getSection(symb); + } + + switch (symb->getType()) { + case ELF::STT_SECTION: + Result = Section ? 
Section->sh_addr : UnknownAddressOrSize; + return object_error::success; + case ELF::STT_FUNC: + case ELF::STT_OBJECT: + case ELF::STT_NOTYPE: + Result = symb->st_value + + (Section ? Section->sh_offset : 0); + return object_error::success; + default: + Result = UnknownAddressOrSize; + return object_error::success; + } +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolAddress(DataRefImpl Symb, + uint64_t &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + const Elf_Shdr *Section; + switch (getSymbolTableIndex(symb)) { + case ELF::SHN_COMMON: + case ELF::SHN_UNDEF: + Result = UnknownAddressOrSize; + return object_error::success; + case ELF::SHN_ABS: + Result = symb->st_value; + return object_error::success; + default: Section = getSection(symb); + } + + switch (symb->getType()) { + case ELF::STT_SECTION: + Result = Section ? Section->sh_addr : UnknownAddressOrSize; + return object_error::success; + case ELF::STT_FUNC: + case ELF::STT_OBJECT: + case ELF::STT_NOTYPE: + Result = symb->st_value + (Section ? 
Section->sh_addr : 0); + return object_error::success; + default: + Result = UnknownAddressOrSize; + return object_error::success; + } +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolSize(DataRefImpl Symb, + uint64_t &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + if (symb->st_size == 0) + Result = UnknownAddressOrSize; + Result = symb->st_size; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolNMTypeChar(DataRefImpl Symb, + char &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + const Elf_Shdr *Section = getSection(symb); + + char ret = '?'; + + if (Section) { + switch (Section->sh_type) { + case ELF::SHT_PROGBITS: + case ELF::SHT_DYNAMIC: + switch (Section->sh_flags) { + case (ELF::SHF_ALLOC | ELF::SHF_EXECINSTR): + ret = 't'; break; + case (ELF::SHF_ALLOC | ELF::SHF_WRITE): + ret = 'd'; break; + case ELF::SHF_ALLOC: + case (ELF::SHF_ALLOC | ELF::SHF_MERGE): + case (ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS): + ret = 'r'; break; + } + break; + case ELF::SHT_NOBITS: ret = 'b'; + } + } + + switch (getSymbolTableIndex(symb)) { + case ELF::SHN_UNDEF: + if (ret == '?') + ret = 'U'; + break; + case ELF::SHN_ABS: ret = 'a'; break; + case ELF::SHN_COMMON: ret = 'c'; break; + } + + switch (symb->getBinding()) { + case ELF::STB_GLOBAL: ret = ::toupper(ret); break; + case ELF::STB_WEAK: + if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF) + ret = 'w'; + else + if (symb->getType() == ELF::STT_OBJECT) + ret = 'V'; + else + ret = 'W'; + } + + if (ret == '?' 
&& symb->getType() == ELF::STT_SECTION) { + StringRef name; + if (error_code ec = getSymbolName(Symb, name)) + return ec; + Result = StringSwitch<char>(name) + .StartsWith(".debug", 'N') + .StartsWith(".note", 'n') + .Default('?'); + return object_error::success; + } + + Result = ret; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolType(DataRefImpl Symb, + SymbolRef::Type &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + + switch (symb->getType()) { + case ELF::STT_NOTYPE: + Result = SymbolRef::ST_Unknown; + break; + case ELF::STT_SECTION: + Result = SymbolRef::ST_Debug; + break; + case ELF::STT_FILE: + Result = SymbolRef::ST_File; + break; + case ELF::STT_FUNC: + Result = SymbolRef::ST_Function; + break; + case ELF::STT_OBJECT: + case ELF::STT_COMMON: + case ELF::STT_TLS: + Result = SymbolRef::ST_Data; + break; + default: + Result = SymbolRef::ST_Other; + break; + } + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolFlags(DataRefImpl Symb, + uint32_t &Result) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + + Result = SymbolRef::SF_None; + + if (symb->getBinding() != ELF::STB_LOCAL) + Result |= SymbolRef::SF_Global; + + if (symb->getBinding() == ELF::STB_WEAK) + Result |= SymbolRef::SF_Weak; + + if (symb->st_shndx == ELF::SHN_ABS) + Result |= SymbolRef::SF_Absolute; + + if (symb->getType() == ELF::STT_FILE || + symb->getType() == ELF::STT_SECTION) + Result |= SymbolRef::SF_FormatSpecific; + + if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF) + Result |= SymbolRef::SF_Undefined; + + if (symb->getType() == ELF::STT_COMMON || + getSymbolTableIndex(symb) == ELF::SHN_COMMON) + Result |= SymbolRef::SF_Common; + + if (symb->getType() == ELF::STT_TLS) + Result |= 
SymbolRef::SF_ThreadLocal; + + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolSection(DataRefImpl Symb, + section_iterator &Res) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + const Elf_Shdr *sec = getSection(symb); + if (!sec) + Res = end_sections(); + else { + DataRefImpl Sec; + Sec.p = reinterpret_cast<intptr_t>(sec); + Res = section_iterator(SectionRef(Sec, this)); + } + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionNext(DataRefImpl Sec, SectionRef &Result) const { + const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p); + sec += Header->e_shentsize; + Sec.p = reinterpret_cast<intptr_t>(sec); + Result = SectionRef(Sec, this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionName(DataRefImpl Sec, + StringRef &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + Result = StringRef(getString(dot_shstrtab_sec, sec->sh_name)); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionAddress(DataRefImpl Sec, + uint64_t &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + Result = sec->sh_addr; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionSize(DataRefImpl Sec, + uint64_t &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + Result = sec->sh_size; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> 
+error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionContents(DataRefImpl Sec, + StringRef &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + const char *start = (const char*)base() + sec->sh_offset; + Result = StringRef(start, sec->sh_size); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSectionAlignment(DataRefImpl Sec, + uint64_t &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + Result = sec->sh_addralign; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::isSectionText(DataRefImpl Sec, + bool &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + if (sec->sh_flags & ELF::SHF_EXECINSTR) + Result = true; + else + Result = false; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::isSectionData(DataRefImpl Sec, + bool &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) + && sec->sh_type == ELF::SHT_PROGBITS) + Result = true; + else + Result = false; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::isSectionBSS(DataRefImpl Sec, + bool &Result) const { + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) + && sec->sh_type == ELF::SHT_NOBITS) + Result = true; + else + Result = false; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + 
::sectionContainsSymbol(DataRefImpl Sec, + DataRefImpl Symb, + bool &Result) const { + // FIXME: Unimplemented. + Result = false; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +relocation_iterator ELFObjectFile<target_endianness, is64Bits> + ::getSectionRelBegin(DataRefImpl Sec) const { + DataRefImpl RelData; + memset(&RelData, 0, sizeof(RelData)); + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec); + if (sec != 0 && ittr != SectionRelocMap.end()) { + RelData.w.a = getSection(ittr->second[0])->sh_info; + RelData.w.b = ittr->second[0]; + RelData.w.c = 0; + } + return relocation_iterator(RelocationRef(RelData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +relocation_iterator ELFObjectFile<target_endianness, is64Bits> + ::getSectionRelEnd(DataRefImpl Sec) const { + DataRefImpl RelData; + memset(&RelData, 0, sizeof(RelData)); + const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p); + typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec); + if (sec != 0 && ittr != SectionRelocMap.end()) { + // Get the index of the last relocation section for this section. + std::size_t relocsecindex = ittr->second[ittr->second.size() - 1]; + const Elf_Shdr *relocsec = getSection(relocsecindex); + RelData.w.a = relocsec->sh_info; + RelData.w.b = relocsecindex; + RelData.w.c = relocsec->sh_size / relocsec->sh_entsize; + } + return relocation_iterator(RelocationRef(RelData, this)); +} + +// Relocations +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationNext(DataRefImpl Rel, + RelocationRef &Result) const { + ++Rel.w.c; + const Elf_Shdr *relocsec = getSection(Rel.w.b); + if (Rel.w.c >= (relocsec->sh_size / relocsec->sh_entsize)) { + // We have reached the end of the relocations for this section. 
See if there + // is another relocation section. + typename RelocMap_t::mapped_type relocseclist = + SectionRelocMap.lookup(getSection(Rel.w.a)); + + // Do a binary search for the current reloc section index (which must be + // present). Then get the next one. + typename RelocMap_t::mapped_type::const_iterator loc = + std::lower_bound(relocseclist.begin(), relocseclist.end(), Rel.w.b); + ++loc; + + // If there is no next one, don't do anything. The ++Rel.w.c above sets Rel + // to the end iterator. + if (loc != relocseclist.end()) { + Rel.w.b = *loc; + Rel.w.a = 0; + } + } + Result = RelocationRef(Rel, this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationSymbol(DataRefImpl Rel, + SymbolRef &Result) const { + uint32_t symbolIdx; + const Elf_Shdr *sec = getSection(Rel.w.b); + switch (sec->sh_type) { + default : + report_fatal_error("Invalid section type in Rel!"); + case ELF::SHT_REL : { + symbolIdx = getRel(Rel)->getSymbol(); + break; + } + case ELF::SHT_RELA : { + symbolIdx = getRela(Rel)->getSymbol(); + break; + } + } + DataRefImpl SymbolData; + IndexMap_t::const_iterator it = SymbolTableSectionsIndexMap.find(sec->sh_link); + if (it == SymbolTableSectionsIndexMap.end()) + report_fatal_error("Relocation symbol table not found!"); + SymbolData.d.a = symbolIdx; + SymbolData.d.b = it->second; + Result = SymbolRef(SymbolData, this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationAddress(DataRefImpl Rel, + uint64_t &Result) const { + uint64_t offset; + const Elf_Shdr *sec = getSection(Rel.w.b); + switch (sec->sh_type) { + default : + report_fatal_error("Invalid section type in Rel!"); + case ELF::SHT_REL : { + offset = getRel(Rel)->r_offset; + break; + } + case ELF::SHT_RELA : { + offset = getRela(Rel)->r_offset; + break; 
+ } + } + + Result = offset; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationOffset(DataRefImpl Rel, + uint64_t &Result) const { + uint64_t offset; + const Elf_Shdr *sec = getSection(Rel.w.b); + switch (sec->sh_type) { + default : + report_fatal_error("Invalid section type in Rel!"); + case ELF::SHT_REL : { + offset = getRel(Rel)->r_offset; + break; + } + case ELF::SHT_RELA : { + offset = getRela(Rel)->r_offset; + break; + } + } + + Result = offset - sec->sh_addr; + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationType(DataRefImpl Rel, + uint64_t &Result) const { + const Elf_Shdr *sec = getSection(Rel.w.b); + switch (sec->sh_type) { + default : + report_fatal_error("Invalid section type in Rel!"); + case ELF::SHT_REL : { + Result = getRel(Rel)->getType(); + break; + } + case ELF::SHT_RELA : { + Result = getRela(Rel)->getType(); + break; + } + } + return object_error::success; +} + +#define LLVM_ELF_SWITCH_RELOC_TYPE_NAME(enum) \ + case ELF::enum: res = #enum; break; + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationTypeName(DataRefImpl Rel, + SmallVectorImpl<char> &Result) const { + const Elf_Shdr *sec = getSection(Rel.w.b); + uint8_t type; + StringRef res; + switch (sec->sh_type) { + default : + return object_error::parse_failed; + case ELF::SHT_REL : { + type = getRel(Rel)->getType(); + break; + } + case ELF::SHT_RELA : { + type = getRela(Rel)->getType(); + break; + } + } + switch (Header->e_machine) { + case ELF::EM_X86_64: + switch (type) { + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_NONE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC32); + 
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOT32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PLT32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_COPY); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GLOB_DAT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_JUMP_SLOT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_RELATIVE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPCREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32S); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPMOD64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSGD); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSLD); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTTPOFF); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTOFF64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE64); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32_TLSDESC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC); + default: + res = "Unknown"; + } + break; + case ELF::EM_386: + switch (type) { + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_NONE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOT32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PLT32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_COPY); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GLOB_DAT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_JUMP_SLOT); + 
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_RELATIVE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTOFF); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTPC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32PLT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTIE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_PUSH); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_POP); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_PUSH); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_POP); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDO_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE_32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPMOD32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTDESC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_IRELATIVE); + default: + res = "Unknown"; + } + break; + default: + res = "Unknown"; + } + Result.append(res.begin(), res.end()); + return object_error::success; +} + +#undef LLVM_ELF_SWITCH_RELOC_TYPE_NAME + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + 
::getRelocationAdditionalInfo(DataRefImpl Rel, + int64_t &Result) const { + const Elf_Shdr *sec = getSection(Rel.w.b); + switch (sec->sh_type) { + default : + report_fatal_error("Invalid section type in Rel!"); + case ELF::SHT_REL : { + Result = 0; + return object_error::success; + } + case ELF::SHT_RELA : { + Result = getRela(Rel)->r_addend; + return object_error::success; + } + } +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getRelocationValueString(DataRefImpl Rel, + SmallVectorImpl<char> &Result) const { + const Elf_Shdr *sec = getSection(Rel.w.b); + uint8_t type; + StringRef res; + int64_t addend = 0; + uint16_t symbol_index = 0; + switch (sec->sh_type) { + default : + return object_error::parse_failed; + case ELF::SHT_REL : { + type = getRel(Rel)->getType(); + symbol_index = getRel(Rel)->getSymbol(); + // TODO: Read implicit addend from section data. + break; + } + case ELF::SHT_RELA : { + type = getRela(Rel)->getType(); + symbol_index = getRela(Rel)->getSymbol(); + addend = getRela(Rel)->r_addend; + break; + } + } + const Elf_Sym *symb = getEntry<Elf_Sym>(sec->sh_link, symbol_index); + StringRef symname; + if (error_code ec = getSymbolName(getSection(sec->sh_link), symb, symname)) + return ec; + switch (Header->e_machine) { + case ELF::EM_X86_64: + switch (type) { + case ELF::R_X86_64_32S: + res = symname; + break; + case ELF::R_X86_64_PC32: { + std::string fmtbuf; + raw_string_ostream fmt(fmtbuf); + fmt << symname << (addend < 0 ? "" : "+") << addend << "-P"; + fmt.flush(); + Result.append(fmtbuf.begin(), fmtbuf.end()); + } + break; + default: + res = "Unknown"; + } + break; + default: + res = "Unknown"; + } + if (Result.empty()) + Result.append(res.begin(), res.end()); + return object_error::success; +} + +// Verify that the last byte in the string table in a null. 
+template<support::endianness target_endianness, bool is64Bits> +void ELFObjectFile<target_endianness, is64Bits> + ::VerifyStrTab(const Elf_Shdr *sh) const { + const char *strtab = (const char*)base() + sh->sh_offset; + if (strtab[sh->sh_size - 1] != 0) + // FIXME: Proper error handling. + report_fatal_error("String table must end with a null terminator!"); +} + +template<support::endianness target_endianness, bool is64Bits> +ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object + , error_code &ec) + : ObjectFile(Binary::isELF, Object, ec) + , isDyldELFObject(false) + , SectionHeaderTable(0) + , dot_shstrtab_sec(0) + , dot_strtab_sec(0) + , dot_dynstr_sec(0) + , dot_dynamic_sec(0) + , dt_soname(0) { + + const uint64_t FileSize = Data->getBufferSize(); + + if (sizeof(Elf_Ehdr) > FileSize) + // FIXME: Proper error handling. + report_fatal_error("File too short!"); + + Header = reinterpret_cast<const Elf_Ehdr *>(base()); + + if (Header->e_shoff == 0) + return; + + const uint64_t SectionTableOffset = Header->e_shoff; + + if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize) + // FIXME: Proper error handling. + report_fatal_error("Section header table goes past end of file!"); + + // The getNumSections() call below depends on SectionHeaderTable being set. + SectionHeaderTable = + reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset); + const uint64_t SectionTableSize = getNumSections() * Header->e_shentsize; + + if (SectionTableOffset + SectionTableSize > FileSize) + // FIXME: Proper error handling. + report_fatal_error("Section table goes past end of file!"); + + // To find the symbol tables we walk the section table to find SHT_SYMTAB. 
+ const Elf_Shdr* SymbolTableSectionHeaderIndex = 0; + const Elf_Shdr* sh = SectionHeaderTable; + + // Reserve SymbolTableSections[0] for .dynsym + SymbolTableSections.push_back(NULL); + + for (uint64_t i = 0, e = getNumSections(); i != e; ++i) { + if (sh->sh_type == ELF::SHT_SYMTAB_SHNDX) { + if (SymbolTableSectionHeaderIndex) + // FIXME: Proper error handling. + report_fatal_error("More than one .symtab_shndx!"); + SymbolTableSectionHeaderIndex = sh; + } + if (sh->sh_type == ELF::SHT_SYMTAB) { + SymbolTableSectionsIndexMap[i] = SymbolTableSections.size(); + SymbolTableSections.push_back(sh); + } + if (sh->sh_type == ELF::SHT_DYNSYM) { + if (SymbolTableSections[0] != NULL) + // FIXME: Proper error handling. + report_fatal_error("More than one .dynsym!"); + SymbolTableSectionsIndexMap[i] = 0; + SymbolTableSections[0] = sh; + } + if (sh->sh_type == ELF::SHT_REL || sh->sh_type == ELF::SHT_RELA) { + SectionRelocMap[getSection(sh->sh_info)].push_back(i); + } + if (sh->sh_type == ELF::SHT_DYNAMIC) { + if (dot_dynamic_sec != NULL) + // FIXME: Proper error handling. + report_fatal_error("More than one .dynamic!"); + dot_dynamic_sec = sh; + } + ++sh; + } + + // Sort section relocation lists by index. + for (typename RelocMap_t::iterator i = SectionRelocMap.begin(), + e = SectionRelocMap.end(); i != e; ++i) { + std::sort(i->second.begin(), i->second.end()); + } + + // Get string table sections. + dot_shstrtab_sec = getSection(getStringTableIndex()); + if (dot_shstrtab_sec) { + // Verify that the last byte in the string table in a null. + VerifyStrTab(dot_shstrtab_sec); + } + + // Merge this into the above loop. 
+ for (const char *i = reinterpret_cast<const char *>(SectionHeaderTable), + *e = i + getNumSections() * Header->e_shentsize; + i != e; i += Header->e_shentsize) { + const Elf_Shdr *sh = reinterpret_cast<const Elf_Shdr*>(i); + if (sh->sh_type == ELF::SHT_STRTAB) { + StringRef SectionName(getString(dot_shstrtab_sec, sh->sh_name)); + if (SectionName == ".strtab") { + if (dot_strtab_sec != 0) + // FIXME: Proper error handling. + report_fatal_error("Already found section named .strtab!"); + dot_strtab_sec = sh; + VerifyStrTab(dot_strtab_sec); + } else if (SectionName == ".dynstr") { + if (dot_dynstr_sec != 0) + // FIXME: Proper error handling. + report_fatal_error("Already found section named .dynstr!"); + dot_dynstr_sec = sh; + VerifyStrTab(dot_dynstr_sec); + } + } + } + + // Build symbol name side-mapping if there is one. + if (SymbolTableSectionHeaderIndex) { + const Elf_Word *ShndxTable = reinterpret_cast<const Elf_Word*>(base() + + SymbolTableSectionHeaderIndex->sh_offset); + error_code ec; + for (symbol_iterator si = begin_symbols(), + se = end_symbols(); si != se; si.increment(ec)) { + if (ec) + report_fatal_error("Fewer extended symbol table entries than symbols!"); + if (*ShndxTable != ELF::SHN_UNDEF) + ExtendedSymbolTable[getSymbol(si->getRawDataRefImpl())] = *ShndxTable; + ++ShndxTable; + } + } +} + +template<support::endianness target_endianness, bool is64Bits> +symbol_iterator ELFObjectFile<target_endianness, is64Bits> + ::begin_symbols() const { + DataRefImpl SymbolData; + memset(&SymbolData, 0, sizeof(SymbolData)); + if (SymbolTableSections.size() <= 1) { + SymbolData.d.a = std::numeric_limits<uint32_t>::max(); + SymbolData.d.b = std::numeric_limits<uint32_t>::max(); + } else { + SymbolData.d.a = 1; // The 0th symbol in ELF is fake. 
+ SymbolData.d.b = 1; // The 0th table is .dynsym + } + return symbol_iterator(SymbolRef(SymbolData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +symbol_iterator ELFObjectFile<target_endianness, is64Bits> + ::end_symbols() const { + DataRefImpl SymbolData; + memset(&SymbolData, 0, sizeof(SymbolData)); + SymbolData.d.a = std::numeric_limits<uint32_t>::max(); + SymbolData.d.b = std::numeric_limits<uint32_t>::max(); + return symbol_iterator(SymbolRef(SymbolData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +symbol_iterator ELFObjectFile<target_endianness, is64Bits> + ::begin_dynamic_symbols() const { + DataRefImpl SymbolData; + memset(&SymbolData, 0, sizeof(SymbolData)); + if (SymbolTableSections[0] == NULL) { + SymbolData.d.a = std::numeric_limits<uint32_t>::max(); + SymbolData.d.b = std::numeric_limits<uint32_t>::max(); + } else { + SymbolData.d.a = 1; // The 0th symbol in ELF is fake. + SymbolData.d.b = 0; // The 0th table is .dynsym + } + return symbol_iterator(SymbolRef(SymbolData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +symbol_iterator ELFObjectFile<target_endianness, is64Bits> + ::end_dynamic_symbols() const { + DataRefImpl SymbolData; + memset(&SymbolData, 0, sizeof(SymbolData)); + SymbolData.d.a = std::numeric_limits<uint32_t>::max(); + SymbolData.d.b = std::numeric_limits<uint32_t>::max(); + return symbol_iterator(SymbolRef(SymbolData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +section_iterator ELFObjectFile<target_endianness, is64Bits> + ::begin_sections() const { + DataRefImpl ret; + memset(&ret, 0, sizeof(DataRefImpl)); + ret.p = reinterpret_cast<intptr_t>(base() + Header->e_shoff); + return section_iterator(SectionRef(ret, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +section_iterator ELFObjectFile<target_endianness, is64Bits> + ::end_sections() const { + DataRefImpl ret; + memset(&ret, 
0, sizeof(DataRefImpl)); + ret.p = reinterpret_cast<intptr_t>(base() + + Header->e_shoff + + (Header->e_shentsize*getNumSections())); + return section_iterator(SectionRef(ret, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +typename ELFObjectFile<target_endianness, is64Bits>::dyn_iterator +ELFObjectFile<target_endianness, is64Bits>::begin_dynamic_table() const { + DataRefImpl DynData; + memset(&DynData, 0, sizeof(DynData)); + if (dot_dynamic_sec == NULL || dot_dynamic_sec->sh_size == 0) { + DynData.d.a = std::numeric_limits<uint32_t>::max(); + } else { + DynData.d.a = 0; + } + return dyn_iterator(DynRef(DynData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +typename ELFObjectFile<target_endianness, is64Bits>::dyn_iterator +ELFObjectFile<target_endianness, is64Bits> + ::end_dynamic_table() const { + DataRefImpl DynData; + memset(&DynData, 0, sizeof(DynData)); + DynData.d.a = std::numeric_limits<uint32_t>::max(); + return dyn_iterator(DynRef(DynData, this)); +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getDynNext(DataRefImpl DynData, + DynRef &Result) const { + ++DynData.d.a; + + // Check to see if we are at the end of .dynamic + if (DynData.d.a >= dot_dynamic_sec->getEntityCount()) { + // We are at the end. Return the terminator. 
+ DynData.d.a = std::numeric_limits<uint32_t>::max(); + } + + Result = DynRef(DynData, this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +StringRef +ELFObjectFile<target_endianness, is64Bits>::getLoadName() const { + if (!dt_soname) { + // Find the DT_SONAME entry + dyn_iterator it = begin_dynamic_table(); + dyn_iterator ie = end_dynamic_table(); + error_code ec; + while (it != ie) { + if (it->getTag() == ELF::DT_SONAME) + break; + it.increment(ec); + if (ec) + report_fatal_error("dynamic table iteration failed"); + } + if (it != ie) { + if (dot_dynstr_sec == NULL) + report_fatal_error("Dynamic string table is missing"); + dt_soname = getString(dot_dynstr_sec, it->getVal()); + } else { + dt_soname = ""; + } + } + return dt_soname; +} + +template<support::endianness target_endianness, bool is64Bits> +library_iterator ELFObjectFile<target_endianness, is64Bits> + ::begin_libraries_needed() const { + // Find the first DT_NEEDED entry + dyn_iterator i = begin_dynamic_table(); + dyn_iterator e = end_dynamic_table(); + error_code ec; + while (i != e) { + if (i->getTag() == ELF::DT_NEEDED) + break; + i.increment(ec); + if (ec) + report_fatal_error("dynamic table iteration failed"); + } + // Use the same DataRefImpl format as DynRef. + return library_iterator(LibraryRef(i->getRawDataRefImpl(), this)); +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getLibraryNext(DataRefImpl Data, + LibraryRef &Result) const { + // Use the same DataRefImpl format as DynRef. + dyn_iterator i = dyn_iterator(DynRef(Data, this)); + dyn_iterator e = end_dynamic_table(); + + // Skip the current dynamic table entry. + error_code ec; + if (i != e) { + i.increment(ec); + // TODO: proper error handling + if (ec) + report_fatal_error("dynamic table iteration failed"); + } + + // Find the next DT_NEEDED entry. 
+ while (i != e) { + if (i->getTag() == ELF::DT_NEEDED) + break; + i.increment(ec); + if (ec) + report_fatal_error("dynamic table iteration failed"); + } + Result = LibraryRef(i->getRawDataRefImpl(), this); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getLibraryPath(DataRefImpl Data, StringRef &Res) const { + dyn_iterator i = dyn_iterator(DynRef(Data, this)); + if (i == end_dynamic_table()) + report_fatal_error("getLibraryPath() called on iterator end"); + + if (i->getTag() != ELF::DT_NEEDED) + report_fatal_error("Invalid library_iterator"); + + // This uses .dynstr to lookup the name of the DT_NEEDED entry. + // This works as long as DT_STRTAB == .dynstr. This is true most of + // the time, but the specification allows exceptions. + // TODO: This should really use DT_STRTAB instead. Doing this requires + // reading the program headers. + if (dot_dynstr_sec == NULL) + report_fatal_error("Dynamic string table is missing"); + Res = getString(dot_dynstr_sec, i->getVal()); + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +library_iterator ELFObjectFile<target_endianness, is64Bits> + ::end_libraries_needed() const { + dyn_iterator e = end_dynamic_table(); + // Use the same DataRefImpl format as DynRef. + return library_iterator(LibraryRef(e->getRawDataRefImpl(), this)); +} + +template<support::endianness target_endianness, bool is64Bits> +uint8_t ELFObjectFile<target_endianness, is64Bits>::getBytesInAddress() const { + return is64Bits ?
8 : 4; +} + +template<support::endianness target_endianness, bool is64Bits> +StringRef ELFObjectFile<target_endianness, is64Bits> + ::getFileFormatName() const { + switch(Header->e_ident[ELF::EI_CLASS]) { + case ELF::ELFCLASS32: + switch(Header->e_machine) { + case ELF::EM_386: + return "ELF32-i386"; + case ELF::EM_X86_64: + return "ELF32-x86-64"; + case ELF::EM_ARM: + return "ELF32-arm"; + default: + return "ELF32-unknown"; + } + case ELF::ELFCLASS64: + switch(Header->e_machine) { + case ELF::EM_386: + return "ELF64-i386"; + case ELF::EM_X86_64: + return "ELF64-x86-64"; + default: + return "ELF64-unknown"; + } + default: + // FIXME: Proper error handling. + report_fatal_error("Invalid ELFCLASS!"); + } +} + +template<support::endianness target_endianness, bool is64Bits> +unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const { + switch(Header->e_machine) { + case ELF::EM_386: + return Triple::x86; + case ELF::EM_X86_64: + return Triple::x86_64; + case ELF::EM_ARM: + return Triple::arm; + default: + return Triple::UnknownArch; + } +} + +template<support::endianness target_endianness, bool is64Bits> +uint64_t ELFObjectFile<target_endianness, is64Bits>::getNumSections() const { + assert(Header && "Header not initialized!"); + if (Header->e_shnum == ELF::SHN_UNDEF) { + assert(SectionHeaderTable && "SectionHeaderTable not initialized!"); + return SectionHeaderTable->sh_size; + } + return Header->e_shnum; +} + +template<support::endianness target_endianness, bool is64Bits> +uint64_t +ELFObjectFile<target_endianness, is64Bits>::getStringTableIndex() const { + if (Header->e_shnum == ELF::SHN_UNDEF) { + if (Header->e_shstrndx == ELF::SHN_HIRESERVE) + return SectionHeaderTable->sh_link; + if (Header->e_shstrndx >= getNumSections()) + return 0; + } + return Header->e_shstrndx; +} + + +template<support::endianness target_endianness, bool is64Bits> +template<typename T> +inline const T * +ELFObjectFile<target_endianness, is64Bits>::getEntry(uint16_t Section, + 
uint32_t Entry) const { + return getEntry<T>(getSection(Section), Entry); +} + +template<support::endianness target_endianness, bool is64Bits> +template<typename T> +inline const T * +ELFObjectFile<target_endianness, is64Bits>::getEntry(const Elf_Shdr * Section, + uint32_t Entry) const { + return reinterpret_cast<const T *>( + base() + + Section->sh_offset + + (Entry * Section->sh_entsize)); +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym * +ELFObjectFile<target_endianness, is64Bits>::getSymbol(DataRefImpl Symb) const { + return getEntry<Elf_Sym>(SymbolTableSections[Symb.d.b], Symb.d.a); +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Dyn * +ELFObjectFile<target_endianness, is64Bits>::getDyn(DataRefImpl DynData) const { + return getEntry<Elf_Dyn>(dot_dynamic_sec, DynData.d.a); +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rel * +ELFObjectFile<target_endianness, is64Bits>::getRel(DataRefImpl Rel) const { + return getEntry<Elf_Rel>(Rel.w.b, Rel.w.c); +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rela * +ELFObjectFile<target_endianness, is64Bits>::getRela(DataRefImpl Rela) const { + return getEntry<Elf_Rela>(Rela.w.b, Rela.w.c); +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr * +ELFObjectFile<target_endianness, is64Bits>::getSection(DataRefImpl Symb) const { + const Elf_Shdr *sec = getSection(Symb.d.b); + if (sec->sh_type != ELF::SHT_SYMTAB && sec->sh_type != ELF::SHT_DYNSYM) + // FIXME: Proper error handling.
+ report_fatal_error("Invalid symbol table section!"); + return sec; +} + +template<support::endianness target_endianness, bool is64Bits> +const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr * +ELFObjectFile<target_endianness, is64Bits>::getSection(uint32_t index) const { + if (index == 0) + return 0; + if (!SectionHeaderTable || index >= getNumSections()) + // FIXME: Proper error handling. + report_fatal_error("Invalid section index!"); + + return reinterpret_cast<const Elf_Shdr *>( + reinterpret_cast<const char *>(SectionHeaderTable) + + (index * Header->e_shentsize)); +} + +template<support::endianness target_endianness, bool is64Bits> +const char *ELFObjectFile<target_endianness, is64Bits> + ::getString(uint32_t section, + ELF::Elf32_Word offset) const { + return getString(getSection(section), offset); +} + +template<support::endianness target_endianness, bool is64Bits> +const char *ELFObjectFile<target_endianness, is64Bits> + ::getString(const Elf_Shdr *section, + ELF::Elf32_Word offset) const { + assert(section && section->sh_type == ELF::SHT_STRTAB && "Invalid section!"); + if (offset >= section->sh_size) + // FIXME: Proper error handling. + report_fatal_error("Symbol name offset outside of string table!"); + return (const char *)base() + section->sh_offset + offset; +} + +template<support::endianness target_endianness, bool is64Bits> +error_code ELFObjectFile<target_endianness, is64Bits> + ::getSymbolName(const Elf_Shdr *section, + const Elf_Sym *symb, + StringRef &Result) const { + if (symb->st_name == 0) { + const Elf_Shdr *section = getSection(symb); + if (!section) + Result = ""; + else + Result = getString(dot_shstrtab_sec, section->sh_name); + return object_error::success; + } + + if (section == SymbolTableSections[0]) { + // Symbol is in .dynsym, use .dynstr string table + Result = getString(dot_dynstr_sec, symb->st_name); + } else { + // Use the default symbol table name section. 
+ Result = getString(dot_strtab_sec, symb->st_name); + } + return object_error::success; +} + +template<support::endianness target_endianness, bool is64Bits> +inline DynRefImpl<target_endianness, is64Bits> + ::DynRefImpl(DataRefImpl DynP, const OwningType *Owner) + : DynPimpl(DynP) + , OwningObject(Owner) {} + +template<support::endianness target_endianness, bool is64Bits> +inline bool DynRefImpl<target_endianness, is64Bits> + ::operator==(const DynRefImpl &Other) const { + return DynPimpl == Other.DynPimpl; +} + +template<support::endianness target_endianness, bool is64Bits> +inline bool DynRefImpl<target_endianness, is64Bits> + ::operator <(const DynRefImpl &Other) const { + return DynPimpl < Other.DynPimpl; +} + +template<support::endianness target_endianness, bool is64Bits> +inline error_code DynRefImpl<target_endianness, is64Bits> + ::getNext(DynRefImpl &Result) const { + return OwningObject->getDynNext(DynPimpl, Result); +} + +template<support::endianness target_endianness, bool is64Bits> +inline int64_t DynRefImpl<target_endianness, is64Bits> + ::getTag() const { + return OwningObject->getDyn(DynPimpl)->d_tag; +} + +template<support::endianness target_endianness, bool is64Bits> +inline uint64_t DynRefImpl<target_endianness, is64Bits> + ::getVal() const { + return OwningObject->getDyn(DynPimpl)->d_un.d_val; +} + +template<support::endianness target_endianness, bool is64Bits> +inline uint64_t DynRefImpl<target_endianness, is64Bits> + ::getPtr() const { + return OwningObject->getDyn(DynPimpl)->d_un.d_ptr; +} + +template<support::endianness target_endianness, bool is64Bits> +inline DataRefImpl DynRefImpl<target_endianness, is64Bits> + ::getRawDataRefImpl() const { + return DynPimpl; +} + +} +} + +#endif diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h index 7e3a90d..1aae85a 100644 --- a/include/llvm/Object/MachO.h +++ b/include/llvm/Object/MachO.h @@ -32,12 +32,17 @@ public: virtual symbol_iterator begin_symbols() const; virtual 
symbol_iterator end_symbols() const; + virtual symbol_iterator begin_dynamic_symbols() const; + virtual symbol_iterator end_dynamic_symbols() const; + virtual library_iterator begin_libraries_needed() const; + virtual library_iterator end_libraries_needed() const; virtual section_iterator begin_sections() const; virtual section_iterator end_sections() const; virtual uint8_t getBytesInAddress() const; virtual StringRef getFileFormatName() const; virtual unsigned getArch() const; + virtual StringRef getLoadName() const; MachOObject *getObject() { return MachOObj; } @@ -53,11 +58,8 @@ protected: virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const; virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const; virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const; - virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const; - virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const; - virtual error_code isSymbolWeak(DataRefImpl Symb, bool &Res) const; + virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const; virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; - virtual error_code isSymbolAbsolute(DataRefImpl Symb, bool &Res) const; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const; @@ -93,6 +95,9 @@ protected: SmallVectorImpl<char> &Result) const; virtual error_code getRelocationHidden(DataRefImpl Rel, bool &Result) const; + virtual error_code getLibraryNext(DataRefImpl LibData, LibraryRef &Res) const; + virtual error_code getLibraryPath(DataRefImpl LibData, StringRef &Res) const; + private: MachOObject *MachOObj; mutable uint32_t RegisteredStringTable; diff --git a/include/llvm/Object/MachOObject.h b/include/llvm/Object/MachOObject.h index 51be847..0560402 100644 --- a/include/llvm/Object/MachOObject.h +++ b/include/llvm/Object/MachOObject.h @@ -177,14 +177,14 @@ public: void ReadULEB128s(uint64_t Index, 
SmallVectorImpl<uint64_t> &Out) const; /// @} - + /// @name Object Dump Facilities /// @{ /// dump - Support for debugging, callable in GDB: V->dump() // void dump() const; void dumpHeader() const; - + /// print - Implement operator<< on Value. /// void print(raw_ostream &O) const; @@ -192,7 +192,7 @@ public: /// @} }; - + inline raw_ostream &operator<<(raw_ostream &OS, const MachOObject &V) { V.print(OS); return OS; diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h index fd180d4..1e9d895 100644 --- a/include/llvm/Object/ObjectFile.h +++ b/include/llvm/Object/ObjectFile.h @@ -20,6 +20,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MemoryBuffer.h" #include <cstring> +#include <vector> namespace llvm { namespace object { @@ -179,14 +180,26 @@ public: } enum Type { + ST_Unknown, // Type not specified ST_Data, ST_Debug, - ST_External, // Defined in another object file ST_File, ST_Function, ST_Other }; + enum Flags { + SF_None = 0, + SF_Undefined = 1U << 0, // Symbol is defined in another object file + SF_Global = 1U << 1, // Global symbol + SF_Weak = 1U << 2, // Weak symbol + SF_Absolute = 1U << 3, // Absolute symbol + SF_ThreadLocal = 1U << 4, // Thread local symbol + SF_Common = 1U << 5, // Symbol has common linkage + SF_FormatSpecific = 1U << 31 // Specific to the object file format + // (e.g. section symbols) + }; + SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner); bool operator==(const SymbolRef &Other) const; @@ -204,19 +217,8 @@ public: /// nm for this symbol. error_code getNMTypeChar(char &Result) const; - /// Returns true for symbols that are internal to the object file format such - /// as section symbols. - error_code isInternal(bool &Result) const; - - /// Returns true for symbols that can be used in another objects, - /// such as library functions - error_code isGlobal(bool &Result) const; - - /// Returns true for weak symbols. 
- error_code isWeak(bool &Result) const; - - /// @brief Return true for absolute symbols. - error_code isAbsolute(bool &Result) const; + /// Get symbol flags (bitwise OR of SymbolRef::Flags) + error_code getFlags(uint32_t &Result) const; /// @brief Get section this symbol is defined in reference to. Result is /// end_sections() if it is undefined or is an absolute symbol. @@ -226,13 +228,39 @@ public: }; typedef content_iterator<SymbolRef> symbol_iterator; +/// LibraryRef - This is a value type class that represents a single library in +/// the list of libraries needed by a shared or dynamic object. +class LibraryRef { + friend class SectionRef; + DataRefImpl LibraryPimpl; + const ObjectFile *OwningObject; + +public: + LibraryRef() : OwningObject(NULL) { + std::memset(&LibraryPimpl, 0, sizeof(LibraryPimpl)); + } + + LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner); + + bool operator==(const LibraryRef &Other) const; + bool operator <(const LibraryRef &Other) const; + + error_code getNext(LibraryRef &Result) const; + + // Get the path to this library, as stored in the object file. + error_code getPath(StringRef &Result) const; + + DataRefImpl getRawDataRefImpl() const; +}; +typedef content_iterator<LibraryRef> library_iterator; + const uint64_t UnknownAddressOrSize = ~0ULL; /// ObjectFile - This class is the base class for all object file types. /// Concrete instances of this object are created by createObjectFile, which /// figure out which type to create. 
class ObjectFile : public Binary { -private: + virtual void anchor(); ObjectFile(); // = delete ObjectFile(const ObjectFile &other); // = delete @@ -260,10 +288,8 @@ protected: virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const = 0; virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const = 0; - virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const = 0; - virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const = 0; - virtual error_code isSymbolWeak(DataRefImpl Symb, bool &Res) const = 0; - virtual error_code isSymbolAbsolute(DataRefImpl Symb, bool &Res) const = 0; + virtual error_code getSymbolFlags(DataRefImpl Symb, + uint32_t &Res) const = 0; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const = 0; @@ -307,14 +333,25 @@ protected: return object_error::success; } + // Same for LibraryRef + friend class LibraryRef; + virtual error_code getLibraryNext(DataRefImpl Lib, LibraryRef &Res) const = 0; + virtual error_code getLibraryPath(DataRefImpl Lib, StringRef &Res) const = 0; + public: virtual symbol_iterator begin_symbols() const = 0; virtual symbol_iterator end_symbols() const = 0; + virtual symbol_iterator begin_dynamic_symbols() const = 0; + virtual symbol_iterator end_dynamic_symbols() const = 0; + virtual section_iterator begin_sections() const = 0; virtual section_iterator end_sections() const = 0; + virtual library_iterator begin_libraries_needed() const = 0; + virtual library_iterator end_libraries_needed() const = 0; + /// @brief The number of bytes used to represent an address in this object /// file format. virtual uint8_t getBytesInAddress() const = 0; @@ -322,6 +359,11 @@ public: virtual StringRef getFileFormatName() const = 0; virtual /* Triple::ArchType */ unsigned getArch() const = 0; + /// For shared objects, returns the name which this object should be + /// loaded from at runtime. 
This corresponds to DT_SONAME on ELF and + /// LC_ID_DYLIB (install name) on MachO. + virtual StringRef getLoadName() const = 0; + /// @returns Pointer to ObjectFile subclass to handle this type of object. /// @param ObjectPath The path to the object file. ObjectPath.isObject must /// return true. @@ -378,20 +420,8 @@ inline error_code SymbolRef::getNMTypeChar(char &Result) const { return OwningObject->getSymbolNMTypeChar(SymbolPimpl, Result); } -inline error_code SymbolRef::isInternal(bool &Result) const { - return OwningObject->isSymbolInternal(SymbolPimpl, Result); -} - -inline error_code SymbolRef::isGlobal(bool &Result) const { - return OwningObject->isSymbolGlobal(SymbolPimpl, Result); -} - -inline error_code SymbolRef::isWeak(bool &Result) const { - return OwningObject->isSymbolWeak(SymbolPimpl, Result); -} - -inline error_code SymbolRef::isAbsolute(bool &Result) const { - return OwningObject->isSymbolAbsolute(SymbolPimpl, Result); +inline error_code SymbolRef::getFlags(uint32_t &Result) const { + return OwningObject->getSymbolFlags(SymbolPimpl, Result); } inline error_code SymbolRef::getSection(section_iterator &Result) const { @@ -518,6 +548,26 @@ inline error_code RelocationRef::getValueString(SmallVectorImpl<char> &Result) inline error_code RelocationRef::getHidden(bool &Result) const { return OwningObject->getRelocationHidden(RelocationPimpl, Result); } +// Inline function definitions. 
+inline LibraryRef::LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner) + : LibraryPimpl(LibraryP) + , OwningObject(Owner) {} + +inline bool LibraryRef::operator==(const LibraryRef &Other) const { + return LibraryPimpl == Other.LibraryPimpl; +} + +inline bool LibraryRef::operator <(const LibraryRef &Other) const { + return LibraryPimpl < Other.LibraryPimpl; +} + +inline error_code LibraryRef::getNext(LibraryRef &Result) const { + return OwningObject->getLibraryNext(LibraryPimpl, Result); +} + +inline error_code LibraryRef::getPath(StringRef &Result) const { + return OwningObject->getLibraryPath(LibraryPimpl, Result); +} } // end namespace object } // end namespace llvm diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h index 4fd4b2c..888537d 100644 --- a/include/llvm/Pass.h +++ b/include/llvm/Pass.h @@ -53,7 +53,7 @@ typedef const void* AnalysisID; /// Ordering of pass manager types is important here. enum PassManagerType { PMT_Unknown = 0, - PMT_ModulePassManager = 1, ///< MPPassManager + PMT_ModulePassManager = 1, ///< MPPassManager PMT_CallGraphPassManager, ///< CGPassManager PMT_FunctionPassManager, ///< FPPassManager PMT_LoopPassManager, ///< LPPassManager @@ -84,14 +84,14 @@ class Pass { PassKind Kind; void operator=(const Pass&); // DO NOT IMPLEMENT Pass(const Pass &); // DO NOT IMPLEMENT - + public: explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { } virtual ~Pass(); - + PassKind getPassKind() const { return Kind; } - + /// getPassName - Return a nice clean name for a pass. This usually /// implemented in terms of the name that is registered by one of the /// Registration templates, but can be overloaded directly. @@ -119,12 +119,12 @@ public: const std::string &Banner) const = 0; /// Each pass is responsible for assigning a pass manager to itself. - /// PMS is the stack of available pass manager. - virtual void assignPassManager(PMStack &, + /// PMS is the stack of available pass manager. 
+ virtual void assignPassManager(PMStack &, PassManagerType) {} /// Check if available pass managers are suitable for this pass or not. virtual void preparePassManager(PMStack &); - + /// Return what kind of Pass Manager can manage this pass. virtual PassManagerType getPotentialPassManagerType() const; @@ -159,9 +159,9 @@ public: virtual void *getAdjustedAnalysisPointer(AnalysisID ID); virtual ImmutablePass *getAsImmutablePass(); virtual PMDataManager *getAsPMDataManager(); - + /// verifyAnalysis() - This member can be implemented by a analysis pass to - /// check state of analysis information. + /// check state of analysis information. virtual void verifyAnalysis() const; // dumpPassStructure - Implement the -debug-passes=PassStructure option @@ -175,6 +175,10 @@ public: // argument string, or null if it is not known. static const PassInfo *lookupPassInfo(StringRef Arg); + // createPass - Create a object for the specified pass class, + // or null if it is not known. + static Pass *createPass(AnalysisID ID); + /// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to /// get analysis information that might be around, for example to update it. /// This is different than getAnalysis in that it can fail (if the analysis @@ -226,7 +230,7 @@ public: /// being operated on. virtual bool runOnModule(Module &M) = 0; - virtual void assignPassManager(PMStack &PMS, + virtual void assignPassManager(PMStack &PMS, PassManagerType T); /// Return what kind of Pass Manager can manage this pass. @@ -259,9 +263,9 @@ public: /// bool runOnModule(Module &) { return false; } - explicit ImmutablePass(char &pid) + explicit ImmutablePass(char &pid) : ModulePass(pid) {} - + // Force out-of-line virtual method. virtual ~ImmutablePass(); }; @@ -286,7 +290,7 @@ public: /// any necessary per-module initialization. /// virtual bool doInitialization(Module &); - + /// runOnFunction - Virtual method overriden by subclasses to do the /// per-function processing of the pass. 
/// @@ -297,7 +301,7 @@ public: /// virtual bool doFinalization(Module &); - virtual void assignPassManager(PMStack &PMS, + virtual void assignPassManager(PMStack &PMS, PassManagerType T); /// Return what kind of Pass Manager can manage this pass. @@ -348,7 +352,7 @@ public: /// virtual bool doFinalization(Module &); - virtual void assignPassManager(PMStack &PMS, + virtual void assignPassManager(PMStack &PMS, PassManagerType T); /// Return what kind of Pass Manager can manage this pass. diff --git a/include/llvm/PassManager.h b/include/llvm/PassManager.h index c8b5dca..ce5fda7 100644 --- a/include/llvm/PassManager.h +++ b/include/llvm/PassManager.h @@ -53,17 +53,13 @@ public: /// will be destroyed as well, so there is no need to delete the pass. This /// implies that all passes MUST be allocated with 'new'. void add(Pass *P); - + /// run - Execute all of the passes scheduled for execution. Keep track of /// whether any of the passes modifies the module, and if so, return true. bool run(Module &M); private: - /// addImpl - Add a pass to the queue of passes to run, without - /// checking whether to add a printer pass. - void addImpl(Pass *P); - - /// PassManagerImpl_New is the actual class. PassManager is just the + /// PassManagerImpl_New is the actual class. PassManager is just the /// wraper to publish simple pass manager interface PassManagerImpl *PM; }; @@ -75,11 +71,11 @@ public: /// but does not take ownership of, the specified Module. explicit FunctionPassManager(Module *M); ~FunctionPassManager(); - + /// add - Add a pass to the queue of passes to run. This passes /// ownership of the Pass to the PassManager. When the /// PassManager_X is destroyed, the pass will be destroyed as well, so - /// there is no need to delete the pass. (TODO delete passes.) + /// there is no need to delete the pass. /// This implies that all passes MUST be allocated with 'new'. void add(Pass *P); @@ -88,20 +84,16 @@ public: /// so, return true. 
/// bool run(Function &F); - + /// doInitialization - Run all of the initializers for the function passes. /// bool doInitialization(); - + /// doFinalization - Run all of the finalizers for the function passes. /// bool doFinalization(); - -private: - /// addImpl - Add a pass to the queue of passes to run, without - /// checking whether to add a printer pass. - void addImpl(Pass *P); +private: FunctionPassManagerImpl *FPM; Module *M; }; diff --git a/include/llvm/PassManagers.h b/include/llvm/PassManagers.h index c05347d..fa29f50 100644 --- a/include/llvm/PassManagers.h +++ b/include/llvm/PassManagers.h @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file declares the LLVM Pass Manager infrastructure. +// This file declares the LLVM Pass Manager infrastructure. // //===----------------------------------------------------------------------===// @@ -24,11 +24,11 @@ //===----------------------------------------------------------------------===// // Overview: // The Pass Manager Infrastructure manages passes. It's responsibilities are: -// +// // o Manage optimization pass execution order // o Make required Analysis information available before pass P is run // o Release memory occupied by dead passes -// o If Analysis information is dirtied by a pass then regenerate Analysis +// o If Analysis information is dirtied by a pass then regenerate Analysis // information before it is consumed by another pass. // // Pass Manager Infrastructure uses multiple pass managers. They are @@ -43,13 +43,13 @@ // // [o] class PMTopLevelManager; // -// Two top level managers, PassManager and FunctionPassManager, derive from -// PMTopLevelManager. PMTopLevelManager manages information used by top level +// Two top level managers, PassManager and FunctionPassManager, derive from +// PMTopLevelManager. PMTopLevelManager manages information used by top level // managers such as last user info. 
// // [o] class PMDataManager; // -// PMDataManager manages information, e.g. list of available analysis info, +// PMDataManager manages information, e.g. list of available analysis info, // used by a pass manager to manage execution order of passes. It also provides // a place to implement common pass manager APIs. All pass managers derive from // PMDataManager. @@ -82,7 +82,7 @@ // relies on PassManagerImpl to do all the tasks. // // [o] class PassManagerImpl : public Pass, public PMDataManager, -// public PMDTopLevelManager +// public PMTopLevelManager // // PassManagerImpl is a top level pass manager responsible for managing // MPPassManagers. @@ -109,7 +109,7 @@ enum PassDebuggingString { ON_REGION_MSG, // " 'on Region ...\n'" ON_LOOP_MSG, // " 'on Loop ...\n'" ON_CG_MSG // "' on Call Graph ...\n'" -}; +}; /// PassManagerPrettyStackEntry - This is used to print informative information /// about what pass is running when/if a stack trace is generated. @@ -124,19 +124,19 @@ public: : P(p), V(&v), M(0) {} // When P is run on V PassManagerPrettyStackEntry(Pass *p, Module &m) : P(p), V(0), M(&m) {} // When P is run on M - + /// print - Emit information about this stack frame to OS. virtual void print(raw_ostream &OS) const; }; - - + + //===----------------------------------------------------------------------===// // PMStack // /// PMStack - This class implements a stack data structure of PMDataManager /// pointers. /// -/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers +/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers /// using PMStack. Each Pass implements assignPassManager() to connect itself /// with appropriate manager. assignPassManager() walks PMStack to find /// suitable manager. @@ -174,9 +174,8 @@ protected: void initializeAllAnalysisInfo(); private: - /// This is implemented by top level pass manager and used by - /// schedulePass() to add analysis info passes that are not available. 
- virtual void addTopLevelPass(Pass *P) = 0; + virtual PMDataManager *getAsPMDataManager() = 0; + virtual PassManagerType getTopLevelPassManagerType() = 0; public: /// Schedule pass P for execution. Make sure that passes required by @@ -198,7 +197,7 @@ public: /// Find analysis usage information for the pass P. AnalysisUsage *findAnalysisUsage(Pass *P); - virtual ~PMTopLevelManager(); + virtual ~PMTopLevelManager(); /// Add immutable pass and initialize it. inline void addImmutablePass(ImmutablePass *P) { @@ -228,7 +227,7 @@ public: PMStack activeStack; protected: - + /// Collection of pass managers SmallVector<PMDataManager *, 8> PassManagers; @@ -254,7 +253,7 @@ private: }; - + //===----------------------------------------------------------------------===// // PMDataManager @@ -268,7 +267,7 @@ public: } virtual ~PMDataManager(); - + virtual Pass *getAsPass() = 0; /// Augment AvailableAnalysis by adding analysis made available by pass P. @@ -279,16 +278,16 @@ public: /// Remove Analysis that is not preserved by the pass void removeNotPreservedAnalysis(Pass *P); - + /// Remove dead passes used by P. - void removeDeadPasses(Pass *P, StringRef Msg, + void removeDeadPasses(Pass *P, StringRef Msg, enum PassDebuggingString); /// Remove P. - void freePass(Pass *P, StringRef Msg, + void freePass(Pass *P, StringRef Msg, enum PassDebuggingString); - /// Add pass P into the PassVector. Update + /// Add pass P into the PassVector. Update /// AvailableAnalysis appropriately if ProcessAnalysis is true. void add(Pass *P, bool ProcessAnalysis = true); @@ -300,7 +299,7 @@ public: virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F); /// Initialize available analysis information. 
- void initializeAnalysisInfo() { + void initializeAnalysisInfo() { AvailableAnalysis.clear(); for (unsigned i = 0; i < PMT_Last; ++i) InheritedAnalysis[i] = NULL; @@ -347,9 +346,9 @@ public: return (unsigned)PassVector.size(); } - virtual PassManagerType getPassManagerType() const { + virtual PassManagerType getPassManagerType() const { assert ( 0 && "Invalid use of getPassManagerType"); - return PMT_Unknown; + return PMT_Unknown; } std::map<AnalysisID, Pass*> *getAvailableAnalysis() { @@ -377,17 +376,17 @@ protected: // then PMT_Last active pass mangers. std::map<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last]; - + /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions /// or higher is specified. bool isPassDebuggingExecutionsOrMore() const; - + private: void dumpAnalysisUsage(StringRef Msg, const Pass *P, const AnalysisUsage::VectorType &Set) const; - // Set of available Analysis. This information is used while scheduling - // pass. If a pass requires an analysis which is not available then + // Set of available Analysis. This information is used while scheduling + // pass. If a pass requires an analysis which is not available then // the required analysis pass is scheduled to run before the pass itself is // scheduled to run. std::map<AnalysisID, Pass*> AvailableAnalysis; @@ -403,27 +402,27 @@ private: // FPPassManager // /// FPPassManager manages BBPassManagers and FunctionPasses. -/// It batches all function passes and basic block pass managers together and -/// sequence them to process one function at a time before processing next +/// It batches all function passes and basic block pass managers together and +/// sequence them to process one function at a time before processing next /// function. class FPPassManager : public ModulePass, public PMDataManager { public: static char ID; - explicit FPPassManager() + explicit FPPassManager() : ModulePass(ID), PMDataManager() { } - + /// run - Execute all of the passes scheduled for execution. 
Keep track of /// whether any of the passes modifies the module, and if so, return true. bool runOnFunction(Function &F); bool runOnModule(Module &M); - + /// cleanup - After running all passes, clean up pass manager cache. void cleanup(); /// doInitialization - Run all of the initializers for the function passes. /// bool doInitialization(Module &M); - + /// doFinalization - Run all of the finalizers for the function passes. /// bool doFinalization(Module &M); @@ -449,8 +448,8 @@ public: return FP; } - virtual PassManagerType getPassManagerType() const { - return PMT_FunctionPassManager; + virtual PassManagerType getPassManagerType() const { + return PMT_FunctionPassManager; } }; diff --git a/include/llvm/Support/CFG.h b/include/llvm/Support/CFG.h index 6e354f9..f5dc8ea 100644 --- a/include/llvm/Support/CFG.h +++ b/include/llvm/Support/CFG.h @@ -71,6 +71,12 @@ public: unsigned getOperandNo() const { return It.getOperandNo(); } + + /// getUse - Return the operand Use in the predecessor's terminator + /// of the successor. + Use &getUse() const { + return It.getUse(); + } }; typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator; diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h index a2990e4..c212d2d 100644 --- a/include/llvm/Support/CommandLine.h +++ b/include/llvm/Support/CommandLine.h @@ -40,7 +40,7 @@ namespace cl { //===----------------------------------------------------------------------===// // ParseCommandLineOptions - Command line option processing entry point. 
// -void ParseCommandLineOptions(int argc, char **argv, +void ParseCommandLineOptions(int argc, const char * const *argv, const char *Overview = 0, bool ReadResponseFiles = false); @@ -83,10 +83,10 @@ void MarkOptionsChanged(); // enum NumOccurrencesFlag { // Flags for the number of occurrences allowed - Optional = 0x01, // Zero or One occurrence - ZeroOrMore = 0x02, // Zero or more occurrences allowed - Required = 0x03, // One occurrence required - OneOrMore = 0x04, // One or more occurrences required + Optional = 0x00, // Zero or One occurrence + ZeroOrMore = 0x01, // Zero or more occurrences allowed + Required = 0x02, // One occurrence required + OneOrMore = 0x03, // One or more occurrences required // ConsumeAfter - Indicates that this option is fed anything that follows the // last positional argument required by the application (it is an error if @@ -95,23 +95,20 @@ enum NumOccurrencesFlag { // Flags for the number of occurrences allowed // found. Once a filename is found, all of the succeeding arguments are // passed, unprocessed, to the ConsumeAfter option. // - ConsumeAfter = 0x05, - - OccurrencesMask = 0x07 + ConsumeAfter = 0x04 }; enum ValueExpected { // Is a value required for the option? - ValueOptional = 0x08, // The value can appear... or not - ValueRequired = 0x10, // The value is required to appear! - ValueDisallowed = 0x18, // A value may not be specified (for flags) - ValueMask = 0x18 + // zero reserved for the unspecified value + ValueOptional = 0x01, // The value can appear... or not + ValueRequired = 0x02, // The value is required to appear! 
+ ValueDisallowed = 0x03 // A value may not be specified (for flags) }; enum OptionHidden { // Control whether -help shows this option - NotHidden = 0x20, // Option included in -help & -help-hidden - Hidden = 0x40, // -help doesn't, but -help-hidden does - ReallyHidden = 0x60, // Neither -help nor -help-hidden show this arg - HiddenMask = 0x60 + NotHidden = 0x00, // Option included in -help & -help-hidden + Hidden = 0x01, // -help doesn't, but -help-hidden does + ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg }; // Formatting flags - This controls special features that the option might have @@ -130,18 +127,16 @@ enum OptionHidden { // Control whether -help shows this option // enum FormattingFlags { - NormalFormatting = 0x000, // Nothing special - Positional = 0x080, // Is a positional argument, no '-' required - Prefix = 0x100, // Can this option directly prefix its value? - Grouping = 0x180, // Can this option group with other options? - FormattingMask = 0x180 // Union of the above flags. + NormalFormatting = 0x00, // Nothing special + Positional = 0x01, // Is a positional argument, no '-' required + Prefix = 0x02, // Can this option directly prefix its value? + Grouping = 0x03 // Can this option group with other options? }; enum MiscFlags { // Miscellaneous flags to adjust argument - CommaSeparated = 0x200, // Should this cl::list split between commas? - PositionalEatsArgs = 0x400, // Should this positional cl::list eat -args? - Sink = 0x800, // Should this cl::list eat all unknown options? - MiscMask = 0xE00 // Union of the above flags. + CommaSeparated = 0x01, // Should this cl::list split between commas? + PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args? + Sink = 0x04 // Should this cl::list eat all unknown options? 
}; @@ -168,7 +163,15 @@ class Option { virtual void anchor(); int NumOccurrences; // The number of times specified - int Flags; // Flags for the argument + // Occurrences, HiddenFlag, and Formatting are all enum types but to avoid + // problems with signed enums in bitfields. + unsigned Occurrences : 3; // enum NumOccurrencesFlag + // not using the enum type for 'Value' because zero is an implementation + // detail representing the non-value + unsigned Value : 2; + unsigned HiddenFlag : 2; // enum OptionHidden + unsigned Formatting : 2; // enum FormattingFlags + unsigned Misc : 3; unsigned Position; // Position of last occurrence of the option unsigned AdditionalVals;// Greater than 0 for multi-valued option. Option *NextRegistered; // Singly linked list of registered options. @@ -178,21 +181,20 @@ public: const char *ValueStr; // String describing what the value of this option is inline enum NumOccurrencesFlag getNumOccurrencesFlag() const { - return static_cast<enum NumOccurrencesFlag>(Flags & OccurrencesMask); + return (enum NumOccurrencesFlag)Occurrences; } inline enum ValueExpected getValueExpectedFlag() const { - int VE = Flags & ValueMask; - return VE ? static_cast<enum ValueExpected>(VE) + return Value ? 
((enum ValueExpected)Value) : getValueExpectedFlagDefault(); } inline enum OptionHidden getOptionHiddenFlag() const { - return static_cast<enum OptionHidden>(Flags & HiddenMask); + return (enum OptionHidden)HiddenFlag; } inline enum FormattingFlags getFormattingFlag() const { - return static_cast<enum FormattingFlags>(Flags & FormattingMask); + return (enum FormattingFlags)Formatting; } inline unsigned getMiscFlags() const { - return Flags & MiscMask; + return Misc; } inline unsigned getPosition() const { return Position; } inline unsigned getNumAdditionalVals() const { return AdditionalVals; } @@ -206,27 +208,21 @@ public: void setArgStr(const char *S) { ArgStr = S; } void setDescription(const char *S) { HelpStr = S; } void setValueStr(const char *S) { ValueStr = S; } - - void setFlag(unsigned Flag, unsigned FlagMask) { - Flags &= ~FlagMask; - Flags |= Flag; - } - void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) { - setFlag(Val, OccurrencesMask); + Occurrences = Val; } - void setValueExpectedFlag(enum ValueExpected Val) { setFlag(Val, ValueMask); } - void setHiddenFlag(enum OptionHidden Val) { setFlag(Val, HiddenMask); } - void setFormattingFlag(enum FormattingFlags V) { setFlag(V, FormattingMask); } - void setMiscFlag(enum MiscFlags M) { setFlag(M, M); } + void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; } + void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; } + void setFormattingFlag(enum FormattingFlags V) { Formatting = V; } + void setMiscFlag(enum MiscFlags M) { Misc |= M; } void setPosition(unsigned pos) { Position = pos; } protected: - explicit Option(unsigned DefaultFlags) - : NumOccurrences(0), Flags(DefaultFlags | NormalFormatting), Position(0), + explicit Option(enum NumOccurrencesFlag Occurrences, + enum OptionHidden Hidden) + : NumOccurrences(0), Occurrences(Occurrences), HiddenFlag(Hidden), + Formatting(NormalFormatting), Position(0), AdditionalVals(0), NextRegistered(0), ArgStr(""), HelpStr(""), ValueStr("") { - 
assert(getNumOccurrencesFlag() != 0 && - getOptionHiddenFlag() != 0 && "Not all default flags specified!"); } inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; } @@ -341,7 +337,7 @@ struct OptionValueBase : public GenericOptionValue { bool hasValue() const { return false; } - const DataType &getValue() const { assert(false && "no default value"); } + const DataType &getValue() const { llvm_unreachable("no default value"); } // Some options may take their value from a different data type. template<class DT> @@ -1177,14 +1173,14 @@ public: // One option... template<class M0t> - explicit opt(const M0t &M0) : Option(Optional | NotHidden) { + explicit opt(const M0t &M0) : Option(Optional, NotHidden) { apply(M0, this); done(); } // Two options... template<class M0t, class M1t> - opt(const M0t &M0, const M1t &M1) : Option(Optional | NotHidden) { + opt(const M0t &M0, const M1t &M1) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); done(); } @@ -1192,21 +1188,21 @@ public: // Three options... template<class M0t, class M1t, class M2t> opt(const M0t &M0, const M1t &M1, - const M2t &M2) : Option(Optional | NotHidden) { + const M2t &M2) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); done(); } // Four options... template<class M0t, class M1t, class M2t, class M3t> opt(const M0t &M0, const M1t &M1, const M2t &M2, - const M3t &M3) : Option(Optional | NotHidden) { + const M3t &M3) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); done(); } // Five options... 
template<class M0t, class M1t, class M2t, class M3t, class M4t> opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4) : Option(Optional | NotHidden) { + const M4t &M4) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); done(); @@ -1215,7 +1211,7 @@ public: template<class M0t, class M1t, class M2t, class M3t, class M4t, class M5t> opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4, const M5t &M5) : Option(Optional | NotHidden) { + const M4t &M4, const M5t &M5) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); done(); @@ -1225,7 +1221,7 @@ public: class M4t, class M5t, class M6t> opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, - const M6t &M6) : Option(Optional | NotHidden) { + const M6t &M6) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); done(); @@ -1235,7 +1231,7 @@ public: class M4t, class M5t, class M6t, class M7t> opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, const M6t &M6, - const M7t &M7) : Option(Optional | NotHidden) { + const M7t &M7) : Option(Optional, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); done(); @@ -1344,34 +1340,34 @@ public: // One option... template<class M0t> - explicit list(const M0t &M0) : Option(ZeroOrMore | NotHidden) { + explicit list(const M0t &M0) : Option(ZeroOrMore, NotHidden) { apply(M0, this); done(); } // Two options... 
template<class M0t, class M1t> - list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) { + list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); done(); } // Three options... template<class M0t, class M1t, class M2t> list(const M0t &M0, const M1t &M1, const M2t &M2) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); done(); } // Four options... template<class M0t, class M1t, class M2t, class M3t> list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); done(); } // Five options... template<class M0t, class M1t, class M2t, class M3t, class M4t> list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4) : Option(ZeroOrMore | NotHidden) { + const M4t &M4) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); done(); @@ -1380,7 +1376,7 @@ public: template<class M0t, class M1t, class M2t, class M3t, class M4t, class M5t> list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) { + const M4t &M4, const M5t &M5) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); done(); @@ -1390,7 +1386,7 @@ public: class M4t, class M5t, class M6t> list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, const M6t &M6) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); done(); @@ -1400,7 +1396,7 @@ public: class M4t, class M5t, class M6t, class M7t> list(const M0t &M0, const M1t &M1, const 
M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, const M6t &M6, - const M7t &M7) : Option(ZeroOrMore | NotHidden) { + const M7t &M7) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); done(); @@ -1542,34 +1538,34 @@ public: // One option... template<class M0t> - explicit bits(const M0t &M0) : Option(ZeroOrMore | NotHidden) { + explicit bits(const M0t &M0) : Option(ZeroOrMore, NotHidden) { apply(M0, this); done(); } // Two options... template<class M0t, class M1t> - bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) { + bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); done(); } // Three options... template<class M0t, class M1t, class M2t> bits(const M0t &M0, const M1t &M1, const M2t &M2) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); done(); } // Four options... template<class M0t, class M1t, class M2t, class M3t> bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); done(); } // Five options... 
template<class M0t, class M1t, class M2t, class M3t, class M4t> bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4) : Option(ZeroOrMore | NotHidden) { + const M4t &M4) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); done(); @@ -1578,7 +1574,7 @@ public: template<class M0t, class M1t, class M2t, class M3t, class M4t, class M5t> bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, - const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) { + const M4t &M4, const M5t &M5) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); done(); @@ -1588,7 +1584,7 @@ public: class M4t, class M5t, class M6t> bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, const M6t &M6) - : Option(ZeroOrMore | NotHidden) { + : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); done(); @@ -1598,7 +1594,7 @@ public: class M4t, class M5t, class M6t, class M7t> bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3, const M4t &M4, const M5t &M5, const M6t &M6, - const M7t &M7) : Option(ZeroOrMore | NotHidden) { + const M7t &M7) : Option(ZeroOrMore, NotHidden) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this); done(); @@ -1638,27 +1634,27 @@ public: // One option... template<class M0t> - explicit alias(const M0t &M0) : Option(Optional | Hidden), AliasFor(0) { + explicit alias(const M0t &M0) : Option(Optional, Hidden), AliasFor(0) { apply(M0, this); done(); } // Two options... 
template<class M0t, class M1t> - alias(const M0t &M0, const M1t &M1) : Option(Optional | Hidden), AliasFor(0) { + alias(const M0t &M0, const M1t &M1) : Option(Optional, Hidden), AliasFor(0) { apply(M0, this); apply(M1, this); done(); } // Three options... template<class M0t, class M1t, class M2t> alias(const M0t &M0, const M1t &M1, const M2t &M2) - : Option(Optional | Hidden), AliasFor(0) { + : Option(Optional, Hidden), AliasFor(0) { apply(M0, this); apply(M1, this); apply(M2, this); done(); } // Four options... template<class M0t, class M1t, class M2t, class M3t> alias(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3) - : Option(Optional | Hidden), AliasFor(0) { + : Option(Optional, Hidden), AliasFor(0) { apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this); done(); } diff --git a/include/llvm/Support/DataStream.h b/include/llvm/Support/DataStream.h new file mode 100644 index 0000000..fedb0c9 --- /dev/null +++ b/include/llvm/Support/DataStream.h @@ -0,0 +1,38 @@ +//===---- llvm/Support/DataStream.h - Lazy bitcode streaming ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header defines DataStreamer, which fetches bytes of data from +// a stream source. It provides support for streaming (lazy reading) of +// data, e.g. bitcode +// +//===----------------------------------------------------------------------===// + + +#ifndef LLVM_SUPPORT_DATASTREAM_H_ +#define LLVM_SUPPORT_DATASTREAM_H_ + +#include <string> + +namespace llvm { + +class DataStreamer { +public: + /// Fetch bytes [start-end) from the stream, and write them to the + /// buffer pointed to by buf. Returns the number of bytes actually written. 
+ virtual size_t GetBytes(unsigned char *buf, size_t len) = 0; + + virtual ~DataStreamer(); +}; + +DataStreamer *getDataFileStreamer(const std::string &Filename, + std::string *Err); + +} + +#endif // LLVM_SUPPORT_DATASTREAM_H_ diff --git a/include/llvm/Support/Dwarf.h b/include/llvm/Support/Dwarf.h index 357f555a..e57fbf7 100644 --- a/include/llvm/Support/Dwarf.h +++ b/include/llvm/Support/Dwarf.h @@ -22,7 +22,8 @@ namespace llvm { // Debug info constants. enum { - LLVMDebugVersion = (11 << 16), // Current version of debug information. + LLVMDebugVersion = (12 << 16), // Current version of debug information. + LLVMDebugVersion11 = (11 << 16), // Constant for version 11. LLVMDebugVersion10 = (10 << 16), // Constant for version 10. LLVMDebugVersion9 = (9 << 16), // Constant for version 9. LLVMDebugVersion8 = (8 << 16), // Constant for version 8. @@ -130,6 +131,7 @@ enum dwarf_constants { DW_TAG_GNU_template_parameter_pack = 0x4107, DW_TAG_GNU_formal_parameter_pack = 0x4108, DW_TAG_lo_user = 0x4080, + DW_TAG_APPLE_Property = 0x4200, DW_TAG_hi_user = 0xffff, // Children flag @@ -269,6 +271,7 @@ enum dwarf_constants { DW_AT_APPLE_property_setter = 0x3fea, DW_AT_APPLE_property_attribute = 0x3feb, DW_AT_APPLE_objc_complete_type = 0x3fec, + DW_AT_APPLE_property = 0x3fed, // Attribute form encodings DW_FORM_addr = 0x01, diff --git a/include/llvm/Support/Endian.h b/include/llvm/Support/Endian.h index af1b506..733ab75 100644 --- a/include/llvm/Support/Endian.h +++ b/include/llvm/Support/Endian.h @@ -98,6 +98,9 @@ public: operator value_type() const { return endian::read_le<value_type, unaligned>(Value); } + void operator=(value_type newValue) { + endian::write_le<value_type, unaligned>((void *)&Value, newValue); + } private: uint8_t Value[sizeof(value_type)]; }; @@ -108,6 +111,9 @@ public: operator value_type() const { return endian::read_be<value_type, unaligned>(Value); } + void operator=(value_type newValue) { + endian::write_be<value_type, unaligned>((void *)&Value, 
newValue); + } private: uint8_t Value[sizeof(value_type)]; }; @@ -118,6 +124,9 @@ public: operator value_type() const { return endian::read_le<value_type, aligned>(&Value); } + void operator=(value_type newValue) { + endian::write_le<value_type, aligned>((void *)&Value, newValue); + } private: value_type Value; }; @@ -128,6 +137,9 @@ public: operator value_type() const { return endian::read_be<value_type, aligned>(&Value); } + void operator=(value_type newValue) { + endian::write_be<value_type, aligned>((void *)&Value, newValue); + } private: value_type Value; }; diff --git a/include/llvm/Support/InstVisitor.h b/include/llvm/Support/InstVisitor.h index a661c4f..4b1f3c9 100644 --- a/include/llvm/Support/InstVisitor.h +++ b/include/llvm/Support/InstVisitor.h @@ -162,7 +162,6 @@ public: RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);} RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);} RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(TerminatorInst);} - RetTy visitUnwindInst(UnwindInst &I) { DELEGATE(TerminatorInst);} RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);} RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);} RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);} diff --git a/include/llvm/Support/JSONParser.h b/include/llvm/Support/JSONParser.h new file mode 100644 index 0000000..11149f1 --- /dev/null +++ b/include/llvm/Support/JSONParser.h @@ -0,0 +1,448 @@ +//===--- JSONParser.h - Simple JSON parser ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a JSON parser. +// +// See http://www.json.org/ for an overview. +// See http://www.ietf.org/rfc/rfc4627.txt for the full standard. 
+// +// FIXME: Currently this supports a subset of JSON. Specifically, support +// for numbers, booleans and null for values is missing. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_JSON_PARSER_H +#define LLVM_SUPPORT_JSON_PARSER_H + +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SourceMgr.h" + +namespace llvm { + +class JSONContainer; +class JSONString; +class JSONValue; +class JSONKeyValuePair; + +/// \brief Base class for a parsable JSON atom. +/// +/// This class has no semantics other than being a unit of JSON data which can +/// be parsed out of a JSON document. +class JSONAtom { +public: + /// \brief Possible types of JSON objects. + enum Kind { JK_KeyValuePair, JK_Array, JK_Object, JK_String }; + + /// \brief Returns the type of this value. + Kind getKind() const { return MyKind; } + + static bool classof(const JSONAtom *Atom) { return true; } + +protected: + JSONAtom(Kind MyKind) : MyKind(MyKind) {} + +private: + Kind MyKind; +}; + +/// \brief A parser for JSON text. +/// +/// Use an object of JSONParser to iterate over the values of a JSON text. +/// All objects are parsed during the iteration, so you can only iterate once +/// over the JSON text, but the cost of partial iteration is minimized. +/// Create a new JSONParser if you want to iterate multiple times. +class JSONParser { +public: + /// \brief Create a JSONParser for the given input. + /// + /// Parsing is started via parseRoot(). Access to the object returned from + /// parseRoot() will parse the input lazily. + JSONParser(StringRef Input, SourceMgr *SM); + + /// \brief Returns the outermost JSON value (either an array or an object). + /// + /// Can return NULL if the input does not start with an array or an object. 
+ /// The object is not parsed yet - the caller must iterate over the + /// returned object to trigger parsing. + /// + /// A JSONValue can be either a JSONString, JSONObject or JSONArray. + JSONValue *parseRoot(); + + /// \brief Parses the JSON text and returns whether it is valid JSON. + /// + /// In case validate() return false, failed() will return true and + /// getErrorMessage() will return the parsing error. + bool validate(); + + /// \brief Returns true if an error occurs during parsing. + /// + /// If there was an error while parsing an object that was created by + /// iterating over the result of 'parseRoot', 'failed' will return true. + bool failed() const; + +private: + /// \brief These methods manage the implementation details of parsing new JSON + /// atoms. + /// @{ + JSONString *parseString(); + JSONValue *parseValue(); + JSONKeyValuePair *parseKeyValuePair(); + /// @} + + /// \brief Helpers to parse the elements out of both forms of containers. + /// @{ + const JSONAtom *parseElement(JSONAtom::Kind ContainerKind); + StringRef::iterator parseFirstElement(JSONAtom::Kind ContainerKind, + char StartChar, char EndChar, + const JSONAtom *&Element); + StringRef::iterator parseNextElement(JSONAtom::Kind ContainerKind, + char EndChar, + const JSONAtom *&Element); + /// @} + + /// \brief Whitespace parsing. + /// @{ + void nextNonWhitespace(); + bool isWhitespace(); + /// @} + + /// \brief These methods are used for error handling. + /// { + void setExpectedError(StringRef Expected, StringRef Found); + void setExpectedError(StringRef Expected, char Found); + bool errorIfAtEndOfFile(StringRef Message); + bool errorIfNotAt(char C, StringRef Message); + /// } + + /// \brief Skips all elements in the given container. + bool skipContainer(const JSONContainer &Container); + + /// \brief Skips to the next position behind the given JSON atom. 
+ bool skip(const JSONAtom &Atom); + + /// All nodes are allocated by the parser and will be deallocated when the + /// parser is destroyed. + BumpPtrAllocator ValueAllocator; + + /// \brief The original input to the parser. + MemoryBuffer *InputBuffer; + + /// \brief The source manager used for diagnostics and buffer management. + SourceMgr *SM; + + /// \brief The current position in the parse stream. + StringRef::iterator Position; + + /// \brief The end position for fast EOF checks without introducing + /// unnecessary dereferences. + StringRef::iterator End; + + /// \brief If true, an error has occurred. + bool Failed; + + friend class JSONContainer; +}; + + +/// \brief Base class for JSON value objects. +/// +/// This object represents an abstract JSON value. It is the root node behind +/// the group of JSON entities that can represent top-level values in a JSON +/// document. It has no API, and is just a placeholder in the type hierarchy of +/// nodes. +class JSONValue : public JSONAtom { +protected: + JSONValue(Kind MyKind) : JSONAtom(MyKind) {} + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + switch (Atom->getKind()) { + case JK_Array: + case JK_Object: + case JK_String: + return true; + case JK_KeyValuePair: + return false; + } + llvm_unreachable("Invalid JSONAtom kind"); + } + static bool classof(const JSONValue *Value) { return true; } + ///@} +}; + +/// \brief Gives access to the text of a JSON string. +/// +/// FIXME: Implement a method to return the unescaped text. +class JSONString : public JSONValue { +public: + /// \brief Returns the underlying parsed text of the string. + /// + /// This is the unescaped content of the JSON text. + /// See http://www.ietf.org/rfc/rfc4627.txt for details. 
+ StringRef getRawText() const { return RawText; } + +private: + JSONString(StringRef RawText) : JSONValue(JK_String), RawText(RawText) {} + + StringRef RawText; + + friend class JSONParser; + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + return Atom->getKind() == JK_String; + } + static bool classof(const JSONString *String) { return true; } + ///@} +}; + +/// \brief A (key, value) tuple of type (JSONString *, JSONValue *). +/// +/// Note that JSONKeyValuePair is not a JSONValue, it is a bare JSONAtom. +/// JSONKeyValuePairs can be elements of a JSONObject, but not of a JSONArray. +/// They are not viable as top-level values either. +class JSONKeyValuePair : public JSONAtom { +public: + const JSONString * const Key; + const JSONValue * const Value; + +private: + JSONKeyValuePair(const JSONString *Key, const JSONValue *Value) + : JSONAtom(JK_KeyValuePair), Key(Key), Value(Value) {} + + friend class JSONParser; + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + return Atom->getKind() == JK_KeyValuePair; + } + static bool classof(const JSONKeyValuePair *KeyValuePair) { return true; } + ///@} +}; + +/// \brief Implementation of JSON containers (arrays and objects). +/// +/// JSONContainers drive the lazy parsing of JSON arrays and objects via +/// forward iterators. +class JSONContainer : public JSONValue { +private: + /// \brief An iterator that parses the underlying container during iteration. + /// + /// Iterators on the same collection use shared state, so when multiple copies + /// of an iterator exist, only one is allowed to be used for iteration; + /// iterating multiple copies of an iterator of the same collection will lead + /// to undefined behavior. + class AtomIterator { + public: + AtomIterator(const AtomIterator &I) : Container(I.Container) {} + + /// \brief Iterator interface. 
+ ///@{ + bool operator==(const AtomIterator &I) const { + if (isEnd() || I.isEnd()) + return isEnd() == I.isEnd(); + return Container->Position == I.Container->Position; + } + bool operator!=(const AtomIterator &I) const { + return !(*this == I); + } + AtomIterator &operator++() { + Container->parseNextElement(); + return *this; + } + const JSONAtom *operator*() { + return Container->Current; + } + ///@} + + private: + /// \brief Create an iterator for which 'isEnd' returns true. + AtomIterator() : Container(0) {} + + /// \brief Create an iterator for the given container. + AtomIterator(const JSONContainer *Container) : Container(Container) {} + + bool isEnd() const { + return Container == 0 || Container->Position == StringRef::iterator(); + } + + const JSONContainer * const Container; + + friend class JSONContainer; + }; + +protected: + /// \brief An iterator for the specified AtomT. + /// + /// Used for the implementation of iterators for JSONArray and JSONObject. + template <typename AtomT> + class IteratorTemplate : public std::iterator<std::forward_iterator_tag, + const AtomT*> { + public: + explicit IteratorTemplate(const AtomIterator& AtomI) + : AtomI(AtomI) {} + + bool operator==(const IteratorTemplate &I) const { + return AtomI == I.AtomI; + } + bool operator!=(const IteratorTemplate &I) const { return !(*this == I); } + + IteratorTemplate &operator++() { + ++AtomI; + return *this; + } + + const AtomT *operator*() { return dyn_cast<AtomT>(*AtomI); } + + private: + AtomIterator AtomI; + }; + + JSONContainer(JSONParser *Parser, char StartChar, char EndChar, + JSONAtom::Kind ContainerKind) + : JSONValue(ContainerKind), Parser(Parser), + Position(), Current(0), Started(false), + StartChar(StartChar), EndChar(EndChar) {} + + /// \brief Returns a lazy parsing iterator over the container. + /// + /// As the iterator drives the parse stream, begin() must only be called + /// once per container. 
+ AtomIterator atom_begin() const { + if (Started) + report_fatal_error("Cannot parse container twice."); + Started = true; + // Set up the position and current element when we begin iterating over the + // container. + Position = Parser->parseFirstElement(getKind(), StartChar, EndChar, Current); + return AtomIterator(this); + } + AtomIterator atom_end() const { + return AtomIterator(); + } + +private: + AtomIterator atom_current() const { + if (!Started) + return atom_begin(); + + return AtomIterator(this); + } + + /// \brief Parse the next element in the container into the Current element. + /// + /// This routine is called as an iterator into this container walks through + /// its elements. It mutates the container's internal current node to point to + /// the next atom of the container. + void parseNextElement() const { + Parser->skip(*Current); + Position = Parser->parseNextElement(getKind(), EndChar, Current); + } + + // For parsing, JSONContainers call back into the JSONParser. + JSONParser * const Parser; + + // 'Position', 'Current' and 'Started' store the state of the parse stream + // for iterators on the container, they don't change the container's elements + // and are thus marked as mutable. + mutable StringRef::iterator Position; + mutable const JSONAtom *Current; + mutable bool Started; + + const char StartChar; + const char EndChar; + + friend class JSONParser; + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + switch (Atom->getKind()) { + case JK_Array: + case JK_Object: + return true; + case JK_KeyValuePair: + case JK_String: + return false; + } + llvm_unreachable("Invalid JSONAtom kind"); + } + static bool classof(const JSONContainer *Container) { return true; } + ///@} +}; + +/// \brief A simple JSON array. +class JSONArray : public JSONContainer { +public: + typedef IteratorTemplate<JSONValue> const_iterator; + + /// \brief Returns a lazy parsing iterator over the container. 
+ /// + /// As the iterator drives the parse stream, begin() must only be called + /// once per container. + const_iterator begin() const { return const_iterator(atom_begin()); } + const_iterator end() const { return const_iterator(atom_end()); } + +private: + JSONArray(JSONParser *Parser) + : JSONContainer(Parser, '[', ']', JSONAtom::JK_Array) {} + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + return Atom->getKind() == JSONAtom::JK_Array; + } + static bool classof(const JSONArray *Array) { return true; } + ///@} + + friend class JSONParser; +}; + +/// \brief A JSON object: an iterable list of JSON key-value pairs. +class JSONObject : public JSONContainer { +public: + typedef IteratorTemplate<JSONKeyValuePair> const_iterator; + + /// \brief Returns a lazy parsing iterator over the container. + /// + /// As the iterator drives the parse stream, begin() must only be called + /// once per container. + const_iterator begin() const { return const_iterator(atom_begin()); } + const_iterator end() const { return const_iterator(atom_end()); } + +private: + JSONObject(JSONParser *Parser) + : JSONContainer(Parser, '{', '}', JSONAtom::JK_Object) {} + +public: + /// \brief dyn_cast helpers + ///@{ + static bool classof(const JSONAtom *Atom) { + return Atom->getKind() == JSONAtom::JK_Object; + } + static bool classof(const JSONObject *Object) { return true; } + ///@} + + friend class JSONParser; +}; + +} // end namespace llvm + +#endif // LLVM_SUPPORT_JSON_PARSER_H diff --git a/include/llvm/Support/LockFileManager.h b/include/llvm/Support/LockFileManager.h new file mode 100644 index 0000000..e2fa8eb --- /dev/null +++ b/include/llvm/Support/LockFileManager.h @@ -0,0 +1,74 @@ +//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H +#define LLVM_SUPPORT_LOCKFILEMANAGER_H + +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/system_error.h" +#include <utility> // for std::pair + +namespace llvm { + +/// \brief Class that manages the creation of a lock file to aid +/// implicit coordination between different processes. +/// +/// The implicit coordination works by creating a ".lock" file alongside +/// the file that we're coordinating for, using the atomicity of the file +/// system to ensure that only a single process can create that ".lock" file. +/// When the lock file is removed, the owning process has finished the +/// operation. +class LockFileManager { +public: + /// \brief Describes the state of a lock file. + enum LockFileState { + /// \brief The lock file has been created and is owned by this instance + /// of the object. + LFS_Owned, + /// \brief The lock file already exists and is owned by some other + /// instance. + LFS_Shared, + /// \brief An error occurred while trying to create or find the lock + /// file. + LFS_Error + }; + +private: + SmallString<128> LockFileName; + SmallString<128> UniqueLockFileName; + + Optional<std::pair<std::string, int> > Owner; + Optional<error_code> Error; + + LockFileManager(const LockFileManager &); + LockFileManager &operator=(const LockFileManager &); + + static Optional<std::pair<std::string, int> > + readLockFile(StringRef LockFileName); + + static bool processStillExecuting(StringRef Hostname, int PID); + +public: + + LockFileManager(StringRef FileName); + ~LockFileManager(); + + /// \brief Determine the state of the lock file. + LockFileState getState() const; + + operator LockFileState() const { return getState(); } + + /// \brief For a shared lock, wait until the owner releases the lock. 
+ void waitForUnlock(); +}; + +} // end namespace llvm + +#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H diff --git a/include/llvm/Support/MemoryObject.h b/include/llvm/Support/MemoryObject.h index dec0f13..b778b08 100644 --- a/include/llvm/Support/MemoryObject.h +++ b/include/llvm/Support/MemoryObject.h @@ -23,19 +23,19 @@ class MemoryObject { public: /// Destructor - Override as necessary. virtual ~MemoryObject(); - + /// getBase - Returns the lowest valid address in the region. /// /// @result - The lowest valid address. virtual uint64_t getBase() const = 0; - + /// getExtent - Returns the size of the region in bytes. (The region is - /// contiguous, so the highest valid address of the region + /// contiguous, so the highest valid address of the region /// is getBase() + getExtent() - 1). /// /// @result - The size of the region. virtual uint64_t getExtent() const = 0; - + /// readByte - Tries to read a single byte from the region. /// /// @param address - The address of the byte, in the same space as getBase(). @@ -43,7 +43,7 @@ public: /// @result - 0 if successful; -1 if not. Failure may be due to a /// bounds violation or an implementation-specific error. virtual int readByte(uint64_t address, uint8_t* ptr) const = 0; - + /// readBytes - Tries to read a contiguous range of bytes from the /// region, up to the end of the region. /// You should override this function if there is a quicker @@ -67,4 +67,3 @@ public: } #endif - diff --git a/include/llvm/Support/PatternMatch.h b/include/llvm/Support/PatternMatch.h index f0fb516..221fa8b 100644 --- a/include/llvm/Support/PatternMatch.h +++ b/include/llvm/Support/PatternMatch.h @@ -31,6 +31,7 @@ #include "llvm/Constants.h" #include "llvm/Instructions.h" +#include "llvm/Operator.h" namespace llvm { namespace PatternMatch { @@ -97,12 +98,19 @@ struct apint_match { Res = &CI->getValue(); return true; } + // FIXME: Remove this. 
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) { Res = &CI->getValue(); return true; } + if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V)) + if (ConstantInt *CI = + dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) { + Res = &CI->getValue(); + return true; + } return false; } }; @@ -143,9 +151,13 @@ struct cst_pred_ty : public Predicate { bool match(ITy *V) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) return this->isValue(CI->getValue()); + // FIXME: Remove this. if (const ConstantVector *CV = dyn_cast<ConstantVector>(V)) if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) return this->isValue(CI->getValue()); + if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V)) + if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) + return this->isValue(CI->getValue()); return false; } }; @@ -163,12 +175,22 @@ struct api_pred_ty : public Predicate { Res = &CI->getValue(); return true; } + + // FIXME: remove. if (const ConstantVector *CV = dyn_cast<ConstantVector>(V)) if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) if (this->isValue(CI->getValue())) { Res = &CI->getValue(); return true; } + + if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V)) + if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) + if (this->isValue(CI->getValue())) { + Res = &CI->getValue(); + return true; + } + return false; } }; @@ -441,6 +463,26 @@ m_IDiv(const LHS &L, const RHS &R) { } //===----------------------------------------------------------------------===// +// Class that matches exact binary ops. 
+// +template<typename SubPattern_t> +struct Exact_match { + SubPattern_t SubPattern; + + Exact_match(const SubPattern_t &SP) : SubPattern(SP) {} + + template<typename OpTy> + bool match(OpTy *V) { + if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V)) + return PEO->isExact() && SubPattern.match(V); + return false; + } +}; + +template<typename T> +inline Exact_match<T> m_Exact(const T &SubPattern) { return SubPattern; } + +//===----------------------------------------------------------------------===// // Matchers for CmpInst classes // @@ -529,10 +571,8 @@ struct CastClass_match { template<typename OpTy> bool match(OpTy *V) { - if (CastInst *I = dyn_cast<CastInst>(V)) - return I->getOpcode() == Opcode && Op.match(I->getOperand(0)); - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) - return CE->getOpcode() == Opcode && Op.match(CE->getOperand(0)); + if (Operator *O = dyn_cast<Operator>(V)) + return O->getOpcode() == Opcode && Op.match(O->getOperand(0)); return false; } }; @@ -585,21 +625,18 @@ struct not_match { template<typename OpTy> bool match(OpTy *V) { - if (Instruction *I = dyn_cast<Instruction>(V)) - if (I->getOpcode() == Instruction::Xor) - return matchIfNot(I->getOperand(0), I->getOperand(1)); - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) - if (CE->getOpcode() == Instruction::Xor) - return matchIfNot(CE->getOperand(0), CE->getOperand(1)); + if (Operator *O = dyn_cast<Operator>(V)) + if (O->getOpcode() == Instruction::Xor) + return matchIfNot(O->getOperand(0), O->getOperand(1)); return false; } private: bool matchIfNot(Value *LHS, Value *RHS) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) - return CI->isAllOnesValue() && L.match(LHS); - if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS)) - return CV->isAllOnesValue() && L.match(LHS); - return false; + return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) || + // FIXME: Remove CV. 
+ isa<ConstantVector>(RHS)) && + cast<Constant>(RHS)->isAllOnesValue() && + L.match(LHS); } }; @@ -615,19 +652,16 @@ struct neg_match { template<typename OpTy> bool match(OpTy *V) { - if (Instruction *I = dyn_cast<Instruction>(V)) - if (I->getOpcode() == Instruction::Sub) - return matchIfNeg(I->getOperand(0), I->getOperand(1)); - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) - if (CE->getOpcode() == Instruction::Sub) - return matchIfNeg(CE->getOperand(0), CE->getOperand(1)); + if (Operator *O = dyn_cast<Operator>(V)) + if (O->getOpcode() == Instruction::Sub) + return matchIfNeg(O->getOperand(0), O->getOperand(1)); return false; } private: bool matchIfNeg(Value *LHS, Value *RHS) { - if (ConstantInt *C = dyn_cast<ConstantInt>(LHS)) - return C->isZero() && L.match(RHS); - return false; + return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) || + isa<ConstantAggregateZero>(LHS)) && + L.match(RHS); } }; @@ -644,12 +678,9 @@ struct fneg_match { template<typename OpTy> bool match(OpTy *V) { - if (Instruction *I = dyn_cast<Instruction>(V)) - if (I->getOpcode() == Instruction::FSub) - return matchIfFNeg(I->getOperand(0), I->getOperand(1)); - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) - if (CE->getOpcode() == Instruction::FSub) - return matchIfFNeg(CE->getOperand(0), CE->getOperand(1)); + if (Operator *O = dyn_cast<Operator>(V)) + if (O->getOpcode() == Instruction::FSub) + return matchIfFNeg(O->getOperand(0), O->getOperand(1)); return false; } private: diff --git a/include/llvm/Support/Process.h b/include/llvm/Support/Process.h index 27ef267..3379922 100644 --- a/include/llvm/Support/Process.h +++ b/include/llvm/Support/Process.h @@ -138,9 +138,6 @@ namespace sys { /// Resets the terminals colors, or returns an escape sequence to do so. static const char *ResetColor(); - - /// Change the program working directory to that given by \arg Path. 
- static void SetWorkingDirectory(std::string Path); /// @} }; } diff --git a/include/llvm/Support/Recycler.h b/include/llvm/Support/Recycler.h index d8f8c78..fa6e189 100644 --- a/include/llvm/Support/Recycler.h +++ b/include/llvm/Support/Recycler.h @@ -17,6 +17,7 @@ #include "llvm/ADT/ilist.h" #include "llvm/Support/AlignOf.h" +#include "llvm/Support/ErrorHandling.h" #include <cassert> namespace llvm { @@ -52,7 +53,7 @@ struct ilist_traits<RecyclerStruct> : static void noteHead(RecyclerStruct*, RecyclerStruct*) {} static void deleteNode(RecyclerStruct *) { - assert(0 && "Recycler's ilist_traits shouldn't see a deleteNode call!"); + llvm_unreachable("Recycler's ilist_traits shouldn't see a deleteNode call!"); } }; diff --git a/include/llvm/Support/SaveAndRestore.h b/include/llvm/Support/SaveAndRestore.h new file mode 100644 index 0000000..ffa99b9 --- /dev/null +++ b/include/llvm/Support/SaveAndRestore.h @@ -0,0 +1,47 @@ +//===-- SaveAndRestore.h - Utility -------------------------------*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides utility classes that uses RAII to save and restore +// values. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_SAVERESTORE +#define LLVM_ADT_SAVERESTORE + +namespace llvm { + +// SaveAndRestore - A utility class that uses RAII to save and restore +// the value of a variable. +template<typename T> +struct SaveAndRestore { + SaveAndRestore(T& x) : X(x), old_value(x) {} + SaveAndRestore(T& x, const T &new_value) : X(x), old_value(x) { + X = new_value; + } + ~SaveAndRestore() { X = old_value; } + T get() { return old_value; } +private: + T& X; + T old_value; +}; + +// SaveOr - Similar to SaveAndRestore. 
Operates only on bools; the old +// value of a variable is saved, and during the dstor the old value is +// or'ed with the new value. +struct SaveOr { + SaveOr(bool& x) : X(x), old_value(x) { x = false; } + ~SaveOr() { X |= old_value; } +private: + bool& X; + const bool old_value; +}; + +} +#endif diff --git a/include/llvm/Support/StreamableMemoryObject.h b/include/llvm/Support/StreamableMemoryObject.h new file mode 100644 index 0000000..531dbb2 --- /dev/null +++ b/include/llvm/Support/StreamableMemoryObject.h @@ -0,0 +1,181 @@ +//===- StreamableMemoryObject.h - Streamable data interface -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + +#ifndef STREAMABLEMEMORYOBJECT_H_ +#define STREAMABLEMEMORYOBJECT_H_ + +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/MemoryObject.h" +#include "llvm/Support/DataStream.h" +#include <vector> + +namespace llvm { + +/// StreamableMemoryObject - Interface to data which might be streamed. +/// Streamability has 2 important implications/restrictions. First, the data +/// might not yet exist in memory when the request is made. This just means +/// that readByte/readBytes might have to block or do some work to get it. +/// More significantly, the exact size of the object might not be known until +/// it has all been fetched. This means that to return the right result, +/// getExtent must also wait for all the data to arrive; therefore it should +/// not be called on objects which are actually streamed (this would defeat +/// the purpose of streaming). Instead, isValidAddress and isObjectEnd can be +/// used to test addresses without knowing the exact size of the stream. +/// Finally, getPointer can be used instead of readBytes to avoid extra copying. 
+class StreamableMemoryObject : public MemoryObject { + public: + /// Destructor - Override as necessary. + virtual ~StreamableMemoryObject(); + + /// getBase - Returns the lowest valid address in the region. + /// + /// @result - The lowest valid address. + virtual uint64_t getBase() const = 0; + + /// getExtent - Returns the size of the region in bytes. (The region is + /// contiguous, so the highest valid address of the region + /// is getBase() + getExtent() - 1). + /// May block until all bytes in the stream have been read + /// + /// @result - The size of the region. + virtual uint64_t getExtent() const = 0; + + /// readByte - Tries to read a single byte from the region. + /// May block until (address - base) bytes have been read + /// @param address - The address of the byte, in the same space as getBase(). + /// @param ptr - A pointer to a byte to be filled in. Must be non-NULL. + /// @result - 0 if successful; -1 if not. Failure may be due to a + /// bounds violation or an implementation-specific error. + virtual int readByte(uint64_t address, uint8_t* ptr) const = 0; + + /// readBytes - Tries to read a contiguous range of bytes from the + /// region, up to the end of the region. + /// May block until (address - base + size) bytes have + /// been read. Additionally, StreamableMemoryObjects will + /// not do partial reads - if size bytes cannot be read, + /// readBytes will fail. + /// + /// @param address - The address of the first byte, in the same space as + /// getBase(). + /// @param size - The maximum number of bytes to copy. + /// @param buf - A pointer to a buffer to be filled in. Must be non-NULL + /// and large enough to hold size bytes. + /// @param copied - A pointer to a nunber that is filled in with the number + /// of bytes actually read. May be NULL. + /// @result - 0 if successful; -1 if not. Failure may be due to a + /// bounds violation or an implementation-specific error. 
+ virtual int readBytes(uint64_t address, + uint64_t size, + uint8_t* buf, + uint64_t* copied) const = 0; + + /// getPointer - Ensures that the requested data is in memory, and returns + /// A pointer to it. More efficient than using readBytes if the + /// data is already in memory. + /// May block until (address - base + size) bytes have been read + /// @param address - address of the byte, in the same space as getBase() + /// @param size - amount of data that must be available on return + /// @result - valid pointer to the requested data + virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const = 0; + + /// isValidAddress - Returns true if the address is within the object + /// (i.e. between base and base + extent - 1 inclusive) + /// May block until (address - base) bytes have been read + /// @param address - address of the byte, in the same space as getBase() + /// @result - true if the address may be read with readByte() + virtual bool isValidAddress(uint64_t address) const = 0; + + /// isObjectEnd - Returns true if the address is one past the end of the + /// object (i.e. if it is equal to base + extent) + /// May block until (address - base) bytes have been read + /// @param address - address of the byte, in the same space as getBase() + /// @result - true if the address is equal to base + extent + virtual bool isObjectEnd(uint64_t address) const = 0; +}; + +/// StreamingMemoryObject - interface to data which is actually streamed from +/// a DataStreamer. In addition to inherited members, it has the +/// dropLeadingBytes and setKnownObjectSize methods which are not applicable +/// to non-streamed objects. 
+class StreamingMemoryObject : public StreamableMemoryObject { +public: + StreamingMemoryObject(DataStreamer *streamer); + virtual uint64_t getBase() const { return 0; } + virtual uint64_t getExtent() const; + virtual int readByte(uint64_t address, uint8_t* ptr) const; + virtual int readBytes(uint64_t address, + uint64_t size, + uint8_t* buf, + uint64_t* copied) const ; + virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const { + // This could be fixed by ensuring the bytes are fetched and making a copy, + // requiring that the bitcode size be known, or otherwise ensuring that + // the memory doesn't go away/get reallocated, but it's + // not currently necessary. Users that need the pointer don't stream. + assert(0 && "getPointer in streaming memory objects not allowed"); + return NULL; + } + virtual bool isValidAddress(uint64_t address) const; + virtual bool isObjectEnd(uint64_t address) const; + + /// Drop s bytes from the front of the stream, pushing the positions of the + /// remaining bytes down by s. This is used to skip past the bitcode header, + /// since we don't know a priori if it's present, and we can't put bytes + /// back into the stream once we've read them. + bool dropLeadingBytes(size_t s); + + /// If the data object size is known in advance, many of the operations can + /// be made more efficient, so this method should be called before reading + /// starts (although it can be called anytime). + void setKnownObjectSize(size_t size); + +private: + const static uint32_t kChunkSize = 4096 * 4; + mutable std::vector<unsigned char> Bytes; + OwningPtr<DataStreamer> Streamer; + mutable size_t BytesRead; // Bytes read from stream + size_t BytesSkipped;// Bytes skipped at start of stream (e.g. wrapper/header) + mutable size_t ObjectSize; // 0 if unknown, set if wrapper seen or EOF reached + mutable bool EOFReached; + + // Fetch enough bytes such that Pos can be read or EOF is reached + // (i.e. BytesRead > Pos). 
Return true if Pos can be read. + // Unlike most of the functions in BitcodeReader, returns true on success. + // Most of the requests will be small, but we fetch at kChunkSize bytes + // at a time to avoid making too many potentially expensive GetBytes calls + bool fetchToPos(size_t Pos) const { + if (EOFReached) return Pos < ObjectSize; + while (Pos >= BytesRead) { + Bytes.resize(BytesRead + BytesSkipped + kChunkSize); + size_t bytes = Streamer->GetBytes(&Bytes[BytesRead + BytesSkipped], + kChunkSize); + BytesRead += bytes; + if (bytes < kChunkSize) { + if (ObjectSize && BytesRead < Pos) + assert(0 && "Unexpected short read fetching bitcode"); + if (BytesRead <= Pos) { // reached EOF/ran out of bytes + ObjectSize = BytesRead; + EOFReached = true; + return false; + } + } + } + return true; + } + + StreamingMemoryObject(const StreamingMemoryObject&); // DO NOT IMPLEMENT + void operator=(const StreamingMemoryObject&); // DO NOT IMPLEMENT +}; + +StreamableMemoryObject *getNonStreamedMemoryObject( + const unsigned char *Start, const unsigned char *End); + +} +#endif // STREAMABLEMEMORYOBJECT_H_ diff --git a/include/llvm/Support/TargetRegistry.h b/include/llvm/Support/TargetRegistry.h index ea55c91..93bdbca 100644 --- a/include/llvm/Support/TargetRegistry.h +++ b/include/llvm/Support/TargetRegistry.h @@ -104,6 +104,7 @@ namespace llvm { typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T, unsigned SyntaxVariant, const MCAsmInfo &MAI, + const MCRegisterInfo &MRI, const MCSubtargetInfo &STI); typedef MCCodeEmitter *(*MCCodeEmitterCtorTy)(const MCInstrInfo &II, const MCSubtargetInfo &STI, @@ -392,10 +393,11 @@ namespace llvm { MCInstPrinter *createMCInstPrinter(unsigned SyntaxVariant, const MCAsmInfo &MAI, + const MCRegisterInfo &MRI, const MCSubtargetInfo &STI) const { if (!MCInstPrinterCtorFn) return 0; - return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, STI); + return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, MRI, STI); } @@ -786,7 +788,7 @@ 
namespace llvm { /// extern "C" void LLVMInitializeFooTargetInfo() { /// RegisterTarget<Triple::foo> X(TheFooTarget, "foo", "Foo description"); /// } - template<Triple::ArchType TargetArchType = Triple::InvalidArch, + template<Triple::ArchType TargetArchType = Triple::UnknownArch, bool HasJIT = false> struct RegisterTarget { RegisterTarget(Target &T, const char *Name, const char *Desc) { diff --git a/include/llvm/Support/system_error.h b/include/llvm/Support/system_error.h index 2c15b69..af81206 100644 --- a/include/llvm/Support/system_error.h +++ b/include/llvm/Support/system_error.h @@ -470,17 +470,6 @@ template <> struct hash<std::error_code>; namespace llvm { -template <class T, T v> -struct integral_constant { - typedef T value_type; - static const value_type value = v; - typedef integral_constant<T,v> type; - operator value_type() { return value; } -}; - -typedef integral_constant<bool, true> true_type; -typedef integral_constant<bool, false> false_type; - // is_error_code_enum template <class Tp> struct is_error_code_enum : public false_type {}; @@ -738,6 +727,10 @@ class error_code { public: error_code() : _val_(0), _cat_(&system_category()) {} + static error_code success() { + return error_code(); + } + error_code(int _val, const error_category& _cat) : _val_(_val), _cat_(&_cat) {} diff --git a/include/llvm/Support/type_traits.h b/include/llvm/Support/type_traits.h index 515295b..85d90b1 100644 --- a/include/llvm/Support/type_traits.h +++ b/include/llvm/Support/type_traits.h @@ -17,6 +17,8 @@ #ifndef LLVM_SUPPORT_TYPE_TRAITS_H #define LLVM_SUPPORT_TYPE_TRAITS_H +#include "llvm/Support/DataTypes.h" +#include <cstddef> #include <utility> // This is actually the conforming implementation which works with abstract @@ -68,17 +70,62 @@ struct isPodLike<std::pair<T, U> > { }; +template <class T, T v> +struct integral_constant { + typedef T value_type; + static const value_type value = v; + typedef integral_constant<T,v> type; + operator value_type() { return 
value; } +}; + +typedef integral_constant<bool, true> true_type; +typedef integral_constant<bool, false> false_type; + /// \brief Metafunction that determines whether the two given types are /// equivalent. -template<typename T, typename U> -struct is_same { - static const bool value = false; -}; +template<typename T, typename U> struct is_same : public false_type {}; +template<typename T> struct is_same<T, T> : public true_type {}; + +/// \brief Metafunction that removes const qualification from a type. +template <typename T> struct remove_const { typedef T type; }; +template <typename T> struct remove_const<const T> { typedef T type; }; -template<typename T> -struct is_same<T, T> { - static const bool value = true; +/// \brief Metafunction that removes volatile qualification from a type. +template <typename T> struct remove_volatile { typedef T type; }; +template <typename T> struct remove_volatile<volatile T> { typedef T type; }; + +/// \brief Metafunction that removes both const and volatile qualification from +/// a type. +template <typename T> struct remove_cv { + typedef typename remove_const<typename remove_volatile<T>::type>::type type; }; + +/// \brief Helper to implement is_integral metafunction. 
+template <typename T> struct is_integral_impl : false_type {}; +template <> struct is_integral_impl< bool> : true_type {}; +template <> struct is_integral_impl< char> : true_type {}; +template <> struct is_integral_impl< signed char> : true_type {}; +template <> struct is_integral_impl<unsigned char> : true_type {}; +template <> struct is_integral_impl< wchar_t> : true_type {}; +template <> struct is_integral_impl< short> : true_type {}; +template <> struct is_integral_impl<unsigned short> : true_type {}; +template <> struct is_integral_impl< int> : true_type {}; +template <> struct is_integral_impl<unsigned int> : true_type {}; +template <> struct is_integral_impl< long> : true_type {}; +template <> struct is_integral_impl<unsigned long> : true_type {}; +template <> struct is_integral_impl< long long> : true_type {}; +template <> struct is_integral_impl<unsigned long long> : true_type {}; + +/// \brief Metafunction that determines whether the given type is an integral +/// type. +template <typename T> +struct is_integral : is_integral_impl<T> {}; + +/// \brief Metafunction that determines whether the given type is a pointer +/// type. 
+template <typename T> struct is_pointer : false_type {}; +template <typename T> struct is_pointer<T*> : true_type {}; + // enable_if_c - Enable/disable a template based on a metafunction template<bool Cond, typename T = void> diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h index 0cf1725..5e68c10 100644 --- a/include/llvm/TableGen/Record.h +++ b/include/llvm/TableGen/Record.h @@ -20,6 +20,7 @@ #include "llvm/Support/Allocator.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include <map> @@ -32,7 +33,6 @@ class BitsRecTy; class IntRecTy; class StringRecTy; class ListRecTy; -class CodeRecTy; class DagRecTy; class RecordRecTy; @@ -43,7 +43,6 @@ class BitInit; class BitsInit; class IntInit; class StringInit; -class CodeInit; class ListInit; class UnOpInit; class BinOpInit; @@ -68,6 +67,7 @@ class RecordKeeper; class RecTy { ListRecTy *ListTy; + virtual void anchor(); public: RecTy() : ListTy(0) {} virtual ~RecTy() {} @@ -99,7 +99,6 @@ public: // These methods should only be called from subclasses of Init virtual Init *convertValue( TernOpInit *UI) { return convertValue((TypedInit*)UI); } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -119,7 +118,6 @@ public: // These methods should only be called by subclasses of RecTy. 
virtual bool baseClassOf(const IntRecTy *RHS) const { return false; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } }; @@ -144,7 +142,6 @@ public: virtual Init *convertValue( IntInit *II); virtual Init *convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return (Init*)VB; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -165,7 +162,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return true; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } @@ -189,7 +185,6 @@ public: virtual Init *convertValue( IntInit *II); virtual Init *convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -212,7 +207,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return true; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy 
*RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } @@ -233,7 +227,6 @@ public: virtual Init *convertValue( IntInit *II) { return (Init*)II; } virtual Init *convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -255,7 +248,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return true; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } @@ -279,7 +271,6 @@ public: virtual Init *convertValue( BinOpInit *BO); virtual Init *convertValue( TernOpInit *BO) { return RecTy::convertValue(BO);} - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -298,7 +289,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return false; } virtual bool baseClassOf(const StringRecTy *RHS) const { return true; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } }; @@ -322,7 +312,6 @@ public: virtual Init *convertValue( IntInit *II) { return 0; } virtual Init 
*convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI); - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( DagInit *DI) { return 0; } @@ -346,47 +335,6 @@ public: virtual bool baseClassOf(const ListRecTy *RHS) const { return RHS->getElementType()->typeIsConvertibleTo(Ty); } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } - virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } - virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } -}; - -/// CodeRecTy - 'code' - Represent an code fragment, function or method. -/// -class CodeRecTy : public RecTy { - static CodeRecTy Shared; - CodeRecTy() {} -public: - static CodeRecTy *get() { return &Shared; } - - virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; } - virtual Init *convertValue( BitInit *BI) { return 0; } - virtual Init *convertValue( BitsInit *BI) { return 0; } - virtual Init *convertValue( IntInit *II) { return 0; } - virtual Init *convertValue(StringInit *SI) { return 0; } - virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return (Init*)CI; } - virtual Init *convertValue(VarBitInit *VB) { return 0; } - virtual Init *convertValue( DefInit *DI) { return 0; } - virtual Init *convertValue( DagInit *DI) { return 0; } - virtual Init *convertValue( UnOpInit *UI) { return RecTy::convertValue(UI);} - virtual Init *convertValue( BinOpInit *UI) { return RecTy::convertValue(UI);} - virtual Init *convertValue( TernOpInit *UI) { return RecTy::convertValue(UI);} - virtual Init *convertValue( TypedInit *TI); - virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} - virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - - std::string getAsString() const { return "code"; } - - bool 
typeIsConvertibleTo(const RecTy *RHS) const { - return RHS->baseClassOf(this); - } - virtual bool baseClassOf(const BitRecTy *RHS) const { return false; } - virtual bool baseClassOf(const BitsRecTy *RHS) const { return false; } - virtual bool baseClassOf(const IntRecTy *RHS) const { return false; } - virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } - virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return true; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } }; @@ -405,7 +353,6 @@ public: virtual Init *convertValue( IntInit *II) { return 0; } virtual Init *convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( DefInit *DI) { return 0; } virtual Init *convertValue( UnOpInit *BO); @@ -427,7 +374,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return false; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return true; } virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; } }; @@ -451,7 +397,6 @@ public: virtual Init *convertValue( IntInit *II) { return 0; } virtual Init *convertValue(StringInit *SI) { return 0; } virtual Init *convertValue( ListInit *LI) { return 0; } - virtual Init *convertValue( CodeInit *CI) { return 0; } virtual Init *convertValue(VarBitInit *VB) { return 0; } virtual Init *convertValue( UnOpInit *UI) { return RecTy::convertValue(UI);} virtual Init *convertValue( BinOpInit *UI) { return 
RecTy::convertValue(UI);} @@ -472,7 +417,6 @@ public: virtual bool baseClassOf(const IntRecTy *RHS) const { return false; } virtual bool baseClassOf(const StringRecTy *RHS) const { return false; } virtual bool baseClassOf(const ListRecTy *RHS) const { return false; } - virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; } virtual bool baseClassOf(const DagRecTy *RHS) const { return false; } virtual bool baseClassOf(const RecordRecTy *RHS) const; }; @@ -489,6 +433,7 @@ RecTy *resolveTypes(RecTy *T1, RecTy *T2); class Init { Init(const Init &); // Do not define. Init &operator=(const Init &); // Do not define. + virtual void anchor(); protected: Init(void) {} @@ -617,6 +562,7 @@ class UnsetInit : public Init { UnsetInit() : Init() {} UnsetInit(const UnsetInit &); // Do not define. UnsetInit &operator=(const UnsetInit &Other); // Do not define. + virtual void anchor(); public: static UnsetInit *get(); @@ -638,6 +584,7 @@ class BitInit : public Init { explicit BitInit(bool V) : Value(V) {} BitInit(const BitInit &Other); // Do not define. BitInit &operator=(BitInit &Other); // Do not define. + virtual void anchor(); public: static BitInit *get(bool V); @@ -725,8 +672,7 @@ public: /// virtual Init *resolveBitReference(Record &R, const RecordVal *RV, unsigned Bit) const { - assert(0 && "Illegal bit reference off int"); - return 0; + llvm_unreachable("Illegal bit reference off int"); } /// resolveListElementReference - This method is used to implement @@ -734,8 +680,7 @@ public: /// now, we return the resolved value, otherwise we return null. virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const { - assert(0 && "Illegal element reference off int"); - return 0; + llvm_unreachable("Illegal element reference off int"); } }; @@ -750,9 +695,10 @@ class StringInit : public TypedInit { StringInit(const StringInit &Other); // Do not define. StringInit &operator=(const StringInit &Other); // Do not define. 
+ virtual void anchor(); public: - static StringInit *get(const std::string &V); + static StringInit *get(StringRef); const std::string &getValue() const { return Value; } @@ -769,8 +715,7 @@ public: /// virtual Init *resolveBitReference(Record &R, const RecordVal *RV, unsigned Bit) const { - assert(0 && "Illegal bit reference off string"); - return 0; + llvm_unreachable("Illegal bit reference off string"); } /// resolveListElementReference - This method is used to implement @@ -778,31 +723,8 @@ public: /// now, we return the resolved value, otherwise we return null. virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const { - assert(0 && "Illegal element reference off string"); - return 0; - } -}; - -/// CodeInit - "[{...}]" - Represent a code fragment. -/// -class CodeInit : public Init { - std::string Value; - - explicit CodeInit(const std::string &V) : Value(V) {} - - CodeInit(const CodeInit &Other); // Do not define. - CodeInit &operator=(const CodeInit &Other); // Do not define. 
- -public: - static CodeInit *get(const std::string &V); - - const std::string &getValue() const { return Value; } - - virtual Init *convertInitializerTo(RecTy *Ty) const { - return Ty->convertValue(const_cast<CodeInit *>(this)); + llvm_unreachable("Illegal element reference off string"); } - - virtual std::string getAsString() const { return "[{" + Value + "}]"; } }; /// ListInit - [AL, AH, CL] - Represent a list of defs @@ -861,8 +783,7 @@ public: /// virtual Init *resolveBitReference(Record &R, const RecordVal *RV, unsigned Bit) const { - assert(0 && "Illegal bit reference off list"); - return 0; + llvm_unreachable("Illegal bit reference off list"); } /// resolveListElementReference - This method is used to implement @@ -1207,8 +1128,7 @@ public: /// virtual Init *resolveBitReference(Record &R, const RecordVal *RV, unsigned Bit) const { - assert(0 && "Illegal bit reference off def"); - return 0; + llvm_unreachable("Illegal bit reference off def"); } /// resolveListElementReference - This method is used to implement @@ -1216,8 +1136,7 @@ public: /// now, we return the resolved value, otherwise we return null. 
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const { - assert(0 && "Illegal element reference off def"); - return 0; + llvm_unreachable("Illegal element reference off def"); } }; @@ -1326,14 +1245,12 @@ public: virtual Init *resolveBitReference(Record &R, const RecordVal *RV, unsigned Bit) const { - assert(0 && "Illegal bit reference off dag"); - return 0; + llvm_unreachable("Illegal bit reference off dag"); } virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const { - assert(0 && "Illegal element reference off dag"); - return 0; + llvm_unreachable("Illegal element reference off dag"); } }; @@ -1448,19 +1365,22 @@ public: return isTemplateArg(StringInit::get(Name.str())); } - const RecordVal *getValue(StringRef Name) const { + const RecordVal *getValue(const Init *Name) const { for (unsigned i = 0, e = Values.size(); i != e; ++i) - if (Values[i].getName() == Name) return &Values[i]; + if (Values[i].getNameInit() == Name) return &Values[i]; return 0; } - RecordVal *getValue(StringRef Name) { + const RecordVal *getValue(StringRef Name) const { + return getValue(StringInit::get(Name)); + } + RecordVal *getValue(const Init *Name) { for (unsigned i = 0, e = Values.size(); i != e; ++i) - if (Values[i].getName() == Name) return &Values[i]; + if (Values[i].getNameInit() == Name) return &Values[i]; return 0; } - - const RecordVal *getValue(Init *Name) const; - RecordVal *getValue(Init *Name); + RecordVal *getValue(StringRef Name) { + return getValue(StringInit::get(Name)); + } void addTemplateArg(Init *Name) { assert(!isTemplateArg(Name) && "Template arg already defined!"); @@ -1471,7 +1391,7 @@ public: } void addValue(const RecordVal &RV) { - assert(getValue(RV.getName()) == 0 && "Value already added!"); + assert(getValue(RV.getNameInit()) == 0 && "Value already added!"); Values.push_back(RV); if (Values.size() > 1) // Keep NAME at the end of the list. 
It makes record dumps a @@ -1488,7 +1408,7 @@ public: Values.erase(Values.begin()+i); return; } - assert(0 && "Cannot remove an entry that does not exist!"); + llvm_unreachable("Cannot remove an entry that does not exist!"); } void removeValue(StringRef Name) { @@ -1598,12 +1518,6 @@ public: /// the value is not the right type. /// DagInit *getValueAsDag(StringRef FieldName) const; - - /// getValueAsCode - This method looks up the specified field and returns - /// its value as the string data in a CodeInit, throwing an exception if the - /// field does not exist or if the value is not a code object. - /// - std::string getValueAsCode(StringRef FieldName) const; }; raw_ostream &operator<<(raw_ostream &OS, const Record &R); @@ -1621,6 +1535,7 @@ struct MultiClass { class RecordKeeper { std::map<std::string, Record*> Classes, Defs; + public: ~RecordKeeper() { for (std::map<std::string, Record*>::iterator I = Classes.begin(), diff --git a/include/llvm/TableGen/TableGenAction.h b/include/llvm/TableGen/TableGenAction.h index 9f1c23c..733ae62 100644 --- a/include/llvm/TableGen/TableGenAction.h +++ b/include/llvm/TableGen/TableGenAction.h @@ -21,6 +21,7 @@ class raw_ostream; class RecordKeeper; class TableGenAction { + virtual void anchor(); public: virtual ~TableGenAction() {} diff --git a/include/llvm/TableGen/TableGenBackend.h b/include/llvm/TableGen/TableGenBackend.h index 853f92e..3ebcd92 100644 --- a/include/llvm/TableGen/TableGenBackend.h +++ b/include/llvm/TableGen/TableGenBackend.h @@ -16,7 +16,6 @@ #define LLVM_TABLEGEN_TABLEGENBACKEND_H #include "llvm/Support/raw_ostream.h" -#include <string> namespace llvm { @@ -24,6 +23,7 @@ class Record; class RecordKeeper; struct TableGenBackend { + virtual void anchor(); virtual ~TableGenBackend() {} // run - All TableGen backends should implement the run method, which should @@ -34,7 +34,7 @@ struct TableGenBackend { public: // Useful helper routines... 
/// EmitSourceFileHeader - Output a LLVM style file header to the specified /// ostream. - void EmitSourceFileHeader(const std::string &Desc, raw_ostream &OS) const; + void EmitSourceFileHeader(StringRef Desc, raw_ostream &OS) const; }; diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index c9b6a8e..fa1ec55 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -22,8 +22,12 @@ include "llvm/Intrinsics.td" class RegisterClass; // Forward def // SubRegIndex - Use instances of SubRegIndex to identify subregisters. -class SubRegIndex { +class SubRegIndex<list<SubRegIndex> comps = []> { string Namespace = ""; + + // ComposedOf - A list of two SubRegIndex instances, [A, B]. + // This indicates that this SubRegIndex is the result of composing A and B. + list<SubRegIndex> ComposedOf = comps; } // RegAltNameIndex - The alternate name set to use for register operands of @@ -86,6 +90,12 @@ class Register<string n, list<string> altNames = []> { // This is used by the x86-64 and ARM Thumb targets where some registers // require larger instruction encodings. int CostPerUse = 0; + + // CoveredBySubRegs - When this bit is set, the value of this register is + // completely determined by the value of its sub-registers. For example, the + // x86 register AX is covered by its sub-registers AL and AH, but EAX is not + // covered by its sub-register AX. + bit CoveredBySubRegs = 0; } // RegisterWithSubRegs - This can be used to define instances of Register which @@ -194,12 +204,15 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment, // // (decimate GPR, 2) - Pick every N'th element, starting with the first. // +// (interleave A, B, ...) - Interleave the elements from each argument list. +// // All of these operators work on ordered sets, not lists. That means // duplicates are removed from sub-expressions. // Set operators. The rest is defined in TargetSelectionDAG.td. 
def sequence; def decimate; +def interleave; // RegisterTuples - Automatically generate super-registers by forming tuples of // sub-registers. This is useful for modeling register sequence constraints @@ -712,7 +725,15 @@ class AsmParser { // function of the AsmParser class to call on every matched instruction. // This can be used to perform target specific instruction post-processing. string AsmParserInstCleanup = ""; +} +def DefaultAsmParser : AsmParser; +//===----------------------------------------------------------------------===// +// AsmParserVariant - Subtargets can have multiple different assembly parsers +// (e.g. AT&T vs Intel syntax on X86 for example). This class can be +// implemented by targets to describe such variants. +// +class AsmParserVariant { // Variant - AsmParsers can be of multiple different variants. Variants are // used to support targets that need to parser multiple formats for the // assembly language. @@ -729,7 +750,7 @@ class AsmParser { // purposes of matching. string RegisterPrefix = ""; } -def DefaultAsmParser : AsmParser; +def DefaultAsmParserVariant : AsmParserVariant; /// AssemblerPredicate - This is a Predicate that can be used when the assembler /// matches instructions and aliases. @@ -840,6 +861,10 @@ class Target { // AssemblyParsers - The AsmParser instances available for this target. list<AsmParser> AssemblyParsers = [DefaultAsmParser]; + /// AssemblyParserVariants - The AsmParserVariant instances available for + /// this target. + list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant]; + // AssemblyWriters - The AsmWriter instances available for this target. 
list<AsmWriter> AssemblyWriters = [DefaultAsmWriter]; } diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td index 6da3ba1..a53ed29 100644 --- a/include/llvm/Target/TargetCallingConv.td +++ b/include/llvm/Target/TargetCallingConv.td @@ -133,3 +133,14 @@ class CCDelegateTo<CallingConv cc> : CCAction { class CallingConv<list<CCAction> actions> { list<CCAction> Actions = actions; } + +/// CalleeSavedRegs - A list of callee saved registers for a given calling +/// convention. The order of registers is used by PrologEpilogInsertion when +/// allocation stack slots for saved registers. +/// +/// For each CalleeSavedRegs def, TableGen will emit a FOO_SaveList array for +/// returning from getCalleeSavedRegs(), and a FOO_RegMask bit mask suitable for +/// returning from getCallPreservedMask(). +class CalleeSavedRegs<dag saves> { + dag SaveList = saves; +} diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h index 8409229..d1e380c 100644 --- a/include/llvm/Target/TargetInstrInfo.h +++ b/include/llvm/Target/TargetInstrInfo.h @@ -15,6 +15,7 @@ #define LLVM_TARGET_TARGETINSTRINFO_H #include "llvm/MC/MCInstrInfo.h" +#include "llvm/CodeGen/DFAPacketizer.h" #include "llvm/CodeGen/MachineFunction.h" namespace llvm { @@ -278,8 +279,7 @@ public: /// This is only invoked in cases where AnalyzeBranch returns success. It /// returns the number of instructions that were removed. 
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const { - assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!"); - return 0; + llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!"); } /// InsertBranch - Insert branch code into the end of the specified @@ -296,8 +296,7 @@ public: MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const { - assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!"); - return 0; + llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!"); } /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything @@ -353,12 +352,28 @@ public: return false; } + /// isProfitableToUnpredicate - Return true if it's profitable to unpredicate + /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually + /// exclusive predicates. + /// e.g. + /// subeq r0, r1, #1 + /// addne r0, r1, #1 + /// => + /// sub r0, r1, #1 + /// addne r0, r1, #1 + /// + /// This may be profitable is conditional instructions are always executed. + virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, + MachineBasicBlock &FMBB) const { + return false; + } + /// copyPhysReg - Emit instructions to copy a pair of physical registers. 
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { - assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!"); + llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); } /// storeRegToStackSlot - Store the specified register of the given register @@ -371,7 +386,8 @@ public: unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!"); + llvm_unreachable("Target didn't implement " + "TargetInstrInfo::storeRegToStackSlot!"); } /// loadRegFromStackSlot - Load the specified register of the given register @@ -383,7 +399,8 @@ public: unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!"); + llvm_unreachable("Target didn't implement " + "TargetInstrInfo::loadRegFromStackSlot!"); } /// expandPostRAPseudo - This function is called for all pseudo instructions @@ -646,7 +663,7 @@ public: virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, - SDNode *UseNode, unsigned UseIdx) const; + SDNode *UseNode, unsigned UseIdx) const = 0; /// getOutputLatency - Compute and return the output dependency latency of a /// a given pair of defs which both target the same register. This is usually @@ -665,7 +682,7 @@ public: unsigned *PredCost = 0) const; virtual int getInstrLatency(const InstrItineraryData *ItinData, - SDNode *Node) const; + SDNode *Node) const = 0; /// isHighLatencyDef - Return true if this opcode has high latency to its /// result. @@ -795,6 +812,12 @@ public: breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {} + /// Create machine specific model for scheduling. 
+ virtual DFAPacketizer* + CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const { + return NULL; + } + private: int CallFrameSetupOpcode, CallFrameDestroyOpcode; }; @@ -839,6 +862,13 @@ public: virtual bool isSchedulingBoundary(const MachineInstr *MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const; + using TargetInstrInfo::getOperandLatency; + virtual int getOperandLatency(const InstrItineraryData *ItinData, + SDNode *DefNode, unsigned DefIdx, + SDNode *UseNode, unsigned UseIdx) const; + using TargetInstrInfo::getInstrLatency; + virtual int getInstrLatency(const InstrItineraryData *ItinData, + SDNode *Node) const; bool usePreRAHazardRecognizer() const; diff --git a/include/llvm/Target/TargetJITInfo.h b/include/llvm/Target/TargetJITInfo.h index b198eb6..80b78f8 100644 --- a/include/llvm/Target/TargetJITInfo.h +++ b/include/llvm/Target/TargetJITInfo.h @@ -30,6 +30,7 @@ namespace llvm { /// TargetJITInfo - Target specific information required by the Just-In-Time /// code generator. class TargetJITInfo { + virtual void anchor(); public: virtual ~TargetJITInfo() {} @@ -45,8 +46,8 @@ namespace llvm { /// ptr. virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, JITCodeEmitter &JCE) { - assert(0 && "This target doesn't implement emitGlobalValueIndirectSym!"); - return 0; + llvm_unreachable("This target doesn't implement " + "emitGlobalValueIndirectSym!"); } /// Records the required size and alignment for a call stub in bytes. @@ -57,8 +58,6 @@ namespace llvm { /// Returns the maximum size and alignment for a call stub on this target. virtual StubLayout getStubLayout() { llvm_unreachable("This target doesn't implement getStubLayout!"); - StubLayout Result = {0, 0}; - return Result; } /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a @@ -68,15 +67,13 @@ namespace llvm { /// aligned from the address the JCE was set up to emit at. 
virtual void *emitFunctionStub(const Function* F, void *Target, JITCodeEmitter &JCE) { - assert(0 && "This target doesn't implement emitFunctionStub!"); - return 0; + llvm_unreachable("This target doesn't implement emitFunctionStub!"); } /// getPICJumpTableEntry - Returns the value of the jumptable entry for the /// specific basic block. virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) { - assert(0 && "This target doesn't implement getPICJumpTableEntry!"); - return 0; + llvm_unreachable("This target doesn't implement getPICJumpTableEntry!"); } /// LazyResolverFn - This typedef is used to represent the function that @@ -97,8 +94,7 @@ namespace llvm { /// function, and giving the JIT the target function used to do the lazy /// resolving. virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn) { - assert(0 && "Not implemented for this target!"); - return 0; + llvm_unreachable("Not implemented for this target!"); } /// relocate - Before the JIT can run a block of code that has been emitted, @@ -114,8 +110,7 @@ namespace llvm { /// handling thread local variables. This method returns a value only /// meaningful to the target. virtual char* allocateThreadLocalMemory(size_t size) { - assert(0 && "This target does not implement thread local storage!"); - return 0; + llvm_unreachable("This target does not implement thread local storage!"); } /// needsGOT - Allows a target to specify that it would like the diff --git a/include/llvm/Target/TargetLibraryInfo.h b/include/llvm/Target/TargetLibraryInfo.h index 0d7a949..70e26bf 100644 --- a/include/llvm/Target/TargetLibraryInfo.h +++ b/include/llvm/Target/TargetLibraryInfo.h @@ -199,6 +199,15 @@ namespace llvm { truncf, /// long double truncl(long double x); truncl, + /// int __cxa_atexit(void (*f)(void *), void *p, void *d); + cxa_atexit, + /// void __cxa_guard_abort(guard_t *guard); + /// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi. 
+ cxa_guard_abort, + /// int __cxa_guard_acquire(guard_t *guard); + cxa_guard_acquire, + /// void __cxa_guard_release(guard_t *guard); + cxa_guard_release, NumLibFuncs }; @@ -208,6 +217,7 @@ namespace llvm { /// library functions are available for the current target, and allows a /// frontend to disable optimizations through -fno-builtin etc. class TargetLibraryInfo : public ImmutablePass { + virtual void anchor(); unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4]; llvm::DenseMap<unsigned, std::string> CustomNames; static const char* StandardNames[LibFunc::NumLibFuncs]; diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 4b66956..4f3e432 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -53,6 +53,17 @@ namespace llvm { class TargetLoweringObjectFile; class Value; + namespace Sched { + enum Preference { + None, // No preference + Source, // Follow source order. + RegPressure, // Scheduling for lowest register pressure. + Hybrid, // Scheduling for both latency and register pressure. + ILP, // Scheduling for ILP in low register pressure mode. + VLIW // Scheduling for VLIW targets. + }; + } + // FIXME: should this be here? namespace TLSModel { enum Model { @@ -107,8 +118,6 @@ public: static ISD::NodeType getExtendForContent(BooleanContent Content) { switch (Content) { - default: - assert(false && "Unknown BooleanContent!"); case UndefinedBooleanContent: // Extend by adding rubbish bits. return ISD::ANY_EXTEND; @@ -119,6 +128,7 @@ public: // Extend by copying the sign bit. return ISD::SIGN_EXTEND; } + llvm_unreachable("Invalid content kind"); } /// NOTE: The constructor takes ownership of TLOF. @@ -191,9 +201,9 @@ public: /// getRegClassFor - Return the register class that should be used for the /// specified value type. 
- virtual TargetRegisterClass *getRegClassFor(EVT VT) const { + virtual const TargetRegisterClass *getRegClassFor(EVT VT) const { assert(VT.isSimple() && "getRegClassFor called on illegal type!"); - TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy]; + const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy]; assert(RC && "This value type is not natively supported!"); return RC; } @@ -284,11 +294,9 @@ public: VT = getTypeToTransformTo(Context, VT); break; default: - assert(false && "Type is not legal nor is it to be expanded!"); - return VT; + llvm_unreachable("Type is not legal nor is it to be expanded!"); } } - return VT; } /// getVectorTypeBreakdown - Vector types are broken down into some number of @@ -557,8 +565,7 @@ public: if (VT.isInteger()) { return getRegisterType(Context, getTypeToTransformTo(Context, VT)); } - assert(0 && "Unsupported extended type!"); - return EVT(MVT::Other); // Not reached + llvm_unreachable("Unsupported extended type!"); } /// getNumRegisters - Return the number of registers that this ValueType will @@ -583,8 +590,7 @@ public: unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); return (BitWidth + RegWidth - 1) / RegWidth; } - assert(0 && "Unsupported extended type!"); - return 0; // Not reached + llvm_unreachable("Unsupported extended type!"); } /// ShouldShrinkFPConstant - If true, then instruction selection should @@ -682,10 +688,10 @@ public: return StackPointerRegisterToSaveRestore; } - /// getExceptionAddressRegister - If a physical register, this returns + /// getExceptionPointerRegister - If a physical register, this returns /// the register that receives the exception address on entry to a landing /// pad. 
- unsigned getExceptionAddressRegister() const { + unsigned getExceptionPointerRegister() const { return ExceptionPointerRegister; } @@ -775,8 +781,7 @@ public: LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, MCContext &/*Ctx*/) const { - assert(0 && "Need to implement this hook if target has custom JTIs"); - return 0; + llvm_unreachable("Need to implement this hook if target has custom JTIs"); } /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC @@ -1038,7 +1043,7 @@ protected: /// addRegisterClass - Add the specified register class as an available /// regclass for the specified value type. This indicates the selector can /// handle values of that class natively. - void addRegisterClass(EVT VT, TargetRegisterClass *RC) { + void addRegisterClass(EVT VT, const TargetRegisterClass *RC) { assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); AvailableRegClasses.push_back(std::make_pair(VT, RC)); RegClassForVT[VT.getSimpleVT().SimpleTy] = RC; @@ -1201,8 +1206,7 @@ public: const SmallVectorImpl<ISD::InputArg> &/*Ins*/, DebugLoc /*dl*/, SelectionDAG &/*DAG*/, SmallVectorImpl<SDValue> &/*InVals*/) const { - assert(0 && "Not Implemented"); - return SDValue(); // this is here to silence compiler errors + llvm_unreachable("Not Implemented"); } /// LowerCallTo - This function lowers an abstract call to a function into an @@ -1229,7 +1233,8 @@ public: LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, bool isInreg, unsigned NumFixedArgs, CallingConv::ID CallConv, bool isTailCall, - bool isReturnValueUsed, SDValue Callee, ArgListTy &Args, + bool doesNotRet, bool isReturnValueUsed, + SDValue Callee, ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) const; /// LowerCall - This hook must be implemented to lower calls into the @@ -1241,14 +1246,13 @@ public: virtual SDValue LowerCall(SDValue /*Chain*/, SDValue /*Callee*/, CallingConv::ID 
/*CallConv*/, bool /*isVarArg*/, - bool &/*isTailCall*/, + bool /*doesNotRet*/, bool &/*isTailCall*/, const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, const SmallVectorImpl<SDValue> &/*OutVals*/, const SmallVectorImpl<ISD::InputArg> &/*Ins*/, DebugLoc /*dl*/, SelectionDAG &/*DAG*/, SmallVectorImpl<SDValue> &/*InVals*/) const { - assert(0 && "Not Implemented"); - return SDValue(); // this is here to silence compiler errors + llvm_unreachable("Not Implemented"); } /// HandleByVal - Target-specific cleanup for formal ByVal parameters. @@ -1278,8 +1282,7 @@ public: const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, const SmallVectorImpl<SDValue> &/*OutVals*/, DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const { - assert(0 && "Not Implemented"); - return SDValue(); // this is here to silence compiler errors + llvm_unreachable("Not Implemented"); } /// isUsedByReturnOnly - Return true if result of the specified node is used @@ -1344,7 +1347,7 @@ public: virtual void ReplaceNodeResults(SDNode * /*N*/, SmallVectorImpl<SDValue> &/*Results*/, SelectionDAG &/*DAG*/) const { - assert(0 && "ReplaceNodeResults not implemented for this target!"); + llvm_unreachable("ReplaceNodeResults not implemented for this target!"); } /// getTargetNodeName() - This method returns the name of a target specific @@ -1758,7 +1761,7 @@ private: /// RegClassForVT - This indicates the default register class to use for /// each ValueType the target supports natively. - TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; + const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; EVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; @@ -1930,12 +1933,9 @@ private: // Vectors with illegal element types are expanded. 
EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2); return LegalizeKind(TypeSplitVector, NVT); - - assert(false && "Unable to handle this kind of vector type"); - return LegalizeKind(TypeLegal, VT); } - std::vector<std::pair<EVT, TargetRegisterClass*> > AvailableRegClasses; + std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses; /// TargetDAGCombineArray - Targets can specify ISD nodes that they would /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h index 7d06cec..88f28a6 100644 --- a/include/llvm/Target/TargetLoweringObjectFile.h +++ b/include/llvm/Target/TargetLoweringObjectFile.h @@ -15,9 +15,11 @@ #ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H #define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H -#include "llvm/ADT/StringRef.h" +#include "llvm/Module.h" #include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/SectionKind.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/StringRef.h" namespace llvm { class MachineModuleInfo; @@ -29,6 +31,7 @@ namespace llvm { class MCSectionMachO; class MCSymbol; class MCStreamer; + class NamedMDNode; class GlobalValue; class TargetMachine; @@ -53,7 +56,13 @@ public: virtual void emitPersonalityValue(MCStreamer &Streamer, const TargetMachine &TM, const MCSymbol *Sym) const; - + + /// emitModuleFlags - Emit the module flags that the platform cares about. + virtual void emitModuleFlags(MCStreamer &, + ArrayRef<Module::ModuleFlagEntry>, + Mangler *, const TargetMachine &) const { + } + /// shouldEmitUsedDirectiveFor - This hook allows targets to selectively /// decide not to emit the UsedDirective for some symbols in llvm.used. 
/// FIXME: REMOVE this (rdar://7071300) @@ -86,9 +95,7 @@ public: const TargetMachine &TM) const { return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM); } - - - + /// getExplicitSectionGlobal - Targets should implement this method to assign /// a section to globals with an explicit section specfied. The /// implementation of this method can assume that GV->hasSection() is true. @@ -121,7 +128,18 @@ public: const MCExpr * getExprForDwarfReference(const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const; - + + virtual const MCSection * + getStaticCtorSection(unsigned Priority = 65535) const { + (void)Priority; + return StaticCtorSection; + } + virtual const MCSection * + getStaticDtorSection(unsigned Priority = 65535) const { + (void)Priority; + return StaticDtorSection; + } + protected: virtual const MCSection * SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h index c169e06..d4535db 100644 --- a/include/llvm/Target/TargetMachine.h +++ b/include/llvm/Target/TargetMachine.h @@ -38,21 +38,13 @@ class TargetInstrInfo; class TargetIntrinsicInfo; class TargetJITInfo; class TargetLowering; +class TargetPassConfig; class TargetRegisterInfo; class TargetSelectionDAGInfo; class TargetSubtargetInfo; class formatted_raw_ostream; class raw_ostream; -namespace Sched { - enum Preference { - None, // No preference - RegPressure, // Scheduling for lowest register pressure. - Hybrid, // Scheduling for both latency and register pressure. - ILP // Scheduling for ILP in low register pressure mode. - }; -} - //===----------------------------------------------------------------------===// /// /// TargetMachine - Primary interface to the complete machine description for @@ -209,6 +201,10 @@ public: /// Default, or Aggressive. 
CodeGenOpt::Level getOptLevel() const; + void setFastISel(bool Enable) { Options.EnableFastISel = Enable; } + + bool shouldPrintMachineCode() const { return Options.PrintMachineCode; } + /// getAsmVerbosityDefault - Returns the default value of asm verbosity. /// static bool getAsmVerbosityDefault(); @@ -241,10 +237,6 @@ public: CGFT_Null // Do not emit any output. }; - /// getEnableTailMergeDefault - the default setting for -enable-tail-merge - /// on this target. User flag overrides. - virtual bool getEnableTailMergeDefault() const { return true; } - /// addPassesToEmitFile - Add passes to the specified pass manager to get the /// specified file emitted. Typically this will involve several steps of code /// generation. This method should return true if emission of this file type @@ -291,24 +283,11 @@ protected: // Can only create subclasses. Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); - /// printNoVerify - Add a pass to dump the machine function, if debugging is - /// enabled. - /// - void printNoVerify(PassManagerBase &PM, const char *Banner) const; - - /// printAndVerify - Add a pass to dump then verify the machine function, if - /// those steps are enabled. - /// - void printAndVerify(PassManagerBase &PM, const char *Banner) const; - -private: - /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for - /// both emitting to assembly files or machine code output. - /// - bool addCommonCodeGenPasses(PassManagerBase &, - bool DisableVerify, MCContext *&OutCtx); - public: + /// createPassConfig - Create a pass configuration object to be used by + /// addPassToEmitX methods for generating a pipeline of CodeGen passes. + virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); + /// addPassesToEmitFile - Add passes to the specified pass manager to get the /// specified file emitted. Typically this will involve several steps of code /// generation. 
@@ -337,51 +316,6 @@ public: raw_ostream &OS, bool DisableVerify = true); - /// Target-Independent Code Generator Pass Configuration Options. - - /// addPreISelPasses - This method should add any "last minute" LLVM->LLVM - /// passes (which are run just before instruction selector). - virtual bool addPreISel(PassManagerBase &) { - return true; - } - - /// addInstSelector - This method should install an instruction selector pass, - /// which converts from LLVM code to machine instructions. - virtual bool addInstSelector(PassManagerBase &) { - return true; - } - - /// addPreRegAlloc - This method may be implemented by targets that want to - /// run passes immediately before register allocation. This should return - /// true if -print-machineinstrs should print after these passes. - virtual bool addPreRegAlloc(PassManagerBase &) { - return false; - } - - /// addPostRegAlloc - This method may be implemented by targets that want - /// to run passes after register allocation but before prolog-epilog - /// insertion. This should return true if -print-machineinstrs should print - /// after these passes. - virtual bool addPostRegAlloc(PassManagerBase &) { - return false; - } - - /// addPreSched2 - This method may be implemented by targets that want to - /// run passes after prolog-epilog insertion and before the second instruction - /// scheduling pass. This should return true if -print-machineinstrs should - /// print after these passes. - virtual bool addPreSched2(PassManagerBase &) { - return false; - } - - /// addPreEmitPass - This pass may be implemented by targets that want to run - /// passes immediately before machine code is emitted. This should return - /// true if -print-machineinstrs should print out the code after the passes. - virtual bool addPreEmitPass(PassManagerBase &) { - return false; - } - - /// addCodeEmitter - This pass should be overridden by the target to add a /// code emitter, if supported. If this is not supported, 'true' should be /// returned. 
@@ -389,10 +323,6 @@ public: JITCodeEmitter &) { return true; } - - /// getEnableTailMergeDefault - the default setting for -enable-tail-merge - /// on this target. User flag overrides. - virtual bool getEnableTailMergeDefault() const { return true; } }; } // End llvm namespace diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h index 3001816..7730ab9 100644 --- a/include/llvm/Target/TargetOptions.h +++ b/include/llvm/Target/TargetOptions.h @@ -30,10 +30,6 @@ namespace llvm { }; } - /// StrongPHIElim - This flag enables more aggressive PHI elimination - /// wth earlier copy coalescing. - extern bool StrongPHIElim; - class TargetOptions { public: TargetOptions() @@ -43,8 +39,9 @@ namespace llvm { NoNaNsFPMath(false), HonorSignDependentRoundingFPMathOption(false), UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false), JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false), - GuaranteedTailCallOpt(false), StackAlignmentOverride(0), - RealignStack(true), DisableJumpTables(false), EnableFastISel(false), + GuaranteedTailCallOpt(false), DisableTailCalls(false), + StackAlignmentOverride(0), RealignStack(true), + DisableJumpTables(false), EnableFastISel(false), EnableSegmentedStacks(false), TrapFuncName(""), FloatABIType(FloatABI::Default) {} @@ -114,7 +111,7 @@ namespace llvm { /// assume that the rounding mode may dynamically change. unsigned HonorSignDependentRoundingFPMathOption : 1; bool HonorSignDependentRoundingFPMath() const; - + /// UseSoftFloat - This flag is enabled when the -soft-float flag is /// specified on the command line. When this flag is on, the code generator /// will generate libcalls to the software floating point library instead of @@ -147,6 +144,10 @@ namespace llvm { /// as their parent function, etc.), using an alternate ABI if necessary. unsigned GuaranteedTailCallOpt : 1; + /// DisableTailCalls - This flag controls whether we will use tail calls. 
+ /// Disabling them may be useful to maintain a correct call stack. + unsigned DisableTailCalls : 1; + /// StackAlignmentOverride - Override default stack alignment for target. unsigned StackAlignmentOverride; @@ -154,7 +155,7 @@ namespace llvm { /// automatically realigned, if needed. unsigned RealignStack : 1; - /// DisableJumpTables - This flag indicates jump tables should not be + /// DisableJumpTables - This flag indicates jump tables should not be /// generated. unsigned DisableJumpTables : 1; @@ -162,7 +163,7 @@ namespace llvm { /// which trades away generated code quality in favor of reducing /// compile time. unsigned EnableFastISel : 1; - + unsigned EnableSegmentedStacks : 1; /// getTrapFunctionName - If this returns a non-empty string, this means diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h index 682aa50..c6e3086 100644 --- a/include/llvm/Target/TargetRegisterInfo.h +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -20,6 +20,7 @@ #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/CallingConv.h" #include <cassert> #include <functional> @@ -33,25 +34,18 @@ class raw_ostream; class TargetRegisterClass { public: - typedef const unsigned* iterator; - typedef const unsigned* const_iterator; - typedef const EVT* vt_iterator; + typedef const uint16_t* iterator; + typedef const uint16_t* const_iterator; + typedef const MVT::SimpleValueType* vt_iterator; typedef const TargetRegisterClass* const * sc_iterator; -private: + + // Instance variables filled by tablegen, do not use! 
const MCRegisterClass *MC; const vt_iterator VTs; const unsigned *SubClassMask; const sc_iterator SuperClasses; const sc_iterator SuperRegClasses; -public: - TargetRegisterClass(const MCRegisterClass *MC, const EVT *vts, - const unsigned *subcm, - const TargetRegisterClass * const *supcs, - const TargetRegisterClass * const *superregcs) - : MC(MC), VTs(vts), SubClassMask(subcm), SuperClasses(supcs), - SuperRegClasses(superregcs) {} - - virtual ~TargetRegisterClass() {} // Allow subclasses + ArrayRef<uint16_t> (*OrderFunc)(const MachineFunction&); /// getID() - Return the register class ID number. /// @@ -108,7 +102,7 @@ public: /// bool hasType(EVT vt) const { for(int i = 0; VTs[i] != MVT::Other; ++i) - if (VTs[i] == vt) + if (EVT(VTs[i]) == vt) return true; return false; } @@ -165,7 +159,7 @@ public: /// getSubClassMask - Returns a bit vector of subclasses, including this one. /// The vector is indexed by class IDs, see hasSubClassEq() above for how to /// use it. - const unsigned *getSubClassMask() const { + const uint32_t *getSubClassMask() const { return SubClassMask; } @@ -196,9 +190,8 @@ public: /// /// By default, this method returns all registers in the class. /// - virtual - ArrayRef<unsigned> getRawAllocationOrder(const MachineFunction &MF) const { - return makeArrayRef(begin(), getNumRegs()); + ArrayRef<uint16_t> getRawAllocationOrder(const MachineFunction &MF) const { + return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs()); } }; @@ -332,7 +325,7 @@ public: if (regA == regB) return true; if (isVirtualRegister(regA) || isVirtualRegister(regB)) return false; - for (const unsigned *regList = getOverlaps(regA)+1; *regList; ++regList) { + for (const uint16_t *regList = getOverlaps(regA)+1; *regList; ++regList) { if (*regList == regB) return true; } return false; @@ -347,7 +340,7 @@ public: /// isSuperRegister - Returns true if regB is a super-register of regA. 
/// bool isSuperRegister(unsigned regA, unsigned regB) const { - for (const unsigned *regList = getSuperRegisters(regA); *regList;++regList){ + for (const uint16_t *regList = getSuperRegisters(regA); *regList;++regList){ if (*regList == regB) return true; } return false; @@ -356,10 +349,33 @@ public: /// getCalleeSavedRegs - Return a null-terminated list of all of the /// callee saved registers on this target. The register should be in the /// order of desired callee-save stack frame offset. The first register is - /// closed to the incoming stack pointer if stack grows down, and vice versa. - virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0) + /// closest to the incoming stack pointer if stack grows down, and vice versa. + /// + virtual const uint16_t* getCalleeSavedRegs(const MachineFunction *MF = 0) const = 0; + /// getCallPreservedMask - Return a mask of call-preserved registers for the + /// given calling convention on the current sub-target. The mask should + /// include all call-preserved aliases. This is used by the register + /// allocator to determine which registers can be live across a call. + /// + /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries. + /// A set bit indicates that all bits of the corresponding register are + /// preserved across the function call. The bit mask is expected to be + /// sub-register complete, i.e. if A is preserved, so are all its + /// sub-registers. + /// + /// Bits are numbered from the LSB, so the bit for physical register Reg can + /// be found as (Mask[Reg / 32] >> Reg % 32) & 1. + /// + /// A NULL pointer means that no register mask will be used, and call + /// instructions should use implicit-def operands to indicate call clobbered + /// registers. + /// + virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const { + // The default mask clobbers everything. All targets should override. 
+ return 0; + } /// getReservedRegs - Returns a bitset indexed by physical register number /// indicating if a register is a special register that has particular uses @@ -367,24 +383,11 @@ public: /// used by register scavenger to determine what registers are free. virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0; - /// getSubReg - Returns the physical register number of sub-register "Index" - /// for physical register RegNo. Return zero if the sub-register does not - /// exist. - virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0; - - /// getSubRegIndex - For a given register pair, return the sub-register index - /// if the second register is a sub-register of the first. Return zero - /// otherwise. - virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0; - /// getMatchingSuperReg - Return a super-register of the specified register /// Reg so its sub-register of index SubIdx is Reg. unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const { - for (const unsigned *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs) - if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR)) - return SR; - return 0; + return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC); } /// canCombineSubRegIndices - Given a register class and a list of @@ -402,11 +405,11 @@ public: /// getMatchingSuperRegClass - Return a subclass of the specified register /// class A so that each register in it has a sub-register of the /// specified sub-register index which is in the specified register class B. + /// + /// TableGen will synthesize missing A sub-classes. virtual const TargetRegisterClass * getMatchingSuperRegClass(const TargetRegisterClass *A, - const TargetRegisterClass *B, unsigned Idx) const { - return 0; - } + const TargetRegisterClass *B, unsigned Idx) const =0; /// getSubClassWithSubReg - Returns the largest legal sub-class of RC that /// supports the sub-register index Idx. 
@@ -419,6 +422,7 @@ public: /// supported by the full GR32 register class in 64-bit mode, but only by the /// GR32_ABCD regiister class in 32-bit mode. /// + /// TableGen will synthesize missing RC sub-classes. virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const =0; @@ -469,8 +473,7 @@ public: /// values. If a target supports multiple different pointer register classes, /// kind specifies which one is indicated. virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const { - assert(0 && "Target didn't implement getPointerRegClass!"); - return 0; // Must return a value in order to compile with VS 2005 + llvm_unreachable("Target didn't implement getPointerRegClass!"); } /// getCrossCopyRegClass - Returns a legal register class to copy a register @@ -508,7 +511,7 @@ public: /// /// Register allocators need only call this function to resolve /// target-dependent hints, but it should work without hinting as well. - virtual ArrayRef<unsigned> + virtual ArrayRef<uint16_t> getRawAllocationOrder(const TargetRegisterClass *RC, unsigned HintType, unsigned HintReg, const MachineFunction &MF) const { @@ -607,22 +610,22 @@ public: virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB, unsigned BaseReg, int FrameIdx, int64_t Offset) const { - assert(0 && "materializeFrameBaseRegister does not exist on this target"); + llvm_unreachable("materializeFrameBaseRegister does not exist on this " + "target"); } /// resolveFrameIndex - Resolve a frame index operand of an instruction /// to reference the indicated base register plus offset instead. virtual void resolveFrameIndex(MachineBasicBlock::iterator I, unsigned BaseReg, int64_t Offset) const { - assert(0 && "resolveFrameIndex does not exist on this target"); + llvm_unreachable("resolveFrameIndex does not exist on this target"); } /// isFrameOffsetLegal - Determine whether a given offset immediate is /// encodable to resolve a frame index. 
virtual bool isFrameOffsetLegal(const MachineInstr *MI, int64_t Offset) const { - assert(0 && "isFrameOffsetLegal does not exist on this target"); - return false; // Must return a value in order to compile with VS 2005 + llvm_unreachable("isFrameOffsetLegal does not exist on this target"); } /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog @@ -636,7 +639,8 @@ public: eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const { - assert(0 && "Call Frame Pseudo Instructions do not exist on this target!"); + llvm_unreachable("Call Frame Pseudo Instructions do not exist on this " + "target!"); } diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td index 3288dd4..f55cf0e 100644 --- a/include/llvm/Target/TargetSelectionDAG.td +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -657,6 +657,51 @@ def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32; }]>; +def extloadvi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1; +}]>; +def extloadvi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8; +}]>; +def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16; +}]>; +def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32; +}]>; +def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f32; +}]>; +def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f64; +}]>; + +def sextloadvi1 : PatFrag<(ops node:$ptr), (sextload 
node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1; +}]>; +def sextloadvi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8; +}]>; +def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16; +}]>; +def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32; +}]>; + +def zextloadvi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1; +}]>; +def zextloadvi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8; +}]>; +def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16; +}]>; +def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{ + return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32; +}]>; + // store fragments. def unindexedstore : PatFrag<(ops node:$val, node:$ptr), (st node:$val, node:$ptr), [{ diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h index 9556c7a..d9d8ce4 100644 --- a/include/llvm/Target/TargetSubtargetInfo.h +++ b/include/llvm/Target/TargetSubtargetInfo.h @@ -39,7 +39,7 @@ public: // AntiDepBreakMode - Type of anti-dependence breaking that should // be performed before post-RA scheduling. 
typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode; - typedef SmallVectorImpl<TargetRegisterClass*> RegClassVector; + typedef SmallVectorImpl<const TargetRegisterClass*> RegClassVector; virtual ~TargetSubtargetInfo(); diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h index f9d7f9e..18176e8 100644 --- a/include/llvm/Transforms/IPO.h +++ b/include/llvm/Transforms/IPO.h @@ -94,6 +94,7 @@ Pass *createFunctionInliningPass(int Threshold); /// createAlwaysInlinerPass - Return a new pass object that inlines only /// functions that are marked as "always_inline". Pass *createAlwaysInlinerPass(); +Pass *createAlwaysInlinerPass(bool InsertLifetime); //===----------------------------------------------------------------------===// /// createPruneEHPass - Return a new pass object which transforms invoke diff --git a/include/llvm/Transforms/IPO/InlinerPass.h b/include/llvm/Transforms/IPO/InlinerPass.h index 3ac4c59..1feaaa4 100644 --- a/include/llvm/Transforms/IPO/InlinerPass.h +++ b/include/llvm/Transforms/IPO/InlinerPass.h @@ -31,7 +31,7 @@ namespace llvm { /// struct Inliner : public CallGraphSCCPass { explicit Inliner(char &ID); - explicit Inliner(char &ID, int Threshold); + explicit Inliner(char &ID, int Threshold, bool InsertLifetime); /// getAnalysisUsage - For this class, we declare that we require and preserve /// the call graph. If the derived class implements this method, it should @@ -87,6 +87,9 @@ private: // InlineThreshold - Cache the value here for easy access. unsigned InlineThreshold; + // InsertLifetime - Insert @llvm.lifetime intrinsics. + bool InsertLifetime; + /// shouldInline - Return true if the inliner should attempt to /// inline at the given CallSite. 
bool shouldInline(CallSite CS); diff --git a/include/llvm/Transforms/IPO/PassManagerBuilder.h b/include/llvm/Transforms/IPO/PassManagerBuilder.h index d265bda..a1b4f5c 100644 --- a/include/llvm/Transforms/IPO/PassManagerBuilder.h +++ b/include/llvm/Transforms/IPO/PassManagerBuilder.h @@ -60,6 +60,10 @@ public: /// out of the frontend. EP_EarlyAsPossible, + /// EP_ModuleOptimizerEarly - This extension point allows adding passes + /// just before the main module-level optimization passes. + EP_ModuleOptimizerEarly, + /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to /// the end of the loop optimizer. EP_LoopOptimizerEnd, @@ -95,6 +99,7 @@ public: bool DisableSimplifyLibCalls; bool DisableUnitAtATime; bool DisableUnrollLoops; + bool Vectorize; private: /// ExtensionList - This is list of all of the extensions that are registered. diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h index baa6364..bbf3a69 100644 --- a/include/llvm/Transforms/Instrumentation.h +++ b/include/llvm/Transforms/Instrumentation.h @@ -17,6 +17,7 @@ namespace llvm { class ModulePass; +class FunctionPass; // Insert edge profiling instrumentation ModulePass *createEdgeProfilerPass(); @@ -34,6 +35,8 @@ ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true, // Insert AddressSanitizer (address sanity checking) instrumentation ModulePass *createAddressSanitizerPass(); +// Insert ThreadSanitizer (race detection) instrumentation +FunctionPass *createThreadSanitizerPass(); } // End llvm namespace diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 2c4f650..7f055d4 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -327,6 +327,12 @@ Pass *createCorrelatedValuePropagationPass(); //===----------------------------------------------------------------------===// // +// ObjCARCAPElim - ObjC ARC autorelease pool elimination. 
+// +Pass *createObjCARCAPElimPass(); + +//===----------------------------------------------------------------------===// +// // ObjCARCExpand - ObjC ARC preliminary simplifications. // Pass *createObjCARCExpandPass(); diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h index 674c2d0..b7b5d29 100644 --- a/include/llvm/Transforms/Utils/Cloning.h +++ b/include/llvm/Transforms/Utils/Cloning.h @@ -56,21 +56,13 @@ struct ClonedCodeInfo { /// call instruction. bool ContainsCalls; - /// ContainsUnwinds - This is set to true if the cloned code contains an - /// unwind instruction. - bool ContainsUnwinds; - /// ContainsDynamicAllocas - This is set to true if the cloned code contains /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in /// the entry block or they are in the entry block but are not a constant /// size. bool ContainsDynamicAllocas; - ClonedCodeInfo() { - ContainsCalls = false; - ContainsUnwinds = false; - ContainsDynamicAllocas = false; - } + ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {} }; @@ -134,8 +126,8 @@ inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){ /// Clone OldFunc into NewFunc, transforming the old arguments into references /// to VMap values. Note that if NewFunc already has basic blocks, the ones /// cloned into it will be added to the end of the function. This function -/// fills in a list of return instructions, and can optionally append the -/// specified suffix to all values cloned. +/// fills in a list of return instructions, and can optionally remap types +/// and/or append the specified suffix to all values cloned. /// /// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue /// mappings. 
@@ -145,7 +137,8 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, bool ModuleLevelChanges, SmallVectorImpl<ReturnInst*> &Returns, const char *NameSuffix = "", - ClonedCodeInfo *CodeInfo = 0); + ClonedCodeInfo *CodeInfo = 0, + ValueMapTypeRemapper *TypeMapper = 0); /// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, /// except that it does some simple constant prop and DCE on the fly. The @@ -204,9 +197,9 @@ public: /// exists in the instruction stream. Similarly this will inline a recursive /// function by one level. /// -bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI); -bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI); -bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI); +bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, bool InsertLifetime = true); +bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, bool InsertLifetime = true); +bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI, bool InsertLifetime = true); } // End llvm namespace diff --git a/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/include/llvm/Transforms/Utils/CmpInstAnalysis.h new file mode 100644 index 0000000..7ad7bdd --- /dev/null +++ b/include/llvm/Transforms/Utils/CmpInstAnalysis.h @@ -0,0 +1,66 @@ +//===-- CmpInstAnalysis.h - Utils to help fold compare insts ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file holds routines to help analyse compare instructions +// and fold them into constants or other compare instructions +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H +#define LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H + +#include "llvm/InstrTypes.h" + +namespace llvm { + class ICmpInst; + class Value; + + /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits + /// are carefully arranged to allow folding of expressions such as: + /// + /// (A < B) | (A > B) --> (A != B) + /// + /// Note that this is only valid if the first and second predicates have the + /// same sign. Is illegal to do: (A u< B) | (A s> B) + /// + /// Three bits are used to represent the condition, as follows: + /// 0 A > B + /// 1 A == B + /// 2 A < B + /// + /// <=> Value Definition + /// 000 0 Always false + /// 001 1 A > B + /// 010 2 A == B + /// 011 3 A >= B + /// 100 4 A < B + /// 101 5 A != B + /// 110 6 A <= B + /// 111 7 Always true + /// + unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false); + + /// getICmpValue - This is the complement of getICmpCode, which turns an + /// opcode and two operands into either a constant true or false, or the + /// predicate for a new ICmp instruction. The sign is passed in to determine + /// which kind of predicate to use in the new icmp instruction. + /// Non-NULL return value will be a true or false constant. + /// NULL return means a new ICmp is needed. The predicate for which is + /// output in NewICmpPred. + Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS, + CmpInst::Predicate &NewICmpPred); + + /// PredicatesFoldable - Return true if both predicates match sign or if at + /// least one of them is an equality comparison (which is signless). 
+ bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2); + +} // end namespace llvm + +#endif + diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h index 15d65bc..a9adbd7 100644 --- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h +++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h @@ -380,7 +380,7 @@ public: if (!SomePHI) break; if (CheckIfPHIMatches(SomePHI)) { - RecordMatchingPHI(SomePHI); + RecordMatchingPHIs(BlockList); break; } // Match failed: clear all the PHITag values. @@ -437,38 +437,17 @@ public: return true; } - /// RecordMatchingPHI - For a PHI node that matches, record it and its input - /// PHIs in both the BBMap and the AvailableVals mapping. - void RecordMatchingPHI(PhiT *PHI) { - SmallVector<PhiT*, 20> WorkList; - WorkList.push_back(PHI); - - // Record this PHI. - BlkT *BB = PHI->getParent(); - ValT PHIVal = Traits::GetPHIValue(PHI); - (*AvailableVals)[BB] = PHIVal; - BBMap[BB]->AvailableVal = PHIVal; - - while (!WorkList.empty()) { - PHI = WorkList.pop_back_val(); - - // Iterate through the PHI's incoming values. - for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI), - E = Traits::PHI_end(PHI); I != E; ++I) { - ValT IncomingVal = I.getIncomingValue(); - PhiT *IncomingPHI = Traits::ValueIsPHI(IncomingVal, Updater); - if (!IncomingPHI) continue; - BB = IncomingPHI->getParent(); - BBInfo *Info = BBMap[BB]; - if (!Info || Info->AvailableVal) - continue; - - // Record the PHI and add it to the worklist. - (*AvailableVals)[BB] = IncomingVal; - Info->AvailableVal = IncomingVal; - WorkList.push_back(IncomingPHI); + /// RecordMatchingPHIs - For each PHI node that matches, record it in both + /// the BBMap and the AvailableVals mapping. 
+ void RecordMatchingPHIs(BlockListTy *BlockList) { + for (typename BlockListTy::iterator I = BlockList->begin(), + E = BlockList->end(); I != E; ++I) + if (PhiT *PHI = (*I)->PHITag) { + BlkT *BB = PHI->getParent(); + ValT PHIVal = Traits::GetPHIValue(PHI); + (*AvailableVals)[BB] = PHIVal; + BBMap[BB]->AvailableVal = PHIVal; } - } } }; diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h index 21d433a..c9bd916 100644 --- a/include/llvm/Transforms/Utils/SimplifyIndVar.h +++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h @@ -33,6 +33,7 @@ class ScalarEvolution; /// Interface for visiting interesting IV users that are recognized but not /// simplified by this utility. class IVVisitor { + virtual void anchor(); public: virtual ~IVVisitor() {} virtual void visitCast(CastInst *Cast) = 0; diff --git a/include/llvm/Transforms/Vectorize.h b/include/llvm/Transforms/Vectorize.h new file mode 100644 index 0000000..dfc099d --- /dev/null +++ b/include/llvm/Transforms/Vectorize.h @@ -0,0 +1,30 @@ +//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes for accessor functions that expose passes +// in the Vectorize transformations library. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_H +#define LLVM_TRANSFORMS_VECTORIZE_H + +namespace llvm { + +class BasicBlockPass; + +//===----------------------------------------------------------------------===// +// +// BBVectorize - A basic-block vectorization pass. 
+// +BasicBlockPass *createBBVectorizePass(); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Type.h b/include/llvm/Type.h index a571b4d..185258d 100644 --- a/include/llvm/Type.h +++ b/include/llvm/Type.h @@ -16,6 +16,7 @@ #define LLVM_TYPE_H #include "llvm/Support/Casting.h" +#include "llvm/Support/DataTypes.h" namespace llvm { @@ -25,6 +26,7 @@ class raw_ostream; class Module; class LLVMContext; class LLVMContextImpl; +class StringRef; template<class GraphType> struct GraphTraits; /// The instances of the Type class are immutable: once they are created, @@ -47,23 +49,24 @@ public: enum TypeID { // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date. VoidTyID = 0, ///< 0: type with no size - FloatTyID, ///< 1: 32-bit floating point type - DoubleTyID, ///< 2: 64-bit floating point type - X86_FP80TyID, ///< 3: 80-bit floating point type (X87) - FP128TyID, ///< 4: 128-bit floating point type (112-bit mantissa) - PPC_FP128TyID, ///< 5: 128-bit floating point type (two 64-bits, PowerPC) - LabelTyID, ///< 6: Labels - MetadataTyID, ///< 7: Metadata - X86_MMXTyID, ///< 8: MMX vectors (64 bits, X86 specific) + HalfTyID, ///< 1: 16-bit floating point type + FloatTyID, ///< 2: 32-bit floating point type + DoubleTyID, ///< 3: 64-bit floating point type + X86_FP80TyID, ///< 4: 80-bit floating point type (X87) + FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa) + PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC) + LabelTyID, ///< 7: Labels + MetadataTyID, ///< 8: Metadata + X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific) // Derived types... see DerivedTypes.h file. // Make sure FirstDerivedTyID stays up to date! 
- IntegerTyID, ///< 9: Arbitrary bit width integers - FunctionTyID, ///< 10: Functions - StructTyID, ///< 11: Structures - ArrayTyID, ///< 12: Arrays - PointerTyID, ///< 13: Pointers - VectorTyID, ///< 14: SIMD 'packed' format, or other vector type + IntegerTyID, ///< 10: Arbitrary bit width integers + FunctionTyID, ///< 11: Functions + StructTyID, ///< 12: Structures + ArrayTyID, ///< 13: Arrays + PointerTyID, ///< 14: Pointers + VectorTyID, ///< 15: SIMD 'packed' format, or other vector type NumTypeIDs, // Must remain as last defined ID LastPrimitiveTyID = X86_MMXTyID, @@ -74,21 +77,32 @@ private: /// Context - This refers to the LLVMContext in which this type was uniqued. LLVMContext &Context; - TypeID ID : 8; // The current base type of this type. - unsigned SubclassData : 24; // Space for subclasses to store data + // Due to Ubuntu GCC bug 910363: + // https://bugs.launchpad.net/ubuntu/+source/gcc-4.5/+bug/910363 + // Bitpack ID and SubclassData manually. + // Note: TypeID : low 8 bit; SubclassData : high 24 bit. + uint32_t IDAndSubclassData; protected: friend class LLVMContextImpl; explicit Type(LLVMContext &C, TypeID tid) - : Context(C), ID(tid), SubclassData(0), - NumContainedTys(0), ContainedTys(0) {} + : Context(C), IDAndSubclassData(0), + NumContainedTys(0), ContainedTys(0) { + setTypeID(tid); + } ~Type() {} - - unsigned getSubclassData() const { return SubclassData; } + + void setTypeID(TypeID ID) { + IDAndSubclassData = (ID & 0xFF) | (IDAndSubclassData & 0xFFFFFF00); + assert(getTypeID() == ID && "TypeID data too large for field"); + } + + unsigned getSubclassData() const { return IDAndSubclassData >> 8; } + void setSubclassData(unsigned val) { - SubclassData = val; + IDAndSubclassData = (IDAndSubclassData & 0xFF) | (val << 8); // Ensure we don't have any accidental truncation. 
- assert(SubclassData == val && "Subclass data too large for field"); + assert(getSubclassData() == val && "Subclass data too large for field"); } /// NumContainedTys - Keeps track of how many Type*'s there are in the @@ -116,49 +130,54 @@ public: /// getTypeID - Return the type id for the type. This will return one /// of the TypeID enum elements defined above. /// - TypeID getTypeID() const { return ID; } + TypeID getTypeID() const { return (TypeID)(IDAndSubclassData & 0xFF); } /// isVoidTy - Return true if this is 'void'. - bool isVoidTy() const { return ID == VoidTyID; } + bool isVoidTy() const { return getTypeID() == VoidTyID; } + + /// isHalfTy - Return true if this is 'half', a 16-bit IEEE fp type. + bool isHalfTy() const { return getTypeID() == HalfTyID; } /// isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type. - bool isFloatTy() const { return ID == FloatTyID; } + bool isFloatTy() const { return getTypeID() == FloatTyID; } /// isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type. - bool isDoubleTy() const { return ID == DoubleTyID; } + bool isDoubleTy() const { return getTypeID() == DoubleTyID; } /// isX86_FP80Ty - Return true if this is x86 long double. - bool isX86_FP80Ty() const { return ID == X86_FP80TyID; } + bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; } /// isFP128Ty - Return true if this is 'fp128'. - bool isFP128Ty() const { return ID == FP128TyID; } + bool isFP128Ty() const { return getTypeID() == FP128TyID; } /// isPPC_FP128Ty - Return true if this is powerpc long double. 
- bool isPPC_FP128Ty() const { return ID == PPC_FP128TyID; } + bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; } /// isFloatingPointTy - Return true if this is one of the five floating point /// types bool isFloatingPointTy() const { - return ID == FloatTyID || ID == DoubleTyID || - ID == X86_FP80TyID || ID == FP128TyID || ID == PPC_FP128TyID; + return getTypeID() == HalfTyID || getTypeID() == FloatTyID || + getTypeID() == DoubleTyID || + getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID || + getTypeID() == PPC_FP128TyID; } /// isX86_MMXTy - Return true if this is X86 MMX. - bool isX86_MMXTy() const { return ID == X86_MMXTyID; } + bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; } /// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP. /// bool isFPOrFPVectorTy() const; /// isLabelTy - Return true if this is 'label'. - bool isLabelTy() const { return ID == LabelTyID; } + bool isLabelTy() const { return getTypeID() == LabelTyID; } /// isMetadataTy - Return true if this is 'metadata'. - bool isMetadataTy() const { return ID == MetadataTyID; } + bool isMetadataTy() const { return getTypeID() == MetadataTyID; } /// isIntegerTy - True if this is an instance of IntegerType. /// - bool isIntegerTy() const { return ID == IntegerTyID; } + bool isIntegerTy() const { return getTypeID() == IntegerTyID; } /// isIntegerTy - Return true if this is an IntegerType of the given width. bool isIntegerTy(unsigned Bitwidth) const; @@ -170,23 +189,23 @@ public: /// isFunctionTy - True if this is an instance of FunctionType. /// - bool isFunctionTy() const { return ID == FunctionTyID; } + bool isFunctionTy() const { return getTypeID() == FunctionTyID; } /// isStructTy - True if this is an instance of StructType. /// - bool isStructTy() const { return ID == StructTyID; } + bool isStructTy() const { return getTypeID() == StructTyID; } /// isArrayTy - True if this is an instance of ArrayType. 
/// - bool isArrayTy() const { return ID == ArrayTyID; } + bool isArrayTy() const { return getTypeID() == ArrayTyID; } /// isPointerTy - True if this is an instance of PointerType. /// - bool isPointerTy() const { return ID == PointerTyID; } + bool isPointerTy() const { return getTypeID() == PointerTyID; } /// isVectorTy - True if this is an instance of VectorType. /// - bool isVectorTy() const { return ID == VectorTyID; } + bool isVectorTy() const { return getTypeID() == VectorTyID; } /// canLosslesslyBitCastTo - Return true if this type could be converted /// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts @@ -202,14 +221,14 @@ public: /// Here are some useful little methods to query what type derived types are /// Note that all other types can just compare to see if this == Type::xxxTy; /// - bool isPrimitiveType() const { return ID <= LastPrimitiveTyID; } - bool isDerivedType() const { return ID >= FirstDerivedTyID; } + bool isPrimitiveType() const { return getTypeID() <= LastPrimitiveTyID; } + bool isDerivedType() const { return getTypeID() >= FirstDerivedTyID; } /// isFirstClassType - Return true if the type is "first class", meaning it /// is a valid type for a Value. /// bool isFirstClassType() const { - return ID != FunctionTyID && ID != VoidTyID; + return getTypeID() != FunctionTyID && getTypeID() != VoidTyID; } /// isSingleValueType - Return true if the type is a valid type for a @@ -217,8 +236,9 @@ public: /// and array types. /// bool isSingleValueType() const { - return (ID != VoidTyID && isPrimitiveType()) || - ID == IntegerTyID || ID == PointerTyID || ID == VectorTyID; + return (getTypeID() != VoidTyID && isPrimitiveType()) || + getTypeID() == IntegerTyID || getTypeID() == PointerTyID || + getTypeID() == VectorTyID; } /// isAggregateType - Return true if the type is an aggregate type. This @@ -227,7 +247,7 @@ public: /// does not include vector types. 
/// bool isAggregateType() const { - return ID == StructTyID || ID == ArrayTyID; + return getTypeID() == StructTyID || getTypeID() == ArrayTyID; } /// isSized - Return true if it makes sense to take the size of this type. To @@ -236,12 +256,14 @@ public: /// bool isSized() const { // If it's a primitive, it is always sized. - if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID || - ID == X86_MMXTyID) + if (getTypeID() == IntegerTyID || isFloatingPointTy() || + getTypeID() == PointerTyID || + getTypeID() == X86_MMXTyID) return true; // If it is not something that can have a size (e.g. a function or label), // it doesn't have a size. - if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID) + if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && + getTypeID() != VectorTyID) return false; // Otherwise we have to try harder to decide. return isSizedDerivedType(); @@ -273,10 +295,6 @@ public: /// otherwise return 'this'. Type *getScalarType(); - /// getNumElements - If this is a vector type, return the number of elements, - /// otherwise return zero. - unsigned getNumElements(); - //===--------------------------------------------------------------------===// // Type Iteration support. // @@ -298,6 +316,34 @@ public: unsigned getNumContainedTypes() const { return NumContainedTys; } //===--------------------------------------------------------------------===// + // Helper methods corresponding to subclass methods. This forces a cast to + // the specified subclass and calls its accessor. "getVectorNumElements" (for + // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is + // only intended to cover the core methods that are frequently used, helper + // methods should not be added here. 
+ + unsigned getIntegerBitWidth() const; + + Type *getFunctionParamType(unsigned i) const; + unsigned getFunctionNumParams() const; + bool isFunctionVarArg() const; + + StringRef getStructName() const; + unsigned getStructNumElements() const; + Type *getStructElementType(unsigned N) const; + + Type *getSequentialElementType() const; + + uint64_t getArrayNumElements() const; + Type *getArrayElementType() const { return getSequentialElementType(); } + + unsigned getVectorNumElements() const; + Type *getVectorElementType() const { return getSequentialElementType(); } + + unsigned getPointerAddressSpace() const; + Type *getPointerElementType() const { return getSequentialElementType(); } + + //===--------------------------------------------------------------------===// // Static members exported by the Type class itself. Useful for getting // instances of Type. // @@ -310,6 +356,7 @@ public: // static Type *getVoidTy(LLVMContext &C); static Type *getLabelTy(LLVMContext &C); + static Type *getHalfTy(LLVMContext &C); static Type *getFloatTy(LLVMContext &C); static Type *getDoubleTy(LLVMContext &C); static Type *getMetadataTy(LLVMContext &C); @@ -328,6 +375,7 @@ public: // Convenience methods for getting pointer types with one of the above builtin // types as pointee. // + static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0); static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0); static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0); static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0); diff --git a/include/llvm/User.h b/include/llvm/User.h index d3f4f21..c52f32f 100644 --- a/include/llvm/User.h +++ b/include/llvm/User.h @@ -19,6 +19,7 @@ #ifndef LLVM_USER_H #define LLVM_USER_H +#include "llvm/Support/ErrorHandling.h" #include "llvm/Value.h" namespace llvm { @@ -65,11 +66,11 @@ public: void operator delete(void *Usr); /// placement delete - required by std, but never called. 
void operator delete(void*, unsigned) { - assert(0 && "Constructor throws?"); + llvm_unreachable("Constructor throws?"); } /// placement delete - required by std, but never called. void operator delete(void*, unsigned, bool) { - assert(0 && "Constructor throws?"); + llvm_unreachable("Constructor throws?"); } protected: template <int Idx, typename U> static Use &OpFrom(const U *that) { diff --git a/include/llvm/Value.h b/include/llvm/Value.h index 9b47143..9c658cf 100644 --- a/include/llvm/Value.h +++ b/include/llvm/Value.h @@ -17,7 +17,6 @@ #include "llvm/Use.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" -#include <string> namespace llvm { @@ -194,6 +193,8 @@ public: BlockAddressVal, // This is an instance of BlockAddress ConstantExprVal, // This is an instance of ConstantExpr ConstantAggregateZeroVal, // This is an instance of ConstantAggregateZero + ConstantDataArrayVal, // This is an instance of ConstantDataArray + ConstantDataVectorVal, // This is an instance of ConstantDataVector ConstantIntVal, // This is an instance of ConstantInt ConstantFPVal, // This is an instance of ConstantFP ConstantArrayVal, // This is an instance of ConstantArray |