Diffstat (limited to 'lib/Analysis')
 lib/Analysis/AliasAnalysis.cpp                 |  16
 lib/Analysis/AliasAnalysisCounter.cpp          |   6
 lib/Analysis/AliasAnalysisEvaluator.cpp        |   4
 lib/Analysis/AliasSetTracker.cpp               |   4
 lib/Analysis/BasicAliasAnalysis.cpp            |  56
 lib/Analysis/BlockFrequencyInfo.cpp            |   2
 lib/Analysis/BranchProbabilityInfo.cpp         |   5
 lib/Analysis/CaptureTracking.cpp               |  14
 lib/Analysis/ConstantFolding.cpp               | 240
 lib/Analysis/DIBuilder.cpp                     |  94
 lib/Analysis/DebugInfo.cpp                     |  97
 lib/Analysis/DominanceFrontier.cpp             |   2
 lib/Analysis/IVUsers.cpp                       |   9
 lib/Analysis/InlineCost.cpp                    | 166
 lib/Analysis/InstructionSimplify.cpp           | 127
 lib/Analysis/LazyValueInfo.cpp                 |  73
 lib/Analysis/LoopDependenceAnalysis.cpp        |   2
 lib/Analysis/MemoryDependenceAnalysis.cpp      |  21
 lib/Analysis/PHITransAddr.cpp                  |   2
 lib/Analysis/PathNumbering.cpp                 |   4
 lib/Analysis/RegionInfo.cpp                    |   2
 lib/Analysis/ScalarEvolution.cpp               | 110
 lib/Analysis/ScalarEvolutionExpander.cpp       | 386
 lib/Analysis/ScalarEvolutionNormalization.cpp  |   2
 lib/Analysis/SparsePropagation.cpp             |   3
 lib/Analysis/ValueTracking.cpp                 | 355
 26 files changed, 1045 insertions(+), 757 deletions(-)
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index bd132c0..95c834b 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -440,3 +440,19 @@ bool llvm::isIdentifiedObject(const Value *V) {
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
}
+
+/// isKnownNonNull - Return true if we know that the specified value is never
+/// null.
+bool llvm::isKnownNonNull(const Value *V) {
+ // Alloca never returns null, malloc might.
+ if (isa<AllocaInst>(V)) return true;
+
+ // A byval argument is never null.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->hasByValAttr();
+
+ // Global values are not null unless extern weak.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return !GV->hasExternalWeakLinkage();
+ return false;
+}
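
Note: isKnownNonNull() is hoisted here from BasicAliasAnalysis.cpp (see the removal below) so other passes can reuse it; the InstructionSimplify changes later in this patch become its second caller. A minimal caller sketch, assuming the declaration is exported from llvm/Analysis/AliasAnalysis.h as in this revision (foldNullCompare itself is illustrative, not part of the patch):

  #include "llvm/Constants.h"
  #include "llvm/Instructions.h"
  #include "llvm/Analysis/AliasAnalysis.h"
  using namespace llvm;

  // Fold `icmp eq/ne ptr, null` to a constant when the pointer is provably
  // non-null; returns 0 if no folding applies.
  static Constant *foldNullCompare(ICmpInst *ICI) {
    Value *Ptr = ICI->getOperand(0)->stripPointerCasts();
    if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1)) &&
        isKnownNonNull(Ptr))
      // The operands are known unequal: eq folds to false, ne to true.
      return ConstantInt::get(ICI->getType(),
                              ICI->getPredicate() == ICmpInst::ICMP_NE);
    return 0;
  }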
diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp
index d947220..9f219f5 100644
--- a/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/lib/Analysis/AliasAnalysisCounter.cpp
@@ -127,9 +127,8 @@ AliasAnalysis::AliasResult
AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
- const char *AliasString;
+ const char *AliasString = 0;
switch (R) {
- default: llvm_unreachable("Unknown alias type!");
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
case PartialAlias: Partial++; AliasString = "Partial alias"; break;
@@ -154,9 +153,8 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
const Location &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
- const char *MRString;
+ const char *MRString = 0;
switch (R) {
- default: llvm_unreachable("Unknown mod/ref type!");
case NoModRef: NoMR++; MRString = "NoModRef"; break;
case Ref: JustRef++; MRString = "JustRef"; break;
case Mod: JustMod++; MRString = "JustMod"; break;
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 37271b9..ac72983 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -193,8 +193,6 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::MustAlias:
PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
++MustAlias; break;
- default:
- errs() << "Unknown alias query result!\n";
}
}
}
@@ -223,8 +221,6 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
++ModRef; break;
- default:
- errs() << "Unknown alias query result!\n";
}
}
}
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 3fcd3b5..f80e2fb 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -189,7 +189,9 @@ bool AliasSet::aliasesUnknownInst(Instruction *Inst, AliasAnalysis &AA) const {
}
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.getModRefInfo(Inst, I.getPointer(), I.getSize()) !=
+ if (AA.getModRefInfo(Inst, AliasAnalysis::Location(I.getPointer(),
+ I.getSize(),
+ I.getTBAAInfo())) !=
AliasAnalysis::NoModRef)
return true;
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 568983a..20ecfd2 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -42,22 +42,6 @@ using namespace llvm;
// Useful predicates
//===----------------------------------------------------------------------===//
-/// isKnownNonNull - Return true if we know that the specified value is never
-/// null.
-static bool isKnownNonNull(const Value *V) {
- // Alloca never returns null, malloc might.
- if (isa<AllocaInst>(V)) return true;
-
- // A byval argument is never null.
- if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValAttr();
-
- // Global values are not null unless extern weak.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
- return !GV->hasExternalWeakLinkage();
- return false;
-}
-
/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
@@ -100,42 +84,59 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
+static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+ bool RoundToAlign = false) {
Type *AccessTy;
+ unsigned Align;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
AccessTy = GV->getType()->getElementType();
+ Align = GV->getAlignment();
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
if (!AI->isArrayAllocation())
AccessTy = AI->getType()->getElementType();
else
return AliasAnalysis::UnknownSize;
+ Align = AI->getAlignment();
} else if (const CallInst* CI = extractMallocCall(V)) {
- if (!isArrayMalloc(V, &TD))
+ if (!RoundToAlign && !isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return C->getZExtValue();
return AliasAnalysis::UnknownSize;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
- if (A->hasByValAttr())
+ if (A->hasByValAttr()) {
AccessTy = cast<PointerType>(A->getType())->getElementType();
- else
+ Align = A->getParamAlignment();
+ } else {
return AliasAnalysis::UnknownSize;
+ }
} else {
return AliasAnalysis::UnknownSize;
}
-
- if (AccessTy->isSized())
- return TD.getTypeAllocSize(AccessTy);
- return AliasAnalysis::UnknownSize;
+
+ if (!AccessTy->isSized())
+ return AliasAnalysis::UnknownSize;
+
+ uint64_t Size = TD.getTypeAllocSize(AccessTy);
+ // If there is an explicitly specified alignment, and we need to
+ // take alignment into account, round up the size. (If the alignment
+ // is implicit, getTypeAllocSize is sufficient.)
+ if (RoundToAlign && Align)
+ Size = RoundUpToAlignment(Size, Align);
+
+ return Size;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const TargetData &TD) {
- uint64_t ObjectSize = getObjectSize(V, TD);
+ // This function needs to use the aligned object size because we allow
+ // reads a bit past the end given sufficient alignment.
+ uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
+
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -977,10 +978,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
//
// TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
// practical effect of this is protecting TBAA in the case of dynamic
- // indices into arrays of unions. An alternative way to solve this would
- // be to have clang emit extra metadata for unions and/or union accesses.
- // A union-specific solution wouldn't handle the problem for malloc'd
- // memory however.
+ // indices into arrays of unions or malloc'd memory.
return PartialAlias;
}
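
Note: rationale for RoundToAlign: accesses are allowed to run slightly past the end of an object when its alignment guarantees the padding bytes are accessible, so isObjectSmallerThan() must compare against the aligned size. Worked example, using RoundUpToAlignment() from Support/MathExtras.h: a global `@g = global i8 0, align 4` has getTypeAllocSize() == 1, but RoundUpToAlignment(1, 4) == ((1 + 4 - 1) / 4) * 4 == 4, so a 4-byte access based at @g is no longer classified as reaching past the object.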
diff --git a/lib/Analysis/BlockFrequencyInfo.cpp b/lib/Analysis/BlockFrequencyInfo.cpp
index d16665f..8a660f7 100644
--- a/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/lib/Analysis/BlockFrequencyInfo.cpp
@@ -58,6 +58,6 @@ void BlockFrequencyInfo::print(raw_ostream &O, const Module *) const {
/// that we should not rely on the value itself, but only on the comparison to
/// the other block frequencies. We do this to avoid the use of floating point.
///
-BlockFrequency BlockFrequencyInfo::getBlockFreq(BasicBlock *BB) const {
+BlockFrequency BlockFrequencyInfo::getBlockFreq(const BasicBlock *BB) const {
return BFI->getBlockFreq(BB);
}
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index f9461c0..2730ce6 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -65,8 +65,9 @@ static const uint32_t UR_TAKEN_WEIGHT = 1;
///
/// This is the weight for a branch not being taken toward a block that
/// terminates (eventually) in unreachable. Such a branch is essentially never
-/// taken.
-static const uint32_t UR_NONTAKEN_WEIGHT = 1023;
+/// taken. Set the weight to an absurdly high value so that nested loops don't
+/// easily subsume it.
+static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1;
static const uint32_t PH_TAKEN_WEIGHT = 20;
static const uint32_t PH_NONTAKEN_WEIGHT = 12;
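
Note: worked numbers for the weight bump: with the old pair (UR_TAKEN_WEIGHT, UR_NONTAKEN_WEIGHT) = (1, 1023), the edge toward the unreachable-terminated block got probability 1/(1 + 1023) = 1/1024, small enough that weights accumulated by other heuristics in nested loops could outweigh the non-taken edge. With (1, 1024*1024 - 1) it drops to 1/2^20 = 1/1048576, keeping the "essentially never taken" edge negligible even deep inside loop nests.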
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index 9a7992e..dd33eeb 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -16,6 +16,8 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/CaptureTracking.h"
using namespace llvm;
@@ -30,8 +32,8 @@ namespace {
bool shouldExplore(Use *U) { return true; }
- bool captured(Instruction *I) {
- if (isa<ReturnInst>(I) && !ReturnCaptures)
+ bool captured(Use *U) {
+ if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
return false;
Captured = true;
@@ -117,7 +119,7 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
for (CallSite::arg_iterator A = B; A != E; ++A)
if (A->get() == V && !CS.doesNotCapture(A - B))
// The parameter is not marked 'nocapture' - captured.
- if (Tracker->captured(I))
+ if (Tracker->captured(U))
return;
break;
}
@@ -130,7 +132,7 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
case Instruction::Store:
if (V == I->getOperand(0))
// Stored the pointer - conservatively assume it may be captured.
- if (Tracker->captured(I))
+ if (Tracker->captured(U))
return;
// Storing to the pointee does not cause the pointer to be captured.
break;
@@ -158,12 +160,12 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
break;
// Otherwise, be conservative. There are crazy ways to capture pointers
// using comparisons.
- if (Tracker->captured(I))
+ if (Tracker->captured(U))
return;
break;
default:
// Something else - be conservative and say it is captured.
- if (Tracker->captured(I))
+ if (Tracker->captured(U))
return;
break;
}
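
Note: CaptureTracker::captured() now receives the capturing Use rather than the capturing Instruction; U->getUser() recovers the instruction, as the first hunk above shows, and the Use additionally identifies which operand did the capturing. A minimal tracker against the new interface, as a sketch (the struct name is illustrative; tooManyUses() is the third callback of this revision's CaptureTracker):

  #include "llvm/Analysis/CaptureTracking.h"
  using namespace llvm;

  struct EverCaptured : public CaptureTracker {
    EverCaptured() : Captured(false) {}
    void tooManyUses() { Captured = true; }      // too many uses: give up
    bool shouldExplore(Use *U) { return true; }  // visit every use
    bool captured(Use *U) {                      // this use captures the pointer
      Captured = true;
      return true;                               // returning true stops the walk
    }
    bool Captured;
  };

  // Usage: EverCaptured CT; PointerMayBeCaptured(V, &CT); then check CT.Captured.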
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 8e2f263..7a0a4e1 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -52,6 +52,42 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
return Constant::getAllOnesValue(DestTy);
+ // Handle a vector->integer cast.
+ if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
+ ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
+ if (CDV == 0)
+ return ConstantExpr::getBitCast(C, DestTy);
+
+ unsigned NumSrcElts = CDV->getType()->getNumElements();
+
+ Type *SrcEltTy = CDV->getType()->getElementType();
+
+ // If the vector is a vector of floating point, convert it to vector of int
+ // to simplify things.
+ if (SrcEltTy->isFloatingPointTy()) {
+ unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
+ Type *SrcIVTy =
+ VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
+ // Ask VMCore to do the conversion now that #elts line up.
+ C = ConstantExpr::getBitCast(C, SrcIVTy);
+ CDV = cast<ConstantDataVector>(C);
+ }
+
+ // Now that we know that the input value is a vector of integers, just shift
+ // and insert them into our result.
+ unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
+ APInt Result(IT->getBitWidth(), 0);
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ Result <<= BitShift;
+ if (TD.isLittleEndian())
+ Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
+ else
+ Result |= CDV->getElementAsInteger(i);
+ }
+
+ return ConstantInt::get(IT, Result);
+ }
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
@@ -65,17 +101,16 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
// If this is a bitcast from constant vector -> vector, fold it.
- ConstantVector *CV = dyn_cast<ConstantVector>(C);
- if (CV == 0)
+ if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
- unsigned NumSrcElt = CV->getNumOperands();
+ unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
- Type *SrcEltTy = CV->getType()->getElementType();
+ Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
@@ -95,7 +130,6 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
- if (!C) return ConstantExpr::getBitCast(C, DestTy);
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
@@ -109,8 +143,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
- CV = dyn_cast<ConstantVector>(C);
- if (!CV) // If VMCore wasn't able to fold it, bail out.
+ // If VMCore wasn't able to fold it, bail out.
+ if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
+ !isa<ConstantDataVector>(C))
return C;
}
@@ -132,7 +167,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
+ Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -149,28 +184,29 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
Result.push_back(Elt);
}
- } else {
- // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
- unsigned Ratio = NumDstElt/NumSrcElt;
- unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+ return ConstantVector::get(Result);
+ }
+
+ // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
+ unsigned Ratio = NumDstElt/NumSrcElt;
+ unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+
+ // Loop over each source value, expanding into multiple results.
+ for (unsigned i = 0; i != NumSrcElt; ++i) {
+ Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
+ if (!Src) // Reject constantexpr elements.
+ return ConstantExpr::getBitCast(C, DestTy);
- // Loop over each source value, expanding into multiple results.
- for (unsigned i = 0; i != NumSrcElt; ++i) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
- if (!Src) // Reject constantexpr elements.
- return ConstantExpr::getBitCast(C, DestTy);
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
+ for (unsigned j = 0; j != Ratio; ++j) {
+ // Shift the piece of the value into the right place, depending on
+ // endianness.
+ Constant *Elt = ConstantExpr::getLShr(Src,
+ ConstantInt::get(Src->getType(), ShiftAmt));
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
- unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
- for (unsigned j = 0; j != Ratio; ++j) {
- // Shift the piece of the value into the right place, depending on
- // endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
- ConstantInt::get(Src->getType(), ShiftAmt));
- ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
- // Truncate and remember this piece.
- Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
- }
+ // Truncate and remember this piece.
+ Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
@@ -273,7 +309,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
@@ -311,12 +347,20 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
// not reached.
}
- if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
- uint64_t EltSize = TD.getTypeAllocSize(CA->getType()->getElementType());
+ if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
+ isa<ConstantDataSequential>(C)) {
+ Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
+ uint64_t EltSize = TD.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
- for (; Index != CA->getType()->getNumElements(); ++Index) {
- if (!ReadDataFromGlobal(CA->getOperand(Index), Offset, CurPtr,
+ uint64_t NumElts;
+ if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
+ NumElts = AT->getNumElements();
+ else
+ NumElts = cast<VectorType>(C->getType())->getNumElements();
+
+ for (; Index != NumElts; ++Index) {
+ if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
BytesLeft, TD))
return false;
if (EltSize >= BytesLeft)
@@ -328,30 +372,12 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
- if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
- uint64_t EltSize = TD.getTypeAllocSize(CV->getType()->getElementType());
- uint64_t Index = ByteOffset / EltSize;
- uint64_t Offset = ByteOffset - Index * EltSize;
- for (; Index != CV->getType()->getNumElements(); ++Index) {
- if (!ReadDataFromGlobal(CV->getOperand(Index), Offset, CurPtr,
- BytesLeft, TD))
- return false;
- if (EltSize >= BytesLeft)
- return true;
- Offset = 0;
- BytesLeft -= EltSize;
- CurPtr += EltSize;
- }
- return true;
- }
-
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
- BytesLeft, TD);
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ BytesLeft, TD);
}
// Otherwise, unknown initializer type.
@@ -446,9 +472,9 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
- std::string Str;
- if (TD && GetConstantStringInfo(CE, Str) && !Str.empty()) {
- unsigned StrLen = Str.length();
+ StringRef Str;
+ if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
+ unsigned StrLen = Str.size();
Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
@@ -836,7 +862,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
- case Instruction::FCmp: assert(0 && "Invalid for compares");
+ case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
@@ -990,56 +1016,30 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
- if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType()))
+ if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
-
+
// Loop over all of the operands, tracking down which value we are
- // addressing...
- gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
- for (++I; I != E; ++I)
- if (StructType *STy = dyn_cast<StructType>(*I)) {
- ConstantInt *CU = cast<ConstantInt>(I.getOperand());
- assert(CU->getZExtValue() < STy->getNumElements() &&
- "Struct index out of range!");
- unsigned El = (unsigned)CU->getZExtValue();
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
- C = CS->getOperand(El);
- } else if (isa<ConstantAggregateZero>(C)) {
- C = Constant::getNullValue(STy->getElementType(El));
- } else if (isa<UndefValue>(C)) {
- C = UndefValue::get(STy->getElementType(El));
- } else {
- return 0;
- }
- } else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
- if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
- if (CI->getZExtValue() >= ATy->getNumElements())
- return 0;
- if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
- C = CA->getOperand(CI->getZExtValue());
- else if (isa<ConstantAggregateZero>(C))
- C = Constant::getNullValue(ATy->getElementType());
- else if (isa<UndefValue>(C))
- C = UndefValue::get(ATy->getElementType());
- else
- return 0;
- } else if (VectorType *VTy = dyn_cast<VectorType>(*I)) {
- if (CI->getZExtValue() >= VTy->getNumElements())
- return 0;
- if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
- C = CP->getOperand(CI->getZExtValue());
- else if (isa<ConstantAggregateZero>(C))
- C = Constant::getNullValue(VTy->getElementType());
- else if (isa<UndefValue>(C))
- C = UndefValue::get(VTy->getElementType());
- else
- return 0;
- } else {
- return 0;
- }
- } else {
- return 0;
- }
+ // addressing.
+ for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
+ C = C->getAggregateElement(CE->getOperand(i));
+ if (C == 0) return 0;
+ }
+ return C;
+}
+
+/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
+/// indices (with an *implied* zero pointer index that is not in the list),
+/// return the constant value being addressed by a virtual load, or null if
+/// something is funny and we can't decide.
+Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
+ ArrayRef<Constant*> Indices) {
+ // Loop over all of the operands, tracking down which value we are
+ // addressing.
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ C = C->getAggregateElement(Indices[i]);
+ if (C == 0) return 0;
+ }
return C;
}
@@ -1125,7 +1125,6 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
- return 0; // dummy return to suppress warning
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
@@ -1142,7 +1141,6 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
- return 0; // dummy return to suppress warning
}
/// ConstantFoldConvertToInt - Attempt an SSE floating point to integer
@@ -1153,11 +1151,8 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
-static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
- Type *Ty) {
- assert(Op && "Called with NULL operand");
- APFloat Val(Op->getValueAPF());
-
+static Constant *ConstantFoldConvertToInt(const APFloat &Val,
+ bool roundTowardZero, Type *Ty) {
// All of these conversion intrinsics form an integer of at most 64bits.
unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
assert(ResultWidth <= 64 &&
@@ -1309,24 +1304,31 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
}
- if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
+ // Support ConstantVector in case we have an Undef in the top.
+ if (isa<ConstantVector>(Operands[0]) ||
+ isa<ConstantDataVector>(Operands[0])) {
+ Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/false, Ty);
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1388,7 +1390,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
APInt Res;
bool Overflow;
switch (F->getIntrinsicID()) {
- default: assert(0 && "Invalid case");
+ default: llvm_unreachable("Invalid case");
case Intrinsic::sadd_with_overflow:
Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
break;
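
Note: worked example for the new vector-to-integer path in FoldBitCast: folding `bitcast (<4 x i8> <i8 1, i8 2, i8 3, i8 4> to i32)` uses BitShift = 8. On a little-endian target the loop consumes elements in reverse (index NumSrcElts-i-1), giving Result = (((4 << 8 | 3) << 8 | 2) << 8) | 1 = 0x04030201, so element 0 ends up in the low byte, matching the vector's memory layout; a big-endian target consumes the elements in order and produces 0x01020304.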
diff --git a/lib/Analysis/DIBuilder.cpp b/lib/Analysis/DIBuilder.cpp
index 85dcc46..f0bdc48 100644
--- a/lib/Analysis/DIBuilder.cpp
+++ b/lib/Analysis/DIBuilder.cpp
@@ -76,10 +76,11 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
StringRef Directory, StringRef Producer,
bool isOptimized, StringRef Flags,
unsigned RunTimeVer) {
- assert (Lang <= dwarf::DW_LANG_D && Lang >= dwarf::DW_LANG_C89
- && "Invalid Language tag");
- assert (!Filename.empty()
- && "Unable to create compile unit without filename");
+ assert(((Lang <= dwarf::DW_LANG_Python && Lang >= dwarf::DW_LANG_C89) ||
+ (Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
+ "Invalid Language tag");
+ assert(!Filename.empty() &&
+ "Unable to create compile unit without filename");
Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) };
TempEnumTypes = MDNode::getTemporary(VMContext, TElts);
Value *THElts[] = { TempEnumTypes };
@@ -358,13 +359,53 @@ DIType DIBuilder::createObjCIVar(StringRef Name,
return DIType(MDNode::get(VMContext, Elts));
}
+/// createObjCIVar - Create debugging information entry for Objective-C
+/// instance variable.
+DIType DIBuilder::createObjCIVar(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType Ty, MDNode *PropertyNode) {
+ // TAG_member is encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_member),
+ getNonCompileUnitScope(File),
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ Ty,
+ PropertyNode
+ };
+ return DIType(MDNode::get(VMContext, Elts));
+}
+
+/// createObjCProperty - Create debugging information entry for Objective-C
+/// property.
+DIObjCProperty DIBuilder::createObjCProperty(StringRef Name,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_APPLE_Property),
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, GetterName),
+ MDString::get(VMContext, SetterName),
+ ConstantInt::get(Type::getInt32Ty(VMContext), PropertyAttributes)
+ };
+ return DIObjCProperty(MDNode::get(VMContext, Elts));
+}
+
/// createClassType - Create debugging information entry for a class.
DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits, uint64_t AlignInBits,
uint64_t OffsetInBits, unsigned Flags,
DIType DerivedFrom, DIArray Elements,
- MDNode *VTableHoder, MDNode *TemplateParams) {
+ MDNode *VTableHolder, MDNode *TemplateParams) {
// TAG_class_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_class_type),
@@ -379,7 +420,7 @@ DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
DerivedFrom,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- VTableHoder,
+ VTableHolder,
TemplateParams
};
return DIType(MDNode::get(VMContext, Elts));
@@ -440,7 +481,7 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -465,7 +506,7 @@ DIType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -484,9 +525,9 @@ DIType DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) {
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
ParameterTypes,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -500,7 +541,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
- DIArray Elements) {
+ DIArray Elements) {
// TAG_enumeration_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type),
@@ -512,7 +553,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -628,6 +669,31 @@ DIType DIBuilder::createTemporaryType(DIFile F) {
return DIType(Node);
}
+/// createForwardDecl - Create a temporary forward-declared type that
+/// can be RAUW'd if the full type is seen.
+DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
+ unsigned Line, unsigned RuntimeLang) {
+ // Create a temporary MDNode.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ NULL, // TheCU
+ MDString::get(VMContext, Name),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Line),
+ // To ease transition include sizes etc of 0.
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext),
+ DIDescriptor::FlagFwdDecl),
+ NULL,
+ DIArray(),
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts);
+ return DIType(Node);
+}
+
/// getOrCreateArray - Get a DIArray, create one if required.
DIArray DIBuilder::getOrCreateArray(ArrayRef<Value *> Elements) {
if (Elements.empty()) {
@@ -738,7 +804,7 @@ DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope,
Elts.push_back(MDString::get(VMContext, Name));
Elts.push_back(F);
Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext),
- (LineNo | (ArgNo << 24))));
+ (LineNo | (ArgNo << 24))));
Elts.push_back(Ty);
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
@@ -854,7 +920,7 @@ DINameSpace DIBuilder::createNameSpace(DIDescriptor Scope, StringRef Name,
/// createLexicalBlockFile - This creates a new MDNode that encapsulates
/// an existing scope with a new filename.
DILexicalBlockFile DIBuilder::createLexicalBlockFile(DIDescriptor Scope,
- DIFile File) {
+ DIFile File) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block),
Scope,
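
Note: a short usage sketch for the two DIBuilder entry points introduced above (the fragment assumes `#include "llvm/Analysis/DIBuilder.h"`; M, File, and the literal arguments are illustrative):

  DIBuilder DIB(M);  // M is the Module being annotated
  // Forward declaration that can be RAUW'd once the full definition is seen.
  DIType Fwd = DIB.createForwardDecl(dwarf::DW_TAG_structure_type, "S",
                                     File, /*Line=*/10, /*RuntimeLang=*/0);
  // Objective-C property descriptor, later referenced from an ivar's
  // PropertyNode operand (see the new createObjCIVar overload).
  DIObjCProperty Prop = DIB.createObjCProperty("count", "count", "setCount:",
                                               /*PropertyAttributes=*/0);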
diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp
index 640ad95..585a087 100644
--- a/lib/Analysis/DebugInfo.cpp
+++ b/lib/Analysis/DebugInfo.cpp
@@ -289,6 +289,10 @@ bool DIDescriptor::isEnumerator() const {
return DbgNode && getTag() == dwarf::DW_TAG_enumerator;
}
+/// isObjCProperty - Return true if the specified tag is DW_TAG_APPLE_Property.
+bool DIDescriptor::isObjCProperty() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_APPLE_Property;
+}
//===----------------------------------------------------------------------===//
// Simple Descriptor Constructors and other Methods
//===----------------------------------------------------------------------===//
@@ -482,6 +486,7 @@ bool DINameSpace::Verify() const {
/// return base type size.
uint64_t DIDerivedType::getOriginalTypeSize() const {
unsigned Tag = getTag();
+
if (Tag == dwarf::DW_TAG_member || Tag == dwarf::DW_TAG_typedef ||
Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
Tag == dwarf::DW_TAG_restrict_type) {
@@ -490,7 +495,13 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
// approach.
if (!BaseType.isValid())
return getSizeInBits();
- if (BaseType.isDerivedType())
+ // If this is a derived type, go ahead and get the base type, unless
+ // it's a reference then it's just the size of the field. Pointer types
+ // have no need of this since they're a different type of qualification
+ // on the type.
+ if (BaseType.getTag() == dwarf::DW_TAG_reference_type)
+ return getSizeInBits();
+ else if (BaseType.isDerivedType())
return DIDerivedType(BaseType).getOriginalTypeSize();
else
return BaseType.getSizeInBits();
@@ -499,6 +510,13 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
return getSizeInBits();
}
+/// getObjCProperty - Return property node, if this ivar is associated with one.
+MDNode *DIDerivedType::getObjCProperty() const {
+ if (getVersion() <= LLVMDebugVersion11 || DbgNode->getNumOperands() <= 10)
+ return NULL;
+ return dyn_cast_or_null<MDNode>(DbgNode->getOperand(10));
+}
+
/// isInlinedFnArgument - Return true if this variable provides debugging
/// information for an inlined function argument.
bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
@@ -565,8 +583,7 @@ StringRef DIScope::getFilename() const {
return DIType(DbgNode).getFilename();
if (isFile())
return DIFile(DbgNode).getFilename();
- assert(0 && "Invalid DIScope!");
- return StringRef();
+ llvm_unreachable("Invalid DIScope!");
}
StringRef DIScope::getDirectory() const {
@@ -586,8 +603,7 @@ StringRef DIScope::getDirectory() const {
return DIType(DbgNode).getDirectory();
if (isFile())
return DIFile(DbgNode).getDirectory();
- assert(0 && "Invalid DIScope!");
- return StringRef();
+ llvm_unreachable("Invalid DIScope!");
}
DIArray DICompileUnit::getEnumTypes() const {
@@ -632,6 +648,32 @@ DIArray DICompileUnit::getGlobalVariables() const {
}
//===----------------------------------------------------------------------===//
+// DIDescriptor: vtable anchors for all descriptors.
+//===----------------------------------------------------------------------===//
+
+void DIScope::anchor() { }
+
+void DICompileUnit::anchor() { }
+
+void DIFile::anchor() { }
+
+void DIType::anchor() { }
+
+void DIBasicType::anchor() { }
+
+void DIDerivedType::anchor() { }
+
+void DICompositeType::anchor() { }
+
+void DISubprogram::anchor() { }
+
+void DILexicalBlock::anchor() { }
+
+void DINameSpace::anchor() { }
+
+void DILexicalBlockFile::anchor() { }
+
+//===----------------------------------------------------------------------===//
// DIDescriptor: dump routines for all descriptors.
//===----------------------------------------------------------------------===//
@@ -679,8 +721,13 @@ void DIType::print(raw_ostream &OS) const {
if (isBasicType())
DIBasicType(DbgNode).print(OS);
- else if (isDerivedType())
- DIDerivedType(DbgNode).print(OS);
+ else if (isDerivedType()) {
+ DIDerivedType DTy = DIDerivedType(DbgNode);
+ DTy.print(OS);
+ DICompositeType CTy = getDICompositeType(DTy);
+ if (CTy.Verify())
+ CTy.print(OS);
+ }
else if (isCompositeType())
DICompositeType(DbgNode).print(OS);
else {
@@ -698,7 +745,9 @@ void DIBasicType::print(raw_ostream &OS) const {
/// print - Print derived type.
void DIDerivedType::print(raw_ostream &OS) const {
- OS << "\n\t Derived From: "; getTypeDerivedFrom().print(OS);
+ OS << "\n\t Derived From: ";
+ getTypeDerivedFrom().print(OS);
+ OS << "\n\t";
}
/// print - Print composite type.
@@ -932,22 +981,22 @@ void DebugInfoFinder::processModule(Module &M) {
DICompileUnit CU(CU_Nodes->getOperand(i));
addCompileUnit(CU);
if (CU.getVersion() > LLVMDebugVersion10) {
- DIArray GVs = CU.getGlobalVariables();
- for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i) {
- DIGlobalVariable DIG(GVs.getElement(i));
- if (addGlobalVariable(DIG))
- processType(DIG.getType());
- }
- DIArray SPs = CU.getSubprograms();
- for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i)
- processSubprogram(DISubprogram(SPs.getElement(i)));
- DIArray EnumTypes = CU.getEnumTypes();
- for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i)
- processType(DIType(EnumTypes.getElement(i)));
- DIArray RetainedTypes = CU.getRetainedTypes();
- for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i)
- processType(DIType(RetainedTypes.getElement(i)));
- return;
+ DIArray GVs = CU.getGlobalVariables();
+ for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i) {
+ DIGlobalVariable DIG(GVs.getElement(i));
+ if (addGlobalVariable(DIG))
+ processType(DIG.getType());
+ }
+ DIArray SPs = CU.getSubprograms();
+ for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i)
+ processSubprogram(DISubprogram(SPs.getElement(i)));
+ DIArray EnumTypes = CU.getEnumTypes();
+ for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i)
+ processType(DIType(EnumTypes.getElement(i)));
+ DIArray RetainedTypes = CU.getRetainedTypes();
+ for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i)
+ processType(DIType(RetainedTypes.getElement(i)));
+ return;
}
}
}
diff --git a/lib/Analysis/DominanceFrontier.cpp b/lib/Analysis/DominanceFrontier.cpp
index 6de4e1e..1604576 100644
--- a/lib/Analysis/DominanceFrontier.cpp
+++ b/lib/Analysis/DominanceFrontier.cpp
@@ -35,6 +35,8 @@ namespace {
};
}
+void DominanceFrontier::anchor() { }
+
const DominanceFrontier::DomSetType &
DominanceFrontier::calculate(const DominatorTree &DT,
const DomTreeNode *Node) {
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index d0ca892..cad22f8 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -83,6 +83,11 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool IVUsers::AddUsersIfInteresting(Instruction *I) {
+ // Add this IV user to the Processed set before returning false to ensure that
+ // all IV users are members of the set. See IVUsers::isIVUserOrOperand.
+ if (!Processed.insert(I))
+ return true; // Instruction already handled.
+
if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
@@ -93,9 +98,6 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
return false;
- if (!Processed.insert(I))
- return true; // Instruction already handled.
-
// Get the symbolic expression for this instruction.
const SCEV *ISE = SE->getSCEV(I);
@@ -268,6 +270,7 @@ void IVStrideUse::transformToPostInc(const Loop *L) {
void IVStrideUse::deleted() {
// Remove this user from the list.
+ Parent->Processed.erase(this->getUser());
Parent->IVUses.erase(this);
// this now dangles!
}
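
Note: the new erase in deleted() keeps Processed consistent with IVUses: Processed stores bare Instruction pointers, so without the erase a later instruction allocated at the same address as a deleted user could be mistaken for one that was already handled.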
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 1f332e8..b326ba7 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -63,8 +63,22 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
// Special handling for calls.
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
- if (isa<DbgInfoIntrinsic>(II))
- continue; // Debug intrinsics don't count as size.
+ if (const IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(II)) {
+ switch (IntrinsicI->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ case Intrinsic::ptr_annotation:
+ case Intrinsic::var_annotation:
+ // These intrinsics don't count as size.
+ continue;
+ }
+ }
ImmutableCallSite CS(cast<Instruction>(II));
@@ -72,7 +86,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
// If a function is both internal and has a single use, then it is
// extremely likely to get inlined in the future (it was probably
// exposed by an interleaved devirtualization pass).
- if (F->hasInternalLinkage() && F->hasOneUse())
+ if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
++NumInlineCandidates;
// If this call is to function itself, then the function is recursive.
@@ -138,7 +152,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
// FIXME: This logic isn't really right; we can safely inline functions
// with indirectbr's as long as no other function or global references the
// blockaddress of a block within the current function. And as a QOI issue,
- // if someone is using a blockaddress wihtout an indirectbr, and that
+ // if someone is using a blockaddress without an indirectbr, and that
// reference somehow ends up in another function or global, we probably
// don't want to inline this function.
if (isa<IndirectBrInst>(BB->getTerminator()))
@@ -207,22 +221,120 @@ unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
if (!V->getType()->isPointerTy()) return 0; // Not a pointer
unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *I = cast<Instruction>(*UI);
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += InlineConstants::InstrCost;
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has variable indices, we won't be able to do much with it.
- if (GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP);
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
- // Track pointer through bitcasts.
- Reduction += CountCodeReductionForAlloca(BCI);
- } else {
- // If there is some other strange instruction, we're not going to be able
- // to do much if we inline this.
- return 0;
+
+ // Looking at ICmpInsts will never abort the analysis and return zero, and
+ // analyzing them is expensive, so save them for last so that we don't do
+ // extra work that we end up throwing out.
+ SmallVector<ICmpInst *, 4> ICmpInsts;
+
+ SmallVector<Value *, 4> Worklist;
+ Worklist.push_back(V);
+ do {
+ Value *V = Worklist.pop_back_val();
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI){
+ Instruction *I = cast<Instruction>(*UI);
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isSimple())
+ return 0;
+ Reduction += InlineConstants::InstrCost;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ if (!SI->isSimple())
+ return 0;
+ Reduction += InlineConstants::InstrCost;
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has variable indices, we won't be able to do much with it.
+ if (!GEP->hasAllConstantIndices())
+ return 0;
+ // A non-zero GEP will likely become a mask operation after SROA.
+ if (GEP->hasAllZeroIndices())
+ Reduction += InlineConstants::InstrCost;
+ Worklist.push_back(GEP);
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+ // Track pointer through bitcasts.
+ Worklist.push_back(BCI);
+ Reduction += InlineConstants::InstrCost;
+ } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+ // SROA can handle a select of alloca iff all uses of the alloca are
+ // loads, and dereferenceable. We assume it's dereferenceable since
+ // we're told the input is an alloca.
+ for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple()) return 0;
+ }
+ // We don't know whether we'll be deleting the rest of the chain of
+ // instructions from the SelectInst on, because we don't know whether
+ // the other side of the select is also an alloca or not.
+ continue;
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return 0;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ // SROA can usually chew through these intrinsics.
+ Reduction += InlineConstants::InstrCost;
+ break;
+ }
+ } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
+ if (!isa<Constant>(ICI->getOperand(1)))
+ return 0;
+ ICmpInsts.push_back(ICI);
+ } else {
+ // If there is some other strange instruction, we're not going to be
+ // able to do much if we inline this.
+ return 0;
+ }
}
+ } while (!Worklist.empty());
+
+ while (!ICmpInsts.empty()) {
+ ICmpInst *ICI = ICmpInsts.pop_back_val();
+
+ // An icmp pred (alloca, C) becomes true if the predicate is true when
+ // equal and false otherwise.
+ bool Result = ICI->isTrueWhenEqual();
+
+ SmallVector<Instruction *, 4> Worklist;
+ Worklist.push_back(ICI);
+ do {
+ Instruction *U = Worklist.pop_back_val();
+ Reduction += InlineConstants::InstrCost;
+ for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
+ UI != UE; ++UI) {
+ Instruction *I = dyn_cast<Instruction>(*UI);
+ if (!I || I->mayHaveSideEffects()) continue;
+ if (I->getNumOperands() == 1)
+ Worklist.push_back(I);
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ // If BO produces the same value as U, then the other operand is
+ // irrelevant and we can put it into the Worklist to continue
+ // deleting dead instructions. If BO produces the same value as the
+ // other operand, we can delete BO but that's it.
+ if (Result == true) {
+ if (BO->getOpcode() == Instruction::Or)
+ Worklist.push_back(I);
+ if (BO->getOpcode() == Instruction::And)
+ Reduction += InlineConstants::InstrCost;
+ } else {
+ if (BO->getOpcode() == Instruction::Or ||
+ BO->getOpcode() == Instruction::Xor)
+ Reduction += InlineConstants::InstrCost;
+ if (BO->getOpcode() == Instruction::And)
+ Worklist.push_back(I);
+ }
+ }
+ if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
+ BasicBlock *BB = BI->getSuccessor(Result ? 0 : 1);
+ if (BB->getSinglePredecessor())
+ Reduction += InlineConstants::InstrCost * NumBBInsts[BB];
+ }
+ }
+ } while (!Worklist.empty());
}
return Reduction;
@@ -232,10 +344,12 @@ unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
// If this function contains a call that "returns twice" (e.g., setjmp or
- // _setjmp), never inline it. This is a hack because we depend on the user
- // marking their local variables as volatile if they are live across a setjmp
- // call, and they probably won't do this in callers.
- callsSetJmp = F->callsFunctionThatReturnsTwice();
+ // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
+ // This is a hack because we depend on the user marking their local variables
+ // as volatile if they are live across a setjmp call, and they probably
+ // won't do this in callers.
+ exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
+ !F->hasFnAttr(Attribute::ReturnsTwice);
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
@@ -265,7 +379,7 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F,
/// NeverInline - returns true if the function should never be inlined into
/// any caller
bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
- return (Metrics.callsSetJmp || Metrics.isRecursive ||
+ return (Metrics.exposesReturnsTwice || Metrics.isRecursive ||
Metrics.containsIndirectBr);
}
// getSpecializationBonus - The heuristic used to determine the per-call
@@ -420,7 +534,7 @@ int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
// Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
+ InlineCost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
return InlineCost;
}
@@ -634,7 +748,7 @@ InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
// FIXME: If any of these three are true for the callee, the callee was
// not inlined into the caller, so I think they're redundant here.
- CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
+ CallerMetrics.exposesReturnsTwice |= CalleeMetrics.exposesReturnsTwice;
CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
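
Note: illustration of the reworked CountCodeReductionForAlloca(): for `%a = alloca i32`, the chain `%b = bitcast i32* %a to i8*` followed by a simple `load i8* %b` is now walked iteratively through the worklist and contributes InlineConstants::InstrCost twice (once for the bitcast, once for the load), whereas a volatile or atomic access anywhere on the chain fails isSimple() and collapses the whole estimate to 0. Comparisons against constants are deferred to the trailing loop so their more expensive dead-code scan only runs if nothing else has already bailed out.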
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index f1cfd6c..370ab96 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -21,6 +21,7 @@
#include "llvm/Operator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -92,7 +93,8 @@ static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
// If we have a DominatorTree then do a precise test.
if (DT)
- return DT->dominates(I, P);
+ return !DT->isReachableFromEntry(P->getParent()) ||
+ !DT->isReachableFromEntry(I->getParent()) || DT->dominates(I, P);
// Otherwise, if the instruction is in the entry block, and is not an invoke,
// then it obviously dominates all phi nodes.
@@ -476,6 +478,11 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// the original comparison.
if (TCmp == FCmp)
return TCmp;
+
+ // The remaining cases only make sense if the select condition has the same
+ // type as the result of the comparison, so bail out if this is not so.
+ if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
+ return 0;
// If the false value simplified to false, then the result of the compare
// is equal to "Cond && TCmp". This also catches the case when the false
// value simplified to false and the true value to true, returning "Cond".
@@ -812,14 +819,10 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
return Op0;
// (X / Y) * Y -> X if the division is exact.
- Value *X = 0, *Y = 0;
- if ((match(Op0, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op1) || // (X / Y) * Y
- (match(Op1, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op0)) { // Y * (X / Y)
- PossiblyExactOperator *Div =
- cast<PossiblyExactOperator>(Y == Op1 ? Op0 : Op1);
- if (Div->isExact())
- return X;
- }
+ Value *X = 0;
+ if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
+ match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
+ return X;
// i1 mul -> and.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
@@ -1162,8 +1165,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// (X >> A) << A -> X
Value *X;
- if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1))) &&
- cast<PossiblyExactOperator>(Op0)->isExact())
+ if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
return 0;
}
@@ -1520,6 +1522,29 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
return 0;
}
+/// stripPointerAdjustments - This is like Value::stripPointerCasts, but also
+/// removes inbounds gep operations, regardless of their indices.
+static Value *stripPointerAdjustmentsImpl(Value *V,
+ SmallPtrSet<GEPOperator*, 8> &VisitedGEPs) {
+ GEPOperator *GEP = dyn_cast<GEPOperator>(V);
+ if (GEP == 0 || !GEP->isInBounds())
+ return V;
+
+ // If we've already seen this GEP, we will end up infinitely looping. This
+ // can happen in unreachable code.
+ if (!VisitedGEPs.insert(GEP))
+ return V;
+
+ return stripPointerAdjustmentsImpl(GEP->getOperand(0)->stripPointerCasts(),
+ VisitedGEPs);
+}
+
+static Value *stripPointerAdjustments(Value *V) {
+ SmallPtrSet<GEPOperator*, 8> VisitedGEPs;
+ return stripPointerAdjustmentsImpl(V, VisitedGEPs);
+}
+
+
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
@@ -1585,23 +1610,51 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
- // icmp <alloca*>, <global/alloca*/null> - Different stack variables have
- // different addresses, and what's more the address of a stack variable is
- // never null or equal to the address of a global. Note that generalizing
- // to the case where LHS is a global variable address or null is pointless,
- // since if both LHS and RHS are constants then we already constant folded
- // the compare, and if only one of them is then we moved it to RHS already.
- if (isa<AllocaInst>(LHS) && (isa<GlobalValue>(RHS) || isa<AllocaInst>(RHS) ||
- isa<ConstantPointerNull>(RHS)))
- // We already know that LHS != RHS.
- return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+ // icmp <object*>, <object*/null> - Different identified objects have
+ // different addresses (unless null), and what's more the address of an
+ // identified local is never equal to another argument (again, barring null).
+ // Note that generalizing to the case where LHS is a global variable address
+ // or null is pointless, since if both LHS and RHS are constants then we
+ // already constant folded the compare, and if only one of them is then we
+ // moved it to RHS already.
+ Value *LHSPtr = LHS->stripPointerCasts();
+ Value *RHSPtr = RHS->stripPointerCasts();
+ if (LHSPtr == RHSPtr)
+ return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
+
+ // Be more aggressive about stripping pointer adjustments when checking a
+ // comparison of an alloca address to another object. We can rip off all
+ // inbounds GEP operations, even if they are variable.
+ LHSPtr = stripPointerAdjustments(LHSPtr);
+ if (llvm::isIdentifiedObject(LHSPtr)) {
+ RHSPtr = stripPointerAdjustments(RHSPtr);
+ if (llvm::isKnownNonNull(LHSPtr) || llvm::isKnownNonNull(RHSPtr)) {
+ // If both sides are different identified objects, they aren't equal
+ // unless they're null.
+ if (LHSPtr != RHSPtr && llvm::isIdentifiedObject(RHSPtr))
+ return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+
+ // A local identified object (alloca or noalias call) can't equal any
+ // incoming argument, unless they're both null.
+ if (isa<Instruction>(LHSPtr) && isa<Argument>(RHSPtr))
+ return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+ }
+
+ // Assume that the constant null is on the right.
+ if (llvm::isKnownNonNull(LHSPtr) && isa<ConstantPointerNull>(RHSPtr))
+ return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+ } else if (isa<Argument>(LHSPtr)) {
+ RHSPtr = stripPointerAdjustments(RHSPtr);
+ // An alloca can't be equal to an argument.
+ if (isa<AllocaInst>(RHSPtr))
+ return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+ }
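The facts this block encodes have a direct source-level reading: a fresh local's address can never coincide with a distinct identified object or with an incoming pointer argument, except through null. A hedged standalone C++ illustration (not the LLVM API):

    #include <cassert>

    int g;                        // a global: an identified object

    void check(int *arg) {        // an incoming pointer argument
      int local;                  // an alloca: a local identified object
      assert(&local != &g);       // distinct identified objects differ
      assert(&local != arg);      // a local never equals an argument
      assert(&local != nullptr);  // an alloca address is never null
    }

    int main() {
      int x = 0;
      check(&x);
      return 0;
    }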
// If we are comparing with zero then try hard since this is a common case.
if (match(RHS, m_Zero())) {
bool LHSKnownNonNegative, LHSKnownNegative;
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_ULT:
return getFalse(ITy);
case ICmpInst::ICMP_UGE:
@@ -1771,8 +1824,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
// LHS <u RHS.
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
@@ -1831,8 +1883,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// bits there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_EQ:
return ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_NE:
@@ -2207,6 +2258,28 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getFalse(ITy);
}
+ // Simplify comparisons of GEPs.
+ if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
+ if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
+ if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
+ GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
+ (ICmpInst::isEquality(Pred) ||
+ (GLHS->isInBounds() && GRHS->isInBounds() &&
+ Pred == ICmpInst::getSignedPredicate(Pred)))) {
+ // The bases are equal and the indices are constant. Build a constant
+ // expression GEP with the same indices and a null base pointer to see
+ // what constant folding can make out of it.
+ Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
+ SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
+ Constant *NewLHS = ConstantExpr::getGetElementPtr(Null, IndicesLHS);
+
+ SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
+ Constant *NewRHS = ConstantExpr::getGetElementPtr(Null, IndicesRHS);
+ return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
+ }
+ }
+ }
+
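The null-base trick works because, once the base pointers are known equal, only the constant offsets matter; rebasing both GEPs at null turns the comparison into pure constant arithmetic. A plain C++ analogue of the same idea using offsetof (a sketch, not the LLVM code):

    #include <cassert>
    #include <cstddef>

    struct S { int a; int b; int c; };

    int main() {
      S s;
      // Comparing two fields of one object reduces to comparing their
      // constant offsets from a (conceptually null) common base.
      assert((&s.b < &s.c) == (offsetof(S, b) < offsetof(S, c)));
      return 0;
    }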
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index d27d911..279d6a9 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -24,14 +24,15 @@
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
#include <stack>
using namespace llvm;
+using namespace PatternMatch;
char LazyValueInfo::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfo, "lazy-value-info",
@@ -309,50 +310,6 @@ namespace {
};
}
-namespace llvm {
- template<>
- struct DenseMapInfo<LVIValueHandle> {
- typedef DenseMapInfo<Value*> PointerInfo;
- static inline LVIValueHandle getEmptyKey() {
- return LVIValueHandle(PointerInfo::getEmptyKey(),
- static_cast<LazyValueInfoCache*>(0));
- }
- static inline LVIValueHandle getTombstoneKey() {
- return LVIValueHandle(PointerInfo::getTombstoneKey(),
- static_cast<LazyValueInfoCache*>(0));
- }
- static unsigned getHashValue(const LVIValueHandle &Val) {
- return PointerInfo::getHashValue(Val);
- }
- static bool isEqual(const LVIValueHandle &LHS, const LVIValueHandle &RHS) {
- return LHS == RHS;
- }
- };
-
- template<>
- struct DenseMapInfo<std::pair<AssertingVH<BasicBlock>, Value*> > {
- typedef std::pair<AssertingVH<BasicBlock>, Value*> PairTy;
- typedef DenseMapInfo<AssertingVH<BasicBlock> > APointerInfo;
- typedef DenseMapInfo<Value*> BPointerInfo;
- static inline PairTy getEmptyKey() {
- return std::make_pair(APointerInfo::getEmptyKey(),
- BPointerInfo::getEmptyKey());
- }
- static inline PairTy getTombstoneKey() {
- return std::make_pair(APointerInfo::getTombstoneKey(),
- BPointerInfo::getTombstoneKey());
- }
- static unsigned getHashValue( const PairTy &Val) {
- return APointerInfo::getHashValue(Val.first) ^
- BPointerInfo::getHashValue(Val.second);
- }
- static bool isEqual(const PairTy &LHS, const PairTy &RHS) {
- return APointerInfo::isEqual(LHS.first, RHS.first) &&
- BPointerInfo::isEqual(LHS.second, RHS.second);
- }
- };
-}
-
namespace {
/// LazyValueInfoCache - This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
@@ -364,7 +321,7 @@ namespace {
/// ValueCache - This is all of the cached information for all values,
/// mapped from Value* to key information.
- DenseMap<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+ std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
/// OverDefinedCache - This tracks, on a per-block basis, the set of
/// values that are over-defined at the end of that block. This is required
@@ -492,7 +449,7 @@ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
E = ToErase.end(); I != E; ++I)
OverDefinedCache.erase(*I);
- for (DenseMap<LVIValueHandle, ValueCacheEntryTy>::iterator
+ for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
I->second.erase(BB);
}
@@ -840,9 +797,8 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition());
- if (ICI && ICI->getOperand(0) == Val &&
- isa<Constant>(ICI->getOperand(1))) {
- if (ICI->isEquality()) {
+ if (ICI && isa<Constant>(ICI->getOperand(1))) {
+ if (ICI->isEquality() && ICI->getOperand(0) == Val) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
@@ -852,12 +808,23 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
return true;
}
- if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+ // Recognize the range checking idiom that InstCombine produces.
+ // (X-C1) u< C2 --> [C1, C1+C2)
+ ConstantInt *NegOffset = 0;
+ if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
+ match(ICI->getOperand(0), m_Add(m_Specific(Val),
+ m_ConstantInt(NegOffset)));
+
+ ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1));
+ if (CI && (ICI->getOperand(0) == Val || NegOffset)) {
// Calculate the range of values that would satisfy the comparison.
ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
ConstantRange TrueValues =
ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
+ if (NegOffset) // Apply the offset from above.
+ TrueValues = TrueValues.subtract(NegOffset->getValue());
+
// If we're interested in the false dest, invert the condition.
if (!isTrueDest) TrueValues = TrueValues.inverse();
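A worked instance of the idiom: with C1 == 5 and C2 == 10, "(X - 5) u< 10" holds exactly when X lies in [5, 15), because unsigned wraparound sends every X below 5 to a huge value. A standalone C++ check (illustrative sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 64; ++X) {
        bool Idiom = (X - 5u) < 10u;        // the InstCombine form
        bool Range = (X >= 5u && X < 15u);  // the recovered [C1, C1+C2)
        assert(Idiom == Range);
      }
      return 0;
    }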
@@ -899,8 +866,8 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// BBFrom to BBTo.
unsigned NumEdges = 0;
ConstantInt *EdgeVal = 0;
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) {
- if (SI->getSuccessor(i) != BBTo) continue;
+ for (unsigned i = 0, e = SI->getNumCases(); i != e; ++i) {
+ if (SI->getCaseSuccessor(i) != BBTo) continue;
if (NumEdges++) break;
EdgeVal = SI->getCaseValue(i);
}
diff --git a/lib/Analysis/LoopDependenceAnalysis.cpp b/lib/Analysis/LoopDependenceAnalysis.cpp
index 3997ac4..463269d 100644
--- a/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -91,8 +91,6 @@ static Value *GetPointerOperand(Value *I) {
if (StoreInst *i = dyn_cast<StoreInst>(I))
return i->getPointerOperand();
llvm_unreachable("Value is no load or store instruction!");
- // Never reached.
- return 0;
}
static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 704e27b..3a544f3 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -323,14 +323,20 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
!TD.fitsInLegalInteger(NewLoadByteSize*8))
return 0;
+ if (LIOffs+NewLoadByteSize > MemLocEnd &&
+ LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+ // We will be reading past the location accessed by the original program.
+ // While this is safe in a regular build, Address Safety analysis tools
+ // may start reporting false warnings. So, don't do widening.
+ return 0;
+ }
+
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
return NewLoadByteSize;
NewLoadByteSize <<= 1;
}
-
- return 0;
}
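The widening loop's arithmetic, modeled standalone (a simplified sketch, not the LLVM routine): keep doubling the candidate width until [LIOffs, LIOffs+NewLoadByteSize) covers MemLocEnd, or give up once the width stops being legal.

    #include <cstdint>
    #include <iostream>

    // Returns the widened load size in bytes, or 0 if no legal width covers
    // the location (64 stands in for the target's largest legal integer).
    uint64_t widenedSize(int64_t LIOffs, int64_t MemLocEnd, uint64_t Size) {
      for (uint64_t NewSize = Size; NewSize <= 64; NewSize <<= 1)
        if (LIOffs + (int64_t)NewSize >= MemLocEnd)
          return NewSize;
      return 0;
    }

    int main() {
      // A 1-byte load at offset 0 must grow to 4 bytes to cover [0, 4).
      std::cout << widenedSize(0, 4, 1) << '\n';  // prints 4
      return 0;
    }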
namespace {
@@ -344,13 +350,18 @@ namespace {
bool shouldExplore(Use *U) {
Instruction *I = cast<Instruction>(U->getUser());
- if (BeforeHere != I && DT->dominates(BeforeHere, I))
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
return false;
return true;
}
- bool captured(Instruction *I) {
- if (BeforeHere != I && DT->dominates(BeforeHere, I))
+ bool captured(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
return false;
Captured = true;
return true;
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index 80ea219..ca06300 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -74,7 +74,6 @@ static bool VerifySubExpr(Value *Expr,
errs() << *I << '\n';
llvm_unreachable("Either something is missing from InstInputs or "
"CanPHITrans is wrong.");
- return false;
}
// Validate the operands of the instruction.
@@ -101,7 +100,6 @@ bool PHITransAddr::Verify() const {
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
errs() << " InstInput #" << i << " is " << *InstInputs[i] << "\n";
llvm_unreachable("This is unexpected.");
- return false;
}
// a-ok.
diff --git a/lib/Analysis/PathNumbering.cpp b/lib/Analysis/PathNumbering.cpp
index 0e3b6e6..80c5222 100644
--- a/lib/Analysis/PathNumbering.cpp
+++ b/lib/Analysis/PathNumbering.cpp
@@ -386,8 +386,8 @@ void BallLarusDag::buildNode(BLBlockNodeMap& inDag, BLNodeStack& dfsStack) {
}
TerminatorInst* terminator = currentNode->getBlock()->getTerminator();
- if(isa<ReturnInst>(terminator) || isa<UnreachableInst>(terminator)
- || isa<ResumeInst>(terminator) || isa<UnwindInst>(terminator))
+ if(isa<ReturnInst>(terminator) || isa<UnreachableInst>(terminator) ||
+ isa<ResumeInst>(terminator))
addEdge(currentNode, getExit(),0);
currentNode->setColor(BallLarusNode::GRAY);
diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp
index 828913d..b507b1e 100644
--- a/lib/Analysis/RegionInfo.cpp
+++ b/lib/Analysis/RegionInfo.cpp
@@ -650,7 +650,7 @@ void RegionInfo::buildRegionsTree(DomTreeNode *N, Region *region) {
// This basic block is a start block of a region. It is already in the
// BBtoRegion relation. Only the child basic blocks have to be updated.
if (it != BBtoRegion.end()) {
- Region *newRegion = it->second;;
+ Region *newRegion = it->second;
region->addSubRegion(getTopMostParent(newRegion));
region = newRegion;
} else {
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index daf7742..0c0ceeb 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -259,11 +259,9 @@ Type *SCEV::getType() const {
return cast<SCEVUnknown>(this)->getType();
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return 0;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return 0;
}
bool SCEV::isZero() const {
@@ -284,6 +282,20 @@ bool SCEV::isAllOnesValue() const {
return false;
}
+/// isNonConstantNegative - Return true if the specified scev is negated, but
+/// not a constant.
+bool SCEV::isNonConstantNegative() const {
+ const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
+ if (!Mul) return false;
+
+ // If there is a constant factor, it will be first.
+ const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
+ if (!SC) return false;
+
+ // Return true if the value is negative; this matches things like (-42 * V).
+ return SC->getValue()->getValue().isNegative();
+}
+
SCEVCouldNotCompute::SCEVCouldNotCompute() :
SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
@@ -597,11 +609,8 @@ namespace {
}
default:
- break;
+ llvm_unreachable("Unknown SCEV kind!");
}
-
- llvm_unreachable("Unknown SCEV kind!");
- return 0;
}
};
}
@@ -3925,13 +3934,19 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
//
/// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
-/// normal unsigned value, if possible. Returns 0 if the trip count is unknown
-/// or not constant. Will also return 0 if the maximum trip count is very large
-/// (>= 2^32)
-unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
- BasicBlock *ExitBlock) {
+/// normal unsigned value. Returns 0 if the trip count is unknown or not
+/// constant. Will also return 0 if the maximum trip count is very large (>=
+/// 2^32).
+///
+/// This "trip count" assumes that control exits via ExitingBlock. More
+/// precisely, it is the number of times that control may reach ExitingBlock
+/// before taking the branch. For loops with multiple exits, it may not be the
+/// number of times that the loop header executes because the loop may exit
+/// prematurely via another branch.
+unsigned ScalarEvolution::
+getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock) {
const SCEVConstant *ExitCount =
- dyn_cast<SCEVConstant>(getExitCount(L, ExitBlock));
+ dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
if (!ExitCount)
return 0;
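The multi-exit caveat spelled out above is easy to see in source form. In the hedged C++ illustration below, the trip count computed for the latch exit is 10, yet the header runs only 4 times because the other exit is taken first.

    #include <iostream>

    int main() {
      int visits = 0;
      for (int i = 0; i < 10; ++i) {  // latch exit: up to 10 trips
        ++visits;
        if (i == 3) break;            // a second exiting block fires first
      }
      std::cout << visits << '\n';    // prints 4, not 10
      return 0;
    }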
@@ -3954,9 +3969,12 @@ unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
/// multiple of a constant (which is also the case if the trip count is simply
 /// constant; use getSmallConstantTripCount for that case). Will also return 1
/// if the trip count is very large (>= 2^32).
-unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
- BasicBlock *ExitBlock) {
- const SCEV *ExitCount = getExitCount(L, ExitBlock);
+///
+/// As explained in the comments for getSmallConstantTripCount, this assumes
+/// that control exits the loop via ExitingBlock.
+unsigned ScalarEvolution::
+getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) {
+ const SCEV *ExitCount = getExitCount(L, ExitingBlock);
if (ExitCount == getCouldNotCompute())
return 1;
@@ -4562,40 +4580,6 @@ EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
return cast<SCEVConstant>(Val)->getValue();
}
-/// GetAddressedElementFromGlobal - Given a global variable with an initializer
-/// and a GEP expression (missing the pointer index) indexing into it, return
-/// the addressed element of the initializer or null if the index expression is
-/// invalid.
-static Constant *
-GetAddressedElementFromGlobal(GlobalVariable *GV,
- const std::vector<ConstantInt*> &Indices) {
- Constant *Init = GV->getInitializer();
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- uint64_t Idx = Indices[i]->getZExtValue();
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
- assert(Idx < CS->getNumOperands() && "Bad struct index!");
- Init = cast<Constant>(CS->getOperand(Idx));
- } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
- if (Idx >= CA->getNumOperands()) return 0; // Bogus program
- Init = cast<Constant>(CA->getOperand(Idx));
- } else if (isa<ConstantAggregateZero>(Init)) {
- if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
- assert(Idx < STy->getNumElements() && "Bad struct index!");
- Init = Constant::getNullValue(STy->getElementType(Idx));
- } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
- if (Idx >= ATy->getNumElements()) return 0; // Bogus program
- Init = Constant::getNullValue(ATy->getElementType());
- } else {
- llvm_unreachable("Unknown constant aggregate type!");
- }
- return 0;
- } else {
- return 0; // Unknown initializer type
- }
- }
- return Init;
-}
-
/// ComputeLoadConstantCompareExitLimit - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
@@ -4623,7 +4607,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
// Okay, we allow one non-constant index into the GEP instruction.
Value *VarIdx = 0;
- std::vector<ConstantInt*> Indexes;
+ std::vector<Constant*> Indexes;
unsigned VarIdxNum = 0;
for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
@@ -4657,7 +4641,8 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
// Form the GEP offset.
Indexes[VarIdxNum] = Val;
- Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
+ Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
+ Indexes);
if (Result == 0) break; // Cannot compute!
// Evaluate the condition for this iteration.
@@ -5297,7 +5282,6 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
}
llvm_unreachable("Unknown SCEV type!");
- return 0;
}
/// getSCEVAtScope - This is a convenience function which does
@@ -5928,7 +5912,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
switch (Pred) {
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
- break;
case ICmpInst::ICMP_SGT:
Pred = ICmpInst::ICMP_SLT;
std::swap(LHS, RHS);
@@ -6779,11 +6762,8 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
return LoopInvariant;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return LoopVariant;
- default: break;
+ default: llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return LoopVariant;
}
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
@@ -6865,11 +6845,9 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
return ProperlyDominatesBlock;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return DoesNotDominateBlock;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return DoesNotDominateBlock;
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
@@ -6915,11 +6893,9 @@ bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
return false;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return false;
}
void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index f3cf549..69507be 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -19,6 +19,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
@@ -30,6 +31,19 @@ using namespace llvm;
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
+ // This function must be called with the builder having a valid insertion
+ // point. It doesn't need to be the actual IP where the uses of the returned
+ // cast will be added, but it must dominate that IP.
+ // We use this precondition to produce a cast that will dominate all its
+ // uses. In particular, this is crucial for the case where the builder's
+ // insertion point *is* the point where we were asked to put the cast.
+ // Since we don't know whether the builder's insertion point is actually
+ // where the uses will be added (only that it dominates it), we are
+ // not allowed to move it.
+ BasicBlock::iterator BIP = Builder.GetInsertPoint();
+
+ Instruction *Ret = NULL;
+
// Check to see if there is already a cast!
for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
@@ -37,27 +51,35 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
if (U->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(U))
if (CI->getOpcode() == Op) {
- // If the cast isn't where we want it, fix it.
- if (BasicBlock::iterator(CI) != IP) {
+ // If the cast isn't where we want it, create a new cast at IP.
+ // Likewise, do not reuse a cast at BIP because it must dominate
+ // instructions that might be inserted before BIP.
+ if (BasicBlock::iterator(CI) != IP || BIP == IP) {
// Create a new cast, and leave the old cast in place in case
// it is being used as an insert point. Clear its operand
// so that it doesn't hold anything live.
- Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
+ Ret = CastInst::Create(Op, V, Ty, "", IP);
+ Ret->takeName(CI);
+ CI->replaceAllUsesWith(Ret);
CI->setOperand(0, UndefValue::get(V->getType()));
- rememberInstruction(NewCI);
- return NewCI;
+ break;
}
- rememberInstruction(CI);
- return CI;
+ Ret = CI;
+ break;
}
}
// Create a new cast.
- Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
- rememberInstruction(I);
- return I;
+ if (!Ret)
+ Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
+
+ // We assert at the end of the function since IP might point to an
+ // instruction with different dominance properties than a cast
+ // (an invoke for example) and not dominate BIP (but the cast does).
+ assert(SE.DT->dominates(Ret, BIP));
+
+ rememberInstruction(Ret);
+ return Ret;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
@@ -120,8 +142,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
- while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
- isa<LandingPadInst>(IP))
+ while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
++IP;
return ReuseOrCreateCast(I, Ty, Op, IP);
}
@@ -497,6 +518,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
V = InsertNoopCastOfTo(V,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
+ assert(!isa<Instruction>(V) ||
+ SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -593,20 +617,6 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
return expand(SE.getAddExpr(Ops));
}
-/// isNonConstantNegative - Return true if the specified scev is negated, but
-/// not a constant.
-static bool isNonConstantNegative(const SCEV *F) {
- const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
- if (!Mul) return false;
-
- // If there is a constant factor, it will be first.
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
- if (!SC) return false;
-
- // Return true if the value is negative, this matches things like (-42 * V).
- return SC->getValue()->getValue().isNegative();
-}
-
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
@@ -660,7 +670,6 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
- return 0;
}
namespace {
@@ -685,10 +694,10 @@ public:
// If one operand is a non-constant negative and the other is not,
// put the non-constant negative on the right so that a sub can
// be used instead of a negate and add.
- if (isNonConstantNegative(LHS.second)) {
- if (!isNonConstantNegative(RHS.second))
+ if (LHS.second->isNonConstantNegative()) {
+ if (!RHS.second->isNonConstantNegative())
return false;
- } else if (isNonConstantNegative(RHS.second))
+ } else if (RHS.second->isNonConstantNegative())
return true;
// Otherwise they are equivalent according to this comparison.
@@ -749,7 +758,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
for (++I; I != E && I->first == CurLoop; ++I)
NewOps.push_back(I->second);
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
- } else if (isNonConstantNegative(Op)) {
+ } else if (Op->isNonConstantNegative()) {
// Instead of doing a negate and add, just do a subtract.
Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
@@ -880,58 +889,108 @@ bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
return isNormalAddRecExprPHI(PN, IncV, L);
}
-/// Determine if this cyclic phi is in a form that would have been generated by
-/// LSR. We don't care if the phi was actually expanded in this pass, as long
-/// as it is in a low-cost form, for example, no implied multiplication. This
-/// should match any patterns generated by getAddRecExprPHILiterally and
-/// expandAddtoGEP.
-bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
- const Loop *L) {
+/// getIVIncOperand returns an induction variable increment's induction
+/// variable operand.
+///
+/// If allowScale is set, any type of GEP is allowed as long as the nonIV
+/// operands dominate InsertPos.
+///
+/// If allowScale is not set, ensure that a GEP increment conforms to one of the
+/// simple patterns generated by getAddRecExprPHILiterally and
+/// expandAddToGEP. If the pattern isn't recognized, return NULL.
+Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
+ Instruction *InsertPos,
+ bool allowScale) {
+ if (IncV == InsertPos)
+ return NULL;
+
switch (IncV->getOpcode()) {
+ default:
+ return NULL;
// Check for a simple Add/Sub or GEP of a loop invariant step.
case Instruction::Add:
- case Instruction::Sub:
- return IncV->getOperand(0) == PN
- && L->isLoopInvariant(IncV->getOperand(1));
+ case Instruction::Sub: {
+ Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
+ if (!OInst || SE.DT->dominates(OInst, InsertPos))
+ return dyn_cast<Instruction>(IncV->getOperand(0));
+ return NULL;
+ }
case Instruction::BitCast:
- IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
- if (!IncV)
- return false;
- // fall-thru to GEP handling
- case Instruction::GetElementPtr: {
- // This must be a pointer addition of constants (pretty) or some number of
- // address-size elements (ugly).
+ return dyn_cast<Instruction>(IncV->getOperand(0));
+ case Instruction::GetElementPtr:
for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
I != E; ++I) {
if (isa<Constant>(*I))
continue;
- // ugly geps have 2 operands.
- // i1* is used by the expander to represent an address-size element.
+ if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
+ if (!SE.DT->dominates(OInst, InsertPos))
+ return NULL;
+ }
+ if (allowScale) {
+ // Allow any kind of GEP as long as it can be hoisted.
+ continue;
+ }
+ // This must be a pointer addition of constants (pretty), which is already
+ // handled, or some number of address-size elements (ugly). Ugly geps
+ // have 2 operands. i1* is used by the expander to represent an
+ // address-size element.
if (IncV->getNumOperands() != 2)
- return false;
+ return NULL;
unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
&& IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
- return false;
- // Ensure the operands dominate the insertion point. I don't know of a
- // case when this would not be true, so this is somewhat untested.
- if (L == IVIncInsertLoop) {
- for (User::op_iterator OI = IncV->op_begin()+1,
- OE = IncV->op_end(); OI != OE; ++OI)
- if (Instruction *OInst = dyn_cast<Instruction>(OI))
- if (!SE.DT->dominates(OInst, IVIncInsertPos))
- return false;
- }
+ return NULL;
break;
}
- IncV = dyn_cast<Instruction>(IncV->getOperand(0));
- if (IncV && IncV->getOpcode() == Instruction::BitCast)
- IncV = dyn_cast<Instruction>(IncV->getOperand(0));
- return IncV == PN;
+ return dyn_cast<Instruction>(IncV->getOperand(0));
}
- default:
+}
+
+/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
+/// it available to other uses in this loop. Recursively hoist any operands,
+/// until we reach a value that dominates InsertPos.
+bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
+ if (SE.DT->dominates(IncV, InsertPos))
+ return true;
+
+ // InsertPos must itself dominate IncV so that IncV's new position satisfies
+ // its existing users.
+ if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
return false;
+
+ // Check that the chain of IV operands leading back to Phi can be hoisted.
+ SmallVector<Instruction*, 4> IVIncs;
+ for(;;) {
+ Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
+ if (!Oper)
+ return false;
+ // IncV is safe to hoist.
+ IVIncs.push_back(IncV);
+ IncV = Oper;
+ if (SE.DT->dominates(IncV, InsertPos))
+ break;
}
+ for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
+ E = IVIncs.rend(); I != E; ++I) {
+ (*I)->moveBefore(InsertPos);
+ }
+ return true;
+}
+
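hoistIVInc walks forward from IncV toward the phi, then moves the collected instructions in reverse, so each moved instruction still follows its operands. A simplified standalone sketch of that ordering (names are placeholders, not LLVM types):

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // Chain discovered walking from the increment toward the phi;
      // each entry uses the entry after it as an operand.
      std::vector<std::string> IVIncs = {"iv.next", "iv.scaled"};
      // Reverse iteration moves "iv.scaled" (the operand) first, so
      // "iv.next" is placed after everything it uses.
      for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I)
        std::cout << "moveBefore(InsertPos): " << *I << '\n';
      return 0;
    }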
+/// Determine if this cyclic phi is in a form that would have been generated by
+/// LSR. We don't care if the phi was actually expanded in this pass, as long
+/// as it is in a low-cost form, for example, no implied multiplication. This
+/// should match any patterns generated by getAddRecExprPHILiterally and
+/// expandAddToGEP.
+bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
+ const Loop *L) {
+ for(Instruction *IVOper = IncV;
+ (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
+ /*allowScale=*/false));) {
+ if (IVOper == PN)
+ return true;
+ }
+ return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
@@ -991,26 +1050,28 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
if (LSRMode) {
if (!isExpandedAddRecExprPHI(PN, IncV, L))
continue;
+ if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
+ continue;
}
else {
if (!isNormalAddRecExprPHI(PN, IncV, L))
continue;
+ if (L == IVIncInsertLoop)
+ do {
+ if (SE.DT->dominates(IncV, IVIncInsertPos))
+ break;
+ // Make sure the increment is where we want it. But don't move it
+ // down past a potential existing post-inc user.
+ IncV->moveBefore(IVIncInsertPos);
+ IVIncInsertPos = IncV;
+ IncV = cast<Instruction>(IncV->getOperand(0));
+ } while (IncV != PN);
}
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
// Remember the increment.
rememberInstruction(IncV);
- if (L == IVIncInsertLoop)
- do {
- if (SE.DT->dominates(IncV, IVIncInsertPos))
- break;
- // Make sure the increment is where we want it. But don't move it
- // down past a potential existing post-inc user.
- IncV->moveBefore(IVIncInsertPos);
- IVIncInsertPos = IncV;
- IncV = cast<Instruction>(IncV->getOperand(0));
- } while (IncV != PN);
return PN;
}
}
@@ -1019,6 +1080,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+ // Another AddRec may need to be recursively expanded below. For example, if
+ // this AddRec is quadratic, the StepV may itself be an AddRec in this
+ // loop. Remove this loop from the PostIncLoops set before expanding such
+ // AddRecs. Otherwise, we cannot find a valid position for the step
+ // (i.e. StepV can never dominate its loop header). Ideally, we could do
+ // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
+ // so it's not worth implementing SmallPtrSet::swap.
+ PostIncLoopSet SavedPostIncLoops = PostIncLoops;
+ PostIncLoops.clear();
+
// Expand code for the start value.
Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
L->getHeader()->begin());
@@ -1034,7 +1105,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// If the stride is negative, insert a sub instead of an add for the increment
// (unless it's a constant, because subtracts of constants are canonicalized
// to adds).
- bool useSubtract = !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+ bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
if (useSubtract)
Step = SE.getNegativeSCEV(Step);
// Expand the step somewhere that dominates the loop header.
@@ -1073,6 +1144,10 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+ // After expanding subexpressions, restore the PostIncLoops set so the caller
+ // can ensure that IVIncrement dominates the current uses.
+ PostIncLoops = SavedPostIncLoops;
+
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
@@ -1153,7 +1228,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// inserting an extra IV increment. StepV might fold into PostLoopOffset,
// but hopefully expandCodeFor handles that.
bool useSubtract =
- !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+ !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
if (useSubtract)
Step = SE.getNegativeSCEV(Step);
// Expand the step somewhere that dominates the loop header.
@@ -1400,10 +1475,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
- Instruction *I) {
- BasicBlock::iterator IP = I;
- while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
- ++IP;
+ Instruction *IP) {
Builder.SetInsertPoint(IP->getParent(), IP);
return expandCodeFor(SH, Ty);
}
@@ -1429,14 +1501,23 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (!L) break;
if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
+ else {
+ // LSR sets the insertion point for AddRec start/step values to the
+ // block start to simplify value reuse, even though it's an invalid
+ // position. SCEVExpander must correct for this in all cases.
+ InsertPt = L->getHeader()->getFirstInsertionPt();
+ }
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstInsertionPt();
- while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
+ while (InsertPt != Builder.GetInsertPoint()
+ && (isInsertedInstruction(InsertPt)
+ || isa<DbgInfoIntrinsic>(InsertPt))) {
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
+ }
break;
}
@@ -1471,23 +1552,9 @@ void SCEVExpander::rememberInstruction(Value *I) {
InsertedPostIncValues.insert(I);
else
InsertedValues.insert(I);
-
- // If we just claimed an existing instruction and that instruction had
- // been the insert point, adjust the insert point forward so that
- // subsequently inserted code will be dominated.
- if (Builder.GetInsertPoint() == I) {
- BasicBlock::iterator It = cast<Instruction>(I);
- do { ++It; } while (isInsertedInstruction(It) ||
- isa<DbgInfoIntrinsic>(It));
- Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
- }
}
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
- // If we acquired more instructions since the old insert point was saved,
- // advance past them.
- while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;
-
Builder.SetInsertPoint(BB, I);
}
@@ -1515,40 +1582,13 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
return V;
}
-/// hoistStep - Attempt to hoist an IV increment above a potential use.
-///
-/// To successfully hoist, two criteria must be met:
-/// - IncV operands dominate InsertPos and
-/// - InsertPos dominates IncV
-///
-/// Meeting the second condition means that we don't need to check all of IncV's
-/// existing uses (it's moving up in the domtree).
-///
-/// This does not yet recursively hoist the operands, although that would
-/// not be difficult.
-///
-/// This does not require a SCEVExpander instance and could be replaced by a
-/// general code-insertion helper.
-bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
- const DominatorTree *DT) {
- if (DT->dominates(IncV, InsertPos))
- return true;
-
- if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
- return false;
-
- if (IncV->mayHaveSideEffects())
- return false;
-
- // Attempt to hoist IncV
- for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
- OI != OE; ++OI) {
- Instruction *OInst = dyn_cast<Instruction>(OI);
- if (OInst && !DT->dominates(OInst, InsertPos))
- return false;
- }
- IncV->moveBefore(InsertPos);
- return true;
+/// Sort values by integer width for replaceCongruentIVs.
+static bool width_descending(Value *lhs, Value *rhs) {
+ // Put pointers at the back and make sure pointer < pointer = false.
+ if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
+ return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
+ return rhs->getType()->getPrimitiveSizeInBits()
+ < lhs->getType()->getPrimitiveSizeInBits();
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
@@ -1558,23 +1598,45 @@ bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts) {
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ const TargetLowering *TLI) {
+ // Find integer phis in order of increasing width.
+ SmallVector<PHINode*, 8> Phis;
+ for (BasicBlock::iterator I = L->getHeader()->begin();
+ PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
+ Phis.push_back(Phi);
+ }
+ if (TLI)
+ std::sort(Phis.begin(), Phis.end(), width_descending);
+
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
- for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- PHINode *Phi = cast<PHINode>(I);
+ // Process phis from wide to narrow, mapping wide phis to their truncation
+ // so narrow phis can reuse them.
+ for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
+ PEnd = Phis.end(); PIter != PEnd; ++PIter) {
+ PHINode *Phi = *PIter;
+
if (!SE.isSCEVable(Phi->getType()))
continue;
PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
if (!OrigPhiRef) {
OrigPhiRef = Phi;
+ if (Phi->getType()->isIntegerTy() && TLI
+ && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
+ // This phi can be freely truncated to the narrowest phi type. Map the
+ // truncated expression to it so it will be reused for narrow types.
+ const SCEV *TruncExpr =
+ SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
+ ExprToIVMap[TruncExpr] = Phi;
+ }
continue;
}
- // If one phi derives from the other via GEPs, types may differ.
- // We could consider adding a bitcast here to handle it.
- if (OrigPhiRef->getType() != Phi->getType())
+ // Replacing a pointer phi with an integer phi or vice-versa doesn't make
+ // sense.
+ if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
continue;
if (BasicBlock *LatchBlock = L->getLoopLatch()) {
@@ -1583,32 +1645,56 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
Instruction *IsomorphicInc =
cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
- // If this phi is more canonical, swap it with the original.
- if (!isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)
- && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L)) {
+ // If this phi has the same width but is more canonical, replace the
+ // original with it. As part of the "more canonical" determination,
+ // respect a prior decision to use an IV chain.
+ if (OrigPhiRef->getType() == Phi->getType()
+ && !(ChainedPhis.count(Phi)
+ || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
+ && (ChainedPhis.count(Phi)
+ || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
std::swap(OrigPhiRef, Phi);
std::swap(OrigInc, IsomorphicInc);
}
// Replacing the congruent phi is sufficient because acyclic redundancy
// elimination, CSE/GVN, should handle the rest. However, once SCEV proves
// that a phi is congruent, it's often the head of an IV user cycle that
- // is isomorphic with the original phi. So it's worth eagerly cleaning up
- // the common case of a single IV increment.
- if (OrigInc != IsomorphicInc &&
- OrigInc->getType() == IsomorphicInc->getType() &&
- SE.getSCEV(OrigInc) == SE.getSCEV(IsomorphicInc) &&
- hoistStep(OrigInc, IsomorphicInc, DT)) {
+ // is isomorphic with the original phi. It's worth eagerly cleaning up the
+ // common case of a single IV increment so that DeleteDeadPHIs can remove
+ // cycles that had postinc uses.
+ const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
+ IsomorphicInc->getType());
+ if (OrigInc != IsomorphicInc
+ && TruncExpr == SE.getSCEV(IsomorphicInc)
+ && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
+ || hoistIVInc(OrigInc, IsomorphicInc))) {
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv.inc: "
<< *IsomorphicInc << '\n');
- IsomorphicInc->replaceAllUsesWith(OrigInc);
+ Value *NewInc = OrigInc;
+ if (OrigInc->getType() != IsomorphicInc->getType()) {
+ Instruction *IP = isa<PHINode>(OrigInc)
+ ? (Instruction*)L->getHeader()->getFirstInsertionPt()
+ : OrigInc->getNextNode();
+ IRBuilder<> Builder(IP);
+ Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
+ NewInc = Builder.
+ CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
+ }
+ IsomorphicInc->replaceAllUsesWith(NewInc);
DeadInsts.push_back(IsomorphicInc);
}
}
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
++NumElim;
- Phi->replaceAllUsesWith(OrigPhiRef);
+ Value *NewIV = OrigPhiRef;
+ if (OrigPhiRef->getType() != Phi->getType()) {
+ IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+ Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
+ NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
+ }
+ Phi->replaceAllUsesWith(NewIV);
DeadInsts.push_back(Phi);
}
return NumElim;
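The net effect of the truncation mapping and the trunc/bitcast rewrites in replaceCongruentIVs: one wide recurrence serves congruent narrower ones through a free truncation instead of keeping parallel phis. A hedged C++ sketch of the resulting code shape:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Before: a separate 32-bit phi would track the same recurrence.
      // After: the narrow value is just a truncation of the 64-bit IV.
      for (uint64_t Wide = 0; Wide < 3; ++Wide) {
        uint32_t Narrow = (uint32_t)Wide;  // replaces the congruent phi
        std::cout << Narrow << '\n';
      }
      return 0;
    }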
diff --git a/lib/Analysis/ScalarEvolutionNormalization.cpp b/lib/Analysis/ScalarEvolutionNormalization.cpp
index c66ecd6..dd2ed4f 100644
--- a/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -118,7 +118,6 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
// Conservatively use AnyWrap until/unless we need FlagNW.
const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
switch (Kind) {
- default: llvm_unreachable("Unexpected transform name!");
case NormalizeAutodetect:
if (IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
const SCEV *TransformedStep =
@@ -191,7 +190,6 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
}
llvm_unreachable("Unexpected SCEV kind!");
- return 0;
}
/// Manage recursive transformation across an expression DAG. Revisiting
diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp
index 035bcea..0c7d05f 100644
--- a/lib/Analysis/SparsePropagation.cpp
+++ b/lib/Analysis/SparsePropagation.cpp
@@ -195,7 +195,8 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
}
- Succs[SI.findCaseValue(cast<ConstantInt>(C))] = true;
+ unsigned CCase = SI.findCaseValue(cast<ConstantInt>(C));
+ Succs[SI.resolveSuccessorIndex(CCase)] = true;
}
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index ef19e06..b5811f2 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -88,18 +88,21 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
// Handle a constant vector by taking the intersection of the known bits of
- // each element.
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
+ // each element. There is no real need to handle ConstantVector here, because
+ // we don't handle undef in any particularly useful way.
+ if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+ // We know that CDS must be a vector of integers. Take the intersection of
+ // each element.
KnownZero.setAllBits(); KnownOne.setAllBits();
- for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
- APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
- ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
- TD, Depth);
- KnownZero &= KnownZero2;
- KnownOne &= KnownOne2;
+ APInt Elt(KnownZero.getBitWidth(), 0);
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ Elt = CDS->getElementAsInteger(i);
+ KnownZero &= ~Elt;
+ KnownOne &= Elt;
}
return;
}
+
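A worked example of the per-element intersection, in plain C++ on 8-bit masks standing in for APInt (illustrative sketch): a bit is known zero or known one only if it has that value in every element.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t Elts[] = {0x0F, 0x0D};  // hypothetical vector elements
      uint8_t KnownZero = 0xFF, KnownOne = 0xFF;
      for (uint8_t Elt : Elts) {
        KnownZero &= (uint8_t)~Elt;   // zero in every element so far
        KnownOne  &= Elt;             // one in every element so far
      }
      assert(KnownZero == 0xF0);  // the high nibble is 0 in both elements
      assert(KnownOne  == 0x0D);  // bits 0, 2 and 3 are 1 in both
      return 0;
    }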
// The address of an aligned GlobalValue has trailing zeros.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
@@ -714,10 +717,17 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
- case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz: {
unsigned LowBits = Log2_32(BitWidth)+1;
+ // If this call is undefined for 0, the result will be less than 2^n.
+ if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
+ LowBits -= 1;
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ break;
+ }
+ case Intrinsic::ctpop: {
+ unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
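The LowBits arithmetic, worked through for a 32-bit operand in a standalone C++ sketch: ctlz/cttz can return up to 32, which needs Log2_32(32)+1 == 6 bits, but when the intrinsic is undefined at zero the result is at most 31 and fits in 5 bits, so one extra high bit is known zero.

    #include <cassert>

    int main() {
      unsigned BitWidth = 32;
      unsigned LowBits = 6;                // Log2_32(32) + 1
      assert(BitWidth < (1u << LowBits));  // results up to 32 fit in 6 bits
      // Undefined-at-zero variant: the result never reaches BitWidth.
      unsigned MaxResult = BitWidth - 1;          // at most 31
      assert(MaxResult < (1u << (LowBits - 1)));  // fits in 5 bits
      return 0;
    }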
@@ -804,11 +814,9 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
// An exact divide or right shift can only shift off zero bits, so the result
// is a power of two only if the first operand is a power of two and not
// copying a sign bit (sdiv int_min, 2).
- if (match(V, m_LShr(m_Value(), m_Value())) ||
- match(V, m_UDiv(m_Value(), m_Value()))) {
- PossiblyExactOperator *PEO = cast<PossiblyExactOperator>(V);
- if (PEO->isExact())
- return isPowerOfTwo(PEO->getOperand(0), TD, OrZero, Depth);
+ if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
+ match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
+ return isPowerOfTwo(cast<Operator>(V)->getOperand(0), TD, OrZero, Depth);
}
return false;
@@ -872,10 +880,8 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
return true;
}
// div exact can only produce a zero if the dividend is zero.
- else if (match(V, m_IDiv(m_Value(X), m_Value()))) {
- PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
- if (BO->isExact())
- return isKnownNonZero(X, TD, Depth);
+ else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
+ return isKnownNonZero(X, TD, Depth);
}
// X + Y.
else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
@@ -989,30 +995,28 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
- case Instruction::AShr:
+ case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- // ashr X, C -> adds C sign bits.
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
- Tmp += C->getZExtValue();
+ // ashr X, C -> adds C sign bits. Vectors too.
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
+ Tmp += ShAmt->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
- // vector ashr X, <C, C, C, C> -> adds C sign bits
- if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
- if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
- Tmp += CI->getZExtValue();
- if (Tmp > TyBits) Tmp = TyBits;
- }
- }
return Tmp;
- case Instruction::Shl:
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ }
+ case Instruction::Shl: {
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- if (C->getZExtValue() >= TyBits || // Bad shift.
- C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
- return Tmp - C->getZExtValue();
+ Tmp2 = ShAmt->getZExtValue();
+ if (Tmp2 >= TyBits || // Bad shift.
+ Tmp2 >= Tmp) break; // Shifted all sign bits out.
+ return Tmp - Tmp2;
}
break;
+ }
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: // NOT is handled here.
@@ -1354,25 +1358,21 @@ Value *llvm::isBytewiseValue(Value *V) {
}
}
- // A ConstantArray is splatable if all its members are equal and also
- // splatable.
- if (ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
- if (CA->getNumOperands() == 0)
- return 0;
-
- Value *Val = isBytewiseValue(CA->getOperand(0));
+ // A ConstantDataArray/Vector is splatable if all its members are equal and
+ // also splatable.
+ if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
+ Value *Elt = CA->getElementAsConstant(0);
+ Value *Val = isBytewiseValue(Elt);
if (!Val)
return 0;
- for (unsigned I = 1, E = CA->getNumOperands(); I != E; ++I)
- if (CA->getOperand(I-1) != CA->getOperand(I))
+ for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
+ if (CA->getElementAsConstant(I) != Elt)
return 0;
return Val;
}
- // FIXME: Vector types (e.g., <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>).
-
// Conceptually, we could handle things like:
// %a = zext i8 %X to i16
// %b = shl i16 %a, 8
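The splat test that isBytewiseValue now applies to ConstantDataSequential has a one-loop standalone form (illustrative C++ sketch): every element must equal the first for a byte splat, e.g. a memset, to reproduce the array.

    #include <cassert>
    #include <cstring>

    int main() {
      unsigned char A[4] = {7, 7, 7, 7};
      bool Splat = true;
      for (unsigned i = 1; i < sizeof(A); ++i)
        if (A[i] != A[0]) Splat = false;
      assert(Splat);
      // Because A is splatable, a memset reproduces it exactly.
      unsigned char B[4];
      std::memset(B, A[0], sizeof(B));
      assert(std::memcmp(A, B, sizeof(A)) == 0);
      return 0;
    }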
@@ -1469,50 +1469,44 @@ static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
- // recursion)
+ // recursion).
if (idx_range.empty())
return V;
- // We have indices, so V should have an indexable type
- assert((V->getType()->isStructTy() || V->getType()->isArrayTy())
- && "Not looking at a struct or array?");
- assert(ExtractValueInst::getIndexedType(V->getType(), idx_range)
- && "Invalid indices for type?");
- CompositeType *PTy = cast<CompositeType>(V->getType());
-
- if (isa<UndefValue>(V))
- return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
- idx_range));
- else if (isa<ConstantAggregateZero>(V))
- return Constant::getNullValue(ExtractValueInst::getIndexedType(PTy,
- idx_range));
- else if (Constant *C = dyn_cast<Constant>(V)) {
- if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
- // Recursively process this constant
- return FindInsertedValue(C->getOperand(idx_range[0]), idx_range.slice(1),
- InsertBefore);
- } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
+ // We have indices, so V should have an indexable type.
+ assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
+ "Not looking at a struct or array?");
+ assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
+ "Invalid indices for type?");
+
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ C = C->getAggregateElement(idx_range[0]);
+ if (C == 0) return 0;
+ return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
+ }
+
+ if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
// Loop the indices for the insertvalue instruction in parallel with the
// requested indices
const unsigned *req_idx = idx_range.begin();
for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
i != e; ++i, ++req_idx) {
if (req_idx == idx_range.end()) {
- if (InsertBefore)
- // The requested index identifies a part of a nested aggregate. Handle
- // this specially. For example,
- // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
- // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
- // %C = extractvalue {i32, { i32, i32 } } %B, 1
- // This can be changed into
- // %A = insertvalue {i32, i32 } undef, i32 10, 0
- // %C = insertvalue {i32, i32 } %A, i32 11, 1
- // which allows the unused 0,0 element from the nested struct to be
- // removed.
- return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
- InsertBefore);
- else
- // We can't handle this without inserting insertvalues
+ // We can't handle this without inserting insertvalues
+ if (!InsertBefore)
return 0;
+
+ // The requested index identifies a part of a nested aggregate. Handle
+ // this specially. For example,
+ // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
+ // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
+ // %C = extractvalue {i32, { i32, i32 } } %B, 1
+ // This can be changed into
+ // %A = insertvalue {i32, i32 } undef, i32 10, 0
+ // %C = insertvalue {i32, i32 } %A, i32 11, 1
+ // which allows the unused 0,0 element from the nested struct to be
+ // removed.
+ return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
+ InsertBefore);
}
 // This insert value inserts something other than what we are looking for.
@@ -1528,7 +1522,9 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
return FindInsertedValue(I->getInsertedValueOperand(),
makeArrayRef(req_idx, idx_range.end()),
InsertBefore);
- } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
+ }
+
+ if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
 // If we're extracting a value from an aggregate that was extracted from
// something else, we can extract from that something else directly instead.
// However, we will need to chain I's indices with the requested indices.
@@ -1596,33 +1592,19 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
}
-/// GetConstantStringInfo - This function computes the length of a
+/// getConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
-bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
- uint64_t Offset, bool StopAtNul) {
- // If V is NULL then return false;
- if (V == NULL) return false;
-
- // Look through bitcast instructions.
- if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);
-
- // If the value is not a GEP instruction nor a constant expression with a
- // GEP instruction, then return false because ConstantArray can't occur
- // any other way.
- const User *GEP = 0;
- if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
- GEP = GEPI;
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (CE->getOpcode() == Instruction::BitCast)
- return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
- if (CE->getOpcode() != Instruction::GetElementPtr)
- return false;
- GEP = CE;
- }
+bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
+ uint64_t Offset, bool TrimAtNul) {
+ assert(V);
+
+ // Look through bitcast instructions and geps.
+ V = V->stripPointerCasts();
- if (GEP) {
+ // If the value is a GEP instruction or constant expression, treat it as an
+ // offset.
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
return false;
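For reference, the three-operand requirement matches the canonical constant-string GEP shape (hypothetical IR, not from this patch):

    // @str = constant [6 x i8] c"hello\00"
    // %p   = getelementptr [6 x i8]* @str, i64 0, i64 2
    // Operand 0 is @str, operand 1 must be the constant 0, and operand 2
    // becomes StartIdx (here 2), which the recursion folds into Offset.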
@@ -1647,51 +1629,48 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
StartIdx = CI->getZExtValue();
else
return false;
- return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
- StopAtNul);
+    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset, TrimAtNul);
}
  // The GEP, whether a constant expression or an instruction, must reference
  // a global variable that is a constant and is initialized. The referenced
  // constant initializer is the array that we'll use for optimization.
- const GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
+ const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return false;
- const Constant *GlobalInit = GV->getInitializer();
-
+
// Handle the all-zeros case
- if (GlobalInit->isNullValue()) {
+ if (GV->getInitializer()->isNullValue()) {
// This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
- Str.clear();
+ Str = "";
return true;
}
// Must be a Constant Array
- const ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (Array == 0 || !Array->getType()->getElementType()->isIntegerTy(8))
+ const ConstantDataArray *Array =
+ dyn_cast<ConstantDataArray>(GV->getInitializer());
+ if (Array == 0 || !Array->isString())
return false;
// Get the number of elements in the array
- uint64_t NumElts = Array->getType()->getNumElements();
-
+ uint64_t NumElts = Array->getType()->getArrayNumElements();
+
+ // Start out with the entire array in the StringRef.
+ Str = Array->getAsString();
+
if (Offset > NumElts)
return false;
- // Traverse the constant array from 'Offset' which is the place the GEP refers
- // to in the array.
- Str.reserve(NumElts-Offset);
- for (unsigned i = Offset; i != NumElts; ++i) {
- const Constant *Elt = Array->getOperand(i);
- const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI) // This array isn't suitable, non-int initializer.
- return false;
- if (StopAtNul && CI->isZero())
- return true; // we found end of string, success!
- Str += (char)CI->getZExtValue();
- }
+  // Skip over 'Offset' bytes.
+ Str = Str.substr(Offset);
- // The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
+ if (TrimAtNul) {
+    // Trim off the \0 and anything after it. If the array is not nul
+    // terminated, return the remainder of the string unchanged; the client
+    // may know some other way that the string is length-bound.
+ Str = Str.substr(0, Str.find('\0'));
+ }
return true;
}
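A minimal usage sketch of the new StringRef-based interface (the call site is assumed for illustration):

    StringRef Str;
    if (getConstantStringInfo(V, Str))  // Offset defaults to 0, TrimAtNul to true.
      errs() << "constant string: " << Str << "\n";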
@@ -1703,8 +1682,7 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
  // Look through noop bitcast instructions and all-zero GEPs.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- return GetStringLengthH(BCI->getOperand(0), PHIs);
+ V = V->stripPointerCasts();
// If this is a PHI node, there are two cases: either we have already seen it
// or we haven't.
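The PHI handling referred to here is untouched by this patch; roughly, it uses the visited set to cut cycles and requires all incoming paths to agree:

    if (PHINode *PN = dyn_cast<PHINode>(V)) {
      if (!PHIs.insert(PN))
        return ~0ULL;                    // Revisited: constrains nothing.
      uint64_t LenSoFar = ~0ULL;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
        if (Len == 0) return 0;          // Unknown on some path.
        if (Len == ~0ULL) continue;      // Back-edge: ignore.
        if (LenSoFar != ~0ULL && Len != LenSoFar)
          return 0;                      // Paths disagree.
        LenSoFar = Len;
      }
      return LenSoFar;                   // All reachable paths agree.
    }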
@@ -1740,83 +1718,13 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
if (Len1 != Len2) return 0;
return Len1;
}
-
- // As a special-case, "@string = constant i8 0" is also a string with zero
- // length, not wrapped in a bitcast or GEP.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (GV->getInitializer()->isNullValue()) return 1;
- return 0;
- }
-
- // If the value is not a GEP instruction nor a constant expression with a
- // GEP instruction, then return unknown.
- User *GEP = 0;
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
- GEP = GEPI;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (CE->getOpcode() != Instruction::GetElementPtr)
- return 0;
- GEP = CE;
- } else {
- return 0;
- }
-
- // Make sure the GEP has exactly three arguments.
- if (GEP->getNumOperands() != 3)
- return 0;
-
- // Check to make sure that the first operand of the GEP is an integer and
- // has value 0 so that we are sure we're indexing into the initializer.
- if (ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
- if (!Idx->isZero())
- return 0;
- } else
- return 0;
-
- // If the second index isn't a ConstantInt, then this is a variable index
- // into the array. If this occurs, we can't say anything meaningful about
- // the string.
- uint64_t StartIdx = 0;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
- StartIdx = CI->getZExtValue();
- else
- return 0;
-
- // The GEP instruction, constant or instruction, must reference a global
- // variable that is a constant and is initialized. The referenced constant
- // initializer is the array that we'll use for optimization.
- GlobalVariable* GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
- if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
- GV->mayBeOverridden())
+
+ // Otherwise, see if we can read the string.
+ StringRef StrData;
+ if (!getConstantStringInfo(V, StrData))
return 0;
- Constant *GlobalInit = GV->getInitializer();
-
- // Handle the ConstantAggregateZero case, which is a degenerate case. The
- // initializer is constant zero so the length of the string must be zero.
- if (isa<ConstantAggregateZero>(GlobalInit))
- return 1; // Len = 0 offset by 1.
-
- // Must be a Constant Array
- ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (!Array || !Array->getType()->getElementType()->isIntegerTy(8))
- return false;
- // Get the number of elements in the array
- uint64_t NumElts = Array->getType()->getNumElements();
-
- // Traverse the constant array from StartIdx (derived above) which is
- // the place the GEP refers to in the array.
- for (unsigned i = StartIdx; i != NumElts; ++i) {
- Constant *Elt = Array->getOperand(i);
- ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI) // This array isn't suitable, non-int initializer.
- return 0;
- if (CI->isZero())
- return i-StartIdx+1; // We found end of string, success!
- }
-
- return 0; // The array isn't null terminated, conservatively return 'unknown'.
+ return StrData.size()+1;
}
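The 'len+1' convention survives the rewrite; a quick worked example (assumed global, not from this patch):

    // @s = constant [6 x i8] c"hello\00"
    // getConstantStringInfo trims at the nul, so StrData == "hello" (size 5),
    // and GetStringLengthH reports 5 + 1 == 6, i.e. strlen("hello") + 1.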
/// GetStringLength - If we can compute the length of the string pointed to by
@@ -1876,8 +1784,12 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
return true;
}
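Widening the parameter below from Instruction to Value lets callers hand in ConstantExprs as well; both are Operators, which is what the dyn_cast relies on. A hypothetical call site:

    // V may be an Instruction or a ConstantExpr; no cast needed either way.
    if (isSafeToSpeculativelyExecute(V, TD)) {
      // ... hoist the computation of V above a guarding branch ...
    }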
-bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
+bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const TargetData *TD) {
+ const Operator *Inst = dyn_cast<Operator>(V);
+ if (!Inst)
+ return false;
+
for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
if (C->canTrap())
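The operand scan above exists because even constant operands can trap; a hypothetical example (not from this patch):

    // %r = add i32 %x, sdiv (i32 1, i32 0)
    // The sdiv ConstantExpr reports canTrap(), so %r must not be speculated
    // past the control flow that keeps the division from executing.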
@@ -1912,11 +1824,31 @@ bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
return false;
return LI->getPointerOperand()->isDereferenceablePointer();
}
- case Instruction::Call:
+ case Instruction::Call: {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::bswap:
+ case Intrinsic::ctlz:
+ case Intrinsic::ctpop:
+ case Intrinsic::cttz:
+ case Intrinsic::objectsize:
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ return true;
+ // TODO: some fp intrinsics are marked as having the same error handling
+ // as libm. They're safe to speculate when they won't error.
+ // TODO: are convert_{from,to}_fp16 safe?
+ // TODO: can we list target-specific intrinsics here?
+ default: break;
+ }
+ }
return false; // The called function could have undefined behavior or
- // side-effects.
- // FIXME: We should special-case some intrinsics (bswap,
- // overflow-checking arithmetic, etc.)
+ // side-effects, even if marked readnone nounwind.
+ }
case Instruction::VAArg:
case Instruction::Alloca:
case Instruction::Invoke:
@@ -1926,7 +1858,6 @@ bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
case Instruction::Br:
case Instruction::IndirectBr:
case Instruction::Switch:
- case Instruction::Unwind:
case Instruction::Unreachable:
case Instruction::Fence:
case Instruction::LandingPad: