Diffstat (limited to 'lib/Analysis/ScalarEvolutionExpander.cpp')
-rw-r--r--   lib/Analysis/ScalarEvolutionExpander.cpp   400
1 file changed, 339 insertions, 61 deletions
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 4310e3c..e27da96 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -15,6 +15,7 @@
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
@@ -137,6 +138,10 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
+ // Don't count dbg.value against the ScanLimit, to avoid perturbing the
+ // generated code.
+ if (isa<DbgInfoIntrinsic>(IP))
+ ScanLimit++;
if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
IP->getOperand(1) == RHS)
return IP;
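
For reference, a stand-alone sketch of the reuse scan this hunk touches (the helper name is invented here and is not part of the patch). Looking back through at most ScanLimit non-debug instructions means interleaved dbg.value calls can never change which existing binop is found and reused:

#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
using namespace llvm;

// Scan backwards over at most ScanLimit real instructions for an identical
// binop; debug intrinsics bump the limit so they are effectively free.
static Value *findExistingBinop(Instruction::BinaryOps Opcode,
                                Value *LHS, Value *RHS,
                                BasicBlock::iterator IP,
                                BasicBlock::iterator BlockBegin) {
  unsigned ScanLimit = 6;
  if (IP == BlockBegin) return 0;
  --IP;
  for (; ScanLimit; --IP, --ScanLimit) {
    if (isa<DbgInfoIntrinsic>(IP))
      ++ScanLimit;                        // don't charge dbg.value to the limit
    else if (IP->getOpcode() == (unsigned)Opcode &&
             IP->getOperand(0) == LHS && IP->getOperand(1) == RHS)
      return IP;                          // found an equivalent instruction
    if (IP == BlockBegin) break;
  }
  return 0;                               // nothing reusable in the window
}
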
@@ -144,15 +149,34 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
}
}
+ // Save the original insertion point so we can restore it when we're done.
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+
+ // Move the insertion point out of as many loops as we can.
+ while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader) break;
+
+ // Ok, move up a level.
+ Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ }
+
// If we haven't found this binop, insert it.
Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
rememberInstruction(BO);
+
+ // Restore the original insert point.
+ if (SaveInsertBB)
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+
return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
-/// S need not be evenly divisble if a reasonable remainder can be
+/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
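
The InsertBinop change above, and the two expandAddToGEP hunks below, all follow one idiom: remember the builder's position, climb out of every loop whose preheader can legally hold the computation, emit it there, and put the builder back. A minimal sketch of that idiom on its own, with an invented helper name and a plain add standing in for whatever is being expanded:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// Emit LHS+RHS in the outermost preheader where both operands are
// loop-invariant, then restore the caller's insertion point.
static Value *emitHoistedAdd(IRBuilder<> &Builder, LoopInfo &LI,
                             Value *LHS, Value *RHS) {
  BasicBlock *SaveBB = Builder.GetInsertBlock();
  BasicBlock::iterator SavePt = Builder.GetInsertPoint();

  // Climb the loop nest while hoisting stays legal and a preheader exists.
  while (const Loop *L = LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  Value *Sum = Builder.CreateAdd(LHS, RHS, "hoisted");

  // Put the builder back where the caller had it.
  if (SaveBB)
    Builder.SetInsertPoint(SaveBB, SavePt);
  return Sum;
}
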
@@ -462,7 +486,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
break;
}
- // If none of the operands were convertable to proper GEP indices, cast
+ // If none of the operands were convertible to proper GEP indices, cast
// the base to i8* and do an ugly getelementptr with that. It's still
// better than ptrtoint+arithmetic+inttoptr at least.
if (!AnyNonZeroIndices) {
@@ -486,6 +510,10 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
+ // Don't count dbg.value against the ScanLimit, to avoid perturbing the
+ // generated code.
+ if (isa<DbgInfoIntrinsic>(IP))
+ ScanLimit++;
if (IP->getOpcode() == Instruction::GetElementPtr &&
IP->getOperand(0) == V && IP->getOperand(1) == Idx)
return IP;
@@ -493,12 +521,56 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
+ // Save the original insertion point so we can restore it when we're done.
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+
+ // Move the insertion point out of as many loops as we can.
+ while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader) break;
+
+ // Ok, move up a level.
+ Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ }
+
// Emit a GEP.
Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
rememberInstruction(GEP);
+
+ // Restore the original insert point.
+ if (SaveInsertBB)
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+
return GEP;
}
+ // Save the original insertion point so we can restore it when we're done.
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+
+ // Move the insertion point out of as many loops as we can.
+ while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ if (!L->isLoopInvariant(V)) break;
+
+ bool AnyIndexNotLoopInvariant = false;
+ for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
+ E = GepIndices.end(); I != E; ++I)
+ if (!L->isLoopInvariant(*I)) {
+ AnyIndexNotLoopInvariant = true;
+ break;
+ }
+ if (AnyIndexNotLoopInvariant)
+ break;
+
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader) break;
+
+ // Ok, move up a level.
+ Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ }
+
// Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
// because ScalarEvolution may have changed the address arithmetic to
// compute a value which is beyond the end of the allocated object.
@@ -511,6 +583,11 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
"scevgep");
Ops.push_back(SE.getUnknown(GEP));
rememberInstruction(GEP);
+
+ // Restore the original insert point.
+ if (SaveInsertBB)
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+
return expand(SE.getAddExpr(Ops));
}
@@ -528,70 +605,179 @@ static bool isNonConstantNegative(const SCEV *F) {
return SC->getValue()->getValue().isNegative();
}
-Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
- int NumOperands = S->getNumOperands();
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
+/// SCEV expansion. If they are nested, this is the most nested. If they are
+/// neighboring, pick the later.
+static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
+ DominatorTree &DT) {
+ if (!A) return B;
+ if (!B) return A;
+ if (A->contains(B)) return B;
+ if (B->contains(A)) return A;
+ if (DT.dominates(A->getHeader(), B->getHeader())) return B;
+ if (DT.dominates(B->getHeader(), A->getHeader())) return A;
+ return A; // Arbitrarily break the tie.
+}
- // Find the index of an operand to start with. Choose the operand with
- // pointer type, if there is one, or the last operand otherwise.
- int PIdx = 0;
- for (; PIdx != NumOperands - 1; ++PIdx)
- if (isa<PointerType>(S->getOperand(PIdx)->getType())) break;
+/// GetRelevantLoop - Get the most relevant loop associated with the given
+/// expression, according to PickMostRelevantLoop.
+static const Loop *GetRelevantLoop(const SCEV *S, LoopInfo &LI,
+ DominatorTree &DT) {
+ if (isa<SCEVConstant>(S))
+ return 0;
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
+ if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
+ return LI.getLoopFor(I->getParent());
+ return 0;
+ }
+ if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
+ const Loop *L = 0;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+ L = AR->getLoop();
+ for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
+ I != E; ++I)
+ L = PickMostRelevantLoop(L, GetRelevantLoop(*I, LI, DT), DT);
+ return L;
+ }
+ if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
+ return GetRelevantLoop(C->getOperand(), LI, DT);
+ if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S))
+ return PickMostRelevantLoop(GetRelevantLoop(D->getLHS(), LI, DT),
+ GetRelevantLoop(D->getRHS(), LI, DT),
+ DT);
+ llvm_unreachable("Unexpected SCEV type!");
+}
- // Expand code for the operand that we chose.
- Value *V = expand(S->getOperand(PIdx));
+/// LoopCompare - Compare loops by PickMostRelevantLoop.
+class LoopCompare {
+ DominatorTree &DT;
+public:
+ explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
+
+ bool operator()(std::pair<const Loop *, const SCEV *> LHS,
+ std::pair<const Loop *, const SCEV *> RHS) const {
+ // Compare loops with PickMostRelevantLoop.
+ if (LHS.first != RHS.first)
+ return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
+
+ // If one operand is a non-constant negative and the other is not,
+ // put the non-constant negative on the right so that a sub can
+ // be used instead of a negate and add.
+ if (isNonConstantNegative(LHS.second)) {
+ if (!isNonConstantNegative(RHS.second))
+ return false;
+ } else if (isNonConstantNegative(RHS.second))
+ return true;
- // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
- // comments on expandAddToGEP for details.
- if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
- // Take the operand at PIdx out of the list.
- const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
- SmallVector<const SCEV *, 8> NewOps;
- NewOps.insert(NewOps.end(), Ops.begin(), Ops.begin() + PIdx);
- NewOps.insert(NewOps.end(), Ops.begin() + PIdx + 1, Ops.end());
- // Make a GEP.
- return expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, V);
+ // Otherwise they are equivalent according to this comparison.
+ return false;
}
+};
- // Otherwise, we'll expand the rest of the SCEVAddExpr as plain integer
- // arithmetic.
- V = InsertNoopCastOfTo(V, Ty);
+Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
+ const Type *Ty = SE.getEffectiveSCEVType(S->getType());
- // Emit a bunch of add instructions
- for (int i = NumOperands-1; i >= 0; --i) {
- if (i == PIdx) continue;
- const SCEV *Op = S->getOperand(i);
- if (isNonConstantNegative(Op)) {
+ // Collect all the add operands in a loop, along with their associated loops.
+ // Iterate in reverse so that constants are emitted last, all else equal, and
+ // so that pointer operands are inserted first, which the code below relies on
+ // to form more involved GEPs.
+ SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
+ for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
+ E(S->op_begin()); I != E; ++I)
+ OpsAndLoops.push_back(std::make_pair(GetRelevantLoop(*I, *SE.LI, *SE.DT),
+ *I));
+
+ // Sort by loop. Use a stable sort so that constants follow non-constants and
+ // pointer operands precede non-pointer operands.
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+
+ // Emit instructions to add all the operands. Hoist as much as possible
+ // out of loops, and form meaningful getelementptrs where possible.
+ Value *Sum = 0;
+ for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
+ I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
+ const Loop *CurLoop = I->first;
+ const SCEV *Op = I->second;
+ if (!Sum) {
+ // This is the first operand. Just expand it.
+ Sum = expand(Op);
+ ++I;
+ } else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
+ // The running sum expression is a pointer. Try to form a getelementptr
+ // at this level with that as the base.
+ SmallVector<const SCEV *, 4> NewOps;
+ for (; I != E && I->first == CurLoop; ++I)
+ NewOps.push_back(I->second);
+ Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
+ } else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
+ // The running sum is an integer, and there's a pointer at this level.
+ // Try to form a getelementptr.
+ SmallVector<const SCEV *, 4> NewOps;
+ NewOps.push_back(SE.getUnknown(Sum));
+ for (++I; I != E && I->first == CurLoop; ++I)
+ NewOps.push_back(I->second);
+ Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
+ } else if (isNonConstantNegative(Op)) {
+ // Instead of doing a negate and add, just do a subtract.
Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
- V = InsertBinop(Instruction::Sub, V, W);
+ Sum = InsertNoopCastOfTo(Sum, Ty);
+ Sum = InsertBinop(Instruction::Sub, Sum, W);
+ ++I;
} else {
+ // A simple add.
Value *W = expandCodeFor(Op, Ty);
- V = InsertBinop(Instruction::Add, V, W);
+ Sum = InsertNoopCastOfTo(Sum, Ty);
+ // Canonicalize a constant to the RHS.
+ if (isa<Constant>(Sum)) std::swap(Sum, W);
+ Sum = InsertBinop(Instruction::Add, Sum, W);
+ ++I;
}
}
- return V;
+
+ return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
- int FirstOp = 0; // Set if we should emit a subtract.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
- if (SC->getValue()->isAllOnesValue())
- FirstOp = 1;
-
- int i = S->getNumOperands()-2;
- Value *V = expandCodeFor(S->getOperand(i+1), Ty);
-
- // Emit a bunch of multiply instructions
- for (; i >= FirstOp; --i) {
- Value *W = expandCodeFor(S->getOperand(i), Ty);
- V = InsertBinop(Instruction::Mul, V, W);
+
+ // Collect all the mul operands in a loop, along with their associated loops.
+ // Iterate in reverse so that constants are emitted last, all else equal.
+ SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
+ for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
+ E(S->op_begin()); I != E; ++I)
+ OpsAndLoops.push_back(std::make_pair(GetRelevantLoop(*I, *SE.LI, *SE.DT),
+ *I));
+
+ // Sort by loop. Use a stable sort so that constants follow non-constants.
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+
+ // Emit instructions to mul all the operands. Hoist as much as possible
+ // out of loops.
+ Value *Prod = 0;
+ for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
+ I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
+ const SCEV *Op = I->second;
+ if (!Prod) {
+ // This is the first operand. Just expand it.
+ Prod = expand(Op);
+ ++I;
+ } else if (Op->isAllOnesValue()) {
+ // Instead of doing a multiply by negative one, just do a negate.
+ Prod = InsertNoopCastOfTo(Prod, Ty);
+ Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
+ ++I;
+ } else {
+ // A simple mul.
+ Value *W = expandCodeFor(Op, Ty);
+ Prod = InsertNoopCastOfTo(Prod, Ty);
+ // Canonicalize a constant to the RHS.
+ if (isa<Constant>(Prod)) std::swap(Prod, W);
+ Prod = InsertBinop(Instruction::Mul, Prod, W);
+ ++I;
+ }
}
- // -1 * ... ---> 0 - ...
- if (FirstOp == 1)
- V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V);
- return V;
+ return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
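
A worked example, in comment form, of the ordering that PickMostRelevantLoop and LoopCompare impose above (the loop names are invented for illustration):

// Given a nest        for (i)          // L1
//                       for (j)        // L2, nested inside L1
// and two sibling loops L3 and L4 where L3's header dominates L4's header:
//
//   PickMostRelevantLoop(L1, L2, DT) == L2   // the more deeply nested loop
//   PickMostRelevantLoop(L3, L4, DT) == L4   // the later of two siblings
//   PickMostRelevantLoop(0,  L1, DT) == L1   // "no loop" is least relevant
//
// LoopCompare puts operands whose relevant loop is *less* relevant first, so
// loop-invariant pieces are expanded (and hoisted) before the pieces that
// must live in inner loops; the stable sort keeps constants after other
// operands and pointer operands ahead of integer ones within each group.
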
@@ -641,8 +827,65 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Reuse a previously-inserted PHI, if present.
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I)
- if (isInsertedInstruction(PN) && SE.getSCEV(PN) == Normalized)
- return PN;
+ if (SE.isSCEVable(PN->getType()) &&
+ (SE.getEffectiveSCEVType(PN->getType()) ==
+ SE.getEffectiveSCEVType(Normalized->getType())) &&
+ SE.getSCEV(PN) == Normalized)
+ if (BasicBlock *LatchBlock = L->getLoopLatch()) {
+ Instruction *IncV =
+ cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+
+ // Determine if this is a well-behaved chain of instructions leading
+ // back to the PHI. It probably will be, if we're scanning an inner
+ // loop already visited by LSR for example, but it wouldn't have
+ // to be.
+ do {
+ if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV)) {
+ IncV = 0;
+ break;
+ }
+ // If any of the operands don't dominate the insert position, bail.
+ // Addrec operands are always loop-invariant, so this can only happen
+ // if there are instructions which haven't been hoisted.
+ for (User::op_iterator OI = IncV->op_begin()+1,
+ OE = IncV->op_end(); OI != OE; ++OI)
+ if (Instruction *OInst = dyn_cast<Instruction>(OI))
+ if (!SE.DT->dominates(OInst, IVIncInsertPos)) {
+ IncV = 0;
+ break;
+ }
+ if (!IncV)
+ break;
+ // Advance to the next instruction.
+ IncV = dyn_cast<Instruction>(IncV->getOperand(0));
+ if (!IncV)
+ break;
+ if (IncV->mayHaveSideEffects()) {
+ IncV = 0;
+ break;
+ }
+ } while (IncV != PN);
+
+ if (IncV) {
+ // Ok, the add recurrence looks usable.
+ // Remember this PHI, even in post-inc mode.
+ InsertedValues.insert(PN);
+ // Remember the increment.
+ IncV = cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
+ rememberInstruction(IncV);
+ if (L == IVIncInsertLoop)
+ do {
+ if (SE.DT->dominates(IncV, IVIncInsertPos))
+ break;
+ // Make sure the increment is where we want it. But don't move it
+ // down past a potential existing post-inc user.
+ IncV->moveBefore(IVIncInsertPos);
+ IVIncInsertPos = IncV;
+ IncV = cast<Instruction>(IncV->getOperand(0));
+ } while (IncV != PN);
+ return PN;
+ }
+ }
// Save the original insertion point so we can restore it when we're done.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
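
The reuse check above boils down to one predicate: follow operand 0 from the latch incoming value back to the PHI and reject anything that does not look like a plain increment chain. A sketch of that predicate as a stand-alone function (the name is invented; the patch performs the walk inline):

#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
using namespace llvm;

// Return true if LatchVal reaches PN through a chain of side-effect-free,
// non-PHI instructions whose other operands all dominate InsertPos.
static bool isReusableIncrementChain(PHINode *PN, Instruction *LatchVal,
                                     Instruction *InsertPos,
                                     DominatorTree &DT) {
  Instruction *IncV = LatchVal;
  do {
    if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV))
      return false;
    // Operands past the first must already dominate the insert position.
    for (User::op_iterator OI = IncV->op_begin()+1, OE = IncV->op_end();
         OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!DT.dominates(OInst, InsertPos))
          return false;
    // Step to the next link in the chain.
    IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    if (!IncV || IncV->mayHaveSideEffects())
      return false;
  } while (IncV != PN);
  return true;
}
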
@@ -658,7 +901,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// negative, insert a sub instead of an add for the increment (unless it's a
// constant, because subtracts of constants are canonicalized to adds).
const SCEV *Step = Normalized->getStepRecurrence(SE);
- bool isPointer = isa<PointerType>(ExpandTy);
+ bool isPointer = ExpandTy->isPointerTy();
bool isNegative = !isPointer && isNonConstantNegative(Step);
if (isNegative)
Step = SE.getNegativeSCEV(Step);
@@ -713,7 +956,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Restore the original insert point.
if (SaveInsertBB)
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
@@ -763,7 +1006,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const Type *ExpandTy = PostLoopScale ? IntTy : STy;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
- // Accomodate post-inc mode, if necessary.
+ // Accommodate post-inc mode, if necessary.
Value *Result;
if (L != PostIncLoop)
Result = PN;
@@ -776,6 +1019,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Re-apply any non-loop-dominating scale.
if (PostLoopScale) {
+ Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateMul(Result,
expandCodeFor(PostLoopScale, IntTy));
rememberInstruction(Result);
@@ -787,6 +1031,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
+ Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateAdd(Result,
expandCodeFor(PostLoopOffset, IntTy));
rememberInstruction(Result);
@@ -806,7 +1051,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
PHINode *CanonicalIV = 0;
if (PHINode *PN = L->getCanonicalInductionVariable())
if (SE.isSCEVable(PN->getType()) &&
- isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) &&
+ SE.getEffectiveSCEVType(PN->getType())->isIntegerTy() &&
SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
@@ -827,7 +1072,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
@@ -1022,13 +1267,24 @@ Value *SCEVExpander::expand(const SCEV *S) {
L = L->getParentLoop())
if (S->isLoopInvariant(L)) {
if (!L) break;
- if (BasicBlock *Preheader = L->getLoopPreheader())
+ if (BasicBlock *Preheader = L->getLoopPreheader()) {
InsertPt = Preheader->getTerminator();
+ BasicBlock::iterator IP = InsertPt;
+ // Back past any debug info instructions. Sometimes we inserted
+ // something earlier before debug info but after any real instructions.
+ // This should behave the same as if debug info was not present.
+ while (IP != Preheader->begin()) {
+ --IP;
+ if (!isa<DbgInfoIntrinsic>(IP))
+ break;
+ InsertPt = IP;
+ }
+ }
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
- if (L && S->hasComputableLoopEvolution(L))
+ if (L && S->hasComputableLoopEvolution(L) && L != PostIncLoop)
InsertPt = L->getHeader()->getFirstNonPHI();
while (isInsertedInstruction(InsertPt))
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
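
The loop added in this hunk can be read as a small helper (name invented here): back the tentative insertion point up over any run of debug intrinsics sitting just before the terminator, so expansion picks the same point whether or not dbg.value calls are present.

#include "llvm/BasicBlock.h"
#include "llvm/IntrinsicInst.h"
using namespace llvm;

// Starting from the block terminator, skip back over trailing debug
// intrinsics and return the instruction new code should be inserted before.
static Instruction *backPastDbgInfo(BasicBlock *BB) {
  Instruction *InsertPt = BB->getTerminator();
  BasicBlock::iterator IP = InsertPt;
  while (IP != BB->begin()) {
    --IP;
    if (!isa<DbgInfoIntrinsic>(IP))
      break;
    InsertPt = IP;
  }
  return InsertPt;
}
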
@@ -1053,10 +1309,32 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (!PostIncLoop)
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}
+void SCEVExpander::rememberInstruction(Value *I) {
+ if (!PostIncLoop)
+ InsertedValues.insert(I);
+
+ // If we just claimed an existing instruction and that instruction had
+ // been the insert point, adjust the insert point forward so that
+ // subsequently inserted code will be dominated.
+ if (Builder.GetInsertPoint() == I) {
+ BasicBlock::iterator It = cast<Instruction>(I);
+ do { ++It; } while (isInsertedInstruction(It));
+ Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
+ }
+}
+
+void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
+ // If we acquired more instructions since the old insert point was saved,
+ // advance past them.
+ while (isInsertedInstruction(I)) ++I;
+
+ Builder.SetInsertPoint(BB, I);
+}
+
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
@@ -1064,13 +1342,13 @@ Value *SCEVExpander::expand(const SCEV *S) {
Value *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
- assert(Ty->isInteger() && "Can only insert integer induction variables!");
+ assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
SE.getIntegerSCEV(1, Ty), L);
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
if (SaveInsertBB)
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
return V;
}