Diffstat (limited to 'lib/Transforms/InstCombine/InstCombineCompares.cpp')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp  218
 1 file changed, 96 insertions(+), 122 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9bb65ef..8c0ad52 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -15,11 +15,11 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
using namespace PatternMatch;
@@ -28,15 +28,6 @@ static ConstantInt *getOne(Constant *C) {
return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
}
-/// AddOne - Add one to a ConstantInt
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-/// SubOne - Subtract one from a ConstantInt
-static Constant *SubOne(Constant *C) {
- return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
-}
-
static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}
@@ -227,7 +218,7 @@ Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
CmpInst &ICI, ConstantInt *AndCst) {
// We need TD information to know the pointer size unless this is inbounds.
- if (!GEP->isInBounds() && TD == 0)
+ if (!GEP->isInBounds() && DL == 0)
return 0;
Constant *Init = GV->getInitializer();
@@ -316,7 +307,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, TD, TLI);
+ CompareRHS, DL, TLI);
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
// undef in the middle of the range.
@@ -395,7 +386,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds()) {
- Type *IntPtrTy = TD->getIntPtrType(GEP->getType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
Idx = Builder->CreateTrunc(Idx, IntPtrTy);
@@ -484,8 +475,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// - Default to i32
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
Ty = Idx->getType();
- else if (TD)
- Ty = TD->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
+ else if (DL)
+ Ty = DL->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
else if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
@@ -512,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
- DataLayout &TD = *IC.getDataLayout();
+ const DataLayout &DL = *IC.getDataLayout();
gep_type_iterator GTI = gep_type_begin(GEP);
// Check to see if this gep only has a single variable index. If so, and if
@@ -529,9 +520,9 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
} else {
@@ -547,7 +538,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
Value *VariableIdx = GEP->getOperand(i);
// Determine the scale factor of the variable element. For example, this is
// 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
// Verify that there are no other variable indices. If so, emit the hard way.
for (++i, ++GTI; i != e; ++i, ++GTI) {
@@ -559,9 +550,9 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
}
@@ -571,7 +562,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Okay, we know we have a single variable index, which must be a
// pointer/array/vector index. If there is no offset, life is simple, return
// the index.
- Type *IntPtrTy = TD.getIntPtrType(GEP->getOperand(0)->getType());
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
if (Offset == 0) {
// Cast to intptrty in case a truncation occurs. If an extension is needed,
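The hunks above walk a GEP with a single variable index and fold everything else into a constant byte offset. A minimal standalone sketch of that decomposition (hypothetical C++, not LLVM code; S and gepByteOffset are illustrative names):

    #include <cstddef>
    #include <cstdint>

    struct S { int32_t a; int32_t b; int32_t arr[10]; };

    // &s->arr[idx] sits at: constant part (struct field offsets) plus
    // idx * scale, where the scale is the alloc size of the element type.
    int64_t gepByteOffset(int64_t idx) {
      const int64_t ConstantOffset = offsetof(S, arr); // "Offset" above
      const int64_t VariableScale  = sizeof(int32_t);  // "VariableScale" above
      return ConstantOffset + idx * VariableScale;
    }

    int main() { return gepByteOffset(3) == 8 + 3 * 4 ? 0 : 1; }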
@@ -624,7 +615,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
RHS = BCI->getOperand(0);
Value *PtrBase = GEPLHS->getOperand(0);
- if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
+ if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
// ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
// This transformation (ignoring the base and scales) is valid because we
// know pointers can't overflow since the gep is inbounds. See if we can
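The no-overflow argument above is easy to sanity-check: when base + offset cannot wrap, comparing the GEP result against the base pointer collapses to comparing the offset against zero. An exhaustive 8-bit sketch (standalone, hypothetical; unsigned arithmetic with non-negative offsets for simplicity):

    #include <cstdint>
    #include <cassert>

    int main() {
      for (unsigned p = 0; p < 128; ++p)
        for (unsigned off = 0; off < 128; ++off) { // p + off <= 254: no wrap
          uint8_t sum = (uint8_t)(p + off);
          assert((sum == (uint8_t)p) == (off == 0)); // eq  <-> off == 0
          assert((sum >  (uint8_t)p) == (off != 0)); // ugt <-> off != 0
        }
      return 0;
    }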
@@ -657,7 +648,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If we're comparing GEPs with two base pointers that only differ in type
// and both GEPs have only constant indices or just one use, then fold
// the compare with the adjusted indices.
- if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
+ if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
(GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
PtrBase->stripPointerCasts() ==
@@ -728,7 +719,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// Only lower this if the icmp is the only user of the GEP or if we expect
// the result to fold to a constant!
- if (TD &&
+ if (DL &&
GEPsInBounds &&
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
@@ -1078,17 +1069,17 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
break;
- case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
- if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
+ case Instruction::Xor: // (icmp pred (xor X, XorCst), CI)
+ if (ConstantInt *XorCst = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
// If this is a comparison that tests the signbit (X < 0) or (x > -1),
// fold the xor.
if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
(ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
Value *CompareVal = LHSI->getOperand(0);
- // If the sign bit of the XorCST is not set, there is no change to
+ // If the sign bit of the XorCst is not set, there is no change to
// the operation, just stop using the Xor.
- if (!XorCST->isNegative()) {
+ if (!XorCst->isNegative()) {
ICI.setOperand(0, CompareVal);
Worklist.Add(LHSI);
return &ICI;
@@ -1110,8 +1101,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (LHSI->hasOneUse()) {
// (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
- if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
- const APInt &SignBit = XorCST->getValue();
+ if (!ICI.isEquality() && XorCst->getValue().isSignBit()) {
+ const APInt &SignBit = XorCst->getValue();
ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
@@ -1120,8 +1111,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
// (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
- if (!ICI.isEquality() && XorCST->isMaxValue(true)) {
- const APInt &NotSignBit = XorCST->getValue();
+ if (!ICI.isEquality() && XorCst->isMaxValue(true)) {
+ const APInt &NotSignBit = XorCst->getValue();
ICmpInst::Predicate Pred = ICI.isSigned()
? ICI.getUnsignedPredicate()
: ICI.getSignedPredicate();
@@ -1134,20 +1125,20 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
// iff -C is a power of 2
if (ICI.getPredicate() == ICmpInst::ICMP_UGT &&
- XorCST->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
- return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCST);
+ XorCst->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
+ return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCst);
// (icmp ult (xor X, C), -C) -> (icmp uge X, C)
// iff -C is a power of 2
if (ICI.getPredicate() == ICmpInst::ICMP_ULT &&
- XorCST->getValue() == -RHSV && RHSV.isPowerOf2())
- return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCST);
+ XorCst->getValue() == -RHSV && RHSV.isPowerOf2())
+ return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCst);
}
break;
- case Instruction::And: // (icmp pred (and X, AndCST), RHS)
+ case Instruction::And: // (icmp pred (and X, AndCst), RHS)
if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
LHSI->getOperand(0)->hasOneUse()) {
- ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
+ ConstantInt *AndCst = cast<ConstantInt>(LHSI->getOperand(1));
// If the LHS is an AND of a truncating cast, we can widen the
// and/compare to be the input width without changing the value
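The two power-of-2 xor folds above (icmp ugt/ult of (xor X, C)) lend themselves to brute-force verification. A standalone 8-bit sketch (hypothetical, not LLVM code):

    #include <cstdint>
    #include <cassert>

    int main() {
      for (unsigned c = 0; c < 256; ++c) {
        uint8_t C = (uint8_t)c, NegC = (uint8_t)-C;
        if (NegC == 0 || (NegC & (NegC - 1)) != 0) continue; // -C: power of 2
        for (unsigned x = 0; x < 256; ++x) {
          uint8_t X = (uint8_t)x;
          assert(((uint8_t)(X ^ C) > (uint8_t)~C) == (X <  C)); // ugt -> ult
          assert(((uint8_t)(X ^ C) < NegC)        == (X >= C)); // ult -> uge
        }
      }
      return 0;
    }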
@@ -1158,10 +1149,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// Extending a relational comparison when we're checking the sign
// bit would not work.
if (ICI.isEquality() ||
- (!AndCST->isNegative() && RHSV.isNonNegative())) {
+ (!AndCst->isNegative() && RHSV.isNonNegative())) {
Value *NewAnd =
Builder->CreateAnd(Cast->getOperand(0),
- ConstantExpr::getZExt(AndCST, Cast->getSrcTy()));
+ ConstantExpr::getZExt(AndCst, Cast->getSrcTy()));
NewAnd->takeName(LHSI);
return new ICmpInst(ICI.getPredicate(), NewAnd,
ConstantExpr::getZExt(RHS, Cast->getSrcTy()));
@@ -1177,7 +1168,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
Value *NewAnd =
Builder->CreateAnd(Cast->getOperand(0),
- ConstantExpr::getTrunc(AndCST, Ty));
+ ConstantExpr::getTrunc(AndCst, Ty));
NewAnd->takeName(LHSI);
return new ICmpInst(ICI.getPredicate(), NewAnd,
ConstantExpr::getTrunc(RHS, Ty));
@@ -1194,45 +1185,54 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
ConstantInt *ShAmt;
ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
- Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
- Type *AndTy = AndCST->getType(); // Type of the and.
-
- // We can fold this as long as we can't shift unknown bits
- // into the mask. This can happen with signed shift
- // rights, as they sign-extend. With logical shifts,
- // we must still make sure the comparison is not signed
- // because we are effectively changing the
- // position of the sign bit (PR17827).
- // TODO: We can relax these constraints a bit more.
+
+ // This seemingly simple opportunity to fold away a shift turns out to
+ // be rather complicated. See PR17827
+ // ( http://llvm.org/bugs/show_bug.cgi?id=17827 ) for details.
if (ShAmt) {
bool CanFold = false;
unsigned ShiftOpcode = Shift->getOpcode();
if (ShiftOpcode == Instruction::AShr) {
- // To test for the bad case of the signed shr, see if any
- // of the bits shifted in could be tested after the mask.
- uint32_t TyBits = Ty->getPrimitiveSizeInBits();
- int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
-
- uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
- if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
- AndCST->getValue()) == 0)
+ // There may be some constraints that make this possible,
+ // but nothing simple has been discovered yet.
+ CanFold = false;
+ } else if (ShiftOpcode == Instruction::Shl) {
+ // For a left shift, we can fold if the comparison is not signed.
+ // We can also fold a signed comparison if the mask value and
+ // comparison value are not negative. These constraints may not be
+ // obvious, but we can prove that they are correct using an SMT
+ // solver.
+ if (!ICI.isSigned() || (!AndCst->isNegative() && !RHS->isNegative()))
+ CanFold = true;
+ } else if (ShiftOpcode == Instruction::LShr) {
+ // For a logical right shift, we can fold if the comparison is not
+ // signed. We can also fold a signed comparison if the shifted mask
+ // value and the shifted comparison value are not negative.
+ // These constraints may not be obvious, but we can prove that they
+ // are correct using an SMT solver.
+ if (!ICI.isSigned())
CanFold = true;
- } else if (ShiftOpcode == Instruction::Shl ||
- ShiftOpcode == Instruction::LShr) {
- CanFold = !ICI.isSigned();
+ else {
+ ConstantInt *ShiftedAndCst =
+ cast<ConstantInt>(ConstantExpr::getShl(AndCst, ShAmt));
+ ConstantInt *ShiftedRHSCst =
+ cast<ConstantInt>(ConstantExpr::getShl(RHS, ShAmt));
+
+ if (!ShiftedAndCst->isNegative() && !ShiftedRHSCst->isNegative())
+ CanFold = true;
+ }
}
if (CanFold) {
Constant *NewCst;
- if (Shift->getOpcode() == Instruction::Shl)
+ if (ShiftOpcode == Instruction::Shl)
NewCst = ConstantExpr::getLShr(RHS, ShAmt);
else
NewCst = ConstantExpr::getShl(RHS, ShAmt);
// Check to see if we are shifting out any of the bits being
// compared.
- if (ConstantExpr::get(Shift->getOpcode(),
- NewCst, ShAmt) != RHS) {
+ if (ConstantExpr::get(ShiftOpcode, NewCst, ShAmt) != RHS) {
// If we shifted bits out, the fold is not going to work out.
// As a special case, check to see if this means that the
// result is always true or false now.
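The Shl constraint added above (a signed compare folds when the mask and comparison constant are non-negative and no compared bits are shifted out) can be checked exhaustively at 8 bits for one representative predicate, slt. A standalone sketch (hypothetical, not LLVM code):

    #include <cstdint>
    #include <cassert>

    int main() {
      for (unsigned m = 0; m < 128; ++m)          // AndCst >= 0 as int8_t
        for (unsigned r = 0; r < 128; ++r)        // RHS    >= 0 as int8_t
          for (unsigned s = 1; s < 8; ++s) {
            if (((r >> s) << s) != r) continue;   // compared bits shifted out
            for (unsigned x = 0; x < 256; ++x) {
              // icmp slt (and (shl X, s), m), r
              bool before = (int8_t)((x << s) & m) < (int8_t)r;
              // icmp slt (and X, m >>u s), r >>u s
              bool after  = (int8_t)(x & (m >> s)) < (int8_t)(r >> s);
              assert(before == after);
            }
          }
      return 0;
    }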
@@ -1242,12 +1242,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
return ReplaceInstUsesWith(ICI, Builder->getTrue());
} else {
ICI.setOperand(1, NewCst);
- Constant *NewAndCST;
- if (Shift->getOpcode() == Instruction::Shl)
- NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
+ Constant *NewAndCst;
+ if (ShiftOpcode == Instruction::Shl)
+ NewAndCst = ConstantExpr::getLShr(AndCst, ShAmt);
else
- NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
- LHSI->setOperand(1, NewAndCST);
+ NewAndCst = ConstantExpr::getShl(AndCst, ShAmt);
+ LHSI->setOperand(1, NewAndCst);
LHSI->setOperand(0, Shift->getOperand(0));
Worklist.Add(Shift); // Shift is dead.
return &ICI;
@@ -1264,10 +1264,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// Compute C << Y.
Value *NS;
if (Shift->getOpcode() == Instruction::LShr) {
- NS = Builder->CreateShl(AndCST, Shift->getOperand(1));
+ NS = Builder->CreateShl(AndCst, Shift->getOperand(1));
} else {
// Insert a logical shift.
- NS = Builder->CreateLShr(AndCST, Shift->getOperand(1));
+ NS = Builder->CreateLShr(AndCst, Shift->getOperand(1));
}
// Compute X & (C << Y).
@@ -1278,12 +1278,12 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
return &ICI;
}
- // Replace ((X & AndCST) > RHSV) with ((X & AndCST) != 0), if any
- // bit set in (X & AndCST) will produce a result greater than RHSV.
+ // Replace ((X & AndCst) > RHSV) with ((X & AndCst) != 0), if any
+ // bit set in (X & AndCst) will produce a result greater than RHSV.
if (ICI.getPredicate() == ICmpInst::ICMP_UGT) {
- unsigned NTZ = AndCST->getValue().countTrailingZeros();
- if ((NTZ < AndCST->getBitWidth()) &&
- APInt::getOneBitSet(AndCST->getBitWidth(), NTZ).ugt(RHSV))
+ unsigned NTZ = AndCst->getValue().countTrailingZeros();
+ if ((NTZ < AndCst->getBitWidth()) &&
+ APInt::getOneBitSet(AndCst->getBitWidth(), NTZ).ugt(RHSV))
return new ICmpInst(ICmpInst::ICMP_NE, LHSI,
Constant::getNullValue(RHS->getType()));
}
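The ugt-to-ne fold above rests on the lowest possible nonzero value of (X & AndCst), namely its lowest set bit, already exceeding RHSV. A standalone exhaustive 8-bit sketch (hypothetical, not LLVM code):

    #include <cassert>

    int main() {
      for (unsigned m = 1; m < 256; ++m) {        // AndCst, nonzero
        unsigned ntz = 0;                         // countTrailingZeros
        while (!((m >> ntz) & 1)) ++ntz;
        for (unsigned r = 0; r < 256; ++r) {
          if ((1u << ntz) <= r) continue;         // need 2^NTZ ugt RHSV
          for (unsigned x = 0; x < 256; ++x)
            assert(((x & m) > r) == ((x & m) != 0));
        }
      }
      return 0;
    }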
@@ -1792,8 +1792,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
- if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
+ if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
+ DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
Value *RHSOp = 0;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
@@ -1937,16 +1937,15 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// and truncates that discard the high bits of the add. Verify that this is
// the case.
Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
- for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end();
- UI != E; ++UI) {
- if (*UI == AddWithCst) continue;
+ for (User *U : OrigAdd->users()) {
+ if (U == AddWithCst) continue;
// Only accept truncates for now. We would really like a nice recursive
// predicate like SimplifyDemandedBits, but which goes downwards the use-def
// chain to see which bits of a value are actually demanded. If the
// original add had another add which was then immediately truncated, we
// could still do the transformation.
- TruncInst *TI = dyn_cast<TruncInst>(*UI);
+ TruncInst *TI = dyn_cast<TruncInst>(U);
if (TI == 0 ||
TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
}
@@ -2048,7 +2047,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I,
/// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
/// should be swapped.
-/// The descision is based on how many times these two operands are reused
+/// The decision is based on how many times these two operands are reused
/// as subtract operands and their positions in those instructions.
/// The rationale is that several architectures use the same instruction for
/// both subtract and cmp, thus it is better if the order of those operands
@@ -2064,12 +2063,12 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
// Each time Op0 is the first operand, count -1: swapping is bad, the
// subtract has already the same layout as the compare.
// Each time Op0 is the second operand, count +1: swapping is good, the
- // subtract has a diffrent layout as the compare.
+ // subtract has a different layout as the compare.
// At the end, if the benefit is greater than 0, Op0 should come second to
// expose more CSE opportunities.
int GlobalSwapBenefits = 0;
- for (Value::const_use_iterator UI = Op0->use_begin(), UIEnd = Op0->use_end(); UI != UIEnd; ++UI) {
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(*UI);
+ for (const User *U : Op0->users()) {
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
continue;
// If Op0 is the first argument, this is not beneficial to swap the
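The counting heuristic described above is small enough to model directly. A standalone sketch (hypothetical names, not LLVM code):

    #include <cassert>
    #include <vector>

    enum class Op0Pos { First, Second };

    // +1 for each sub where Op0 is the second operand (swapping the compare
    // would match that sub's layout), -1 where Op0 is already first.
    static bool shouldSwap(const std::vector<Op0Pos> &SubUsers) {
      int Benefit = 0;
      for (Op0Pos P : SubUsers)
        Benefit += (P == Op0Pos::Second) ? +1 : -1;
      return Benefit > 0;
    }

    int main() {
      // Two subs take Op0 second, one takes it first: swapping wins.
      assert(shouldSwap({Op0Pos::Second, Op0Pos::Second, Op0Pos::First}));
      assert(!shouldSwap({Op0Pos::First}));
      return 0;
    }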
@@ -2104,7 +2103,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Changed = true;
}
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// comparing -val or val with non-zero is the same as just comparing val
@@ -2172,8 +2171,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
- else if (TD) // Pointers require TD info to get their size.
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
+ else if (DL) // Pointers require DL info to get their size.
+ BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());
bool isSignBit = false;
@@ -2468,7 +2467,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// operands has at least one user besides the compare (the select),
// which would often largely negate the benefit of folding anyway.
if (I.hasOneUse())
- if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
+ if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
(SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
return 0;
@@ -2532,8 +2531,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
- if (RHSC->isNullValue() && TD &&
- TD->getIntPtrType(RHSC->getType()) ==
+ if (RHSC->isNullValue() && DL &&
+ DL->getIntPtrType(RHSC->getType()) ==
LHSI->getOperand(0)->getType())
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
Constant::getNullValue(LHSI->getOperand(0)->getType()));
@@ -3229,7 +3228,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Simplify 'fcmp pred X, X'
@@ -3313,31 +3312,6 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
return NV;
break;
- case Instruction::Select: {
- // If either operand of the select is a constant, we can fold the
- // comparison into the select arms, which will cause one to be
- // constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
- if (LHSI->hasOneUse()) {
- if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
- // Fold the known value into the constant operand.
- Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op2 = Builder->CreateFCmp(I.getPredicate(),
- LHSI->getOperand(2), RHSC, I.getName());
- } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
- // Fold the known value into the constant operand.
- Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
- RHSC, I.getName());
- }
- }
-
- if (Op1)
- return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
- break;
- }
case Instruction::FSub: {
// fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
Value *Op;