author     Gabor Greif <ggreif@gmail.com>  2010-04-15 12:46:56 +0000
committer  Gabor Greif <ggreif@gmail.com>  2010-04-15 12:46:56 +0000
commit     9ee17208115482441953127615231c59a2f4d052 (patch)
tree       fea1c2465e14ae8c3d89e0cf07588155acf8a23d /lib/Transforms
parent     45d95a1ad5eb72c65db8cc691a10339aa7c9c024 (diff)
back out r101364, as it trips the linux nightlybot on some clang C++ tests
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101368 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                              26
-rw-r--r--  lib/Transforms/IPO/LowerSetJmp.cpp                             8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp              159
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp             6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp               2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp    16
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp           10
-rw-r--r--  lib/Transforms/Instrumentation/ProfilingUtils.cpp              8
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp                12
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                                  6
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp                      2
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp                20
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp                   150
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp             4
-rw-r--r--  lib/Transforms/Utils/AddrModeMatcher.cpp                       4
-rw-r--r--  lib/Transforms/Utils/BuildLibCalls.cpp                        38
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp                        2
17 files changed, 238 insertions, 235 deletions
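
Editor's note: nearly every hunk below is the same mechanical change. The reverted r101364 had moved a CallInst's callee to the last operand slot, putting call arguments at operands 0..N-1; this commit restores the older layout with the callee at operand 0 and arguments at operands 1..N. A minimal C++ sketch of the two layouts against the 2010-era LLVM API follows; the helper names are illustrative only and do not exist in the tree:

```cpp
// Illustrative only: these helpers merely restate the two CallInst
// operand layouts this revert switches between.
#include "llvm/Instructions.h"  // 2010-era include path (assumed)
using namespace llvm;

// Layout restored by this commit: operand 0 is the callee, so the i-th
// call argument lives at operand i+1.  This is why the "+" lines below
// use CI->getOperand(ArgNo + 1) and iterate from op_begin() + 1.
static Value *getArgCalleeFirst(CallInst *CI, unsigned ArgNo) {
  return CI->getOperand(ArgNo + 1);  // skip the callee slot
}

// Layout introduced by r101364 and backed out here: arguments occupy
// operands 0..N-1 and the callee comes last, so argument loops ran from
// op_begin() to op_end() - 1 (the "-" lines below).
static Value *getArgCalleeLast(CallInst *CI, unsigned ArgNo) {
  return CI->getOperand(ArgNo);      // callee is the final operand
}
```

Under the restored layout a memset intrinsic, for example, has its destination at operand 1, its fill value at operand 2, and its length at operand 3, which accounts for every +1 index shift in the hunks that follow.
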
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index fbd77bd..0d51be7 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -222,12 +222,12 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
GS.HasPHIUser = true;
} else if (isa<CmpInst>(I)) {
} else if (isa<MemTransferInst>(I)) {
- if (I->getOperand(0) == V)
- GS.StoredType = GlobalStatus::isStored;
if (I->getOperand(1) == V)
+ GS.StoredType = GlobalStatus::isStored;
+ if (I->getOperand(2) == V)
GS.isLoaded = true;
} else if (isa<MemSetInst>(I)) {
- assert(I->getOperand(0) == V && "Memset only takes one pointer!");
+ assert(I->getOperand(1) == V && "Memset only takes one pointer!");
GS.StoredType = GlobalStatus::isStored;
} else {
return true; // Any other non-load instruction might take address!
@@ -1323,8 +1323,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// if (F2) { free(F2); F2 = 0; }
// }
// The malloc can also fail if its argument is too large.
- Constant *ConstantZero = ConstantInt::get(CI->getOperand(0)->getType(), 0);
- Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getOperand(0),
+ Constant *ConstantZero = ConstantInt::get(CI->getOperand(1)->getType(), 0);
+ Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getOperand(1),
ConstantZero, "isneg");
for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
@@ -1511,10 +1511,10 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
- if (NElems == ConstantInt::get(CI->getOperand(0)->getType(), 1))
+ if (NElems == ConstantInt::get(CI->getOperand(1)->getType(), 1))
if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
AllocTy = AT->getElementType();
-
+
const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
if (!AllocSTy)
return false;
@@ -1641,7 +1641,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// bool.
Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
- // If we've already replaced the input, StoredVal will be a cast or
+ // If we're already replaced the input, StoredVal will be a cast or
// select instruction. If not, it will be a load of the original
// global.
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
@@ -2262,8 +2262,8 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
InstResult =
ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
- getVal(Values, SI->getOperand(1)),
- getVal(Values, SI->getOperand(2)));
+ getVal(Values, SI->getOperand(1)),
+ getVal(Values, SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
Constant *P = getVal(Values, GEP->getOperand(0));
SmallVector<Constant*, 8> GEPOps;
@@ -2295,14 +2295,14 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
// Cannot handle inline asm.
- if (isa<InlineAsm>(CI->getCalledValue())) return false;
+ if (isa<InlineAsm>(CI->getOperand(0))) return false;
// Resolve function pointers.
- Function *Callee = dyn_cast<Function>(getVal(Values, CI->getCalledValue()));
+ Function *Callee = dyn_cast<Function>(getVal(Values, CI->getOperand(0)));
if (!Callee) return false; // Cannot resolve.
SmallVector<Constant*, 8> Formals;
- for (User::op_iterator i = CI->op_begin(), e = CI->op_end() - 1;
+ for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end();
i != e; ++i)
Formals.push_back(getVal(Values, *i));
diff --git a/lib/Transforms/IPO/LowerSetJmp.cpp b/lib/Transforms/IPO/LowerSetJmp.cpp
index d1eaadd..4d61e83 100644
--- a/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -262,8 +262,8 @@ void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
// char*. It returns "void", so it doesn't need to replace any of
// Inst's uses and doesn't get a name.
CastInst* CI =
- new BitCastInst(Inst->getOperand(0), SBPTy, "LJBuf", Inst);
- Value *Args[] = { CI, Inst->getOperand(1) };
+ new BitCastInst(Inst->getOperand(1), SBPTy, "LJBuf", Inst);
+ Value *Args[] = { CI, Inst->getOperand(2) };
CallInst::Create(ThrowLongJmp, Args, Args + 2, "", Inst);
SwitchValuePair& SVP = SwitchValMap[Inst->getParent()->getParent()];
@@ -378,7 +378,7 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
const Type* SBPTy =
Type::getInt8PtrTy(Inst->getContext());
CastInst* BufPtr =
- new BitCastInst(Inst->getOperand(0), SBPTy, "SBJmpBuf", Inst);
+ new BitCastInst(Inst->getOperand(1), SBPTy, "SBJmpBuf", Inst);
Value *Args[] = {
GetSetJmpMap(Func), BufPtr,
ConstantInt::get(Type::getInt32Ty(Inst->getContext()), SetJmpIDMap[Func]++)
@@ -473,7 +473,7 @@ void LowerSetJmp::visitCallInst(CallInst& CI)
// Construct the new "invoke" instruction.
TerminatorInst* Term = OldBB->getTerminator();
- std::vector<Value*> Params(CI.op_begin(), CI.op_end() - 1);
+ std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
InvokeInst* II =
InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
Params.begin(), Params.end(), CI.getName(), Term);
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c531fd3..e025b05 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -109,8 +109,8 @@ unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(0));
- unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
+ unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
+ unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@@ -122,7 +122,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
// load/store.
- ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(2));
+ ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
if (MemOpLength == 0) return 0;
// Source and destination pointer types are always "i8*" for intrinsic. See
@@ -137,9 +137,9 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// Use an integer load+store unless we can find something better.
unsigned SrcAddrSp =
- cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
+ cast<PointerType>(MI->getOperand(2)->getType())->getAddressSpace();
unsigned DstAddrSp =
- cast<PointerType>(MI->getOperand(0)->getType())->getAddressSpace();
+ cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
@@ -151,8 +151,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// an i64 load+store, here because this improves the odds that the source or
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
- Value *StrippedDest = MI->getOperand(0)->stripPointerCasts();
- if (StrippedDest != MI->getOperand(0)) {
+ Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
+ if (StrippedDest != MI->getOperand(1)) {
const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
@@ -186,15 +186,15 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
- Value *Src = Builder->CreateBitCast(MI->getOperand(1), NewSrcPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getOperand(0), NewDstPtrTy);
+ Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewSrcPtrTy);
+ Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewDstPtrTy);
Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
InsertNewInstBefore(L, *MI);
InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
*MI);
// Set the size of the copy to 0, it will be deleted on the next iteration.
- MI->setOperand(2, Constant::getNullValue(MemOpLength->getType()));
+ MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
return MI;
}
@@ -258,7 +258,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
if (!II) return visitCallSite(&CI);
-
+
// Intrinsics cannot occur in an invoke, so handle them here instead of in
// visitCallSite.
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
@@ -282,12 +282,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
if (GVSrc->isConstant()) {
- Module *M = MMI->getParent()->getParent()->getParent();
+ Module *M = CI.getParent()->getParent()->getParent();
Intrinsic::ID MemCpyID = Intrinsic::memcpy;
- const Type *Tys[3] = { CI.getOperand(0)->getType(),
- CI.getOperand(1)->getType(),
- CI.getOperand(2)->getType() };
- MMI->setCalledFunction(
+ const Type *Tys[3] = { CI.getOperand(1)->getType(),
+ CI.getOperand(2)->getType(),
+ CI.getOperand(3)->getType() };
+ CI.setOperand(0,
Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
Changed = true;
}
@@ -297,19 +297,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// memmove(x,x,size) -> noop.
if (MTI->getSource() == MTI->getDest())
return EraseInstFromFunction(CI);
+ }
- // If we can determine a pointer alignment that is bigger than currently
- // set, update the alignment.
- if (Instruction *I = SimplifyMemTransfer(MTI))
+ // If we can determine a pointer alignment that is bigger than currently
+ // set, update the alignment.
+ if (isa<MemTransferInst>(MI)) {
+ if (Instruction *I = SimplifyMemTransfer(MI))
return I;
} else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
if (Instruction *I = SimplifyMemSet(MSI))
return I;
}
-
+
if (Changed) return II;
}
-
+
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
@@ -317,10 +319,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (!TD) break;
const Type *ReturnTy = CI.getType();
- bool Min = (cast<ConstantInt>(II->getOperand(1))->getZExtValue() == 1);
+ bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
// Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getOperand(0)->stripPointerCasts();
+ Value *Op1 = II->getOperand(1)->stripPointerCasts();
// If we've stripped down to a single global variable that we
// can know the size of then just return that.
@@ -388,6 +390,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
return ReplaceInstUsesWith(CI, RetVal);
+
}
// Do not return "I don't know" here. Later optimization passes could
@@ -396,45 +399,45 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(0)))
+ if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
if (Operand->getIntrinsicID() == Intrinsic::bswap)
- return ReplaceInstUsesWith(CI, Operand->getOperand(0));
+ return ReplaceInstUsesWith(CI, Operand->getOperand(1));
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
- if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(0))) {
+ if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap) {
unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
TI->getType()->getPrimitiveSizeInBits();
Value *CV = ConstantInt::get(Operand->getType(), C);
- Value *V = Builder->CreateLShr(Operand->getOperand(0), CV);
+ Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
return new TruncInst(V, TI->getType());
}
}
break;
case Intrinsic::powi:
- if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(1))) {
+ if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
// powi(x, 0) -> 1.0
if (Power->isZero())
return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
// powi(x, 1) -> x
if (Power->isOne())
- return ReplaceInstUsesWith(CI, II->getOperand(0));
+ return ReplaceInstUsesWith(CI, II->getOperand(1));
// powi(x, -1) -> 1/x
if (Power->isAllOnesValue())
return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
- II->getOperand(0));
+ II->getOperand(1));
}
break;
case Intrinsic::cttz: {
// If all bits below the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(0), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
@@ -447,11 +450,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ctlz: {
// If all bits above the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(0), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
@@ -462,8 +465,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
break;
case Intrinsic::uadd_with_overflow: {
- Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
- const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
+ Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
+ const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
@@ -507,19 +510,19 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// FALL THROUGH uadd into sadd
case Intrinsic::sadd_with_overflow:
// Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(0)) &&
- !isa<Constant>(II->getOperand(1))) {
- Value *LHS = II->getOperand(0);
- II->setOperand(0, II->getOperand(1));
- II->setOperand(1, LHS);
+ if (isa<Constant>(II->getOperand(1)) &&
+ !isa<Constant>(II->getOperand(2))) {
+ Value *LHS = II->getOperand(1);
+ II->setOperand(1, II->getOperand(2));
+ II->setOperand(2, LHS);
return II;
}
// X + undef -> undef
- if (isa<UndefValue>(II->getOperand(1)))
+ if (isa<UndefValue>(II->getOperand(2)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(1))) {
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
// X + 0 -> {X, false}
if (RHS->isZero()) {
Constant *V[] = {
@@ -527,7 +530,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(0), 0);
+ return InsertValueInst::Create(Struct, II->getOperand(1), 0);
}
}
break;
@@ -535,38 +538,38 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ssub_with_overflow:
// undef - X -> undef
// X - undef -> undef
- if (isa<UndefValue>(II->getOperand(0)) ||
- isa<UndefValue>(II->getOperand(1)))
+ if (isa<UndefValue>(II->getOperand(1)) ||
+ isa<UndefValue>(II->getOperand(2)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(1))) {
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
// X - 0 -> {X, false}
if (RHS->isZero()) {
Constant *V[] = {
- UndefValue::get(II->getOperand(0)->getType()),
+ UndefValue::get(II->getOperand(1)->getType()),
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(0), 0);
+ return InsertValueInst::Create(Struct, II->getOperand(1), 0);
}
}
break;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
// Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(0)) &&
- !isa<Constant>(II->getOperand(1))) {
- Value *LHS = II->getOperand(0);
- II->setOperand(0, II->getOperand(1));
- II->setOperand(1, LHS);
+ if (isa<Constant>(II->getOperand(1)) &&
+ !isa<Constant>(II->getOperand(2))) {
+ Value *LHS = II->getOperand(1);
+ II->setOperand(1, II->getOperand(2));
+ II->setOperand(2, LHS);
return II;
}
// X * undef -> undef
- if (isa<UndefValue>(II->getOperand(1)))
+ if (isa<UndefValue>(II->getOperand(2)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(1))) {
+ if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
// X*0 -> {0, false}
if (RHSI->isZero())
return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
@@ -574,11 +577,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// X * 1 -> {X, false}
if (RHSI->equalsInt(1)) {
Constant *V[] = {
- UndefValue::get(II->getOperand(0)->getType()),
+ UndefValue::get(II->getOperand(1)->getType()),
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(0), 0);
+ return InsertValueInst::Create(Struct, II->getOperand(1), 0);
}
}
break;
@@ -589,8 +592,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_loadu_dq:
// Turn PPC lvx -> load if the pointer is known aligned.
// Turn X86 loadups -> load if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(0), 16) >= 16) {
- Value *Ptr = Builder->CreateBitCast(II->getOperand(0),
+ if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
+ Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
}
@@ -598,22 +601,22 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
+ if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(0)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
- return new StoreInst(II->getOperand(0), Ptr);
+ PointerType::getUnqual(II->getOperand(1)->getType());
+ Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
+ return new StoreInst(II->getOperand(1), Ptr);
}
break;
case Intrinsic::x86_sse_storeu_ps:
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(0), 16) >= 16) {
+ if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(1)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(0), OpPtrTy);
- return new StoreInst(II->getOperand(1), Ptr);
+ PointerType::getUnqual(II->getOperand(2)->getType());
+ Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
+ return new StoreInst(II->getOperand(2), Ptr);
}
break;
@@ -621,12 +624,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// These intrinsics only demands the 0th element of its input vector. If
// we can simplify the input based on that, do so now.
unsigned VWidth =
- cast<VectorType>(II->getOperand(0)->getType())->getNumElements();
+ cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
APInt DemandedElts(VWidth, 1);
APInt UndefElts(VWidth, 0);
- if (Value *V = SimplifyDemandedVectorElts(II->getOperand(0), DemandedElts,
+ if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
UndefElts)) {
- II->setOperand(0, V);
+ II->setOperand(1, V);
return II;
}
break;
@@ -634,7 +637,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(2))) {
+ if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
// Check that all of the elements are integer constants or undefs.
@@ -649,8 +652,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
- Value *Op0 = Builder->CreateBitCast(II->getOperand(0), Mask->getType());
- Value *Op1 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
+ Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
+ Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once.
@@ -683,7 +686,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
- if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(0))) {
+ if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
if (SS->getIntrinsicID() == Intrinsic::stacksave) {
BasicBlock::iterator BI = SS;
if (&*++BI == II)
@@ -840,7 +843,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
CS.getInstruction());
- // If CS does not return void then replaceAllUsesWith undef.
+ // If CS dues not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
if (!CS.getInstruction()->getType()->isVoidTy())
CS.getInstruction()->
@@ -1134,7 +1137,7 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
IntrinsicInst *Tramp =
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
- Function *NestF = cast<Function>(Tramp->getOperand(1)->stripPointerCasts());
+ Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
@@ -1175,7 +1178,7 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
do {
if (Idx == NestIdx) {
// Add the chain argument and attributes.
- Value *NestVal = Tramp->getOperand(2);
+ Value *NestVal = Tramp->getOperand(3);
if (NestVal->getType() != NestTy)
NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
NewArgs.push_back(NestVal);
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 1812e1d..861cf92 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1423,7 +1423,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
switch (II->getIntrinsicID()) {
case Intrinsic::bswap:
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(0));
+ ICI.setOperand(0, II->getOperand(1));
ICI.setOperand(1, ConstantInt::get(II->getContext(), RHSV.byteSwap()));
return &ICI;
case Intrinsic::ctlz:
@@ -1431,7 +1431,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// ctz(A) == bitwidth(a) -> A == 0 and likewise for !=
if (RHSV == RHS->getType()->getBitWidth()) {
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(0));
+ ICI.setOperand(0, II->getOperand(1));
ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0));
return &ICI;
}
@@ -1440,7 +1440,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// popcount(A) == 0 -> A == 0 and likewise for !=
if (RHS->isZero()) {
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(0));
+ ICI.setOperand(0, II->getOperand(1));
ICI.setOperand(1, RHS);
return &ICI;
}
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index e50d0bc..836bda3 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -404,7 +404,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == Op1C->getZExtValue()){
bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
- Value *Cmp = Builder->CreateICmpEQ(II->getOperand(0), RHS);
+ Value *Cmp = Builder->CreateICmpEQ(II->getOperand(1), RHS);
return new ZExtInst(Cmp, II->getType());
}
}
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 4144770..cd41844 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -732,10 +732,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// the right place.
Instruction *NewVal;
if (InputBit > ResultBit)
- NewVal = BinaryOperator::CreateLShr(II->getOperand(0),
+ NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
ConstantInt::get(I->getType(), InputBit-ResultBit));
else
- NewVal = BinaryOperator::CreateShl(II->getOperand(0),
+ NewVal = BinaryOperator::CreateShl(I->getOperand(1),
ConstantInt::get(I->getType(), ResultBit-InputBit));
NewVal->takeName(I);
return InsertNewInstBefore(NewVal, *I);
@@ -1052,12 +1052,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Intrinsic::x86_sse2_mul_sd:
case Intrinsic::x86_sse2_min_sd:
case Intrinsic::x86_sse2_max_sd:
- TmpV = SimplifyDemandedVectorElts(II->getOperand(0), DemandedElts,
- UndefElts, Depth+1);
- if (TmpV) { II->setOperand(0, TmpV); MadeChange = true; }
TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
- UndefElts2, Depth+1);
+ UndefElts, Depth+1);
if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
+ TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
+ UndefElts2, Depth+1);
+ if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
// If only the low elt is demanded and this is a scalarizable intrinsic,
// scalarize it now.
@@ -1069,8 +1069,8 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Intrinsic::x86_sse2_sub_sd:
case Intrinsic::x86_sse2_mul_sd:
// TODO: Lower MIN/MAX/ABS/etc
- Value *LHS = II->getOperand(0);
- Value *RHS = II->getOperand(1);
+ Value *LHS = II->getOperand(1);
+ Value *RHS = II->getOperand(2);
// Extract the element as scalars.
LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
ConstantInt::get(Type::getInt32Ty(I->getContext()), 0U)), *II);
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1293de8..af9ec5c 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -711,7 +711,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
Instruction *InstCombiner::visitFree(Instruction &FI) {
- Value *Op = FI.getOperand(0);
+ Value *Op = FI.getOperand(1);
// free undef -> unreachable.
if (isa<UndefValue>(Op)) {
@@ -896,7 +896,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
// We're extracting from an intrinsic, see if we're the only user, which
// allows us to simplify multiple result intrinsics to simpler things that
- // just get one value.
+ // just get one value..
if (II->hasOneUse()) {
// Check if we're grabbing the overflow bit or the result of a 'with
// overflow' intrinsic. If it's the latter we can remove the intrinsic
@@ -905,7 +905,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
+ Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
@@ -914,7 +914,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
+ Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateSub(LHS, RHS);
@@ -923,7 +923,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
+ Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateMul(LHS, RHS);
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index d534690..8662a82 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -73,10 +73,10 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
if (AI->getType() != ArgVTy) {
Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,
false);
- InitCall->setOperand(1,
+ InitCall->setOperand(2,
CastInst::Create(opcode, AI, ArgVTy, "argv.cast", InitCall));
} else {
- InitCall->setOperand(1, AI);
+ InitCall->setOperand(2, AI);
}
/* FALL THROUGH */
@@ -93,12 +93,12 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
opcode = CastInst::getCastOpcode(AI, true,
Type::getInt32Ty(Context), true);
- InitCall->setOperand(0,
+ InitCall->setOperand(1,
CastInst::Create(opcode, AI, Type::getInt32Ty(Context),
"argc.cast", InitCall));
} else {
AI->replaceAllUsesWith(InitCall);
- InitCall->setOperand(0, AI);
+ InitCall->setOperand(1, AI);
}
case 0: break;
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index d159c94..09c01d3 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -123,14 +123,14 @@ static Value *getPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
- return MI->getOperand(0);
+ return MI->getOperand(1);
switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
- return I->getOperand(0);
- case Intrinsic::lifetime_end:
return I->getOperand(1);
+ case Intrinsic::lifetime_end:
+ return I->getOperand(2);
}
}
@@ -152,7 +152,7 @@ static unsigned getStoreSize(Instruction *I, const TargetData *TD) {
case Intrinsic::init_trampoline:
return -1u;
case Intrinsic::lifetime_end:
- Len = I->getOperand(0);
+ Len = I->getOperand(1);
break;
}
}
@@ -287,7 +287,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(/*FIXME: Call*/Instruction *F, MemDepResult Dep) {
+bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
Instruction *Dependency = Dep.getInst();
@@ -297,7 +297,7 @@ bool DSE::handleFreeWithNonTrivialDependency(/*FIXME: Call*/Instruction *F, MemD
Value *DepPointer = getPointerOperand(Dependency)->getUnderlyingObject();
// Check for aliasing.
- if (AA.alias(F->getOperand(0), 1, DepPointer, 1) !=
+ if (AA.alias(F->getOperand(1), 1, DepPointer, 1) !=
AliasAnalysis::MustAlias)
return false;
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 5a1f20e..2c62815 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -271,7 +271,7 @@ Expression ValueTable::create_expression(CallInst* C) {
e.function = C->getCalledFunction();
e.opcode = Expression::CALL;
- for (CallInst::op_iterator I = C->op_begin(), E = C->op_end() - 1;
+ for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
I != E; ++I)
e.varargs.push_back(lookup_or_add(*I));
@@ -452,7 +452,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
return nextValueNumber++;
}
- for (unsigned i = 0, e = C->getNumOperands() - 1; i < e; ++i) {
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
uint32_t c_vn = lookup_or_add(C->getOperand(i));
uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
if (c_vn != cd_vn) {
@@ -508,7 +508,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 0, e = C->getNumOperands() - 1; i < e; ++i) {
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
uint32_t c_vn = lookup_or_add(C->getOperand(i));
uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
if (c_vn != cd_vn) {
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 3611b8e..3b305ae 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -744,7 +744,7 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
const Type *ArgTys[3] = { M->getRawDest()->getType(),
M->getRawSource()->getType(),
M->getLength()->getType() };
- M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys, 3));
+ M->setOperand(0,Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys, 3));
// MemDep may have over conservative information about this instruction, just
// conservatively flush it from the cache.
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index c58c858..061042a 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -254,7 +254,7 @@ bool SROA::performScalarRepl(Function &F) {
if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
DEBUG(dbgs() << " memcpy = " << *TheCopy << '\n');
- Constant *TheSrc = cast<Constant>(TheCopy->getOperand(1));
+ Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
TheCopy->eraseFromParent(); // Don't mutate the global.
AI->eraseFromParent();
@@ -404,11 +404,11 @@ void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
isSafeGEP(GEPI, AI, GEPOffset, Info);
if (!Info.isUnsafe)
isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
if (Length)
isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
- UI.getOperandNo() == 0, Info);
+ UI.getOperandNo() == 1, Info);
else
MarkUnsafe(Info);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -756,7 +756,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
}
// Process each element of the aggregate.
- Value *TheFn = MI->getCalledValue();
+ Value *TheFn = MI->getOperand(0);
const Type *BytePtrTy = MI->getRawDest()->getType();
bool SROADest = MI->getRawDest() == Inst;
@@ -814,7 +814,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If the stored element is zero (common case), just store a null
// constant.
Constant *StoreVal;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(1))) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
if (CI->isZero()) {
StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
@@ -877,7 +877,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Value *Ops[] = {
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
- ConstantInt::get(MI->getOperand(2)->getType(), EltSize), // Size
+ ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
// Align
ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
MI->getVolatileCst()
@@ -892,8 +892,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
- EltPtr, MI->getOperand(1), // Dest, Value,
- ConstantInt::get(MI->getOperand(2)->getType(), EltSize), // Size
+ EltPtr, MI->getOperand(2), // Dest, Value,
+ ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
Zero, // Align
ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
};
@@ -1737,12 +1737,12 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
if (isOffset) return false;
// If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 0) return false;
+ if (UI.getOperandNo() != 1) return false;
MemIntrinsic *MI = cast<MemIntrinsic>(U);
// If the source of the memcpy/move is not a constant global, reject it.
- if (!PointsToConstantGlobal(MI->getOperand(1)))
+ if (!PointsToConstantGlobal(MI->getOperand(2)))
return false;
// Otherwise, the transform is safe. Remember the copy instruction.
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 37f3b3f..b053cfc 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -110,8 +110,8 @@ struct StrCatOpt : public LibCallOptimization {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(0);
- Value *Src = CI->getOperand(1);
+ Value *Dst = CI->getOperand(1);
+ Value *Src = CI->getOperand(2);
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
@@ -162,12 +162,12 @@ struct StrNCatOpt : public StrCatOpt {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(0);
- Value *Src = CI->getOperand(1);
+ Value *Dst = CI->getOperand(1);
+ Value *Src = CI->getOperand(2);
uint64_t Len;
// We don't do anything if length is not constant
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(2)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
Len = LengthArg->getZExtValue();
else
return 0;
@@ -207,11 +207,11 @@ struct StrChrOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getReturnType())
return 0;
- Value *SrcStr = CI->getOperand(0);
+ Value *SrcStr = CI->getOperand(1);
// If the second operand is non-constant, see if we can compute the length
// of the input string and turn this into memchr.
- ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getOperand(1));
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getOperand(2));
if (CharC == 0) {
// These optimizations require TargetData.
if (!TD) return 0;
@@ -220,7 +220,7 @@ struct StrChrOpt : public LibCallOptimization {
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
- return EmitMemChr(SrcStr, CI->getOperand(1), // include nul.
+ return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
ConstantInt::get(TD->getIntPtrType(*Context), Len),
B, TD);
}
@@ -265,7 +265,7 @@ struct StrCmpOpt : public LibCallOptimization {
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Str1P = CI->getOperand(0), *Str2P = CI->getOperand(1);
+ Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
if (Str1P == Str2P) // strcmp(x,x) -> 0
return ConstantInt::get(CI->getType(), 0);
@@ -314,13 +314,13 @@ struct StrNCmpOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Str1P = CI->getOperand(0), *Str2P = CI->getOperand(1);
+ Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
return ConstantInt::get(CI->getType(), 0);
// Get the length argument if it is constant.
uint64_t Length;
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(2)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
Length = LengthArg->getZExtValue();
else
return 0;
@@ -365,7 +365,7 @@ struct StrCpyOpt : public LibCallOptimization {
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Dst = CI->getOperand(0), *Src = CI->getOperand(1);
+ Value *Dst = CI->getOperand(1), *Src = CI->getOperand(2);
if (Dst == Src) // strcpy(x,x) -> x
return Src;
@@ -381,7 +381,7 @@ struct StrCpyOpt : public LibCallOptimization {
if (OptChkCall)
EmitMemCpyChk(Dst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len),
- CI->getOperand(2), B, TD);
+ CI->getOperand(3), B, TD);
else
EmitMemCpy(Dst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len),
@@ -402,9 +402,9 @@ struct StrNCpyOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Dst = CI->getOperand(0);
- Value *Src = CI->getOperand(1);
- Value *LenOp = CI->getOperand(2);
+ Value *Dst = CI->getOperand(1);
+ Value *Src = CI->getOperand(2);
+ Value *LenOp = CI->getOperand(3);
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
@@ -452,7 +452,7 @@ struct StrLenOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy())
return 0;
- Value *Src = CI->getOperand(0);
+ Value *Src = CI->getOperand(1);
// Constant folding: strlen("xyz") -> 3
if (uint64_t Len = GetStringLength(Src))
@@ -477,7 +477,7 @@ struct StrToOpt : public LibCallOptimization {
!FT->getParamType(1)->isPointerTy())
return 0;
- Value *EndPtr = CI->getOperand(1);
+ Value *EndPtr = CI->getOperand(2);
if (isa<ConstantPointerNull>(EndPtr)) {
CI->setOnlyReadsMemory();
CI->addAttribute(1, Attribute::NoCapture);
@@ -500,17 +500,17 @@ struct StrStrOpt : public LibCallOptimization {
return 0;
// fold strstr(x, x) -> x.
- if (CI->getOperand(0) == CI->getOperand(1))
- return B.CreateBitCast(CI->getOperand(0), CI->getType());
+ if (CI->getOperand(1) == CI->getOperand(2))
+ return B.CreateBitCast(CI->getOperand(1), CI->getType());
// See if either input string is a constant string.
std::string SearchStr, ToFindStr;
- bool HasStr1 = GetConstantStringInfo(CI->getOperand(0), SearchStr);
- bool HasStr2 = GetConstantStringInfo(CI->getOperand(1), ToFindStr);
+ bool HasStr1 = GetConstantStringInfo(CI->getOperand(1), SearchStr);
+ bool HasStr2 = GetConstantStringInfo(CI->getOperand(2), ToFindStr);
// fold strstr(x, "") -> x.
if (HasStr2 && ToFindStr.empty())
- return B.CreateBitCast(CI->getOperand(0), CI->getType());
+ return B.CreateBitCast(CI->getOperand(1), CI->getType());
// If both strings are known, constant fold it.
if (HasStr1 && HasStr2) {
@@ -520,14 +520,14 @@ struct StrStrOpt : public LibCallOptimization {
return Constant::getNullValue(CI->getType());
// strstr("abcd", "bc") -> gep((char*)"abcd", 1)
- Value *Result = CastToCStr(CI->getOperand(0), B);
+ Value *Result = CastToCStr(CI->getOperand(1), B);
Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
return B.CreateBitCast(Result, CI->getType());
}
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1)
- return B.CreateBitCast(EmitStrChr(CI->getOperand(0), ToFindStr[0], B, TD),
+ return B.CreateBitCast(EmitStrChr(CI->getOperand(1), ToFindStr[0], B, TD),
CI->getType());
return 0;
}
@@ -545,13 +545,13 @@ struct MemCmpOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy(32))
return 0;
- Value *LHS = CI->getOperand(0), *RHS = CI->getOperand(1);
+ Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
if (LHS == RHS) // memcmp(s,s,x) -> 0
return Constant::getNullValue(CI->getType());
// Make sure we have a constant length.
- ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getOperand(2));
+ ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getOperand(3));
if (!LenC) return 0;
uint64_t Len = LenC->getZExtValue();
@@ -595,9 +595,9 @@ struct MemCpyOpt : public LibCallOptimization {
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
- EmitMemCpy(CI->getOperand(0), CI->getOperand(1),
- CI->getOperand(2), 1, false, B, TD);
- return CI->getOperand(0);
+ EmitMemCpy(CI->getOperand(1), CI->getOperand(2),
+ CI->getOperand(3), 1, false, B, TD);
+ return CI->getOperand(1);
}
};
@@ -617,9 +617,9 @@ struct MemMoveOpt : public LibCallOptimization {
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
- EmitMemMove(CI->getOperand(0), CI->getOperand(1),
- CI->getOperand(2), 1, false, B, TD);
- return CI->getOperand(0);
+ EmitMemMove(CI->getOperand(1), CI->getOperand(2),
+ CI->getOperand(3), 1, false, B, TD);
+ return CI->getOperand(1);
}
};
@@ -639,10 +639,10 @@ struct MemSetOpt : public LibCallOptimization {
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getOperand(1), Type::getInt8Ty(*Context),
- false);
- EmitMemSet(CI->getOperand(0), Val, CI->getOperand(2), false, B, TD);
- return CI->getOperand(0);
+ Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context),
+ false);
+ EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
+ return CI->getOperand(1);
}
};
@@ -663,7 +663,7 @@ struct PowOpt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op1 = CI->getOperand(0), *Op2 = CI->getOperand(1);
+ Value *Op1 = CI->getOperand(1), *Op2 = CI->getOperand(2);
if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
if (Op1C->isExactlyValue(1.0)) // pow(1.0, x) -> 1.0
return Op1C;
@@ -717,7 +717,7 @@ struct Exp2Opt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op = CI->getOperand(0);
+ Value *Op = CI->getOperand(1);
// Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
// Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < 32
Value *LdExpArg = 0;
@@ -769,7 +769,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
return 0;
// If this is something like 'floor((double)floatval)', convert to floorf.
- FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getOperand(0));
+ FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getOperand(1));
if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
return 0;
@@ -798,7 +798,7 @@ struct FFSOpt : public LibCallOptimization {
!FT->getParamType(0)->isIntegerTy())
return 0;
- Value *Op = CI->getOperand(0);
+ Value *Op = CI->getOperand(1);
// Constant fold.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
@@ -834,7 +834,7 @@ struct IsDigitOpt : public LibCallOptimization {
return 0;
// isdigit(c) -> (c-'0') <u 10
- Value *Op = CI->getOperand(0);
+ Value *Op = CI->getOperand(1);
Op = B.CreateSub(Op, ConstantInt::get(Type::getInt32Ty(*Context), '0'),
"isdigittmp");
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 10),
@@ -855,7 +855,7 @@ struct IsAsciiOpt : public LibCallOptimization {
return 0;
// isascii(c) -> c <u 128
- Value *Op = CI->getOperand(0);
+ Value *Op = CI->getOperand(1);
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 128),
"isascii");
return B.CreateZExt(Op, CI->getType());
@@ -874,7 +874,7 @@ struct AbsOpt : public LibCallOptimization {
return 0;
// abs(x) -> x >s -1 ? x : -x
- Value *Op = CI->getOperand(0);
+ Value *Op = CI->getOperand(1);
Value *Pos = B.CreateICmpSGT(Op,
Constant::getAllOnesValue(Op->getType()),
"ispos");
@@ -896,7 +896,7 @@ struct ToAsciiOpt : public LibCallOptimization {
return 0;
// isascii(c) -> c & 0x7f
- return B.CreateAnd(CI->getOperand(0),
+ return B.CreateAnd(CI->getOperand(1),
ConstantInt::get(CI->getType(),0x7F));
}
};
@@ -919,7 +919,7 @@ struct PrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(0), FormatStr))
+ if (!GetConstantStringInfo(CI->getOperand(1), FormatStr))
return 0;
// Empty format string -> noop.
@@ -951,10 +951,10 @@ struct PrintFOpt : public LibCallOptimization {
}
// Optimize specific format strings.
- // printf("%c", chr) --> putchar(chr)
+ // printf("%c", chr) --> putchar(*(i8*)dst)
if (FormatStr == "%c" && CI->getNumOperands() > 2 &&
- CI->getOperand(1)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getOperand(1), B, TD);
+ CI->getOperand(2)->getType()->isIntegerTy()) {
+ Value *Res = EmitPutChar(CI->getOperand(2), B, TD);
if (CI->use_empty()) return CI;
return B.CreateIntCast(Res, CI->getType(), true);
@@ -962,9 +962,9 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%s\n", str) --> puts(str)
if (FormatStr == "%s\n" && CI->getNumOperands() > 2 &&
- CI->getOperand(1)->getType()->isPointerTy() &&
+ CI->getOperand(2)->getType()->isPointerTy() &&
CI->use_empty()) {
- EmitPutS(CI->getOperand(1), B, TD);
+ EmitPutS(CI->getOperand(2), B, TD);
return CI;
}
return 0;
@@ -985,7 +985,7 @@ struct SPrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(1), FormatStr))
+ if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
return 0;
// If we just have a format string (nothing else crazy) transform it.
@@ -1000,7 +1000,7 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
- EmitMemCpy(CI->getOperand(0), CI->getOperand(1), // Copy the nul byte.
+ EmitMemCpy(CI->getOperand(1), CI->getOperand(2), // Copy the nul byte.
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()+1), 1, false, B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
@@ -1014,10 +1014,10 @@ struct SPrintFOpt : public LibCallOptimization {
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
- if (!CI->getOperand(2)->getType()->isIntegerTy()) return 0;
- Value *V = B.CreateTrunc(CI->getOperand(2),
+ if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
+ Value *V = B.CreateTrunc(CI->getOperand(3),
Type::getInt8Ty(*Context), "char");
- Value *Ptr = CastToCStr(CI->getOperand(0), B);
+ Value *Ptr = CastToCStr(CI->getOperand(1), B);
B.CreateStore(V, Ptr);
Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1),
"nul");
@@ -1031,13 +1031,13 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
- if (!CI->getOperand(2)->getType()->isPointerTy()) return 0;
+ if (!CI->getOperand(3)->getType()->isPointerTy()) return 0;
- Value *Len = EmitStrLen(CI->getOperand(2), B, TD);
+ Value *Len = EmitStrLen(CI->getOperand(3), B, TD);
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
- EmitMemCpy(CI->getOperand(0), CI->getOperand(2), IncLen, 1, false, B, TD);
+ EmitMemCpy(CI->getOperand(1), CI->getOperand(3), IncLen, 1, false, B, TD);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
@@ -1061,8 +1061,8 @@ struct FWriteOpt : public LibCallOptimization {
return 0;
// Get the element size and count.
- ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getOperand(1));
- ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getOperand(2));
+ ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getOperand(2));
+ ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getOperand(3));
if (!SizeC || !CountC) return 0;
uint64_t Bytes = SizeC->getZExtValue()*CountC->getZExtValue();
@@ -1072,8 +1072,8 @@ struct FWriteOpt : public LibCallOptimization {
// If this is writing one byte, turn it into fputc.
if (Bytes == 1) { // fwrite(S,1,1,F) -> fputc(S[0],F)
- Value *Char = B.CreateLoad(CastToCStr(CI->getOperand(0), B), "char");
- EmitFPutC(Char, CI->getOperand(3), B, TD);
+ Value *Char = B.CreateLoad(CastToCStr(CI->getOperand(1), B), "char");
+ EmitFPutC(Char, CI->getOperand(4), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
@@ -1097,11 +1097,11 @@ struct FPutsOpt : public LibCallOptimization {
return 0;
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
- uint64_t Len = GetStringLength(CI->getOperand(0));
+ uint64_t Len = GetStringLength(CI->getOperand(1));
if (!Len) return 0;
- EmitFWrite(CI->getOperand(0),
+ EmitFWrite(CI->getOperand(1),
ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getOperand(1), B, TD);
+ CI->getOperand(2), B, TD);
return CI; // Known to have no uses (see above).
}
};
@@ -1120,7 +1120,7 @@ struct FPrintFOpt : public LibCallOptimization {
// All the optimizations depend on the format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(1), FormatStr))
+ if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
return 0;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
@@ -1132,10 +1132,10 @@ struct FPrintFOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- EmitFWrite(CI->getOperand(1),
+ EmitFWrite(CI->getOperand(2),
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()),
- CI->getOperand(0), B, TD);
+ CI->getOperand(1), B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1146,17 +1146,17 @@ struct FPrintFOpt : public LibCallOptimization {
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
- // fprintf(F, "%c", chr) --> fputc(chr, F)
- if (!CI->getOperand(2)->getType()->isIntegerTy()) return 0;
- EmitFPutC(CI->getOperand(2), CI->getOperand(0), B, TD);
+ // fprintf(F, "%c", chr) --> *(i8*)dst = chr
+ if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
+ EmitFPutC(CI->getOperand(3), CI->getOperand(1), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
if (FormatStr[1] == 's') {
- // fprintf(F, "%s", str) --> fputs(str, F)
- if (!CI->getOperand(2)->getType()->isPointerTy() || !CI->use_empty())
+ // fprintf(F, "%s", str) -> fputs(str, F)
+ if (!CI->getOperand(3)->getType()->isPointerTy() || !CI->use_empty())
return 0;
- EmitFPutS(CI->getOperand(2), CI->getOperand(0), B, TD);
+ EmitFPutS(CI->getOperand(3), CI->getOperand(1), B, TD);
return CI;
}
return 0;
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 667e4d9..162d902 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -250,7 +250,7 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
// If we are passing this argument into call as the corresponding
// argument operand, then the argument is dynamically constant.
// Otherwise, we cannot transform this function safely.
- if (CI->getOperand(ArgNo) == Arg)
+ if (CI->getOperand(ArgNo+1) == Arg)
return true;
}
@@ -442,7 +442,7 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// required PHI nodes, add entries into the PHI node for the actual
// parameters passed into the tail-recursive call.
for (unsigned i = 0, e = CI->getNumOperands()-1; i != e; ++i)
- ArgumentPHIs[i]->addIncoming(CI->getOperand(i), BB);
+ ArgumentPHIs[i]->addIncoming(CI->getOperand(i+1), BB);
// If we are introducing an accumulator variable to eliminate the recursion,
// do so now. Note that we _know_ that no subsequent tail recursion
diff --git a/lib/Transforms/Utils/AddrModeMatcher.cpp b/lib/Transforms/Utils/AddrModeMatcher.cpp
index 474b5fd..ea9d1c1 100644
--- a/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -382,7 +382,7 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
std::vector<InlineAsm::ConstraintInfo>
Constraints = IA->ParseConstraints();
- unsigned ArgNo = 0; // ArgNo - The operand of the CallInst.
+ unsigned ArgNo = 1; // ArgNo - The operand of the CallInst.
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
@@ -450,7 +450,7 @@ static bool FindAllMemoryUses(Instruction *I,
if (CallInst *CI = dyn_cast<CallInst>(U)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
- if (!IA) return true;
+ if (IA == 0) return true;
// If this is a memory operand, we're cool, otherwise bail out.
if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 1e20430..767fa3a 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -395,11 +395,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(3, 2, false)) {
- EmitMemCpy(CI->getOperand(0), CI->getOperand(1), CI->getOperand(2),
+
+ if (isFoldable(4, 3, false)) {
+ EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
1, false, B, TD);
- replaceCall(CI->getOperand(0));
+ replaceCall(CI->getOperand(1));
return true;
}
return false;
@@ -418,11 +418,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(3, 2, false)) {
- EmitMemMove(CI->getOperand(0), CI->getOperand(1), CI->getOperand(2),
+
+ if (isFoldable(4, 3, false)) {
+ EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
1, false, B, TD);
- replaceCall(CI->getOperand(0));
+ replaceCall(CI->getOperand(1));
return true;
}
return false;
@@ -436,12 +436,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(3, 2, false)) {
- Value *Val = B.CreateIntCast(CI->getOperand(1), B.getInt8Ty(),
+
+ if (isFoldable(4, 3, false)) {
+ Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
false);
- EmitMemSet(CI->getOperand(0), Val, CI->getOperand(2), false, B, TD);
- replaceCall(CI->getOperand(0));
+ EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
+ replaceCall(CI->getOperand(1));
return true;
}
return false;
@@ -462,8 +462,8 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
// st[rp]cpy_chk call which may fail at runtime if the size is too long.
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
- if (isFoldable(2, 1, true)) {
- Value *Ret = EmitStrCpy(CI->getOperand(0), CI->getOperand(1), B, TD,
+ if (isFoldable(3, 2, true)) {
+ Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD,
Name.substr(2, 6));
replaceCall(Ret);
return true;
@@ -479,10 +479,10 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
!FT->getParamType(2)->isIntegerTy() ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(3, 2, false)) {
- Value *Ret = EmitStrNCpy(CI->getOperand(0), CI->getOperand(1),
- CI->getOperand(2), B, TD, Name.substr(2, 7));
+
+ if (isFoldable(4, 3, false)) {
+ Value *Ret = EmitStrNCpy(CI->getOperand(1), CI->getOperand(2),
+ CI->getOperand(3), B, TD, Name.substr(2, 7));
replaceCall(Ret);
return true;
}
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index a7828a4..75c9ccd 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -66,7 +66,7 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// Next, create the new invoke instruction, inserting it at the end
// of the old basic block.
- SmallVector<Value*, 8> InvokeArgs(CI->op_begin(), CI->op_end() - 1);
+ SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
InvokeInst *II =
InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
InvokeArgs.begin(), InvokeArgs.end(),