path: root/lib/Transforms/Scalar
author      Owen Anderson <resistor@mac.com>        2009-08-13 21:58:54 +0000
committer   Owen Anderson <resistor@mac.com>        2009-08-13 21:58:54 +0000
commit      1d0be15f89cb5056e20e2d24faa8d6afb1573bca (patch)
tree        2cdabe223bfce83bd12e10dd557147a2f68c9bf8 /lib/Transforms/Scalar
parent      d163e8b14c8aa5bbbb129e3f0dffdbe7213a3c72 (diff)
download    external_llvm-1d0be15f89cb5056e20e2d24faa8d6afb1573bca.zip
            external_llvm-1d0be15f89cb5056e20e2d24faa8d6afb1573bca.tar.gz
            external_llvm-1d0be15f89cb5056e20e2d24faa8d6afb1573bca.tar.bz2
Push LLVMContexts through the IntegerType APIs.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78948 91177308-0d34-0410-b5e6-96231b3b80d8
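
A minimal sketch of the pattern this commit applies throughout lib/Transforms/Scalar, assuming the post-commit headers from this tree; the helper functions below are hypothetical and only illustrate the new calling convention. Primitive types are no longer reached through global singletons such as Type::Int32Ty; they are requested from an LLVMContext, usually recovered from a nearby IR object.

// Illustrative only; relies on the context-taking accessors used in this diff.
#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Hypothetical helper: fetch i32 through the context of an existing instruction.
static const Type *getInt32For(Instruction &I) {
  LLVMContext &Ctx = I.getContext();   // the context flows from the IR object
  return Type::getInt32Ty(Ctx);        // previously: Type::Int32Ty
}

// Hypothetical helper: arbitrary-width integer types now also take the context.
static const IntegerType *typeForBytes(LLVMContext &Ctx, unsigned Bytes) {
  return IntegerType::get(Ctx, Bytes * 8);   // previously: IntegerType::get(Bytes * 8)
}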
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp               3
-rw-r--r--  lib/Transforms/Scalar/CondPropagate.cpp                2
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                          3
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp              18
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp       212
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp               11
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                         2
-rw-r--r--  lib/Transforms/Scalar/LoopIndexSplit.cpp               3
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp                 3
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp          21
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp                20
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp             20
-rw-r--r--  lib/Transforms/Scalar/PredicateSimplifier.cpp          9
-rw-r--r--  lib/Transforms/Scalar/Reg2Mem.cpp                      3
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                        12
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp        59
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp              2
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp            183
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp     2
19 files changed, 316 insertions, 272 deletions
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 4fe9bcf..9a59dca 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -599,7 +599,8 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
} else {
DEBUG(errs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
- const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
+ const Type *IntPtrTy =
+ TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
Value *Result = 0;
// Start with the scale value.
diff --git a/lib/Transforms/Scalar/CondPropagate.cpp b/lib/Transforms/Scalar/CondPropagate.cpp
index c85d031..88b5652 100644
--- a/lib/Transforms/Scalar/CondPropagate.cpp
+++ b/lib/Transforms/Scalar/CondPropagate.cpp
@@ -124,7 +124,7 @@ void CondProp::SimplifyBlock(BasicBlock *BB) {
// Succ is now dead, but we cannot delete it without potentially
// invalidating iterators elsewhere. Just insert an unreachable
// instruction in it and delete this block later on.
- new UnreachableInst(Succ);
+ new UnreachableInst(BB->getContext(), Succ);
DeadBlocks.push_back(Succ);
MadeChange = true;
}
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 21a5289..c782f7d 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -1562,7 +1562,8 @@ bool GVN::performPRE(Function& F) {
Instruction *CurInst = BI++;
if (isa<AllocationInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
- isa<PHINode>(CurInst) || (CurInst->getType() == Type::VoidTy) ||
+ isa<PHINode>(CurInst) ||
+ (CurInst->getType() == Type::getVoidTy(F.getContext())) ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
isa<DbgInfoIntrinsic>(CurInst))
continue;
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index b33c805..0f8a878 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -634,7 +634,8 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
// Check incoming value.
ConstantFP *InitValue = dyn_cast<ConstantFP>(PH->getIncomingValue(IncomingEdge));
if (!InitValue) return;
- uint64_t newInitValue = Type::Int32Ty->getPrimitiveSizeInBits();
+ uint64_t newInitValue =
+ Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
if (!convertToInt(InitValue->getValueAPF(), &newInitValue))
return;
@@ -650,7 +651,8 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
IncrVIndex = 0;
IncrValue = dyn_cast<ConstantFP>(Incr->getOperand(IncrVIndex));
if (!IncrValue) return;
- uint64_t newIncrValue = Type::Int32Ty->getPrimitiveSizeInBits();
+ uint64_t newIncrValue =
+ Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
if (!convertToInt(IncrValue->getValueAPF(), &newIncrValue))
return;
@@ -681,7 +683,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
EVIndex = 0;
EV = dyn_cast<ConstantFP>(EC->getOperand(EVIndex));
if (!EV) return;
- uint64_t intEV = Type::Int32Ty->getPrimitiveSizeInBits();
+ uint64_t intEV = Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
if (!convertToInt(EV->getValueAPF(), &intEV))
return;
@@ -714,20 +716,22 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return;
// Insert new integer induction variable.
- PHINode *NewPHI = PHINode::Create(Type::Int32Ty,
+ PHINode *NewPHI = PHINode::Create(Type::getInt32Ty(PH->getContext()),
PH->getName()+".int", PH);
- NewPHI->addIncoming(ConstantInt::get(Type::Int32Ty, newInitValue),
+ NewPHI->addIncoming(ConstantInt::get(Type::getInt32Ty(PH->getContext()),
+ newInitValue),
PH->getIncomingBlock(IncomingEdge));
Value *NewAdd = BinaryOperator::CreateAdd(NewPHI,
- ConstantInt::get(Type::Int32Ty,
+ ConstantInt::get(Type::getInt32Ty(PH->getContext()),
newIncrValue),
Incr->getName()+".int", Incr);
NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge));
// The back edge is edge 1 of newPHI, whatever it may have been in the
// original PHI.
- ConstantInt *NewEV = ConstantInt::get(Type::Int32Ty, intEV);
+ ConstantInt *NewEV = ConstantInt::get(Type::getInt32Ty(PH->getContext()),
+ intEV);
Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV);
Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1));
ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 04c225f..7a98b48 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -435,7 +435,7 @@ static bool isOnlyUse(Value *V) {
static const Type *getPromotedType(const Type *Ty) {
if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
if (ITy->getBitWidth() < 32)
- return Type::Int32Ty;
+ return Type::getInt32Ty(Ty->getContext());
}
return Ty;
}
@@ -473,12 +473,14 @@ isEliminableCastPair(
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy,
- TD ? TD->getIntPtrType() : 0);
+ TD ? TD->getIntPtrType(CI->getContext()) : 0);
// We don't want to form an inttoptr or ptrtoint that converts to an integer
// type that differs from the pointer size.
- if ((Res == Instruction::IntToPtr && SrcTy != TD->getIntPtrType()) ||
- (Res == Instruction::PtrToInt && DstTy != TD->getIntPtrType()))
+ if ((Res == Instruction::IntToPtr &&
+ SrcTy != TD->getIntPtrType(CI->getContext())) ||
+ (Res == Instruction::PtrToInt &&
+ DstTy != TD->getIntPtrType(CI->getContext())))
Res = 0;
return Instruction::CastOps(Res);
@@ -1587,9 +1589,9 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
std::vector<Constant*> Elts;
for (unsigned i = 0; i < VWidth; ++i) {
if (UndefElts[i])
- Elts.push_back(UndefValue::get(Type::Int32Ty));
+ Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
else
- Elts.push_back(ConstantInt::get(Type::Int32Ty,
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
Shuffle->getMaskValue(i)));
}
I->setOperand(2, ConstantVector::get(Elts));
@@ -1720,9 +1722,9 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
Value *RHS = II->getOperand(2);
// Extract the element as scalars.
LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
- ConstantInt::get(Type::Int32Ty, 0U, false), "tmp"), *II);
+ ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
- ConstantInt::get(Type::Int32Ty, 0U, false), "tmp"), *II);
+ ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
switch (II->getIntrinsicID()) {
default: llvm_unreachable("Case stmts out of sync!");
@@ -1741,7 +1743,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
Instruction *New =
InsertElementInst::Create(
UndefValue::get(II->getType()), TmpV,
- ConstantInt::get(Type::Int32Ty, 0U, false), II->getName());
+ ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), II->getName());
InsertNewInstBefore(New, *II);
AddSoonDeadInstToWorklist(*II, 0);
return New;
@@ -1912,7 +1914,7 @@ static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
if (isa<Constant>(TV) || isa<Constant>(FV)) {
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType() == Type::Int1Ty) return 0;
+ if (SI->getType() == Type::getInt1Ty(*IC->getContext())) return 0;
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
@@ -2066,7 +2068,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// zext(bool) + C -> bool ? C + 1 : C
if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
- if (ZI->getSrcTy() == Type::Int1Ty)
+ if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
}
@@ -2109,9 +2111,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
const Type *MiddleType = 0;
switch (Size) {
default: break;
- case 32: MiddleType = Type::Int32Ty; break;
- case 16: MiddleType = Type::Int16Ty; break;
- case 8: MiddleType = Type::Int8Ty; break;
+ case 32: MiddleType = Type::getInt32Ty(*Context); break;
+ case 16: MiddleType = Type::getInt16Ty(*Context); break;
+ case 8: MiddleType = Type::getInt8Ty(*Context); break;
}
if (MiddleType) {
Instruction *NewTrunc = new TruncInst(XorLHS, MiddleType, "sext");
@@ -2121,7 +2123,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
- if (I.getType() == Type::Int1Ty)
+ if (I.getType() == Type::getInt1Ty(*Context))
return BinaryOperator::CreateXor(LHS, RHS);
// X + X --> X << 1
@@ -2466,11 +2468,11 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// C - zext(bool) -> bool ? C - 1 : C
if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
- if (ZI->getSrcTy() == Type::Int1Ty)
+ if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
}
- if (I.getType() == Type::Int1Ty)
+ if (I.getType() == Type::getInt1Ty(*Context))
return BinaryOperator::CreateXor(Op0, Op1);
if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
@@ -2726,7 +2728,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
}
- if (I.getType() == Type::Int1Ty)
+ if (I.getType() == Type::getInt1Ty(*Context))
return BinaryOperator::CreateAnd(Op0, I.getOperand(1));
// If one of the operands of the multiply is a cast from a boolean value, then
@@ -2735,11 +2737,11 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
// formed.
CastInst *BoolCast = 0;
if (ZExtInst *CI = dyn_cast<ZExtInst>(Op0))
- if (CI->getOperand(0)->getType() == Type::Int1Ty)
+ if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
BoolCast = CI;
if (!BoolCast)
if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(1)))
- if (CI->getOperand(0)->getType() == Type::Int1Ty)
+ if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
BoolCast = CI;
if (BoolCast) {
if (ICmpInst *SCI = dyn_cast<ICmpInst>(BoolCast->getOperand(0))) {
@@ -2974,7 +2976,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
// It can't be division by zero, hence it must be division by one.
- if (I.getType() == Type::Int1Ty)
+ if (I.getType() == Type::getInt1Ty(*Context))
return ReplaceInstUsesWith(I, Op0);
if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
@@ -5335,7 +5337,7 @@ static bool AddWithOverflow(Constant *&Result, Constant *In1,
if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
+ Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
if (HasAddOverflow(ExtractElement(Result, Idx, Context),
ExtractElement(In1, Idx, Context),
ExtractElement(In2, Idx, Context),
@@ -5371,7 +5373,7 @@ static bool SubWithOverflow(Constant *&Result, Constant *In1,
if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
+ Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
if (HasSubOverflow(ExtractElement(Result, Idx, Context),
ExtractElement(In1, Idx, Context),
ExtractElement(In2, Idx, Context),
@@ -5392,7 +5394,7 @@ static bool SubWithOverflow(Constant *&Result, Constant *In1,
static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
TargetData &TD = *IC.getTargetData();
gep_type_iterator GTI = gep_type_begin(GEP);
- const Type *IntPtrTy = TD.getIntPtrType();
+ const Type *IntPtrTy = TD.getIntPtrType(I.getContext());
LLVMContext *Context = IC.getContext();
Value *Result = Constant::getNullValue(IntPtrTy);
@@ -5542,7 +5544,8 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
// we don't need to bother extending: the extension won't affect where the
// computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
- VariableIdx = new TruncInst(VariableIdx, TD.getIntPtrType(),
+ VariableIdx = new TruncInst(VariableIdx,
+ TD.getIntPtrType(VariableIdx->getContext()),
VariableIdx->getName(), &I);
return VariableIdx;
}
@@ -5563,7 +5566,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
return 0;
// Okay, we can do this evaluation. Start by converting the index to intptr.
- const Type *IntPtrTy = TD.getIntPtrType();
+ const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
if (VariableIdx->getType() != IntPtrTy)
VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
true /*SExt*/,
@@ -5661,7 +5664,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (NumDifferences == 0) // SAME GEP?
return ReplaceInstUsesWith(I, // No comparison is needed here.
- ConstantInt::get(Type::Int1Ty,
+ ConstantInt::get(Type::getInt1Ty(*Context),
ICmpInst::isTrueWhenEqual(Cond)));
else if (NumDifferences == 1) {
@@ -5923,7 +5926,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
}
if (isa<UndefValue>(Op1)) // fcmp pred X, undef -> undef
- return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));
+ return ReplaceInstUsesWith(I, UndefValue::get(Type::getInt1Ty(*Context)));
// Handle fcmp with constant RHS
if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
@@ -5993,11 +5996,11 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// icmp X, X
if (Op0 == Op1)
- return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
+ return ReplaceInstUsesWith(I, ConstantInt::get(Type::getInt1Ty(*Context),
I.isTrueWhenEqual()));
if (isa<UndefValue>(Op1)) // X icmp undef -> undef
- return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));
+ return ReplaceInstUsesWith(I, UndefValue::get(Type::getInt1Ty(*Context)));
// icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
// addresses never equal each other! We already know that Op0 != Op1.
@@ -6005,11 +6008,11 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
isa<ConstantPointerNull>(Op0)) &&
(isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
isa<ConstantPointerNull>(Op1)))
- return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
+ return ReplaceInstUsesWith(I, ConstantInt::get(Type::getInt1Ty(*Context),
!I.isTrueWhenEqual()));
// icmp's with boolean values can always be turned into bitwise operations
- if (Ty == Type::Int1Ty) {
+ if (Ty == Type::getInt1Ty(*Context)) {
switch (I.getPredicate()) {
default: llvm_unreachable("Invalid icmp instruction!");
case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
@@ -6348,7 +6351,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// can assume it is successful and remove the malloc.
if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
AddToWorkList(LHSI);
- return ReplaceInstUsesWith(I, ConstantInt::get(Type::Int1Ty,
+ return ReplaceInstUsesWith(I, ConstantInt::get(Type::getInt1Ty(*Context),
!I.isTrueWhenEqual()));
}
break;
@@ -6933,7 +6936,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
ShAmt);
if (Comp != RHS) {// Comparing against a bit that we know is zero.
bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
- Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
+ Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
return ReplaceInstUsesWith(ICI, Cst);
}
@@ -6997,7 +7000,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
if (Comp != RHSV) { // Comparing against a bit that we know is zero.
bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
- Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE);
+ Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
return ReplaceInstUsesWith(ICI, Cst);
}
@@ -7139,7 +7142,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
Constant *NotCI = ConstantExpr::getNot(RHS);
if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
return ReplaceInstUsesWith(ICI,
- ConstantInt::get(Type::Int1Ty,
+ ConstantInt::get(Type::getInt1Ty(*Context),
isICMP_NE));
}
break;
@@ -7150,7 +7153,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// comparison can never succeed!
if ((RHSV & ~BOC->getValue()) != 0)
return ReplaceInstUsesWith(ICI,
- ConstantInt::get(Type::Int1Ty,
+ ConstantInt::get(Type::getInt1Ty(*Context),
isICMP_NE));
// If we have ((X & C) == C), turn it into ((X & C) != 0).
@@ -7692,7 +7695,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
case 32 :
case 64 :
case 128:
- SExtType = IntegerType::get(Ty->getBitWidth() - ShiftAmt1);
+ SExtType = IntegerType::get(*Context, Ty->getBitWidth() - ShiftAmt1);
break;
default: break;
}
@@ -7774,11 +7777,11 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
///
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
int &Offset, LLVMContext *Context) {
- assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!");
+ assert(Val->getType() == Type::getInt32Ty(*Context) && "Unexpected allocation size type!");
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
Offset = CI->getZExtValue();
Scale = 0;
- return ConstantInt::get(Type::Int32Ty, 0);
+ return ConstantInt::get(Type::getInt32Ty(*Context), 0);
} else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
if (I->getOpcode() == Instruction::Shl) {
@@ -7875,7 +7878,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Amt = NumElements;
} else {
// If the allocation size is constant, form a constant mul expression
- Amt = ConstantInt::get(Type::Int32Ty, Scale);
+ Amt = ConstantInt::get(Type::getInt32Ty(*Context), Scale);
if (isa<ConstantInt>(NumElements))
Amt = ConstantExpr::getMul(cast<ConstantInt>(NumElements),
cast<ConstantInt>(Amt));
@@ -7887,7 +7890,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
}
if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
- Value *Off = ConstantInt::get(Type::Int32Ty, Offset, true);
+ Value *Off = ConstantInt::get(Type::getInt32Ty(*Context), Offset, true);
Instruction *Tmp = BinaryOperator::CreateAdd(Amt, Off, "tmp");
Amt = InsertNewInstBefore(Tmp, AI);
}
@@ -8173,7 +8176,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- const Type *IntPtrTy = TD->getIntPtrType();
+ const Type *IntPtrTy = TD->getIntPtrType(*Context);
int64_t FirstIdx = 0;
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
@@ -8202,7 +8205,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
"Offset must stay within the indexed type");
unsigned Elt = SL->getElementContainingOffset(Offset);
- NewIndices.push_back(ConstantInt::get(Type::Int32Ty, Elt));
+ NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Elt));
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
@@ -8579,7 +8582,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
// (X&4) == 2 --> false
// (X&4) != 2 --> true
- Constant *Res = ConstantInt::get(Type::Int1Ty, isNE);
+ Constant *Res = ConstantInt::get(Type::getInt1Ty(*Context), isNE);
Res = ConstantExpr::getZExt(Res, CI.getType());
return ReplaceInstUsesWith(CI, Res);
}
@@ -8708,7 +8711,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
Value *Src = CI.getOperand(0);
// Canonicalize sign-extend from i1 to a select.
- if (Src->getType() == Type::Int1Ty)
+ if (Src->getType() == Type::getInt1Ty(*Context))
return SelectInst::Create(Src,
Constant::getAllOnesValue(CI.getType()),
Constant::getNullValue(CI.getType()));
@@ -8796,12 +8799,12 @@ static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
// that can accurately represent it. This allows us to turn
// (float)((double)X+2.0) into x+2.0f.
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
- if (CFP->getType() == Type::PPC_FP128Ty)
+ if (CFP->getType() == Type::getPPC_FP128Ty(*Context))
return V; // No constant folding of this.
// See if the value can be truncated to float and then reextended.
if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
return V;
- if (CFP->getType() == Type::DoubleTy)
+ if (CFP->getType() == Type::getDoubleTy(*Context))
return V; // Won't shrink.
if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
return V;
@@ -8912,7 +8915,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
if (TD &&
CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
Value *P = InsertNewInstBefore(new PtrToIntInst(CI.getOperand(0),
- TD->getIntPtrType(),
+ TD->getIntPtrType(CI.getContext()),
"tmp"), CI);
return new TruncInst(P, CI.getType());
}
@@ -8930,7 +8933,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
CI.getOperand(0)->getType()->getScalarSizeInBits() >
TD->getPointerSizeInBits()) {
Value *P = InsertNewInstBefore(new TruncInst(CI.getOperand(0),
- TD->getIntPtrType(),
+ TD->getIntPtrType(CI.getContext()),
"tmp"), CI);
return new IntToPtrInst(P, CI.getType());
}
@@ -8981,7 +8984,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If the source and destination are pointers, and this cast is equivalent
// to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
// This can enhance SROA and other transforms that want type-safe pointers.
- Constant *ZeroUInt = Constant::getNullValue(Type::Int32Ty);
+ Constant *ZeroUInt = Constant::getNullValue(Type::getInt32Ty(*Context));
unsigned NumZeros = 0;
while (SrcElTy != DstElTy &&
isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
@@ -9007,7 +9010,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Value *Elem = InsertCastBefore(Instruction::BitCast, Src,
DestVTy->getElementType(), CI);
return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
- Constant::getNullValue(Type::Int32Ty));
+ Constant::getNullValue(Type::getInt32Ty(*Context)));
}
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
}
@@ -9017,7 +9020,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (SrcVTy->getNumElements() == 1) {
if (!isa<VectorType>(DestTy)) {
Instruction *Elem =
- ExtractElementInst::Create(Src, Constant::getNullValue(Type::Int32Ty));
+ ExtractElementInst::Create(Src, Constant::getNullValue(Type::getInt32Ty(*Context)));
InsertNewInstBefore(Elem, CI);
return CastInst::Create(Instruction::BitCast, Elem, DestTy);
}
@@ -9401,7 +9404,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return ReplaceInstUsesWith(SI, FalseVal);
}
- if (SI.getType() == Type::Int1Ty) {
+ if (SI.getType() == Type::getInt1Ty(*Context)) {
if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
if (C->getZExtValue()) {
// Change: A = select B, true, C --> A = or B, C
@@ -9708,7 +9711,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// Use an integer load+store unless we can find something better.
Type *NewPtrTy =
- PointerType::getUnqual(IntegerType::get(Size<<3));
+ PointerType::getUnqual(IntegerType::get(*Context, Size<<3));
// Memcpy forces the use of i8* for the source and destination. That means
// that if you're using memcpy to move one double around, you'll get a cast
@@ -9769,7 +9772,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// Extract the length and alignment and fill if they are constant.
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
- if (!LenC || !FillC || FillC->getType() != Type::Int8Ty)
+ if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(*Context))
return 0;
uint64_t Len = LenC->getZExtValue();
Alignment = MI->getAlignment();
@@ -9779,7 +9782,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
- const Type *ITy = IntegerType::get(Len*8); // n=1 -> i8.
+ const Type *ITy = IntegerType::get(*Context, Len*8); // n=1 -> i8.
Value *Dest = MI->getDest();
Dest = InsertBitCastBefore(Dest, PointerType::getUnqual(ITy), *MI);
@@ -9962,14 +9965,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (ExtractedElts[Idx] == 0) {
Instruction *Elt =
ExtractElementInst::Create(Idx < 16 ? Op0 : Op1,
- ConstantInt::get(Type::Int32Ty, Idx&15, false), "tmp");
+ ConstantInt::get(Type::getInt32Ty(*Context), Idx&15, false), "tmp");
InsertNewInstBefore(Elt, CI);
ExtractedElts[Idx] = Elt;
}
// Insert this value into the result vector.
Result = InsertElementInst::Create(Result, ExtractedElts[Idx],
- ConstantInt::get(Type::Int32Ty, i, false),
+ ConstantInt::get(Type::getInt32Ty(*Context), i, false),
"tmp");
InsertNewInstBefore(cast<Instruction>(Result), CI);
}
@@ -10073,7 +10076,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// If the call and callee calling conventions don't match, this call must
// be unreachable, as the call is undefined.
new StoreInst(ConstantInt::getTrue(*Context),
- UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
+ UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))),
OldCall);
if (!OldCall->use_empty())
OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
@@ -10087,7 +10090,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// undef so that we know that this code is not reachable, despite the fact
// that we can't modify the CFG here.
new StoreInst(ConstantInt::getTrue(*Context),
- UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
+ UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))),
CS.getInstruction());
if (!CS.getInstruction()->use_empty())
@@ -10162,14 +10165,14 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Conversion is ok if changing from one pointer type to another or from
// a pointer to an integer of the same size.
!((isa<PointerType>(OldRetTy) || !TD ||
- OldRetTy == TD->getIntPtrType()) &&
+ OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
(isa<PointerType>(NewRetTy) || !TD ||
- NewRetTy == TD->getIntPtrType())))
+ NewRetTy == TD->getIntPtrType(Caller->getContext()))))
return false; // Cannot transform this return value.
if (!Caller->use_empty() &&
// void -> non-void is handled specially
- NewRetTy != Type::VoidTy && !CastInst::isCastable(NewRetTy, OldRetTy))
+ NewRetTy != Type::getVoidTy(*Context) && !CastInst::isCastable(NewRetTy, OldRetTy))
return false; // Cannot transform this return value.
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
@@ -10210,8 +10213,10 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Converting from one pointer type to another or between a pointer and an
// integer of the same size is safe even if we do not have a body.
bool isConvertible = ActTy == ParamTy ||
- (TD && ((isa<PointerType>(ParamTy) || ParamTy == TD->getIntPtrType()) &&
- (isa<PointerType>(ActTy) || ActTy == TD->getIntPtrType())));
+ (TD && ((isa<PointerType>(ParamTy) ||
+ ParamTy == TD->getIntPtrType(Caller->getContext())) &&
+ (isa<PointerType>(ActTy) ||
+ ActTy == TD->getIntPtrType(Caller->getContext()))));
if (Callee->isDeclaration() && !isConvertible) return false;
}
@@ -10302,7 +10307,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (Attributes FnAttrs = CallerPAL.getFnAttributes())
attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
- if (NewRetTy == Type::VoidTy)
+ if (NewRetTy == Type::getVoidTy(*Context))
Caller->setName(""); // Void type should not have a name.
const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
@@ -10328,7 +10333,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Insert a cast of the return type as necessary.
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
- if (NV->getType() != Type::VoidTy) {
+ if (NV->getType() != Type::getVoidTy(*Context)) {
Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
OldRetTy, false);
NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
@@ -10348,7 +10353,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
}
}
- if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
+ if (Caller->getType() != Type::getVoidTy(*Context) && !Caller->use_empty())
Caller->replaceAllUsesWith(NV);
Caller->eraseFromParent();
RemoveFromWorkList(Caller);
@@ -10494,7 +10499,7 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
setCallingConv(cast<CallInst>(Caller)->getCallingConv());
cast<CallInst>(NewCaller)->setAttributes(NewPAL);
}
- if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
+ if (Caller->getType() != Type::getVoidTy(*Context) && !Caller->use_empty())
Caller->replaceAllUsesWith(NewCaller);
Caller->eraseFromParent();
RemoveFromWorkList(Caller);
@@ -11044,10 +11049,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *Op = *i;
if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits()) {
if (Constant *C = dyn_cast<Constant>(Op)) {
- *i = ConstantExpr::getTrunc(C, TD->getIntPtrType());
+ *i = ConstantExpr::getTrunc(C, TD->getIntPtrType(GEP.getContext()));
MadeChange = true;
} else {
- Op = InsertCastBefore(Instruction::Trunc, Op, TD->getIntPtrType(),
+ Op = InsertCastBefore(Instruction::Trunc, Op,
+ TD->getIntPtrType(GEP.getContext()),
GEP);
*i = Op;
MadeChange = true;
@@ -11055,11 +11061,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
} else if (TD->getTypeSizeInBits(Op->getType())
< TD->getPointerSizeInBits()) {
if (Constant *C = dyn_cast<Constant>(Op)) {
- *i = ConstantExpr::getSExt(C, TD->getIntPtrType());
+ *i = ConstantExpr::getSExt(C, TD->getIntPtrType(GEP.getContext()));
MadeChange = true;
} else {
- Op = InsertCastBefore(Instruction::SExt, Op, TD->getIntPtrType(),
- GEP);
+ Op = InsertCastBefore(Instruction::SExt, Op,
+ TD->getIntPtrType(GEP.getContext()), GEP);
*i = Op;
MadeChange = true;
}
@@ -11127,7 +11133,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Convert SO1 to GO1's type.
SO1 = InsertCastToIntPtrTy(SO1, GO1->getType(), &GEP, this);
} else {
- const Type *PT = TD->getIntPtrType();
+ const Type *PT = TD->getIntPtrType(GEP.getContext());
SO1 = InsertCastToIntPtrTy(SO1, PT, &GEP, this);
GO1 = InsertCastToIntPtrTy(GO1, PT, &GEP, this);
}
@@ -11238,7 +11244,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeAllocSize(ResElTy)) {
Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::Int32Ty);
+ Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
Idx[1] = GEP.getOperand(1);
GetElementPtrInst *NewGEP =
GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName());
@@ -11254,7 +11260,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
+ if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::getInt8Ty(*Context)) {
uint64_t ArrayEltSize =
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
@@ -11302,7 +11308,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Insert the new GEP instruction.
Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::Int32Ty);
+ Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
Idx[1] = NewIdx;
Instruction *NewGEP =
GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName());
@@ -11399,7 +11405,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
//
- Value *NullIdx = Constant::getNullValue(Type::Int32Ty);
+ Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(*Context));
Value *Idx[2];
Idx[0] = NullIdx;
Idx[1] = NullIdx;
@@ -11437,7 +11443,7 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
if (isa<UndefValue>(Op)) {
// Insert a new store to null because we cannot modify the CFG here.
new StoreInst(ConstantInt::getTrue(*Context),
- UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), &FI);
+ UndefValue::get(PointerType::getUnqual(Type::getInt1Ty(*Context))), &FI);
return EraseInstFromFunction(FI);
}
@@ -11532,7 +11538,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
Value *Idxs[2];
- Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
+ Idxs[0] = Idxs[1] = Constant::getNullValue(Type::getInt32Ty(*Context));
CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
SrcTy = cast<PointerType>(CastOp->getType());
SrcPTy = SrcTy->getElementType();
@@ -11726,7 +11732,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
// constants.
if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
// Index through pointer.
- Constant *Zero = Constant::getNullValue(Type::Int32Ty);
+ Constant *Zero = Constant::getNullValue(Type::getInt32Ty(*IC.getContext()));
NewGEPIndices.push_back(Zero);
while (1) {
@@ -12505,7 +12511,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
}
return ExtractElementInst::Create(Src,
- ConstantInt::get(Type::Int32Ty, SrcIdx, false));
+ ConstantInt::get(Type::getInt32Ty(*Context), SrcIdx, false));
}
}
// FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
@@ -12524,15 +12530,15 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
if (isa<UndefValue>(V)) {
- Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
+ Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
return true;
} else if (V == LHS) {
for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
+ Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
return true;
} else if (V == RHS) {
for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::Int32Ty, i+NumElts));
+ Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i+NumElts));
return true;
} else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
// If this is an insert of an extract from some other vector, include it.
@@ -12549,7 +12555,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
// transitively ok.
if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
// If so, update the mask to reflect the inserted undef.
- Mask[InsertedIdx] = UndefValue::get(Type::Int32Ty);
+ Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(*Context));
return true;
}
} else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
@@ -12566,11 +12572,11 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
// If so, update the mask to reflect the inserted value.
if (EI->getOperand(0) == LHS) {
Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::Int32Ty, ExtractedIdx);
+ ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
} else {
assert(EI->getOperand(0) == RHS);
Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::Int32Ty, ExtractedIdx+NumElts);
+ ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx+NumElts);
}
return true;
@@ -12595,10 +12601,10 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
if (isa<UndefValue>(V)) {
- Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
+ Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
return V;
} else if (isa<ConstantAggregateZero>(V)) {
- Mask.assign(NumElts, ConstantInt::get(Type::Int32Ty, 0));
+ Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(*Context), 0));
return V;
} else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
// If this is an insert of an extract from some other vector, include it.
@@ -12619,7 +12625,7 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
RHS = EI->getOperand(0);
Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::Int32Ty, NumElts+ExtractedIdx);
+ ConstantInt::get(Type::getInt32Ty(*Context), NumElts+ExtractedIdx);
return V;
}
@@ -12629,7 +12635,7 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
// Everything but the extracted element is replaced with the RHS.
for (unsigned i = 0; i != NumElts; ++i) {
if (i != InsertedIdx)
- Mask[i] = ConstantInt::get(Type::Int32Ty, NumElts+i);
+ Mask[i] = ConstantInt::get(Type::getInt32Ty(*Context), NumElts+i);
}
return V;
}
@@ -12647,7 +12653,7 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
// Otherwise, can't do anything fancy. Return an identity vector.
for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::Int32Ty, i));
+ Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
return V;
}
@@ -12691,14 +12697,14 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// Build a new shuffle mask.
std::vector<Constant*> Mask;
if (isa<UndefValue>(VecOp))
- Mask.assign(NumVectorElts, UndefValue::get(Type::Int32Ty));
+ Mask.assign(NumVectorElts, UndefValue::get(Type::getInt32Ty(*Context)));
else {
assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
- Mask.assign(NumVectorElts, ConstantInt::get(Type::Int32Ty,
+ Mask.assign(NumVectorElts, ConstantInt::get(Type::getInt32Ty(*Context),
NumVectorElts));
}
Mask[InsertedIdx] =
- ConstantInt::get(Type::Int32Ty, ExtractedIdx);
+ ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
return new ShuffleVectorInst(EI->getOperand(0), VecOp,
ConstantVector::get(Mask));
}
@@ -12763,15 +12769,15 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
std::vector<Constant*> Elts;
for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
if (Mask[i] >= 2*e)
- Elts.push_back(UndefValue::get(Type::Int32Ty));
+ Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
else {
if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
(Mask[i] < e && isa<UndefValue>(LHS))) {
Mask[i] = 2*e; // Turn into undef.
- Elts.push_back(UndefValue::get(Type::Int32Ty));
+ Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
} else {
Mask[i] = Mask[i] % e; // Force to LHS.
- Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Mask[i]));
}
}
}
@@ -12827,9 +12833,9 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
std::vector<Constant*> Elts;
for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
if (NewMask[i] >= LHSInNElts*2) {
- Elts.push_back(UndefValue::get(Type::Int32Ty));
+ Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
} else {
- Elts.push_back(ConstantInt::get(Type::Int32Ty, NewMask[i]));
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), NewMask[i]));
}
}
return new ShuffleVectorInst(LHSSVI->getOperand(0),
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 6125f8b..ff04cec 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -435,7 +435,8 @@ bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
<< "' folding condition to '" << BranchDir << "': "
<< *BB->getTerminator());
++NumFolds;
- DestBI->setCondition(ConstantInt::get(Type::Int1Ty, BranchDir));
+ DestBI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
+ BranchDir));
ConstantFoldTerminator(BB);
return true;
}
@@ -757,7 +758,8 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
// We can only do the simplification for phi nodes of 'false' with AND or
// 'true' with OR. See if we have any entries in the phi for this.
unsigned PredNo = ~0U;
- ConstantInt *PredCst = ConstantInt::get(Type::Int1Ty, !isAnd);
+ ConstantInt *PredCst = ConstantInt::get(Type::getInt1Ty(BB->getContext()),
+ !isAnd);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) == PredCst) {
PredNo = i;
@@ -921,8 +923,9 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, BasicBlock *PredBB,
// account for entry from PredBB.
DenseMap<Instruction*, Value*> ValueMapping;
- BasicBlock *NewBB =
- BasicBlock::Create(BB->getName()+".thread", BB->getParent(), BB);
+ BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
+ BB->getName()+".thread",
+ BB->getParent(), BB);
NewBB->moveAfter(PredBB);
BasicBlock::iterator BI = BB->begin();
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 02a33a7..f4f20e4 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -510,7 +510,7 @@ void LICM::sink(Instruction &I) {
// Firstly, we create a stack object to hold the value...
AllocaInst *AI = 0;
- if (I.getType() != Type::VoidTy) {
+ if (I.getType() != Type::getVoidTy(I.getContext())) {
AI = new AllocaInst(I.getType(), 0, I.getName(),
I.getParent()->getParent()->getEntryBlock().begin());
CurAST->add(AI);
diff --git a/lib/Transforms/Scalar/LoopIndexSplit.cpp b/lib/Transforms/Scalar/LoopIndexSplit.cpp
index f5e5d35..792b753 100644
--- a/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ b/lib/Transforms/Scalar/LoopIndexSplit.cpp
@@ -702,7 +702,8 @@ void LoopIndexSplit::removeBlocks(BasicBlock *DeadBB, Loop *LP,
E = df_end(DN); DI != E; ++DI) {
BasicBlock *BB = DI->getBlock();
WorkList.push_back(BB);
- BB->replaceAllUsesWith(UndefValue::get(Type::LabelTy));
+ BB->replaceAllUsesWith(UndefValue::get(
+ Type::getLabelTy(DeadBB->getContext())));
}
while (!WorkList.empty()) {
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 8c5de3e..687304a 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -435,7 +435,8 @@ void LoopRotate::preserveCanonicalLoopForm(LPPassManager &LPM) {
// Right now original pre-header has two successors, new header and
// exit block. Insert new block between original pre-header and
// new header such that loop's new pre-header has only one successor.
- BasicBlock *NewPreHeader = BasicBlock::Create("bb.nph",
+ BasicBlock *NewPreHeader = BasicBlock::Create(OrigHeader->getContext(),
+ "bb.nph",
OrigHeader->getParent(),
NewHeader);
LoopInfo &LI = LPM.getAnalysis<LoopInfo>();
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 0db3a96..9a5a226 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -903,7 +903,8 @@ bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
// If this is a load or other access, pass the type of the access in.
- const Type *AccessTy = Type::VoidTy;
+ const Type *AccessTy =
+ Type::getVoidTy(UsersToProcess[i].Inst->getContext());
if (isAddressUse(UsersToProcess[i].Inst,
UsersToProcess[i].OperandValToReplace))
AccessTy = getAccessType(UsersToProcess[i].Inst);
@@ -935,7 +936,8 @@ bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
// If this is a load or other access, pass the type of the access in.
- const Type *AccessTy = Type::VoidTy;
+ const Type *AccessTy =
+ Type::getVoidTy(UsersToProcess[i].Inst->getContext());
if (isAddressUse(UsersToProcess[i].Inst,
UsersToProcess[i].OperandValToReplace))
AccessTy = getAccessType(UsersToProcess[i].Inst);
@@ -1534,7 +1536,9 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
const SCEV *NewCommon = CommonExprs;
const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
- MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
+ MoveImmediateValues(TLI, Type::getVoidTy(
+ L->getLoopPreheader()->getContext()),
+ NewCommon, Imm, true, L, SE);
if (!Imm->isZero()) {
bool DoSink = true;
@@ -1549,7 +1553,8 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
if (GV || Offset)
// Pass VoidTy as the AccessTy to be conservative, because
// there could be multiple access types among all the uses.
- DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy,
+ DoSink = IsImmFoldedIntoAddrMode(GV, Offset,
+ Type::getVoidTy(L->getLoopPreheader()->getContext()),
UsersToProcess, TLI);
if (DoSink) {
@@ -1580,8 +1585,10 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
- IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
- SE->getIntegerSCEV(0, Type::Int32Ty),
+ IVExpr ReuseIV(SE->getIntegerSCEV(0,
+ Type::getInt32Ty(Preheader->getContext())),
+ SE->getIntegerSCEV(0,
+ Type::getInt32Ty(Preheader->getContext())),
0);
/// Choose a strength-reduction strategy and prepare for it by creating
@@ -1943,7 +1950,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
NewCmpTy = NewCmpLHS->getType();
NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
- const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
+ const Type *NewCmpIntTy = IntegerType::get(Cond->getContext(), NewTyBits);
if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
// Check if it is possible to rewrite it using
// an iv / stride of a smaller integer type.
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 57672f9..bbc99f6 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -511,7 +511,8 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
// Insert a conditional branch on LIC to the two preheaders. The original
// code is the true version and the new code is the false version.
Value *BranchVal = LIC;
- if (!isa<ConstantInt>(Val) || Val->getType() != Type::Int1Ty)
+ if (!isa<ConstantInt>(Val) ||
+ Val->getType() != Type::getInt1Ty(LIC->getContext()))
BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val, "tmp");
else if (Val != ConstantInt::getTrue(Val->getContext()))
// We want to enter the new loop when the condition is true.
@@ -793,7 +794,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
// dominates the latch).
LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
Pred->getTerminator()->eraseFromParent();
- new UnreachableInst(Pred);
+ new UnreachableInst(BB->getContext(), Pred);
// The loop is now broken, remove it from LI.
RemoveLoopFromHierarchy(L);
@@ -907,12 +908,13 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC
// in the loop with the appropriate one directly.
- if (IsEqual || (isa<ConstantInt>(Val) && Val->getType() == Type::Int1Ty)) {
+ if (IsEqual || (isa<ConstantInt>(Val) &&
+ Val->getType() == Type::getInt1Ty(Val->getContext()))) {
Value *Replacement;
if (IsEqual)
Replacement = Val;
else
- Replacement = ConstantInt::get(Type::Int1Ty,
+ Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()),
!cast<ConstantInt>(Val)->getZExtValue());
for (unsigned i = 0, e = Users.size(); i != e; ++i)
@@ -1024,10 +1026,11 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
break;
case Instruction::And:
if (isa<ConstantInt>(I->getOperand(0)) &&
- I->getOperand(0)->getType() == Type::Int1Ty) // constant -> RHS
+ // constant -> RHS
+ I->getOperand(0)->getType() == Type::getInt1Ty(I->getContext()))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType() == Type::Int1Ty) {
+ if (CB->getType() == Type::getInt1Ty(I->getContext())) {
if (CB->isOne()) // X & 1 -> X
ReplaceUsesOfWith(I, I->getOperand(0), Worklist, L, LPM);
else // X & 0 -> 0
@@ -1037,10 +1040,11 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
break;
case Instruction::Or:
if (isa<ConstantInt>(I->getOperand(0)) &&
- I->getOperand(0)->getType() == Type::Int1Ty) // constant -> RHS
+ // constant -> RHS
+ I->getOperand(0)->getType() == Type::getInt1Ty(I->getContext()))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType() == Type::Int1Ty) {
+ if (CB->getType() == Type::getInt1Ty(I->getContext())) {
if (CB->isOne()) // X | 1 -> 1
ReplaceUsesOfWith(I, I->getOperand(1), Worklist, L, LPM);
else // X | 0 -> X
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 224a136..1c8badc 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -38,15 +38,15 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred");
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V, LLVMContext& Context) {
// All byte-wide stores are splatable, even of arbitrary variables.
- if (V->getType() == Type::Int8Ty) return V;
+ if (V->getType() == Type::getInt8Ty(Context)) return V;
// Constant float and double values can be handled as integer values if the
// corresponding integer value is "byteable". An important case is 0.0.
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
- if (CFP->getType() == Type::FloatTy)
- V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
- if (CFP->getType() == Type::DoubleTy)
- V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
+ if (CFP->getType() == Type::getFloatTy(Context))
+ V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
+ if (CFP->getType() == Type::getDoubleTy(Context))
+ V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
// Don't handle long double formats, which have strange constraints.
}
@@ -431,7 +431,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
BasicBlock::iterator InsertPt = BI;
if (MemSetF == 0) {
- const Type *Tys[] = {Type::Int64Ty};
+ const Type *Tys[] = {Type::getInt64Ty(SI->getContext())};
MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset,
Tys, 1);
}
@@ -440,7 +440,8 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
StartPtr = Range.StartPtr;
// Cast the start ptr to be i8* as memset requires.
- const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
+ const Type *i8Ptr =
+ PointerType::getUnqual(Type::getInt8Ty(SI->getContext()));
if (StartPtr->getType() != i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
InsertPt);
@@ -448,9 +449,10 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
Value *Ops[] = {
StartPtr, ByteVal, // Start, value
// size
- ConstantInt::get(Type::Int64Ty, Range.End-Range.Start),
+ ConstantInt::get(Type::getInt64Ty(SI->getContext()),
+ Range.End-Range.Start),
// align
- ConstantInt::get(Type::Int32Ty, Range.Alignment)
+ ConstantInt::get(Type::getInt32Ty(SI->getContext()), Range.Alignment)
};
Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
DEBUG(cerr << "Replace stores:\n";
diff --git a/lib/Transforms/Scalar/PredicateSimplifier.cpp b/lib/Transforms/Scalar/PredicateSimplifier.cpp
index f9427bb..8332f56 100644
--- a/lib/Transforms/Scalar/PredicateSimplifier.cpp
+++ b/lib/Transforms/Scalar/PredicateSimplifier.cpp
@@ -469,8 +469,8 @@ namespace {
/// valueNumber - finds the value number for V under the Subtree. If
/// there is no value number, returns zero.
unsigned valueNumber(Value *V, DomTreeDFS::Node *Subtree) {
- if (!(isa<Constant>(V) || isa<Argument>(V) || isa<Instruction>(V))
- || V->getType() == Type::VoidTy) return 0;
+ if (!(isa<Constant>(V) || isa<Argument>(V) || isa<Instruction>(V)) ||
+ V->getType() == Type::getVoidTy(V->getContext())) return 0;
VNMapType::iterator E = VNMap.end();
VNPair pair(V, 0, Subtree);
@@ -496,7 +496,8 @@ namespace {
unsigned newVN(Value *V) {
assert((isa<Constant>(V) || isa<Argument>(V) || isa<Instruction>(V)) &&
"Bad Value for value numbering.");
- assert(V->getType() != Type::VoidTy && "Won't value number a void value");
+ assert(V->getType() != Type::getVoidTy(V->getContext()) &&
+ "Won't value number a void value");
Values.push_back(V);
@@ -1310,7 +1311,7 @@ namespace {
TerminatorInst *TI = BB->getTerminator();
TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
TI->eraseFromParent();
- new UnreachableInst(BB);
+ new UnreachableInst(TI->getContext(), BB);
++NumBlocks;
modified = true;
}
diff --git a/lib/Transforms/Scalar/Reg2Mem.cpp b/lib/Transforms/Scalar/Reg2Mem.cpp
index e1075a6..b0db317 100644
--- a/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -69,7 +69,8 @@ namespace {
CastInst *AllocaInsertionPoint =
CastInst::Create(Instruction::BitCast,
- Constant::getNullValue(Type::Int32Ty), Type::Int32Ty,
+ Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ Type::getInt32Ty(F.getContext()),
"reg2mem alloca point", I);
// Find the escaped instructions. But don't create stack slots for
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 8062932..c0c44b5 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -1184,7 +1184,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
if (F == 0 || !F->hasLocalLinkage()) {
CallOverdefined:
// Void return and not tracking callee, just bail.
- if (I->getType() == Type::VoidTy) return;
+ if (I->getType() == Type::getVoidTy(I->getContext())) return;
// Otherwise, if we have a single return value case, and if the function is
// a declaration, maybe we can constant fold it.
@@ -1350,7 +1350,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
// Look for instructions which produce undef values.
- if (I->getType() == Type::VoidTy) continue;
+ if (I->getType() == Type::getVoidTy(F.getContext())) continue;
LatticeVal &LV = getValueState(I);
if (!LV.isUndefined()) continue;
@@ -1589,7 +1589,7 @@ bool SCCP::runOnFunction(Function &F) {
//
for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
Instruction *Inst = BI++;
- if (Inst->getType() == Type::VoidTy ||
+ if (Inst->getType() == Type::getVoidTy(F.getContext()) ||
isa<TerminatorInst>(Inst))
continue;
@@ -1760,12 +1760,12 @@ bool IPSCCP::runOnModule(Module &M) {
if (&*BB != &F->front())
BlocksToErase.push_back(BB);
else
- new UnreachableInst(BB);
+ new UnreachableInst(M.getContext(), BB);
} else {
for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
Instruction *Inst = BI++;
- if (Inst->getType() == Type::VoidTy)
+ if (Inst->getType() == Type::getVoidTy(M.getContext()))
continue;
LatticeVal &IV = Values[Inst];
@@ -1842,7 +1842,7 @@ bool IPSCCP::runOnModule(Module &M) {
for (DenseMap<Function*, LatticeVal>::const_iterator I = RV.begin(),
E = RV.end(); I != E; ++I)
if (!I->second.isOverdefined() &&
- I->first->getReturnType() != Type::VoidTy) {
+ I->first->getReturnType() != Type::getVoidTy(M.getContext())) {
Function *F = I->first;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
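The SCCP hunks answer a question that comes up throughout the patch: where to fetch the context from. The interprocedural code uses M.getContext(), the function-local code uses F.getContext(), and other files ask the instruction itself; for IR living in a single module these all refer to the same LLVMContext, so the choice is one of convenience. A small sketch of that equivalence, with an illustrative helper name:

    #include "llvm/BasicBlock.h"
    #include "llvm/Function.h"
    #include "llvm/Instruction.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Type.h"
    #include <cassert>
    using namespace llvm;

    // For an instruction already linked into a function and module, the three
    // routes to the context agree, and any of them can feed Type::getVoidTy().
    static bool hasVoidType(Instruction *I) {
      Function *F = I->getParent()->getParent();
      Module   *M = F->getParent();
      assert(&I->getContext() == &F->getContext() &&
             &F->getContext() == &M->getContext() && "one module, one context");
      return I->getType() == Type::getVoidTy(I->getContext());
    }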
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index cacf3db..6857162 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -306,7 +306,7 @@ bool SROA::performScalarRepl(Function &F) {
DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";
// Create and insert the integer alloca.
- const Type *NewTy = IntegerType::get(AllocaSize*8);
+ const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
ConvertUsesToScalar(AI, NewAI, 0);
}
@@ -417,7 +417,8 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
// expanded itself once the worklist is rerun.
//
SmallVector<Value*, 8> NewArgs;
- NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
+ NewArgs.push_back(Constant::getNullValue(
+ Type::getInt32Ty(AI->getContext())));
NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
NewArgs.end(), "", GEPI);
@@ -764,7 +765,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
const Type *BytePtrTy = MI->getRawDest()->getType();
bool SROADest = MI->getRawDest() == BCInst;
- Constant *Zero = Constant::getNullValue(Type::Int32Ty);
+ Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
@@ -772,7 +773,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
unsigned OtherEltAlign = MemAlignment;
if (OtherPtr) {
- Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
+ Value *Idx[2] = { Zero,
+ ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
OtherPtr->getNameStr()+"."+Twine(i),
MI);
@@ -873,7 +875,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- ConstantInt::get(Type::Int32Ty, OtherEltAlign) // Align
+ // Align
+ ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
};
CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
} else {
@@ -910,7 +913,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
// Handle tail padding by extending the operand
if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = new ZExtInst(SrcVal,
- IntegerType::get(AllocaSizeBits), "", SI);
+ IntegerType::get(SI->getContext(), AllocaSizeBits),
+ "", SI);
DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;
@@ -942,7 +946,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
if (FieldSizeBits != AllocaSizeBits)
EltVal = new TruncInst(EltVal,
- IntegerType::get(FieldSizeBits), "", SI);
+ IntegerType::get(SI->getContext(), FieldSizeBits),
+ "", SI);
Value *DestField = NewElts[i];
if (EltVal->getType() == FieldTy) {
// Storing to an integer field of this size, just do it.
@@ -985,7 +990,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
// Truncate down to an integer of the right size.
if (ElementSizeBits != AllocaSizeBits)
EltVal = new TruncInst(EltVal,
- IntegerType::get(ElementSizeBits),"",SI);
+ IntegerType::get(SI->getContext(),
+ ElementSizeBits),"",SI);
Value *DestField = NewElts[i];
if (EltVal->getType() == ArrayEltTy) {
// Storing to an integer field of this size, just do it.
@@ -1040,7 +1046,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
}
Value *ResultVal =
- Constant::getNullValue(IntegerType::get(AllocaSizeBits));
+ Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Load the value from the alloca. If the NewElt is an aggregate, cast
@@ -1053,7 +1059,8 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
- const IntegerType *FieldIntTy = IntegerType::get(FieldSizeBits);
+ const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
+ FieldSizeBits);
if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
!isa<VectorType>(FieldTy))
SrcField = new BitCastInst(SrcField,
@@ -1186,7 +1193,8 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
return;
if (NumElements == 1) {
- GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
+ GEPI->setOperand(2,
+ Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())));
return;
}
@@ -1198,12 +1206,12 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
"isone");
// Insert the new GEP instructions, which are properly indexed.
SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
- Indices[1] = Constant::getNullValue(Type::Int32Ty);
+ Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
Indices.begin(),
Indices.end(),
GEPI->getName()+".0", GEPI);
- Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
+ Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1);
Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
Indices.begin(),
Indices.end(),
@@ -1263,7 +1271,7 @@ static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
unsigned AllocaSize, const TargetData &TD,
LLVMContext &Context) {
// If this could be contributing to a vector, analyze it.
- if (VecTy != Type::VoidTy) { // either null or a vector type.
+ if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.
// If the In type is a vector that is the same size as the alloca, see if it
// matches the existing VecTy.
@@ -1276,7 +1284,8 @@ static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
VecTy = VInTy;
return;
}
- } else if (In == Type::FloatTy || In == Type::DoubleTy ||
+ } else if (In == Type::getFloatTy(Context) ||
+ In == Type::getDoubleTy(Context) ||
(isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
// If we're accessing something that could be an element of a vector, see
@@ -1297,7 +1306,7 @@ static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
// Otherwise, we have a case that we can't handle with an optimized vector
// form. We can still turn this into a large integer.
- VecTy = Type::VoidTy;
+ VecTy = Type::getVoidTy(Context);
}
/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
@@ -1548,9 +1557,8 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
}
// Return the element extracted out of it.
- Value *V = Builder.CreateExtractElement(FromVal,
- ConstantInt::get(Type::Int32Ty,Elt),
- "tmp");
+ Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
+ Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
if (V->getType() != ToType)
V = Builder.CreateBitCast(V, ToType, "tmp");
return V;
@@ -1613,10 +1621,12 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
if (LIBitWidth < NTy->getBitWidth())
FromVal =
- Builder.CreateTrunc(FromVal, IntegerType::get(LIBitWidth), "tmp");
+ Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
+ LIBitWidth), "tmp");
else if (LIBitWidth > NTy->getBitWidth())
FromVal =
- Builder.CreateZExt(FromVal, IntegerType::get(LIBitWidth), "tmp");
+ Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
+ LIBitWidth), "tmp");
// If the result is an integer, this is a trunc or bitcast.
if (isa<IntegerType>(ToType)) {
@@ -1668,7 +1678,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
SV = Builder.CreateInsertElement(Old, SV,
- ConstantInt::get(Type::Int32Ty, Elt),
+ ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
"tmp");
return SV;
}
@@ -1701,9 +1711,10 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
- SV = Builder.CreateBitCast(SV, IntegerType::get(SrcWidth), "tmp");
+ SV = Builder.CreateBitCast(SV,
+ IntegerType::get(SV->getContext(),SrcWidth), "tmp");
else if (isa<PointerType>(SV->getType()))
- SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");
+ SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");
// Zero extend or truncate the value if needed.
if (SV->getType() != AllocaType) {
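The SROA hunks exercise the two type factories this patch touches most often: IntegerType::get, which now takes the context ahead of the bit width, and TargetData::getIntPtrType, which needs the context to hand back the pointer-sized integer. A minimal sketch of both, assuming a TargetData is available as it is in the pass; the wrapper names are illustrative:

    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // An iN wide enough to cover an alloca of AllocaSizeBytes bytes (e.g. i96
    // for 12 bytes), as performScalarRepl builds when it converts an alloca
    // into a single integer.
    static const IntegerType *wholeAllocaIntTy(LLVMContext &Ctx,
                                               unsigned AllocaSizeBytes) {
      return IntegerType::get(Ctx, AllocaSizeBytes * 8);
    }

    // The target's pointer-sized integer type now also requires the context.
    static const Type *intPtrTy(const TargetData &TD, LLVMContext &Ctx) {
      return TD.getIntPtrType(Ctx);
    }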
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 3ea6ddd..5de79c4 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -65,7 +65,7 @@ static void ChangeToUnreachable(Instruction *I, LLVMContext &Context) {
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
(*SI)->removePredecessor(BB);
- new UnreachableInst(I);
+ new UnreachableInst(I->getContext(), I);
// All instructions after this are dead.
BasicBlock::iterator BBI = I, BBE = BB->end();
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 2ac980f..64013d5 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -126,7 +126,7 @@ public:
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
Value *LibCallOptimization::CastToCStr(Value *V, IRBuilder<> &B) {
return
- B.CreateBitCast(V, PointerType::getUnqual(Type::Int8Ty), "cstr");
+ B.CreateBitCast(V, PointerType::getUnqual(Type::getInt8Ty(*Context)), "cstr");
}
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
@@ -139,8 +139,8 @@ Value *LibCallOptimization::EmitStrLen(Value *Ptr, IRBuilder<> &B) {
Attribute::NoUnwind);
Constant *StrLen =M->getOrInsertFunction("strlen", AttrListPtr::get(AWI, 2),
- TD->getIntPtrType(),
- PointerType::getUnqual(Type::Int8Ty),
+ TD->getIntPtrType(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
NULL);
CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");
if (const Function *F = dyn_cast<Function>(StrLen->stripPointerCasts()))
@@ -159,7 +159,7 @@ Value *LibCallOptimization::EmitMemCpy(Value *Dst, Value *Src, Value *Len,
Tys[0] = Len->getType();
Value *MemCpy = Intrinsic::getDeclaration(M, IID, Tys, 1);
return B.CreateCall4(MemCpy, CastToCStr(Dst, B), CastToCStr(Src, B), Len,
- ConstantInt::get(Type::Int32Ty, Align));
+ ConstantInt::get(Type::getInt32Ty(*Context), Align));
}
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
@@ -171,9 +171,9 @@ Value *LibCallOptimization::EmitMemChr(Value *Ptr, Value *Val,
AWI = AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind);
Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(&AWI, 1),
- PointerType::getUnqual(Type::Int8Ty),
- PointerType::getUnqual(Type::Int8Ty),
- Type::Int32Ty, TD->getIntPtrType(),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ Type::getInt32Ty(*Context), TD->getIntPtrType(*Context),
NULL);
CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");
@@ -194,10 +194,10 @@ Value *LibCallOptimization::EmitMemCmp(Value *Ptr1, Value *Ptr2,
Attribute::NoUnwind);
Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI, 3),
- Type::Int32Ty,
- PointerType::getUnqual(Type::Int8Ty),
- PointerType::getUnqual(Type::Int8Ty),
- TD->getIntPtrType(), NULL);
+ Type::getInt32Ty(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ TD->getIntPtrType(*Context), NULL);
CallInst *CI = B.CreateCall3(MemCmp, CastToCStr(Ptr1, B), CastToCStr(Ptr2, B),
Len, "memcmp");
@@ -215,7 +215,7 @@ Value *LibCallOptimization::EmitMemSet(Value *Dst, Value *Val,
const Type *Tys[1];
Tys[0] = Len->getType();
Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 1);
- Value *Align = ConstantInt::get(Type::Int32Ty, 1);
+ Value *Align = ConstantInt::get(Type::getInt32Ty(*Context), 1);
return B.CreateCall4(MemSet, CastToCStr(Dst, B), Val, Len, Align);
}
@@ -226,12 +226,12 @@ Value *LibCallOptimization::EmitMemSet(Value *Dst, Value *Val,
Value *LibCallOptimization::EmitUnaryFloatFnCall(Value *Op, const char *Name,
IRBuilder<> &B) {
char NameBuffer[20];
- if (Op->getType() != Type::DoubleTy) {
+ if (Op->getType() != Type::getDoubleTy(*Context)) {
// If we need to add a suffix, copy into NameBuffer.
unsigned NameLen = strlen(Name);
assert(NameLen < sizeof(NameBuffer)-2);
memcpy(NameBuffer, Name, NameLen);
- if (Op->getType() == Type::FloatTy)
+ if (Op->getType() == Type::getFloatTy(*Context))
NameBuffer[NameLen] = 'f'; // floorf
else
NameBuffer[NameLen] = 'l'; // floorl
@@ -254,10 +254,10 @@ Value *LibCallOptimization::EmitUnaryFloatFnCall(Value *Op, const char *Name,
/// is an integer.
void LibCallOptimization::EmitPutChar(Value *Char, IRBuilder<> &B) {
Module *M = Caller->getParent();
- Value *PutChar = M->getOrInsertFunction("putchar", Type::Int32Ty,
- Type::Int32Ty, NULL);
+ Value *PutChar = M->getOrInsertFunction("putchar", Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context), NULL);
CallInst *CI = B.CreateCall(PutChar,
- B.CreateIntCast(Char, Type::Int32Ty, "chari"),
+ B.CreateIntCast(Char, Type::getInt32Ty(*Context), "chari"),
"putchar");
if (const Function *F = dyn_cast<Function>(PutChar->stripPointerCasts()))
@@ -273,8 +273,8 @@ void LibCallOptimization::EmitPutS(Value *Str, IRBuilder<> &B) {
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI, 2),
- Type::Int32Ty,
- PointerType::getUnqual(Type::Int8Ty),
+ Type::getInt32Ty(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
NULL);
CallInst *CI = B.CreateCall(PutS, CastToCStr(Str, B), "puts");
if (const Function *F = dyn_cast<Function>(PutS->stripPointerCasts()))
@@ -291,12 +291,12 @@ void LibCallOptimization::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B) {
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Constant *F;
if (isa<PointerType>(File->getType()))
- F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI, 2), Type::Int32Ty,
- Type::Int32Ty, File->getType(), NULL);
+ F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI, 2), Type::getInt32Ty(*Context),
+ Type::getInt32Ty(*Context), File->getType(), NULL);
else
- F = M->getOrInsertFunction("fputc", Type::Int32Ty, Type::Int32Ty,
+ F = M->getOrInsertFunction("fputc", Type::getInt32Ty(*Context), Type::getInt32Ty(*Context),
File->getType(), NULL);
- Char = B.CreateIntCast(Char, Type::Int32Ty, "chari");
+ Char = B.CreateIntCast(Char, Type::getInt32Ty(*Context), "chari");
CallInst *CI = B.CreateCall2(F, Char, File, "fputc");
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
@@ -313,12 +313,12 @@ void LibCallOptimization::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B) {
AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
Constant *F;
if (isa<PointerType>(File->getType()))
- F = M->getOrInsertFunction("fputs", AttrListPtr::get(AWI, 3), Type::Int32Ty,
- PointerType::getUnqual(Type::Int8Ty),
+ F = M->getOrInsertFunction("fputs", AttrListPtr::get(AWI, 3), Type::getInt32Ty(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
File->getType(), NULL);
else
- F = M->getOrInsertFunction("fputs", Type::Int32Ty,
- PointerType::getUnqual(Type::Int8Ty),
+ F = M->getOrInsertFunction("fputs", Type::getInt32Ty(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
File->getType(), NULL);
CallInst *CI = B.CreateCall2(F, CastToCStr(Str, B), File, "fputs");
@@ -338,17 +338,17 @@ void LibCallOptimization::EmitFWrite(Value *Ptr, Value *Size, Value *File,
Constant *F;
if (isa<PointerType>(File->getType()))
F = M->getOrInsertFunction("fwrite", AttrListPtr::get(AWI, 3),
- TD->getIntPtrType(),
- PointerType::getUnqual(Type::Int8Ty),
- TD->getIntPtrType(), TD->getIntPtrType(),
+ TD->getIntPtrType(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ TD->getIntPtrType(*Context), TD->getIntPtrType(*Context),
File->getType(), NULL);
else
- F = M->getOrInsertFunction("fwrite", TD->getIntPtrType(),
- PointerType::getUnqual(Type::Int8Ty),
- TD->getIntPtrType(), TD->getIntPtrType(),
+ F = M->getOrInsertFunction("fwrite", TD->getIntPtrType(*Context),
+ PointerType::getUnqual(Type::getInt8Ty(*Context)),
+ TD->getIntPtrType(*Context), TD->getIntPtrType(*Context),
File->getType(), NULL);
CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
- ConstantInt::get(TD->getIntPtrType(), 1), File);
+ ConstantInt::get(TD->getIntPtrType(*Context), 1), File);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
@@ -449,7 +449,8 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
// Must be a Constant Array
ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (!Array || Array->getType()->getElementType() != Type::Int8Ty)
+ if (!Array ||
+ Array->getType()->getElementType() != Type::getInt8Ty(V->getContext()))
return false;
// Get the number of elements in the array
@@ -528,7 +529,7 @@ struct VISIBILITY_HIDDEN ExitOpt : public LibCallOptimization {
BasicBlock::iterator Dead = CI, E = OldTI; ++Dead;
while (Dead != E) {
BasicBlock::iterator Next = next(Dead);
- if (Dead->getType() != Type::VoidTy)
+ if (Dead->getType() != Type::getVoidTy(*Context))
Dead->replaceAllUsesWith(UndefValue::get(Dead->getType()));
Dead->eraseFromParent();
Dead = Next;
@@ -555,7 +556,7 @@ struct VISIBILITY_HIDDEN StrCatOpt : public LibCallOptimization {
// Verify the "strcat" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- FT->getReturnType() != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getReturnType() != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType())
return 0;
@@ -590,7 +591,7 @@ struct VISIBILITY_HIDDEN StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
EmitMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(), Len+1), 1, B);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len+1), 1, B);
}
};
@@ -602,7 +603,7 @@ struct VISIBILITY_HIDDEN StrNCatOpt : public StrCatOpt {
// Verify the "strncat" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- FT->getReturnType() != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getReturnType() != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType() ||
!isa<IntegerType>(FT->getParamType(2)))
@@ -647,7 +648,7 @@ struct VISIBILITY_HIDDEN StrChrOpt : public LibCallOptimization {
// Verify the "strchr" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- FT->getReturnType() != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getReturnType() != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
FT->getParamType(0) != FT->getReturnType())
return 0;
@@ -658,11 +659,11 @@ struct VISIBILITY_HIDDEN StrChrOpt : public LibCallOptimization {
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getOperand(2));
if (CharC == 0) {
uint64_t Len = GetStringLength(SrcStr);
- if (Len == 0 || FT->getParamType(1) != Type::Int32Ty) // memchr needs i32.
+ if (Len == 0 || FT->getParamType(1) != Type::getInt32Ty(*Context)) // memchr needs i32.
return 0;
return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
- ConstantInt::get(TD->getIntPtrType(), Len), B);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), B);
}
// Otherwise, the character is a constant, see if the first argument is
@@ -687,7 +688,7 @@ struct VISIBILITY_HIDDEN StrChrOpt : public LibCallOptimization {
}
// strchr(s+n,c) -> gep(s+n+i,c)
- Value *Idx = ConstantInt::get(Type::Int64Ty, i);
+ Value *Idx = ConstantInt::get(Type::getInt64Ty(*Context), i);
return B.CreateGEP(SrcStr, Idx, "strchr");
}
};
@@ -699,9 +700,9 @@ struct VISIBILITY_HIDDEN StrCmpOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 || FT->getReturnType() != Type::Int32Ty ||
+ if (FT->getNumParams() != 2 || FT->getReturnType() != Type::getInt32Ty(*Context) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != PointerType::getUnqual(Type::Int8Ty))
+ FT->getParamType(0) != PointerType::getUnqual(Type::getInt8Ty(*Context)))
return 0;
Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
@@ -728,7 +729,7 @@ struct VISIBILITY_HIDDEN StrCmpOpt : public LibCallOptimization {
uint64_t Len2 = GetStringLength(Str2P);
if (Len1 && Len2) {
return EmitMemCmp(Str1P, Str2P,
- ConstantInt::get(TD->getIntPtrType(),
+ ConstantInt::get(TD->getIntPtrType(*Context),
std::min(Len1, Len2)), B);
}
@@ -743,9 +744,9 @@ struct VISIBILITY_HIDDEN StrNCmpOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 3 || FT->getReturnType() != Type::Int32Ty ||
+ if (FT->getNumParams() != 3 || FT->getReturnType() != Type::getInt32Ty(*Context) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getParamType(0) != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
!isa<IntegerType>(FT->getParamType(2)))
return 0;
@@ -791,7 +792,7 @@ struct VISIBILITY_HIDDEN StrCpyOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != PointerType::getUnqual(Type::Int8Ty))
+ FT->getParamType(0) != PointerType::getUnqual(Type::getInt8Ty(*Context)))
return 0;
Value *Dst = CI->getOperand(1), *Src = CI->getOperand(2);
@@ -805,7 +806,7 @@ struct VISIBILITY_HIDDEN StrCpyOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(), Len), 1, B);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B);
return Dst;
}
};
@@ -818,7 +819,7 @@ struct VISIBILITY_HIDDEN StrNCpyOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- FT->getParamType(0) != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getParamType(0) != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
!isa<IntegerType>(FT->getParamType(2)))
return 0;
@@ -833,7 +834,7 @@ struct VISIBILITY_HIDDEN StrNCpyOpt : public LibCallOptimization {
if (SrcLen == 0) {
// strncpy(x, "", y) -> memset(x, '\0', y, 1)
- EmitMemSet(Dst, ConstantInt::get(Type::Int8Ty, '\0'), LenOp, B);
+ EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'), LenOp, B);
return Dst;
}
@@ -850,7 +851,7 @@ struct VISIBILITY_HIDDEN StrNCpyOpt : public LibCallOptimization {
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(), Len), 1, B);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B);
return Dst;
}
@@ -863,7 +864,7 @@ struct VISIBILITY_HIDDEN StrLenOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 ||
- FT->getParamType(0) != PointerType::getUnqual(Type::Int8Ty) ||
+ FT->getParamType(0) != PointerType::getUnqual(Type::getInt8Ty(*Context)) ||
!isa<IntegerType>(FT->getReturnType()))
return 0;
@@ -912,7 +913,7 @@ struct VISIBILITY_HIDDEN MemCmpOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || !isa<PointerType>(FT->getParamType(0)) ||
!isa<PointerType>(FT->getParamType(1)) ||
- FT->getReturnType() != Type::Int32Ty)
+ FT->getReturnType() != Type::getInt32Ty(*Context))
return 0;
Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
@@ -938,7 +939,7 @@ struct VISIBILITY_HIDDEN MemCmpOpt : public LibCallOptimization {
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS ^ *(int*)RHS) != 0
if ((Len == 2 || Len == 4) && IsOnlyUsedInZeroEqualityComparison(CI)) {
const Type *PTy = PointerType::getUnqual(Len == 2 ?
- Type::Int16Ty : Type::Int32Ty);
+ Type::getInt16Ty(*Context) : Type::getInt32Ty(*Context));
LHS = B.CreateBitCast(LHS, PTy, "tmp");
RHS = B.CreateBitCast(RHS, PTy, "tmp");
LoadInst *LHSV = B.CreateLoad(LHS, "lhsv");
@@ -960,7 +961,7 @@ struct VISIBILITY_HIDDEN MemCpyOpt : public LibCallOptimization {
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!isa<PointerType>(FT->getParamType(0)) ||
!isa<PointerType>(FT->getParamType(1)) ||
- FT->getParamType(2) != TD->getIntPtrType())
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
@@ -978,19 +979,19 @@ struct VISIBILITY_HIDDEN MemMoveOpt : public LibCallOptimization {
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!isa<PointerType>(FT->getParamType(0)) ||
!isa<PointerType>(FT->getParamType(1)) ||
- FT->getParamType(2) != TD->getIntPtrType())
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
Module *M = Caller->getParent();
Intrinsic::ID IID = Intrinsic::memmove;
const Type *Tys[1];
- Tys[0] = TD->getIntPtrType();
+ Tys[0] = TD->getIntPtrType(*Context);
Value *MemMove = Intrinsic::getDeclaration(M, IID, Tys, 1);
Value *Dst = CastToCStr(CI->getOperand(1), B);
Value *Src = CastToCStr(CI->getOperand(2), B);
Value *Size = CI->getOperand(3);
- Value *Align = ConstantInt::get(Type::Int32Ty, 1);
+ Value *Align = ConstantInt::get(Type::getInt32Ty(*Context), 1);
B.CreateCall4(MemMove, Dst, Src, Size, Align);
return CI->getOperand(1);
}
@@ -1005,11 +1006,11 @@ struct VISIBILITY_HIDDEN MemSetOpt : public LibCallOptimization {
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!isa<PointerType>(FT->getParamType(0)) ||
!isa<IntegerType>(FT->getParamType(1)) ||
- FT->getParamType(2) != TD->getIntPtrType())
+ FT->getParamType(2) != TD->getIntPtrType(*Context))
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getOperand(2), Type::Int8Ty, false);
+ Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context), false);
EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B);
return CI->getOperand(1);
}
@@ -1088,28 +1089,28 @@ struct VISIBILITY_HIDDEN Exp2Opt : public LibCallOptimization {
Value *LdExpArg = 0;
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
- LdExpArg = B.CreateSExt(OpC->getOperand(0), Type::Int32Ty, "tmp");
+ LdExpArg = B.CreateSExt(OpC->getOperand(0), Type::getInt32Ty(*Context), "tmp");
} else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
- LdExpArg = B.CreateZExt(OpC->getOperand(0), Type::Int32Ty, "tmp");
+ LdExpArg = B.CreateZExt(OpC->getOperand(0), Type::getInt32Ty(*Context), "tmp");
}
if (LdExpArg) {
const char *Name;
- if (Op->getType() == Type::FloatTy)
+ if (Op->getType() == Type::getFloatTy(*Context))
Name = "ldexpf";
- else if (Op->getType() == Type::DoubleTy)
+ else if (Op->getType() == Type::getDoubleTy(*Context))
Name = "ldexp";
else
Name = "ldexpl";
Constant *One = ConstantFP::get(*Context, APFloat(1.0f));
- if (Op->getType() != Type::FloatTy)
+ if (Op->getType() != Type::getFloatTy(*Context))
One = ConstantExpr::getFPExtend(One, Op->getType());
Module *M = Caller->getParent();
Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
- Op->getType(), Type::Int32Ty,NULL);
+ Op->getType(), Type::getInt32Ty(*Context),NULL);
CallInst *CI = B.CreateCall2(Callee, One, LdExpArg);
if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
@@ -1126,19 +1127,19 @@ struct VISIBILITY_HIDDEN Exp2Opt : public LibCallOptimization {
struct VISIBILITY_HIDDEN UnaryDoubleFPOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
const FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 1 || FT->getReturnType() != Type::DoubleTy ||
- FT->getParamType(0) != Type::DoubleTy)
+ if (FT->getNumParams() != 1 || FT->getReturnType() != Type::getDoubleTy(*Context) ||
+ FT->getParamType(0) != Type::getDoubleTy(*Context))
return 0;
// If this is something like 'floor((double)floatval)', convert to floorf.
FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getOperand(1));
- if (Cast == 0 || Cast->getOperand(0)->getType() != Type::FloatTy)
+ if (Cast == 0 || Cast->getOperand(0)->getType() != Type::getFloatTy(*Context))
return 0;
// floor((double)floatval) -> (double)floorf(floatval)
Value *V = Cast->getOperand(0);
V = EmitUnaryFloatFnCall(V, Callee->getName().data(), B);
- return B.CreateFPExt(V, Type::DoubleTy);
+ return B.CreateFPExt(V, Type::getDoubleTy(*Context));
}
};
@@ -1154,7 +1155,7 @@ struct VISIBILITY_HIDDEN FFSOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
- if (FT->getNumParams() != 1 || FT->getReturnType() != Type::Int32Ty ||
+ if (FT->getNumParams() != 1 || FT->getReturnType() != Type::getInt32Ty(*Context) ||
!isa<IntegerType>(FT->getParamType(0)))
return 0;
@@ -1164,7 +1165,7 @@ struct VISIBILITY_HIDDEN FFSOpt : public LibCallOptimization {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->getValue() == 0) // ffs(0) -> 0.
return Constant::getNullValue(CI->getType());
- return ConstantInt::get(Type::Int32Ty, // ffs(c) -> cttz(c)+1
+ return ConstantInt::get(Type::getInt32Ty(*Context), // ffs(c) -> cttz(c)+1
CI->getValue().countTrailingZeros()+1);
}
@@ -1174,10 +1175,10 @@ struct VISIBILITY_HIDDEN FFSOpt : public LibCallOptimization {
Intrinsic::cttz, &ArgType, 1);
Value *V = B.CreateCall(F, Op, "cttz");
V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1), "tmp");
- V = B.CreateIntCast(V, Type::Int32Ty, false, "tmp");
+ V = B.CreateIntCast(V, Type::getInt32Ty(*Context), false, "tmp");
Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType), "tmp");
- return B.CreateSelect(Cond, V, ConstantInt::get(Type::Int32Ty, 0));
+ return B.CreateSelect(Cond, V, ConstantInt::get(Type::getInt32Ty(*Context), 0));
}
};
@@ -1189,14 +1190,14 @@ struct VISIBILITY_HIDDEN IsDigitOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- FT->getParamType(0) != Type::Int32Ty)
+ FT->getParamType(0) != Type::getInt32Ty(*Context))
return 0;
// isdigit(c) -> (c-'0') <u 10
Value *Op = CI->getOperand(1);
- Op = B.CreateSub(Op, ConstantInt::get(Type::Int32Ty, '0'),
+ Op = B.CreateSub(Op, ConstantInt::get(Type::getInt32Ty(*Context), '0'),
"isdigittmp");
- Op = B.CreateICmpULT(Op, ConstantInt::get(Type::Int32Ty, 10),
+ Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 10),
"isdigit");
return B.CreateZExt(Op, CI->getType());
}
@@ -1210,12 +1211,12 @@ struct VISIBILITY_HIDDEN IsAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- FT->getParamType(0) != Type::Int32Ty)
+ FT->getParamType(0) != Type::getInt32Ty(*Context))
return 0;
// isascii(c) -> c <u 128
Value *Op = CI->getOperand(1);
- Op = B.CreateICmpULT(Op, ConstantInt::get(Type::Int32Ty, 128),
+ Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 128),
"isascii");
return B.CreateZExt(Op, CI->getType());
}
@@ -1251,7 +1252,7 @@ struct VISIBILITY_HIDDEN ToAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
- FT->getParamType(0) != Type::Int32Ty)
+ FT->getParamType(0) != Type::getInt32Ty(*Context))
return 0;
// isascii(c) -> c & 0x7f
@@ -1273,7 +1274,7 @@ struct VISIBILITY_HIDDEN PrintFOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() < 1 || !isa<PointerType>(FT->getParamType(0)) ||
!(isa<IntegerType>(FT->getReturnType()) ||
- FT->getReturnType() == Type::VoidTy))
+ FT->getReturnType() == Type::getVoidTy(*Context)))
return 0;
// Check for a fixed format string.
@@ -1288,7 +1289,7 @@ struct VISIBILITY_HIDDEN PrintFOpt : public LibCallOptimization {
// printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
- EmitPutChar(ConstantInt::get(Type::Int32Ty, FormatStr[0]), B);
+ EmitPutChar(ConstantInt::get(Type::getInt32Ty(*Context), FormatStr[0]), B);
return CI->use_empty() ? (Value*)CI :
ConstantInt::get(CI->getType(), 1);
}
@@ -1299,7 +1300,7 @@ struct VISIBILITY_HIDDEN PrintFOpt : public LibCallOptimization {
// Create a string literal with no \n on it. We expect the constant merge
// pass to be run after this pass, to merge duplicate strings.
FormatStr.erase(FormatStr.end()-1);
- Constant *C = ConstantArray::get(FormatStr, true);
+ Constant *C = ConstantArray::get(*Context, FormatStr, true);
C = new GlobalVariable(*Callee->getParent(), C->getType(), true,
GlobalVariable::InternalLinkage, C, "str");
EmitPutS(C, B);
@@ -1354,7 +1355,7 @@ struct VISIBILITY_HIDDEN SPrintFOpt : public LibCallOptimization {
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
EmitMemCpy(CI->getOperand(1), CI->getOperand(2), // Copy the nul byte.
- ConstantInt::get(TD->getIntPtrType(), FormatStr.size()+1),1,B);
+ ConstantInt::get(TD->getIntPtrType(*Context), FormatStr.size()+1),1,B);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1367,11 +1368,11 @@ struct VISIBILITY_HIDDEN SPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
if (!isa<IntegerType>(CI->getOperand(3)->getType())) return 0;
- Value *V = B.CreateTrunc(CI->getOperand(3), Type::Int8Ty, "char");
+ Value *V = B.CreateTrunc(CI->getOperand(3), Type::getInt8Ty(*Context), "char");
Value *Ptr = CastToCStr(CI->getOperand(1), B);
B.CreateStore(V, Ptr);
- Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::Int32Ty, 1), "nul");
- B.CreateStore(Constant::getNullValue(Type::Int8Ty), Ptr);
+ Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1), "nul");
+ B.CreateStore(Constant::getNullValue(Type::getInt8Ty(*Context)), Ptr);
return ConstantInt::get(CI->getType(), 1);
}
@@ -1444,7 +1445,7 @@ struct VISIBILITY_HIDDEN FPutsOpt : public LibCallOptimization {
uint64_t Len = GetStringLength(CI->getOperand(1));
if (!Len) return 0;
EmitFWrite(CI->getOperand(1),
- ConstantInt::get(TD->getIntPtrType(), Len-1),
+ ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
CI->getOperand(2), B);
return CI; // Known to have no uses (see above).
}
@@ -1473,7 +1474,7 @@ struct VISIBILITY_HIDDEN FPrintFOpt : public LibCallOptimization {
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
return 0; // We found a format specifier.
- EmitFWrite(CI->getOperand(2), ConstantInt::get(TD->getIntPtrType(),
+ EmitFWrite(CI->getOperand(2), ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()),
CI->getOperand(1), B);
return ConstantInt::get(CI->getType(), FormatStr.size());
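Most of the SimplifyLibCalls churn is prototype construction: every i8*, i32, and pointer-sized integer handed to Module::getOrInsertFunction is now built from the optimizer's *Context. As a compact illustration of the new style, a strlen declaration could be obtained as below; the attribute list the real pass attaches is omitted for brevity, and the helper name is illustrative:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Declare (or look up) size_t strlen(i8*), with every type built from Ctx.
    static Constant *getStrlenDecl(Module *M, const TargetData *TD,
                                   LLVMContext &Ctx) {
      return M->getOrInsertFunction("strlen",
                                    TD->getIntPtrType(Ctx),      // returns size_t
                                    PointerType::getUnqual(Type::getInt8Ty(Ctx)),
                                    NULL);                       // ends the arg list
    }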
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 34ee57c..b84a1f0 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -394,7 +394,7 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// create the new entry block, allowing us to branch back to the old entry.
if (OldEntry == 0) {
OldEntry = &F->getEntryBlock();
- BasicBlock *NewEntry = BasicBlock::Create("", F, OldEntry);
+ BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
NewEntry->takeName(OldEntry);
OldEntry->setName("tailrecurse");
BranchInst::Create(OldEntry, NewEntry);
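The closing TailRecursionElimination hunk applies the same migration to block creation: BasicBlock::Create now takes the context as its first parameter. A minimal sketch of the split-entry pattern it appears in, using only calls visible in the hunk; the helper name is illustrative:

    #include "llvm/BasicBlock.h"
    #include "llvm/Function.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // Insert a fresh, unnamed entry block before OldEntry and fall through to
    // it, giving the pass a place to branch back to on each recursive call.
    static BasicBlock *splitOffNewEntry(Function *F, BasicBlock *OldEntry) {
      BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
      NewEntry->takeName(OldEntry);
      OldEntry->setName("tailrecurse");
      BranchInst::Create(OldEntry, NewEntry);  // unconditional br to the old entry
      return NewEntry;
    }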