author    Victor Hernandez <vhernandez@apple.com>  2009-10-23 21:09:37 +0000
committer Victor Hernandez <vhernandez@apple.com>  2009-10-23 21:09:37 +0000
commit    7b929dad59785f62a66f7c58615082f98441e95e (patch)
tree      fe3eb2cd3b56e7c3e89454d73e56986f3ce12ba2 /lib/Transforms
parent    4ab74cdc124af6b4f57c2d2d09548e01d64a1f34 (diff)
Remove AllocationInst. Since MallocInst went away, AllocaInst is the only remaining subclass of AllocationInst, so the intermediate class is no longer necessary.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@84969 91177308-0d34-0410-b5e6-96231b3b80d8
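For call sites, the change is mechanical. A minimal toy sketch of the old hierarchy and the rewrite it implies (simplified stand-in classes, not the real LLVM headers; dynamic_cast models LLVM's isa<>):

#include <cassert>

// Toy stand-ins for the pre-commit hierarchy (not the real LLVM classes):
//   Instruction <- AllocationInst <- { AllocaInst, MallocInst }
// With MallocInst already removed, AllocationInst has exactly one
// subclass left, so the intermediate type adds nothing and every
// isa<AllocationInst>/dyn_cast<AllocationInst> in the diff below is
// rewritten to name AllocaInst directly.
struct Instruction { virtual ~Instruction() {} };
struct AllocaInst : Instruction {};   // stack allocation; sole survivor

// Before: return isa<AllocationInst>(I);
// After:  return isa<AllocaInst>(I);   (dynamic_cast models isa<> here)
bool isStackAllocation(const Instruction *I) {
  return dynamic_cast<const AllocaInst *>(I) != nullptr;
}

int main() {
  AllocaInst A;
  Instruction Plain;
  assert(isStackAllocation(&A));
  assert(!isStackAllocation(&Plain));
  return 0;
}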
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                    8
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  18
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                   2
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  61
-rw-r--r--  lib/Transforms/Scalar/TailDuplication.cpp        2
5 files changed, 45 insertions, 46 deletions
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 8859324..00911b5 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -1243,7 +1243,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
UndefValue::get(LI->getType())));
continue;
@@ -1585,7 +1585,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading from a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
@@ -1653,7 +1653,7 @@ bool GVN::processInstruction(Instruction *I,
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
- } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+ } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
return false;
}
@@ -1803,7 +1803,7 @@ bool GVN::performPRE(Function& F) {
BE = CurrentBlock->end(); BI != BE; ) {
Instruction *CurInst = BI++;
- if (isa<AllocationInst>(CurInst) ||
+ if (isa<AllocaInst>(CurInst) ||
isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
CurInst->getType()->isVoidTy() ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
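Both GVN hunks above lean on the same fact: a load whose only memory dependency is the allocation itself reads uninitialized storage and may be folded to undef. A source-level illustration of that pattern (a hypothetical example, not taken from the commit):

// The alloca is fresh and nothing stores to it before the load, so the
// loaded value is undefined; GVN replaces the load with undef. PRE
// likewise skips allocas, since allocations are always uniquely numbered.
int fresh_load(void) {
  int x;       // the alloca: a fresh, uninitialized stack slot
  return x;    // load with no intervening store -> foldable to undef
}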
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index b41b5d4..21554c1 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -284,7 +284,7 @@ namespace {
Instruction *visitInvokeInst(InvokeInst &II);
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
- Instruction *visitAllocationInst(AllocationInst &AI);
+ Instruction *visitAllocaInst(AllocaInst &AI);
Instruction *visitFreeInst(FreeInst &FI);
Instruction *visitLoadInst(LoadInst &LI);
Instruction *visitStoreInst(StoreInst &SI);
@@ -425,7 +425,7 @@ namespace {
bool isSub, Instruction &I);
Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
bool isSigned, bool Inside, Instruction &IB);
- Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
+ Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
Instruction *MatchBSwap(BinaryOperator &I);
bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
@@ -7745,7 +7745,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
/// try to eliminate the cast by moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
- AllocationInst &AI) {
+ AllocaInst &AI) {
const PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder);
@@ -7817,7 +7817,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
}
- AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
+ AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
New->setAlignment(AI.getAlignment());
New->takeName(&AI);
@@ -8878,7 +8878,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// size, rewrite the allocation instruction to allocate the "right" type.
// There is no need to modify malloc calls because it is their bitcast that
// needs to be cleaned up.
- if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
return V;
@@ -11199,7 +11199,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (Offset == 0) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
- if (isa<AllocationInst>(BCI->getOperand(0)) ||
+ if (isa<AllocaInst>(BCI->getOperand(0)) ||
isMalloc(BCI->getOperand(0))) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
@@ -11238,21 +11238,21 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return 0;
}
-Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
+Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
const Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
- AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+ AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
// allocas if possible...also skip interleaved debug info
//
BasicBlock::iterator It = New;
- while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
+ while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
// Now that It is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
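The visitAllocaInst hunk canonicalizes an array allocation (alloca Ty, C) into a single allocation of an array type ([C x Ty]) followed by a getelementptr to element zero. A self-contained C++ analogue of why the two forms describe the same storage (an illustration, not LLVM code):

#include <cassert>
#include <cstdint>

// "alloca i32, i32 4" asks for four i32 slots via a size operand;
// "alloca [4 x i32]" is one slot whose type carries the count. The
// second form lets later passes (notably SROA) see the element count
// in the type, and old uses become a pointer to element 0.
int main() {
  int32_t arr[4] = {0, 1, 2, 3};  // the [4 x i32] form
  int32_t *p = &arr[0];           // gep %arr, 0, 0: the old pointer value
  assert(p[3] == arr[3]);         // identical storage either way
  return 0;
}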
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index b745097..e3dd54e 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -410,7 +410,7 @@ private:
void visitCallSite (CallSite CS);
void visitUnwindInst (TerminatorInst &I) { /*returns void*/ }
void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
- void visitAllocationInst(Instruction &I) { markOverdefined(&I); }
+ void visitAllocaInst (Instruction &I) { markOverdefined(&I); }
void visitVANextInst (Instruction &I) { markOverdefined(&I); }
void visitVAArgInst (Instruction &I) { markOverdefined(&I); }
void visitFreeInst (Instruction &I) { /*returns void*/ }
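The SCCP edit is a pure rename, but the markOverdefined call it carries encodes a real constraint: a stack address exists only at run time, so the constant-propagation lattice can never pin it to a constant. A small assumed illustration:

// SCCP can fold f() to the constant 5; in g() the alloca's address is a
// run-time value, so its lattice entry goes straight to "overdefined"
// and nothing reached through p can be constant-folded by SCCP alone.
int f(void) { return 2 + 3; }

int g(int n) {
  int slot = n;   // the alloca plus a store
  int *p = &slot; // an address is never a lattice constant
  return *p;      // so this load stays overdefined to SCCP
}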
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 610d874..2e3b694 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -100,32 +100,32 @@ namespace {
void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }
- int isSafeAllocaToScalarRepl(AllocationInst *AI);
+ int isSafeAllocaToScalarRepl(AllocaInst *AI);
- void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+ void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
AllocaInfo &Info);
- void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+ void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
AllocaInfo &Info);
- void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+ void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
unsigned OpNo, AllocaInfo &Info);
- void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
+ void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI,
AllocaInfo &Info);
- void DoScalarReplacement(AllocationInst *AI,
- std::vector<AllocationInst*> &WorkList);
+ void DoScalarReplacement(AllocaInst *AI,
+ std::vector<AllocaInst*> &WorkList);
void CleanupGEP(GetElementPtrInst *GEP);
- void CleanupAllocaUsers(AllocationInst *AI);
- AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
+ void CleanupAllocaUsers(AllocaInst *AI);
+ AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
- void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+ void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
- AllocationInst *AI,
+ AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
+ void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+ void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
@@ -135,7 +135,7 @@ namespace {
uint64_t Offset, IRBuilder<> &Builder);
Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
uint64_t Offset, IRBuilder<> &Builder);
- static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
+ static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
};
}
@@ -213,18 +213,18 @@ static uint64_t getNumSAElements(const Type *T) {
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
- std::vector<AllocationInst*> WorkList;
+ std::vector<AllocaInst*> WorkList;
// Scan the entry basic block, adding any allocas and mallocs to the worklist
BasicBlock &BB = F.getEntryBlock();
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
- if (AllocationInst *A = dyn_cast<AllocationInst>(I))
+ if (AllocaInst *A = dyn_cast<AllocaInst>(I))
WorkList.push_back(A);
// Process the worklist
bool Changed = false;
while (!WorkList.empty()) {
- AllocationInst *AI = WorkList.back();
+ AllocaInst *AI = WorkList.back();
WorkList.pop_back();
// Handle dead allocas trivially. These can be formed by SROA'ing arrays
@@ -335,8 +335,8 @@ bool SROA::performScalarRepl(Function &F) {
/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
-void SROA::DoScalarReplacement(AllocationInst *AI,
- std::vector<AllocationInst*> &WorkList) {
+void SROA::DoScalarReplacement(AllocaInst *AI,
+ std::vector<AllocaInst*> &WorkList) {
DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
SmallVector<AllocaInst*, 32> ElementAllocas;
if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
@@ -455,7 +455,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
/// getelementptr instruction of an array aggregate allocation. isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
-void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
AllocaInfo &Info) {
for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
I != E; ++I) {
@@ -520,7 +520,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
-void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
AllocaInfo &Info) {
if (BitCastInst *C = dyn_cast<BitCastInst>(User))
return isSafeUseOfBitCastedAllocation(C, AI, Info);
@@ -605,7 +605,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA. At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
-void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
unsigned OpNo, AllocaInfo &Info) {
// If not constant length, give up.
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
@@ -632,7 +632,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform.
-void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
+void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
AllocaInfo &Info) {
for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
UI != E; ++UI) {
@@ -690,7 +690,7 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element. Transform users of the cast to use the new values
/// instead.
-void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
while (UI != UE) {
@@ -729,7 +729,7 @@ void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
- AllocationInst *AI,
+ AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
// If this is a memcpy/memmove, construct the other pointer as the
@@ -905,8 +905,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation. Extract out the pieces of the stored
/// integer and store them individually.
-void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
- AllocationInst *AI,
+void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts){
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
@@ -1029,7 +1028,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer. Load the individual pieces to form the aggregate value.
-void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
@@ -1162,7 +1161,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
-int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
+int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// Loop over the use list of the alloca. We can only transform it if all of
// the users are safe to transform.
AllocaInfo Info;
@@ -1245,7 +1244,7 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
-void SROA::CleanupAllocaUsers(AllocationInst *AI) {
+void SROA::CleanupAllocaUsers(AllocaInst *AI) {
// At this point, we know that the end result will be SROA'd and promoted, so
// we can insert ugly code if required so long as sroa+mem2reg will clean it
// up.
@@ -1853,7 +1852,7 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
-Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
+Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
Instruction *TheCopy = 0;
if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
return TheCopy;
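As context for the long list of renamed signatures: SROA's job is to split an aggregate alloca into one alloca per element so that mem2reg can promote each piece to a register. A hedged source-level sketch of the effect (illustrative C++, not the pass's actual output):

struct Pair { int a; int b; };

// Before SROA: one aggregate alloca holds both fields.
int sum_before(int x, int y) {
  Pair p;
  p.a = x;
  p.b = y;
  return p.a + p.b;
}

// Conceptually after SROA + mem2reg: the aggregate is gone and each
// element lives as its own scalar value (ultimately a register).
int sum_after(int x, int y) {
  int p_a = x;   // element alloca, then promoted
  int p_b = y;
  return p_a + p_b;
}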
diff --git a/lib/Transforms/Scalar/TailDuplication.cpp b/lib/Transforms/Scalar/TailDuplication.cpp
index 68689d6..4864e23 100644
--- a/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/lib/Transforms/Scalar/TailDuplication.cpp
@@ -129,7 +129,7 @@ bool TailDup::shouldEliminateUnconditionalBranch(TerminatorInst *TI,
if (isa<CallInst>(I) || isa<InvokeInst>(I)) return false;
// Also alloca and malloc.
- if (isa<AllocationInst>(I)) return false;
+ if (isa<AllocaInst>(I)) return false;
// Some vector instructions can expand into a number of instructions.
if (isa<ShuffleVectorInst>(I) || isa<ExtractElementInst>(I) ||