author      Duncan Sands <baldrick@free.fr>    2010-02-16 11:11:14 +0000
committer   Duncan Sands <baldrick@free.fr>    2010-02-16 11:11:14 +0000
commit      1df9859c40492511b8aa4321eb76496005d3b75b (patch)
tree        3e65bf258ff243ac3c149c418c7f201fbc9097d6 /lib/Analysis
parent      30fb00aac02682cf1edef9f89b905621aa7a3c04 (diff)
There are two ways of checking whether a type is of a particular kind, for example
isa<PointerType>(T) and T->isPointerTy(). Convert most instances of the first form to the second form.
Requested by Chris.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96344 91177308-0d34-0410-b5e6-96231b3b80d8
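The commit message names two equivalent idioms. As a minimal illustration (not part of the patch; the helper name is hypothetical and the include paths assume the pre-llvm/IR header layout of 2010-era trunk), both forms below test whether a value's type is a pointer type; the patch simply standardizes on the Type predicate:

```cpp
// Illustrative sketch only -- not from the patch.
#include "llvm/Value.h"            // llvm::Value (pre-llvm/IR header layout)
#include "llvm/Type.h"             // llvm::Type::isPointerTy()
#include "llvm/DerivedTypes.h"     // llvm::PointerType, needed for the isa<> form
#include "llvm/Support/Casting.h"  // llvm::isa<>

using namespace llvm;

// Hypothetical helper: returns true iff V is of pointer type.
static bool ValueIsPointer(const Value *V) {
  // Old form: ask the casting machinery whether the type is a PointerType.
  bool ViaIsa = isa<PointerType>(V->getType());

  // New form preferred by this commit: ask the Type itself.
  bool ViaPredicate = V->getType()->isPointerTy();

  // The two are equivalent; the predicate reads more directly and does not
  // require DerivedTypes.h just to name the PointerType class.
  return ViaIsa && ViaPredicate;
}
```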
Diffstat (limited to 'lib/Analysis')

-rw-r--r--  lib/Analysis/AliasAnalysisEvaluator.cpp        |  6
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp            |  4
-rw-r--r--  lib/Analysis/CaptureTracking.cpp               |  2
-rw-r--r--  lib/Analysis/ConstantFolding.cpp               |  4
-rw-r--r--  lib/Analysis/IPA/Andersens.cpp                 | 68
-rw-r--r--  lib/Analysis/IPA/GlobalsModRef.cpp             |  4
-rw-r--r--  lib/Analysis/InlineCost.cpp                    |  4
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp      |  6
-rw-r--r--  lib/Analysis/PointerTracking.cpp               |  2
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp               | 48
-rw-r--r--  lib/Analysis/ScalarEvolutionAliasAnalysis.cpp  |  2
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp       |  6
-rw-r--r--  lib/Analysis/ValueTracking.cpp                 | 12

13 files changed, 84 insertions, 84 deletions
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp index 6b0a956..308b9e3 100644 --- a/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -115,11 +115,11 @@ bool AAEval::runOnFunction(Function &F) { SetVector<CallSite> CallSites; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) - if (isa<PointerType>(I->getType())) // Add all pointer arguments + if (I->getType()->isPointerTy()) // Add all pointer arguments Pointers.insert(I); for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { - if (isa<PointerType>(I->getType())) // Add all pointer instructions + if (I->getType()->isPointerTy()) // Add all pointer instructions Pointers.insert(&*I); Instruction &Inst = *I; User::op_iterator OI = Inst.op_begin(); @@ -128,7 +128,7 @@ bool AAEval::runOnFunction(Function &F) { isa<Function>(CS.getCalledValue())) ++OI; // Skip actual functions for direct function calls. for (; OI != Inst.op_end(); ++OI) - if (isa<PointerType>((*OI)->getType()) && !isa<ConstantPointerNull>(*OI)) + if ((*OI)->getType()->isPointerTy() && !isa<ConstantPointerNull>(*OI)) Pointers.insert(*OI); if (CS.getInstruction()) CallSites.insert(CS); diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 36b831c..31a649d 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -290,7 +290,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) { for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end(); CI != CE; ++CI, ++ArgNo) { // Only look at the no-capture pointer arguments. - if (!isa<PointerType>((*CI)->getType()) || + if (!(*CI)->getType()->isPointerTy() || !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture)) continue; @@ -662,7 +662,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size, // Are we checking for alias of the same value? if (V1 == V2) return MustAlias; - if (!isa<PointerType>(V1->getType()) || !isa<PointerType>(V2->getType())) + if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy()) return NoAlias; // Scalars cannot alias each other // Figure out what objects these things are pointing to if we can. diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp index 10a8b11..8767c18 100644 --- a/lib/Analysis/CaptureTracking.cpp +++ b/lib/Analysis/CaptureTracking.cpp @@ -44,7 +44,7 @@ static int const Threshold = 20; /// counts as capturing it or not. 
bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures) { - assert(isa<PointerType>(V->getType()) && "Capture is for pointers only!"); + assert(V->getType()->isPointerTy() && "Capture is for pointers only!"); SmallVector<Use*, Threshold> Worklist; SmallSet<Use*, Threshold> Visited; int Count = 0; diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 808e6fa..6bab5ec 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -359,7 +359,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, MapTy = Type::getInt32PtrTy(C->getContext()); else if (LoadTy->isDoubleTy()) MapTy = Type::getInt64PtrTy(C->getContext()); - else if (isa<VectorType>(LoadTy)) { + else if (LoadTy->isVectorTy()) { MapTy = IntegerType::get(C->getContext(), TD.getTypeAllocSizeInBits(LoadTy)); MapTy = PointerType::getUnqual(MapTy); @@ -605,7 +605,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps, SmallVector<Constant*, 32> NewIdxs; do { if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) { - if (isa<PointerType>(ATy)) { + if (ATy->isPointerTy()) { // The only pointer indexing we'll do is on the first index of the GEP. if (!NewIdxs.empty()) break; diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp index 4180206..2e35a56 100644 --- a/lib/Analysis/IPA/Andersens.cpp +++ b/lib/Analysis/IPA/Andersens.cpp @@ -750,7 +750,7 @@ void Andersens::IdentifyObjects(Module &M) { // The function itself is a memory object. unsigned First = NumObjects; ValueNodes[F] = NumObjects++; - if (isa<PointerType>(F->getFunctionType()->getReturnType())) + if (F->getFunctionType()->getReturnType()->isPointerTy()) ReturnNodes[F] = NumObjects++; if (F->getFunctionType()->isVarArg()) VarargNodes[F] = NumObjects++; @@ -760,7 +760,7 @@ void Andersens::IdentifyObjects(Module &M) { for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) { - if (isa<PointerType>(I->getType())) + if (I->getType()->isPointerTy()) ValueNodes[I] = NumObjects++; } MaxK[First] = NumObjects - First; @@ -771,7 +771,7 @@ void Andersens::IdentifyObjects(Module &M) { for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) { // If this is an heap or stack allocation, create a node for the memory // object. - if (isa<PointerType>(II->getType())) { + if (II->getType()->isPointerTy()) { ValueNodes[&*II] = NumObjects++; if (AllocaInst *AI = dyn_cast<AllocaInst>(&*II)) ObjectNodes[AI] = NumObjects++; @@ -801,7 +801,7 @@ void Andersens::IdentifyObjects(Module &M) { /// getNodeForConstantPointer - Return the node corresponding to the constant /// pointer itself. unsigned Andersens::getNodeForConstantPointer(Constant *C) { - assert(isa<PointerType>(C->getType()) && "Not a constant pointer!"); + assert(C->getType()->isPointerTy() && "Not a constant pointer!"); if (isa<ConstantPointerNull>(C) || isa<UndefValue>(C)) return NullPtr; @@ -828,7 +828,7 @@ unsigned Andersens::getNodeForConstantPointer(Constant *C) { /// getNodeForConstantPointerTarget - Return the node POINTED TO by the /// specified constant pointer. 
unsigned Andersens::getNodeForConstantPointerTarget(Constant *C) { - assert(isa<PointerType>(C->getType()) && "Not a constant pointer!"); + assert(C->getType()->isPointerTy() && "Not a constant pointer!"); if (isa<ConstantPointerNull>(C)) return NullObject; @@ -857,7 +857,7 @@ unsigned Andersens::getNodeForConstantPointerTarget(Constant *C) { void Andersens::AddGlobalInitializerConstraints(unsigned NodeIndex, Constant *C) { if (C->getType()->isSingleValueType()) { - if (isa<PointerType>(C->getType())) + if (C->getType()->isPointerTy()) Constraints.push_back(Constraint(Constraint::Copy, NodeIndex, getNodeForConstantPointer(C))); } else if (C->isNullValue()) { @@ -878,7 +878,7 @@ void Andersens::AddGlobalInitializerConstraints(unsigned NodeIndex, /// returned by this function. void Andersens::AddConstraintsForNonInternalLinkage(Function *F) { for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) - if (isa<PointerType>(I->getType())) + if (I->getType()->isPointerTy()) // If this is an argument of an externally accessible function, the // incoming pointer might point to anything. Constraints.push_back(Constraint(Constraint::Copy, getNode(I), @@ -940,8 +940,8 @@ bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) { const FunctionType *FTy = F->getFunctionType(); if (FTy->getNumParams() > 1 && - isa<PointerType>(FTy->getParamType(0)) && - isa<PointerType>(FTy->getParamType(1))) { + FTy->getParamType(0)->isPointerTy() && + FTy->getParamType(1)->isPointerTy()) { // *Dest = *Src, which requires an artificial graph node to represent the // constraint. It is broken up into *Dest = temp, temp = *Src @@ -966,7 +966,7 @@ bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) { F->getName() == "strtok") { const FunctionType *FTy = F->getFunctionType(); if (FTy->getNumParams() > 0 && - isa<PointerType>(FTy->getParamType(0))) { + FTy->getParamType(0)->isPointerTy()) { Constraints.push_back(Constraint(Constraint::Copy, getNode(CS.getInstruction()), getNode(CS.getArgument(0)))); @@ -984,7 +984,7 @@ bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) { /// true. bool Andersens::AnalyzeUsesOfFunction(Value *V) { - if (!isa<PointerType>(V->getType())) return true; + if (!V->getType()->isPointerTy()) return true; for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) if (isa<LoadInst>(*UI)) { @@ -1063,7 +1063,7 @@ void Andersens::CollectConstraints(Module &M) { for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { // Set up the return value node. - if (isa<PointerType>(F->getFunctionType()->getReturnType())) + if (F->getFunctionType()->getReturnType()->isPointerTy()) GraphNodes[getReturnNode(F)].setValue(F); if (F->getFunctionType()->isVarArg()) GraphNodes[getVarargNode(F)].setValue(F); @@ -1071,7 +1071,7 @@ void Andersens::CollectConstraints(Module &M) { // Set up incoming argument nodes. for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) - if (isa<PointerType>(I->getType())) + if (I->getType()->isPointerTy()) getNodeValue(*I); // At some point we should just add constraints for the escaping functions @@ -1087,7 +1087,7 @@ void Andersens::CollectConstraints(Module &M) { visit(F); } else { // External functions that return pointers return the universal set. 
- if (isa<PointerType>(F->getFunctionType()->getReturnType())) + if (F->getFunctionType()->getReturnType()->isPointerTy()) Constraints.push_back(Constraint(Constraint::Copy, getReturnNode(F), UniversalSet)); @@ -1096,7 +1096,7 @@ void Andersens::CollectConstraints(Module &M) { // stored into them. for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) - if (isa<PointerType>(I->getType())) { + if (I->getType()->isPointerTy()) { // Pointers passed into external functions could have anything stored // through them. Constraints.push_back(Constraint(Constraint::Store, getNode(I), @@ -1159,7 +1159,7 @@ void Andersens::visitAlloc(Instruction &I) { } void Andersens::visitReturnInst(ReturnInst &RI) { - if (RI.getNumOperands() && isa<PointerType>(RI.getOperand(0)->getType())) + if (RI.getNumOperands() && RI.getOperand(0)->getType()->isPointerTy()) // return V --> <Copy/retval{F}/v> Constraints.push_back(Constraint(Constraint::Copy, getReturnNode(RI.getParent()->getParent()), @@ -1167,14 +1167,14 @@ void Andersens::visitReturnInst(ReturnInst &RI) { } void Andersens::visitLoadInst(LoadInst &LI) { - if (isa<PointerType>(LI.getType())) + if (LI.getType()->isPointerTy()) // P1 = load P2 --> <Load/P1/P2> Constraints.push_back(Constraint(Constraint::Load, getNodeValue(LI), getNode(LI.getOperand(0)))); } void Andersens::visitStoreInst(StoreInst &SI) { - if (isa<PointerType>(SI.getOperand(0)->getType())) + if (SI.getOperand(0)->getType()->isPointerTy()) // store P1, P2 --> <Store/P2/P1> Constraints.push_back(Constraint(Constraint::Store, getNode(SI.getOperand(1)), @@ -1188,7 +1188,7 @@ void Andersens::visitGetElementPtrInst(GetElementPtrInst &GEP) { } void Andersens::visitPHINode(PHINode &PN) { - if (isa<PointerType>(PN.getType())) { + if (PN.getType()->isPointerTy()) { unsigned PNN = getNodeValue(PN); for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) // P1 = phi P2, P3 --> <Copy/P1/P2>, <Copy/P1/P3>, ... 
@@ -1199,8 +1199,8 @@ void Andersens::visitPHINode(PHINode &PN) { void Andersens::visitCastInst(CastInst &CI) { Value *Op = CI.getOperand(0); - if (isa<PointerType>(CI.getType())) { - if (isa<PointerType>(Op->getType())) { + if (CI.getType()->isPointerTy()) { + if (Op->getType()->isPointerTy()) { // P1 = cast P2 --> <Copy/P1/P2> Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(CI), getNode(CI.getOperand(0)))); @@ -1213,7 +1213,7 @@ void Andersens::visitCastInst(CastInst &CI) { getNodeValue(CI); #endif } - } else if (isa<PointerType>(Op->getType())) { + } else if (Op->getType()->isPointerTy()) { // int = cast P1 --> <Copy/Univ/P1> #if 0 Constraints.push_back(Constraint(Constraint::Copy, @@ -1226,7 +1226,7 @@ void Andersens::visitCastInst(CastInst &CI) { } void Andersens::visitSelectInst(SelectInst &SI) { - if (isa<PointerType>(SI.getType())) { + if (SI.getType()->isPointerTy()) { unsigned SIN = getNodeValue(SI); // P1 = select C, P2, P3 ---> <Copy/P1/P2>, <Copy/P1/P3> Constraints.push_back(Constraint(Constraint::Copy, SIN, @@ -1254,9 +1254,9 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { if (F && F->isDeclaration() && AddConstraintsForExternalCall(CS, F)) return; - if (isa<PointerType>(CS.getType())) { + if (CS.getType()->isPointerTy()) { unsigned CSN = getNode(CS.getInstruction()); - if (!F || isa<PointerType>(F->getFunctionType()->getReturnType())) { + if (!F || F->getFunctionType()->getReturnType()->isPointerTy()) { if (IsDeref) Constraints.push_back(Constraint(Constraint::Load, CSN, getNode(CallValue), CallReturnPos)); @@ -1269,7 +1269,7 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { Constraints.push_back(Constraint(Constraint::Copy, CSN, UniversalSet)); } - } else if (F && isa<PointerType>(F->getFunctionType()->getReturnType())) { + } else if (F && F->getFunctionType()->getReturnType()->isPointerTy()) { #if FULL_UNIVERSAL Constraints.push_back(Constraint(Constraint::Copy, UniversalSet, @@ -1291,7 +1291,7 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { for (; AI != AE && ArgI != ArgE; ++AI, ++ArgI) { #if !FULL_UNIVERSAL - if (external && isa<PointerType>((*ArgI)->getType())) + if (external && (*ArgI)->getType()->isPointerTy()) { // Add constraint that ArgI can now point to anything due to // escaping, as can everything it points to. The second portion of @@ -1301,8 +1301,8 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { UniversalSet)); } #endif - if (isa<PointerType>(AI->getType())) { - if (isa<PointerType>((*ArgI)->getType())) { + if (AI->getType()->isPointerTy()) { + if ((*ArgI)->getType()->isPointerTy()) { // Copy the actual argument into the formal argument. Constraints.push_back(Constraint(Constraint::Copy, getNode(AI), getNode(*ArgI))); @@ -1310,7 +1310,7 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { Constraints.push_back(Constraint(Constraint::Copy, getNode(AI), UniversalSet)); } - } else if (isa<PointerType>((*ArgI)->getType())) { + } else if ((*ArgI)->getType()->isPointerTy()) { #if FULL_UNIVERSAL Constraints.push_back(Constraint(Constraint::Copy, UniversalSet, @@ -1326,7 +1326,7 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { //Indirect Call unsigned ArgPos = CallFirstArgPos; for (; ArgI != ArgE; ++ArgI) { - if (isa<PointerType>((*ArgI)->getType())) { + if ((*ArgI)->getType()->isPointerTy()) { // Copy the actual argument into the formal argument. 
Constraints.push_back(Constraint(Constraint::Store, getNode(CallValue), @@ -1341,14 +1341,14 @@ void Andersens::AddConstraintsForCall(CallSite CS, Function *F) { // Copy all pointers passed through the varargs section to the varargs node. if (F && F->getFunctionType()->isVarArg()) for (; ArgI != ArgE; ++ArgI) - if (isa<PointerType>((*ArgI)->getType())) + if ((*ArgI)->getType()->isPointerTy()) Constraints.push_back(Constraint(Constraint::Copy, getVarargNode(F), getNode(*ArgI))); // If more arguments are passed in than we track, just drop them on the floor. } void Andersens::visitCallSite(CallSite CS) { - if (isa<PointerType>(CS.getType())) + if (CS.getType()->isPointerTy()) getNodeValue(*CS.getInstruction()); if (Function *F = CS.getCalledFunction()) { @@ -2782,7 +2782,7 @@ void Andersens::PrintNode(const Node *N) const { assert(N->getValue() != 0 && "Never set node label!"); Value *V = N->getValue(); if (Function *F = dyn_cast<Function>(V)) { - if (isa<PointerType>(F->getFunctionType()->getReturnType()) && + if (F->getFunctionType()->getReturnType()->isPointerTy() && N == &GraphNodes[getReturnNode(F)]) { dbgs() << F->getName() << ":retval"; return; diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp index ec94bc8..7b43089 100644 --- a/lib/Analysis/IPA/GlobalsModRef.cpp +++ b/lib/Analysis/IPA/GlobalsModRef.cpp @@ -213,7 +213,7 @@ void GlobalsModRef::AnalyzeGlobals(Module &M) { ++NumNonAddrTakenGlobalVars; // If this global holds a pointer type, see if it is an indirect global. - if (isa<PointerType>(I->getType()->getElementType()) && + if (I->getType()->getElementType()->isPointerTy() && AnalyzeIndirectGlobalMemory(I)) ++NumIndirectGlobalVars; } @@ -231,7 +231,7 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V, std::vector<Function*> &Readers, std::vector<Function*> &Writers, GlobalValue *OkayStoreDest) { - if (!isa<PointerType>(V->getType())) return true; + if (!V->getType()->isPointerTy()) return true; for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) { diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index 972d034..ca50a17 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -84,7 +84,7 @@ unsigned InlineCostAnalyzer::FunctionInfo:: // unsigned InlineCostAnalyzer::FunctionInfo:: CountCodeReductionForAlloca(Value *V) { - if (!isa<PointerType>(V->getType())) return 0; // Not a pointer + if (!V->getType()->isPointerTy()) return 0; // Not a pointer unsigned Reduction = 0; for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ Instruction *I = cast<Instruction>(*UI); @@ -175,7 +175,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) { this->usesDynamicAlloca = true; } - if (isa<ExtractElementInst>(II) || isa<VectorType>(II->getType())) + if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy()) ++NumVectorInsts; if (const CastInst *CI = dyn_cast<CastInst>(II)) { diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index 2d74709d..183edf4 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -580,7 +580,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { void MemoryDependenceAnalysis:: getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB, SmallVectorImpl<NonLocalDepResult> &Result) { - assert(isa<PointerType>(Pointer->getType()) && + 
assert(Pointer->getType()->isPointerTy() && "Can't get pointer deps of a non-pointer!"); Result.clear(); @@ -1009,7 +1009,7 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) { /// in more places that cached info does not necessarily keep. void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) { // If Ptr isn't really a pointer, just ignore it. - if (!isa<PointerType>(Ptr->getType())) return; + if (!Ptr->getType()->isPointerTy()) return; // Flush store info for the pointer. RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false)); // Flush load info for the pointer. @@ -1050,7 +1050,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { // Remove it from both the load info and the store info. The instruction // can't be in either of these maps if it is non-pointer. - if (isa<PointerType>(RemInst->getType())) { + if (RemInst->getType()->isPointerTy()) { RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false)); RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true)); } diff --git a/lib/Analysis/PointerTracking.cpp b/lib/Analysis/PointerTracking.cpp index 8da07e7..ce7ac89 100644 --- a/lib/Analysis/PointerTracking.cpp +++ b/lib/Analysis/PointerTracking.cpp @@ -231,7 +231,7 @@ void PointerTracking::print(raw_ostream &OS, const Module* M) const { // this should be safe for the same reason its safe for SCEV. PointerTracking &PT = *const_cast<PointerTracking*>(this); for (inst_iterator I=inst_begin(*FF), E=inst_end(*FF); I != E; ++I) { - if (!isa<PointerType>(I->getType())) + if (!I->getType()->isPointerTy()) continue; Value *Base; const SCEV *Limit, *Offset; diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 9ee7d3a..82200cd 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -214,8 +214,8 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID, const SCEV *op, const Type *ty) : SCEVCastExpr(ID, scTruncate, op, ty) { - assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate non-integer value!"); } @@ -226,8 +226,8 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const { SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID, const SCEV *op, const Type *ty) : SCEVCastExpr(ID, scZeroExtend, op, ty) { - assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot zero extend non-integer value!"); } @@ -238,8 +238,8 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const { SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID, const SCEV *op, const Type *ty) : SCEVCastExpr(ID, scSignExtend, op, ty) { - assert((Op->getType()->isIntegerTy() || isa<PointerType>(Op->getType())) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot sign extend non-integer value!"); } @@ -416,7 +416,7 @@ bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const { 
cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); // Ignore vector types here so that ScalarEvolutionExpander doesn't // emit getelementptrs that index into vectors. - if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) { + if (Ty->isStructTy() || Ty->isArrayTy()) { CTy = Ty; FieldNo = CE->getOperand(2); return true; @@ -518,9 +518,9 @@ namespace { // Order pointer values after integer values. This helps SCEVExpander // form GEPs. - if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType())) + if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy()) return false; - if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType())) + if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy()) return true; // Compare getValueID values. @@ -2308,7 +2308,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) { /// has access to target-specific information. bool ScalarEvolution::isSCEVable(const Type *Ty) const { // Integers and pointers are always SCEVable. - return Ty->isIntegerTy() || isa<PointerType>(Ty); + return Ty->isIntegerTy() || Ty->isPointerTy(); } /// getTypeSizeInBits - Return the size in bits of the specified type, @@ -2326,7 +2326,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { // The only other support type is pointer. Without TargetData, conservatively // assume pointers are 64-bit. - assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!"); + assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); return 64; } @@ -2341,7 +2341,7 @@ const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { return Ty; // The only other support type is pointer. - assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!"); + assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); if (TD) return TD->getIntPtrType(getContext()); // Without TargetData, conservatively assume pointers are 64-bit. 
@@ -2412,8 +2412,8 @@ const SCEV * ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion @@ -2429,8 +2429,8 @@ const SCEV * ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion @@ -2445,8 +2445,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, const SCEV * ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or zero extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrZeroExtend cannot truncate!"); @@ -2461,8 +2461,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { const SCEV * ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or sign extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrSignExtend cannot truncate!"); @@ -2478,8 +2478,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { const SCEV * ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or any extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrAnyExtend cannot truncate!"); @@ -2493,8 +2493,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { const SCEV * ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); - assert((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && - (Ty->isIntegerTy() || isa<PointerType>(Ty)) && + assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && + (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate or noop with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && "getTruncateOrNoop cannot extend!"); diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp index 498c4a8..564ec4b 100644 --- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp +++ 
b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp @@ -89,7 +89,7 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) { } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { // If there's a pointer operand, it'll be sorted at the end of the list. const SCEV *Last = A->getOperand(A->getNumOperands()-1); - if (isa<PointerType>(Last->getType())) + if (Last->getType()->isPointerTy()) return GetBaseValue(Last); } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { // This is a leaf node. diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index c2e1f89..ccd6d6b 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -536,7 +536,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) { // pointer type, if there is one, or the last operand otherwise. int PIdx = 0; for (; PIdx != NumOperands - 1; ++PIdx) - if (isa<PointerType>(S->getOperand(PIdx)->getType())) break; + if (S->getOperand(PIdx)->getType()->isPointerTy()) break; // Expand code for the operand that we chose. Value *V = expand(S->getOperand(PIdx)); @@ -702,7 +702,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, // negative, insert a sub instead of an add for the increment (unless it's a // constant, because subtracts of constants are canonicalized to adds). const SCEV *Step = Normalized->getStepRecurrence(SE); - bool isPointer = isa<PointerType>(ExpandTy); + bool isPointer = ExpandTy->isPointerTy(); bool isNegative = !isPointer && isNonConstantNegative(Step); if (isNegative) Step = SE.getNegativeSCEV(Step); @@ -852,7 +852,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { PHINode *CanonicalIV = 0; if (PHINode *PN = L->getCanonicalInductionVariable()) if (SE.isSCEVable(PN->getType()) && - isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) && + SE.getEffectiveSCEVType(PN->getType())->isIntegerTy() && SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty)) CanonicalIV = PN; diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index 7cc9c0d..09344a3 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -49,7 +49,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask, assert(V && "No Value?"); assert(Depth <= MaxDepth && "Limit Search Depth"); unsigned BitWidth = Mask.getBitWidth(); - assert((V->getType()->isIntOrIntVectorTy() || isa<PointerType>(V->getType())) + assert((V->getType()->isIntOrIntVectorTy() || V->getType()->isPointerTy()) && "Not integer or pointer type!"); assert((!TD || TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) && @@ -249,7 +249,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask, unsigned SrcBitWidth; // Note that we handle pointer operands here because of inttoptr/ptrtoint // which fall through here. 
- if (isa<PointerType>(SrcTy)) + if (SrcTy->isPointerTy()) SrcBitWidth = TD->getTypeSizeInBits(SrcTy); else SrcBitWidth = SrcTy->getScalarSizeInBits(); @@ -269,10 +269,10 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask, } case Instruction::BitCast: { const Type *SrcTy = I->getOperand(0)->getType(); - if ((SrcTy->isIntegerTy() || isa<PointerType>(SrcTy)) && + if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && // TODO: For now, not handling conversions like: // (bitcast i64 %x to <2 x i32>) - !isa<VectorType>(I->getType())) { + !I->getType()->isVectorTy()) { ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD, Depth+1); return; @@ -980,7 +980,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { /// may not be represented in the result. static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset, const TargetData *TD, unsigned Depth) { - assert(isa<IntegerType>(V->getType()) && "Not an integer value"); + assert(V->getType()->isIntegerTy() && "Not an integer value"); // Limit our recursion depth. if (Depth == 6) { @@ -1253,7 +1253,7 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin, if (idx_begin == idx_end) return V; // We have indices, so V should have an indexable type - assert((isa<StructType>(V->getType()) || isa<ArrayType>(V->getType())) + assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && "Not looking at a struct or array?"); assert(ExtractValueInst::getIndexedType(V->getType(), idx_begin, idx_end) && "Invalid indices for type?"); |