author     Chris Lattner <sabre@nondot.org>  2004-04-05 01:30:19 +0000
committer  Chris Lattner <sabre@nondot.org>  2004-04-05 01:30:19 +0000
commit     28977af72a11fcad5d1b54d7a96b3df02828f6fc (patch)
tree       671a8fa4df839cb653ebd22dda8fa39639b3afa7 /lib
parent     830b6f98686f40f86811dceb5497433ebac385e1 (diff)
Support getelementptr instructions which use uints to index into structure
types and which can use arbitrary 32- and 64-bit integer types to index into
sequential types.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@12653 91177308-0d34-0410-b5e6-96231b3b80d8
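For orientation, the index forms this change is meant to accept can be sketched
with the 2004-era C++ API that the patch itself uses below; the pointer value P
and the insertion point InsertBefore are hypothetical names for this sketch, but
the calls are the ones that appear in the hunks:

    // Build the equivalent of "getelementptr %P, int 0, uint 1".  Before this
    // patch the leading (sequential) index had to be a long and the structure
    // index a ubyte; afterwards int/uint/long/ulong sequential indices and
    // uint structure indices are intended to be legal.
    std::vector<Value*> Idx;
    Idx.push_back(Constant::getNullValue(Type::IntTy));  // index through the pointer
    Idx.push_back(ConstantUInt::get(Type::UIntTy, 1));   // select struct field #1
    Value *FieldPtr = new GetElementPtrInst(P, Idx, "field", InsertBefore);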
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp                 |  49
-rw-r--r--  lib/Analysis/DataStructure/Local.cpp                |   3
-rw-r--r--  lib/Target/TargetData.cpp                           |  24
-rw-r--r--  lib/Target/X86/InstSelectSimple.cpp                 |  14
-rw-r--r--  lib/Target/X86/X86ISelSimple.cpp                    |  14
-rw-r--r--  lib/Transforms/ExprTypeConvert.cpp                  |  31
-rw-r--r--  lib/Transforms/Instrumentation/ProfilingUtils.cpp   |   6
-rw-r--r--  lib/Transforms/LevelRaise.cpp                       |   7
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp      | 133
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                      |   2
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp      |   2
-rw-r--r--  lib/Transforms/TransformInternals.cpp               | 110
-rw-r--r--  lib/Transforms/Utils/LowerInvoke.cpp                |  10
-rw-r--r--  lib/VMCore/ConstantFold.cpp                         |   8
-rw-r--r--  lib/VMCore/Type.cpp                                 |   9
-rw-r--r--  lib/VMCore/iMemory.cpp                              |   7
16 files changed, 219 insertions, 210 deletions
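Before the per-file hunks, it may help to see the shape of the central change:
index walking is now driven by the pointed-to type, via the new
GetElementPtrTypeIterator, rather than by the type of the index itself. The
following is a condensed, illustrative paraphrase of the rewritten loop in
lib/Target/TargetData.cpp's getIndexedOffset; it is not standalone code, and
apart from the struct-offset lookup (assumed here to go through
getStructLayout) every identifier is taken from the hunk further down:

    generic_gep_type_iterator<std::vector<Value*>::const_iterator>
      TI = gep_type_begin(ptrTy, Idx.begin(), Idx.end());
    for (unsigned CurIDX = 0; CurIDX != Idx.size(); ++CurIDX, ++TI) {
      if (const StructType *STy = dyn_cast<StructType>(*TI)) {
        // Structure indices must now be uint constants.
        unsigned FieldNo = cast<ConstantUInt>(Idx[CurIDX])->getValue();
        Result += getStructLayout(STy)->MemberOffsets[FieldNo];
        Ty = STy->getElementType(FieldNo);
      } else {
        // Any 32- or 64-bit integer constant may index a sequential type.
        Ty = cast<SequentialType>(Ty)->getElementType();
        int64_t arrayIdx = cast<ConstantInt>(Idx[CurIDX])->getRawValue();
        Result += arrayIdx * (int64_t)getTypeSize(Ty);
      }
    }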
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 4a8a680..f9c63cd 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -353,6 +353,19 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, return MayAlias; } +static bool ValuesEqual(Value *V1, Value *V2) { + if (V1->getType() == V2->getType()) + return V1 == V2; + if (Constant *C1 = dyn_cast<Constant>(V1)) + if (Constant *C2 = dyn_cast<Constant>(V2)) { + // Sign extend the constants to long types. + C1 = ConstantExpr::getSignExtend(C1, Type::LongTy); + C2 = ConstantExpr::getSignExtend(C2, Type::LongTy); + return C1 == C2; + } + return false; +} + /// CheckGEPInstructions - Check two GEP instructions with known must-aliasing /// base pointers. This checks to see if the index expressions preclude the /// pointers from aliasing... @@ -376,7 +389,7 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector<Value*> &GEP1Ops, unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands); unsigned UnequalOper = 0; while (UnequalOper != MinOperands && - GEP1Ops[UnequalOper] == GEP2Ops[UnequalOper]) { + ValuesEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) { // Advance through the type as we go... ++UnequalOper; if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty)) @@ -418,7 +431,7 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector<Value*> &GEP1Ops, if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work... // Scan for the first operand that is constant and unequal in the - // two getelemenptrs... + // two getelementptrs... unsigned FirstConstantOper = UnequalOper; for (; FirstConstantOper != MinOperands; ++FirstConstantOper) { const Value *G1Oper = GEP1Ops[FirstConstantOper]; @@ -427,13 +440,23 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector<Value*> &GEP1Ops, if (G1Oper != G2Oper) // Found non-equal constant indexes... if (Constant *G1OC = dyn_cast<Constant>(const_cast<Value*>(G1Oper))) if (Constant *G2OC = dyn_cast<Constant>(const_cast<Value*>(G2Oper))) { - // Make sure they are comparable (ie, not constant expressions)... - // and make sure the GEP with the smaller leading constant is GEP1. - Constant *Compare = ConstantExpr::get(Instruction::SetGT, G1OC, G2OC); - if (ConstantBool *CV = dyn_cast<ConstantBool>(Compare)) { - if (CV->getValue()) // If they are comparable and G2 > G1 - std::swap(GEP1Ops, GEP2Ops); // Make GEP1 < GEP2 - break; + if (G1OC->getType() != G2OC->getType()) { + // Sign extend both operands to long. + G1OC = ConstantExpr::getSignExtend(G1OC, Type::LongTy); + G2OC = ConstantExpr::getSignExtend(G2OC, Type::LongTy); + GEP1Ops[FirstConstantOper] = G1OC; + GEP2Ops[FirstConstantOper] = G2OC; + } + + if (G1OC != G2OC) { + // Make sure they are comparable (ie, not constant expressions)... + // and make sure the GEP with the smaller leading constant is GEP1. + Constant *Compare = ConstantExpr::getSetGT(G1OC, G2OC); + if (ConstantBool *CV = dyn_cast<ConstantBool>(Compare)) { + if (CV->getValue()) // If they are comparable and G2 > G1 + std::swap(GEP1Ops, GEP2Ops); // Make GEP1 < GEP2 + break; + } } } BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->getTypeAtIndex(G1Oper); @@ -443,7 +466,7 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector<Value*> &GEP1Ops, // point, the GEP instructions have run through all of their operands, and we // haven't found evidence that there are any deltas between the GEP's. // However, one GEP may have more operands than the other. 
If this is the - // case, there may still be hope. This this now. + // case, there may still be hope. Check this now. if (FirstConstantOper == MinOperands) { // Make GEP1Ops be the longer one if there is a longer one. if (GEP1Ops.size() < GEP2Ops.size()) @@ -494,10 +517,8 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector<Value*> &GEP1Ops, // initial equal sequence of variables into constant zeros to start with. for (unsigned i = 0; i != FirstConstantOper; ++i) { if (!isa<Constant>(GEP1Ops[i]) || isa<ConstantExpr>(GEP1Ops[i]) || - !isa<Constant>(GEP2Ops[i]) || isa<ConstantExpr>(GEP2Ops[i])) { - GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType()); - GEP2Ops[i] = Constant::getNullValue(GEP2Ops[i]->getType()); - } + !isa<Constant>(GEP2Ops[i]) || isa<ConstantExpr>(GEP2Ops[i])) + GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Type::UIntTy); } // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok diff --git a/lib/Analysis/DataStructure/Local.cpp b/lib/Analysis/DataStructure/Local.cpp index 26bd17b..c969cc6 100644 --- a/lib/Analysis/DataStructure/Local.cpp +++ b/lib/Analysis/DataStructure/Local.cpp @@ -350,7 +350,8 @@ void GraphBuilder::visitGetElementPtrInst(User &GEP) { #if 0 // Handle the pointer index specially... if (GEP.getNumOperands() > 1 && - GEP.getOperand(1) != ConstantSInt::getNullValue(Type::LongTy)) { + (!isa<Constant>(GEP.getOperand(1)) || + !cast<Constant>(GEP.getOperand(1))->isNullValue())) { // If we already know this is an array being accessed, don't do anything... if (!TopTypeRec.isArray) { diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp index 271b6d8..78d5d2e 100644 --- a/lib/Target/TargetData.cpp +++ b/lib/Target/TargetData.cpp @@ -20,6 +20,7 @@ #include "llvm/Module.h" #include "llvm/DerivedTypes.h" #include "llvm/Constants.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" using namespace llvm; // Handle the Pass registration stuff necessary to use TargetData's. @@ -218,17 +219,11 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, assert(isa<PointerType>(Ty) && "Illegal argument for getIndexedOffset()"); uint64_t Result = 0; - for (unsigned CurIDX = 0; CurIDX != Idx.size(); ++CurIDX) { - if (Idx[CurIDX]->getType() == Type::LongTy) { - // Update Ty to refer to current element - Ty = cast<SequentialType>(Ty)->getElementType(); - - // Get the array index and the size of each array element. - int64_t arrayIdx = cast<ConstantSInt>(Idx[CurIDX])->getValue(); - Result += arrayIdx * (int64_t)getTypeSize(Ty); - } else { - const StructType *STy = cast<StructType>(Ty); - assert(Idx[CurIDX]->getType() == Type::UByteTy && "Illegal struct idx"); + generic_gep_type_iterator<std::vector<Value*>::const_iterator> + TI = gep_type_begin(ptrTy, Idx.begin(), Idx.end()); + for (unsigned CurIDX = 0; CurIDX != Idx.size(); ++CurIDX, ++TI) { + if (const StructType *STy = dyn_cast<StructType>(*TI)) { + assert(Idx[CurIDX]->getType() == Type::UIntTy && "Illegal struct idx"); unsigned FieldNo = cast<ConstantUInt>(Idx[CurIDX])->getValue(); // Get structure layout information... @@ -240,6 +235,13 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, // Update Ty to refer to current element Ty = STy->getElementType(FieldNo); + } else { + // Update Ty to refer to current element + Ty = cast<SequentialType>(Ty)->getElementType(); + + // Get the array index and the size of each array element. 
+ int64_t arrayIdx = cast<ConstantInt>(Idx[CurIDX])->getRawValue(); + Result += arrayIdx * (int64_t)getTypeSize(Ty); } } diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp index fa7eef1..ad52353 100644 --- a/lib/Target/X86/InstSelectSimple.cpp +++ b/lib/Target/X86/InstSelectSimple.cpp @@ -2704,12 +2704,13 @@ void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, // idx is the index into the array. Unlike with structure // indices, we may not know its actual value at code-generation // time. - assert(idx->getType() == Type::LongTy && "Bad GEP array index!"); // If idx is a constant, fold it into the offset. unsigned TypeSize = TD.getTypeSize(SqTy->getElementType()); if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) { Disp += TypeSize*CSI->getValue(); + } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) { + Disp += TypeSize*CUI->getValue(); } else { // If the index reg is already taken, we can't handle this index. if (IndexReg) return; @@ -2833,12 +2834,7 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB, GEPOps.pop_back(); // Consume a GEP operand GEPTypes.pop_back(); - // idx is the index into the array. Unlike with structure - // indices, we may not know its actual value at code-generation - // time. - assert(idx->getType() == Type::LongTy && "Bad GEP array index!"); - - // Most GEP instructions use a [cast (int/uint) to LongTy] as their + // Many GEP instructions use a [cast (int/uint) to LongTy] as their // operand on X86. Handle this case directly now... if (CastInst *CI = dyn_cast<CastInst>(idx)) if (CI->getOperand(0)->getType() == Type::IntTy || @@ -2852,9 +2848,9 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB, unsigned elementSize = TD.getTypeSize(ElTy); // If idxReg is a constant, we don't need to perform the multiply! - if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) { + if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) { if (!CSI->isNullValue()) { - unsigned Offset = elementSize*CSI->getValue(); + unsigned Offset = elementSize*CSI->getRawValue(); unsigned Reg = makeAnotherReg(Type::UIntTy); BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg) .addReg(Reg).addImm(Offset); diff --git a/lib/Target/X86/X86ISelSimple.cpp b/lib/Target/X86/X86ISelSimple.cpp index fa7eef1..ad52353 100644 --- a/lib/Target/X86/X86ISelSimple.cpp +++ b/lib/Target/X86/X86ISelSimple.cpp @@ -2704,12 +2704,13 @@ void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, // idx is the index into the array. Unlike with structure // indices, we may not know its actual value at code-generation // time. - assert(idx->getType() == Type::LongTy && "Bad GEP array index!"); // If idx is a constant, fold it into the offset. unsigned TypeSize = TD.getTypeSize(SqTy->getElementType()); if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) { Disp += TypeSize*CSI->getValue(); + } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) { + Disp += TypeSize*CUI->getValue(); } else { // If the index reg is already taken, we can't handle this index. if (IndexReg) return; @@ -2833,12 +2834,7 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB, GEPOps.pop_back(); // Consume a GEP operand GEPTypes.pop_back(); - // idx is the index into the array. Unlike with structure - // indices, we may not know its actual value at code-generation - // time. 
- assert(idx->getType() == Type::LongTy && "Bad GEP array index!"); - - // Most GEP instructions use a [cast (int/uint) to LongTy] as their + // Many GEP instructions use a [cast (int/uint) to LongTy] as their // operand on X86. Handle this case directly now... if (CastInst *CI = dyn_cast<CastInst>(idx)) if (CI->getOperand(0)->getType() == Type::IntTy || @@ -2852,9 +2848,9 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB, unsigned elementSize = TD.getTypeSize(ElTy); // If idxReg is a constant, we don't need to perform the multiply! - if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) { + if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) { if (!CSI->isNullValue()) { - unsigned Offset = elementSize*CSI->getValue(); + unsigned Offset = elementSize*CSI->getRawValue(); unsigned Reg = makeAnotherReg(Type::UIntTy); BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg) .addReg(Reg).addImm(Offset); diff --git a/lib/Transforms/ExprTypeConvert.cpp b/lib/Transforms/ExprTypeConvert.cpp index b30ddf1..bde24f0 100644 --- a/lib/Transforms/ExprTypeConvert.cpp +++ b/lib/Transforms/ExprTypeConvert.cpp @@ -797,10 +797,13 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty, // stream, so we have to delete it when we're done. // if (DataSize != 1) { - // FIXME, PR82 - TempScale = BinaryOperator::create(Instruction::Mul, Index, - ConstantSInt::get(Type::LongTy, - DataSize)); + Value *CST; + if (Index->getType()->isSigned()) + CST = ConstantSInt::get(Index->getType(), DataSize); + else + CST = ConstantUInt::get(Index->getType(), DataSize); + + TempScale = BinaryOperator::create(Instruction::Mul, Index, CST); Index = TempScale; } @@ -1012,8 +1015,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal, if (const CompositeType *CT = dyn_cast<CompositeType>(LoadedTy)) { std::vector<Value*> Indices; - // FIXME, PR82 - Indices.push_back(ConstantSInt::get(Type::LongTy, 0)); + Indices.push_back(Constant::getNullValue(Type::UIntTy)); unsigned Offset = 0; // No offset, get first leaf. LoadedTy = getStructOffsetType(CT, Offset, Indices, TD, false); @@ -1049,8 +1051,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal, const StructType *SElTy = cast<StructType>(ElTy); std::vector<Value*> Indices; - // FIXME, PR82 - Indices.push_back(Constant::getNullValue(Type::LongTy)); + Indices.push_back(Constant::getNullValue(Type::UIntTy)); unsigned Offset = 0; const Type *Ty = getStructOffsetType(ElTy, Offset, Indices, TD,false); @@ -1079,8 +1080,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal, if (isa<StructType>(ValTy)) { std::vector<Value*> Indices; - // FIXME: PR82 - Indices.push_back(Constant::getNullValue(Type::LongTy)); + Indices.push_back(Constant::getNullValue(Type::UIntTy)); unsigned Offset = 0; ValTy = getStructOffsetType(ValTy, Offset, Indices, TD, false); @@ -1112,10 +1112,13 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal, if (DataSize != 1) { // Insert a multiply of the old element type is not a unit size... - Index = BinaryOperator::create(Instruction::Mul, Index, - // FIXME: PR82 - ConstantSInt::get(Type::LongTy, DataSize), - "scale", It); + Value *CST; + if (Index->getType()->isSigned()) + CST = ConstantSInt::get(Index->getType(), DataSize); + else + CST = ConstantUInt::get(Index->getType(), DataSize); + + Index = BinaryOperator::create(Instruction::Mul, Index, CST, "scale", It); } // Perform the conversion now... 
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp index 5baefe9..4cdcb9e 100644 --- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp +++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp @@ -40,7 +40,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName, while (isa<AllocaInst>(InsertPos)) ++InsertPos; ConstantPointerRef *ArrayCPR = ConstantPointerRef::get(Array); - std::vector<Constant*> GEPIndices(2, Constant::getNullValue(Type::LongTy)); + std::vector<Constant*> GEPIndices(2, Constant::getNullValue(Type::IntTy)); Args[2] = ConstantExpr::getGetElementPtr(ArrayCPR, GEPIndices); unsigned NumElements = @@ -89,8 +89,8 @@ void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum, // Create the getelementptr constant expression std::vector<Constant*> Indices(2); - Indices[0] = Constant::getNullValue(Type::LongTy); - Indices[1] = ConstantSInt::get(Type::LongTy, CounterNum); + Indices[0] = Constant::getNullValue(Type::IntTy); + Indices[1] = ConstantSInt::get(Type::IntTy, CounterNum); Constant *ElementPtr = ConstantExpr::getGetElementPtr(CounterArray, Indices); // Load, increment and store the value back. diff --git a/lib/Transforms/LevelRaise.cpp b/lib/Transforms/LevelRaise.cpp index edc42b7..aeb836d 100644 --- a/lib/Transforms/LevelRaise.cpp +++ b/lib/Transforms/LevelRaise.cpp @@ -370,9 +370,8 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) { // Build the index vector, full of all zeros std::vector<Value*> Indices; - Indices.push_back(ConstantSInt::get(Type::LongTy, 0)); // FIXME, PR82 + Indices.push_back(Constant::getNullValue(Type::UIntTy)); while (CurCTy && !isa<PointerType>(CurCTy)) { - const Type *IdxType; if (const StructType *CurSTy = dyn_cast<StructType>(CurCTy)) { // Check for a zero element struct type... if we have one, bail. if (CurSTy->getNumElements() == 0) break; @@ -381,14 +380,12 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) { // offset zero in the struct. // ElTy = CurSTy->getElementType(0); - IdxType = Type::UByteTy; // FIXME when PR82 is fixed. } else { ElTy = cast<ArrayType>(CurCTy)->getElementType(); - IdxType = Type::LongTy; // FIXME when PR82 is fixed. } // Insert a zero to index through this type... - Indices.push_back(Constant::getNullValue(IdxType)); + Indices.push_back(Constant::getNullValue(Type::UIntTy)); // Did we find what we're looking for? if (ElTy->isLosslesslyConvertibleTo(DestPointedTy)) break; diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index 13c6b26..a1e6c99 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -44,9 +44,10 @@ #include "llvm/Target/TargetData.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/InstIterator.h" #include "llvm/Support/InstVisitor.h" -#include "llvm/Support/CallSite.h" #include "Support/Debug.h" #include "Support/Statistic.h" #include <algorithm> @@ -92,6 +93,8 @@ namespace { AU.setPreservesCFG(); } + TargetData &getTargetData() const { return *TD; } + // Visitation implementation - Implement instruction combining for different // instruction types. 
The semantics are as follows: // Return Value: @@ -127,6 +130,7 @@ namespace { Instruction *visitCallSite(CallSite CS); bool transformConstExprCastCall(CallSite CS); + public: // InsertNewInstBefore - insert an instruction New before instruction Old // in the program. Add the new instruction to the worklist. // @@ -139,7 +143,6 @@ namespace { return New; } - public: // ReplaceInstUsesWith - This method is to be used when an instruction is // found to be dead, replacable with another preexisting expression. Here // we add all uses of I to the worklist, replace all uses of I with the new @@ -2272,6 +2275,20 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { return 0; } +static Value *InsertSignExtendToPtrTy(Value *V, const Type *DTy, + Instruction *InsertPoint, + InstCombiner *IC) { + unsigned PS = IC->getTargetData().getPointerSize(); + const Type *VTy = V->getType(); + Instruction *Cast; + if (!VTy->isSigned() && VTy->getPrimitiveSize() < PS) + // We must insert a cast to ensure we sign-extend. + V = IC->InsertNewInstBefore(new CastInst(V, VTy->getSignedVersion(), + V->getName()), *InsertPoint); + return IC->InsertNewInstBefore(new CastInst(V, DTy, V->getName()), + *InsertPoint); +} + Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Is it 'getelementptr %P, long 0' or 'getelementptr %P' @@ -2286,6 +2303,37 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (GEP.getNumOperands() == 2 && HasZeroPointerIndex) return ReplaceInstUsesWith(GEP, GEP.getOperand(0)); + // Eliminate unneeded casts for indices. + bool MadeChange = false; + for (unsigned i = 1, e = GEP.getNumOperands(); i != e; ++i) + if (CastInst *CI = dyn_cast<CastInst>(GEP.getOperand(i))) { + Value *Src = CI->getOperand(0); + const Type *SrcTy = Src->getType(); + const Type *DestTy = CI->getType(); + if (Src->getType()->isInteger()) { + if (SrcTy->getPrimitiveSize() == DestTy->getPrimitiveSize()) { + // We can always eliminate a cast from ulong or long to the other. We + // can always eliminate a cast from uint to int or the other on 32-bit + // pointer platforms. + if (DestTy->getPrimitiveSize() >= TD->getPointerSize()) { + MadeChange = true; + GEP.setOperand(i, Src); + } + } else if (SrcTy->getPrimitiveSize() < DestTy->getPrimitiveSize() && + SrcTy->getPrimitiveSize() == 4) { + // We can always eliminate a cast from int to [u]long. We can + // eliminate a cast from uint to [u]long iff the target is a 32-bit + // pointer target. + if (SrcTy->isSigned() || + SrcTy->getPrimitiveSize() >= TD->getPointerSize()) { + MadeChange = true; + GEP.setOperand(i, Src); + } + } + } + } + if (MadeChange) return &GEP; + // Combine Indices - If the source pointer to this getelementptr instruction // is a getelementptr instruction, combine the indices of the two // getelementptr instructions into a single instruction. @@ -2304,14 +2352,17 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Can we combine the two pointer arithmetics offsets? if (SrcGEPOperands.size() == 2 && isa<Constant>(SrcGEPOperands[1]) && isa<Constant>(GEP.getOperand(1))) { + Constant *SGC = cast<Constant>(SrcGEPOperands[1]); + Constant *GC = cast<Constant>(GEP.getOperand(1)); + if (SGC->getType() != GC->getType()) { + SGC = ConstantExpr::getSignExtend(SGC, Type::LongTy); + GC = ConstantExpr::getSignExtend(GC, Type::LongTy); + } + // Replace: gep (gep %P, long C1), long C2, ... // With: gep %P, long (C1+C2), ... 
- Value *Sum = ConstantExpr::get(Instruction::Add, - cast<Constant>(SrcGEPOperands[1]), - cast<Constant>(GEP.getOperand(1))); - assert(Sum && "Constant folding of longs failed!?"); GEP.setOperand(0, SrcGEPOperands[0]); - GEP.setOperand(1, Sum); + GEP.setOperand(1, ConstantExpr::getAdd(SGC, GC)); if (Instruction *I = dyn_cast<Instruction>(GEP.getOperand(0))) AddUsersToWorkList(*I); // Reduce use count of Src return &GEP; @@ -2327,29 +2378,65 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { cast<Instruction>(SrcGEPOperands[0])->getNumOperands() == 2) return 0; // Wait until our source is folded to completion. - Value *Sum = BinaryOperator::create(Instruction::Add, SrcGEPOperands[1], - GEP.getOperand(1), - GEP.getOperand(0)->getName()+".sum", - &GEP); + Value *Sum, *SO1 = SrcGEPOperands[1], *GO1 = GEP.getOperand(1); + if (SO1 == Constant::getNullValue(SO1->getType())) { + Sum = GO1; + } else if (GO1 == Constant::getNullValue(GO1->getType())) { + Sum = SO1; + } else { + // If they aren't the same type, convert both to an integer of the + // target's pointer size. + if (SO1->getType() != GO1->getType()) { + if (Constant *SO1C = dyn_cast<Constant>(SO1)) { + SO1 = ConstantExpr::getCast(SO1C, GO1->getType()); + } else if (Constant *GO1C = dyn_cast<Constant>(GO1)) { + GO1 = ConstantExpr::getCast(GO1C, SO1->getType()); + } else { + unsigned PS = TD->getPointerSize(); + Instruction *Cast; + if (SO1->getType()->getPrimitiveSize() == PS) { + // Convert GO1 to SO1's type. + GO1 = InsertSignExtendToPtrTy(GO1, SO1->getType(), &GEP, this); + + } else if (GO1->getType()->getPrimitiveSize() == PS) { + // Convert SO1 to GO1's type. + SO1 = InsertSignExtendToPtrTy(SO1, GO1->getType(), &GEP, this); + } else { + const Type *PT = TD->getIntPtrType(); + SO1 = InsertSignExtendToPtrTy(SO1, PT, &GEP, this); + GO1 = InsertSignExtendToPtrTy(GO1, PT, &GEP, this); + } + } + } + Sum = BinaryOperator::create(Instruction::Add, SO1, GO1, + GEP.getOperand(0)->getName()+".sum", &GEP); + } GEP.setOperand(0, SrcGEPOperands[0]); GEP.setOperand(1, Sum); WorkList.push_back(cast<Instruction>(Sum)); return &GEP; - } else if (*GEP.idx_begin() == Constant::getNullValue(Type::LongTy) && + } else if (isa<Constant>(*GEP.idx_begin()) && + cast<Constant>(*GEP.idx_begin())->isNullValue() && SrcGEPOperands.size() != 1) { // Otherwise we can do the fold if the first index of the GEP is a zero Indices.insert(Indices.end(), SrcGEPOperands.begin()+1, SrcGEPOperands.end()); Indices.insert(Indices.end(), GEP.idx_begin()+1, GEP.idx_end()); - } else if (SrcGEPOperands.back() == Constant::getNullValue(Type::LongTy)) { - // FIXME: when we allow indices to be non-long values, support this for - // other types! - - // If the src gep ends with a constant array index, merge this get into - // it, even if we have a non-zero array index. - Indices.insert(Indices.end(), SrcGEPOperands.begin()+1, - SrcGEPOperands.end()-1); - Indices.insert(Indices.end(), GEP.idx_begin(), GEP.idx_end()); + } else if (SrcGEPOperands.back() == + Constant::getNullValue(SrcGEPOperands.back()->getType())) { + // We have to check to make sure this really is an ARRAY index we are + // ending up with, not a struct index. 
+ generic_gep_type_iterator<std::vector<Value*>::iterator> + GTI = gep_type_begin(SrcGEPOperands[0]->getType(), + SrcGEPOperands.begin()+1, SrcGEPOperands.end()); + std::advance(GTI, SrcGEPOperands.size()-2); + if (isa<SequentialType>(*GTI)) { + // If the src gep ends with a constant array index, merge this get into + // it, even if we have a non-zero array index. + Indices.insert(Indices.end(), SrcGEPOperands.begin()+1, + SrcGEPOperands.end()-1); + Indices.insert(Indices.end(), GEP.idx_begin(), GEP.idx_end()); + } } if (!Indices.empty()) @@ -2428,7 +2515,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { // Now that I is pointing to the first non-allocation-inst in the block, // insert our getelementptr instruction... // - std::vector<Value*> Idx(2, Constant::getNullValue(Type::LongTy)); + std::vector<Value*> Idx(2, Constant::getNullValue(Type::IntTy)); Value *V = new GetElementPtrInst(New, Idx, New->getName()+".sub", It); // Now make everything use the getelementptr instead of the original @@ -2469,7 +2556,7 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) { /// expression, or null if something is funny. /// static Constant *GetGEPGlobalInitializer(Constant *C, ConstantExpr *CE) { - if (CE->getOperand(1) != Constant::getNullValue(Type::LongTy)) + if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType())) return 0; // Do not allow stepping over the value! // Loop over all of the operands, tracking down which value we are diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp index 75be397..8d550b8 100644 --- a/lib/Transforms/Scalar/SCCP.cpp +++ b/lib/Transforms/Scalar/SCCP.cpp @@ -712,7 +712,7 @@ void SCCP::visitGetElementPtrInst(GetElementPtrInst &I) { /// null if something is funny. /// static Constant *GetGEPGlobalInitializer(Constant *C, ConstantExpr *CE) { - if (CE->getOperand(1) != Constant::getNullValue(Type::LongTy)) + if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType())) return 0; // Do not allow stepping over the value! // Loop over all of the operands, tracking down which value we are diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index c1c4759..9598918 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -193,7 +193,7 @@ bool SROA::performScalarRepl(Function &F) { // std::string OldName = GEPI->getName(); // Steal the old name... std::vector<Value*> NewArgs; - NewArgs.push_back(Constant::getNullValue(Type::LongTy)); + NewArgs.push_back(Constant::getNullValue(Type::IntTy)); NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end()); GEPI->setName(""); RepValue = diff --git a/lib/Transforms/TransformInternals.cpp b/lib/Transforms/TransformInternals.cpp index 9039f59..971a58d 100644 --- a/lib/Transforms/TransformInternals.cpp +++ b/lib/Transforms/TransformInternals.cpp @@ -35,8 +35,7 @@ static const Type *getStructOffsetStep(const StructType *STy, uint64_t &Offset, (i == SL->MemberOffsets.size()-1 || Offset < SL->MemberOffsets[i+1])); // Make sure to save the current index... 
- // FIXME for PR82 - Indices.push_back(ConstantUInt::get(Type::UByteTy, i)); + Indices.push_back(ConstantUInt::get(Type::UIntTy, i)); Offset = SL->MemberOffsets[i]; return STy->getContainedType(i); } @@ -75,8 +74,10 @@ const Type *llvm::getStructOffsetType(const Type *Ty, unsigned &Offset, NextType = ATy->getElementType(); unsigned ChildSize = TD.getTypeSize(NextType); - // FIXME for PR82 - Indices.push_back(ConstantSInt::get(Type::LongTy, Offset/ChildSize)); + if (ConstantSInt::isValueValidForType(Type::IntTy, Offset/ChildSize)) + Indices.push_back(ConstantSInt::get(Type::IntTy, Offset/ChildSize)); + else + Indices.push_back(ConstantSInt::get(Type::LongTy, Offset/ChildSize)); ThisOffset = (Offset/ChildSize)*ChildSize; } else { Offset = 0; // Return the offset that we were able to achieve @@ -99,105 +100,6 @@ const Type *llvm::ConvertibleToGEP(const Type *Ty, Value *OffsetVal, std::vector<Value*> &Indices, const TargetData &TD, BasicBlock::iterator *BI) { - const CompositeType *CompTy = dyn_cast<CompositeType>(Ty); - if (CompTy == 0) return 0; - - // See if the cast is of an integer expression that is either a constant, - // or a value scaled by some amount with a possible offset. - // - ExprType Expr = ClassifyExpr(OffsetVal); - - // Get the offset and scale values if they exists... - // A scale of zero with Expr.Var != 0 means a scale of 1. - // - int64_t Offset = Expr.Offset ? getConstantValue(Expr.Offset) : 0; - int64_t Scale = Expr.Scale ? getConstantValue(Expr.Scale) : 0; - - if (Expr.Var && Scale == 0) Scale = 1; // Scale != 0 if Expr.Var != 0 - - // Loop over the Scale and Offset values, filling in the Indices vector for - // our final getelementptr instruction. - // - const Type *NextTy = CompTy; - do { - if (!isa<CompositeType>(NextTy)) - return 0; // Type must not be ready for processing... - CompTy = cast<CompositeType>(NextTy); - - if (const StructType *StructTy = dyn_cast<StructType>(CompTy)) { - // Step into the appropriate element of the structure... - uint64_t ActualOffset = (Offset < 0) ? 0 : (uint64_t)Offset; - NextTy = getStructOffsetStep(StructTy, ActualOffset, Indices, TD); - Offset -= ActualOffset; - } else { - const Type *ElTy = cast<SequentialType>(CompTy)->getElementType(); - if (!ElTy->isSized() || (isa<PointerType>(CompTy) && !Indices.empty())) - return 0; // Type is unreasonable... escape! - unsigned ElSize = TD.getTypeSize(ElTy); - if (ElSize == 0) return 0; // Avoid division by zero... - int64_t ElSizeS = ElSize; - - // See if the user is indexing into a different cell of this array... - if (Scale && (Scale >= ElSizeS || -Scale >= ElSizeS)) { - // A scale n*ElSize might occur if we are not stepping through - // array by one. In this case, we will have to insert math to munge - // the index. - // - int64_t ScaleAmt = Scale/ElSizeS; - if (Scale-ScaleAmt*ElSizeS) - return 0; // Didn't scale by a multiple of element size, bail out - Scale = 0; // Scale is consumed - - int64_t Index = Offset/ElSize; // is zero unless Offset > ElSize - Offset -= Index*ElSize; // Consume part of the offset - - if (BI) { // Generate code? 
- BasicBlock *BB = (*BI)->getParent(); - if (Expr.Var->getType() != Type::LongTy) // FIXME for PR82 - Expr.Var = new CastInst(Expr.Var, Type::LongTy, // FIXME for PR82 - Expr.Var->getName()+"-idxcast", *BI); - - if (ScaleAmt && ScaleAmt != 1) { - // If we have to scale up our index, do so now - // FIXME for PR82 - Value *ScaleAmtVal = ConstantSInt::get(Type::LongTy, ScaleAmt); - Expr.Var = BinaryOperator::create(Instruction::Mul, Expr.Var, - ScaleAmtVal, - Expr.Var->getName()+"-scale",*BI); - } - - if (Index) { // Add an offset to the index - // FIXME for PR82 - Value *IndexAmt = ConstantSInt::get(Type::LongTy, Index); - Expr.Var = BinaryOperator::create(Instruction::Add, Expr.Var, - IndexAmt, - Expr.Var->getName()+"-offset", - *BI); - } - } - - Indices.push_back(Expr.Var); - Expr.Var = 0; - } else if (Offset >= (int64_t)ElSize || -Offset >= (int64_t)ElSize) { - // Calculate the index that we are entering into the array cell with - uint64_t Index = Offset/ElSize; - // FIXME for PR82 - Indices.push_back(ConstantSInt::get(Type::LongTy, Index)); - Offset -= (int64_t)(Index*ElSize); // Consume part of the offset - - } else if (isa<ArrayType>(CompTy) || Indices.empty()) { - // Must be indexing a small amount into the first cell of the array - // Just index into element zero of the array here. - // - // FIXME for PR82 - Indices.push_back(ConstantSInt::get(Type::LongTy, 0)); - } else { - return 0; // Hrm. wierd, can't handle this case. Bail - } - NextTy = ElTy; - } - } while (Offset || Scale); // Go until we're done! - - return NextTy; + return 0; } diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp index 2349d76..c486633 100644 --- a/lib/Transforms/Utils/LowerInvoke.cpp +++ b/lib/Transforms/Utils/LowerInvoke.cpp @@ -277,14 +277,14 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) { // Store this old value as our 'next' field, and store our alloca as the // current jblist. std::vector<Value*> Idx; - Idx.push_back(Constant::getNullValue(Type::LongTy)); - Idx.push_back(ConstantUInt::get(Type::UByteTy, 0)); + Idx.push_back(Constant::getNullValue(Type::IntTy)); + Idx.push_back(ConstantUInt::get(Type::UIntTy, 0)); Value *NextFieldPtr = new GetElementPtrInst(JmpBuf, Idx, "NextField", II); new StoreInst(OldEntry, NextFieldPtr, II); new StoreInst(JmpBuf, JBListHead, II); // Call setjmp, passing in the address of the jmpbuffer. - Idx[1] = ConstantUInt::get(Type::UByteTy, 1); + Idx[1] = ConstantUInt::get(Type::UIntTy, 1); Value *JmpBufPtr = new GetElementPtrInst(JmpBuf, Idx, "TheJmpBuf", II); Value *SJRet = new CallInst(SetJmpFn, JmpBufPtr, "sjret", II); @@ -369,14 +369,14 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) { // JBList. std::vector<Value*> Idx; Idx.push_back(Constant::getNullValue(Type::LongTy)); - Idx.push_back(ConstantUInt::get(Type::UByteTy, 0)); + Idx.push_back(ConstantUInt::get(Type::UIntTy, 0)); Value *NextFieldPtr = new GetElementPtrInst(RecPtr, Idx, "NextField", RI); Value *NextRec = new LoadInst(NextFieldPtr, "NextRecord", RI); new StoreInst(NextRec, JBListHead, RI); // Now that we popped the top of the JBList, get a pointer to the jmpbuf and // longjmp. 
- Idx[1] = ConstantUInt::get(Type::UByteTy, 1); + Idx[1] = ConstantUInt::get(Type::UIntTy, 1); Idx[0] = new GetElementPtrInst(RecPtr, Idx, "JmpBuf", RI); Idx[1] = ConstantInt::get(Type::IntTy, 1); new CallInst(LongJmpFn, Idx, "", RI); diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp index db21bd3..1fee501 100644 --- a/lib/VMCore/ConstantFold.cpp +++ b/lib/VMCore/ConstantFold.cpp @@ -608,10 +608,10 @@ static int IdxCompare(Constant *C1, Constant *C2) { if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2)) return -2; // don't know! - // Ok, we have two differing integer indices. Convert them to - // be the same type. Long is always big enough, so we use it. - C1 = ConstantExpr::getCast(C1, Type::LongTy); - C2 = ConstantExpr::getCast(C2, Type::LongTy); + // Ok, we have two differing integer indices. Sign extend them to be the same + // type. Long is always big enough, so we use it. + C1 = ConstantExpr::getSignExtend(C1, Type::LongTy); + C2 = ConstantExpr::getSignExtend(C2, Type::LongTy); if (C1 == C2) return 0; // Are they just differing types? // If they are really different, now that they are the same type, then we diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp index 1e2d741..b7d7181 100644 --- a/lib/VMCore/Type.cpp +++ b/lib/VMCore/Type.cpp @@ -295,8 +295,9 @@ const std::string &Type::getDescription() const { bool StructType::indexValid(const Value *V) const { // Structure indexes require unsigned integer constants. - if (const ConstantUInt *CU = dyn_cast<ConstantUInt>(V)) - return CU->getValue() < ContainedTys.size(); + if (V->getType() == Type::UIntTy) + if (const ConstantUInt *CU = dyn_cast<ConstantUInt>(V)) + return CU->getValue() < ContainedTys.size(); return false; } @@ -304,10 +305,8 @@ bool StructType::indexValid(const Value *V) const { // element. For a structure type, this must be a constant value... // const Type *StructType::getTypeAtIndex(const Value *V) const { - assert(isa<Constant>(V) && "Structure index must be a constant!!"); + assert(indexValid(V) && "Invalid structure index!"); unsigned Idx = cast<ConstantUInt>(V)->getValue(); - assert(Idx < ContainedTys.size() && "Structure index out of range!"); - assert(indexValid(V) && "Invalid structure index!"); // Duplicate check return ContainedTys[Idx]; } diff --git a/lib/VMCore/iMemory.cpp b/lib/VMCore/iMemory.cpp index 7c8f665..1fc1df8 100644 --- a/lib/VMCore/iMemory.cpp +++ b/lib/VMCore/iMemory.cpp @@ -137,7 +137,12 @@ const Type* GetElementPtrInst::getIndexedType(const Type *Ptr, if (!isa<PointerType>(Ptr)) return 0; // Type isn't a pointer type! // Handle the special case of the empty set index set... - if (Idx.empty()) return cast<PointerType>(Ptr)->getElementType(); + if (Idx.empty()) + if (AllowCompositeLeaf || + cast<PointerType>(Ptr)->getElementType()->isFirstClassType()) + return cast<PointerType>(Ptr)->getElementType(); + else + return 0; unsigned CurIdx = 0; while (const CompositeType *CT = dyn_cast<CompositeType>(Ptr)) { |