author    Dan Gohman <dan433584@gmail.com>   2013-01-31 02:00:45 +0000
committer Dan Gohman <dan433584@gmail.com>   2013-01-31 02:00:45 +0000
commit    a070d2a0355c4993240b5206ebc1d517c151331d (patch)
tree      54ab242655acfe79ed7e42f8576b0f6135433567
parent    8ed701da9aa388c078cc6aecac2fe355974c90d2 (diff)
Change GetPointerBaseWithConstantOffset's DataLayout argument from a
reference to a pointer, so that it can handle the case where DataLayout
is not available and behave conservatively.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174024 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/Analysis/ValueTracking.h            4
-rw-r--r--  lib/Analysis/Lint.cpp                            84
-rw-r--r--  lib/Analysis/Loads.cpp                            3
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp         4
-rw-r--r--  lib/Analysis/ValueTracking.cpp                    8
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp    6
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                     6
-rw-r--r--  unittests/IR/InstructionsTest.cpp                 8
8 files changed, 61 insertions, 62 deletions
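
The practical effect at call sites is visible in the Loads.cpp hunk below: the if (TD) guard disappears and the possibly-null DataLayout pointer is simply forwarded. The following is a minimal caller-side sketch of the new convention; the wrapper function and its name are illustrative only and are not part of this patch.

    // Sketch only: forwarding a possibly-null DataLayout under the new signature.
    #include "llvm/Analysis/ValueTracking.h"

    using namespace llvm;

    static Value *stripConstantOffset(Value *Ptr, const DataLayout *TD,
                                      int64_t &Offset) {
      Offset = 0;
      // Old interface: callers had to guard on TD and dereference it:
      //   if (TD)
      //     Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD);
      // New interface: TD may be null; the analysis then falls back to a
      // conservative 64-bit offset width (see the ValueTracking.cpp hunk).
      return GetPointerBaseWithConstantOffset(Ptr, Offset, TD);
    }
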
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 875c47d..b5b8bfe 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -117,10 +117,10 @@ namespace llvm {
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const DataLayout &TD);
+ const DataLayout *TD);
static inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
- const DataLayout &TD) {
+ const DataLayout *TD) {
return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
}
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index fd10a6b..9393508 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -412,51 +412,49 @@ void Lint::visitMemoryReference(Instruction &I,
}
// Check for buffer overflows and misalignment.
- if (TD) {
- // Only handles memory references that read/write something simple like an
- // alloca instruction or a global variable.
- int64_t Offset = 0;
- if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
- // OK, so the access is to a constant offset from Ptr. Check that Ptr is
- // something we can handle and if so extract the size of this base object
- // along with its alignment.
- uint64_t BaseSize = AliasAnalysis::UnknownSize;
- unsigned BaseAlign = 0;
-
- if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
- Type *ATy = AI->getAllocatedType();
- if (!AI->isArrayAllocation() && ATy->isSized())
- BaseSize = TD->getTypeAllocSize(ATy);
- BaseAlign = AI->getAlignment();
- if (BaseAlign == 0 && ATy->isSized())
- BaseAlign = TD->getABITypeAlignment(ATy);
- } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
- // If the global may be defined differently in another compilation unit
- // then don't warn about funky memory accesses.
- if (GV->hasDefinitiveInitializer()) {
- Type *GTy = GV->getType()->getElementType();
- if (GTy->isSized())
- BaseSize = TD->getTypeAllocSize(GTy);
- BaseAlign = GV->getAlignment();
- if (BaseAlign == 0 && GTy->isSized())
- BaseAlign = TD->getABITypeAlignment(GTy);
- }
+ // Only handles memory references that read/write something simple like an
+ // alloca instruction or a global variable.
+ int64_t Offset = 0;
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD)) {
+ // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+ // something we can handle and if so extract the size of this base object
+ // along with its alignment.
+ uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ unsigned BaseAlign = 0;
+
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ Type *ATy = AI->getAllocatedType();
+ if (TD && !AI->isArrayAllocation() && ATy->isSized())
+ BaseSize = TD->getTypeAllocSize(ATy);
+ BaseAlign = AI->getAlignment();
+ if (TD && BaseAlign == 0 && ATy->isSized())
+ BaseAlign = TD->getABITypeAlignment(ATy);
+ } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+ // If the global may be defined differently in another compilation unit
+ // then don't warn about funky memory accesses.
+ if (GV->hasDefinitiveInitializer()) {
+ Type *GTy = GV->getType()->getElementType();
+ if (TD && GTy->isSized())
+ BaseSize = TD->getTypeAllocSize(GTy);
+ BaseAlign = GV->getAlignment();
+ if (TD && BaseAlign == 0 && GTy->isSized())
+ BaseAlign = TD->getABITypeAlignment(GTy);
}
-
- // Accesses from before the start or after the end of the object are not
- // defined.
- Assert1(Size == AliasAnalysis::UnknownSize ||
- BaseSize == AliasAnalysis::UnknownSize ||
- (Offset >= 0 && Offset + Size <= BaseSize),
- "Undefined behavior: Buffer overflow", &I);
-
- // Accesses that say that the memory is more aligned than it is are not
- // defined.
- if (Align == 0 && Ty && Ty->isSized())
- Align = TD->getABITypeAlignment(Ty);
- Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
- "Undefined behavior: Memory reference address is misaligned", &I);
}
+
+ // Accesses from before the start or after the end of the object are not
+ // defined.
+ Assert1(Size == AliasAnalysis::UnknownSize ||
+ BaseSize == AliasAnalysis::UnknownSize ||
+ (Offset >= 0 && Offset + Size <= BaseSize),
+ "Undefined behavior: Buffer overflow", &I);
+
+ // Accesses that say that the memory is more aligned than it is are not
+ // defined.
+ if (TD && Align == 0 && Ty && Ty->isSized())
+ Align = TD->getABITypeAlignment(Ty);
+ Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
+ "Undefined behavior: Memory reference address is misaligned", &I);
}
}
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index 3158873..0902a39 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -57,8 +57,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const DataLayout *TD) {
int64_t ByteOffset = 0;
Value *Base = V;
- if (TD)
- Base = GetPointerBaseWithConstantOffset(V, ByteOffset, *TD);
+ Base = GetPointerBaseWithConstantOffset(V, ByteOffset, TD);
if (ByteOffset < 0) // out of bounds
return false;
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index eee7607..5cb0016 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -262,7 +262,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
// If we haven't already computed the base/offset of MemLoc, do so now.
if (MemLocBase == 0)
- MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, *TD);
+ MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
@@ -287,7 +287,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
- GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, TD);
+ GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 23bc444..473ebc8 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -1671,8 +1671,10 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const DataLayout &TD) {
- unsigned BitWidth = TD.getPointerSizeInBits();
+ const DataLayout *TD) {
+ // Without DataLayout, conservatively assume 64-bit offsets, which is
+ // the widest we support.
+ unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
APInt ByteOffset(BitWidth, 0);
while (1) {
if (Ptr->getType()->isVectorTy())
@@ -1680,7 +1682,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
APInt GEPOffset(BitWidth, 0);
- if (!GEP->accumulateConstantOffset(TD, GEPOffset))
+ if (TD && !GEP->accumulateConstantOffset(*TD, GEPOffset))
break;
ByteOffset += GEPOffset;
Ptr = GEP->getPointerOperand();
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index fe3acbf..57432c7 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -376,10 +376,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval argument). If so, then it clearly overwrites any
// other store to the same object.
- const DataLayout &TD = *AA.getDataLayout();
+ const DataLayout *TD = AA.getDataLayout();
- const Value *UO1 = GetUnderlyingObject(P1, &TD),
- *UO2 = GetUnderlyingObject(P2, &TD);
+ const Value *UO1 = GetUnderlyingObject(P1, TD),
+ *UO2 = GetUnderlyingObject(P2, TD);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 14201b9..50c4714 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -849,8 +849,8 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
+ Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
if (StoreBase != LoadBase)
return -1;
@@ -945,7 +945,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
unsigned Size = MemoryDependenceAnalysis::
diff --git a/unittests/IR/InstructionsTest.cpp b/unittests/IR/InstructionsTest.cpp
index 5c3b43a..9f66af1 100644
--- a/unittests/IR/InstructionsTest.cpp
+++ b/unittests/IR/InstructionsTest.cpp
@@ -192,10 +192,10 @@ TEST(InstructionsTest, VectorGep) {
"2:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80"
":128:128-n8:16:32:64-S128");
// Make sure we don't crash
- GetPointerBaseWithConstantOffset(Gep0, Offset, TD);
- GetPointerBaseWithConstantOffset(Gep1, Offset, TD);
- GetPointerBaseWithConstantOffset(Gep2, Offset, TD);
- GetPointerBaseWithConstantOffset(Gep3, Offset, TD);
+ GetPointerBaseWithConstantOffset(Gep0, Offset, &TD);
+ GetPointerBaseWithConstantOffset(Gep1, Offset, &TD);
+ GetPointerBaseWithConstantOffset(Gep2, Offset, &TD);
+ GetPointerBaseWithConstantOffset(Gep3, Offset, &TD);
// Gep of Geps
GetElementPtrInst *GepII0 = GetElementPtrInst::Create(Gep0, C2xi32b);