diff options
Diffstat (limited to 'lib/Analysis/MemoryDependenceAnalysis.cpp')
-rw-r--r-- | lib/Analysis/MemoryDependenceAnalysis.cpp | 129 |
1 file changed, 96 insertions, 33 deletions
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index 187eada..6d38863 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -18,7 +18,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/AssumptionTracker.h" +#include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/PHITransAddr.h" @@ -59,7 +59,7 @@ char MemoryDependenceAnalysis::ID = 0; // Register this pass... INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep", "Memory Dependence Analysis", false, true) -INITIALIZE_PASS_DEPENDENCY(AssumptionTracker) +INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep", "Memory Dependence Analysis", false, true) @@ -82,19 +82,17 @@ void MemoryDependenceAnalysis::releaseMemory() { PredCache->clear(); } - - /// getAnalysisUsage - Does not modify anything. It uses Alias Analysis. /// void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); - AU.addRequired<AssumptionTracker>(); + AU.addRequired<AssumptionCacheTracker>(); AU.addRequiredTransitive<AliasAnalysis>(); } -bool MemoryDependenceAnalysis::runOnFunction(Function &) { +bool MemoryDependenceAnalysis::runOnFunction(Function &F) { AA = &getAnalysis<AliasAnalysis>(); - AT = &getAnalysis<AssumptionTracker>(); + AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); DL = DLP ? 
&DLP->getDataLayout() : nullptr; DominatorTreeWrapperPass *DTWP = @@ -300,8 +298,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, // Load widening is hostile to ThreadSanitizer: it may cause false positives // or make the reports more cryptic (access sizes are wrong). - if (LI->getParent()->getParent()->getAttributes(). - hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeThread)) + if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread)) return 0; // Get the base of this load. @@ -346,9 +343,9 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, !DL.fitsInLegalInteger(NewLoadByteSize*8)) return 0; - if (LIOffs+NewLoadByteSize > MemLocEnd && - LI->getParent()->getParent()->getAttributes(). - hasAttribute(AttributeSet::FunctionIndex, Attribute::SanitizeAddress)) + if (LIOffs + NewLoadByteSize > MemLocEnd && + LI->getParent()->getParent()->hasFnAttribute( + Attribute::SanitizeAddress)) // We will be reading past the location accessed by the original program. // While this is safe in a regular build, Address Safety analysis tools // may start reporting false warnings. So, don't do widening. @@ -362,6 +359,17 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, } } +static bool isVolatile(Instruction *Inst) { + if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) + return LI->isVolatile(); + else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) + return SI->isVolatile(); + else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) + return AI->isVolatile(); + return false; +} + + /// getPointerDependencyFrom - Return the instruction on which a memory /// location depends. If isLoad is true, this routine ignores may-aliases with /// read-only operations. 
If isLoad is false, this routine ignores may-aliases @@ -448,12 +456,26 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, // does not alias with when this atomic load indicates that another thread may // be accessing the location. if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { + + // While volatile access cannot be eliminated, they do not have to clobber + // non-aliasing locations, as normal accesses, for example, can be safely + // reordered with volatile accesses. + if (LI->isVolatile()) { + if (!QueryInst) + // Original QueryInst *may* be volatile + return MemDepResult::getClobber(LI); + if (isVolatile(QueryInst)) + // Ordering required if QueryInst is itself volatile + return MemDepResult::getClobber(LI); + // Otherwise, volatile doesn't imply any special ordering + } + // Atomic loads have complications involved. // A Monotonic (or higher) load is OK if the query inst is itself not atomic. // An Acquire (or higher) load sets the HasSeenAcquire flag, so that any // release store will know to return getClobber. // FIXME: This is overly conservative. - if (!LI->isUnordered()) { + if (LI->isAtomic() && LI->getOrdering() > Unordered) { if (!QueryInst) return MemDepResult::getClobber(LI); if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) { @@ -470,13 +492,6 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, HasSeenAcquire = true; } - // FIXME: this is overly conservative. - // While volatile access cannot be eliminated, they do not have to clobber - // non-aliasing locations, as normal accesses can for example be reordered - // with volatile accesses. - if (LI->isVolatile()) - return MemDepResult::getClobber(LI); - AliasAnalysis::Location LoadLoc = AA->getLocation(LI); // If we found a pointer, check if it could be the same as our pointer. @@ -859,21 +874,65 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { /// own block. 
/// void MemoryDependenceAnalysis:: -getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad, - BasicBlock *FromBB, +getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) { + + auto getLocation = [](AliasAnalysis *AA, Instruction *Inst) { + if (auto *I = dyn_cast<LoadInst>(Inst)) + return AA->getLocation(I); + else if (auto *I = dyn_cast<StoreInst>(Inst)) + return AA->getLocation(I); + else if (auto *I = dyn_cast<VAArgInst>(Inst)) + return AA->getLocation(I); + else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst)) + return AA->getLocation(I); + else if (auto *I = dyn_cast<AtomicRMWInst>(Inst)) + return AA->getLocation(I); + else + llvm_unreachable("unsupported memory instruction"); + }; + + const AliasAnalysis::Location Loc = getLocation(AA, QueryInst); + bool isLoad = isa<LoadInst>(QueryInst); + BasicBlock *FromBB = QueryInst->getParent(); + assert(FromBB); + assert(Loc.Ptr->getType()->isPointerTy() && "Can't get pointer deps of a non-pointer!"); Result.clear(); + + // This routine does not expect to deal with volatile instructions. + // Doing so would require piping through the QueryInst all the way through. + // TODO: volatiles can't be elided, but they can be reordered with other + // non-volatile accesses. + + // We currently give up on any instruction which is ordered, but we do handle + // atomic instructions which are unordered. 
+ // TODO: Handle ordered instructions + auto isOrdered = [](Instruction *Inst) { + if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { + return !LI->isUnordered(); + } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { + return !SI->isUnordered(); + } + return false; + }; + if (isVolatile(QueryInst) || isOrdered(QueryInst)) { + Result.push_back(NonLocalDepResult(FromBB, + MemDepResult::getUnknown(), + const_cast<Value *>(Loc.Ptr))); + return; + } + - PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AT); + PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC); // This is the set of blocks we've inspected, and the pointer we consider in // each block. Because of critical edges, we currently bail out if querying // a block with multiple different pointers. This can happen during PHI // translation. DenseMap<BasicBlock*, Value*> Visited; - if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB, + if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB, Result, Visited, true)) return; Result.clear(); @@ -887,7 +946,8 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad, /// lookup (which may use dirty cache info if available). If we do a lookup, /// add the result to the cache. MemDepResult MemoryDependenceAnalysis:: -GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, +GetNonLocalInfoForBlock(Instruction *QueryInst, + const AliasAnalysis::Location &Loc, bool isLoad, BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) { @@ -928,7 +988,8 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc, } // Scan the block for the dependency. - MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB); + MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, + QueryInst); // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. 
@@ -1001,7 +1062,8 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, /// not compute dependence information for some reason. This should be treated /// as a clobber dependence on the first instruction in the predecessor block. bool MemoryDependenceAnalysis:: -getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, +getNonLocalPointerDepFromBB(Instruction *QueryInst, + const PHITransAddr &Pointer, const AliasAnalysis::Location &Loc, bool isLoad, BasicBlock *StartBB, SmallVectorImpl<NonLocalDepResult> &Result, @@ -1040,7 +1102,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, } else if (CacheInfo->Size > Loc.Size) { // This query's Size is less than the cached one. Conservatively restart // the query using the greater size. - return getNonLocalPointerDepFromBB(Pointer, + return getNonLocalPointerDepFromBB(QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad, StartBB, Result, Visited, SkipFirstBlock); @@ -1060,7 +1122,8 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, CacheInfo->NonLocalDeps.clear(); } if (Loc.AATags) - return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutAATags(), + return getNonLocalPointerDepFromBB(QueryInst, + Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result, Visited, SkipFirstBlock); } @@ -1145,7 +1208,6 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // cache value will only see properly sorted cache arrays. if (Cache && NumSortedEntries != Cache->size()) { SortNonLocalDepInfoCache(*Cache, NumSortedEntries); - NumSortedEntries = Cache->size(); } // Since we bail out, the "Cache" set won't contain all of the // results for the query. This is ok (we can still use it to accelerate @@ -1164,7 +1226,8 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // Get the dependency info for Pointer in BB. If we have cached // information, we will use it, otherwise we compute it. 
DEBUG(AssertSorted(*Cache, NumSortedEntries)); - MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache, + MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, + Loc, isLoad, BB, Cache, NumSortedEntries); // If we got a Def or Clobber, add this to the list of results. @@ -1298,7 +1361,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, // result conflicted with the Visited list; we have to conservatively // assume it is unknown, but this also does not block PRE of the load. if (!CanTranslate || - getNonLocalPointerDepFromBB(PredPointer, + getNonLocalPointerDepFromBB(QueryInst, PredPointer, Loc.getWithNewPtr(PredPtrVal), isLoad, Pred, Result, Visited)) { @@ -1361,7 +1424,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, if (I->getBB() != BB) continue; - assert(I->getResult().isNonLocal() && + assert((I->getResult().isNonLocal() || !DT->isReachableFromEntry(BB)) && "Should only be here with transparent block"); I->setResult(MemDepResult::getUnknown()); Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), |