Diffstat (limited to 'lib/Transforms/ObjCARC/ObjCARCOpts.cpp')
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCOpts.cpp | 1308
1 file changed, 231 insertions(+), 1077 deletions(-)
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index f55b77f..4d75658 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -26,9 +26,11 @@
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
+#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
+#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
@@ -45,102 +47,6 @@ using namespace llvm::objcarc;
#define DEBUG_TYPE "objc-arc-opts"
-/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
-/// @{
-
-namespace {
- /// \brief An associative container with fast insertion-order (deterministic)
- /// iteration over its elements. Plus the special blot operation.
- template<class KeyT, class ValueT>
- class MapVector {
- /// Map keys to indices in Vector.
- typedef DenseMap<KeyT, size_t> MapTy;
- MapTy Map;
-
- typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
- /// Keys and values.
- VectorTy Vector;
-
- public:
- typedef typename VectorTy::iterator iterator;
- typedef typename VectorTy::const_iterator const_iterator;
- iterator begin() { return Vector.begin(); }
- iterator end() { return Vector.end(); }
- const_iterator begin() const { return Vector.begin(); }
- const_iterator end() const { return Vector.end(); }
-
-#ifdef XDEBUG
- ~MapVector() {
- assert(Vector.size() >= Map.size()); // May differ due to blotting.
- for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
- I != E; ++I) {
- assert(I->second < Vector.size());
- assert(Vector[I->second].first == I->first);
- }
- for (typename VectorTy::const_iterator I = Vector.begin(),
- E = Vector.end(); I != E; ++I)
- assert(!I->first ||
- (Map.count(I->first) &&
- Map[I->first] == size_t(I - Vector.begin())));
- }
-#endif
-
- ValueT &operator[](const KeyT &Arg) {
- std::pair<typename MapTy::iterator, bool> Pair =
- Map.insert(std::make_pair(Arg, size_t(0)));
- if (Pair.second) {
- size_t Num = Vector.size();
- Pair.first->second = Num;
- Vector.push_back(std::make_pair(Arg, ValueT()));
- return Vector[Num].second;
- }
- return Vector[Pair.first->second].second;
- }
-
- std::pair<iterator, bool>
- insert(const std::pair<KeyT, ValueT> &InsertPair) {
- std::pair<typename MapTy::iterator, bool> Pair =
- Map.insert(std::make_pair(InsertPair.first, size_t(0)));
- if (Pair.second) {
- size_t Num = Vector.size();
- Pair.first->second = Num;
- Vector.push_back(InsertPair);
- return std::make_pair(Vector.begin() + Num, true);
- }
- return std::make_pair(Vector.begin() + Pair.first->second, false);
- }
-
- iterator find(const KeyT &Key) {
- typename MapTy::iterator It = Map.find(Key);
- if (It == Map.end()) return Vector.end();
- return Vector.begin() + It->second;
- }
-
- const_iterator find(const KeyT &Key) const {
- typename MapTy::const_iterator It = Map.find(Key);
- if (It == Map.end()) return Vector.end();
- return Vector.begin() + It->second;
- }
-
- /// This is similar to erase, but instead of removing the element from the
- /// vector, it just zeros out the key in the vector. This leaves iterators
- /// intact, but clients must be prepared for zeroed-out keys when iterating.
- void blot(const KeyT &Key) {
- typename MapTy::iterator It = Map.find(Key);
- if (It == Map.end()) return;
- Vector[It->second].first = KeyT();
- Map.erase(It);
- }
-
- void clear() {
- Map.clear();
- Vector.clear();
- }
- };
-}
-
-/// @}
-///
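(The class deleted above is the "blot map" that this patch hoists out into BlotMapVector.h, which is now included at the top of the file. Below is a minimal sketch of the blot semantics, assuming the new BlotMapVector keeps the interface of the removed MapVector; the key/value types and helper function are illustrative, and the namespace is assumed to be llvm.)

    #include "BlotMapVector.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    static void blotExample(const Instruction *A, const Instruction *B) {
      BlotMapVector<const Instruction *, unsigned> BMV;
      BMV[A] = 1;
      BMV[B] = 2;
      BMV.blot(A);             // A's entry is zeroed in place, not erased, so
                               // insertion order and iterators stay valid.
      for (auto &Pair : BMV) {
        if (!Pair.first)       // clients must skip blotted (null) keys
          continue;
        (void)Pair.second;     // only B -> 2 is still live here
      }
    }

(This is why the pass blots entries out of the Retains map while still iterating over it: erasing them would invalidate the iterators, while blotting only nulls the key.)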
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{
@@ -177,13 +83,14 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
-static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
+static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V,
+ const DataLayout &DL) {
SmallPtrSet<const Value *, 4> Visited;
SmallVector<const Value *, 4> Worklist;
Worklist.push_back(V);
do {
const Value *P = Worklist.pop_back_val();
- P = GetUnderlyingObjCPtr(P);
+ P = GetUnderlyingObjCPtr(P, DL);
if (isa<AllocaInst>(P))
return true;
@@ -270,293 +177,6 @@ STATISTIC(NumReleasesAfterOpt,
#endif
namespace {
- /// \enum Sequence
- ///
- /// \brief A sequence of states that a pointer may go through in which an
- /// objc_retain and objc_release are actually needed.
- enum Sequence {
- S_None,
- S_Retain, ///< objc_retain(x).
- S_CanRelease, ///< foo(x) -- x could possibly see a ref count decrement.
- S_Use, ///< any use of x.
- S_Stop, ///< like S_Release, but code motion is stopped.
- S_Release, ///< objc_release(x).
- S_MovableRelease ///< objc_release(x), !clang.imprecise_release.
- };
-
- raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
- LLVM_ATTRIBUTE_UNUSED;
- raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
- switch (S) {
- case S_None:
- return OS << "S_None";
- case S_Retain:
- return OS << "S_Retain";
- case S_CanRelease:
- return OS << "S_CanRelease";
- case S_Use:
- return OS << "S_Use";
- case S_Release:
- return OS << "S_Release";
- case S_MovableRelease:
- return OS << "S_MovableRelease";
- case S_Stop:
- return OS << "S_Stop";
- }
- llvm_unreachable("Unknown sequence type.");
- }
-}
-
-static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
- // The easy cases.
- if (A == B)
- return A;
- if (A == S_None || B == S_None)
- return S_None;
-
- if (A > B) std::swap(A, B);
- if (TopDown) {
- // Choose the side which is further along in the sequence.
- if ((A == S_Retain || A == S_CanRelease) &&
- (B == S_CanRelease || B == S_Use))
- return B;
- } else {
- // Choose the side which is further along in the sequence.
- if ((A == S_Use || A == S_CanRelease) &&
- (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
- return A;
- // If both sides are releases, choose the more conservative one.
- if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
- return A;
- if (A == S_Release && B == S_MovableRelease)
- return A;
- }
-
- return S_None;
-}
-
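(A few concrete cases make the merge rules above easier to read. The checks below are hypothetical: they assume the Sequence enum and a MergeSeqs helper with exactly the semantics deleted here are visible; after this patch the equivalent logic presumably lives behind the PtrState interface in PtrState.h.)

    #include <cassert>

    static void mergeSeqsExamples() {
      // Top-down, the side further along the Retain -> CanRelease -> Use
      // progression wins ...
      assert(MergeSeqs(S_Retain, S_Use, /*TopDown=*/true) == S_Use);
      // ... but a retain meeting a release-like state is not mergeable.
      assert(MergeSeqs(S_Retain, S_Release, /*TopDown=*/true) == S_None);
      // Bottom-up, the side closer to the retain wins.
      assert(MergeSeqs(S_Use, S_MovableRelease, /*TopDown=*/false) == S_Use);
      // Between two release-like states, keep the more conservative one.
      assert(MergeSeqs(S_Stop, S_MovableRelease, /*TopDown=*/false) == S_Stop);
      assert(MergeSeqs(S_Release, S_MovableRelease, /*TopDown=*/false) == S_Release);
    }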
-namespace {
- /// \brief Unidirectional information about either a
- /// retain-decrement-use-release sequence or release-use-decrement-retain
- /// reverse sequence.
- struct RRInfo {
- /// After an objc_retain, the reference count of the referenced
- /// object is known to be positive. Similarly, before an objc_release, the
- /// reference count of the referenced object is known to be positive. If
- /// there are retain-release pairs in code regions where the retain count
- /// is known to be positive, they can be eliminated, regardless of any side
- /// effects between them.
- ///
- /// Also, a retain+release pair nested within another retain+release
- /// pair all on the known same pointer value can be eliminated, regardless
- /// of any intervening side effects.
- ///
- /// KnownSafe is true when either of these conditions is satisfied.
- bool KnownSafe;
-
- /// True of the objc_release calls are all marked with the "tail" keyword.
- bool IsTailCallRelease;
-
- /// If the Calls are objc_release calls and they all have a
- /// clang.imprecise_release tag, this is the metadata tag.
- MDNode *ReleaseMetadata;
-
- /// For a top-down sequence, the set of objc_retains or
- /// objc_retainBlocks. For bottom-up, the set of objc_releases.
- SmallPtrSet<Instruction *, 2> Calls;
-
- /// The set of optimal insert positions for moving calls in the opposite
- /// sequence.
- SmallPtrSet<Instruction *, 2> ReverseInsertPts;
-
- /// If this is true, we cannot perform code motion but can still remove
- /// retain/release pairs.
- bool CFGHazardAfflicted;
-
- RRInfo() :
- KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
- CFGHazardAfflicted(false) {}
-
- void clear();
-
- /// Conservatively merge the two RRInfo. Returns true if a partial merge has
- /// occurred, false otherwise.
- bool Merge(const RRInfo &Other);
-
- };
-}
-
-void RRInfo::clear() {
- KnownSafe = false;
- IsTailCallRelease = false;
- ReleaseMetadata = nullptr;
- Calls.clear();
- ReverseInsertPts.clear();
- CFGHazardAfflicted = false;
-}
-
-bool RRInfo::Merge(const RRInfo &Other) {
- // Conservatively merge the ReleaseMetadata information.
- if (ReleaseMetadata != Other.ReleaseMetadata)
- ReleaseMetadata = nullptr;
-
- // Conservatively merge the boolean state.
- KnownSafe &= Other.KnownSafe;
- IsTailCallRelease &= Other.IsTailCallRelease;
- CFGHazardAfflicted |= Other.CFGHazardAfflicted;
-
- // Merge the call sets.
- Calls.insert(Other.Calls.begin(), Other.Calls.end());
-
- // Merge the insert point sets. If there are any differences,
- // that makes this a partial merge.
- bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
- for (Instruction *Inst : Other.ReverseInsertPts)
- Partial |= ReverseInsertPts.insert(Inst).second;
- return Partial;
-}
-
-namespace {
- /// \brief This class summarizes several per-pointer runtime properties which
- /// are propogated through the flow graph.
- class PtrState {
- /// True if the reference count is known to be incremented.
- bool KnownPositiveRefCount;
-
- /// True if we've seen an opportunity for partial RR elimination, such as
- /// pushing calls into a CFG triangle or into one side of a CFG diamond.
- bool Partial;
-
- /// The current position in the sequence.
- unsigned char Seq : 8;
-
- /// Unidirectional information about the current sequence.
- RRInfo RRI;
-
- public:
- PtrState() : KnownPositiveRefCount(false), Partial(false),
- Seq(S_None) {}
-
-
- bool IsKnownSafe() const {
- return RRI.KnownSafe;
- }
-
- void SetKnownSafe(const bool NewValue) {
- RRI.KnownSafe = NewValue;
- }
-
- bool IsTailCallRelease() const {
- return RRI.IsTailCallRelease;
- }
-
- void SetTailCallRelease(const bool NewValue) {
- RRI.IsTailCallRelease = NewValue;
- }
-
- bool IsTrackingImpreciseReleases() const {
- return RRI.ReleaseMetadata != nullptr;
- }
-
- const MDNode *GetReleaseMetadata() const {
- return RRI.ReleaseMetadata;
- }
-
- void SetReleaseMetadata(MDNode *NewValue) {
- RRI.ReleaseMetadata = NewValue;
- }
-
- bool IsCFGHazardAfflicted() const {
- return RRI.CFGHazardAfflicted;
- }
-
- void SetCFGHazardAfflicted(const bool NewValue) {
- RRI.CFGHazardAfflicted = NewValue;
- }
-
- void SetKnownPositiveRefCount() {
- DEBUG(dbgs() << "Setting Known Positive.\n");
- KnownPositiveRefCount = true;
- }
-
- void ClearKnownPositiveRefCount() {
- DEBUG(dbgs() << "Clearing Known Positive.\n");
- KnownPositiveRefCount = false;
- }
-
- bool HasKnownPositiveRefCount() const {
- return KnownPositiveRefCount;
- }
-
- void SetSeq(Sequence NewSeq) {
- DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
- Seq = NewSeq;
- }
-
- Sequence GetSeq() const {
- return static_cast<Sequence>(Seq);
- }
-
- void ClearSequenceProgress() {
- ResetSequenceProgress(S_None);
- }
-
- void ResetSequenceProgress(Sequence NewSeq) {
- DEBUG(dbgs() << "Resetting sequence progress.\n");
- SetSeq(NewSeq);
- Partial = false;
- RRI.clear();
- }
-
- void Merge(const PtrState &Other, bool TopDown);
-
- void InsertCall(Instruction *I) {
- RRI.Calls.insert(I);
- }
-
- void InsertReverseInsertPt(Instruction *I) {
- RRI.ReverseInsertPts.insert(I);
- }
-
- void ClearReverseInsertPts() {
- RRI.ReverseInsertPts.clear();
- }
-
- bool HasReverseInsertPts() const {
- return !RRI.ReverseInsertPts.empty();
- }
-
- const RRInfo &GetRRInfo() const {
- return RRI;
- }
- };
-}
-
-void
-PtrState::Merge(const PtrState &Other, bool TopDown) {
- Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
- KnownPositiveRefCount &= Other.KnownPositiveRefCount;
-
- // If we're not in a sequence (anymore), drop all associated state.
- if (Seq == S_None) {
- Partial = false;
- RRI.clear();
- } else if (Partial || Other.Partial) {
- // If we're doing a merge on a path that's previously seen a partial
- // merge, conservatively drop the sequence, to avoid doing partial
- // RR elimination. If the branch predicates for the two merge differ,
- // mixing them is unsafe.
- ClearSequenceProgress();
- } else {
- // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
- // point, we know that currently we are not partial. Stash whether or not
- // the merge operation caused us to undergo a partial merging of reverse
- // insertion points.
- Partial = RRI.Merge(Other.RRI);
- }
-}
-
-namespace {
/// \brief Per-BasicBlock state.
class BBState {
/// The number of unique control paths from the entry which can reach this
@@ -566,20 +186,18 @@ namespace {
/// The number of unique control paths to exits from this block.
unsigned BottomUpPathCount;
- /// A type for PerPtrTopDown and PerPtrBottomUp.
- typedef MapVector<const Value *, PtrState> MapTy;
-
/// The top-down traversal uses this to record information known about a
/// pointer at the bottom of each block.
- MapTy PerPtrTopDown;
+ BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;
/// The bottom-up traversal uses this to record information known about a
/// pointer at the top of each block.
- MapTy PerPtrBottomUp;
+ BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;
/// Effective predecessors of the current block ignoring ignorable edges and
/// ignored backedges.
SmallVector<BasicBlock *, 2> Preds;
+
/// Effective successors of the current block ignoring ignorable edges and
/// ignored backedges.
SmallVector<BasicBlock *, 2> Succs;
@@ -589,26 +207,38 @@ namespace {
BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
- typedef MapTy::iterator ptr_iterator;
- typedef MapTy::const_iterator ptr_const_iterator;
+ typedef decltype(PerPtrTopDown)::iterator top_down_ptr_iterator;
+ typedef decltype(PerPtrTopDown)::const_iterator const_top_down_ptr_iterator;
- ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
- ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
- ptr_const_iterator top_down_ptr_begin() const {
+ top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
+ top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
+ const_top_down_ptr_iterator top_down_ptr_begin() const {
return PerPtrTopDown.begin();
}
- ptr_const_iterator top_down_ptr_end() const {
+ const_top_down_ptr_iterator top_down_ptr_end() const {
return PerPtrTopDown.end();
}
+ bool hasTopDownPtrs() const {
+ return !PerPtrTopDown.empty();
+ }
+
+ typedef decltype(PerPtrBottomUp)::iterator bottom_up_ptr_iterator;
+ typedef decltype(
+ PerPtrBottomUp)::const_iterator const_bottom_up_ptr_iterator;
- ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
- ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
- ptr_const_iterator bottom_up_ptr_begin() const {
+ bottom_up_ptr_iterator bottom_up_ptr_begin() {
return PerPtrBottomUp.begin();
}
- ptr_const_iterator bottom_up_ptr_end() const {
+ bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
+ const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
+ return PerPtrBottomUp.begin();
+ }
+ const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
return PerPtrBottomUp.end();
}
+ bool hasBottomUpPtrs() const {
+ return !PerPtrBottomUp.empty();
+ }
/// Mark this block as being an entry block, which has one path from the
/// entry by definition.
@@ -621,20 +251,20 @@ namespace {
/// Attempt to find the PtrState object describing the top down state for
/// pointer Arg. Return a new initialized PtrState describing the top down
/// state for Arg if we do not find one.
- PtrState &getPtrTopDownState(const Value *Arg) {
+ TopDownPtrState &getPtrTopDownState(const Value *Arg) {
return PerPtrTopDown[Arg];
}
/// Attempt to find the PtrState object describing the bottom up state for
/// pointer Arg. Return a new initialized PtrState describing the bottom up
/// state for Arg if we do not find one.
- PtrState &getPtrBottomUpState(const Value *Arg) {
+ BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
return PerPtrBottomUp[Arg];
}
/// Attempt to find the PtrState object describing the bottom up state for
/// pointer Arg.
- ptr_iterator findPtrBottomUpState(const Value *Arg) {
+ bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
return PerPtrBottomUp.find(Arg);
}
@@ -685,6 +315,11 @@ namespace {
const unsigned BBState::OverflowOccurredValue = 0xffffffff;
}
+namespace llvm {
+raw_ostream &operator<<(raw_ostream &OS,
+ BBState &BBState) LLVM_ATTRIBUTE_UNUSED;
+}
+
void BBState::InitFromPred(const BBState &Other) {
PerPtrTopDown = Other.PerPtrTopDown;
TopDownPathCount = Other.TopDownPathCount;
@@ -724,19 +359,18 @@ void BBState::MergePred(const BBState &Other) {
// For each entry in the other set, if our set has an entry with the same key,
// merge the entries. Otherwise, copy the entry and merge it with an empty
// entry.
- for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
- ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
- std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
- Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
+ for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
+ MI != ME; ++MI) {
+ auto Pair = PerPtrTopDown.insert(*MI);
+ Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
/*TopDown=*/true);
}
// For each entry in our set, if the other set doesn't have an entry with the
// same key, force it to merge with an empty entry.
- for (ptr_iterator MI = top_down_ptr_begin(),
- ME = top_down_ptr_end(); MI != ME; ++MI)
+ for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
- MI->second.Merge(PtrState(), /*TopDown=*/true);
+ MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
@@ -768,304 +402,80 @@ void BBState::MergeSucc(const BBState &Other) {
// For each entry in the other set, if our set has an entry with the
// same key, merge the entries. Otherwise, copy the entry and merge
// it with an empty entry.
- for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
- ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
- std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
- Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
+ for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
+ MI != ME; ++MI) {
+ auto Pair = PerPtrBottomUp.insert(*MI);
+ Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
/*TopDown=*/false);
}
// For each entry in our set, if the other set doesn't have an entry
// with the same key, force it to merge with an empty entry.
- for (ptr_iterator MI = bottom_up_ptr_begin(),
- ME = bottom_up_ptr_end(); MI != ME; ++MI)
+ for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
+ ++MI)
if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
- MI->second.Merge(PtrState(), /*TopDown=*/false);
+ MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}
-// Only enable ARC Annotations if we are building a debug version of
-// libObjCARCOpts.
-#ifndef NDEBUG
-#define ARC_ANNOTATIONS
-#endif
-
-// Define some macros along the lines of DEBUG and some helper functions to make
-// it cleaner to create annotations in the source code and to no-op when not
-// building in debug mode.
-#ifdef ARC_ANNOTATIONS
-
-#include "llvm/Support/CommandLine.h"
-
-/// Enable/disable ARC sequence annotations.
-static cl::opt<bool>
-EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
- cl::desc("Enable emission of arc data flow analysis "
- "annotations"));
-static cl::opt<bool>
-DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
- cl::desc("Disable check for cfg hazards when "
- "annotating"));
-static cl::opt<std::string>
-ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
- cl::init(""),
- cl::desc("filter out all data flow annotations "
- "but those that apply to the given "
- "target llvm identifier."));
-
-/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
-/// instruction so that we can track backwards when post processing via the llvm
-/// arc annotation processor tool. If the function is an
-static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
- Value *Ptr) {
- MDString *Hash = nullptr;
-
- // If pointer is a result of an instruction and it does not have a source
- // MDNode it, attach a new MDNode onto it. If pointer is a result of
- // an instruction and does have a source MDNode attached to it, return a
- // reference to said Node. Otherwise just return 0.
- if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
- MDNode *Node;
- if (!(Node = Inst->getMetadata(NodeId))) {
- // We do not have any node. Generate and attatch the hash MDString to the
- // instruction.
-
- // We just use an MDString to ensure that this metadata gets written out
- // of line at the module level and to provide a very simple format
- // encoding the information herein. Both of these makes it simpler to
- // parse the annotations by a simple external program.
- std::string Str;
- raw_string_ostream os(Str);
- os << "(" << Inst->getParent()->getParent()->getName() << ",%"
- << Inst->getName() << ")";
-
- Hash = MDString::get(Inst->getContext(), os.str());
- Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash));
- } else {
- // We have a node. Grab its hash and return it.
- assert(Node->getNumOperands() == 1 &&
- "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
- Hash = cast<MDString>(Node->getOperand(0));
+raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
+ // Dump the pointers we are tracking.
+ OS << " TopDown State:\n";
+ if (!BBInfo.hasTopDownPtrs()) {
+ DEBUG(llvm::dbgs() << " NONE!\n");
+ } else {
+ for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
+ I != E; ++I) {
+ const PtrState &P = I->second;
+ OS << " Ptr: " << *I->first
+ << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
+ << "\n ImpreciseRelease: "
+ << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
+ << " HasCFGHazards: "
+ << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
+ << " KnownPositive: "
+ << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
+ << " Seq: "
+ << P.GetSeq() << "\n";
}
- } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
- std::string str;
- raw_string_ostream os(str);
- os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
- << ")";
- Hash = MDString::get(Arg->getContext(), os.str());
- }
-
- return Hash;
-}
-
-static std::string SequenceToString(Sequence A) {
- std::string str;
- raw_string_ostream os(str);
- os << A;
- return os.str();
-}
-
-/// Helper function to change a Sequence into a String object using our overload
-/// for raw_ostream so we only have printing code in one location.
-static MDString *SequenceToMDString(LLVMContext &Context,
- Sequence A) {
- return MDString::get(Context, SequenceToString(A));
-}
-
-/// A simple function to generate a MDNode which describes the change in state
-/// for Value *Ptr caused by Instruction *Inst.
-static void AppendMDNodeToInstForPtr(unsigned NodeId,
- Instruction *Inst,
- Value *Ptr,
- MDString *PtrSourceMDNodeID,
- Sequence OldSeq,
- Sequence NewSeq) {
- MDNode *Node = nullptr;
- Metadata *tmp[3] = {PtrSourceMDNodeID,
- SequenceToMDString(Inst->getContext(), OldSeq),
- SequenceToMDString(Inst->getContext(), NewSeq)};
- Node = MDNode::get(Inst->getContext(), tmp);
-
- Inst->setMetadata(NodeId, Node);
-}
-
-/// Add to the beginning of the basic block llvm.ptr.annotations which show the
-/// state of a pointer at the entrance to a basic block.
-static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
- Value *Ptr, Sequence Seq) {
- // If we have a target identifier, make sure that we match it before
- // continuing.
- if(!ARCAnnotationTargetIdentifier.empty() &&
- !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
- return;
-
- Module *M = BB->getParent()->getParent();
- LLVMContext &C = M->getContext();
- Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- Type *I8XX = PointerType::getUnqual(I8X);
- Type *Params[] = {I8XX, I8XX};
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
- /*isVarArg=*/false);
- Constant *Callee = M->getOrInsertFunction(Name, FTy);
-
- IRBuilder<> Builder(BB, BB->getFirstInsertionPt());
-
- Value *PtrName;
- StringRef Tmp = Ptr->getName();
- if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
- Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
- Tmp + "_STR");
- PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
- cast<Constant>(ActualPtrName), Tmp);
- }
-
- Value *S;
- std::string SeqStr = SequenceToString(Seq);
- if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
- Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
- SeqStr + "_STR");
- S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
- cast<Constant>(ActualPtrName), SeqStr);
- }
-
- Builder.CreateCall2(Callee, PtrName, S);
-}
-
-/// Add to the end of the basic block llvm.ptr.annotations which show the state
-/// of the pointer at the bottom of the basic block.
-static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
- Value *Ptr, Sequence Seq) {
- // If we have a target identifier, make sure that we match it before emitting
- // an annotation.
- if(!ARCAnnotationTargetIdentifier.empty() &&
- !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
- return;
-
- Module *M = BB->getParent()->getParent();
- LLVMContext &C = M->getContext();
- Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- Type *I8XX = PointerType::getUnqual(I8X);
- Type *Params[] = {I8XX, I8XX};
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
- /*isVarArg=*/false);
- Constant *Callee = M->getOrInsertFunction(Name, FTy);
-
- IRBuilder<> Builder(BB, std::prev(BB->end()));
-
- Value *PtrName;
- StringRef Tmp = Ptr->getName();
- if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
- Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
- Tmp + "_STR");
- PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
- cast<Constant>(ActualPtrName), Tmp);
}
- Value *S;
- std::string SeqStr = SequenceToString(Seq);
- if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
- Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
- SeqStr + "_STR");
- S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
- cast<Constant>(ActualPtrName), SeqStr);
+ OS << " BottomUp State:\n";
+ if (!BBInfo.hasBottomUpPtrs()) {
+ DEBUG(llvm::dbgs() << " NONE!\n");
+ } else {
+ for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
+ I != E; ++I) {
+ const PtrState &P = I->second;
+ OS << " Ptr: " << *I->first
+ << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
+ << "\n ImpreciseRelease: "
+ << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
+ << " HasCFGHazards: "
+ << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
+ << " KnownPositive: "
+ << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
+ << " Seq: "
+ << P.GetSeq() << "\n";
+ }
}
- Builder.CreateCall2(Callee, PtrName, S);
-}
-/// Adds a source annotation to pointer and a state change annotation to Inst
-/// referencing the source annotation and the old/new state of pointer.
-static void GenerateARCAnnotation(unsigned InstMDId,
- unsigned PtrMDId,
- Instruction *Inst,
- Value *Ptr,
- Sequence OldSeq,
- Sequence NewSeq) {
- if (EnableARCAnnotations) {
- // If we have a target identifier, make sure that we match it before
- // emitting an annotation.
- if(!ARCAnnotationTargetIdentifier.empty() &&
- !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
- return;
-
- // First generate the source annotation on our pointer. This will return an
- // MDString* if Ptr actually comes from an instruction implying we can put
- // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
- // then we know that our pointer is from an Argument so we put a reference
- // to the argument number.
- //
- // The point of this is to make it easy for the
- // llvm-arc-annotation-processor tool to cross reference where the source
- // pointer is in the LLVM IR since the LLVM IR parser does not submit such
- // information via debug info for backends to use (since why would anyone
- // need such a thing from LLVM IR besides in non-standard cases
- // [i.e. this]).
- MDString *SourcePtrMDNode =
- AppendMDNodeToSourcePtr(PtrMDId, Ptr);
- AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
- NewSeq);
- }
+ return OS;
}
-// The actual interface for accessing the above functionality is defined via
-// some simple macros which are defined below. We do this so that the user does
-// not need to pass in what metadata id is needed resulting in cleaner code and
-// additionally since it provides an easy way to conditionally no-op all
-// annotation support in a non-debug build.
-
-/// Use this macro to annotate a sequence state change when processing
-/// instructions bottom up,
-#define ANNOTATE_BOTTOMUP(inst, ptr, old, new) \
- GenerateARCAnnotation(ARCAnnotationBottomUpMDKind, \
- ARCAnnotationProvenanceSourceMDKind, (inst), \
- const_cast<Value*>(ptr), (old), (new))
-/// Use this macro to annotate a sequence state change when processing
-/// instructions top down.
-#define ANNOTATE_TOPDOWN(inst, ptr, old, new) \
- GenerateARCAnnotation(ARCAnnotationTopDownMDKind, \
- ARCAnnotationProvenanceSourceMDKind, (inst), \
- const_cast<Value*>(ptr), (old), (new))
-
-#define ANNOTATE_BB(_states, _bb, _name, _type, _direction) \
- do { \
- if (EnableARCAnnotations) { \
- for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
- E = (_states)._direction##_ptr_end(); I != E; ++I) { \
- Value *Ptr = const_cast<Value*>(I->first); \
- Sequence Seq = I->second.GetSeq(); \
- GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq); \
- } \
- } \
- } while (0)
-
-#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock) \
- ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart", \
- Entrance, bottom_up)
-#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock) \
- ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend", \
- Terminator, bottom_up)
-#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock) \
- ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart", \
- Entrance, top_down)
-#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock) \
- ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend", \
- Terminator, top_down)
-
-#else // !ARC_ANNOTATION
-// If annotations are off, noop.
-#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
-#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
-#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
-#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
-#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
-#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)
-#endif // !ARC_ANNOTATION
-
namespace {
+
/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
bool Changed;
ProvenanceAnalysis PA;
+
+ /// A cache of references to runtime entry point constants.
ARCRuntimeEntryPoints EP;
+ /// A cache of MDKinds that can be passed into other functions to propagate
+ /// MDKind identifiers.
+ ARCMDKindCache MDKindCache;
+
// This is used to track if a pointer is stored into an alloca.
DenseSet<const Value *> MultiOwnersSet;
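(The new MDKindCache field replaces the loose ImpreciseReleaseMDKind / CopyOnEscapeMDKind / NoObjCARCExceptionsMDKind unsigneds deleted in the next hunk. Its real interface is defined in a header outside this diff; the sketch below is only a hypothetical minimal equivalent showing the idea: resolve each metadata kind string to its unsigned ID once, then hand the cached ID to callers such as the setMetadata calls further down.)

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    // Hypothetical stand-in for ARCMDKindCache; the enum mirrors how it is
    // used below (MDKindCache.get(ARCMDKindID::ImpreciseRelease)).
    enum class ARCMDKindID { ImpreciseRelease, CopyOnEscape, NoObjCARCExceptions };

    class MDKindCacheSketch {
      unsigned IDs[3];
    public:
      void init(llvm::Module *M) {
        llvm::LLVMContext &Ctx = M->getContext();
        IDs[0] = Ctx.getMDKindID("clang.imprecise_release");
        IDs[1] = Ctx.getMDKindID("clang.arc.copy_on_escape");
        IDs[2] = Ctx.getMDKindID("clang.arc.no_objc_arc_exceptions");
      }
      unsigned get(ARCMDKindID ID) const { return IDs[static_cast<unsigned>(ID)]; }
    };

(With something like this in place, NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), ...) in the hunks below attaches the same tag the old ImpreciseReleaseMDKind field identified.)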
@@ -1076,24 +486,6 @@ namespace {
/// is in fact used in the current function.
unsigned UsedInThisFunction;
- /// The Metadata Kind for clang.imprecise_release metadata.
- unsigned ImpreciseReleaseMDKind;
-
- /// The Metadata Kind for clang.arc.copy_on_escape metadata.
- unsigned CopyOnEscapeMDKind;
-
- /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
- unsigned NoObjCARCExceptionsMDKind;
-
-#ifdef ARC_ANNOTATIONS
- /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
- unsigned ARCAnnotationBottomUpMDKind;
- /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
- unsigned ARCAnnotationTopDownMDKind;
- /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
- unsigned ARCAnnotationProvenanceSourceMDKind;
-#endif // ARC_ANNOATIONS
-
bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
ARCInstKind &Class);
@@ -1102,47 +494,41 @@ namespace {
void CheckForCFGHazards(const BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
BBState &MyStates) const;
- bool VisitInstructionBottomUp(Instruction *Inst,
- BasicBlock *BB,
- MapVector<Value *, RRInfo> &Retains,
+ bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
+ BlotMapVector<Value *, RRInfo> &Retains,
BBState &MyStates);
bool VisitBottomUp(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains);
+ BlotMapVector<Value *, RRInfo> &Retains);
bool VisitInstructionTopDown(Instruction *Inst,
DenseMap<Value *, RRInfo> &Releases,
BBState &MyStates);
bool VisitTopDown(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
DenseMap<Value *, RRInfo> &Releases);
- bool Visit(Function &F,
- DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains,
+ bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases);
void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
- MapVector<Value *, RRInfo> &Retains,
+ BlotMapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
- SmallVectorImpl<Instruction *> &DeadInsts,
- Module *M);
-
- bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains,
- DenseMap<Value *, RRInfo> &Releases,
- Module *M,
- SmallVectorImpl<Instruction *> &NewRetains,
- SmallVectorImpl<Instruction *> &NewReleases,
- SmallVectorImpl<Instruction *> &DeadInsts,
- RRInfo &RetainsToMove,
- RRInfo &ReleasesToMove,
- Value *Arg,
- bool KnownSafe,
- bool &AnyPairsCompletelyEliminated);
+ SmallVectorImpl<Instruction *> &DeadInsts, Module *M);
+
+ bool
+ PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
+ RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
+ Value *Arg, bool KnownSafe,
+ bool &AnyPairsCompletelyEliminated);
bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains,
- DenseMap<Value *, RRInfo> &Releases,
- Module *M);
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M);
void OptimizeWeakCalls(Function &F);
@@ -1238,7 +624,7 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
"objc_retain since the operand is not a return value.\n"
"Old = " << *RetainRV << "\n");
- Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
DEBUG(dbgs() << "New = " << *RetainRV << "\n");
@@ -1274,7 +660,7 @@ void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
"Old = " << *AutoreleaseRV << "\n");
CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
- Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
AutoreleaseRVCI->setCalledFunction(NewDecl);
AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
Class = ARCInstKind::Autorelease;
@@ -1380,10 +766,11 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
// Create the declaration lazily.
LLVMContext &C = Inst->getContext();
- Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
Call);
- NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));
+ NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
+ MDNode::get(C, None));
DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
"since x is otherwise unused.\nOld: " << *Call << "\nNew: "
@@ -1547,7 +934,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
const bool SuccSRRIKnownSafe,
- PtrState &S,
+ TopDownPtrState &S,
bool &SomeSuccHasSame,
bool &AllSuccsHaveSame,
bool &NotAllSeqEqualButKnownSafe,
@@ -1585,7 +972,7 @@ static void CheckForUseCFGHazard(const Sequence SuccSSeq,
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
const bool SuccSRRIKnownSafe,
- PtrState &S,
+ TopDownPtrState &S,
bool &SomeSuccHasSame,
bool &AllSuccsHaveSame,
bool &NotAllSeqEqualButKnownSafe) {
@@ -1618,9 +1005,9 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
BBState &MyStates) const {
// If any top-down local-use or possible-dec has a succ which is earlier in
// the sequence, forget it.
- for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
- E = MyStates.top_down_ptr_end(); I != E; ++I) {
- PtrState &S = I->second;
+ for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
+ I != E; ++I) {
+ TopDownPtrState &S = I->second;
const Sequence Seq = I->second.GetSeq();
// We only care about S_Retain, S_CanRelease, and S_Use.
@@ -1646,7 +1033,7 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
const DenseMap<const BasicBlock *, BBState>::iterator BBI =
BBStates.find(*SI);
assert(BBI != BBStates.end());
- const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
const Sequence SuccSSeq = SuccS.GetSeq();
// If bottom up, the pointer is in an S_None state, clear the sequence
@@ -1705,44 +1092,21 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
}
}
-bool
-ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
- BasicBlock *BB,
- MapVector<Value *, RRInfo> &Retains,
- BBState &MyStates) {
+bool ObjCARCOpt::VisitInstructionBottomUp(
+ Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
+ BBState &MyStates) {
bool NestingDetected = false;
ARCInstKind Class = GetARCInstKind(Inst);
const Value *Arg = nullptr;
- DEBUG(dbgs() << "Class: " << Class << "\n");
+ DEBUG(dbgs() << " Class: " << Class << "\n");
switch (Class) {
case ARCInstKind::Release: {
Arg = GetArgRCIdentityRoot(Inst);
- PtrState &S = MyStates.getPtrBottomUpState(Arg);
-
- // If we see two releases in a row on the same pointer. If so, make
- // a note, and we'll cicle back to revisit it after we've
- // hopefully eliminated the second release, which may allow us to
- // eliminate the first release too.
- // Theoretically we could implement removal of nested retain+release
- // pairs by making PtrState hold a stack of states, but this is
- // simple and avoids adding overhead for the non-nested case.
- if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
- DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
- NestingDetected = true;
- }
-
- MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
- Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
- ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
- S.ResetSequenceProgress(NewSeq);
- S.SetReleaseMetadata(ReleaseMetadata);
- S.SetKnownSafe(S.HasKnownPositiveRefCount());
- S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
- S.InsertCall(Inst);
- S.SetKnownPositiveRefCount();
+ BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
+ NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
break;
}
case ARCInstKind::RetainBlock:
@@ -1753,35 +1117,16 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
case ARCInstKind::Retain:
case ARCInstKind::RetainRV: {
Arg = GetArgRCIdentityRoot(Inst);
-
- PtrState &S = MyStates.getPtrBottomUpState(Arg);
- S.SetKnownPositiveRefCount();
-
- Sequence OldSeq = S.GetSeq();
- switch (OldSeq) {
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- case S_Use:
- // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
- // imprecise release, clear our reverse insertion points.
- if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
- S.ClearReverseInsertPts();
- // FALL THROUGH
- case S_CanRelease:
- // Don't do retain+release tracking for ARCInstKind::RetainRV,
- // because it's
- // better to let it remain as the first instruction after a call.
- if (Class != ARCInstKind::RetainRV)
+ BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
+ if (S.MatchWithRetain()) {
+ // Don't do retain+release tracking for ARCInstKind::RetainRV, because
+ // it's better to let it remain as the first instruction after a call.
+ if (Class != ARCInstKind::RetainRV) {
+ DEBUG(llvm::dbgs() << " Matching with: " << *Inst << "\n");
Retains[Inst] = S.GetRRInfo();
+ }
S.ClearSequenceProgress();
- break;
- case S_None:
- break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
}
- ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
// A retain moving bottom up can be a use.
break;
}
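(For orientation, this is the shape of code the bottom-up walk above and the top-down walk in the later hunks are pairing up. A minimal sketch; the runtime entry points are declared loosely here for illustration, the real signatures take and return id.)

    extern "C" void *objc_retain(void *);
    extern "C" void objc_release(void *);

    void pairExample(void *X, void (*Use)(void *)) {
      objc_retain(X);   // top-down walk: X enters S_Retain
      Use(X);           // a use that cannot drop the ref count: S_Use
      objc_release(X);  // bottom-up walk: X enters S_Release/S_MovableRelease
      // When the two walks meet with compatible, known-safe RRInfo for X, the
      // pass deletes both calls (or moves them closer together).
    }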
@@ -1807,9 +1152,10 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
// in the presence of allocas we only unconditionally remove pointers if
// both our retain and our release are KnownSafe.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
- BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
- GetRCIdentityRoot(SI->getValueOperand()));
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand(), DL)) {
+ auto I = MyStates.findPtrBottomUpState(
+ GetRCIdentityRoot(SI->getValueOperand()));
if (I != MyStates.bottom_up_ptr_end())
MultiOwnersSet.insert(I->first);
}
@@ -1821,90 +1167,26 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
// Consider any other possible effects of this instruction on each
// pointer being tracked.
- for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
- ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
+ for (auto MI = MyStates.bottom_up_ptr_begin(),
+ ME = MyStates.bottom_up_ptr_end();
+ MI != ME; ++MI) {
const Value *Ptr = MI->first;
if (Ptr == Arg)
continue; // Handled above.
- PtrState &S = MI->second;
- Sequence Seq = S.GetSeq();
+ BottomUpPtrState &S = MI->second;
- // Check for possible releases.
- if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
- << "\n");
- S.ClearKnownPositiveRefCount();
- switch (Seq) {
- case S_Use:
- S.SetSeq(S_CanRelease);
- ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
- continue;
- case S_CanRelease:
- case S_Release:
- case S_MovableRelease:
- case S_Stop:
- case S_None:
- break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
- }
- }
+ if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
+ continue;
- // Check for possible direct uses.
- switch (Seq) {
- case S_Release:
- case S_MovableRelease:
- if (CanUse(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
- << "\n");
- assert(!S.HasReverseInsertPts());
- // If this is an invoke instruction, we're scanning it as part of
- // one of its successor blocks, since we can't insert code after it
- // in its own block, and we don't want to split critical edges.
- if (isa<InvokeInst>(Inst))
- S.InsertReverseInsertPt(BB->getFirstInsertionPt());
- else
- S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
- S.SetSeq(S_Use);
- ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
- } else if (Seq == S_Release && IsUser(Class)) {
- DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
- << "\n");
- // Non-movable releases depend on any possible objc pointer use.
- S.SetSeq(S_Stop);
- ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
- assert(!S.HasReverseInsertPts());
- // As above; handle invoke specially.
- if (isa<InvokeInst>(Inst))
- S.InsertReverseInsertPt(BB->getFirstInsertionPt());
- else
- S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
- }
- break;
- case S_Stop:
- if (CanUse(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
- << "\n");
- S.SetSeq(S_Use);
- ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
- }
- break;
- case S_CanRelease:
- case S_Use:
- case S_None:
- break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
- }
+ S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
}
return NestingDetected;
}
-bool
-ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
- DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains) {
+bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains) {
DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
@@ -1929,9 +1211,8 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
}
}
- // If ARC Annotations are enabled, output the current state of pointers at the
- // bottom of the basic block.
- ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
+ DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
// Visit all the instructions, bottom-up.
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
@@ -1941,7 +1222,7 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
if (isa<InvokeInst>(Inst))
continue;
- DEBUG(dbgs() << "Visiting " << *Inst << "\n");
+ DEBUG(dbgs() << " Visiting " << *Inst << "\n");
NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
}
@@ -1956,9 +1237,7 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
}
- // If ARC Annotations are enabled, output the current state of pointers at the
- // top of the basic block.
- ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
+ DEBUG(llvm::dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
return NestingDetected;
}
@@ -1971,144 +1250,63 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
ARCInstKind Class = GetARCInstKind(Inst);
const Value *Arg = nullptr;
+ DEBUG(llvm::dbgs() << " Class: " << Class << "\n");
+
switch (Class) {
case ARCInstKind::RetainBlock:
// In OptimizeIndividualCalls, we have strength reduced all optimizable
// objc_retainBlocks to objc_retains. Thus at this point any
- // objc_retainBlocks that we see are not optimizable.
+ // objc_retainBlocks that we see are not optimizable. We need to break since
+ // a retain can be a potential use.
break;
case ARCInstKind::Retain:
case ARCInstKind::RetainRV: {
Arg = GetArgRCIdentityRoot(Inst);
-
- PtrState &S = MyStates.getPtrTopDownState(Arg);
-
- // Don't do retain+release tracking for ARCInstKind::RetainRV, because
- // it's
- // better to let it remain as the first instruction after a call.
- if (Class != ARCInstKind::RetainRV) {
- // If we see two retains in a row on the same pointer. If so, make
- // a note, and we'll cicle back to revisit it after we've
- // hopefully eliminated the second retain, which may allow us to
- // eliminate the first retain too.
- // Theoretically we could implement removal of nested retain+release
- // pairs by making PtrState hold a stack of states, but this is
- // simple and avoids adding overhead for the non-nested case.
- if (S.GetSeq() == S_Retain)
- NestingDetected = true;
-
- ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
- S.ResetSequenceProgress(S_Retain);
- S.SetKnownSafe(S.HasKnownPositiveRefCount());
- S.InsertCall(Inst);
- }
-
- S.SetKnownPositiveRefCount();
-
+ TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
+ NestingDetected |= S.InitTopDown(Class, Inst);
// A retain can be a potential use; procede to the generic checking
// code below.
break;
}
case ARCInstKind::Release: {
Arg = GetArgRCIdentityRoot(Inst);
-
- PtrState &S = MyStates.getPtrTopDownState(Arg);
- S.ClearKnownPositiveRefCount();
-
- Sequence OldSeq = S.GetSeq();
-
- MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
-
- switch (OldSeq) {
- case S_Retain:
- case S_CanRelease:
- if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
- S.ClearReverseInsertPts();
- // FALL THROUGH
- case S_Use:
- S.SetReleaseMetadata(ReleaseMetadata);
- S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
+ TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
+ // Try to form a tentative pair in between this release instruction and the
+ // top down pointers that we are tracking.
+ if (S.MatchWithRelease(MDKindCache, Inst)) {
+ // If we succeed, copy S's RRInfo into the Release -> {Retain Set
+ // Map}. Then we clear S.
+ DEBUG(llvm::dbgs() << " Matching with: " << *Inst << "\n");
Releases[Inst] = S.GetRRInfo();
- ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
S.ClearSequenceProgress();
- break;
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
}
break;
}
case ARCInstKind::AutoreleasepoolPop:
// Conservatively, clear MyStates for all known pointers.
MyStates.clearTopDownPointers();
- return NestingDetected;
+ return false;
case ARCInstKind::AutoreleasepoolPush:
case ARCInstKind::None:
- // These are irrelevant.
- return NestingDetected;
+ // These can not be uses of
+ return false;
default:
break;
}
// Consider any other possible effects of this instruction on each
// pointer being tracked.
- for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
- ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
+ for (auto MI = MyStates.top_down_ptr_begin(),
+ ME = MyStates.top_down_ptr_end();
+ MI != ME; ++MI) {
const Value *Ptr = MI->first;
if (Ptr == Arg)
continue; // Handled above.
- PtrState &S = MI->second;
- Sequence Seq = S.GetSeq();
-
- // Check for possible releases.
- if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
- << "\n");
- S.ClearKnownPositiveRefCount();
- switch (Seq) {
- case S_Retain:
- S.SetSeq(S_CanRelease);
- ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
- assert(!S.HasReverseInsertPts());
- S.InsertReverseInsertPt(Inst);
-
- // One call can't cause a transition from S_Retain to S_CanRelease
- // and S_CanRelease to S_Use. If we've made the first transition,
- // we're done.
- continue;
- case S_Use:
- case S_CanRelease:
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
- }
- }
+ TopDownPtrState &S = MI->second;
+ if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
+ continue;
- // Check for possible direct uses.
- switch (Seq) {
- case S_CanRelease:
- if (CanUse(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
- << "\n");
- S.SetSeq(S_Use);
- ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
- }
- break;
- case S_Retain:
- case S_Use:
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
- }
+ S.HandlePotentialUse(Inst, Ptr, PA, Class);
}
return NestingDetected;
@@ -2140,27 +1338,22 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
}
}
- // If ARC Annotations are enabled, output the current state of pointers at the
- // top of the basic block.
- ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
+ DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
// Visit all the instructions, top-down.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
Instruction *Inst = I;
- DEBUG(dbgs() << "Visiting " << *Inst << "\n");
+ DEBUG(dbgs() << " Visiting " << *Inst << "\n");
NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
}
- // If ARC Annotations are enabled, output the current state of pointers at the
- // bottom of the basic block.
- ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
-
-#ifdef ARC_ANNOTATIONS
- if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
-#endif
+ DEBUG(llvm::dbgs() << "\nState Before Checking for CFG Hazards:\n"
+ << BBStates[BB] << "\n\n");
CheckForCFGHazards(BB, BBStates, MyStates);
+ DEBUG(llvm::dbgs() << "Final State:\n" << BBStates[BB] << "\n");
return NestingDetected;
}
@@ -2246,11 +1439,10 @@ ComputePostOrders(Function &F,
}
// Visit the function both top-down and bottom-up.
-bool
-ObjCARCOpt::Visit(Function &F,
- DenseMap<const BasicBlock *, BBState> &BBStates,
- MapVector<Value *, RRInfo> &Retains,
- DenseMap<Value *, RRInfo> &Releases) {
+bool ObjCARCOpt::Visit(Function &F,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases) {
// Use reverse-postorder traversals, because we magically know that loops
// will be well behaved, i.e. they won't repeatedly call retain on a single
@@ -2260,7 +1452,7 @@ ObjCARCOpt::Visit(Function &F,
SmallVector<BasicBlock *, 16> PostOrder;
SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
- NoObjCARCExceptionsMDKind,
+ MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
BBStates);
// Use reverse-postorder on the reverse CFG for bottom-up.
@@ -2281,10 +1473,9 @@ ObjCARCOpt::Visit(Function &F,
}
/// Move the calls in RetainsToMove and ReleasesToMove.
-void ObjCARCOpt::MoveCalls(Value *Arg,
- RRInfo &RetainsToMove,
+void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
- MapVector<Value *, RRInfo> &Retains,
+ BlotMapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
SmallVectorImpl<Instruction *> &DeadInsts,
Module *M) {
@@ -2297,7 +1488,7 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
Value *MyArg = ArgTy == ParamTy ? Arg :
new BitCastInst(Arg, ParamTy, "", InsertPt);
- Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
Call->setDoesNotThrow();
Call->setTailCall();
@@ -2308,11 +1499,11 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
Value *MyArg = ArgTy == ParamTy ? Arg :
new BitCastInst(Arg, ParamTy, "", InsertPt);
- Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
// Attach a clang.imprecise_release metadata tag, if appropriate.
if (MDNode *M = ReleasesToMove.ReleaseMetadata)
- Call->setMetadata(ImpreciseReleaseMDKind, M);
+ Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
Call->setDoesNotThrow();
if (ReleasesToMove.IsTailCallRelease)
Call->setTailCall();
@@ -2335,20 +1526,15 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
}
-bool
-ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
- &BBStates,
- MapVector<Value *, RRInfo> &Retains,
- DenseMap<Value *, RRInfo> &Releases,
- Module *M,
- SmallVectorImpl<Instruction *> &NewRetains,
- SmallVectorImpl<Instruction *> &NewReleases,
- SmallVectorImpl<Instruction *> &DeadInsts,
- RRInfo &RetainsToMove,
- RRInfo &ReleasesToMove,
- Value *Arg,
- bool KnownSafe,
- bool &AnyPairsCompletelyEliminated) {
+bool ObjCARCOpt::PairUpRetainsAndReleases(
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
+ RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
+ bool &AnyPairsCompletelyEliminated) {
// If a pair happens in a region where it is known that the reference count
// is already incremented, we can similarly ignore possible decrements unless
// we are dealing with a retainable object with multiple provenance sources.
@@ -2369,15 +1555,14 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
for (SmallVectorImpl<Instruction *>::const_iterator
NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
Instruction *NewRetain = *NI;
- MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
+ auto It = Retains.find(NewRetain);
assert(It != Retains.end());
const RRInfo &NewRetainRRI = It->second;
KnownSafeTD &= NewRetainRRI.KnownSafe;
MultipleOwners =
MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
- DenseMap<Value *, RRInfo>::const_iterator Jt =
- Releases.find(NewRetainRelease);
+ auto Jt = Releases.find(NewRetainRelease);
if (Jt == Releases.end())
return false;
const RRInfo &NewRetainReleaseRRI = Jt->second;
@@ -2446,15 +1631,13 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
for (SmallVectorImpl<Instruction *>::const_iterator
NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
Instruction *NewRelease = *NI;
- DenseMap<Value *, RRInfo>::const_iterator It =
- Releases.find(NewRelease);
+ auto It = Releases.find(NewRelease);
assert(It != Releases.end());
const RRInfo &NewReleaseRRI = It->second;
KnownSafeBU &= NewReleaseRRI.KnownSafe;
CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
- MapVector<Value *, RRInfo>::const_iterator Jt =
- Retains.find(NewReleaseRetain);
+ auto Jt = Retains.find(NewReleaseRetain);
if (Jt == Retains.end())
return false;
const RRInfo &NewReleaseRetainRRI = Jt->second;
@@ -2506,11 +1689,8 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
if (NewRetains.empty()) break;
}
- // If the pointer is known incremented in 1 direction and we do not have
- // MultipleOwners, we can safely remove the retain/releases. Otherwise we need
- // to be known safe in both directions.
- bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
- ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
+ // We can only remove pointers if we are known safe in both directions.
+ bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
if (UnconditionallySafe) {
RetainsToMove.ReverseInsertPts.clear();
ReleasesToMove.ReverseInsertPts.clear();
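To make the tightened condition concrete, here are the two predicates side by side, using the same flags as in the function above; the rewrite drops the single-direction exemption that previously applied when MultipleOwners was false.

// Worked comparison of the old and new UnconditionallySafe predicates.
static bool oldUnconditionallySafe(bool KnownSafeTD, bool KnownSafeBU,
                                   bool MultipleOwners) {
  return (KnownSafeTD && KnownSafeBU) ||
         ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
}

static bool newUnconditionallySafe(bool KnownSafeTD, bool KnownSafeBU,
                                   bool /*MultipleOwners*/) {
  // Safety must now be proven by both the top-down and bottom-up walks.
  return KnownSafeTD && KnownSafeBU;
}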
@@ -2540,12 +1720,6 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
if (OldDelta != 0)
return false;
-#ifdef ARC_ANNOTATIONS
- // Do not move calls if ARC annotations are requested.
- if (EnableARCAnnotations)
- return false;
-#endif // ARC_ANNOTATIONS
-
Changed = true;
assert(OldCount != 0 && "Unreachable code?");
NumRRs += OldCount - NewCount;
@@ -2558,12 +1732,10 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
/// Identify pairings between the retains and releases, and delete and/or move
/// them.
-bool
-ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
- &BBStates,
- MapVector<Value *, RRInfo> &Retains,
- DenseMap<Value *, RRInfo> &Releases,
- Module *M) {
+bool ObjCARCOpt::PerformCodePlacement(
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M) {
DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
bool AnyPairsCompletelyEliminated = false;
@@ -2574,8 +1746,9 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
SmallVector<Instruction *, 8> DeadInsts;
// Visit each retain.
- for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
- E = Retains.end(); I != E; ++I) {
+ for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
+ E = Retains.end();
+ I != E; ++I) {
Value *V = I->first;
if (!V) continue; // blotted
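The `if (!V) continue;` guard above is the consumer side of blotting: entries are zeroed in place rather than erased, so indices and iterators stay valid but null keys must be skipped. A minimal standalone illustration of that pattern (plain standard C++, not the BlotMapVector API):

// Illustrative only: a vector-backed map in which "erasing" nulls the key.
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<const char *, int>> Entries = {
      {"objc_retain", 1}, {nullptr, 0} /* blotted */, {"objc_release", 2}};
  for (const auto &KV : Entries) {
    if (!KV.first)
      continue; // key was zeroed instead of removed; skip it
    std::printf("%s -> %d\n", KV.first, KV.second);
  }
  return 0;
}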
@@ -2602,11 +1775,10 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
// Connect the dots between the top-down-collected RetainsToMove and
// bottom-up-collected ReleasesToMove to form sets of related calls.
NewRetains.push_back(Retain);
- bool PerformMoveCalls =
- ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
- NewReleases, DeadInsts, RetainsToMove,
- ReleasesToMove, Arg, KnownSafe,
- AnyPairsCompletelyEliminated);
+ bool PerformMoveCalls = PairUpRetainsAndReleases(
+ BBStates, Retains, Releases, M, NewRetains, NewReleases, DeadInsts,
+ RetainsToMove, ReleasesToMove, Arg, KnownSafe,
+ AnyPairsCompletelyEliminated);
if (PerformMoveCalls) {
// Ok, everything checks out and we're all set. Let's move/delete some
@@ -2678,7 +1850,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == ARCInstKind::LoadWeakRetained) {
- Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
@@ -2707,7 +1879,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == ARCInstKind::LoadWeakRetained) {
- Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
@@ -2795,7 +1967,7 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) {
// map stays valid when we get around to rewriting code and calls get
// replaced by arguments.
DenseMap<Value *, RRInfo> Releases;
- MapVector<Value *, RRInfo> Retains;
+ BlotMapVector<Value *, RRInfo> Retains;
// This is used during the traversal of the function to track the
// states for each identified object at each block.
@@ -2828,8 +2000,7 @@ HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
if (DepInsts.size() != 1)
return false;
- CallInst *Call =
- dyn_cast_or_null<CallInst>(*DepInsts.begin());
+ auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());
// Check that the pointer is the return value of the call.
if (!Call || Arg != Call)
@@ -2857,8 +2028,7 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
if (DepInsts.size() != 1)
return nullptr;
- CallInst *Retain =
- dyn_cast_or_null<CallInst>(*DepInsts.begin());
+ auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());
// Check that we found a retain with the same argument.
if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
@@ -2883,8 +2053,7 @@ FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
if (DepInsts.size() != 1)
return nullptr;
- CallInst *Autorelease =
- dyn_cast_or_null<CallInst>(*DepInsts.begin());
+ auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
return nullptr;
ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
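The `auto *` rewrites in the three hunks above are all the same cleanup: the result type is already spelled inside dyn_cast_or_null, so repeating it on the left adds nothing. A small standalone illustration (the helper name is hypothetical):

// dyn_cast_or_null<CallInst>(V) yields CallInst*, or null if V is null or not
// a CallInst, so `auto *` keeps exactly the same static type.
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static CallInst *asCallOrNull(Value *V) {
  auto *Call = dyn_cast_or_null<CallInst>(V); // same as: CallInst *Call = ...
  return Call;
}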
@@ -2999,28 +2168,13 @@ bool ObjCARCOpt::doInitialization(Module &M) {
if (!Run)
return false;
- // Identify the imprecise release metadata kind.
- ImpreciseReleaseMDKind =
- M.getContext().getMDKindID("clang.imprecise_release");
- CopyOnEscapeMDKind =
- M.getContext().getMDKindID("clang.arc.copy_on_escape");
- NoObjCARCExceptionsMDKind =
- M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
-#ifdef ARC_ANNOTATIONS
- ARCAnnotationBottomUpMDKind =
- M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
- ARCAnnotationTopDownMDKind =
- M.getContext().getMDKindID("llvm.arc.annotation.topdown");
- ARCAnnotationProvenanceSourceMDKind =
- M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
-#endif // ARC_ANNOTATIONS
-
// Intuitively, objc_retain and others are nocapture, however in practice
// they are not, because they return their argument value. And objc_release
// calls finalizers which can have arbitrary side effects.
+ MDKindCache.init(&M);
// Initialize our runtime entry point cache.
- EP.Initialize(&M);
+ EP.init(&M);
return false;
}
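doInitialization now hands the metadata-kind lookups to MDKindCache.init(&M). Below is a sketch of the shape such a cache could take, built only from the three kind strings the deleted code resolved directly; it is not the actual cache type defined in the ObjCARC headers, and the names are hypothetical.

// Hypothetical sketch: resolve the metadata kind strings once per module, then
// serve the unsigned kind IDs by enum.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

enum class DemoMDKindID { ImpreciseRelease, CopyOnEscape, NoObjCARCExceptions };

class DemoMDKindCache {
  unsigned IDs[3];

public:
  void init(Module *M) {
    LLVMContext &C = M->getContext();
    IDs[0] = C.getMDKindID("clang.imprecise_release");
    IDs[1] = C.getMDKindID("clang.arc.copy_on_escape");
    IDs[2] = C.getMDKindID("clang.arc.no_objc_arc_exceptions");
  }
  unsigned get(DemoMDKindID ID) const { return IDs[static_cast<unsigned>(ID)]; }
};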