Diffstat (limited to 'include/utils')
 include/utils/AndroidThreads.h  |   3
 include/utils/BasicHashtable.h  |   8
 include/utils/CallStack.h       |   6
 include/utils/Condition.h       |  13
 include/utils/JenkinsHash.h     |  48
 include/utils/LinearAllocator.h |  61
 include/utils/LruCache.h        | 230
 include/utils/Mutex.h           |   4
 include/utils/RWLock.h          |   4
 include/utils/RefBase.h         |  99
 include/utils/Thread.h          |   3
 include/utils/Trace.h           | 163
 include/utils/TypeHelpers.h     |   2
 include/utils/Vector.h          |   8
 include/utils/VectorImpl.h      |   1
 15 files changed, 452 insertions(+), 201 deletions(-)
diff --git a/include/utils/AndroidThreads.h b/include/utils/AndroidThreads.h
index f67648f..4eee14d 100644
--- a/include/utils/AndroidThreads.h
+++ b/include/utils/AndroidThreads.h
@@ -56,6 +56,9 @@ extern int androidCreateRawThreadEtc(android_thread_func_t entryFunction,
size_t threadStackSize,
android_thread_id_t *threadId);
+// set the name of the running thread
+extern void androidSetThreadName(const char* name);
+
// Used by the Java Runtime to control how threads are created, so that
// they can be proper and lovely Java threads.
typedef int (*android_create_thread_fn)(android_thread_func_t entryFunction,
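
A usage sketch for the new androidSetThreadName() (not part of this patch; the entry function is hypothetical). The name applies to the calling thread, so it is typically the first thing a thread entry function does:

    #include <utils/AndroidThreads.h>

    static int workerEntry(void* /*userData*/) {
        // Name the calling thread so it shows up in ps -t and systrace.
        androidSetThreadName("Worker");
        // ... do work ...
        return 0;
    }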
diff --git a/include/utils/BasicHashtable.h b/include/utils/BasicHashtable.h
index fdf9738..7a6c96c 100644
--- a/include/utils/BasicHashtable.h
+++ b/include/utils/BasicHashtable.h
@@ -328,6 +328,14 @@ public:
BasicHashtableImpl::rehash(minimumCapacity, loadFactor);
}
+ /* Determines whether there is room to add another entry without rehashing.
+ * When this returns true, a subsequent add() operation is guaranteed to
+ * complete without performing a rehash.
+ */
+ inline bool hasMoreRoom() const {
+ return mCapacity > mFilledBuckets;
+ }
+
protected:
static inline const TEntry& entryFor(const Bucket& bucket) {
return reinterpret_cast<const TEntry&>(bucket.entry);
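
A sketch of how hasMoreRoom() might be used (illustrative; the key type and load factor are assumptions). Checking it before add() keeps entry indices stable across the insertion, since no rehash can occur:

    BasicHashtable<int, key_value_pair_t<int, int> > table;
    if (!table.hasMoreRoom()) {
        // Grow ahead of time so the add() below cannot rehash.
        table.rehash(table.capacity() * 2, 0.75f);
    }
    ssize_t index = table.add(hash_type(42), key_value_pair_t<int, int>(42, 7));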
diff --git a/include/utils/CallStack.h b/include/utils/CallStack.h
index 079e20c..61dc832 100644
--- a/include/utils/CallStack.h
+++ b/include/utils/CallStack.h
@@ -35,6 +35,8 @@ public:
};
CallStack();
+ CallStack(const char* logtag, int32_t ignoreDepth=1,
+ int32_t maxDepth=MAX_DEPTH);
CallStack(const CallStack& rhs);
~CallStack();
@@ -53,8 +55,8 @@ public:
void update(int32_t ignoreDepth=1, int32_t maxDepth=MAX_DEPTH);
- // Dump a stack trace to the log
- void dump(const char* prefix = 0) const;
+ // Dump a stack trace to the log using the supplied logtag
+ void dump(const char* logtag, const char* prefix = 0) const;
// Return a string (possibly very long) containing the complete stack trace
String8 toString(const char* prefix = 0) const;
diff --git a/include/utils/Condition.h b/include/utils/Condition.h
index 8852d53..e63ba7e 100644
--- a/include/utils/Condition.h
+++ b/include/utils/Condition.h
@@ -48,6 +48,11 @@ public:
SHARED = 1
};
+ enum WakeUpType {
+ WAKE_UP_ONE = 0,
+ WAKE_UP_ALL = 1
+ };
+
Condition();
Condition(int type);
~Condition();
@@ -57,6 +62,14 @@ public:
status_t waitRelative(Mutex& mutex, nsecs_t reltime);
// Signal the condition variable, allowing one thread to continue.
void signal();
+ // Signal the condition variable, allowing one or all threads to continue.
+ void signal(WakeUpType type) {
+ if (type == WAKE_UP_ONE) {
+ signal();
+ } else {
+ broadcast();
+ }
+ }
// Signal the condition variable, allowing all threads to continue.
void broadcast();
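
A minimal sketch of the new signal(WakeUpType) overload in use (mLock, mCondition, and the enqueue step are assumed context):

    Mutex mLock;
    Condition mCondition;

    void publishWork(bool urgent) {
        Mutex::Autolock _l(mLock);
        // ... enqueue work ...
        mCondition.signal(urgent ? Condition::WAKE_UP_ALL
                                 : Condition::WAKE_UP_ONE);
    }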
diff --git a/include/utils/JenkinsHash.h b/include/utils/JenkinsHash.h
new file mode 100644
index 0000000..7da5dbd
--- /dev/null
+++ b/include/utils/JenkinsHash.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Implementation of Jenkins one-at-a-time hash function. These choices are
+ * optimized for code size and portability, rather than raw speed. But speed
+ * should still be quite good.
+ */
+
+#ifndef ANDROID_JENKINS_HASH_H
+#define ANDROID_JENKINS_HASH_H
+
+#include <utils/TypeHelpers.h>
+
+namespace android {
+
+/* The Jenkins hash of a sequence of 32 bit words A, B, C is:
+ * Whiten(Mix(Mix(Mix(0, A), B), C)) */
+
+inline uint32_t JenkinsHashMix(uint32_t hash, uint32_t data) {
+ hash += data;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ return hash;
+}
+
+hash_t JenkinsHashWhiten(uint32_t hash);
+
+/* Helpful utility functions for hashing data in 32 bit chunks */
+uint32_t JenkinsHashMixBytes(uint32_t hash, const uint8_t* bytes, size_t size);
+
+uint32_t JenkinsHashMixShorts(uint32_t hash, const uint16_t* shorts, size_t size);
+
+}
+
+#endif // ANDROID_JENKINS_HASH_H
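
A worked example of the documented pipeline, Whiten(Mix(Mix(Mix(0, A), B), C)) (the helper name is hypothetical; JenkinsHashWhiten() itself is defined out of line):

    #include <utils/JenkinsHash.h>

    android::hash_t hashTriple(uint32_t a, uint32_t b, uint32_t c) {
        uint32_t hash = android::JenkinsHashMix(0, a);
        hash = android::JenkinsHashMix(hash, b);
        hash = android::JenkinsHashMix(hash, c);
        return android::JenkinsHashWhiten(hash);
    }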
diff --git a/include/utils/LinearAllocator.h b/include/utils/LinearAllocator.h
index cd2521d..4772bc8 100644
--- a/include/utils/LinearAllocator.h
+++ b/include/utils/LinearAllocator.h
@@ -30,11 +30,66 @@
namespace android {
+/**
+ * A memory manager that internally allocates multi-kbyte buffers for placing objects in. It avoids
+ * the overhead of malloc when many objects are allocated. It is most useful when creating many
+ * small objects with a similar lifetime, and doesn't add significant overhead for large
+ * allocations.
+ */
class LinearAllocator {
public:
- void* alloc(size_t size) { return 0; }
- void rewindIfLastAlloc(void* ptr, size_t allocSize) {}
- void dumpMemoryStats(const char* prefix = "") {}
+ LinearAllocator();
+ ~LinearAllocator();
+
+ /**
+ * Reserves and returns a region of memory of at least size 'size', aligning as needed.
+ * Typically this is used in an object's overridden new() method or as a replacement for malloc.
+ *
+ * The lifetime of the returned buffers is tied to that of the LinearAllocator. If calling
+ * delete() on an object stored in a buffer is needed, it should be overridden to use
+ * rewindIfLastAlloc()
+ */
+ void* alloc(size_t size);
+
+ /**
+ * Attempt to deallocate the given buffer, with the LinearAllocator attempting to rewind its
+ * state if possible. No destructors are called.
+ */
+ void rewindIfLastAlloc(void* ptr, size_t allocSize);
+
+ /**
+ * Dump memory usage statistics to the log (allocated and wasted space)
+ */
+ void dumpMemoryStats(const char* prefix = "");
+
+ /**
+ * The number of bytes used for buffers allocated in the LinearAllocator (does not count space
+ * wasted)
+ */
+ size_t usedSize() const { return mTotalAllocated - mWastedSpace; }
+
+private:
+ LinearAllocator(const LinearAllocator& other);
+
+ class Page;
+
+ Page* newPage(size_t pageSize);
+ bool fitsInCurrentPage(size_t size);
+ void ensureNext(size_t size);
+ void* start(Page *p);
+ void* end(Page* p);
+
+ size_t mPageSize;
+ size_t mMaxAllocSize;
+ void* mNext;
+ Page* mCurrentPage;
+ Page* mPages;
+
+ // Memory usage tracking
+ size_t mTotalAllocated;
+ size_t mWastedSpace;
+ size_t mPageCount;
+ size_t mDedicatedPageCount;
};
}; // namespace android
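
A sketch of the "overridden new()" pattern the alloc() documentation refers to (the class is hypothetical; objects allocated this way must not be deleted individually, since their storage lives and dies with the allocator):

    #include <utils/LinearAllocator.h>

    class DrawOp {  // hypothetical small, short-lived object
    public:
        static void* operator new(size_t size, android::LinearAllocator& la) {
            return la.alloc(size);
        }
        // No matching operator delete: storage is reclaimed when the
        // LinearAllocator is destroyed.
    };

    android::LinearAllocator allocator;
    DrawOp* op = new (allocator) DrawOp();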
diff --git a/include/utils/LruCache.h b/include/utils/LruCache.h
new file mode 100644
index 0000000..302b929
--- /dev/null
+++ b/include/utils/LruCache.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_UTILS_LRU_CACHE_H
+#define ANDROID_UTILS_LRU_CACHE_H
+
+#include <utils/BasicHashtable.h>
+#include <utils/GenerationCache.h>
+#include <utils/UniquePtr.h>
+
+namespace android {
+
+// OnEntryRemoved is defined in GenerationCache.h, but maybe should move here.
+
+template <typename TKey, typename TValue>
+class LruCache {
+public:
+ explicit LruCache(uint32_t maxCapacity);
+
+ enum Capacity {
+ kUnlimitedCapacity,
+ };
+
+ void setOnEntryRemovedListener(OnEntryRemoved<TKey, TValue>* listener);
+ size_t size() const;
+ const TValue& get(const TKey& key);
+ bool put(const TKey& key, const TValue& value);
+ bool remove(const TKey& key);
+ bool removeOldest();
+ void clear();
+
+ class Iterator {
+ public:
+ Iterator(const LruCache<TKey, TValue>& cache): mCache(cache), mIndex(-1) {
+ }
+
+ bool next() {
+ mIndex = mCache.mTable->next(mIndex);
+ return mIndex != -1;
+ }
+
+ size_t index() const {
+ return mIndex;
+ }
+
+ const TValue& value() const {
+ return mCache.mTable->entryAt(mIndex).value;
+ }
+
+ const TKey& key() const {
+ return mCache.mTable->entryAt(mIndex).key;
+ }
+ private:
+ const LruCache<TKey, TValue>& mCache;
+ size_t mIndex;
+ };
+
+private:
+ LruCache(const LruCache& that); // disallow copy constructor
+
+ struct Entry {
+ TKey key;
+ TValue value;
+ Entry* parent;
+ Entry* child;
+
+ Entry(TKey key_, TValue value_) : key(key_), value(value_), parent(NULL), child(NULL) {
+ }
+ const TKey& getKey() const { return key; }
+ };
+
+ void attachToCache(Entry& entry);
+ void detachFromCache(Entry& entry);
+ void rehash(size_t newCapacity);
+
+ UniquePtr<BasicHashtable<TKey, Entry> > mTable;
+ OnEntryRemoved<TKey, TValue>* mListener;
+ Entry* mOldest;
+ Entry* mYoungest;
+ uint32_t mMaxCapacity;
+ TValue mNullValue;
+};
+
+// Implementation is here, because it's fully templated
+template <typename TKey, typename TValue>
+LruCache<TKey, TValue>::LruCache(uint32_t maxCapacity)
+    : mTable(new BasicHashtable<TKey, Entry>), mListener(NULL), mOldest(NULL),
+    mYoungest(NULL), mMaxCapacity(maxCapacity), mNullValue(NULL) {
+}
+
+template<typename K, typename V>
+void LruCache<K, V>::setOnEntryRemovedListener(OnEntryRemoved<K, V>* listener) {
+ mListener = listener;
+}
+
+template <typename TKey, typename TValue>
+size_t LruCache<TKey, TValue>::size() const {
+ return mTable->size();
+}
+
+template <typename TKey, typename TValue>
+const TValue& LruCache<TKey, TValue>::get(const TKey& key) {
+ hash_t hash = hash_type(key);
+ ssize_t index = mTable->find(-1, hash, key);
+ if (index == -1) {
+ return mNullValue;
+ }
+ Entry& entry = mTable->editEntryAt(index);
+ detachFromCache(entry);
+ attachToCache(entry);
+ return entry.value;
+}
+
+template <typename TKey, typename TValue>
+bool LruCache<TKey, TValue>::put(const TKey& key, const TValue& value) {
+ if (mMaxCapacity != kUnlimitedCapacity && size() >= mMaxCapacity) {
+ removeOldest();
+ }
+
+ hash_t hash = hash_type(key);
+ ssize_t index = mTable->find(-1, hash, key);
+ if (index >= 0) {
+ return false;
+ }
+ if (!mTable->hasMoreRoom()) {
+ rehash(mTable->capacity() * 2);
+ }
+
+ // Would it be better to initialize a blank entry and assign key, value?
+ Entry initEntry(key, value);
+ index = mTable->add(hash, initEntry);
+ Entry& entry = mTable->editEntryAt(index);
+ attachToCache(entry);
+ return true;
+}
+
+template <typename TKey, typename TValue>
+bool LruCache<TKey, TValue>::remove(const TKey& key) {
+ hash_t hash = hash_type(key);
+ ssize_t index = mTable->find(-1, hash, key);
+ if (index < 0) {
+ return false;
+ }
+ Entry& entry = mTable->editEntryAt(index);
+ if (mListener) {
+ (*mListener)(entry.key, entry.value);
+ }
+ detachFromCache(entry);
+ mTable->removeAt(index);
+ return true;
+}
+
+template <typename TKey, typename TValue>
+bool LruCache<TKey, TValue>::removeOldest() {
+ if (mOldest != NULL) {
+ return remove(mOldest->key);
+ // TODO: should probably abort if false
+ }
+ return false;
+}
+
+template <typename TKey, typename TValue>
+void LruCache<TKey, TValue>::clear() {
+ if (mListener) {
+ for (Entry* p = mOldest; p != NULL; p = p->child) {
+ (*mListener)(p->key, p->value);
+ }
+ }
+ mYoungest = NULL;
+ mOldest = NULL;
+ mTable->clear();
+}
+
+template <typename TKey, typename TValue>
+void LruCache<TKey, TValue>::attachToCache(Entry& entry) {
+ if (mYoungest == NULL) {
+ mYoungest = mOldest = &entry;
+ } else {
+ entry.parent = mYoungest;
+ mYoungest->child = &entry;
+ mYoungest = &entry;
+ }
+}
+
+template <typename TKey, typename TValue>
+void LruCache<TKey, TValue>::detachFromCache(Entry& entry) {
+ if (entry.parent != NULL) {
+ entry.parent->child = entry.child;
+ } else {
+ mOldest = entry.child;
+ }
+ if (entry.child != NULL) {
+ entry.child->parent = entry.parent;
+ } else {
+ mYoungest = entry.parent;
+ }
+
+ entry.parent = NULL;
+ entry.child = NULL;
+}
+
+template <typename TKey, typename TValue>
+void LruCache<TKey, TValue>::rehash(size_t newCapacity) {
+ UniquePtr<BasicHashtable<TKey, Entry> > oldTable(mTable.release());
+ Entry* oldest = mOldest;
+
+ mOldest = NULL;
+ mYoungest = NULL;
+ mTable.reset(new BasicHashtable<TKey, Entry>(newCapacity));
+ for (Entry* p = oldest; p != NULL; p = p->child) {
+ put(p->key, p->value);
+ }
+}
+
+}
+
+#endif // ANDROID_UTILS_LRU_CACHE_H
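
A usage sketch for the new cache (illustrative values; get() on a missing key returns the cache's internal NULL value):

    #include <utils/LruCache.h>

    android::LruCache<int, const char*> cache(2);  // hold at most 2 entries
    cache.put(1, "one");
    cache.put(2, "two");
    cache.get(1);            // touch 1: it becomes the youngest entry
    cache.put(3, "three");   // at capacity: the oldest entry (2) is evicted

    android::LruCache<int, const char*>::Iterator it(cache);
    while (it.next()) {
        // it.key() / it.value() visit entries in hashtable order
    }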
diff --git a/include/utils/Mutex.h b/include/utils/Mutex.h
index de6fb39..dd201c8 100644
--- a/include/utils/Mutex.h
+++ b/include/utils/Mutex.h
@@ -91,10 +91,10 @@ private:
inline Mutex::Mutex() {
pthread_mutex_init(&mMutex, NULL);
}
-inline Mutex::Mutex(const char* name) {
+inline Mutex::Mutex(__attribute__((unused)) const char* name) {
pthread_mutex_init(&mMutex, NULL);
}
-inline Mutex::Mutex(int type, const char* name) {
+inline Mutex::Mutex(int type, __attribute__((unused)) const char* name) {
if (type == SHARED) {
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
diff --git a/include/utils/RWLock.h b/include/utils/RWLock.h
index a5abea2..90beb5f 100644
--- a/include/utils/RWLock.h
+++ b/include/utils/RWLock.h
@@ -84,10 +84,10 @@ private:
inline RWLock::RWLock() {
pthread_rwlock_init(&mRWLock, NULL);
}
-inline RWLock::RWLock(const char* name) {
+inline RWLock::RWLock(__attribute__((unused)) const char* name) {
pthread_rwlock_init(&mRWLock, NULL);
}
-inline RWLock::RWLock(int type, const char* name) {
+inline RWLock::RWLock(int type, __attribute__((unused)) const char* name) {
if (type == SHARED) {
pthread_rwlockattr_t attr;
pthread_rwlockattr_init(&attr);
diff --git a/include/utils/RefBase.h b/include/utils/RefBase.h
index 99f5182..033fe67 100644
--- a/include/utils/RefBase.h
+++ b/include/utils/RefBase.h
@@ -52,12 +52,16 @@ inline bool operator _op_ (const U* o) const { \
}
// ---------------------------------------------------------------------------
-class ReferenceMover;
-class ReferenceConverterBase {
+
+class ReferenceRenamer {
+protected:
+ // destructor is purposely not virtual so we avoid code overhead from
+ // subclasses; we have to make it protected to guarantee that it
+ // cannot be called from this base class (and to make strict compilers
+ // happy).
+ ~ReferenceRenamer() { }
public:
- virtual size_t getReferenceTypeSize() const = 0;
- virtual void* getReferenceBase(void const*) const = 0;
- inline virtual ~ReferenceConverterBase() { }
+ virtual void operator()(size_t i) const = 0;
};
// ---------------------------------------------------------------------------
@@ -144,17 +148,23 @@ protected:
virtual void onLastWeakRef(const void* id);
private:
- friend class ReferenceMover;
- static void moveReferences(void* d, void const* s, size_t n,
- const ReferenceConverterBase& caster);
-
-private:
friend class weakref_type;
class weakref_impl;
RefBase(const RefBase& o);
RefBase& operator=(const RefBase& o);
+private:
+ friend class ReferenceMover;
+
+ static void renameRefs(size_t n, const ReferenceRenamer& renamer);
+
+ static void renameRefId(weakref_type* ref,
+ const void* old_id, const void* new_id);
+
+ static void renameRefId(RefBase* ref,
+ const void* old_id, const void* new_id);
+
weakref_impl* const mRefs;
};
@@ -165,10 +175,10 @@ class LightRefBase
{
public:
inline LightRefBase() : mCount(0) { }
- inline void incStrong(const void* id) const {
+ inline void incStrong(__attribute__((unused)) const void* id) const {
android_atomic_inc(&mCount);
}
- inline void decStrong(const void* id) const {
+ inline void decStrong(__attribute__((unused)) const void* id) const {
if (android_atomic_dec(&mCount) == 1) {
delete static_cast<const T*>(this);
}
@@ -185,8 +195,9 @@ protected:
private:
friend class ReferenceMover;
- inline static void moveReferences(void* d, void const* s, size_t n,
- const ReferenceConverterBase& caster) { }
+ inline static void renameRefs(size_t n, const ReferenceRenamer& renamer) { }
+ inline static void renameRefId(T* ref,
+ const void* old_id, const void* new_id) { }
private:
mutable volatile int32_t mCount;
@@ -455,42 +466,48 @@ inline TextOutput& operator<<(TextOutput& to, const wp<T>& val)
// this class just serves as a namespace so TYPE::moveReferences can stay
// private.
-
class ReferenceMover {
- // StrongReferenceCast and WeakReferenceCast do the impedance matching
- // between the generic (void*) implementation in Refbase and the strongly typed
- // template specializations below.
-
- template <typename TYPE>
- struct StrongReferenceCast : public ReferenceConverterBase {
- virtual size_t getReferenceTypeSize() const { return sizeof( sp<TYPE> ); }
- virtual void* getReferenceBase(void const* p) const {
- sp<TYPE> const* sptr(reinterpret_cast<sp<TYPE> const*>(p));
- return static_cast<typename TYPE::basetype *>(sptr->get());
- }
- };
-
- template <typename TYPE>
- struct WeakReferenceCast : public ReferenceConverterBase {
- virtual size_t getReferenceTypeSize() const { return sizeof( wp<TYPE> ); }
- virtual void* getReferenceBase(void const* p) const {
- wp<TYPE> const* sptr(reinterpret_cast<wp<TYPE> const*>(p));
- return static_cast<typename TYPE::basetype *>(sptr->unsafe_get());
- }
- };
-
public:
+ // it would be nice if we could make sure no extra code is generated
+ // for sp<TYPE> or wp<TYPE> when TYPE is a descendant of RefBase:
+ // Using a sp<RefBase> override doesn't work; it's a bit like we wanted
+ // a template<typename TYPE inherits RefBase> template...
+
template<typename TYPE> static inline
void move_references(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
+
+ class Renamer : public ReferenceRenamer {
+ sp<TYPE>* d;
+ sp<TYPE> const* s;
+ virtual void operator()(size_t i) const {
+ // The id is known to be the sp<>'s this pointer
+ TYPE::renameRefId(d[i].get(), &s[i], &d[i]);
+ }
+ public:
+ Renamer(sp<TYPE>* d, sp<TYPE> const* s) : d(d), s(s) { }
+ };
+
memmove(d, s, n*sizeof(sp<TYPE>));
- StrongReferenceCast<TYPE> caster;
- TYPE::moveReferences(d, s, n, caster);
+ TYPE::renameRefs(n, Renamer(d, s));
}
+
+
template<typename TYPE> static inline
void move_references(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
+
+ class Renamer : public ReferenceRenamer {
+ wp<TYPE>* d;
+ wp<TYPE> const* s;
+ virtual void operator()(size_t i) const {
+ // The id is known to be the wp<>'s this pointer
+ TYPE::renameRefId(d[i].get_refs(), &s[i], &d[i]);
+ }
+ public:
+ Renamer(wp<TYPE>* d, wp<TYPE> const* s) : d(d), s(s) { }
+ };
+
memmove(d, s, n*sizeof(wp<TYPE>));
- WeakReferenceCast<TYPE> caster;
- TYPE::moveReferences(d, s, n, caster);
+ TYPE::renameRefs(n, Renamer(d, s));
}
};
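
An illustrative caller of move_references() (a sketch; Foo is hypothetical). The pointers are relocated bitwise, with no incStrong()/decStrong() pairs run; only the debug ref-tracking ids are renamed from &s[i] to &d[i]:

    class Foo : public RefBase { };

    void relocate(sp<Foo>* d, sp<Foo> const* s, size_t n) {
        ReferenceMover::move_references(d, s, n);
    }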
diff --git a/include/utils/Thread.h b/include/utils/Thread.h
index 4a34abd..df30611 100644
--- a/include/utils/Thread.h
+++ b/include/utils/Thread.h
@@ -67,6 +67,9 @@ public:
// Do not call from this object's thread; will return WOULD_BLOCK in that case.
status_t join();
+ // Indicates whether this thread is running or not.
+ bool isRunning() const;
+
#ifdef HAVE_ANDROID_OS
// Return the thread's kernel ID, same as the thread itself calling gettid() or
// androidGetTid(), or -1 if the thread is not running.
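
A sketch of the new isRunning() accessor in use (MyThread is hypothetical):

    class MyThread : public Thread {
        virtual bool threadLoop() { /* one unit of work */ return true; }
    };

    sp<Thread> t = new MyThread();
    t->run("MyThread");
    // ... later ...
    t->requestExitAndWait();
    if (!t->isRunning()) {
        // the thread has fully exited; its shared state can be torn down
    }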
diff --git a/include/utils/Trace.h b/include/utils/Trace.h
index 41bce00..49578c4 100644
--- a/include/utils/Trace.h
+++ b/include/utils/Trace.h
@@ -27,166 +27,31 @@
#include <cutils/compiler.h>
#include <utils/threads.h>
+#include <cutils/trace.h>
-// The ATRACE_TAG macro can be defined before including this header to trace
-// using one of the tags defined below. It must be defined to one of the
-// following ATRACE_TAG_* macros. The trace tag is used to filter tracing in
-// userland to avoid some of the runtime cost of tracing when it is not desired.
-//
-// Defining ATRACE_TAG to be ATRACE_TAG_ALWAYS will result in the tracing always
-// being enabled - this should ONLY be done for debug code, as userland tracing
-// has a performance cost even when the trace is not being recorded. Defining
-// ATRACE_TAG to be ATRACE_TAG_NEVER or leaving ATRACE_TAG undefined will result
-// in the tracing always being disabled.
-//
-// These tags must be kept in sync with frameworks/base/core/java/android/os/Trace.java.
-#define ATRACE_TAG_NEVER 0 // The "never" tag is never enabled.
-#define ATRACE_TAG_ALWAYS (1<<0) // The "always" tag is always enabled.
-#define ATRACE_TAG_GRAPHICS (1<<1)
-#define ATRACE_TAG_INPUT (1<<2)
-#define ATRACE_TAG_VIEW (1<<3)
-#define ATRACE_TAG_WEBVIEW (1<<4)
-#define ATRACE_TAG_WINDOW_MANAGER (1<<5)
-#define ATRACE_TAG_ACTIVITY_MANAGER (1<<6)
-#define ATRACE_TAG_SYNC_MANAGER (1<<7)
-#define ATRACE_TAG_AUDIO (1<<8)
-#define ATRACE_TAG_VIDEO (1<<9)
-#define ATRACE_TAG_CAMERA (1<<10)
-#define ATRACE_TAG_LAST ATRACE_TAG_CAMERA
+// See <cutils/trace.h> for more ATRACE_* macros.
-#define ATRACE_TAG_NOT_READY (1LL<<63) // Reserved for use during init
-
-#define ATRACE_TAG_VALID_MASK ((ATRACE_TAG_LAST - 1) | ATRACE_TAG_LAST)
-
-#ifndef ATRACE_TAG
-#define ATRACE_TAG ATRACE_TAG_NEVER
-#elif ATRACE_TAG > ATRACE_TAG_LAST
-#error ATRACE_TAG must be defined to be one of the tags defined in utils/Trace.h
-#endif
-
-// ATRACE_CALL traces the beginning and end of the current function. To trace
-// the correct start and end times this macro should be the first line of the
-// function body.
-#define ATRACE_CALL() android::ScopedTrace ___tracer(ATRACE_TAG, __FUNCTION__)
-
-// ATRACE_NAME traces the beginning and end of the current function. To trace
-// the correct start and end times this macro should be the first line of the
-// function body.
+// ATRACE_NAME traces the beginning and end of the current scope. To trace
+// the correct start and end times this macro should be declared first in the
+// scope body.
#define ATRACE_NAME(name) android::ScopedTrace ___tracer(ATRACE_TAG, name)
-
-// ATRACE_INT traces a named integer value. This can be used to track how the
-// value changes over time in a trace.
-#define ATRACE_INT(name, value) android::Tracer::traceCounter(ATRACE_TAG, name, value)
-
-// ATRACE_ENABLED returns true if the trace tag is enabled. It can be used as a
-// guard condition around more expensive trace calculations.
-#define ATRACE_ENABLED() android::Tracer::isTagEnabled(ATRACE_TAG)
+// ATRACE_CALL is an ATRACE_NAME that uses the current function name.
+#define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
namespace android {
-class Tracer {
-
-public:
-
- static uint64_t getEnabledTags() {
- initIfNeeded();
- return sEnabledTags;
- }
-
- static inline bool isTagEnabled(uint64_t tag) {
- initIfNeeded();
- return sEnabledTags & tag;
- }
-
- static inline void traceCounter(uint64_t tag, const char* name,
- int32_t value) {
- if (CC_UNLIKELY(isTagEnabled(tag))) {
- char buf[1024];
- snprintf(buf, 1024, "C|%d|%s|%d", getpid(), name, value);
- write(sTraceFD, buf, strlen(buf));
- }
- }
-
- static inline void traceBegin(uint64_t tag, const char* name) {
- if (CC_UNLIKELY(isTagEnabled(tag))) {
- char buf[1024];
- size_t len = snprintf(buf, 1024, "B|%d|%s", getpid(), name);
- write(sTraceFD, buf, len);
- }
- }
-
- static inline void traceEnd(uint64_t tag) {
- if (CC_UNLIKELY(isTagEnabled(tag))) {
- char buf = 'E';
- write(sTraceFD, &buf, 1);
- }
- }
-
-private:
-
- static inline void initIfNeeded() {
- if (!android_atomic_acquire_load(&sIsReady)) {
- init();
- }
- }
-
- static void changeCallback();
-
- // init opens the trace marker file for writing and reads the
- // atrace.tags.enableflags system property. It does this only the first
- // time it is run, using sMutex for synchronization.
- static void init();
-
- // retrieve the current value of the system property.
- static void loadSystemProperty();
-
- // sIsReady is a boolean value indicating whether a call to init() has
- // completed in this process. It is initialized to 0 and set to 1 when the
- // first init() call completes. It is set to 1 even if a failure occurred
- // in init (e.g. the trace marker file couldn't be opened).
- //
- // This should be checked by all tracing functions using an atomic acquire
- // load operation before calling init(). This check avoids the need to lock
- // a mutex each time a trace function gets called.
- static volatile int32_t sIsReady;
-
- // sTraceFD is the file descriptor used to write to the kernel's trace
- // buffer. It is initialized to -1 and set to an open file descriptor in
- // init() while a lock on sMutex is held.
- //
- // This should only be used by a trace function after init() has
- // successfully completed.
- static int sTraceFD;
-
- // sEnabledTags is the set of tag bits for which tracing is currently
- // enabled. It is initialized to 0 and set based on the
- // atrace.tags.enableflags system property in init() while a lock on sMutex
- // is held.
- //
- // This should only be used by a trace function after init() has
- // successfully completed.
- //
- // This value is only ever non-zero when tracing is initialized and sTraceFD is not -1.
- static uint64_t sEnabledTags;
-
- // sMutex is used to protect the execution of init().
- static Mutex sMutex;
-};
-
class ScopedTrace {
-
public:
- inline ScopedTrace(uint64_t tag, const char* name) :
- mTag(tag) {
- Tracer::traceBegin(mTag, name);
- }
+    inline ScopedTrace(uint64_t tag, const char* name)
+        : mTag(tag) {
+        atrace_begin(mTag, name);
+    }
- inline ~ScopedTrace() {
- Tracer::traceEnd(mTag);
- }
+    inline ~ScopedTrace() {
+        atrace_end(mTag);
+    }
private:
-
uint64_t mTag;
};
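
A usage sketch for the slimmed-down macros (the tag and function are illustrative; as before, ATRACE_TAG must be defined before the include):

    #define ATRACE_TAG ATRACE_TAG_GRAPHICS
    #include <utils/Trace.h>

    void drawFrame() {
        ATRACE_CALL();              // traces drawFrame() begin/end
        {
            ATRACE_NAME("upload");  // traces just this inner scope
            // ... upload textures ...
        }
    }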
diff --git a/include/utils/TypeHelpers.h b/include/utils/TypeHelpers.h
index 2bf33c3..13c9081 100644
--- a/include/utils/TypeHelpers.h
+++ b/include/utils/TypeHelpers.h
@@ -291,7 +291,7 @@ ANDROID_INT64_HASH(uint64_t)
ANDROID_REINTERPRET_HASH(float, uint32_t)
ANDROID_REINTERPRET_HASH(double, uint64_t)
-template <typename T> inline hash_t hash_type(const T*& value) {
+template <typename T> inline hash_t hash_type(T* const & value) {
return hash_type(uintptr_t(value));
}
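
The new signature matters for overload resolution: 'const T*&' only binds to an lvalue that is already a pointer-to-const, while 'T* const&' binds to any pointer value. A minimal illustration (Foo is hypothetical):

    struct Foo { };
    Foo* p = new Foo();
    hash_t h = hash_type(p);  // deduces T = Foo; the old 'const T*&' form
                              // could not bind a non-const Foo* lvalue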
diff --git a/include/utils/Vector.h b/include/utils/Vector.h
index f3020d6..ed7b725 100644
--- a/include/utils/Vector.h
+++ b/include/utils/Vector.h
@@ -80,7 +80,13 @@ public:
//! sets the capacity. capacity can never be reduced less than size()
inline ssize_t setCapacity(size_t size) { return VectorImpl::setCapacity(size); }
- /*!
+ /*!
+ * set the size of the vector. items are appended with the default
+ * constructor, or removed from the end as needed.
+ */
+ inline ssize_t resize(size_t size) { return VectorImpl::resize(size); }
+
+ /*!
* C-style array access
*/
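
A sketch of resize() semantics (illustrative):

    Vector<String8> names;
    names.resize(4);  // grows: four default-constructed String8s appended
    names.resize(2);  // shrinks: the last two items are removed from the end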
diff --git a/include/utils/VectorImpl.h b/include/utils/VectorImpl.h
index c4ec2ff..9bc50e6 100644
--- a/include/utils/VectorImpl.h
+++ b/include/utils/VectorImpl.h
@@ -64,6 +64,7 @@ public:
inline bool isEmpty() const { return mCount == 0; }
size_t capacity() const;
ssize_t setCapacity(size_t size);
+ ssize_t resize(size_t size);
/*! append/insert another vector or array */
ssize_t insertVectorAt(const VectorImpl& vector, size_t index);