path: root/include/cutils/atomic.h
author     Hans Boehm <hboehm@google.com>    2014-10-04 00:06:30 +0000
committer  Android Git Automerger <android-git-automerger@android.com>    2014-10-04 00:06:30 +0000
commit     2ac8207e505ff645c8fe9416da689f12dd73f68e (patch)
tree       fd0a945ab4df763055d4788c7a48df537333c6ed /include/cutils/atomic.h
parent     d8e138396a97b0d05c08d31df38f2c6a2ff52e2e (diff)
parent     f5204549faf7310cc808bcb3676fb36268178e26 (diff)
am f5204549: am 028928fe: Merge "Rewrite atomic.h to use stdatomic.h."
* commit 'f5204549faf7310cc808bcb3676fb36268178e26': Rewrite atomic.h to use stdatomic.h.
Diffstat (limited to 'include/cutils/atomic.h')
-rw-r--r--  include/cutils/atomic.h | 145
1 file changed, 121 insertions(+), 24 deletions(-)
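The patch's core pattern: each legacy entry point now reinterprets its plain volatile int32_t* argument as a C11 atomic of the same width and calls the corresponding _explicit operation. A minimal standalone sketch of that pattern (the name fetch_add_release is hypothetical, not part of the patch):

    #include <stdatomic.h>
    #include <stdint.h>

    static inline int32_t fetch_add_release(volatile int32_t* addr, int32_t v)
    {
        /* Reinterpret the legacy pointer as a C11 atomic; int32_t and
         * int_least32_t are the same type wherever int32_t exists. */
        volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
        return atomic_fetch_add_explicit(a, v, memory_order_release);
    }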
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index 79409a7..e199130 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -19,9 +19,10 @@
#include <stdint.h>
#include <sys/types.h>
+#include <stdatomic.h>
-#ifdef __cplusplus
-extern "C" {
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE static inline
#endif
/*
@@ -70,6 +71,14 @@ extern "C" {
* If they are not, atomicity is not guaranteed.
*/
+#if ANDROID_SMP == 0
+# define ANDROID_ATOMIC_ACQUIRE memory_order_relaxed
+# define ANDROID_ATOMIC_RELEASE memory_order_relaxed
+#else
+# define ANDROID_ATOMIC_ACQUIRE memory_order_acquire
+# define ANDROID_ATOMIC_RELEASE memory_order_release
+#endif
+
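ANDROID_SMP is normally set by the build system; as a hedged sketch, a hypothetical consumer that knows it targets a single core could compile out the CPU fences like this (assuming the header tolerates a user-provided definition):

    /* Hypothetical: with ANDROID_SMP == 0, acquire/release collapse to
     * relaxed, so only compiler reordering is inhibited. */
    #define ANDROID_SMP 0
    #include <cutils/atomic.h>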
/*
* Basic arithmetic and bitwise operations. These all provide a
* barrier with "release" ordering, and return the previous value.
@@ -77,11 +86,41 @@ extern "C" {
* These have the same characteristics (e.g. what happens on overflow)
* as the equivalent non-atomic C operations.
*/
-int32_t android_atomic_inc(volatile int32_t* addr);
-int32_t android_atomic_dec(volatile int32_t* addr);
-int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
-int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
-int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_inc(volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+    /* int32_t, if it exists, is the same type as int_least32_t. */
+ return atomic_fetch_add_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
+}
+
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_dec(volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return atomic_fetch_sub_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
+}
+
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return atomic_fetch_add_explicit(a, value, ANDROID_ATOMIC_RELEASE);
+}
+
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return atomic_fetch_and_explicit(a, value, ANDROID_ATOMIC_RELEASE);
+}
+
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return atomic_fetch_or_explicit(a, value, ANDROID_ATOMIC_RELEASE);
+}
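Usage sketch for the fetch-ops above: release-ordered reference counting. The obj_t/obj_unref names are illustrative, not from the patch:

    #include <stdlib.h>

    typedef struct { volatile int32_t refs; /* payload... */ } obj_t;

    static void obj_unref(obj_t* o)
    {
        /* android_atomic_dec returns the PREVIOUS value, so a return of 1
         * means this call dropped the last reference. */
        if (android_atomic_dec(&o->refs) == 1) {
            android_memory_barrier();  /* acquire side before reclaiming */
            free(o);
        }
    }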
/*
* Perform an atomic load with "acquire" or "release" ordering.
@@ -96,29 +135,53 @@ int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
* this comment, you are in the vast majority, and should not be
* using release loads or replacing them with anything other than
* locks or default sequentially consistent atomics.
- *
- * This is only necessary if you need the memory barrier. A 32-bit read
- * from a 32-bit aligned address is atomic on all supported platforms.
*/
-int32_t android_atomic_acquire_load(volatile const int32_t* addr);
-int32_t android_atomic_release_load(volatile const int32_t* addr);
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return atomic_load_explicit(a, ANDROID_ATOMIC_ACQUIRE);
+}
+
+ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ atomic_thread_fence(memory_order_seq_cst);
+    /* Any reasonable clients of this interface would probably prefer
+     * something weaker. But some remaining clients seem to be abusing
+     * this API in strange ways, e.g. by using it as a fence. Thus we
+     * are conservative until we can get rid of remaining clients (and
+     * this function). */
+ return atomic_load_explicit(a, memory_order_relaxed);
+}
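As the comment says, new code should not imitate this. A hedged sketch of the usual replacement, assuming the variable can be declared as a C11 atomic outright (g_state/read_state are hypothetical names):

    #include <stdatomic.h>

    static atomic_int_least32_t g_state;

    static int32_t read_state(void)
    {
        /* Default sequentially consistent load: the strongest and
         * simplest option, and what most callers of a "release load"
         * actually wanted. */
        return atomic_load(&g_state);
    }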
/*
* Perform an atomic store with "acquire" or "release" ordering.
*
- * Note that the notion of a "acquire" ordering for a store does not
+ * Note that the notion of an "acquire" ordering for a store does not
* really fit into the C11 or C++11 memory model. The extra ordering
* is normally observable only by code using memory_order_relaxed
* atomics, or data races. In the rare cases in which such ordering
* is called for, use memory_order_relaxed atomics and a trailing
* atomic_thread_fence (typically with memory_order_release,
* not memory_order_acquire!) instead.
- *
- * This is only necessary if you need the memory barrier. A 32-bit write
- * to a 32-bit aligned address is atomic on all supported platforms.
*/
-void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
-void android_atomic_release_store(int32_t value, volatile int32_t* addr);
+ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ atomic_store_explicit(a, value, memory_order_relaxed);
+ atomic_thread_fence(memory_order_seq_cst);
+    /* Again overly conservative, to accommodate weird clients. */
+}
+
+ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ atomic_store_explicit(a, value, ANDROID_ATOMIC_RELEASE);
+}
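A sketch of the fence-based replacement the comment above recommends for code that genuinely needs ordering after a store (names hypothetical):

    #include <stdatomic.h>

    static atomic_int_least32_t g_status;

    static void publish_status(int32_t v)
    {
        atomic_store_explicit(&g_status, v, memory_order_relaxed);
        /* Trailing fence with RELEASE ordering, as the comment says
         * (not acquire, despite the "acquire store" name). */
        atomic_thread_fence(memory_order_release);
    }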
/*
* Compare-and-set operation with "acquire" or "release" ordering.
@@ -132,10 +195,48 @@ void android_atomic_release_store(int32_t value, volatile int32_t* addr);
 * Implementations that use the release CAS in a loop may be less efficient
 * than necessary, because we re-issue the memory barrier on each iteration.
*/
+ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
- volatile int32_t* addr);
+ volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return (int)(!atomic_compare_exchange_strong_explicit(
+ a, &oldvalue, newvalue,
+ ANDROID_ATOMIC_ACQUIRE,
+ ANDROID_ATOMIC_ACQUIRE));
+}
+
+ANDROID_ATOMIC_INLINE
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
- volatile int32_t* addr);
+ volatile int32_t* addr)
+{
+ volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
+ return (int)(!atomic_compare_exchange_strong_explicit(
+ a, &oldvalue, newvalue,
+ ANDROID_ATOMIC_RELEASE,
+ memory_order_relaxed));
+}
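Note that both CAS functions return 0 on success and nonzero on failure, hence the !. A hypothetical sketch of the loop pattern the comment above alludes to, built on the new inline:

    /* Atomically add delta, but only while the counter is non-negative. */
    static int32_t add_if_nonnegative(volatile int32_t* addr, int32_t delta)
    {
        int32_t old;
        do {
            old = *addr;            /* racy snapshot; the CAS revalidates it */
            if (old < 0)
                return old;         /* bail out without writing */
        } while (android_atomic_release_cas(old, old + delta, addr) != 0);
        return old;                 /* the value we replaced */
    }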
+
+/*
+ * Fence primitives.
+ */
+ANDROID_ATOMIC_INLINE
+void android_compiler_barrier(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+ /* Could probably also be: */
+ /* atomic_signal_fence(memory_order_seq_cst); */
+}
+
+ANDROID_ATOMIC_INLINE
+void android_memory_barrier(void)
+{
+#if ANDROID_SMP == 0
+ android_compiler_barrier();
+#else
+ atomic_thread_fence(memory_order_seq_cst);
+#endif
+}
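Usage sketch for the fences: the classic publish-with-flag idiom these barriers historically served. Legacy-style code; under C11 this is formally a data race, and new code should use atomics instead:

    static int32_t g_data;
    static volatile int32_t g_ready;

    static void producer(void)
    {
        g_data = 42;
        android_memory_barrier();   /* order the data write before the flag */
        g_ready = 1;
    }

    static int32_t consumer(void)
    {
        if (g_ready == 0)
            return -1;              /* not published yet */
        android_memory_barrier();   /* order the flag read before the data read */
        return g_data;
    }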
/*
* Aliases for code using an older version of this header. These are now
@@ -145,8 +246,4 @@ int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
#endif // ANDROID_CUTILS_ATOMIC_H
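For reference, a hypothetical legacy call site showing how the aliases keep old code compiling against the new inlines:

    static volatile int32_t g_flag;

    static void legacy_caller(void)
    {
        /* Expands to android_atomic_release_store(). */
        android_atomic_write(1, &g_flag);
        /* Expands to android_atomic_release_cas(); returns 0 on success. */
        (void)android_atomic_cmpxchg(1, 2, &g_flag);
    }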