author    Andy McFadden <fadden@android.com>    2010-05-19 22:33:28 -0700
committer Andy McFadden <fadden@android.com>    2010-05-20 21:40:33 -0700
commit    ac322da69ee48aa792baf5c48cfb719ce077f67e (patch)
tree      c71b6658824f049f7e7fc8e58391cda113a758da /include
parent    53a79a841b4afa5e7dc3e05cfd04b5ae44de95b6 (diff)
Atomic/SMP update.
Added atomic-inline.h. Added a platform-specific memory barrier call there. Added android_atomic_acquire_cmpxchg() and android_atomic_release_store(). Not tested on Mac OS X or SH.

Added memory barrier calls to linux-x86 atomics. Mac OS X has barrier functions already. sh isn't really SMP-ready. linux-arm needs work (to be done in a separate change).

Updated the makefile to make the SMP state visible to the code here. Note that host binaries are NOT built with SMP enabled; while our hosts are very likely SMP, it's not worth figuring out e.g. whether it's okay to use the SSE2 mfence instruction or whether we have to use something else. We haven't had barriers enabled in host tools before, so there's probably no need to start now.

Removed quasiatomic 64-bit calls (now part of Dalvik).

Change-Id: I49e5e6c8abe70f304cdedb9d7b8e6e65f8925815
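As a rough illustration of the intent behind the new calls (not part of this change), the sketch below builds a minimal spin lock on android_atomic_acquire_cmpxchg() and android_atomic_release_store(), mirroring the "useful for acquiring/releasing a spin lock" notes in the updated header. The spin_lock_t type and function names are hypothetical.

    #include <cutils/atomic.h>

    /* Hypothetical spin lock built on the primitives added in this change. */
    typedef struct { volatile int32_t locked; } spin_lock_t;  /* 0 = free, 1 = held */

    static void spin_lock(spin_lock_t* lock) {
        /* acquire_cmpxchg returns 0 on success; the barrier comes after the
         * store, so accesses in the critical section can't move above it. */
        while (android_atomic_acquire_cmpxchg(0, 1, &lock->locked) != 0) {
            /* spin */
        }
    }

    static void spin_unlock(spin_lock_t* lock) {
        /* release_store issues the barrier before the store, so writes made
         * in the critical section are visible before the lock is dropped. */
        android_atomic_release_store(0, &lock->locked);
    }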
Diffstat (limited to 'include')
-rw-r--r--  include/cutils/atomic-inline.h  101
-rw-r--r--  include/cutils/atomic.h          43
2 files changed, 122 insertions, 22 deletions
diff --git a/include/cutils/atomic-inline.h b/include/cutils/atomic-inline.h
new file mode 100644
index 0000000..4f5ddf7
--- /dev/null
+++ b/include/cutils/atomic-inline.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_INLINE_H
+#define ANDROID_CUTILS_ATOMIC_INLINE_H
+
+/*
+ * Inline declarations and macros for some special-purpose atomic
+ * operations. These are intended for rare circumstances where a
+ * memory barrier needs to be issued inline rather than as a function
+ * call.
+ *
+ * Most code should not use these.
+ *
+ * Anything that does include this file must set ANDROID_SMP to either
+ * 0 or 1, indicating compilation for UP or SMP, respectively.
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP before including atomic-inline.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Define the full memory barrier for an SMP system. This is
+ * platform-specific.
+ */
+
+#ifdef __arm__
+#include <machine/cpu-features.h>
+
+/*
+ * For ARMv6K we need to issue a specific MCR instead of the DMB, since
+ * that wasn't added until v7. For anything older, SMP isn't relevant.
+ * Since we don't have an ARMv6K to test with, we're not going to deal
+ * with that now.
+ *
+ * The DMB instruction is found in the ARM and Thumb2 instruction sets.
+ * This will fail on plain 16-bit Thumb.
+ */
+#if defined(__ARM_HAVE_DMB)
+# define __android_membar_full_smp() \
+ do { __asm__ __volatile__ ("dmb" ::: "memory"); } while (0)
+#else
+# define __android_membar_full_smp() ARM_SMP_defined_but_no_DMB()
+#endif
+
+#elif defined(__i386__) || defined(__x86_64__)
+/*
+ * For recent x86, we can use the SSE2 mfence instruction.
+ */
+# define __android_membar_full_smp() \
+ do { __asm__ __volatile__ ("mfence" ::: "memory"); } while (0)
+
+#else
+/*
+ * Implementation not defined for this platform. Hopefully we're building
+ * in uniprocessor mode.
+ */
+# define __android_membar_full_smp() SMP_barrier_not_defined_for_platform()
+#endif
+
+
+/*
+ * Full barrier. On uniprocessors this is just a compiler reorder barrier,
+ * which ensures that the statements appearing above the barrier in the C/C++
+ * code will be issued after the statements appearing below the barrier.
+ *
+ * For SMP this also includes a memory barrier instruction. On an ARM
+ * CPU this means that the current core will flush pending writes, wait
+ * for pending reads to complete, and discard any cached reads that could
+ * be stale. Other CPUs may do less, but the end result is equivalent.
+ */
+#if ANDROID_SMP != 0
+# define android_membar_full() __android_membar_full_smp()
+#else
+# define android_membar_full() \
+ do { __asm__ __volatile__ ("" ::: "memory"); } while (0)
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // ANDROID_CUTILS_ATOMIC_INLINE_H
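A minimal sketch of a direct user of the new header, assuming the build system supplies the ANDROID_SMP definition as the commit message describes (e.g. -DANDROID_SMP=1 for target builds, 0 for uniprocessor/host). The publish() function and globals are hypothetical, shown only to illustrate where android_membar_full() fits.

    /* Assumes the makefile passes -DANDROID_SMP=1 (or 0); the header errors out otherwise. */
    #include <stdint.h>
    #include <cutils/atomic-inline.h>

    static int32_t g_data;
    static volatile int32_t g_ready;

    static void publish(int32_t value) {
        g_data = value;
        android_membar_full();  /* mfence/dmb on SMP builds, compiler-only barrier on UP */
        g_ready = 1;            /* readers who see g_ready == 1 also see g_data */
    }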
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index 5694d66..8e12902 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -25,10 +25,8 @@ extern "C" {
#endif
/*
- * NOTE: memory shared between threads is synchronized by all atomic operations
- * below, this means that no explicit memory barrier is required: all reads or
- * writes issued before android_atomic_* operations are guaranteed to complete
- * before the atomic operation takes place.
+ * Unless otherwise noted, the operations below perform a full fence before
+ * the atomic operation on SMP systems ("release" semantics).
*/
void android_atomic_write(int32_t value, volatile int32_t* addr);
@@ -37,7 +35,6 @@ void android_atomic_write(int32_t value, volatile int32_t* addr);
* all these atomic operations return the previous value
*/
-
int32_t android_atomic_inc(volatile int32_t* addr);
int32_t android_atomic_dec(volatile int32_t* addr);
@@ -48,30 +45,32 @@ int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
int32_t android_atomic_swap(int32_t value, volatile int32_t* addr);
/*
- * NOTE: Two "quasiatomic" operations on the exact same memory address
- * are guaranteed to operate atomically with respect to each other,
- * but no guarantees are made about quasiatomic operations mixed with
- * non-quasiatomic operations on the same address, nor about
- * quasiatomic operations that are performed on partially-overlapping
- * memory.
+ * cmpxchg returns zero if the new value was successfully written. This
+ * will only happen when *addr == oldvalue.
+ *
+ * (The return value is inverted from implementations on other platforms, but
+ * matches the ARM ldrex/strex semantics. Note also this is a compare-and-set
+ * operation, not a compare-and-exchange operation, since we don't return
+ * the original value.)
*/
+int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
+ volatile int32_t* addr);
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr);
-int64_t android_quasiatomic_read_64(volatile int64_t* addr);
-
/*
- * cmpxchg return a non zero value if the exchange was NOT performed,
- * in other words if oldvalue != *addr
+ * Same basic operation as android_atomic_cmpxchg, but with "acquire"
+ * semantics. The memory barrier, if required, is performed after the
+ * new value is stored. Useful for acquiring a spin lock.
*/
-
-int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
volatile int32_t* addr);
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
- volatile int64_t* addr);
-
+/*
+ * Perform an atomic store with "release" semantics. The memory barrier,
+ * if required, is performed before the store instruction. Useful for
+ * releasing a spin lock.
+ */
+#define android_atomic_release_store android_atomic_write
-
#ifdef __cplusplus
} // extern "C"
#endif
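One more hedged sketch, this time of the inverted return-value convention called out in the new comment: android_atomic_cmpxchg() returns zero only when the swap happened, so a retry loop tests for non-zero. The helper function is hypothetical, not part of the API.

    #include <cutils/atomic.h>

    /* Atomically add 'amount' to *addr via a cmpxchg retry loop; returns the old value. */
    static int32_t atomic_add_via_cmpxchg(int32_t amount, volatile int32_t* addr) {
        int32_t old;
        do {
            old = *addr;
            /* Returns 0 only if *addr still equals 'old' and the new value was stored. */
        } while (android_atomic_cmpxchg(old, old + amount, addr) != 0);
        return old;
    }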