Diffstat (limited to 'libcutils')
-rw-r--r--   libcutils/Android.mk            |  11
-rw-r--r--   libcutils/atomic-android-arm.S  |  14
-rw-r--r--   libcutils/atomic-android-sh.c   |  40
-rw-r--r--   libcutils/atomic.c              | 203
4 files changed, 46 insertions, 222 deletions
diff --git a/libcutils/Android.mk b/libcutils/Android.mk
index 4c45cc9..5b05a1e 100644
--- a/libcutils/Android.mk
+++ b/libcutils/Android.mk
@@ -16,6 +16,13 @@ LOCAL_PATH := $(my-dir)
 
 include $(CLEAR_VARS)
 
+ifeq ($(TARGET_CPU_SMP),true)
+    targetSmpFlag := -DANDROID_SMP=1
+else
+    targetSmpFlag := -DANDROID_SMP=0
+endif
+hostSmpFlag := -DANDROID_SMP=0
+
 commonSources := \
 	array.c \
 	hashmap.c \
@@ -80,6 +87,7 @@ LOCAL_MODULE := libcutils
 LOCAL_SRC_FILES := $(commonSources) $(commonHostSources)
 LOCAL_LDLIBS := -lpthread
 LOCAL_STATIC_LIBRARIES := liblog
+LOCAL_CFLAGS += $(hostSmpFlag)
 
 include $(BUILD_HOST_STATIC_LIBRARY)
 
@@ -92,6 +100,7 @@ LOCAL_MODULE := libcutils
 LOCAL_SRC_FILES := $(commonSources) $(commonHostSources) memory.c dlmalloc_stubs.c
 LOCAL_LDLIBS := -lpthread
 LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
 
 include $(BUILD_SHARED_LIBRARY)
 else #!sim
@@ -114,12 +123,14 @@ endif # !arm
 LOCAL_C_INCLUDES := $(KERNEL_HEADERS)
 LOCAL_STATIC_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
 
 include $(BUILD_STATIC_LIBRARY)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := libcutils
 LOCAL_WHOLE_STATIC_LIBRARIES := libcutils
 LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_CFLAGS += $(targetSmpFlag)
 
 include $(BUILD_SHARED_LIBRARY)
 
 endif #!sim
diff --git a/libcutils/atomic-android-arm.S b/libcutils/atomic-android-arm.S
index 1dd2363..f918990 100644
--- a/libcutils/atomic-android-arm.S
+++ b/libcutils/atomic-android-arm.S
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+/* TODO: insert memory barriers on SMP */
+
 #include <machine/cpu-features.h>
 
 /*
@@ -43,6 +45,8 @@
 
     .global android_atomic_cmpxchg
     .type android_atomic_cmpxchg, %function
+    .global android_atomic_acquire_cmpxchg
+    .type android_atomic_acquire_cmpxchg, %function
 
 /*
  * ----------------------------------------------------------------------------
@@ -237,7 +241,7 @@ android_atomic_or:
 
 /* replaced swp instruction with ldrex/strex for ARMv6 & ARMv7 */
 android_atomic_swap:
-#if defined (_ARM_HAVE_LDREX_STREX)
+#if defined (__ARM_HAVE_LDREX_STREX)
 1:  ldrex   r2, [r1]
     strex   r3, r0, [r1]
     teq     r3, #0
@@ -256,6 +260,7 @@ android_atomic_swap:
  * output: r0 = 0 (xchg done) or non-zero (xchg not done)
  */
 
+android_atomic_acquire_cmpxchg:
 android_atomic_cmpxchg:
     .fnstart
     .save {r4, lr}
@@ -282,10 +287,3 @@ android_atomic_cmpxchg:
     bx lr
     .fnend
 
-/*
- * ----------------------------------------------------------------------------
- * android_atomic_cmpxchg_64
- * input: r0-r1=oldvalue, r2-r3=newvalue, arg4 (on stack)=address
- * output: r0 = 0 (xchg done) or non-zero (xchg not done)
- */
-/* TODO: NEED IMPLEMENTATION FOR THIS ARCHITECTURE */
diff --git a/libcutils/atomic-android-sh.c b/libcutils/atomic-android-sh.c
index acbea97..d95b02b 100644
--- a/libcutils/atomic-android-sh.c
+++ b/libcutils/atomic-android-sh.c
@@ -118,42 +118,8 @@ int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
     return result;
 }
 
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
-    int64_t oldValue;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    oldValue = *addr;
-    *addr = value;
-
-    pthread_mutex_unlock(lock);
-    return oldValue;
-}
-
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr) {
-    int result;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    if (*addr == oldvalue) {
-        *addr = newvalue;
-        result = 0;
-    } else {
-        result = 1;
-    }
-    pthread_mutex_unlock(lock);
-    return result;
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
+        volatile int32_t* addr) {
+    return android_atomic_cmpxchg(oldvalue, newvalue, addr);
 }
 
-int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
-    int64_t result;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-    result = *addr;
-    pthread_mutex_unlock(lock);
-    return result;
-}
diff --git a/libcutils/atomic.c b/libcutils/atomic.c
index 41faaa2..d818906 100644
--- a/libcutils/atomic.c
+++ b/libcutils/atomic.c
@@ -15,6 +15,7 @@
  */
 
 #include <cutils/atomic.h>
+#include <cutils/atomic-inline.h>
 #ifdef HAVE_WIN32_THREADS
 #include <windows.h>
 #else
@@ -70,40 +71,19 @@ int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
 }
 
 int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
+    /* OS X CAS returns zero on failure; invert to return zero on success */
     return OSAtomicCompareAndSwap32Barrier(oldvalue, newvalue, (int32_t*)addr) == 0;
 }
 
-#if defined(__ppc__) \
-    || defined(__PPC__) \
-    || defined(__powerpc__) \
-    || defined(__powerpc) \
-    || defined(__POWERPC__) \
-    || defined(_M_PPC) \
-    || defined(__PPC)
-#define NEED_QUASIATOMICS 1
-#else
-
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr) {
-    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
-            (int64_t*)addr) == 0;
-}
-
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
-    int64_t oldValue;
-    do {
-        oldValue = *addr;
-    } while (android_quasiatomic_cmpxchg_64(oldValue, value, addr));
-    return oldValue;
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
+        volatile int32_t* addr) {
+    int result = (OSAtomicCompareAndSwap32(oldvalue, newvalue, (int32_t*)addr) == 0);
+    if (!result) {
+        /* success, perform barrier */
+        OSMemoryBarrier();
+    }
+    return result;
 }
 
-int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
-    return OSAtomicAdd64Barrier(0, addr);
-}
-
-#endif
-
-
 /*****************************************************************************/
 
 #elif defined(__i386__) || defined(__x86_64__)
@@ -163,6 +143,7 @@ int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
 }
 
 int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
+    android_membar_full();
     int xchg;
     asm volatile
     (
@@ -175,75 +156,25 @@ int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t*
     return xchg;
 }
 
-#define NEED_QUASIATOMICS 1
-
-/*****************************************************************************/
-#elif __arm__
-// Most of the implementation is in atomic-android-arm.s.
-
-// on the device, we implement the 64-bit atomic operations through
-// mutex locking. normally, this is bad because we must initialize
-// a pthread_mutex_t before being able to use it, and this means
-// having to do an initialization check on each function call, and
-// that's where really ugly things begin...
-//
-// BUT, as a special twist, we take advantage of the fact that in our
-// pthread library, a mutex is simply a volatile word whose value is always
-// initialized to 0. In other words, simply declaring a static mutex
-// object initializes it !
-//
-// another twist is that we use a small array of mutexes to dispatch
-// the contention locks from different memory addresses
-//
-
-#include <pthread.h>
-
-#define SWAP_LOCK_COUNT  32U
-static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];
-
-#define SWAP_LOCK(addr)   \
-    &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
-
-
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
-    int64_t oldValue;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    oldValue = *addr;
-    *addr = value;
-
-    pthread_mutex_unlock(lock);
-    return oldValue;
-}
-
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr) {
-    int result;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-
-    pthread_mutex_lock(lock);
-
-    if (*addr == oldvalue) {
-        *addr = newvalue;
-        result = 0;
-    } else {
-        result = 1;
-    }
-    pthread_mutex_unlock(lock);
-    return result;
+int android_atomic_acquire_cmpxchg(int32_t oldvalue, int32_t newvalue,
+        volatile int32_t* addr) {
+    int xchg;
+    asm volatile
+    (
+    "   lock; cmpxchg %%ecx, (%%edx);"
+    "   setne %%al;"
+    "   andl $1, %%eax"
+    : "=a" (xchg)
+    : "a" (oldvalue), "c" (newvalue), "d" (addr)
+    );
+    android_membar_full();
+    return xchg;
 }
 
-int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
-    int64_t result;
-    pthread_mutex_t* lock = SWAP_LOCK(addr);
-    pthread_mutex_lock(lock);
-    result = *addr;
-    pthread_mutex_unlock(lock);
-    return result;
-}
+/*****************************************************************************/
+#elif __arm__
+// implementation for ARM is in atomic-android-arm.s.
 
 /*****************************************************************************/
 #elif __sh__
@@ -255,85 +186,3 @@ int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
 
 #endif
 
-
-
-#if NEED_QUASIATOMICS
-
-/* Note that a spinlock is *not* a good idea in general
- * since they can introduce subtle issues. For example,
- * a real-time thread trying to acquire a spinlock already
- * acquired by another thread will never yeld, making the
- * CPU loop endlessly!
- *
- * However, this code is only used on the Linux simulator
- * so it's probably ok for us.
- *
- * The alternative is to use a pthread mutex, but
- * these must be initialized before being used, and
- * then you have the problem of lazily initializing
- * a mutex without any other synchronization primitive.
- */
-
-/* global spinlock for all 64-bit quasiatomic operations */
-static int32_t quasiatomic_spinlock = 0;
-
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
-        volatile int64_t* addr) {
-    int result;
-
-    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    if (*addr == oldvalue) {
-        *addr = newvalue;
-        result = 0;
-    } else {
-        result = 1;
-    }
-
-    android_atomic_swap(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
-    int64_t result;
-
-    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    result = *addr;
-    android_atomic_swap(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
-    int64_t result;
-
-    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
-        Sleep(0);
-#else
-        sched_yield();
-#endif
-    }
-
-    result = *addr;
-    *addr = value;
-    android_atomic_swap(0, &quasiatomic_spinlock);
-
-    return result;
-}
-
-#endif
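Note on the new ANDROID_SMP define: atomic.c now includes <cutils/atomic-inline.h> and calls android_membar_full(), but that header is not part of this diff. The sketch below is only an assumption of what an ANDROID_SMP-gated barrier helper could look like, not the actual header contents.

/* Sketch only -- the real <cutils/atomic-inline.h> is not shown in this commit. */
#if ANDROID_SMP != 0
#  if defined(__arm__)
     /* ARMv7 data memory barrier; older cores would need a CP15 drain instead. */
#    define android_membar_full()  __asm__ __volatile__ ("dmb" ::: "memory")
#  elif defined(__i386__) || defined(__x86_64__)
#    define android_membar_full()  __asm__ __volatile__ ("mfence" ::: "memory")
#  endif
#else
   /* Uniprocessor build: only keep the compiler from reordering. */
#  define android_membar_full()    __asm__ __volatile__ ("" ::: "memory")
#endif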
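For readers decoding the x86 inline assembly: lock cmpxchg compares %eax (oldvalue) with *addr and stores %ecx (newvalue) on equality, and setne/andl turn the result into 0 on success and 1 on failure. The same contract could be expressed with GCC's __sync builtins; this is only an equivalence sketch, not part of the commit.

#include <stdint.h>

static int cmpxchg_sketch(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr)
{
    /* __sync_bool_compare_and_swap returns true on success and implies a
     * full barrier, matching the lock-prefixed cmpxchg sequence above. */
    return __sync_bool_compare_and_swap(addr, oldvalue, newvalue) ? 0 : 1;
}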
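The new android_atomic_acquire_cmpxchg() keeps the existing return convention (0 when the swap happened, non-zero otherwise) but only promises acquire ordering. A hypothetical caller, such as this minimal spinlock, shows why that is enough on the lock path; the lock word and function names below are illustrative and not part of libcutils.

#include <cutils/atomic.h>

static volatile int32_t g_lock = 0;   /* 0 = free, 1 = held (illustrative only) */

static void example_lock(void)
{
    /* Loop until the word moves from 0 to 1; the acquire barrier taken on
     * success keeps reads in the critical section from floating above it. */
    while (android_atomic_acquire_cmpxchg(0, 1, &g_lock) != 0) {
        /* spin; real code would yield or back off */
    }
}

static void example_unlock(void)
{
    /* android_atomic_swap() is a full-barrier operation in this file, so it
     * is sufficient (if stronger than needed) to publish the critical section. */
    android_atomic_swap(0, &g_lock);
}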