author    Russell King <rmk@dyn-67.arm.linux.org.uk>      2009-05-25 20:58:00 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>      2009-05-28 19:39:27 +0100
commit    bac4e960b5ce2453d862beaf20e59aa68af3b43a (patch)
tree      69ba3b450a769fa4a613a1f8c4e6454cdcfae5aa /arch
parent    290815710b51de23f9ed6799d3e0bb762d4f907c (diff)
[ARM] barriers: improve xchg, bitops and atomic SMP barriers
Mathieu Desnoyers pointed out that the ARM barriers were lacking:

- cmpxchg, xchg and atomic add return need memory barriers on
  architectures which can reorder the relative order in which memory
  read/writes can be seen between CPUs, which seems to include recent
  ARM architectures. Those barriers are currently missing on ARM.

- test_and_xxx_bit were missing SMP barriers.

So put these barriers in. Provide separate atomic_add/atomic_sub
operations which do not require barriers.

Reported-Reviewed-and-Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
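To make the semantics concrete, here is a minimal C sketch (not part of the patch; the example_* names are illustrative) of how a caller may rely on the difference between the new barrier-free atomic_add() and the full-barrier atomic_add_return():

static atomic_t example_events = ATOMIC_INIT(0);	/* illustrative name */
static int example_payload;				/* illustrative name */

static void example_publish(int val)
{
	example_payload = val;
	/* atomic_add_return() now issues smp_mb() on both sides, so the
	 * payload store is visible before the counter update can be seen. */
	atomic_add_return(1, &example_events);
}

static void example_count(void)
{
	/* No ordering requirement here, so the barrier-free op suffices. */
	atomic_add(1, &example_events);
}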
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/assembler.h   13
-rw-r--r--  arch/arm/include/asm/atomic.h      61
-rw-r--r--  arch/arm/include/asm/system.h       3
-rw-r--r--  arch/arm/kernel/entry-armv.S        5
-rw-r--r--  arch/arm/lib/bitops.h               2
5 files changed, 71 insertions, 13 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6116e48..15f8a09 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
+
+/*
+ * SMP data memory barrier
+ */
+ .macro smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+ dmb
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#endif
+#endif
+ .endm
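For readers more comfortable in C, the instruction selection done by the smp_dmb assembly macro above could be expressed roughly as below; example_smp_mb() is an illustrative name, not an interface added by this patch:

/* Sketch only: mirrors the smp_dmb macro's per-architecture choice. */
#ifdef CONFIG_SMP
# if __LINUX_ARM_ARCH__ >= 7
#  define example_smp_mb()	__asm__ __volatile__("dmb" : : : "memory")
# elif __LINUX_ARM_ARCH__ == 6
   /* ARMv6 has no dmb mnemonic; the CP15 c7, c10, 5 write is the DMB op. */
#  define example_smp_mb()	\
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
# endif
#else
# define example_smp_mb()	barrier()	/* compiler barrier only on UP */
#endif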
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index ee99723..16b52f3 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %0, [%2]\n"
+" add %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %0, [%2]\n"
+" sub %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;
+ smp_mb();
+
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
+ smp_mb();
+
return oldval;
}
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
+#define atomic_add(i, v) (void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
+#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-#define atomic_dec(v) (void) atomic_sub_return(1, v)
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif
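The change to smp_mb__before_atomic_dec() and friends also affects existing callers: a compiler barrier() is no longer enough on ARM SMP, so the usual release-then-decrement pattern now gets a real barrier. A brief sketch with made-up names (example_object, example_put):

struct example_object {
	atomic_t	refs;
	int		data;
};

static void example_put(struct example_object *obj)
{
	obj->data = 0;
	/* Now expands to smp_mb() on ARM SMP, ordering the store above
	 * before the reference drop as seen from other CPUs. */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->refs);
}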
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8e..7fce8f3 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
+ smp_mb();
+
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
+ smp_mb();
return ret;
}
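With smp_mb() on both sides, xchg() now behaves as a full barrier, which is what lock-style users of it assume. A minimal sketch (example_lock_t and its helpers are hypothetical, not code from this patch):

typedef struct { volatile unsigned long slock; } example_lock_t;

static inline void example_lock(example_lock_t *l)
{
	/* The barrier after the exchange keeps the critical section from
	 * being observed before the lock is seen as taken. */
	while (xchg(&l->slock, 1) != 0)
		cpu_relax();
}

static inline void example_unlock(example_lock_t *l)
{
	/* The barrier before the exchange makes the critical section's
	 * writes visible before the lock appears released. */
	xchg(&l->slock, 0);
}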
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d662a2f..83b1da6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -815,10 +815,7 @@ __kuser_helper_start:
*/
__kuser_memory_barrier: @ 0xffff0fa0
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
- mcr p15, 0, r0, c7, c10, 5 @ dmb
-#endif
+ smp_dmb
usr_ret lr
.align 5
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 2e787d4..c7f2627 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -18,12 +18,14 @@
mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3 @ create mask
+ smp_dmb
1: ldrexb r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
strexb ip, r2, [r1]
cmp ip, #0
bne 1b
+ smp_dmb
cmp r0, #0
movne r0, #1
2: mov pc, lr
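The smp_dmb pair above gives the test_and_*_bit() family full-barrier behaviour on SMP, so a bit can be used as an ownership flag. An illustrative sketch (EXAMPLE_BUSY and example_flags are made-up names):

#define EXAMPLE_BUSY	0			/* made-up bit number */
static unsigned long example_flags;		/* made-up flag word */

static int example_try_claim(void)
{
	/* The barrier after the ldrexb/strexb loop ensures work done
	 * while holding the flag is not reordered before the claim. */
	return !test_and_set_bit(EXAMPLE_BUSY, &example_flags);
}

static void example_release(void)
{
	/* clear_bit() itself is not a barrier; order prior writes first. */
	smp_mb__before_clear_bit();
	clear_bit(EXAMPLE_BUSY, &example_flags);
}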