author    KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 10:57:12 +0200
committer KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 10:57:12 +0200
commit    50e2778ee890c3e0b7f2e9c876d85aa962abc997 (patch)
tree      5dce0669613802469470633ddb7d407f8f8f7986 /arch/arm
parent    6a9dff5ea86ccd4a1dd503a36ce9856c4c092360 (diff)
parent    b9a7985a8d9ca00d8ce977756fde1306c9ab1e41 (diff)
Merge commit 'b9a7985' into HEAD
Conflicts: kernel/time/timekeeping.c
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                  52
-rw-r--r--  arch/arm/boot/compressed/head.S    1
-rw-r--r--  arch/arm/include/asm/mutex.h     119
3 files changed, 31 insertions(+), 141 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ac2ef95..310434b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1236,6 +1236,32 @@ config ARM_ERRATA_754327
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
+config ARM_ERRATA_764369
+ bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for erratum 764369
+ affecting Cortex-A9 MPCore with two or more processors (all
+ current revisions). Under certain timing circumstances, a data
+ cache line maintenance operation by MVA targeting an Inner
+ Shareable memory region may fail to proceed up to either the
+ Point of Coherency or to the Point of Unification of the
+ system. This workaround adds a DSB instruction before the
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1300,32 +1326,6 @@ source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
-config ARM_ERRATA_764369
- bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for erratum 764369
- affecting Cortex-A9 MPCore with two or more processors (all
- current revisions). Under certain timing circumstances, a data
- cache line maintenance operation by MVA targeting an Inner
- Shareable memory region may fail to proceed up to either the
- Point of Coherency or to the Point of Unification of the
- system. This workaround adds a DSB instruction before the
- relevant cache maintenance functions and sets a specific bit
- in the diagnostic control register of the SCU.
-
-config PL310_ERRATA_769419
- bool "PL310 errata: no automatic Store Buffer drain"
- depends on CACHE_L2X0
- help
- On revisions of the PL310 prior to r3p2, the Store Buffer does
- not automatically drain. This can cause normal, non-cacheable
- writes to be retained when the memory system is idle, leading
- to suboptimal I/O performance for drivers using coherent DMA.
- This option adds a write barrier to the cpu_idle loop so that,
- on systems with an outer cache, the store buffer is drained
- explicitly.
-
endmenu
menu "Kernel Features"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e58603b..caddb9d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -539,6 +539,7 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..b1479fd 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -7,121 +7,10 @@
*/
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
/*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap. The idea is to attempt
- * decrementing the lock value only once. If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
*/
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- __res = fail_fn(count);
- return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "ldrex %0, [%3] \n\t"
- "add %1, %0, #1 \n\t"
- "strex %2, %1, [%3] "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __orig |= __ex_flag;
- if (unlikely(__orig != 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "1: ldrex %0, [%3] \n\t"
- "subs %1, %0, #1 \n\t"
- "strexeq %2, %1, [%3] \n\t"
- "movlt %0, #0 \n\t"
- "cmpeq %2, #0 \n\t"
- "bgt 1b "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&count->counter)
- : "cc", "memory" );
-
- return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
#endif
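
On the mutex side, the replacement pulls in asm-generic/mutex-xchg.h, whose fastpaths are built on atomic_xchg() instead of the open-coded ldrex/strex sequences being deleted here. The sketch below approximates what that generic lock fastpath looked like in kernels of this era, and shows why the new comment can speak of swp on pre-ARMv6 and exclusive accesses on ARMv6+: that is simply what atomic_xchg() compiles to at each architecture level. The function name is changed from the real __mutex_fastpath_lock() to mark it as an approximation.

#include <linux/atomic.h>

/*
 * Approximation of the asm-generic/mutex-xchg.h lock fastpath: try to
 * move the count from 1 (unlocked) to 0 (locked) with one atomic
 * exchange, and punt to the slow path for any other starting value.
 */
static inline void
mutex_xchg_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
}
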