author     KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 10:57:12 +0200
committer  KalimochoAz <calimochoazucarado@gmail.com>  2012-10-14 10:57:12 +0200
commit     50e2778ee890c3e0b7f2e9c876d85aa962abc997 (patch)
tree       5dce0669613802469470633ddb7d407f8f8f7986 /arch
parent     6a9dff5ea86ccd4a1dd503a36ce9856c4c092360 (diff)
parent     b9a7985a8d9ca00d8ce977756fde1306c9ab1e41 (diff)
download   kernel_samsung_crespo-50e2778ee890c3e0b7f2e9c876d85aa962abc997.zip
           kernel_samsung_crespo-50e2778ee890c3e0b7f2e9c876d85aa962abc997.tar.gz
           kernel_samsung_crespo-50e2778ee890c3e0b7f2e9c876d85aa962abc997.tar.bz2
Merge commit 'b9a7985' into HEAD

Conflicts:
	kernel/time/timekeeping.c
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/atomic.h   |   4
-rw-r--r--  arch/arm/Kconfig                  |  52
-rw-r--r--  arch/arm/boot/compressed/head.S   |   1
-rw-r--r--  arch/arm/include/asm/mutex.h      | 119
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c  |   9
-rw-r--r--  arch/x86/pci/fixup.c              |  17
-rw-r--r--  arch/x86/xen/setup.c              |   4
7 files changed, 62 insertions(+), 144 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e756d04..b15162f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
*/
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
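Note on the ATOMIC_INIT change above: the old compound-literal form is not a constant expression in strict ISO C, so it could fail to build when the macro was nested inside another static initializer; the plain brace form is accepted there. A minimal, hypothetical C sketch of the kind of usage this helps (struct and variable names are illustrative, not from the patch):

	struct counter_holder {
		atomic_t refs;
	};

	/* Expands to .refs = { 1 } with the new definition and is a valid
	 * file-scope initializer; the old ((atomic_t) { 1 }) form could be
	 * rejected by some compilers as a non-constant initializer. */
	static struct counter_holder holder = {
		.refs = ATOMIC_INIT(1),
	};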
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ac2ef95..310434b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1236,6 +1236,32 @@ config ARM_ERRATA_754327
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
+config ARM_ERRATA_764369
+ bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for erratum 764369
+ affecting Cortex-A9 MPCore with two or more processors (all
+ current revisions). Under certain timing circumstances, a data
+ cache line maintenance operation by MVA targeting an Inner
+ Shareable memory region may fail to proceed up to either the
+ Point of Coherency or to the Point of Unification of the
+ system. This workaround adds a DSB instruction before the
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1300,32 +1326,6 @@ source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
-config ARM_ERRATA_764369
- bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for erratum 764369
- affecting Cortex-A9 MPCore with two or more processors (all
- current revisions). Under certain timing circumstances, a data
- cache line maintenance operation by MVA targeting an Inner
- Shareable memory region may fail to proceed up to either the
- Point of Coherency or to the Point of Unification of the
- system. This workaround adds a DSB instruction before the
- relevant cache maintenance functions and sets a specific bit
- in the diagnostic control register of the SCU.
-
-config PL310_ERRATA_769419
- bool "PL310 errata: no automatic Store Buffer drain"
- depends on CACHE_L2X0
- help
- On revisions of the PL310 prior to r3p2, the Store Buffer does
- not automatically drain. This can cause normal, non-cacheable
- writes to be retained when the memory system is idle, leading
- to suboptimal I/O performance for drivers using coherent DMA.
- This option adds a write barrier to the cpu_idle loop so that,
- on systems with an outer cache, the store buffer is drained
- explicitly.
-
endmenu
menu "Kernel Features"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e58603b..caddb9d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -539,6 +539,7 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
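The added bic above clears SCTLR.TRE (TEX remap enable, bit 28) before the decompressor turns the caches on, so it does not inherit whatever remap configuration firmware left behind. A hedged C equivalent of that read-modify-write, purely for illustration (the real code stays in head.S):

	static inline void sctlr_clear_tre(void)
	{
		unsigned long sctlr;

		asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
		sctlr &= ~(1UL << 28);		/* clear SCTLR.TRE */
		asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr) : "memory");
	}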
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..b1479fd 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -7,121 +7,10 @@
*/
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
/*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap. The idea is to attempt
- * decrementing the lock value only once. If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
*/
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- __res = fail_fn(count);
- return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "ldrex %0, [%3] \n\t"
- "add %1, %0, #1 \n\t"
- "strex %2, %1, [%3] "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __orig |= __ex_flag;
- if (unlikely(__orig != 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "1: ldrex %0, [%3] \n\t"
- "subs %1, %0, #1 \n\t"
- "strexeq %2, %1, [%3] \n\t"
- "movlt %0, #0 \n\t"
- "cmpeq %2, #0 \n\t"
- "bgt 1b "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&count->counter)
- : "cc", "memory" );
-
- return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
#endif
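For reference, the generic header pulled in above builds the mutex fastpaths from atomic_xchg() instead of hand-written ldrex/strex. A simplified sketch of the lock fastpath it provides (illustrative, not a verbatim copy of asm-generic/mutex-xchg.h):

	static inline void
	__mutex_fastpath_lock_sketch(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		/* Swap 0 into the count; if the old value was not 1 (unlocked),
		 * the mutex is held or contended, so fall back to the slow path. */
		if (unlikely(atomic_xchg(count, 0) != 1))
			fail_fn(count);
	}

On pre-ARMv6 this compiles down to swp; on ARMv6+ atomic_xchg() emits an exclusive load/store pair, which is what the new comment refers to.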
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 942bda2..1396edf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -451,6 +451,13 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
m->ip = regs->ip;
m->cs = regs->cs;
+ /*
+ * When in VM86 mode make the cs look like ring 3
+ * always. This is a lie, but it's better than passing
+ * the additional vm86 bit around everywhere.
+ */
+ if (v8086_mode(regs))
+ m->cs |= 3;
} else {
m->ip = 0;
m->cs = 0;
@@ -988,6 +995,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*/
add_taint(TAINT_MACHINE_CHECK);
+ mce_get_rip(&m, regs);
severity = mce_severity(&m, tolerant, NULL);
/*
@@ -1026,7 +1034,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
mce_ring_add(m.addr >> PAGE_SHIFT);
- mce_get_rip(&m, regs);
mce_log(&m);
if (severity > worst) {
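The reordering above matters because severity grading reads the context recorded in the mce struct: moving mce_get_rip() ahead of mce_severity() ensures cs/ip (including the VM86 ring fix-up) are filled in before they are graded. Roughly, user versus kernel context is decided from the low bits of cs, along these lines (a simplified illustration, not the exact mce-severity code):

	static int mce_context_is_user(const struct mce *m)
	{
		/* Ring 3 in the saved cs selector means the error hit user
		 * mode; the v8086 fix-up above makes VM86 look like ring 3. */
		return (m->cs & 3) == 3;
	}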
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6dd8955..0951b81 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
+
+/*
+ * Twinhead H12Y needs us to block out a region otherwise we map devices
+ * there and any access kills the box.
+ *
+ * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
+ *
+ * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
+ */
+static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
+ pr_info("Reserving memory on Twinhead H12Y\n");
+ request_mem_region(0xFFB00000, 0x100000, "twinhead");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8dcda4..5669564 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -15,6 +15,7 @@
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
+#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -463,4 +464,7 @@ void __init xen_arch_setup(void)
boot_option_idle_override = IDLE_HALT;
fiddle_vdso();
+#ifdef CONFIG_NUMA
+ numa_off = 1;
+#endif
}