From 247055aa21ffef1c49dd64710d5e94c2aee19b58 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 13 Sep 2010 16:03:21 +0100
Subject: ARM: 6384/1: Remove the domain switching on ARMv6k/v7 CPUs

This patch removes the domain switching functionality via the set_fs and
__switch_to functions on cores that have a TLS register.

Currently, the ioremap and vmalloc areas share the same level 1 page
tables and therefore have the same domain (DOMAIN_KERNEL). When the
kernel domain is modified from Client to Manager (via the __set_fs or in
the __switch_to function), the XN (eXecute Never) bit is overridden and
newer CPUs can speculatively prefetch the ioremap'ed memory.

Linux performs the kernel domain switching to allow user-specific
functions (copy_to/from_user, get/put_user etc.) to access kernel
memory. In order for these functions to work with the kernel domain set
to Client, the patch modifies the LDRT/STRT and related instructions to
the LDR/STR ones.

The user pages access rights are also modified for kernel read-only
access rather than read/write so that the copy-on-write mechanism still
works. CPU_USE_DOMAINS gets disabled only if the hardware has a TLS
register (CPU_32v6K is defined) since writing the TLS value to the high
vectors page isn't possible.

The user addresses passed to the kernel are checked by the access_ok()
function so that they do not point to the kernel space.

Tested-by: Anton Vorontsov
Cc: Tony Lindgren
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/entry-armv.S')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e357..35f3f20 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -735,7 +735,7 @@ ENTRY(__switch_to)
 THUMB(	stmia	ip!, {r4 - sl, fp}	)	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		)
 THUMB(	str	lr, [ip], #4		)
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	set_tls	r3, r4, r5
@@ -744,7 +744,7 @@ ENTRY(__switch_to)
 	ldr	r8, =__stack_chk_guard
 	ldr	r7, [r7, #TSK_STACK_CANARY]
 #endif
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
 	mov	r5, r0
--
cgit v1.1

From ad3b6993b9c5482e8a2ec5aed181538c921fdcbd Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 15 Nov 2010 09:42:08 +0000
Subject: ARM: SMP: pass an ipi number to smp_cross_call()

This allows us to use smp_cross_call() to trigger a number of different
software generated interrupts, rather than combining them all on one
SGI. Recover the SGI number via do_IPI.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel/entry-armv.S')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e357..955cf5f 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -48,7 +48,7 @@
 	 */
 	ALT_SMP(test_for_ipi r0, r6, r5, lr)
 	ALT_UP_B(9997f)
-	movne	r0, sp
+	movne	r1, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
--
cgit v1.1
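[A minimal sketch of the instruction-selection idea behind the first
patch. The macro name and placement are illustrative, not necessarily
those used in mainline: with domains in use, the user accessors need the
unprivileged LDRT/STRT encodings; with the kernel domain fixed as
Client, plain LDR/STR suffice and the (now kernel read-only) page
permissions do the policing.]

    /* Illustrative only: pick the user-access load/store variant at
     * build time.  With CONFIG_CPU_USE_DOMAINS, get_user() and friends
     * must use the unprivileged LDRT/STRT encodings; without domain
     * switching, the ordinary LDR/STR encodings are checked against
     * the page-table permissions by the MMU instead.
     */
    #ifdef CONFIG_CPU_USE_DOMAINS
    #define TUSER(instr)	instr ## t	/* e.g. ldr -> ldrt */
    #else
    #define TUSER(instr)	instr		/* plain ldr/str */
    #endif

    /* hypothetical use in a uaccess routine:  TUSER(ldr) r2, [r0] */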
From ed3768a8d9dc2d345d4f27eb44ee1e4825056c08 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 1 Dec 2010 15:39:23 +0100
Subject: ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.

  * __fixup_smp_on_up has been modified with support for the
    THUMB2_KERNEL case.  For THUMB2_KERNEL only, fixups are split
    into halfwords in case of misalignment, since we can't rely on
    unaligned accesses working before turning the MMU on.

    No attempt is made to optimise the aligned case, since the
    number of fixups is typically small, and it seems best to keep
    the code as simple as possible.

  * Add a rotate in the fixup_smp code in order to support
    CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.

  * Add an assembly-time sanity-check to ALT_UP() to ensure that
    the content really is the right size (4 bytes).

    (No check is done for ALT_SMP().  Possibly, this could be fixed
    by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
    ALT_SMP...SMP_UP_B) into two macros.  In the first case, ALT_SMP
    needs to expand to >= 4 bytes, not == 4.)

  * smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
    to macro limitations) has not been modified: the affected
    instruction (mov) has no 16-bit encoding, so the correct
    instruction size is satisfied in this case.

  * A "mode" parameter has been added to smp_dmb:
      smp_dmb arm	@ assumes 4-byte instructions (for ARM code, e.g. kuser)
      smp_dmb		@ uses W() to ensure 4-byte instructions for ALT_SMP()

    This avoids assembly failures due to use of W() inside smp_dmb,
    when assembling pure-ARM code in the vectors page.

    There might be a better way to achieve this.

  * Kconfig: make SMP_ON_UP depend on (!THUMB2_KERNEL || !BIG_ENDIAN),
    i.e., THUMB2_KERNEL is now supported, but only if !BIG_ENDIAN
    (the fixup code for Thumb-2 currently assumes little-endian order).

Tested using a single generic realview kernel on:
  ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
  ARM RealView PBX-A9 (SMP)

Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/entry-armv.S')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 955cf5f..7f22a11 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -842,7 +842,7 @@ __kuser_helper_start:
  */
 
 __kuser_memory_barrier:				@ 0xffff0fa0
-	smp_dmb
+	smp_dmb	arm
 	usr_ret	lr
 
 	.align	5
@@ -959,7 +959,7 @@ kuser_cmpxchg_fixup:
 
 #else
 
-	smp_dmb
+	smp_dmb	arm
1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
--
cgit v1.1
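[A rough C illustration of the halfword splitting described in the first
bullet above; the actual fixup code is assembly, and the function and
variable names here are invented. Each 32-bit Thumb-2 replacement
instruction is written as two 16-bit stores, so a fixup site that is
only 2-byte aligned never requires an unaligned 32-bit access while the
MMU (and thus alignment handling) is still off. Little-endian layout is
assumed, matching the patch's !BIG_ENDIAN restriction.]

    #include <stdint.h>

    /* Store one 32-bit Thumb-2 instruction as two halfwords.  In the
     * Thumb-2 encoding the leading halfword sits at the lower address,
     * so the instruction (in its conventional hw1:hw2 printed form) is
     * written high half first.
     */
    static void patch_fixup(uint16_t *site, uint32_t insn)
    {
    	site[0] = insn >> 16;		/* leading halfword */
    	site[1] = insn & 0xffff;	/* trailing halfword */
    }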
From 521086412ee423fbdfc7da81f257239c43f707b4 Mon Sep 17 00:00:00 2001
From: eric miao
Date: Mon, 13 Dec 2010 09:42:34 +0100
Subject: ARM: 6532/1: Allow a machine to specify its own IRQ handlers at run-time

Normally, each ARM platform has its own way to decode the IRQ hardware
status and demultiplex it to the corresponding IRQ handler. This is
highly optimized by the irq_handler macro in entry-armv.S, and each
machine defines its own macro to decode the IRQ number. However, this
prevents multiple machine classes from being built into a single kernel.

This can be solved by allowing each machine to specify its own handler
and making the function pointer 'handle_arch_irq' point to it at run
time. CONFIG_MULTI_IRQ_HANDLER is introduced to allow both solutions to
work.

Compared with the highly optimized irq_handler macro, the new function
must be written with care so as not to lose too much performance. The
IPI handling on SMP is expected to move into the machine-provided IRQ
handler as well.

The assembly code to invoke handle_arch_irq is optimized by Russell
King.

Signed-off-by: Eric Miao
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/entry-armv.S')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 35f3f20..caa6c39 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -30,6 +30,14 @@
  * Interrupt handling.  Preserves r7, r8, r9
  */
 	.macro	irq_handler
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	ldr	r5, =handle_arch_irq
+	mov	r0, sp
+	ldr	r5, [r5]
+	adr	lr, BSYM(9997f)
+	teq	r5, #0
+	movne	pc, r5
+#endif
 	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
 	movne	r1, sp
@@ -58,9 +66,8 @@
 	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
-9997:
 #endif
-
+9997:
 	.endm
 
 #ifdef CONFIG_KPROBES
@@ -1245,3 +1252,9 @@ cr_alignment:
 	.space	4
 cr_no_alignment:
 	.space	4
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.globl	handle_arch_irq
+handle_arch_irq:
+	.space	4
+#endif
--
cgit v1.1

From cd544ce754ac2432ffcc0626ea802d2b30876b50 Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Wed, 22 Dec 2010 13:20:08 +0100
Subject: ARM: 6538/1: Subarch IRQ handler macros V3

Per-subarch interrupt handler macros, V3. This patch breaks out code
from the irq_handler macro into arch_irq_handler and
arch_irq_handler_default. The macros are put in the header file
"entry-macro-multi.S".

The arch_irq_handler_default macro is designed to be used by
irq_handler in entry-armv.S, while arch_irq_handler is suitable for
per-subarch use.

Signed-off-by: Magnus Damm
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 31 ++-----------------------------
 1 file changed, 2 insertions(+), 29 deletions(-)

(limited to 'arch/arm/kernel/entry-armv.S')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index caa6c39..a866dce 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -25,6 +25,7 @@
 #include
 
 #include "entry-header.S"
+#include <asm/entry-macro-multi.S>
 
 /*
  * Interrupt handling.  Preserves r7, r8, r9
@@ -38,35 +39,7 @@
 	teq	r5, #0
 	movne	pc, r5
 #endif
-	get_irqnr_preamble r5, lr
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, BSYM(1b)
-	bne	asm_do_IRQ
-
-#ifdef CONFIG_SMP
-	/*
-	 * XXX
-	 *
-	 * this macro assumes that irqstat (r6) and base (r5) are
-	 * preserved from get_irqnr_and_base above
-	 */
-	ALT_SMP(test_for_ipi r0, r6, r5, lr)
-	ALT_UP_B(9997f)
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
-	test_for_ltirq r0, r6, r5, lr
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_local_timer
-#endif
-#endif
+	arch_irq_handler_default
 9997:
 	.endm
 
--
cgit v1.1
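[A sketch of how a platform might hook the handle_arch_irq pointer
introduced two patches above. The SoC name, register offset, and header
choices are invented for illustration; at this point in the series
there is no dedicated registration helper, so the sketch assigns the
pointer directly, matching the storage reserved in entry-armv.S.]

    #include <linux/init.h>	/* __init */
    #include <linux/io.h>	/* readl() */
    #include <asm/irq.h>	/* asm_do_IRQ(); header location assumed */
    #include <asm/ptrace.h>	/* struct pt_regs */

    /* Declared here for the sketch; the patch reserves the storage in
     * entry-armv.S, so real code would get this from a header. */
    extern void (*handle_arch_irq)(struct pt_regs *);

    #define INTC_PENDING	0x98	/* invented register offset */
    static void __iomem *intc_base;	/* mapped elsewhere during boot */

    /* Decode the pending interrupt in C, instead of in the per-machine
     * get_irqnr_and_base assembly macro, and hand it to the generic
     * asm_do_IRQ() entry point. */
    static void my_soc_handle_irq(struct pt_regs *regs)
    {
    	unsigned int irqnr = readl(intc_base + INTC_PENDING);

    	asm_do_IRQ(irqnr, regs);
    }

    void __init my_soc_init_irq(void)
    {
    	/* irq_handler branches to this pointer when it is non-NULL */
    	handle_arch_irq = my_soc_handle_irq;
    }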