Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/Makefile        |    1
 arch/arm/kernel/armksyms.c      |    2
 arch/arm/kernel/asm-offsets.c   |    2
 arch/arm/kernel/debug.S         |   35
 arch/arm/kernel/entry-armv.S    |   11
 arch/arm/kernel/entry-common.S  |   78
 arch/arm/kernel/etm.c           |   15
 arch/arm/kernel/ftrace.c        |  188
 arch/arm/kernel/head-common.S   |  305
 arch/arm/kernel/head-nommu.S    |    5
 arch/arm/kernel/head.S          |  323
 arch/arm/kernel/hw_breakpoint.c |  849
 arch/arm/kernel/module.c        |   68
 arch/arm/kernel/process.c       |   45
 arch/arm/kernel/ptrace.c        |  239
 arch/arm/kernel/setup.c         |   46
 arch/arm/kernel/smp.c           |   66
 arch/arm/kernel/unwind.c        |    2
 arch/arm/kernel/vmlinux.lds.S   |   39
19 files changed, 1914 insertions, 405 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 980b78e..5b9b268 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARM_UNWIND) += unwind.o obj-$(CONFIG_HAVE_TCM) += tcm.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 8214bfe..e5e1e53 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -165,6 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_OLD_MCOUNT EXPORT_SYMBOL(mcount); +#endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 85f2a01..82da661 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -102,8 +102,6 @@ int main(void) DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc)); DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr)); DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); - DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io)); - DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst)); BLANK(); DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index a38b487..a0f0752 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -22,11 +22,11 @@ #if defined(CONFIG_DEBUG_ICEDCC) @@ debug using ARM EmbeddedICE DCC channel -#if defined(CONFIG_CPU_V6) - - .macro addruart, rx, tmp + .macro addruart, rp, rv .endm +#if defined(CONFIG_CPU_V6) + .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 .endm @@ -51,9 +51,6 @@ #elif defined(CONFIG_CPU_V7) - .macro addruart, rx, tmp - .endm - .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 .endm @@ -71,9 +68,6 @@ wait: mrc p14, 0, pc, c0, c1, 0 #elif defined(CONFIG_CPU_XSCALE) - .macro addruart, rx, tmp - .endm - .macro senduart, rd, rx mcr p14, 0, \rd, c8, c0, 0 .endm @@ -98,9 +92,6 @@ wait: mrc p14, 0, pc, c0, c1, 0 #else - .macro addruart, rx, tmp - .endm - .macro senduart, rd, rx mcr p14, 0, \rd, c1, c0, 0 .endm @@ -130,6 +121,22 @@ wait: mrc p14, 0, pc, c0, c1, 0 #include <mach/debug-macro.S> #endif /* CONFIG_DEBUG_ICEDCC */ +#ifdef CONFIG_MMU + .macro addruart_current, rx, tmp1, tmp2 + addruart \tmp1, \tmp2 + mrc p15, 0, \rx, c1, c0 + tst \rx, #1 + moveq \rx, \tmp1 + movne \rx, \tmp2 + .endm + +#else /* !CONFIG_MMU */ + .macro addruart_current, rx, tmp1, tmp2 + addruart \rx, \tmp1 + .endm + +#endif /* CONFIG_MMU */ + /* * Useful debugging routines */ @@ -164,7 +171,7 @@ ENDPROC(printhex2) .ltorg ENTRY(printascii) - addruart r3, r1 + addruart_current r3, r1, r2 b 2f 1: waituart r2, r3 senduart r1, r3 @@ -180,7 +187,7 @@ ENTRY(printascii) ENDPROC(printascii) ENTRY(printch) - addruart r3, r1 + addruart_current r3, r1, r2 mov r1, r0 mov r0, #0 b 1b diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index bb8e93a..c09e357 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -46,7 +46,8 @@ * this macro assumes that irqstat (r6) and base (r5) are * preserved from get_irqnr_and_base above */ - test_for_ipi r0, r6, r5, lr + ALT_SMP(test_for_ipi r0, r6, r5, lr) + ALT_UP_B(9997f) movne r0, sp adrne lr, BSYM(1b) bne do_IPI @@ -57,6 +58,7 @@ 
adrne lr, BSYM(1b) bne do_local_timer #endif +9997: #endif .endm @@ -965,11 +967,8 @@ kuser_cmpxchg_fixup: beq 1b rsbs r0, r3, #0 /* beware -- each __kuser slot must be 8 instructions max */ -#ifdef CONFIG_SMP - b __kuser_memory_barrier -#else - usr_ret lr -#endif + ALT_SMP(b __kuser_memory_barrier) + ALT_UP(usr_ret lr) #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 7885722..8bfa987 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -129,30 +129,58 @@ ENDPROC(ret_from_fork) * clobber the ip register. This is OK because the ARM calling convention * allows it to be clobbered in subroutines and doesn't use it to hold * parameters.) + * + * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" + * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see + * arch/arm/kernel/ftrace.c). */ + +#ifndef CONFIG_OLD_MCOUNT +#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) +#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. +#endif +#endif + #ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(mcount) +ENTRY(__gnu_mcount_nc) + mov ip, lr + ldmia sp!, {lr} + mov pc, ip +ENDPROC(__gnu_mcount_nc) + +ENTRY(ftrace_caller) stmdb sp!, {r0-r3, lr} mov r0, lr sub r0, r0, #MCOUNT_INSN_SIZE + ldr r1, [sp, #20] - .globl mcount_call -mcount_call: + .global ftrace_call +ftrace_call: bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip +ENDPROC(ftrace_caller) -ENTRY(ftrace_caller) +#ifdef CONFIG_OLD_MCOUNT +ENTRY(mcount) + stmdb sp!, {lr} + ldr lr, [fp, #-4] + ldmia sp!, {pc} +ENDPROC(mcount) + +ENTRY(ftrace_caller_old) stmdb sp!, {r0-r3, lr} ldr r1, [fp, #-4] mov r0, lr sub r0, r0, #MCOUNT_INSN_SIZE - .globl ftrace_call -ftrace_call: + .globl ftrace_call_old +ftrace_call_old: bl ftrace_stub ldr lr, [fp, #-4] @ restore lr ldmia sp!, {r0-r3, pc} +ENDPROC(ftrace_caller_old) +#endif #else @@ -160,7 +188,7 @@ ENTRY(__gnu_mcount_nc) stmdb sp!, {r0-r3, lr} ldr r0, =ftrace_trace_function ldr r2, [r0] - adr r0, ftrace_stub + adr r0, .Lftrace_stub cmp r0, r2 bne gnu_trace ldmia sp!, {r0-r3, ip, lr} @@ -170,11 +198,19 @@ gnu_trace: ldr r1, [sp, #20] @ lr of instrumented routine mov r0, lr sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc + adr lr, BSYM(1f) mov pc, r2 +1: ldmia sp!, {r0-r3, ip, lr} mov pc, ip +ENDPROC(__gnu_mcount_nc) +#ifdef CONFIG_OLD_MCOUNT +/* + * This is under an ifdef in order to force link-time errors for people trying + * to build with !FRAME_POINTER with a GCC which doesn't use the new-style + * mcount. + */ ENTRY(mcount) stmdb sp!, {r0-r3, lr} ldr r0, =ftrace_trace_function @@ -193,12 +229,15 @@ trace: mov pc, r2 ldr lr, [fp, #-4] @ restore lr ldmia sp!, {r0-r3, pc} +ENDPROC(mcount) +#endif #endif /* CONFIG_DYNAMIC_FTRACE */ - .globl ftrace_stub -ftrace_stub: +ENTRY(ftrace_stub) +.Lftrace_stub: mov pc, lr +ENDPROC(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ @@ -295,7 +334,6 @@ ENTRY(vector_swi) get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer - ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing #if defined(CONFIG_OABI_COMPAT) /* @@ -312,8 +350,20 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif + ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args - tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? 
+ +#ifdef CONFIG_SECCOMP + tst r10, #_TIF_SECCOMP + beq 1f + mov r0, scno + bl __secure_computing + add r0, sp, #S_R0 + S_OFF @ pointer to regs + ldmia r0, {r0 - r3} @ have to reload r0 - r3 +1: +#endif + + tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? bne __sys_trace cmp scno, #NR_syscalls @ check upper syscall limit diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c index 33c7077..a48d512 100644 --- a/arch/arm/kernel/etm.c +++ b/arch/arm/kernel/etm.c @@ -30,6 +30,21 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shishkin"); +/* + * ETM tracer state + */ +struct tracectx { + unsigned int etb_bufsz; + void __iomem *etb_regs; + void __iomem *etm_regs; + unsigned long flags; + int ncmppairs; + int etm_portsz; + struct device *dev; + struct clk *emu_clk; + struct mutex mutex; +}; + static struct tracectx tracer; static inline bool trace_isrunning(struct tracectx *t) diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 0298286..971ac8c 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -2,102 +2,194 @@ * Dynamic function tracing support. * * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> + * Copyright (C) 2010 Rabin Vincent <rabin@rab.in> * * For licencing details, see COPYING. * * Defines low-level handling of mcount calls when the kernel * is compiled with the -pg flag. When using dynamic ftrace, the - * mcount call-sites get patched lazily with NOP till they are - * enabled. All code mutation routines here take effect atomically. + * mcount call-sites get patched with NOP till they are enabled. + * All code mutation routines here are called under stop_machine(). */ #include <linux/ftrace.h> +#include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/ftrace.h> -#define PC_OFFSET 8 -#define BL_OPCODE 0xeb000000 -#define BL_OFFSET_MASK 0x00ffffff +#ifdef CONFIG_THUMB2_KERNEL +#define NOP 0xeb04f85d /* pop.w {lr} */ +#else +#define NOP 0xe8bd4000 /* pop {lr} */ +#endif -static unsigned long bl_insn; -static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */ +#ifdef CONFIG_OLD_MCOUNT +#define OLD_MCOUNT_ADDR ((unsigned long) mcount) +#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) -unsigned char *ftrace_nop_replace(void) +#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ + +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) { - return (char *)&NOP; + return rec->arch.old_mcount ? 
OLD_NOP : NOP; } +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) +{ + if (!rec->arch.old_mcount) + return addr; + + if (addr == MCOUNT_ADDR) + addr = OLD_MCOUNT_ADDR; + else if (addr == FTRACE_ADDR) + addr = OLD_FTRACE_ADDR; + + return addr; +} +#else +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) +{ + return NOP; +} + +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) +{ + return addr; +} +#endif + /* construct a branch (BL) instruction to addr */ -unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) +#ifdef CONFIG_THUMB2_KERNEL +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) { + unsigned long s, j1, j2, i1, i2, imm10, imm11; + unsigned long first, second; long offset; - offset = (long)addr - (long)(pc + PC_OFFSET); + offset = (long)addr - (long)(pc + 4); + if (offset < -16777216 || offset > 16777214) { + WARN_ON_ONCE(1); + return 0; + } + + s = (offset >> 24) & 0x1; + i1 = (offset >> 23) & 0x1; + i2 = (offset >> 22) & 0x1; + imm10 = (offset >> 12) & 0x3ff; + imm11 = (offset >> 1) & 0x7ff; + + j1 = (!i1) ^ s; + j2 = (!i2) ^ s; + + first = 0xf000 | (s << 10) | imm10; + second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11; + + return (second << 16) | first; +} +#else +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) +{ + long offset; + + offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { /* Can't generate branches that far (from ARM ARM). Ftrace * doesn't generate branches outside of kernel text. */ WARN_ON_ONCE(1); - return NULL; + return 0; } - offset = (offset >> 2) & BL_OFFSET_MASK; - bl_insn = BL_OPCODE | offset; - return (unsigned char *)&bl_insn; -} -int ftrace_modify_code(unsigned long pc, unsigned char *old_code, - unsigned char *new_code) -{ - unsigned long err = 0, replaced = 0, old, new; + offset = (offset >> 2) & 0x00ffffff; - old = *(unsigned long *)old_code; - new = *(unsigned long *)new_code; + return 0xeb000000 | offset; +} +#endif - __asm__ __volatile__ ( - "1: ldr %1, [%2] \n" - " cmp %1, %4 \n" - "2: streq %3, [%2] \n" - " cmpne %1, %3 \n" - " movne %0, #2 \n" - "3:\n" +static int ftrace_modify_code(unsigned long pc, unsigned long old, + unsigned long new) +{ + unsigned long replaced; - ".pushsection .fixup, \"ax\"\n" - "4: mov %0, #1 \n" - " b 3b \n" - ".popsection\n" + if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) + return -EFAULT; - ".pushsection __ex_table, \"a\"\n" - " .long 1b, 4b \n" - " .long 2b, 4b \n" - ".popsection\n" + if (replaced != old) + return -EINVAL; - : "=r"(err), "=r"(replaced) - : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) - : "memory"); + if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) + return -EPERM; - if (!err && (replaced == old)) - flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); - return err; + return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { - int ret; unsigned long pc, old; - unsigned char *new; + unsigned long new; + int ret; pc = (unsigned long)&ftrace_call; memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(pc, (unsigned long)func); - ret = ftrace_modify_code(pc, (unsigned char *)&old, new); + + ret = ftrace_modify_code(pc, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (!ret) { + pc = (unsigned long)&ftrace_call_old; + memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); + new = ftrace_call_replace(pc, (unsigned 
long)func); + + ret = ftrace_modify_code(pc, old, new); + } +#endif + + return ret; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long new, old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(rec); + new = ftrace_call_replace(ip, adjust_address(rec, addr)); + + return ftrace_modify_code(rec->ip, old, new); +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + unsigned long old; + unsigned long new; + int ret; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (ret == -EINVAL && addr == MCOUNT_ADDR) { + rec->arch.old_mcount = true; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + } +#endif + return ret; } -/* run from ftrace_init with irqs disabled */ int __init ftrace_dyn_arch_init(void *data) { - ftrace_mcount_set(data); + *(unsigned long *)data = 0; + return 0; } diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index b9505aa..bbecaac 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -15,55 +15,6 @@ #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) - .align 2 - .type __switch_data, %object -__switch_data: - .long __mmap_switched - .long __data_loc @ r4 - .long _data @ r5 - .long __bss_start @ r6 - .long _end @ r7 - .long processor_id @ r4 - .long __machine_arch_type @ r5 - .long __atags_pointer @ r6 - .long cr_alignment @ r7 - .long init_thread_union + THREAD_START_SP @ sp - -/* - * The following fragment of code is executed with the MMU on in MMU mode, - * and uses absolute addresses; this is not position independent. - * - * r0 = cp#15 control register - * r1 = machine ID - * r2 = atags pointer - * r9 = processor ID - */ -__mmap_switched: - adr r3, __switch_data + 4 - - ldmia r3!, {r4, r5, r6, r7} - cmp r4, r5 @ Copy data segment if needed -1: cmpne r5, r6 - ldrne fp, [r4], #4 - strne fp, [r5], #4 - bne 1b - - mov fp, #0 @ Clear BSS (and zero fp) -1: cmp r6, r7 - strcc fp, [r6],#4 - bcc 1b - - ARM( ldmia r3, {r4, r5, r6, r7, sp}) - THUMB( ldmia r3, {r4, r5, r6, r7} ) - THUMB( ldr sp, [r3, #16] ) - str r9, [r4] @ Save processor ID - str r1, [r5] @ Save machine type - str r2, [r6] @ Save atags pointer - bic r4, r0, #CR_A @ Clear 'A' bit - stmia r7, {r0, r4} @ Save control register values - b start_kernel -ENDPROC(__mmap_switched) - /* * Exception handling. Something went wrong and we can't proceed. We * ought to tell the user, but since we don't have any guarantee that @@ -73,21 +24,7 @@ ENDPROC(__mmap_switched) * and hope for the best (useful if bootloader fails to pass a proper * machine ID for example). 
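
As a side note, the BL encoding that ftrace_call_replace() builds in the ARM case above can be checked by hand. A minimal user-space sketch, using the constants from that hunk and made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* ARM-mode BL: offset is relative to pc + 8 (pipeline), word-aligned,
 * and must fit in a signed 26-bit range (+/-32MB), exactly as the
 * range check in ftrace_call_replace() above enforces. */
static uint32_t arm_bl(uint32_t pc, uint32_t addr)
{
	long offset = (long)addr - (long)(pc + 8);

	if (offset < -33554432 || offset > 33554428)
		return 0;	/* mirrors the WARN_ON_ONCE failure path */

	return 0xeb000000 | ((offset >> 2) & 0x00ffffff);
}

int main(void)
{
	/* Hypothetical call site and target: prints 0xeb03fffe. */
	printf("0x%08lx\n", (unsigned long)arm_bl(0xc0008000, 0xc0108000));
	return 0;
}
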
*/ -__error_p: -#ifdef CONFIG_DEBUG_LL - adr r0, str_p1 - bl printascii - mov r0, r9 - bl printhex8 - adr r0, str_p2 - bl printascii - b __error -str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" -str_p2: .asciz ").\n" - .align -#endif -ENDPROC(__error_p) - + __HEAD __error_a: #ifdef CONFIG_DEBUG_LL mov r4, r1 @ preserve machine ID @@ -97,7 +34,7 @@ __error_a: bl printhex8 adr r0, str_a2 bl printascii - adr r3, 4f + adr r3, __lookup_machine_type_data ldmia r3, {r4, r5, r6} @ get machine desc list sub r4, r3, r4 @ get offset between virt&phys add r5, r5, r4 @ convert virt addresses to @@ -125,78 +62,6 @@ str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n" .align #endif -__error: -#ifdef CONFIG_ARCH_RPC -/* - * Turn the screen red on a error - RiscPC only. - */ - mov r0, #0x02000000 - mov r3, #0x11 - orr r3, r3, r3, lsl #8 - orr r3, r3, r3, lsl #16 - str r3, [r0], #4 - str r3, [r0], #4 - str r3, [r0], #4 - str r3, [r0], #4 -#endif -1: mov r0, r0 - b 1b -ENDPROC(__error) - - -/* - * Read processor ID register (CP#15, CR0), and look up in the linker-built - * supported processor list. Note that we can't use the absolute addresses - * for the __proc_info lists since we aren't running with the MMU on - * (and therefore, we are not in the correct address space). We have to - * calculate the offset. - * - * r9 = cpuid - * Returns: - * r3, r4, r6 corrupted - * r5 = proc_info pointer in physical address space - * r9 = cpuid (preserved) - */ -__lookup_processor_type: - adr r3, 3f - ldmia r3, {r5 - r7} - add r3, r3, #8 - sub r3, r3, r7 @ get offset between virt&phys - add r5, r5, r3 @ convert virt addresses to - add r6, r6, r3 @ physical address space -1: ldmia r5, {r3, r4} @ value, mask - and r4, r4, r9 @ mask wanted bits - teq r3, r4 - beq 2f - add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) - cmp r5, r6 - blo 1b - mov r5, #0 @ unknown processor -2: mov pc, lr -ENDPROC(__lookup_processor_type) - -/* - * This provides a C-API version of the above function. - */ -ENTRY(lookup_processor_type) - stmfd sp!, {r4 - r7, r9, lr} - mov r9, r0 - bl __lookup_processor_type - mov r0, r5 - ldmfd sp!, {r4 - r7, r9, pc} -ENDPROC(lookup_processor_type) - -/* - * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for - * more information about the __proc_info and __arch_info structures. - */ - .align 2 -3: .long __proc_info_begin - .long __proc_info_end -4: .long . - .long __arch_info_begin - .long __arch_info_end - /* * Lookup machine architecture in the linker-build list of architectures. * Note that we can't use the absolute addresses for the __arch_info @@ -209,7 +74,7 @@ ENDPROC(lookup_processor_type) * r5 = mach_info pointer in physical address space */ __lookup_machine_type: - adr r3, 4b + adr r3, __lookup_machine_type_data ldmia r3, {r4, r5, r6} sub r3, r3, r4 @ get offset between virt&phys add r5, r5, r3 @ convert virt addresses to @@ -225,15 +90,16 @@ __lookup_machine_type: ENDPROC(__lookup_machine_type) /* - * This provides a C-API version of the above function. + * Look in arch/arm/kernel/arch.[ch] for information about the + * __arch_info structures. */ -ENTRY(lookup_machine_type) - stmfd sp!, {r4 - r6, lr} - mov r1, r0 - bl __lookup_machine_type - mov r0, r5 - ldmfd sp!, {r4 - r6, pc} -ENDPROC(lookup_machine_type) + .align 2 + .type __lookup_machine_type_data, %object +__lookup_machine_type_data: + .long . + .long __arch_info_begin + .long __arch_info_end + .size __lookup_machine_type_data, . 
- __lookup_machine_type_data /* Determine validity of the r2 atags pointer. The heuristic requires * that the pointer be aligned, in the first 16k of physical RAM and @@ -265,3 +131,150 @@ __vet_atags: 1: mov r2, #0 mov pc, lr ENDPROC(__vet_atags) + +/* + * The following fragment of code is executed with the MMU on in MMU mode, + * and uses absolute addresses; this is not position independent. + * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r9 = processor ID + */ + __INIT +__mmap_switched: + adr r3, __mmap_switched_data + + ldmia r3!, {r4, r5, r6, r7} + cmp r4, r5 @ Copy data segment if needed +1: cmpne r5, r6 + ldrne fp, [r4], #4 + strne fp, [r5], #4 + bne 1b + + mov fp, #0 @ Clear BSS (and zero fp) +1: cmp r6, r7 + strcc fp, [r6],#4 + bcc 1b + + ARM( ldmia r3, {r4, r5, r6, r7, sp}) + THUMB( ldmia r3, {r4, r5, r6, r7} ) + THUMB( ldr sp, [r3, #16] ) + str r9, [r4] @ Save processor ID + str r1, [r5] @ Save machine type + str r2, [r6] @ Save atags pointer + bic r4, r0, #CR_A @ Clear 'A' bit + stmia r7, {r0, r4} @ Save control register values + b start_kernel +ENDPROC(__mmap_switched) + + .align 2 + .type __mmap_switched_data, %object +__mmap_switched_data: + .long __data_loc @ r4 + .long _sdata @ r5 + .long __bss_start @ r6 + .long _end @ r7 + .long processor_id @ r4 + .long __machine_arch_type @ r5 + .long __atags_pointer @ r6 + .long cr_alignment @ r7 + .long init_thread_union + THREAD_START_SP @ sp + .size __mmap_switched_data, . - __mmap_switched_data + +/* + * This provides a C-API version of __lookup_machine_type + */ +ENTRY(lookup_machine_type) + stmfd sp!, {r4 - r6, lr} + mov r1, r0 + bl __lookup_machine_type + mov r0, r5 + ldmfd sp!, {r4 - r6, pc} +ENDPROC(lookup_machine_type) + +/* + * This provides a C-API version of __lookup_processor_type + */ +ENTRY(lookup_processor_type) + stmfd sp!, {r4 - r6, r9, lr} + mov r9, r0 + bl __lookup_processor_type + mov r0, r5 + ldmfd sp!, {r4 - r6, r9, pc} +ENDPROC(lookup_processor_type) + +/* + * Read processor ID register (CP#15, CR0), and look up in the linker-built + * supported processor list. Note that we can't use the absolute addresses + * for the __proc_info lists since we aren't running with the MMU on + * (and therefore, we are not in the correct address space). We have to + * calculate the offset. + * + * r9 = cpuid + * Returns: + * r3, r4, r6 corrupted + * r5 = proc_info pointer in physical address space + * r9 = cpuid (preserved) + */ + __CPUINIT +__lookup_processor_type: + adr r3, __lookup_processor_type_data + ldmia r3, {r4 - r6} + sub r3, r3, r4 @ get offset between virt&phys + add r5, r5, r3 @ convert virt addresses to + add r6, r6, r3 @ physical address space +1: ldmia r5, {r3, r4} @ value, mask + and r4, r4, r9 @ mask wanted bits + teq r3, r4 + beq 2f + add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) + cmp r5, r6 + blo 1b + mov r5, #0 @ unknown processor +2: mov pc, lr +ENDPROC(__lookup_processor_type) + +/* + * Look in <asm/procinfo.h> for information about the __proc_info structure. + */ + .align 2 + .type __lookup_processor_type_data, %object +__lookup_processor_type_data: + .long . + .long __proc_info_begin + .long __proc_info_end + .size __lookup_processor_type_data, . 
- __lookup_processor_type_data + +__error_p: +#ifdef CONFIG_DEBUG_LL + adr r0, str_p1 + bl printascii + mov r0, r9 + bl printhex8 + adr r0, str_p2 + bl printascii + b __error +str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" +str_p2: .asciz ").\n" + .align +#endif +ENDPROC(__error_p) + +__error: +#ifdef CONFIG_ARCH_RPC +/* + * Turn the screen red on a error - RiscPC only. + */ + mov r0, #0x02000000 + mov r3, #0x11 + orr r3, r3, r3, lsl #8 + orr r3, r3, r3, lsl #16 + str r3, [r0], #4 + str r3, [r0], #4 + str r3, [r0], #4 + str r3, [r0], #4 +#endif +1: mov r0, r0 + b 1b +ENDPROC(__error) diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 573b803..814ce1a 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -48,8 +48,6 @@ ENTRY(stext) movs r8, r5 @ invalid machine (r5=0)? beq __error_a @ yes, error 'a' - ldr r13, __switch_data @ address to jump to after - @ the initialization is done adr lr, BSYM(__after_proc_init) @ return (PIC) address ARM( add pc, r10, #PROCINFO_INITFUNC ) THUMB( add r12, r10, #PROCINFO_INITFUNC ) @@ -87,8 +85,7 @@ __after_proc_init: mcr p15, 0, r0, c1, c0, 0 @ write control reg #endif /* CONFIG_CPU_CP15 */ - mov r3, r13 - mov pc, r3 @ clear the BSS and jump + b __mmap_switched @ clear the BSS and jump @ to start_kernel ENDPROC(__after_proc_init) .ltorg diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index eb62bf9..dd6b369 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -22,6 +22,10 @@ #include <asm/thread_info.h> #include <asm/system.h> +#ifdef CONFIG_DEBUG_LL +#include <mach/debug-macro.S> +#endif + #if (PHYS_OFFSET & 0x001fffff) #error "PHYS_OFFSET must be at an even 2MiB boundary!" #endif @@ -86,6 +90,9 @@ ENTRY(stext) movs r8, r5 @ invalid machine (r5=0)? beq __error_a @ yes, error 'a' bl __vet_atags +#ifdef CONFIG_SMP_ON_UP + bl __fixup_smp +#endif bl __create_page_tables /* @@ -95,113 +102,15 @@ ENTRY(stext) * above. On return, the CPU will be ready for the MMU to be * turned on, and r0 will hold the CPU control register value. */ - ldr r13, __switch_data @ address to jump to after + ldr r13, =__mmap_switched @ address to jump to after @ mmu has been enabled - adr lr, BSYM(__enable_mmu) @ return (PIC) address + adr lr, BSYM(1f) @ return (PIC) address ARM( add pc, r10, #PROCINFO_INITFUNC ) THUMB( add r12, r10, #PROCINFO_INITFUNC ) THUMB( mov pc, r12 ) +1: b __enable_mmu ENDPROC(stext) - -#if defined(CONFIG_SMP) -ENTRY(secondary_startup) - /* - * Common entry point for secondary CPUs. - * - * Ensure that we're in SVC mode, and IRQs are disabled. Lookup - * the processor type - there is no need to check the machine type - * as it has already been validated by the primary processor. - */ - setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 - mrc p15, 0, r9, c0, c0 @ get processor id - bl __lookup_processor_type - movs r10, r5 @ invalid processor? - moveq r0, #'p' @ yes, error 'p' - beq __error - - /* - * Use the page tables supplied from __cpu_up. 
- */ - adr r4, __secondary_data - ldmia r4, {r5, r7, r12} @ address to jump to after - sub r4, r4, r5 @ mmu has been enabled - ldr r4, [r7, r4] @ get secondary_data.pgdir - adr lr, BSYM(__enable_mmu) @ return address - mov r13, r12 @ __secondary_switched address - ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor - @ (return control reg) - THUMB( add r12, r10, #PROCINFO_INITFUNC ) - THUMB( mov pc, r12 ) -ENDPROC(secondary_startup) - - /* - * r6 = &secondary_data - */ -ENTRY(__secondary_switched) - ldr sp, [r7, #4] @ get secondary_data.stack - mov fp, #0 - b secondary_start_kernel -ENDPROC(__secondary_switched) - - .type __secondary_data, %object -__secondary_data: - .long . - .long secondary_data - .long __secondary_switched -#endif /* defined(CONFIG_SMP) */ - - - -/* - * Setup common bits before finally enabling the MMU. Essentially - * this is just loading the page table pointer and domain access - * registers. - */ -__enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP - orr r0, r0, #CR_A -#else - bic r0, r0, #CR_A -#endif -#ifdef CONFIG_CPU_DCACHE_DISABLE - bic r0, r0, #CR_C -#endif -#ifdef CONFIG_CPU_BPREDICT_DISABLE - bic r0, r0, #CR_Z -#endif -#ifdef CONFIG_CPU_ICACHE_DISABLE - bic r0, r0, #CR_I -#endif - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) - mcr p15, 0, r5, c3, c0, 0 @ load domain access register - mcr p15, 0, r4, c2, c0, 0 @ load page table pointer - b __turn_mmu_on -ENDPROC(__enable_mmu) - -/* - * Enable the MMU. This completely changes the structure of the visible - * memory space. You will not be able to trace execution through this. - * If you have an enquiry about this, *please* check the linux-arm-kernel - * mailing list archives BEFORE sending another post to the list. - * - * r0 = cp#15 control register - * r13 = *virtual* address to jump to upon completion - * - * other registers depend on the function called upon completion - */ - .align 5 -__turn_mmu_on: - mov r0, r0 - mcr p15, 0, r0, c1, c0, 0 @ write control reg - mrc p15, 0, r3, c0, c0, 0 @ read id reg - mov r3, r3 - mov r3, r13 - mov pc, r3 -ENDPROC(__turn_mmu_on) - + .ltorg /* * Setup the initial page tables. We only setup the barest @@ -213,7 +122,7 @@ ENDPROC(__turn_mmu_on) * r10 = procinfo * * Returns: - * r0, r3, r6, r7 corrupted + * r0, r3, r5-r7 corrupted * r4 = physical page table address */ __create_page_tables: @@ -235,20 +144,30 @@ __create_page_tables: ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* - * Create identity mapping for first MB of kernel to - * cater for the MMU enable. This identity mapping - * will be removed by paging_init(). We use our current program - * counter to determine corresponding section base address. + * Create identity mapping to cater for __enable_mmu. + * This identity mapping will be removed by paging_init(). */ - mov r6, pc - mov r6, r6, lsr #20 @ start of kernel section - orr r3, r7, r6, lsl #20 @ flags + kernel base - str r3, [r4, r6, lsl #2] @ identity mapping + adr r0, __enable_mmu_loc + ldmia r0, {r3, r5, r6} + sub r0, r0, r3 @ virt->phys offset + add r5, r5, r0 @ phys __enable_mmu + add r6, r6, r0 @ phys __enable_mmu_end + mov r5, r5, lsr #20 + mov r6, r6, lsr #20 + +1: orr r3, r7, r5, lsl #20 @ flags + kernel base + str r3, [r4, r5, lsl #2] @ identity mapping + teq r5, r6 + addne r5, r5, #1 @ next section + bne 1b /* * Now setup the pagetables for our kernel direct * mapped region. 
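
Read as C, the identity-mapping loop above fills 4-byte first-level descriptors, one per 1MB section; the table index is simply the top 12 bits of the address. A rough sketch of that arithmetic (the names here are illustrative, not kernel symbols):

#include <stdint.h>

/* One ARM first-level section descriptor maps 1MB: index by va >> 20,
 * entry = 1MB-aligned physical base | per-CPU mmuflags (r7 above). */
static void map_section(uint32_t *pgd, uint32_t va, uint32_t pa,
			uint32_t mmuflags)
{
	pgd[va >> 20] = (pa & 0xfff00000) | mmuflags;
}

/* Identity-map phys_start..phys_end, mirroring the
 * "orr r3, r7, r5, lsl #20; str r3, [r4, r5, lsl #2]" loop above. */
static void identity_map(uint32_t *pgd, uint32_t phys_start,
			 uint32_t phys_end, uint32_t mmuflags)
{
	uint32_t sec;

	for (sec = phys_start >> 20; sec <= phys_end >> 20; sec++)
		map_section(pgd, sec << 20, sec << 20, mmuflags);
}
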
*/ + mov r3, pc + mov r3, r3, lsr #20 + orr r3, r7, r3, lsl #20 add r0, r4, #(KERNEL_START & 0xff000000) >> 18 str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! ldr r6, =(KERNEL_END - 1) @@ -289,24 +208,35 @@ __create_page_tables: str r6, [r0] #ifdef CONFIG_DEBUG_LL - ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags +#ifndef CONFIG_DEBUG_ICEDCC /* * Map in IO space for serial debugging. * This allows debug messages to be output * via a serial console before paging_init. */ - ldr r3, [r8, #MACHINFO_PGOFFIO] + addruart r7, r3 + + mov r3, r3, lsr #20 + mov r3, r3, lsl #2 + add r0, r4, r3 rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long) cmp r3, #0x0800 @ limit to 512MB movhi r3, #0x0800 add r6, r0, r3 - ldr r3, [r8, #MACHINFO_PHYSIO] - orr r3, r3, r7 + mov r3, r7, lsr #20 + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags + orr r3, r7, r3, lsl #20 1: str r3, [r0], #4 add r3, r3, #1 << 20 teq r0, r6 bne 1b + +#else /* CONFIG_DEBUG_ICEDCC */ + /* we don't need any serial debugging mappings for ICEDCC */ + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags +#endif /* !CONFIG_DEBUG_ICEDCC */ + #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) /* * If we're using the NetWinder or CATS, we also need to map @@ -332,5 +262,168 @@ __create_page_tables: mov pc, lr ENDPROC(__create_page_tables) .ltorg +__enable_mmu_loc: + .long . + .long __enable_mmu + .long __enable_mmu_end + +#if defined(CONFIG_SMP) + __CPUINIT +ENTRY(secondary_startup) + /* + * Common entry point for secondary CPUs. + * + * Ensure that we're in SVC mode, and IRQs are disabled. Lookup + * the processor type - there is no need to check the machine type + * as it has already been validated by the primary processor. + */ + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 + mrc p15, 0, r9, c0, c0 @ get processor id + bl __lookup_processor_type + movs r10, r5 @ invalid processor? + moveq r0, #'p' @ yes, error 'p' + beq __error_p + + /* + * Use the page tables supplied from __cpu_up. + */ + adr r4, __secondary_data + ldmia r4, {r5, r7, r12} @ address to jump to after + sub r4, r4, r5 @ mmu has been enabled + ldr r4, [r7, r4] @ get secondary_data.pgdir + adr lr, BSYM(__enable_mmu) @ return address + mov r13, r12 @ __secondary_switched address + ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor + @ (return control reg) + THUMB( add r12, r10, #PROCINFO_INITFUNC ) + THUMB( mov pc, r12 ) +ENDPROC(secondary_startup) + + /* + * r6 = &secondary_data + */ +ENTRY(__secondary_switched) + ldr sp, [r7, #4] @ get secondary_data.stack + mov fp, #0 + b secondary_start_kernel +ENDPROC(__secondary_switched) + + .type __secondary_data, %object +__secondary_data: + .long . + .long secondary_data + .long __secondary_switched +#endif /* defined(CONFIG_SMP) */ + + + +/* + * Setup common bits before finally enabling the MMU. Essentially + * this is just loading the page table pointer and domain access + * registers. 
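
__enable_mmu_loc and __secondary_data above use the same position-independent idiom as the lookup tables in head-common.S: store the pool's own link-time address (.long .) alongside the virtual addresses of interest, then subtract it from the run-time adr result to get the virt-to-phys delta. A hedged C rendering of the arithmetic:

/* The pool stores its own link-time (virtual) address followed by the
 * virtual addresses of interest.  At run time, "adr" yields the pool's
 * physical address, so the difference converts virtual to physical. */
struct pic_pool {
	unsigned long link_addr;   /* .long .               */
	unsigned long virt_start;  /* e.g. __enable_mmu     */
	unsigned long virt_end;    /* e.g. __enable_mmu_end */
};

static unsigned long virt_to_runtime(const struct pic_pool *pool,
				     unsigned long virt)
{
	/* (run-time pool address) - (.long . value) == phys - virt */
	unsigned long offset = (unsigned long)pool - pool->link_addr;

	return virt + offset;
}
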
+ * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r4 = page table pointer + * r9 = processor ID + * r13 = *virtual* address to jump to upon completion + */ +__enable_mmu: +#ifdef CONFIG_ALIGNMENT_TRAP + orr r0, r0, #CR_A +#else + bic r0, r0, #CR_A +#endif +#ifdef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CR_C +#endif +#ifdef CONFIG_CPU_BPREDICT_DISABLE + bic r0, r0, #CR_Z +#endif +#ifdef CONFIG_CPU_ICACHE_DISABLE + bic r0, r0, #CR_I +#endif + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + mcr p15, 0, r5, c3, c0, 0 @ load domain access register + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer + b __turn_mmu_on +ENDPROC(__enable_mmu) + +/* + * Enable the MMU. This completely changes the structure of the visible + * memory space. You will not be able to trace execution through this. + * If you have an enquiry about this, *please* check the linux-arm-kernel + * mailing list archives BEFORE sending another post to the list. + * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r9 = processor ID + * r13 = *virtual* address to jump to upon completion + * + * other registers depend on the function called upon completion + */ + .align 5 +__turn_mmu_on: + mov r0, r0 + mcr p15, 0, r0, c1, c0, 0 @ write control reg + mrc p15, 0, r3, c0, c0, 0 @ read id reg + mov r3, r3 + mov r3, r13 + mov pc, r3 +__enable_mmu_end: +ENDPROC(__turn_mmu_on) + + +#ifdef CONFIG_SMP_ON_UP +__fixup_smp: + mov r7, #0x00070000 + orr r6, r7, #0xff000000 @ mask 0xff070000 + orr r7, r7, #0x41000000 @ val 0x41070000 + and r0, r9, r6 + teq r0, r7 @ ARM CPU and ARMv6/v7? + bne __fixup_smp_on_up @ no, assume UP + + orr r6, r6, #0x0000ff00 + orr r6, r6, #0x000000f0 @ mask 0xff07fff0 + orr r7, r7, #0x0000b000 + orr r7, r7, #0x00000020 @ val 0x4107b020 + and r0, r9, r6 + teq r0, r7 @ ARM 11MPCore? + moveq pc, lr @ yes, assume SMP + + mrc p15, 0, r0, c0, c0, 5 @ read MPIDR + tst r0, #1 << 31 + movne pc, lr @ bit 31 => SMP + +__fixup_smp_on_up: + adr r0, 1f + ldmia r0, {r3, r6, r7} + sub r3, r0, r3 + add r6, r6, r3 + add r7, r7, r3 +2: cmp r6, r7 + ldmia r6!, {r0, r4} + strlo r4, [r0, r3] + blo 2b + mov pc, lr +ENDPROC(__fixup_smp) + +1: .word . + .word __smpalt_begin + .word __smpalt_end + + .pushsection .data + .globl smp_on_up +smp_on_up: + ALT_SMP(.long 1) + ALT_UP(.long 0) + .popsection + +#endif #include "head-common.S" diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c new file mode 100644 index 0000000..54593b0 --- /dev/null +++ b/arch/arm/kernel/hw_breakpoint.c @@ -0,0 +1,849 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
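
The magic numbers in __fixup_smp above are MIDR/MPIDR tests: mask 0xff070000 against value 0x41070000 checks for an ARM-designed v6/v7 core, mask 0xff07fff0 against 0x4107b020 matches the ARM11 MPCore part specifically, and anything else is decided by MPIDR bit 31. A small sketch of the same checks (illustrative only):

#include <stdint.h>

/* Mirror the __fixup_smp tests: returns nonzero if the core should be
 * treated as SMP (no UP fixups applied), zero if UP fixups are needed. */
static int looks_smp(uint32_t midr, uint32_t mpidr)
{
	if ((midr & 0xff070000) != 0x41070000)
		return 0;			/* not ARM v6/v7: assume UP  */
	if ((midr & 0xff07fff0) == 0x4107b020)
		return 1;			/* ARM 11MPCore: assume SMP  */
	return (mpidr >> 31) & 1;		/* v7: MPIDR bit 31 => SMP   */
}
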
+ * + * Copyright (C) 2009, 2010 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + */ +#define pr_fmt(fmt) "hw-breakpoint: " fmt + +#include <linux/errno.h> +#include <linux/perf_event.h> +#include <linux/hw_breakpoint.h> +#include <linux/smp.h> + +#include <asm/cacheflush.h> +#include <asm/cputype.h> +#include <asm/current.h> +#include <asm/hw_breakpoint.h> +#include <asm/kdebug.h> +#include <asm/system.h> +#include <asm/traps.h> + +/* Breakpoint currently in use for each BRP. */ +static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); + +/* Watchpoint currently in use for each WRP. */ +static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); + +/* Number of BRP/WRP registers on this CPU. */ +static int core_num_brps; +static int core_num_wrps; + +/* Debug architecture version. */ +static u8 debug_arch; + +/* Maximum supported watchpoint length. */ +static u8 max_watchpoint_len; + +/* Determine number of BRP registers available. */ +static int get_num_brps(void) +{ + u32 didr; + ARM_DBG_READ(c0, 0, didr); + return ((didr >> 24) & 0xf) + 1; +} + +/* Determine number of WRP registers available. */ +static int get_num_wrps(void) +{ + /* + * FIXME: When a watchpoint fires, the only way to work out which + * watchpoint it was is by disassembling the faulting instruction + * and working out the address of the memory access. + * + * Furthermore, we can only do this if the watchpoint was precise + * since imprecise watchpoints prevent us from calculating register + * based addresses. + * + * For the time being, we only report 1 watchpoint register so we + * always know which watchpoint fired. In the future we can either + * add a disassembler and address generation emulator, or we can + * insert a check to see if the DFAR is set on watchpoint exception + * entry [the ARM ARM states that the DFAR is UNKNOWN, but + * experience shows that it is set on some implementations]. + */ + +#if 0 + u32 didr, wrps; + ARM_DBG_READ(c0, 0, didr); + return ((didr >> 28) & 0xf) + 1; +#endif + + return 1; +} + +int hw_breakpoint_slots(int type) +{ + /* + * We can be called early, so don't rely on + * our static variables being initialised. + */ + switch (type) { + case TYPE_INST: + return get_num_brps(); + case TYPE_DATA: + return get_num_wrps(); + default: + pr_warning("unknown slot type: %d\n", type); + return 0; + } +} + +/* Determine debug architecture. */ +static u8 get_debug_arch(void) +{ + u32 didr; + + /* Do we implement the extended CPUID interface? */ + if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { + pr_warning("CPUID feature registers not supported. " + "Assuming v6 debug is present.\n"); + return ARM_DEBUG_ARCH_V6; + } + + ARM_DBG_READ(c0, 0, didr); + return (didr >> 16) & 0xf; +} + +/* Does this core support mismatch breakpoints? 
*/ +static int core_has_mismatch_bps(void) +{ + return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1; +} + +u8 arch_get_debug_arch(void) +{ + return debug_arch; +} + +#define READ_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_READ(c ## M, OP2, VAL); \ + break + +#define WRITE_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_WRITE(c ## M, OP2, VAL);\ + break + +#define GEN_READ_WB_REG_CASES(OP2, VAL) \ + READ_WB_REG_CASE(OP2, 0, VAL); \ + READ_WB_REG_CASE(OP2, 1, VAL); \ + READ_WB_REG_CASE(OP2, 2, VAL); \ + READ_WB_REG_CASE(OP2, 3, VAL); \ + READ_WB_REG_CASE(OP2, 4, VAL); \ + READ_WB_REG_CASE(OP2, 5, VAL); \ + READ_WB_REG_CASE(OP2, 6, VAL); \ + READ_WB_REG_CASE(OP2, 7, VAL); \ + READ_WB_REG_CASE(OP2, 8, VAL); \ + READ_WB_REG_CASE(OP2, 9, VAL); \ + READ_WB_REG_CASE(OP2, 10, VAL); \ + READ_WB_REG_CASE(OP2, 11, VAL); \ + READ_WB_REG_CASE(OP2, 12, VAL); \ + READ_WB_REG_CASE(OP2, 13, VAL); \ + READ_WB_REG_CASE(OP2, 14, VAL); \ + READ_WB_REG_CASE(OP2, 15, VAL) + +#define GEN_WRITE_WB_REG_CASES(OP2, VAL) \ + WRITE_WB_REG_CASE(OP2, 0, VAL); \ + WRITE_WB_REG_CASE(OP2, 1, VAL); \ + WRITE_WB_REG_CASE(OP2, 2, VAL); \ + WRITE_WB_REG_CASE(OP2, 3, VAL); \ + WRITE_WB_REG_CASE(OP2, 4, VAL); \ + WRITE_WB_REG_CASE(OP2, 5, VAL); \ + WRITE_WB_REG_CASE(OP2, 6, VAL); \ + WRITE_WB_REG_CASE(OP2, 7, VAL); \ + WRITE_WB_REG_CASE(OP2, 8, VAL); \ + WRITE_WB_REG_CASE(OP2, 9, VAL); \ + WRITE_WB_REG_CASE(OP2, 10, VAL); \ + WRITE_WB_REG_CASE(OP2, 11, VAL); \ + WRITE_WB_REG_CASE(OP2, 12, VAL); \ + WRITE_WB_REG_CASE(OP2, 13, VAL); \ + WRITE_WB_REG_CASE(OP2, 14, VAL); \ + WRITE_WB_REG_CASE(OP2, 15, VAL) + +static u32 read_wb_reg(int n) +{ + u32 val = 0; + + switch (n) { + GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); + default: + pr_warning("attempt to read from unknown breakpoint " + "register %d\n", n); + } + + return val; +} + +static void write_wb_reg(int n, u32 val) +{ + switch (n) { + GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); + default: + pr_warning("attempt to write to unknown breakpoint " + "register %d\n", n); + } + isb(); +} + +/* + * In order to access the breakpoint/watchpoint control registers, + * we must be running in debug monitor mode. Unfortunately, we can + * be put into halting debug mode at any time by an external debugger + * but there is nothing we can do to prevent that. + */ +static int enable_monitor_mode(void) +{ + u32 dscr; + int ret = 0; + + ARM_DBG_READ(c1, 0, dscr); + + /* Ensure that halting mode is disabled. */ + if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled." + "Unable to access hardware resources.")) { + ret = -EPERM; + goto out; + } + + /* Write to the corresponding DSCR. */ + switch (debug_arch) { + case ARM_DEBUG_ARCH_V6: + case ARM_DEBUG_ARCH_V6_1: + ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN)); + break; + case ARM_DEBUG_ARCH_V7_ECP14: + ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN)); + break; + default: + ret = -ENODEV; + goto out; + } + + /* Check that the write made it through. */ + ARM_DBG_READ(c1, 0, dscr); + if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN), + "failed to enable monitor mode.")) { + ret = -EPERM; + } + +out: + return ret; +} + +/* + * Check if 8-bit byte-address select is available. + * This clobbers WRP 0. 
+ */ +static u8 get_max_wp_len(void) +{ + u32 ctrl_reg; + struct arch_hw_breakpoint_ctrl ctrl; + u8 size = 4; + + if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14) + goto out; + + if (enable_monitor_mode()) + goto out; + + memset(&ctrl, 0, sizeof(ctrl)); + ctrl.len = ARM_BREAKPOINT_LEN_8; + ctrl_reg = encode_ctrl_reg(ctrl); + + write_wb_reg(ARM_BASE_WVR, 0); + write_wb_reg(ARM_BASE_WCR, ctrl_reg); + if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg) + size = 8; + +out: + return size; +} + +u8 arch_get_max_wp_len(void) +{ + return max_watchpoint_len; +} + +/* + * Handler for reactivating a suspended watchpoint when the single + * step `mismatch' breakpoint is triggered. + */ +static void wp_single_step_handler(struct perf_event *bp, int unused, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + perf_event_enable(counter_arch_bp(bp)->suspended_wp); + unregister_hw_breakpoint(bp); +} + +static int bp_is_single_step(struct perf_event *bp) +{ + return bp->overflow_handler == wp_single_step_handler; +} + +/* + * Install a perf counter breakpoint. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + int i, max_slots, ctrl_base, val_base, ret = 0; + + /* Ensure that we are in monitor mode and halting mode is disabled. */ + ret = enable_monitor_mode(); + if (ret) + goto out; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + ctrl_base = ARM_BASE_BCR; + val_base = ARM_BASE_BVR; + slots = __get_cpu_var(bp_on_reg); + max_slots = core_num_brps - 1; + + if (bp_is_single_step(bp)) { + info->ctrl.mismatch = 1; + i = max_slots; + slots[i] = bp; + goto setup; + } + } else { + /* Watchpoint */ + ctrl_base = ARM_BASE_WCR; + val_base = ARM_BASE_WVR; + slots = __get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + } + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (!*slot) { + *slot = bp; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) { + ret = -EBUSY; + goto out; + } + +setup: + /* Setup the address register. */ + write_wb_reg(val_base + i, info->address); + + /* Setup the control register. */ + write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1); + +out: + return ret; +} + +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + int i, max_slots, base; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + base = ARM_BASE_BCR; + slots = __get_cpu_var(bp_on_reg); + max_slots = core_num_brps - 1; + + if (bp_is_single_step(bp)) { + i = max_slots; + slots[i] = NULL; + goto reset; + } + } else { + /* Watchpoint */ + base = ARM_BASE_WCR; + slots = __get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + } + + /* Remove the breakpoint. */ + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (*slot == bp) { + *slot = NULL; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) + return; + +reset: + /* Reset the control register. 
*/ + write_wb_reg(base + i, 0); +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case ARM_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case ARM_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case ARM_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; + case ARM_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; + } + + return len_in_bytes; +} + +/* + * Check whether bp virtual address is in kernel space. + */ +int arch_check_bp_in_kernelspace(struct perf_event *bp) +{ + unsigned int len; + unsigned long va; + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + va = info->address; + len = get_hbp_len(info->ctrl.len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. + * Hopefully this will disappear when ptrace can bypass the conversion + * to generic breakpoint descriptions. + */ +int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type) +{ + /* Type */ + switch (ctrl.type) { + case ARM_BREAKPOINT_EXECUTE: + *gen_type = HW_BREAKPOINT_X; + break; + case ARM_BREAKPOINT_LOAD: + *gen_type = HW_BREAKPOINT_R; + break; + case ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_W; + break; + case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_RW; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (ctrl.len) { + case ARM_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; + break; + case ARM_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; + break; + case ARM_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; + break; + case ARM_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Construct an arch_hw_breakpoint from a perf_event. + */ +static int arch_build_bp_info(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + /* Type */ + switch (bp->attr.bp_type) { + case HW_BREAKPOINT_X: + info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + break; + case HW_BREAKPOINT_R: + info->ctrl.type = ARM_BREAKPOINT_LOAD; + break; + case HW_BREAKPOINT_W: + info->ctrl.type = ARM_BREAKPOINT_STORE; + break; + case HW_BREAKPOINT_RW: + info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (bp->attr.bp_len) { + case HW_BREAKPOINT_LEN_1: + info->ctrl.len = ARM_BREAKPOINT_LEN_1; + break; + case HW_BREAKPOINT_LEN_2: + info->ctrl.len = ARM_BREAKPOINT_LEN_2; + break; + case HW_BREAKPOINT_LEN_4: + info->ctrl.len = ARM_BREAKPOINT_LEN_4; + break; + case HW_BREAKPOINT_LEN_8: + info->ctrl.len = ARM_BREAKPOINT_LEN_8; + if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE) + && max_watchpoint_len >= 8) + break; + default: + return -EINVAL; + } + + /* Address */ + info->address = bp->attr.bp_addr; + + /* Privilege */ + info->ctrl.privilege = ARM_BREAKPOINT_USER; + if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp)) + info->ctrl.privilege |= ARM_BREAKPOINT_PRIV; + + /* Enabled? */ + info->ctrl.enabled = !bp->attr.disabled; + + /* Mismatch */ + info->ctrl.mismatch = 0; + + return 0; +} + +/* + * Validate the arch-specific HW Breakpoint register settings. + */ +int arch_validate_hwbkpt_settings(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int ret = 0; + u32 bytelen, max_len, offset, alignment_mask = 0x3; + + /* Build the arch_hw_breakpoint. 
*/ + ret = arch_build_bp_info(bp); + if (ret) + goto out; + + /* Check address alignment. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + alignment_mask = 0x7; + if (info->address & alignment_mask) { + /* + * Try to fix the alignment. This may result in a length + * that is too large, so we must check for that. + */ + bytelen = get_hbp_len(info->ctrl.len); + max_len = info->ctrl.type == ARM_BREAKPOINT_EXECUTE ? 4 : + max_watchpoint_len; + + if (max_len >= 8) + offset = info->address & 0x7; + else + offset = info->address & 0x3; + + if (bytelen > (1 << ((max_len - (offset + 1)) >> 1))) { + ret = -EFBIG; + goto out; + } + + info->ctrl.len <<= offset; + info->address &= ~offset; + + pr_debug("breakpoint alignment fixup: length = 0x%x, " + "address = 0x%x\n", info->ctrl.len, info->address); + } + + /* + * Currently we rely on an overflow handler to take + * care of single-stepping the breakpoint when it fires. + * In the case of userspace breakpoints on a core with V7 debug, + * we can use the mismatch feature as a poor-man's hardware single-step. + */ + if (WARN_ONCE(!bp->overflow_handler && + (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()), + "overflow handler required but none found")) { + ret = -EINVAL; + goto out; + } +out: + return ret; +} + +static void update_mismatch_flag(int idx, int flag) +{ + struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]); + struct arch_hw_breakpoint *info; + + if (bp == NULL) + return; + + info = counter_arch_bp(bp); + + /* Update the mismatch field to enter/exit `single-step' mode */ + if (!bp->overflow_handler && info->ctrl.mismatch != flag) { + info->ctrl.mismatch = flag; + write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1); + } +} + +static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs) +{ + int i; + struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg); + struct arch_hw_breakpoint *info; + struct perf_event_attr attr; + + /* Without a disassembler, we can only handle 1 watchpoint. */ + BUG_ON(core_num_wrps > 1); + + hw_breakpoint_init(&attr); + attr.bp_addr = regs->ARM_pc & ~0x3; + attr.bp_len = HW_BREAKPOINT_LEN_4; + attr.bp_type = HW_BREAKPOINT_X; + + for (i = 0; i < core_num_wrps; ++i) { + rcu_read_lock(); + + if (slots[i] == NULL) { + rcu_read_unlock(); + continue; + } + + /* + * The DFAR is an unknown value. Since we only allow a + * single watchpoint, we can set the trigger to the lowest + * possible faulting address. + */ + info = counter_arch_bp(slots[i]); + info->trigger = slots[i]->attr.bp_addr; + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + perf_bp_event(slots[i], regs); + + /* + * If no overflow handler is present, insert a temporary + * mismatch breakpoint so we can single-step over the + * watchpoint trigger. + */ + if (!slots[i]->overflow_handler) { + bp = register_user_hw_breakpoint(&attr, + wp_single_step_handler, + current); + counter_arch_bp(bp)->suspended_wp = slots[i]; + perf_event_disable(slots[i]); + } + + rcu_read_unlock(); + } +} + +static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) +{ + int i; + int mismatch; + u32 ctrl_reg, val, addr; + struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg); + struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + /* The exception entry code places the amended lr in the PC. 
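
The breakpoint/watchpoint length fields are byte-address-select masks, which is why the alignment fixup above can shift them and why breakpoint_handler() below tests (1 << (addr & 0x3)) against ctrl.len. A worked sketch, assuming the usual mask values (LEN_1 = 0x1, LEN_2 = 0x3, LEN_4 = 0xf):

/* arch_validate_hwbkpt_settings() above: a 2-byte watchpoint on the
 * unaligned address 0x1002 gets base 0x1000 and mask 0x3 << 2 = 0xc,
 * selecting byte lanes 2 and 3 of the word. */
static unsigned int bas_fixup(unsigned long *addr, unsigned int len_mask)
{
	unsigned int offset = *addr & 0x3;

	*addr &= ~0x3UL;		/* word-align the base address */
	return len_mask << offset;	/* shift the byte-select mask  */
}

/* breakpoint_handler() below: an access at addr matches when its byte
 * lane is in the select mask, e.g. 0x1003 -> 1 << 3 -> hits 0xc. */
static int bas_match(unsigned long addr, unsigned int bas)
{
	return (1 << (addr & 0x3)) & bas;
}
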
*/ + addr = regs->ARM_pc; + + for (i = 0; i < core_num_brps; ++i) { + rcu_read_lock(); + + bp = slots[i]; + + if (bp == NULL) { + rcu_read_unlock(); + continue; + } + + mismatch = 0; + + /* Check if the breakpoint value matches. */ + val = read_wb_reg(ARM_BASE_BVR + i); + if (val != (addr & ~0x3)) + goto unlock; + + /* Possible match, check the byte address select to confirm. */ + ctrl_reg = read_wb_reg(ARM_BASE_BCR + i); + decode_ctrl_reg(ctrl_reg, &ctrl); + if ((1 << (addr & 0x3)) & ctrl.len) { + mismatch = 1; + info = counter_arch_bp(bp); + info->trigger = addr; + } + +unlock: + if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) { + pr_debug("breakpoint fired: address = 0x%x\n", addr); + perf_bp_event(bp, regs); + } + + update_mismatch_flag(i, mismatch); + rcu_read_unlock(); + } +} + +/* + * Called from either the Data Abort Handler [watchpoint] or the + * Prefetch Abort Handler [breakpoint]. + */ +static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + int ret = 1; /* Unhandled fault. */ + u32 dscr; + + /* We only handle watchpoints and hardware breakpoints. */ + ARM_DBG_READ(c1, 0, dscr); + + /* Perform perf callbacks. */ + switch (ARM_DSCR_MOE(dscr)) { + case ARM_ENTRY_BREAKPOINT: + breakpoint_handler(addr, regs); + break; + case ARM_ENTRY_ASYNC_WATCHPOINT: + WARN_ON("Asynchronous watchpoint exception taken. " + "Debugging results may be unreliable"); + case ARM_ENTRY_SYNC_WATCHPOINT: + watchpoint_handler(addr, regs); + break; + default: + goto out; + } + + ret = 0; +out: + return ret; +} + +/* + * One-time initialisation. + */ +static void __init reset_ctrl_regs(void *unused) +{ + int i; + + if (enable_monitor_mode()) + return; + + for (i = 0; i < core_num_brps; ++i) { + write_wb_reg(ARM_BASE_BCR + i, 0UL); + write_wb_reg(ARM_BASE_BVR + i, 0UL); + } + + for (i = 0; i < core_num_wrps; ++i) { + write_wb_reg(ARM_BASE_WCR + i, 0UL); + write_wb_reg(ARM_BASE_WVR + i, 0UL); + } +} + +static int __init arch_hw_breakpoint_init(void) +{ + int ret = 0; + u32 dscr; + + debug_arch = get_debug_arch(); + + if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) { + pr_info("debug architecture 0x%x unsupported.\n", debug_arch); + ret = -ENODEV; + goto out; + } + + /* Determine how many BRPs/WRPs are available. */ + core_num_brps = get_num_brps(); + core_num_wrps = get_num_wrps(); + + pr_info("found %d breakpoint and %d watchpoint registers.\n", + core_num_brps, core_num_wrps); + + if (core_has_mismatch_bps()) + pr_info("1 breakpoint reserved for watchpoint single-step.\n"); + + ARM_DBG_READ(c1, 0, dscr); + if (dscr & ARM_DSCR_HDBGEN) { + pr_warning("halting debug mode enabled. Assuming maximum " + "watchpoint size of 4 bytes."); + } else { + /* Work out the maximum supported watchpoint length. */ + max_watchpoint_len = get_max_wp_len(); + pr_info("maximum watchpoint size is %u bytes.\n", + max_watchpoint_len); + + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */ + smp_call_function(reset_ctrl_regs, NULL, 1); + reset_ctrl_regs(NULL); + } + + /* Register debug fault handler. */ + hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, + "watchpoint debug exception"); + hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, + "breakpoint debug exception"); + +out: + return ret; +} +arch_initcall(arch_hw_breakpoint_init); + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ +} + +/* + * Dummy function to register with die_notifier. 
+ */ +int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 6b46058..d9bd786 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -69,20 +69,31 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, { #ifdef CONFIG_ARM_UNWIND Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + struct arm_unwind_mapping *maps = mod->arch.map; for (s = sechdrs; s < sechdrs_end; s++) { - if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_init = s; - else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_devinit = s; - else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_core = s; - else if (strcmp(".init.text", secstrings + s->sh_name) == 0) - mod->arch.sec_init_text = s; - else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0) - mod->arch.sec_devinit_text = s; - else if (strcmp(".text", secstrings + s->sh_name) == 0) - mod->arch.sec_core_text = s; + char const *secname = secstrings + s->sh_name; + + if (strcmp(".ARM.exidx.init.text", secname) == 0) + maps[ARM_SEC_INIT].unw_sec = s; + else if (strcmp(".ARM.exidx.devinit.text", secname) == 0) + maps[ARM_SEC_DEVINIT].unw_sec = s; + else if (strcmp(".ARM.exidx", secname) == 0) + maps[ARM_SEC_CORE].unw_sec = s; + else if (strcmp(".ARM.exidx.exit.text", secname) == 0) + maps[ARM_SEC_EXIT].unw_sec = s; + else if (strcmp(".ARM.exidx.devexit.text", secname) == 0) + maps[ARM_SEC_DEVEXIT].unw_sec = s; + else if (strcmp(".init.text", secname) == 0) + maps[ARM_SEC_INIT].sec_text = s; + else if (strcmp(".devinit.text", secname) == 0) + maps[ARM_SEC_DEVINIT].sec_text = s; + else if (strcmp(".text", secname) == 0) + maps[ARM_SEC_CORE].sec_text = s; + else if (strcmp(".exit.text", secname) == 0) + maps[ARM_SEC_EXIT].sec_text = s; + else if (strcmp(".devexit.text", secname) == 0) + maps[ARM_SEC_DEVEXIT].sec_text = s; } #endif return 0; @@ -292,31 +303,22 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, #ifdef CONFIG_ARM_UNWIND static void register_unwind_tables(struct module *mod) { - if (mod->arch.unw_sec_init && mod->arch.sec_init_text) - mod->arch.unwind_init = - unwind_table_add(mod->arch.unw_sec_init->sh_addr, - mod->arch.unw_sec_init->sh_size, - mod->arch.sec_init_text->sh_addr, - mod->arch.sec_init_text->sh_size); - if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text) - mod->arch.unwind_devinit = - unwind_table_add(mod->arch.unw_sec_devinit->sh_addr, - mod->arch.unw_sec_devinit->sh_size, - mod->arch.sec_devinit_text->sh_addr, - mod->arch.sec_devinit_text->sh_size); - if (mod->arch.unw_sec_core && mod->arch.sec_core_text) - mod->arch.unwind_core = - unwind_table_add(mod->arch.unw_sec_core->sh_addr, - mod->arch.unw_sec_core->sh_size, - mod->arch.sec_core_text->sh_addr, - mod->arch.sec_core_text->sh_size); + int i; + for (i = 0; i < ARM_SEC_MAX; ++i) { + struct arm_unwind_mapping *map = &mod->arch.map[i]; + if (map->unw_sec && map->sec_text) + map->unwind = unwind_table_add(map->unw_sec->sh_addr, + map->unw_sec->sh_size, + map->sec_text->sh_addr, + map->sec_text->sh_size); + } } static void unregister_unwind_tables(struct module *mod) { - unwind_table_del(mod->arch.unwind_init); - unwind_table_del(mod->arch.unwind_devinit); - unwind_table_del(mod->arch.unwind_core); + int i = ARM_SEC_MAX; + while (--i >= 0) + unwind_table_del(mod->arch.map[i].unwind); } #else static inline void 
register_unwind_tables(struct module *mod) { }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 401e38b..e76fcaa 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/utsname.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/cacheflush.h>
 #include <asm/leds.h>
@@ -135,6 +136,25 @@ EXPORT_SYMBOL(pm_power_off);
 void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard the old value
+ * of pm_idle and pick up the new one. Required when changing the pm_idle
+ * handler on SMP systems.
+ *
+ * The caller must have changed pm_idle to the new value before calling
+ * this; no CPU will still be using the old pm_idle value after it returns.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 /*
  * This is our default idle handler. We need to disable
@@ -317,6 +337,8 @@ void flush_thread(void)
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = current;
 
+	flush_ptrace_hw_breakpoint(tsk);
+
 	memset(thread->used_cp, 0, sizeof(thread->used_cp));
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));
@@ -345,6 +367,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	thread->cpu_context.sp = (unsigned long)childregs;
 	thread->cpu_context.pc = (unsigned long)ret_from_fork;
 
+	clear_ptrace_hw_breakpoint(p);
+
 	if (clone_flags & CLONE_SETTLS)
 		thread->tp_value = regs->ARM_r3;
 
@@ -458,3 +482,24 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	unsigned long range_end = mm->brk + 0x02000000;
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
+
+/*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+ * for it so it is visible through ptrace and /proc/<pid>/mem.
+ */
+
+int vectors_user_mapping(void)
+{
+	struct mm_struct *mm = current->mm;
+	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
+				       VM_READ | VM_EXEC |
+				       VM_MAYREAD | VM_MAYEXEC |
+				       VM_ALWAYSDUMP | VM_RESERVED,
+				       NULL);
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index f99d489..e0cb637 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,8 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -847,6 +849,232 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 }
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+/*
+ * Convert a virtual register number into an index for a thread_info
+ * breakpoint array. Breakpoints are identified using positive numbers
+ * whilst watchpoints are negative. The registers are laid out as pairs
+ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
+ * Register 0 is reserved for describing resource information.
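+ * For example, registers 1 and 2 hold the address and control values
+ * for breakpoint 0, whilst registers -1 and -2 hold the address and
+ * control values for watchpoint 0.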
+ */
+static int ptrace_hbp_num_to_idx(long num)
+{
+	if (num < 0)
+		num = (ARM_MAX_BRP << 1) - num;
+	return (num - 1) >> 1;
+}
+
+/*
+ * Returns the virtual register number for the address of the
+ * breakpoint at index idx.
+ */
+static long ptrace_hbp_idx_to_num(int idx)
+{
+	long mid = ARM_MAX_BRP << 1;
+	long num = (idx << 1) + 1;
+	return num > mid ? mid - num : num;
+}
+
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+	long num;
+	int i;
+	siginfo_t info;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
+		if (current->thread.debug.hbp[i] == bp)
+			break;
+
+	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
+
+	info.si_signo = SIGTRAP;
+	info.si_errno = (int)num;
+	info.si_code = TRAP_HWBKPT;
+	info.si_addr = (void __user *)(bkpt->trigger);
+
+	force_sig_info(SIGTRAP, &info, current);
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in
+ * the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
+		if (t->debug.hbp[i]) {
+			unregister_hw_breakpoint(t->debug.hbp[i]);
+			t->debug.hbp[i] = NULL;
+		}
+	}
+}
+
+static u32 ptrace_get_hbp_resource_info(void)
+{
+	u8 num_brps, num_wrps, debug_arch, wp_len;
+	u32 reg = 0;
+
+	num_brps = hw_breakpoint_slots(TYPE_INST);
+	num_wrps = hw_breakpoint_slots(TYPE_DATA);
+	debug_arch = arch_get_debug_arch();
+	wp_len = arch_get_max_wp_len();
+
+	reg |= debug_arch;
+	reg <<= 8;
+	reg |= wp_len;
+	reg <<= 8;
+	reg |= num_wrps;
+	reg <<= 8;
+	reg |= num_brps;
+
+	return reg;
+}
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+	struct perf_event_attr attr;
+
+	ptrace_breakpoint_init(&attr);
+
+	/* Initialise fields to sane defaults. */
+	attr.bp_addr = 0;
+	attr.bp_len = HW_BREAKPOINT_LEN_4;
+	attr.bp_type = type;
+	attr.disabled = 1;
+
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+}
+
+static int ptrace_gethbpregs(struct task_struct *tsk, long num,
+			     unsigned long __user *data)
+{
+	u32 reg;
+	int idx, ret = 0;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl arch_ctrl;
+
+	if (num == 0) {
+		reg = ptrace_get_hbp_resource_info();
+	} else {
+		idx = ptrace_hbp_num_to_idx(num);
+		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bp = tsk->thread.debug.hbp[idx];
+		if (!bp) {
+			reg = 0;
+			goto put;
+		}
+
+		arch_ctrl = counter_arch_bp(bp)->ctrl;
+
+		/*
+		 * Fix up the len because we may have adjusted it
+		 * to compensate for an unaligned address.
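+		 * (The len field stores the byte-address-select mask, so
+		 * shifting it down until bit 0 is set recovers the length
+		 * that the debugger originally requested.)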
+		 */
+		while (!(arch_ctrl.len & 0x1))
+			arch_ctrl.len >>= 1;
+
+		if (num & 0x1)
+			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
+	}
+
+put:
+	if (put_user(reg, data))
+		ret = -EFAULT;
+
+out:
+	return ret;
+}
+
+static int ptrace_sethbpregs(struct task_struct *tsk, long num,
+			     unsigned long __user *data)
+{
+	int idx, gen_len, gen_type, implied_type, ret = 0;
+	u32 user_val;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl ctrl;
+	struct perf_event_attr attr;
+
+	if (num == 0)
+		goto out;
+	else if (num < 0)
+		implied_type = HW_BREAKPOINT_RW;
+	else
+		implied_type = HW_BREAKPOINT_X;
+
+	idx = ptrace_hbp_num_to_idx(num);
+	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (get_user(user_val, data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	bp = tsk->thread.debug.hbp[idx];
+	if (!bp) {
+		bp = ptrace_hbp_create(tsk, implied_type);
+		if (IS_ERR(bp)) {
+			ret = PTR_ERR(bp);
+			goto out;
+		}
+		tsk->thread.debug.hbp[idx] = bp;
+	}
+
+	attr = bp->attr;
+
+	if (num & 0x1) {
+		/* Address */
+		attr.bp_addr = user_val;
+	} else {
+		/* Control */
+		decode_ctrl_reg(user_val, &ctrl);
+		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
+		if (ret)
+			goto out;
+
+		if ((gen_type & implied_type) != gen_type) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		attr.bp_len = gen_len;
+		attr.bp_type = gen_type;
+		attr.disabled = !ctrl.enabled;
+	}
+
+	ret = modify_user_hw_breakpoint(bp, &attr);
+out:
+	return ret;
+}
+#endif
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret;
@@ -916,6 +1144,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			break;
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+		case PTRACE_GETHBPREGS:
+			ret = ptrace_gethbpregs(child, addr,
+						(unsigned long __user *)data);
+			break;
+		case PTRACE_SETHBPREGS:
+			ret = ptrace_sethbpregs(child, addr,
+						(unsigned long __user *)data);
+			break;
+#endif
+
 		default:
 			ret = ptrace_request(child, request, addr, data);
 			break;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d5231ae..336f14e 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
 #include <asm/procinfo.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp_plat.h>
 #include <asm/mach-types.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -238,6 +239,35 @@ int cpu_architecture(void)
 	return cpu_arch;
 }
 
+static int cpu_has_aliasing_icache(unsigned int arch)
+{
+	int aliasing_icache;
+	unsigned int id_reg, num_sets, line_size;
+
+	/* arch specifies the register format */
+	switch (arch) {
+	case CPU_ARCH_ARMv7:
+		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
+		    : /* No output operands */
+		    : "r" (1));
+		isb();
+		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
+		    : "=r" (id_reg));
+		line_size = 4 << ((id_reg & 0x7) + 2);
+		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
+		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
+		break;
+	case CPU_ARCH_ARMv6:
+		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
+		break;
+	default:
+		/* I-cache aliases will be handled by D-cache aliasing code */
+		aliasing_icache = 0;
+	}
+
+	return aliasing_icache;
+}
+
 static void __init cacheid_init(void)
 {
 	unsigned int cachetype = read_cpuid_cachetype();
@@ -249,10 +279,15 @@ static void __init cacheid_init(void)
 			cacheid = CACHEID_VIPT_NONALIASING;
 			if ((cachetype & (3 << 14)) == 1 << 14)
 				cacheid |= CACHEID_ASID_TAGGED;
-		} else if (cachetype & (1 << 23))
+			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
+				cacheid |= CACHEID_VIPT_I_ALIASING;
+		} else if (cachetype & (1 << 23)) {
 			cacheid = CACHEID_VIPT_ALIASING;
-		else
+		} else {
 			cacheid = CACHEID_VIPT_NONALIASING;
+			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
+				cacheid |= CACHEID_VIPT_I_ALIASING;
+		}
 	} else {
 		cacheid = CACHEID_VIVT;
 	}
@@ -263,7 +298,7 @@ static void __init cacheid_init(void)
 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" :
 		"unknown",
 		cache_is_vivt() ? "VIVT" :
 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
-		cache_is_vipt_aliasing() ? "VIPT aliasing" :
+		icache_is_vipt_aliasing() ? "VIPT aliasing" :
 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
 }
@@ -490,7 +525,7 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
 
 	kernel_code.start   = virt_to_phys(_text);
 	kernel_code.end     = virt_to_phys(_etext - 1);
-	kernel_data.start   = virt_to_phys(_data);
+	kernel_data.start   = virt_to_phys(_sdata);
 	kernel_data.end     = virt_to_phys(_end - 1);
 
 	for (i = 0; i < mi->nr_banks; i++) {
@@ -825,7 +860,8 @@ void __init setup_arch(char **cmdline_p)
 	request_standard_resources(&meminfo, mdesc);
 
 #ifdef CONFIG_SMP
-	smp_init_cpus();
+	if (is_smp())
+		smp_init_cpus();
 #endif
 	reserve_crashkernel();
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 40dc74f..8c19595 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
@@ -67,12 +68,47 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };
 
+static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
+	unsigned long end)
+{
+	unsigned long addr, prot;
+	pmd_t *pmd;
+
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+		prot |= PMD_BIT4;
+
+	for (addr = start & PGDIR_MASK; addr < end;) {
+		pmd = pmd_offset(pgd + pgd_index(addr), addr);
+		pmd[0] = __pmd(addr | prot);
+		addr += SECTION_SIZE;
+		pmd[1] = __pmd(addr | prot);
+		addr += SECTION_SIZE;
+		flush_pmd_entry(pmd);
+		outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	}
+}
+
+static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
+	unsigned long end)
+{
+	unsigned long addr;
+	pmd_t *pmd;
+
+	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
+		pmd = pmd_offset(pgd + pgd_index(addr), addr);
+		pmd[0] = __pmd(0);
+		pmd[1] = __pmd(0);
+		clean_pmd_entry(pmd);
+		outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	}
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
-	pmd_t *pmd;
 	int ret;
 
 	/*
@@ -101,11 +137,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * a 1:1 mapping for the physical address of the kernel.
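 	 * (The secondary CPU comes up with the MMU off and runs from
 	 * physical addresses until it can turn the MMU on, so its
 	 * instruction fetches are only valid with this identity
 	 * mapping in place.)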
 	 */
 	pgd = pgd_alloc(&init_mm);
-	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
-	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
-		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
-	flush_pmd_entry(pmd);
-	outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	if (!pgd)
+		return -ENOMEM;
+
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
+	}
 
 	/*
 	 * We need to tell the secondary core where to find
@@ -143,8 +184,14 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;
 
-	*pmd = __pmd(0);
-	clean_pmd_entry(pmd);
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
+	}
+
 	pgd_free(&init_mm, pgd);
 
 	if (ret) {
@@ -567,7 +614,8 @@ void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(&mask, IPI_CPU_STOP);
+	if (!cpus_empty(mask))
+		send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd81a91..2a16176 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -146,6 +146,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
 		    addr < table->end_addr) {
 			idx = search_index(addr, table->start,
 					   table->stop - 1);
+			/* Move-to-front to exploit common traces */
+			list_move(&table->list, &unwind_tables);
 			break;
 		}
 	}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b16c079..1953e3d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,19 @@
 #include <asm/memory.h>
 #include <asm/page.h>
 
+#define PROC_INFO							\
+	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
+	*(.proc.info.init)						\
+	VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)		x
+#else
+#define ARM_CPU_DISCARD(x)	x
+#define ARM_CPU_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -31,15 +44,18 @@ SECTIONS
 		HEAD_TEXT
 		INIT_TEXT
 		_einittext = .;
-		__proc_info_begin = .;
-			*(.proc.info.init)
-		__proc_info_end = .;
+		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
 			*(.arch.info.init)
 		__arch_info_end = .;
 		__tagtable_begin = .;
 			*(.taglist.init)
 		__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+		__smpalt_begin = .;
+			*(.alt.smp.init)
+		__smpalt_end = .;
+#endif
 
 		INIT_SETUP(16)
 
@@ -68,10 +84,8 @@ SECTIONS
 	/DISCARD/ : {
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
-#ifndef CONFIG_HOTPLUG_CPU
-		*(.ARM.exidx.cpuexit.text)
-		*(.ARM.extab.cpuexit.text)
-#endif
+		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
 #ifndef CONFIG_HOTPLUG
 		*(.ARM.exidx.devexit.text)
 		*(.ARM.extab.devexit.text)
@@ -100,12 +114,11 @@ SECTIONS
 		*(.glue_7)
 		*(.glue_7t)
 		*(.got)			/* Global offset table		*/
+		ARM_CPU_KEEP(PROC_INFO)
 	}
 
 	RO_DATA(PAGE_SIZE)
 
-	_etext = .;			/* End of text and rodata section */
-
 #ifdef CONFIG_ARM_UNWIND
 	/*
 	 * Stack unwinding tables
 	 */
@@ -123,6 +136,8 @@ SECTIONS
 	}
 #endif
 
+	_etext = .;			/* End of text and rodata section */
+
#ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */
 	. = PAGE_OFFSET + TEXT_OFFSET;
@@ -237,6 +252,12 @@ SECTIONS
 
 	/* Default discards */
 	DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+	/DISCARD/ : {
+		*(.alt.smp.init)
+	}
+#endif
 }
 
 /*
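
A note on the match test in breakpoint_handler() (hw_breakpoint.c above): the value register holds a word-aligned address, and the byte-address-select (BAS) bits of the decoded control register say which bytes within that word may fire. The sketch below replays the same two-stage test outside the kernel; the address and the BAS mask are invented purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int addr = 0x8002;	/* faulting address (invented) */
		unsigned int bvr  = 0x8000;	/* word-aligned value register */
		unsigned int bas  = 0x4;	/* control selects byte 2 only */

		/* Stage 1: the word-aligned addresses must match. */
		if (bvr != (addr & ~0x3))
			return 0;

		/* Stage 2: the faulting byte must be enabled in BAS. */
		if ((1 << (addr & 0x3)) & bas)
			printf("breakpoint hit at 0x%x\n", addr);

		return 0;
	}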
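The cpu_idle_wait() added to process.c only works in the order its comment describes: publish the new handler, then cross-call. A caller would look roughly like this (new_idle is a hypothetical replacement handler, not part of the patch):

	pm_idle = new_idle;	/* publish the new handler first */
	cpu_idle_wait();	/* then kick every CPU out of the old one */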
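The virtual register numbering used by PTRACE_GETHBPREGS/PTRACE_SETHBPREGS can be checked with a user-space copy of the two conversion helpers from ptrace.c. ARM_MAX_BRP is assumed to be 16 here purely for illustration:

	#include <stdio.h>

	#define ARM_MAX_BRP 16	/* assumed value, for illustration only */

	/* User-space copies of ptrace_hbp_num_to_idx() and
	 * ptrace_hbp_idx_to_num() from the patch above. */
	static int num_to_idx(long num)
	{
		if (num < 0)
			num = (ARM_MAX_BRP << 1) - num;
		return (num - 1) >> 1;
	}

	static long idx_to_num(int idx)
	{
		long mid = ARM_MAX_BRP << 1;
		long num = (idx << 1) + 1;
		return num > mid ? mid - num : num;
	}

	int main(void)
	{
		/* Breakpoint 0 pairs registers 1 and 2 on slot 0. */
		printf("%d %d\n", num_to_idx(1), num_to_idx(2));	/* 0 0 */
		/* Watchpoint 0 pairs registers -1 and -2 on slot 16. */
		printf("%d %d\n", num_to_idx(-1), num_to_idx(-2));	/* 16 16 */
		/* The address register number for slots 0 and 16. */
		printf("%ld %ld\n", idx_to_num(0), idx_to_num(16));	/* 1 -1 */
		return 0;
	}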
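Reading virtual register 0 returns the resource word built by ptrace_get_hbp_resource_info(): four byte-sized fields, most significant byte first. A debugger-side decoder simply reverses the shifts (a sketch, not an existing kernel or library API):

	#include <stdio.h>

	static void decode_hbp_resource_info(unsigned int reg)
	{
		printf("debug arch 0x%02x, max wp len %u, %u wrps, %u brps\n",
		       (reg >> 24) & 0xff,	/* debug architecture version */
		       (reg >> 16) & 0xff,	/* maximum watchpoint length */
		       (reg >> 8) & 0xff,	/* number of watchpoints */
		       reg & 0xff);		/* number of breakpoints */
	}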
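Finally, cpu_has_aliasing_icache() in setup.c decides whether one instruction-cache way spans more than a page: CCSIDR bits 2:0 encode log2(words per line) minus two and bits 27:13 encode the number of sets minus one, so line size times sets gives the way size. A worked example with an assumed CCSIDR value and 4K pages:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed CCSIDR: sets-1 = 127 in bits 27:13 (128 sets),
		 * line size encoding 1 in bits 2:0 (32-byte lines). */
		unsigned int id_reg    = (127u << 13) | 1;
		unsigned int line_size = 4 << ((id_reg & 0x7) + 2);	/* 32 */
		unsigned int num_sets  = ((id_reg >> 13) & 0x7fff) + 1;	/* 128 */

		/* 32 * 128 = 4096: one way covers exactly one 4K page, so
		 * this cache would not alias; any larger way would. */
		printf("way size = %u bytes\n", line_size * num_sets);
		return 0;
	}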