From 292ec42af7c6361435fe9df50cd59ec76f6741c6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 4 Feb 2011 10:36:39 +0000
Subject: ARM: pm: add function to set WFI low-power mode for SMP CPUs

Add a function to set the SCU low-power mode for SMP CPUs. This
centralizes the functionality rather than exposing the SCU register
definitions to each platform.

Signed-off-by: Russell King
---
 arch/arm/kernel/smp_scu.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 9ab4149..a1e757c 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
+
+/*
+ * Set the executing CPU's power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}
--
cgit v1.1

From 753790e713d80b50b867fa1ed32ec0eb5e82ae8e Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 6 Feb 2011 15:32:24 +0000
Subject: ARM: move cache/processor/fault glue to separate include files

This allows the cache/processor/fault glue to be more easily used
from assembler code. Tested on Assabet and Tegra 2.

Tested-by: Colin Cross
Signed-off-by: Russell King
---
 arch/arm/kernel/asm-offsets.c | 2 ++
 arch/arm/kernel/entry-armv.S  | 3 ++-
 2 files changed, 4 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da661..5302a91 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea..e8d8856 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
  */
 #include
-#include
+#include
+#include
 #include
 #include
 #include
--
cgit v1.1

From 71efb063f4a145ae420be054f5a91dcf7c19b375 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 18 Feb 2011 16:21:06 +0100
Subject: ARM: 6742/1: pmu: avoid setting IRQ affinity on UP systems

Now that we can execute a CONFIG_SMP kernel on a uniprocessor system,
extra care has to be taken in the PMU IRQ affinity setting code to
ensure that we don't always fail to initialise. This patch changes the
CPU PMU initialisation code so that when we only have a single IRQ,
whose affinity cannot be changed at the controller, we report success
(0) rather than -EINVAL.
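A minimal standalone sketch of that decision (the helper name is
hypothetical; the real change is in the hunk below) — a single PMU IRQ
whose affinity cannot be moved is taken to mean a uniprocessor machine,
so initialisation succeeds instead of bailing out with -EINVAL:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical helper illustrating the uniprocessor fallback. */
static int pmu_single_irq_ok(struct platform_device *pdev)
{
	if (pdev->num_resources == 1 &&
	    !irq_can_set_affinity(platform_get_irq(pdev, 0)))
		return 0;	/* UP machine: nothing to route */

	return -EINVAL;		/* caller must set affinity per IRQ */
}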
Reported-by: Avik Sil
Acked-by: Jamie Iles
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/pmu.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96e..2c79eec 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 		   irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }

 static int init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];

-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;

-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}

-out:
 	return err;
 }
--
cgit v1.1

From 315cfe7835c9a3fe27f15519bdeee8bf0a293e33 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 15 Feb 2011 18:06:57 +0100
Subject: ARM: 6676/1: Correct the cpu_architecture() function for ARMv7

If ID_MMFR0[3:0] >= 3, the architecture version is ARMv7. The code
previously only tested for ID_MMFR0[3:0] == 3.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/setup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6..5ea4fb7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 	 * Register 0 and check for VMSAv7 or PMSAv7
 	 */
 	asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));
-	if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-	    (mmfr0 & 0x000000f0) == 0x00000030)
+	if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+	    (mmfr0 & 0x000000f0) >= 0x00000030)
 		cpu_arch = CPU_ARCH_ARMv7;
 	else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 		 (mmfr0 & 0x000000f0) == 0x00000020)
--
cgit v1.1

From dc810efb0ca5702c9d96782b99282d4b4383e877 Mon Sep 17 00:00:00 2001
From: Pawel Moll
Date: Wed, 16 Feb 2011 18:54:01 +0100
Subject: ARM: 6740/1: Place correctly notes section in the linker script

Commit 18991197b4b588255ccabf472ebc84db7b66a19c added the --build-id
linker option when the toolchain supports it. The ARM toolchain does,
but for some reason it places the section at 0 when the linker script
doesn't mention it explicitly.

Commit 1e621a8e3752367d4aae78a8ab00a18fb2793f34 worked around the
problem by removing this section from the binary image with explicit
objcopy options, but it still exists in vmlinux, confusing tools like
debuggers and perf.

This problem was discussed here:

http://lists.infradead.org/pipermail/linux-arm-kernel/2010-May/015994.html

but the proposed changes to the linker script were substantial.

This patch simply places NOTES (36 bytes long, at least when compiled
with the CodeSourcery toolchain) between data and bss, which seems to
be the right place (and is suggested by the sample linker script in
include/asm-generic/vmlinux.lds.h).
It is enough to place it correctly in vmlinux (so debuggers are happy):

  Section Headers:
    [11] .data             PROGBITS   c07ce000 7ce000 020fc0 00  WA  0  0 32
    [12] .notes            NOTE       c07eefc0 7eefc0 000024 00  AX  0  0  4
    [13] .bss              NOBITS     c07ef000 7eefe4 01e628 00  WA  0  0 32

  Program Headers:
    LOAD  0x008000 0xc0008000 0xc0008000 0x7e6fe4 0x805628 RWE 0x8000
    NOTE  0x7eefc0 0xc07eefc0 0xc07eefc0 0x00024  0x00024  R E 0x4

  Section to Segment mapping:
    Segment Sections...
     00     <...> .data .notes .bss
     01     .notes

and to get it exposed as /sys/kernel/notes used by perf tools.

Signed-off-by: Pawel Moll
Signed-off-by: Russell King
---
 arch/arm/kernel/vmlinux.lds.S | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3..558bd81 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -247,6 +247,8 @@ SECTIONS
 	}
 #endif

+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
--
cgit v1.1

From 53399053eb505cf541b2405bd9d9bca5ecfb96fb Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 20 Feb 2011 12:22:52 +0000
Subject: ARM: Ensure predictable endian state on signal handler entry

Ensure a predictable endian state when entering signal handlers. This
avoids programs which use SETEND to momentarily switch their endian
state from having their signal handlers entered with an unpredictable
endian state.

Cc:
Acked-by: Dave Martin
Signed-off-by: Russell King
---
 arch/arm/kernel/signal.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a6..abaf844 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;

 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
--
cgit v1.1

From a9ad21fed09cb95d34af9474be0831525b30c4c6 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 21 Feb 2011 10:13:36 +0000
Subject: ARM: Keep exit text/data around for SMP_ON_UP

When SMP_ON_UP is used and the spinlocks are inlined, we end up with
inline spinlocks in the exit code, with references from the SMP
alternatives section to the exit sections. This causes link-time
errors. Avoid this by placing the exit sections in the init-discarded
region.

Cc:
Tested-by: Dave Martin
Signed-off-by: Russell King
---
 arch/arm/kernel/vmlinux.lds.S | 9 +++++++++
 1 file changed, 9 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 558bd81..6146279 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif

+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -43,6 +49,7 @@ SECTIONS
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
--
cgit v1.1

From 5a5af730536fbf15fc354980cba2a0400afa6b76 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 21 Feb 2011 04:37:20 +0100
Subject: ARM: 6745/1: kprobes insn decoding fix

Marcin Slusarz says:

> In arch/arm/kernel/kprobes-decode.c there's a function
> arm_kprobe_decode_insn which does:
>
> } else if ((insn & 0x0e000000) == 0x0c400000) {
> ...
>
> This is always false, so the code below is dead.
> I found this bug with Coccinelle (http://coccinelle.lip6.fr/).

Reported-by: Marcin Slusarz
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/kprobes-decode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f005..8f6ed43 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)

 		return space_cccc_1100_010x(insn, asi);

-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {

 		return space_cccc_110x(insn, asi);
--
cgit v1.1

From f6b0fa02e8b0708d17d631afce456524eadf87ff Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 6 Feb 2011 15:48:39 +0000
Subject: ARM: pm: add generic CPU suspend/resume support

This adds core support for saving and restoring CPU coprocessor
registers for suspend/resume support. This contains support for suspend
with ARM920, ARM926, SA11x0, PXA25x, PXA27x, PXA3xx, V6 and V7 CPUs.
Tested on Assabet and Tegra 2.

Tested-by: Colin Cross
Tested-by: Kukjin Kim
Signed-off-by: Russell King
---
 arch/arm/kernel/Makefile      |   1 +
 arch/arm/kernel/asm-offsets.c |   9 ++++
 arch/arm/kernel/sleep.S       | 109 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 119 insertions(+)
 create mode 100644 arch/arm/kernel/sleep.S
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 185ee82..74554f1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES)		+= armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
+obj-$(CONFIG_PM)		+= sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o

diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5302a91..927522c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -116,6 +117,14 @@ int main(void)
 #ifdef MULTI_PABORT
   DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
 #endif
+#ifdef MULTI_CPU
+  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
+  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
+  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
+#endif
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);

diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
new file mode 100644
index 0000000..2ba1794
--- /dev/null
+++ b/arch/arm/kernel/sleep.S
@@ -0,0 +1,109 @@
+#include
+#include
+#include
+#include
+#include
+#include
+	.text
+
+/*
+ * Save CPU state for a suspend
+ *	r1 = v:p offset
+ *	r3 = virtual return function
+ *
+ * Note: sp is decremented to allocate space for CPU state on stack
+ * r0-r3,r9,r10,lr corrupted
+ */
+ENTRY(cpu_suspend)
+	mov	r9, lr
+#ifdef MULTI_CPU
+	ldr	r10, =processor
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	add	ip, ip, r1		@ convert resume fn to phys
+	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+	str	r2, [r3]		@ save phys SP
+	mov	lr, pc
+	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, =cpu_suspend_size
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+	str	r2, [r3]		@ save phys SP
+	bl	cpu_do_suspend
+#endif
+
+	@ flush data cache
+#ifdef MULTI_CACHE
+	ldr	r10, =cpu_cache
+	mov	lr, r9
+	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+	mov	lr, r9
+	b	__cpuc_flush_kern_all
+#endif
+ENDPROC(cpu_suspend)
+	.ltorg
+
+/*
+ * r0 = control register value
+ * r1 = v:p offset (preserved by cpu_do_resume)
+ * r2 = phys page table base
+ * r3 = L1 section flags
+ */
+ENTRY(cpu_resume_mmu)
+	adr	r4, cpu_resume_turn_mmu_on
+	mov	r4, r4, lsr #20
+	orr	r3, r3, r4, lsl #20
+	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
+	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
+	sub	r2, r2, r1
+	ldr	r3, =cpu_resume_after_mmu
+	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
+	b	cpu_resume_turn_mmu_on
+ENDPROC(cpu_resume_mmu)
+	.ltorg
+	.align	5
+cpu_resume_turn_mmu_on:
+	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
+	mov	r1, r1
+	mov	r1, r1
+	mov	pc, r3			@ jump to virtual address
+ENDPROC(cpu_resume_turn_mmu_on)
+cpu_resume_after_mmu:
+	str	r5, [r2, r4, lsl #2]	@ restore old mapping
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
+	mov	pc, lr
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located in the .data section.
+ * This is to allow sleep_save_sp to be accessed with a relative load
+ * while we can't rely on any MMU translation.  We could have put
+ * sleep_save_sp in the .text section as well, but some setups might
+ * insist on it being truly read-only.
+ */
+	.data
+	.align
+ENTRY(cpu_resume)
+	ldr	r0, sleep_save_sp	@ stack phys addr
+	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+#ifdef MULTI_CPU
+	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+#else
+	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	b	cpu_do_resume
+#endif
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+	.word	0			@ preserve stack phys ptr here
--
cgit v1.1

From 941aefac4c243cf407d7665d3e64beb32d556acf Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 11 Feb 2011 11:32:19 +0000
Subject: ARM: pm: allow generic sleep code to be used with SMP CPU idle

Allow the generic sleep code to be used with SMP CPU idle by storing
N CPU stack pointers rather than just one. Tested on Assabet and
Tegra 2.
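As a C analogue of the data-structure change (illustrative only — the
real code is the sleep.S assembly in the hunk below), the single saved
word becomes one slot per CPU, with smp_processor_id() standing in for
the MPIDR read (mrc p15, 0, lr, c0, c0, 5) masked to four bits:

#include <linux/smp.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/types.h>	/* phys_addr_t */

/* One saved physical stack pointer per CPU instead of one word. */
static phys_addr_t sleep_save_sp[NR_CPUS];

static void save_phys_sp(phys_addr_t phys_sp)
{
	sleep_save_sp[smp_processor_id()] = phys_sp;
}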
Tested-by: Colin Cross
Signed-off-by: Russell King
---
 arch/arm/kernel/sleep.S | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 2ba1794..bfad698 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -1,4 +1,5 @@
 #include
+#include
 #include
 #include
 #include
@@ -26,7 +27,14 @@ ENTRY(cpu_suspend)
 	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
 	ldr	r3, =sleep_save_sp
 	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
 	str	r2, [r3]		@ save phys SP
+#endif
 	mov	lr, pc
 	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
 #else
@@ -37,7 +45,14 @@ ENTRY(cpu_suspend)
 	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
 	ldr	r3, =sleep_save_sp
 	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
 	str	r2, [r3]		@ save phys SP
+#endif
 	bl	cpu_do_suspend
 #endif
@@ -95,7 +110,15 @@ ENDPROC(cpu_resume_after_mmu)
 	.data
 	.align
 ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+	adr	r0, sleep_save_sp
+	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+	ALT_UP(mov r1, #0)
+	and	r1, r1, #15
+	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
+#else
 	ldr	r0, sleep_save_sp	@ stack phys addr
+#endif
 	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
 #ifdef MULTI_CPU
 	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
@@ -106,4 +129,6 @@ ENTRY(cpu_resume)
 ENDPROC(cpu_resume)

 sleep_save_sp:
-	.word	0			@ preserve stack phys ptr here
+	.rept	CONFIG_NR_CPUS
+	.long	0			@ preserve stack phys ptr here
+	.endr
--
cgit v1.1

From ba55d3db9bb59a52fe45dbc5d62776adbb289e54 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 25 Feb 2011 20:19:32 +0100
Subject: ARM: 6767/1: ptrace: fix register indexing in GETHBPREGS request

The GETHBPREGS ptrace request incorrectly maps its index argument onto
the thread's saved debug state when the index != 0. This has not yet
been seen from userspace because GDB (the only user of this request)
only reads from register 0. This patch fixes the indexing.

Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/ptrace.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816..b13e70f 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
 		while (!(arch_ctrl.len & 0x1))
 			arch_ctrl.len >>= 1;

-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
 			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
 	}

 put:
--
cgit v1.1

From c09bae709182046ab104757115dfbd74a1ba1a15 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 25 Feb 2011 20:20:42 +0100
Subject: ARM: 6768/1: hw_breakpoint: ensure debug logic is powered up on v7 cores

ARMv7 allows the debug core logic to be powered down and provides the
DBGPRSR register so that software can power up and check the status of
the logic. This patch ensures that the debug logic is powered up on
ARMv7 cores before we attempt to access the extended debug registers.
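A minimal sketch of that power-status check (the helper name is
hypothetical; the real change is in the hunk below): read DBGPRSR
(CP14, CRn=c1, CRm=c5, op2=4) and test bit 0, which when set indicates
the debug logic is powered up and the extended registers are safe to
access:

#include <linux/types.h>

/* Hypothetical helper mirroring the DBGPRSR check added below. */
static bool debug_logic_powered_up(void)
{
	u32 dbg_power;

	asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
	return (dbg_power & 0x1) != 0;	/* bit 0 clear => powered down */
}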
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/hw_breakpoint.c | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d600bd3..44b84fe 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;

 	/*
 	 * v7 debug contains save and restore registers so that debug state
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
 		 */
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };

 	debug_arch = get_debug_arch();
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}

 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
--
cgit v1.1
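Tying back to the first patch in this series, a hedged usage sketch for
scu_power_mode(). The hook name is hypothetical and the declaration is
assumed to live in asm/smp_scu.h alongside scu_enable(); the ordering
follows the function's own documented requirements (preemption and
interrupts disabled, caches flushed before coherency is lost), and
mode 3 is one of the values its mode > 3 || mode == 1 check permits:

#include <linux/irqflags.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>	/* assumed home of scu_power_mode() */

/* Hypothetical per-platform low-power entry path for one CPU. */
static void example_cpu_lowpower(void __iomem *scu_base)
{
	local_irq_disable();		/* interrupts must be off */
	flush_cache_all();		/* coherency is about to be lost */
	scu_power_mode(scu_base, 3);	/* set this CPU's low-power mode */

	asm volatile("wfi");		/* enter the low-power state */
}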