author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-10-26 19:41:17 +0000 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-10-27 16:42:43 +1100 |
commit | 4f917ba3d5ee9c98d60fa357e799942df8412de3 (patch) | |
tree | 3576261c81b76e74b2372d2221196040a369bb92 /arch/powerpc | |
parent | 01deab98e3ad8ff27243a8d5f8dd746c7110ae4f (diff) | |
powerpc/ppc64: Use preempt_schedule_irq instead of preempt_schedule
Based on an original patch by Valentine Barshak <vbarshak@ru.mvista.com>
Use preempt_schedule_irq to prevent infinite irq-entry and
eventual stack overflow problems with fast-paced IRQ sources.
This kind of problem has been observed on the PASemi Electra IDE
controller. We have to make sure interrupts are soft-disabled before calling
preempt_schedule_irq and hard-disable them again afterwards
to avoid unrecoverable exceptions.
This patch also moves the "clrrdi r9,r1,THREAD_SHIFT" out of
the #ifdef CONFIG_PPC_BOOK3E scope, since r9 is clobbered
and has to be restored in both cases.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
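
To make the motivation above concrete, here is a minimal user-space sketch of the two call patterns. This is an illustration only, assuming stubbed primitives: `irq_enable()`, `irq_disable()` and the two `*_preempt_from_irq_return()` helpers are invented stand-ins that model the interrupt-enable window, not the real kernel APIs or actual scheduling.

```c
/*
 * Illustrative sketch only -- not kernel code.  It models why calling
 * preempt_schedule() from the interrupt-return path requires interrupts
 * to be re-enabled first (opening a window for a fast-paced IRQ source
 * to nest another exception frame), while preempt_schedule_irq() is
 * entered and left with interrupts disabled.
 */
#include <stdio.h>

static int irqs_on;                      /* models MSR[EE] */

static void irq_enable(void)  { irqs_on = 1; }
static void irq_disable(void) { irqs_on = 0; }

/* Old path: enable IRQs, call preempt_schedule(), disable again.
 * While irqs_on == 1, the same IRQ source can re-enter the exception
 * path and repeat the whole dance, growing the kernel stack.         */
static void old_preempt_from_irq_return(void)
{
    irq_enable();
    printf("preempt_schedule(): IRQs %s during the call\n",
           irqs_on ? "enabled" : "disabled");
    irq_disable();
}

/* New path: stay disabled; preempt_schedule_irq() owns the short
 * enable window around __schedule() and returns with IRQs off.       */
static void new_preempt_from_irq_return(void)
{
    printf("preempt_schedule_irq(): entered with IRQs %s\n",
           irqs_on ? "enabled" : "disabled");
}

int main(void)
{
    irq_disable();                       /* state on the IRQ-return path */
    old_preempt_from_irq_return();
    new_preempt_from_irq_return();
    return 0;
}
```

The patched entry_64.S below expresses the same ordering in assembly, with the PACA soft/hard-disable bytes standing in for the `irqs_on` flag of the sketch.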
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/entry_64.S | 41 |
1 file changed, 21 insertions, 20 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f9fd54b..9763267 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -658,42 +658,43 @@ do_work:
 	cmpdi	r0,0
 	crandc	eq,cr1*4+eq,eq
 	bne	restore
-	/* here we are preempting the current task */
-1:
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	.trace_hardirqs_on
-	/* Note: we just clobbered r10 which used to contain the previous
-	 * MSR before the hard-disabling done by the caller of do_work.
-	 * We don't have that value anymore, but it doesn't matter as
-	 * we will hard-enable unconditionally, we can just reload the
-	 * current MSR into r10
+
+	/* Here we are preempting the current task.
+	 *
+	 * Ensure interrupts are soft-disabled. We also properly mark
+	 * the PACA to reflect the fact that they are hard-disabled
+	 * and trace the change
 	 */
-	mfmsr	r10
-#endif /* CONFIG_TRACE_IRQFLAGS */
-	li	r0,1
+	li	r0,0
 	stb	r0,PACASOFTIRQEN(r13)
 	stb	r0,PACAHARDIRQEN(r13)
+	TRACE_DISABLE_INTS
+
+	/* Call the scheduler with soft IRQs off */
+1:	bl	.preempt_schedule_irq
+
+	/* Hard-disable interrupts again (and update PACA) */
 #ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-	bl	.preempt_schedule
 	wrteei	0
 #else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1		/* reenable interrupts */
-	bl	.preempt_schedule
 	mfmsr	r10
-	clrrdi	r9,r1,THREAD_SHIFT
-	rldicl	r10,r10,48,1	/* disable interrupts again */
+	rldicl	r10,r10,48,1
 	rotldi	r10,r10,16
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
+	li	r0,0
+	stb	r0,PACAHARDIRQEN(r13)
+
+	/* Re-test flags and eventually loop */
+	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	bne	1b
 	b	restore
 
 user_work:
-#endif
+#endif /* CONFIG_PREEMPT */
+
 	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
 	wrteei	1
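
For readers who do not follow ppc64 assembly, the following is a rough C paraphrase of the new do_work preemption loop above. Every helper name and variable is an illustrative stand-in, not a real kernel interface; it is a sketch under the assumption that the PACA bytes and TI_FLAGS can be reduced to plain variables. The comment on `current_thread_info()` also captures why the clrrdi was moved out of the #ifdef: r9 is a volatile (caller-clobbered) register under the ppc64 ELF ABI, so it has to be recomputed after the call on both the Book3E and Book3S paths.

```c
/*
 * Rough, hedged C paraphrase of the new do_work preemption path in
 * entry_64.S.  All helpers below are invented stand-ins for the
 * assembly sequences named in the comments.
 */
#include <stdio.h>

struct thread_info { unsigned long flags; };
#define _TIF_NEED_RESCHED 0x1UL

static int paca_soft_irq_en = 1;   /* PACASOFTIRQEN */
static int paca_hard_irq_en = 1;   /* PACAHARDIRQEN */

static struct thread_info ti;      /* pretend "current" thread_info */

/* clrrdi r9,r1,THREAD_SHIFT: derive thread_info from the stack pointer.
 * Must be redone after every C call because r9 is volatile in the
 * ppc64 ELF ABI -- hence the move out of the CONFIG_PPC_BOOK3E ifdef. */
static struct thread_info *current_thread_info(void)
{
    return &ti;
}

static void hard_irq_disable(void) { /* wrteei 0 / clear MSR[EE] */ }

static void preempt_schedule_irq(void)
{
    /* The real function briefly enables IRQs around __schedule()
     * and returns with them disabled again. */
    printf("rescheduling with IRQs off\n");
    ti.flags &= ~_TIF_NEED_RESCHED;
}

static void do_work_preempt(void)
{
    paca_soft_irq_en = 0;          /* li r0,0; stb r0,PACASOFTIRQEN(r13) */
    paca_hard_irq_en = 0;          /* stb r0,PACAHARDIRQEN(r13)          */

    do {
        preempt_schedule_irq();    /* 1: bl .preempt_schedule_irq         */
        hard_irq_disable();        /* wrteei 0 (Book3E) / mtmsrd (Book3S) */
        paca_hard_irq_en = 0;      /* stb r0,PACAHARDIRQEN(r13)           */
    } while (current_thread_info()->flags & _TIF_NEED_RESCHED);
}

int main(void)
{
    ti.flags = _TIF_NEED_RESCHED;
    do_work_preempt();
    printf("soft_en=%d hard_en=%d\n", paca_soft_irq_en, paca_hard_irq_en);
    return 0;
}
```

The essential design point mirrored here is that preempt_schedule_irq is both entered and left with interrupts off, and the loop re-checks _TIF_NEED_RESCHED after each pass instead of relying on a fresh interrupt to trigger another preemption.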