| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-27 11:27:22 +0100 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-27 11:27:13 +0100 |
| commit | 85ac7ca5972d63d23aa5ea75c3834a33b951f89d (patch) | |
| tree | 851ff1a5500bce5964336960594cb72bff369d13 /arch/s390/kernel/smp.c | |
| parent | 3a3954ceae756ed2a5d53b45c67db6dde3c0c126 (diff) | |
[S390] outstanding interrupts vs. smp_send_stop

The panic function first prints the panic message to the console,
then stops the additional cpus with smp_send_stop, and finally calls
the functions on the panic notifier list.
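
At the time of this commit, the ordering inside panic() in
kernel/panic.c was roughly the following; a minimal sketch of the
relevant calls, not the actual function body:

```c
/* Sketch of the panic() ordering relevant here (details omitted). */
void panic_sketch(const char *msg)
{
	/* 1. Print the panic message; on an I/O based console this starts I/O. */
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n", msg);
	/* 2. Stop all other cpus. */
	smp_send_stop();
	/* 3. Run the registered panic notifiers. */
	atomic_notifier_call_chain(&panic_notifier_list, 0, (void *)msg);
}
```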

In the case of an I/O based console, the panic message will cause I/O
to be started, and a function on the panic notifier list will then wait
for the completion of that I/O. This does not work if an I/O completion
interrupt has already been delivered to a cpu that is then stopped by
smp_send_stop.
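
For illustration only: a hypothetical panic notifier for an I/O driven
console might wait as in the sketch below. Both con_panic_notify() and
console_io_pending() are invented names, not real kernel interfaces:

```c
#include <linux/notifier.h>

/* Hypothetical panic notifier that waits for outstanding console I/O. */
static int con_panic_notify(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	/*
	 * If the I/O completion interrupt was already delivered to a cpu
	 * that smp_send_stop() has stopped, the pending condition is never
	 * cleared and this loop spins forever.
	 */
	while (console_io_pending())	/* invented helper */
		cpu_relax();
	return NOTIFY_DONE;
}
```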

To break this cyclic dependency, add code to smp_send_stop that gives
the additional cpus the opportunity to complete their outstanding
interrupts before they are stopped.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Diffstat (limited to 'arch/s390/kernel/smp.c')

| -rw-r--r-- | arch/s390/kernel/smp.c | 50 |

1 file changed, 42 insertions(+), 8 deletions(-)

```diff
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8aba77d..b1cd329 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -154,22 +154,52 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
 }
 
+static void smp_stop_cpu(void)
+{
+	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
+		cpu_relax();
+}
+
 void smp_send_stop(void)
 {
-	int cpu, rc;
+	cpumask_t cpumask;
+	int cpu;
+	u64 end;
 
 	/* Disable all interrupts/machine checks */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
 	trace_hardirqs_off();
 
-	/* stop all processors */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		do {
-			rc = sigp(cpu, sigp_stop);
-		} while (rc == sigp_busy);
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
+	if (oops_in_progress) {
+		/*
+		 * Give the other cpus the opportunity to complete
+		 * outstanding interrupts before stopping them.
+		 */
+		end = get_clock() + (1000000UL << 12);
+		for_each_cpu(cpu, &cpumask) {
+			set_bit(ec_stop_cpu, (unsigned long *)
+				&lowcore_ptr[cpu]->ext_call_fast);
+			while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
+			       get_clock() < end)
+				cpu_relax();
+		}
+		while (get_clock() < end) {
+			for_each_cpu(cpu, &cpumask)
+				if (cpu_stopped(cpu))
+					cpumask_clear_cpu(cpu, &cpumask);
+			if (cpumask_empty(&cpumask))
+				break;
+			cpu_relax();
+		}
+	}
+	/* stop all processors */
+	for_each_cpu(cpu, &cpumask) {
+		while (sigp(cpu, sigp_stop) == sigp_busy)
+			cpu_relax();
 
 		while (!cpu_stopped(cpu))
 			cpu_relax();
 	}
@@ -194,6 +224,9 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 	 */
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+	if (test_bit(ec_stop_cpu, &bits))
+		smp_stop_cpu();
+
 	if (test_bit(ec_schedule, &bits))
 		scheduler_ipi();
 
@@ -202,6 +235,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
+
 }
 
 /*
```
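
Two details of the new code are worth spelling out. First, the other
cpus are not stopped directly in the oops path: smp_send_stop sets the
ec_stop_cpu bit in each target cpu's lowcore and sends
sigp_emergency_signal, so each target cpu takes an external call
interrupt and stops itself in do_ext_call_interrupt via smp_stop_cpu,
which gives already delivered interrupts a chance to be handled first.
Second, the timeout arithmetic: get_clock() reads the s390 TOD clock,
where bit 51 ticks once per microsecond, so one microsecond corresponds
to 1UL << 12 TOD units and the deadline used above works out to one
second:

```c
/*
 * 10^6 microseconds scaled to TOD clock units (1 us == 1UL << 12 units),
 * i.e. a deadline of one second from now:
 */
u64 end = get_clock() + (1000000UL << 12);
```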