path: root/kernel/time
author     Sebastien Dugue <sebastien.dugue@bull.net>  2008-12-01 14:09:07 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-01-30 22:35:29 +0100
commit     94df7de0289bc2df3d6e85cd2ece52bf42682f45 (patch)
tree       b3f614b015adfb9574959687bb9b7ac4c884e23a /kernel/time
parent     7f22391cbe82a80a9f891d8bd10fc28ff248d1e2 (diff)
hrtimers: allow the hot-unplugging of all cpus
Impact: fix CPU hotplug hang on Power6 testbox

On architectures that support offlining all cpus (at least powerpc/pseries), hot-unplugging the tick_do_timer_cpu can result in a system hang. This comes from the fact that if the cpu going down happens to be the cpu doing the tick, then as the tick_do_timer_cpu handover happens after the cpu is dead (via the CPU_DEAD notification), we're left without ticks, jiffies are frozen and any task relying on timers (msleep, ...) is stuck. That's particularly the case for the cpu looping in __cpu_die() waiting for the dying cpu to be dead.

This patch addresses this by having the tick_do_timer_cpu handover happen earlier, during the CPU_DYING notification. For this, a new clockevent notification type is introduced (CLOCK_EVT_NOTIFY_CPU_DYING) which is triggered in hrtimer_cpu_notify().

Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
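The hrtimer_cpu_notify() hook mentioned above lives in kernel/hrtimer.c and is therefore not part of the diffstat below. A minimal sketch of what that hunk is expected to look like, assuming the usual clockevents_notify() call pattern and a notifier that carries the cpu number in a local variable (scpu here is illustrative, not taken from this diff):

	/* kernel/hrtimer.c, hrtimer_cpu_notify() -- sketch only, outside this diffstat */
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/* Hand the do_timer duty over while the dying cpu can still
		 * run; this ends up in tick_handover_do_timer() via tick_notify(). */
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;

The important property is the ordering: CPU_DYING runs before the cpu is actually dead, so another cpu already owns tick_do_timer_cpu by the time __cpu_die() starts waiting for the dying cpu.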
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/tick-common.c   26
1 file changed, 19 insertions, 7 deletions
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d4..21a5ca8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+	if (*cpup == tick_do_timer_cpu) {
+		int cpu = cpumask_first(cpu_online_mask);
+
+		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+			TICK_DO_TIMER_NONE;
+	}
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
  * This is called on a life CPU, when a CPU is dead. So we cannot
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
 		clockevents_exchange_device(dev, NULL);
 		td->evtdev = NULL;
 	}
-	/* Transfer the do_timer job away from this cpu */
-	if (*cpup == tick_do_timer_cpu) {
-		int cpu = cpumask_first(cpu_online_mask);
-
-		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-			TICK_DO_TIMER_NONE;
-	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
 		tick_broadcast_oneshot_control(reason);
 		break;
 
+	case CLOCK_EVT_NOTIFY_CPU_DYING:
+		tick_handover_do_timer(dev);
+		break;
+
 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
 		tick_shutdown_broadcast_oneshot(dev);
 		tick_shutdown_broadcast(dev);
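Putting the two hunks together, the handover and the device shutdown now happen at different hotplug stages. A call-path summary, inferred from the commit message and the hunks above (the kernel/hrtimer.c side is outside this diffstat):

	/*
	 * CPU_DYING (dying cpu still running, interrupts disabled):
	 *   hrtimer_cpu_notify()
	 *     -> clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &cpu)
	 *       -> tick_notify() -> tick_handover_do_timer()
	 *
	 * CPU_DEAD (later, on a surviving cpu):
	 *   tick_notify() -> tick_shutdown()
	 *     (device teardown only; the do_timer handover has already happened)
	 */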