author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-01-25 21:08:31 +0100
committer  Ingo Molnar <mingo@elte.hu>              2008-01-25 21:08:31 +0100
commit     2d44ae4d7135b9aee26439b3523b43473381bc5f (patch)
tree       ba3afc0f03142d26f9238974dab5b99bf1dca1db /kernel
parent     48d5e258216f1c7713633439beb98a38c7290649 (diff)
hrtimer: clean up cpu->base locking tricks
In order to more easily allow for the scheduler to use timers, clean up
the locking a bit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
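The pattern being introduced is easiest to see in isolation: hrtimer_interrupt()
will now drop the per-CPU base lock around callbacks of the
HRTIMER_CB_IRQSAFE_NO_SOFTIRQ class, so a callback may take rq->lock without
creating a base-lock/rq-lock ABBA deadlock against code that holds rq->lock
while arming a timer. A minimal userspace analogy of that unlock/call/relock
shape (pthread mutexes as stand-ins for the kernel spinlocks; every name here
is illustrative, none of it is kernel API):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the two kernel locks involved. */
static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER; /* cpu_base->lock */
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;   /* rq->lock */

/* A scheduler-style timer callback: it takes rq_lock, so it must not
 * run under base_lock, or it can ABBA-deadlock against a thread that
 * holds rq_lock and arms a timer (which takes base_lock). */
static int scheduler_callback(void)
{
        pthread_mutex_lock(&rq_lock);
        /* ... scheduler work ... */
        pthread_mutex_unlock(&rq_lock);
        return 0; /* our stand-in for HRTIMER_NORESTART */
}

static void run_expired_timer(void)
{
        int restart;

        pthread_mutex_lock(&base_lock);
        /* ... dequeue the expired timer under base_lock ... */

        /* The pattern this patch introduces: drop the base lock for
         * the duration of the callback, then re-take it. */
        pthread_mutex_unlock(&base_lock);
        restart = scheduler_callback();
        pthread_mutex_lock(&base_lock);

        /* ... a restarting timer would be re-enqueued here, again
         * under base_lock ... */
        (void)restart;
        pthread_mutex_unlock(&base_lock);
}

int main(void)
{
        run_expired_timer();
        puts("callback ran without base_lock held");
        return 0;
}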
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c          | 20 +++++++++++++++++++-
-rw-r--r--  kernel/time/tick-sched.c  |  8 --------
2 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f994bb8..9f850ca 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1063,7 +1063,9 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                 basenow = ktime_add(now, base->offset);

                 while ((node = base->first)) {
+                        enum hrtimer_restart (*fn)(struct hrtimer *);
                         struct hrtimer *timer;
+                        int restart;

                         timer = rb_entry(node, struct hrtimer, node);
@@ -1091,13 +1093,29 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                                          HRTIMER_STATE_CALLBACK, 0);
                         timer_stats_account_hrtimer(timer);

+                        fn = timer->function;
+                        if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+                                /*
+                                 * Used for scheduler timers, avoid lock
+                                 * inversion with rq->lock and tasklist_lock.
+                                 *
+                                 * These timers are required to deal with
+                                 * enqueue expiry themselves and are not
+                                 * allowed to migrate.
+                                 */
+                                spin_unlock(&cpu_base->lock);
+                                restart = fn(timer);
+                                spin_lock(&cpu_base->lock);
+                        } else
+                                restart = fn(timer);
+
                         /*
                          * Note: We clear the CALLBACK bit after
                          * enqueue_hrtimer to avoid reprogramming of
                          * the event hardware. This happens at the end
                          * of this function anyway.
                          */
-                        if (timer->function(timer) != HRTIMER_NORESTART) {
+                        if (restart != HRTIMER_NORESTART) {
                                 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
                                 enqueue_hrtimer(timer, base, 0);
                         }
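Only timers whose cb_mode is HRTIMER_CB_IRQSAFE_NO_SOFTIRQ take the unlocked
path above, and the comment spells out their side of the contract: they run in
hard-irq context, must handle their own expiry/rearming, and must not migrate
between CPUs. A hedged sketch of such a self-rearming callback against the
hrtimer API of this era (my_tick and MY_INTERVAL are made-up names, not part
of this patch):

/* Sketch only: assumes the 2.6.24-era hrtimer API used by this tree. */
static enum hrtimer_restart my_tick(struct hrtimer *t)
{
        /*
         * Runs with cpu_base->lock released, so taking rq->lock here
         * is fine. hrtimer_forward() pushes the expiry ahead by
         * MY_INTERVAL; returning HRTIMER_RESTART lets
         * hrtimer_interrupt() re-enqueue the timer once it has
         * re-taken cpu_base->lock (see the hunk above).
         */
        hrtimer_forward(t, ktime_get(), MY_INTERVAL);
        return HRTIMER_RESTART;
}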
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5f9fb64..1a21b6f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -514,7 +514,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
         struct tick_sched *ts =
                 container_of(timer, struct tick_sched, sched_timer);
-        struct hrtimer_cpu_base *base = timer->base->cpu_base;
         struct pt_regs *regs = get_irq_regs();
         ktime_t now = ktime_get();
         int cpu = smp_processor_id();
@@ -552,15 +551,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
                         touch_softlockup_watchdog();
                         ts->idle_jiffies++;
                 }
-                /*
-                 * update_process_times() might take tasklist_lock, hence
-                 * drop the base lock. sched-tick hrtimers are per-CPU and
-                 * never accessible by userspace APIs, so this is safe to do.
-                 */
-                spin_unlock(&base->lock);
                 update_process_times(user_mode(regs));
                 profile_tick(CPU_PROFILING);
-                spin_lock(&base->lock);
         }

         /* Do not restart, when we are in the idle loop */
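The tick-sched hunk is the payoff: tick_sched_timer() no longer drops and
re-takes the base lock by hand around update_process_times(), because the
sched tick timer is registered in exactly the class that hrtimer_interrupt()
now calls unlocked. For reference, the registration in
tick_setup_sched_timer() of this era reads roughly as follows (quoted from
memory, so treat it as a sketch rather than part of this patch):

        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;
        ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;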