author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-20 15:56:25 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-20 15:56:25 -0700
commit | cf6ace16a3cd8b728fb0afa68368fd40bbeae19f (patch)
tree | 489c64380668e8c5a29d3f36f37554e4b081a647 /kernel/sched.c
parent | acc11eab70591744369722280c9ce162a6193494 (diff)
parent | d1e9ae47a0285d3f1699e8219ce50f656243b93f (diff)
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
signal: align __lock_task_sighand() irq disabling and RCU
softirq,rcu: Inform RCU of irq_exit() activity
sched: Add irq_{enter,exit}() to scheduler_ipi()
rcu: protect __rcu_read_unlock() against scheduler-using irq handlers
rcu: Streamline code produced by __rcu_read_unlock()
rcu: Fix RCU_BOOST race handling current->rcu_read_unlock_special
rcu: decrease rcu_report_exp_rnp coupling with scheduler
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 44
1 file changed, 38 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 14168c4..fde6ff9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
 
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
 	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
 
 	raw_spin_lock(&rq->lock);
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
 	raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-	sched_ttwu_pending();
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	/*
+	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+	 * traditionally all their work was done from the interrupt return
+	 * path. Now that we actually do some work, we need to make sure
+	 * we do call them.
+	 *
+	 * Some archs already do call them, luckily irq_enter/exit nest
+	 * properly.
+	 *
+	 * Arguably we should visit all archs and update all handlers,
+	 * however a fair share of IPIs are still resched only so this would
+	 * somewhat pessimize the simple resched case.
+	 */
+	irq_enter();
+	sched_ttwu_do_pending(list);
+	irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
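The pattern the diff relies on: remote CPUs queue wakeups on rq->wake_list without taking the runqueue lock, and scheduler_ipi() detaches the whole list in one shot with xchg(&rq->wake_list, NULL) before walking it, now bracketed by irq_enter()/irq_exit() so softirq and RCU accounting see the work. Below is a hedged, userspace-only C11 sketch of that lock-free handoff; struct fake_task, wake_list, queue_remote() and drain_wake_list() are illustrative names invented for the example, not kernel symbols, and the real code uses the kernel's own xchg() and per-CPU runqueues rather than <stdatomic.h>.

/*
 * Userspace analogue of the rq->wake_list handoff (illustrative only).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_task {
	int pid;
	struct fake_task *wake_entry;	/* next pointer, like p->wake_entry */
};

/* Shared singly linked list head, analogous to rq->wake_list. */
static _Atomic(struct fake_task *) wake_list = NULL;

/* Producer side: push a task lock-free, like ttwu_queue_remote(). */
static void queue_remote(struct fake_task *p)
{
	struct fake_task *head = atomic_load(&wake_list);

	do {
		p->wake_entry = head;
	} while (!atomic_compare_exchange_weak(&wake_list, &head, p));
}

/* Consumer side: detach everything at once, like xchg(&rq->wake_list, NULL). */
static void drain_wake_list(void)
{
	struct fake_task *list = atomic_exchange(&wake_list, NULL);

	if (!list)
		return;		/* nothing queued: cheap early exit */

	/* In the kernel version, irq_enter() would go here ... */
	while (list) {
		struct fake_task *p = list;

		list = list->wake_entry;
		printf("waking pid %d\n", p->pid);
		free(p);
	}
	/* ... and irq_exit() here, so softirqs/RCU see the work. */
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		struct fake_task *p = malloc(sizeof(*p));

		p->pid = i;
		queue_remote(p);
	}
	drain_wake_list();
	return 0;
}

The single atomic exchange is what makes the split in the patch cheap: producers never block the consumer, and the early "if (!list) return" keeps the common resched-only IPI from paying the irq_enter()/irq_exit() cost that the new wakeup work needs.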