| field | value | date |
|---|---|---|
| author | Colin Cross <ccross@android.com> | 2011-10-27 15:03:56 -0700 |
| committer | Colin Cross <ccross@android.com> | 2011-10-27 15:03:56 -0700 |
| commit | 60988eca1376ba564e778007cbffabebf05599e1 (patch) | |
| tree | a188cdd22dc3a9d42955679dc01357bcd0d7e264 /kernel/sched.c | |
| parent | 47d998a7cec61a44b9e0799301039a63fed28280 (diff) | |
| parent | bc2e7067af4c1cd074aaec71c2982e2265f7f169 (diff) | |
Merge branch 'linux-omap-3.0' into android-omap-3.0
Diffstat (limited to 'kernel/sched.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 65 |
1 file changed, 26 insertions, 39 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index bb4035c..0f81bd4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3714,30 +3714,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
@@ -4243,9 +4219,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4286,16 +4262,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4333,6 +4299,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4406,7 +4392,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4434,7 +4420,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5559,7 +5545,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7414,6 +7400,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
```
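The substantive change this merge brings into kernel/sched.c is the split of schedule(): the old body becomes the static core __schedule(), and the exported schedule() becomes a thin wrapper that first calls sched_submit_work() to flush any plugged block IO before the task can go to sleep. The preemption paths (preempt_schedule(), preempt_schedule_irq(), __cond_resched()) now enter through __schedule() directly, since a preempted task is not going to sleep and must not run the submit-work step; it also means the IO flush no longer has to drop and retake rq->lock inside the scheduler core, as the old code did. The sketch below is a minimal user-space illustration of that entry-point split; the struct, helper names, and printf bodies are hypothetical stand-ins, not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's task state and block-plug
 * machinery; the names mirror the diff, but none of this is kernel code. */
struct task {
	int  state;          /* 0 = runnable, non-zero = about to sleep */
	bool has_plugged_io; /* IO queued on a blk plug, not yet submitted */
};

static struct task current_task = { .state = 1, .has_plugged_io = true };

/* Core scheduler entry: in the patch this is the former schedule() body,
 * renamed __schedule() and made static. */
static void core_schedule(void)
{
	printf("__schedule(): pick next task and context-switch\n");
}

/* Runs only on the voluntary path: if the task is about to sleep and has
 * plugged IO queued, submit it first so nobody deadlocks waiting on IO
 * that would otherwise never be issued. */
static void submit_work(struct task *tsk)
{
	if (!tsk->state)
		return; /* still runnable: nothing to flush */
	if (tsk->has_plugged_io) {
		printf("flushing plugged IO before sleeping\n");
		tsk->has_plugged_io = false;
	}
}

/* Voluntary entry point, analogous to the new schedule() wrapper. */
static void voluntary_schedule(void)
{
	submit_work(&current_task);
	core_schedule();
}

/* Preemption path, analogous to preempt_schedule(): the task is not going
 * to sleep, so it enters the core directly and skips the IO flush. */
static void preempt_path(void)
{
	core_schedule();
}

int main(void)
{
	voluntary_schedule(); /* flushes IO, then switches */
	preempt_path();       /* switches without touching IO plugs */
	return 0;
}
```

Separately, the final hunk makes __sdt_free() also kfree() the per-CPU sched_domain itself (*per_cpu_ptr(sdd->sd, j)); previously only the group and group-power allocations were freed, leaking the domain structure on teardown.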