author    Con Kolivas <kernel@kolivas.org>    2005-11-08 21:38:59 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-11-09 07:56:32 -0800
commit    6dd4a85bb3ee0715415892c8b0f2a9bd08d31ca4
tree      a53890e37a81c4da1d6e6f398aaea66880d4f996
parent    3b0bd9bc6f3b8a47853d1b1de4520de3878e8941
[PATCH] sched: correct smp_nice_bias
The priority biasing was off by multiplying the total load by the total
priority bias, and this ruins the ratio of loads between runqueues. This
patch should correct the ratios of loads between runqueues to be
proportional to overall load. -2nd attempt.

From: Dave Kleikamp <shaggy@austin.ibm.com>

This patch fixes a divide-by-zero error that I hit on a two-way i386
machine. rq->nr_running is tested to be non-zero, but may change by the
time it is used in the division. Saving the value to a local variable
ensures that the same value that is checked is used in the division.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  kernel/sched.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
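As background for the diff below, here is a minimal user-space sketch of the arithmetic the changelog describes. It is not part of the patch: the struct, the helper names and the sample numbers are made up, and only nr_running, prio_bias and SCHED_LOAD_SCALE mirror the fields the patch touches. The point is that multiplying the whole load by the runqueue's total prio_bias lets the task count enter the figure twice, while dividing by a saved copy of nr_running biases by the average per-task priority, so the biased loads stay proportional to the actual loads.

/* Illustration only -- not kernel code. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL

struct rq_sample {
	unsigned long nr_running;	/* tasks on the queue */
	unsigned long prio_bias;	/* sum of per-task priority biases */
};

/* Old behaviour: scale the whole load by the total bias. */
unsigned long biased_load_old(const struct rq_sample *rq)
{
	return rq->nr_running * SCHED_LOAD_SCALE * rq->prio_bias;
}

/* Patched behaviour: scale by the average per-task bias instead, using
 * one snapshot of nr_running as both the test and the divisor. */
unsigned long biased_load_new(const struct rq_sample *rq)
{
	unsigned long running = rq->nr_running;
	unsigned long load = running * SCHED_LOAD_SCALE;

	if (!running)
		return load;	/* nothing to bias, and no divide by zero */
	return load * rq->prio_bias / running;
}

int main(void)
{
	struct rq_sample a = { .nr_running = 4, .prio_bias = 4 };	/* four tasks, bias 1 each */
	struct rq_sample b = { .nr_running = 1, .prio_bias = 1 };	/* one comparable task */

	printf("old: a=%lu b=%lu\n", biased_load_old(&a), biased_load_old(&b));	/* 2048 vs 128 */
	printf("new: a=%lu b=%lu\n", biased_load_new(&a), biased_load_new(&b));	/* 512 vs 128 */
	return 0;
}

With equal per-task bias, the old form reports a 16:1 ratio for what is really a 4:1 load difference; the patched form keeps it at 4:1.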
diff --git a/kernel/sched.c b/kernel/sched.c
index ec9ea91..502d47c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,15 +972,16 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		source_load = load_now;
 	else
 		source_load = min(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
+	if (running > 1 || (idle == NOT_IDLE && running))
 		/*
 		 * If we are busy rebalancing the load is biased by
 		 * priority to create 'nice' support across cpus. When
@@ -989,7 +990,7 @@ static inline unsigned long __source_load(int cpu, int type, enum idle_type idle
 		 * prevent idle rebalance from trying to pull tasks from a
 		 * queue with only one running task.
 		 */
-		source_load *= rq->prio_bias;
+		source_load = source_load * rq->prio_bias / running;
 
 	return source_load;
 }
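A note on the reworked guard, not from the changelog but simply reading the new condition: with running > 1 || (idle == NOT_IDLE && running), the bias is applied on a busy rebalance whenever the queue is non-empty, and on an idle rebalance only when more than one task is running, exactly as the comment above says. In both arms running is non-zero, so the new division by running cannot be a divide by zero, and because running is the local snapshot taken at the top of the function it cannot change between the test and the division.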
@@ -1005,16 +1006,17 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		target_load = load_now;
 	else
 		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE || rq->nr_running > 1)
-		target_load *= rq->prio_bias;
+	if (running > 1 || (idle == NOT_IDLE && running))
+		target_load = target_load * rq->prio_bias / running;
 
 	return target_load;
 }
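Finally, a minimal sketch of the divide-by-zero half of the fix that Dave Kleikamp describes above. The names here (fake_rq, bias_racy, bias_snapshot) are assumptions for a user-space illustration, not kernel code: rq->nr_running may change between the test and the division, so the patch reads it once into a local and uses that same value for both.

/* Illustration only -- not kernel code. */
struct fake_rq {
	volatile unsigned long nr_running;
	unsigned long prio_bias;
};

/* Racy shape: nr_running is read twice; it can drop to zero between
 * the test and the division, which is the crash the message describes. */
unsigned long bias_racy(struct fake_rq *rq, unsigned long load)
{
	if (rq->nr_running)
		return load * rq->prio_bias / rq->nr_running;
	return load;
}

/* Patch's shape: a single snapshot, so the value that was tested is
 * the value that divides. */
unsigned long bias_snapshot(struct fake_rq *rq, unsigned long load)
{
	unsigned long running = rq->nr_running;

	if (running)
		return load * rq->prio_bias / running;
	return load;
}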