author		Christoph Lameter <clameter@sgi.com>	2007-06-23 17:16:33 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-24 08:59:11 -0700
commit		92c4ca5c3a5e180e9762438db235f41d192cb955 (patch)
tree		814af0cfd84986f75e1e581f854eeb4f6ab42c35
parent		849663430268db63a9c3c7467984e4e530ded901 (diff)
sched: fix next_interval determination in idle_balance()
The intervals of domains that do not have SD_BALANCE_NEWIDLE must be
considered for the calculation of the time of the next balance.
Otherwise we may defer rebalancing forever.

Siddha also spotted that the conversion of the balance interval to
jiffies is missing. Fix that too.

From: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>

Also continue the loop if !(sd->flags & SD_LOAD_BALANCE).

Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

It did in fact trigger under all three of mainline, CFS, and -rt
including CFS -- see below for a couple of emails from last Friday
giving results for these three on the AMD box (where it happened)
and on a single-quad NUMA-Q system (where it did not, at least not
with such severity).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
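To illustrate the corrected logic in isolation: every domain that participates in load
balancing contributes its balance interval (converted from milliseconds to jiffies) to the
next_balance calculation, not only the domains with SD_BALANCE_NEWIDLE set. The following
user-space sketch mimics that bookkeeping with made-up flag values, a stand-in
msecs_to_jiffies() and a plain comparison in place of time_after(); it is an illustration
of the idea, not kernel code.

/* Simplified, user-space sketch of the fixed next_balance bookkeeping.
 * The struct, flag values, HZ and msecs_to_jiffies() below are stand-ins
 * for the kernel's definitions, for illustration only.
 */
#include <stdio.h>

#define HZ			250
#define SD_LOAD_BALANCE		0x01
#define SD_BALANCE_NEWIDLE	0x02

struct fake_domain {
	int flags;
	unsigned long last_balance;	/* in jiffies */
	unsigned int balance_interval;	/* in milliseconds */
};

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (unsigned long)ms * HZ / 1000;	/* rough conversion */
}

int main(void)
{
	unsigned long jiffies = 100000;			/* pretend "now" */
	unsigned long next_balance = jiffies + 60 * HZ;	/* default: far in the future */
	struct fake_domain domains[] = {
		{ SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE, 99990,   8 },
		{ SD_LOAD_BALANCE,                      99950,  64 },	/* no NEWIDLE, still counts */
		{ 0,                                    99900, 128 },	/* never balanced, skipped */
	};
	size_t i;

	for (i = 0; i < sizeof(domains) / sizeof(domains[0]); i++) {
		struct fake_domain *sd = &domains[i];
		unsigned long interval;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;	/* skip domains that are never balanced */

		/* Every balanced domain contributes to next_balance, not only
		 * SD_BALANCE_NEWIDLE ones. The kernel uses time_after() here,
		 * which handles jiffies wraparound; a plain comparison is
		 * enough for this sketch.
		 */
		interval = msecs_to_jiffies(sd->balance_interval);
		if (next_balance > sd->last_balance + interval)
			next_balance = sd->last_balance + interval;
	}

	printf("next balance due at jiffy %lu\n", next_balance);
	return 0;
}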
-rw-r--r--	kernel/sched.c	22
1 file changed, 13 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a747591..50e1a31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2938,17 +2938,21 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	unsigned long next_balance = jiffies + 60 * HZ;
 
 	for_each_domain(this_cpu, sd) {
-		if (sd->flags & SD_BALANCE_NEWIDLE) {
+		unsigned long interval;
+
+		if (!(sd->flags & SD_LOAD_BALANCE))
+			continue;
+
+		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu,
-								this_rq, sd);
-			if (time_after(next_balance,
-				  sd->last_balance + sd->balance_interval))
-				next_balance = sd->last_balance
-					+ sd->balance_interval;
-			if (pulled_task)
-				break;
-		}
+							this_rq, sd);
+
+		interval = msecs_to_jiffies(sd->balance_interval);
+		if (time_after(next_balance, sd->last_balance + interval))
+			next_balance = sd->last_balance + interval;
+		if (pulled_task)
+			break;
 	}
 	if (!pulled_task)
 		/*