author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-08-12 11:06:45 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-08-12 11:06:45 -0700
commit    738ddd30397c25adfa9729257623ada96ef8ce96 (patch)
tree      732d3a71d876cfa974b307c2e3f3b6d8f8caf8f5 /kernel
parent    cc75b92d11384ba14f93828a2a0040344ae872e7 (diff)
parent    de0cf899bbf06b6f64a5dce9c59d74c41b6b4232 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: run_rebalance_domains: s/SCHED_IDLE/CPU_IDLE/
  sched: fix sleeper bonus
  sched: make global code static
Diffstat (limited to 'kernel')
 kernel/sched.c      | 48 ++++++++++++++++++++++----------------------
 kernel/sched_fair.c | 12 ++++++------
 2 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6247e4a..45e17b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3106,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                         if (need_resched())
                                 break;
 
-                        rebalance_domains(balance_cpu, SCHED_IDLE);
+                        rebalance_domains(balance_cpu, CPU_IDLE);
 
                         rq = cpu_rq(balance_cpu);
                         if (time_after(this_rq->next_balance, rq->next_balance))
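
The one-line fix above corrects an argument mix-up: rebalance_domains() expects an enum cpu_idle_type value, while SCHED_IDLE is the unrelated scheduling-policy constant that happens to compile in its place. A minimal sketch of the mismatch, with the constant values assumed from 2.6.23-era headers rather than taken from this diff:

    #include <stdio.h>

    #define SCHED_IDLE 5            /* scheduling-policy constant (assumed value) */

    enum cpu_idle_type {            /* what rebalance_domains() takes (assumed layout) */
            CPU_IDLE,               /* == 0: CPU is idle, balance accordingly */
            CPU_NOT_IDLE,
            CPU_NEWLY_IDLE,
            CPU_MAX_IDLE_TYPES
    };

    int main(void)
    {
            /* Passing SCHED_IDLE (5) where an idle type in [0, 3] was meant
             * compiles silently, because C enums accept any int. */
            printf("SCHED_IDLE=%d, CPU_IDLE=%d\n", SCHED_IDLE, CPU_IDLE);
            return 0;
    }

Because the two constants differ numerically, the old call told the balancer something other than "this CPU is idle", which is why a simple s/SCHED_IDLE/CPU_IDLE/ is the whole fix.
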
@@ -6328,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static int arch_reinit_sched_domains(void)
 {
         int err;
 
@@ -6357,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
         return ret ? ret : count;
 }
 
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
-{
-        int err = 0;
-
-#ifdef CONFIG_SCHED_SMT
-        if (smt_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_smt_power_savings.attr);
-#endif
-#ifdef CONFIG_SCHED_MC
-        if (!err && mc_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_mc_power_savings.attr);
-#endif
-        return err;
-}
-#endif
-
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {
@@ -6385,8 +6367,8 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 0);
 }
-SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-            sched_mc_power_savings_store);
+static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+            sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
@@ -6399,8 +6381,26 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 1);
 }
-SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
-            sched_smt_power_savings_store);
+static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+            sched_smt_power_savings_store);
+#endif
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+        int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+        if (smt_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+        if (!err && mc_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_mc_power_savings.attr);
+#endif
+        return err;
+}
 #endif
 
 /*
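
The reshuffle above is the "sched: make global code static" part of the merge: the two SYSDEV_ATTR attributes lose external linkage, and sched_create_sysfs_power_savings_entries(), which takes their addresses, moves below them, since a static file-scope identifier must be defined (or at least declared) before use within the same translation unit. A minimal sketch of that ordering constraint, with made-up names:

    #include <stdio.h>

    /* Definitions now come first; previously they were non-static and
     * presumably visible through an external declaration, so the
     * function could sit above them. */
    static int attr_sched_smt = 1;      /* stand-in for a static SYSDEV_ATTR */
    static int attr_sched_mc = 2;

    /* Moved below the definitions so both identifiers are in scope. */
    static int create_entries(void)
    {
            return attr_sched_smt + attr_sched_mc;
    }

    int main(void)
    {
            printf("%d\n", create_entries());   /* prints 3 */
            return 0;
    }
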
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c5af389..fedbb51 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -75,7 +75,7 @@ enum {
 
 unsigned int sysctl_sched_features __read_mostly =
                 SCHED_FEAT_FAIR_SLEEPERS        *1 |
-                SCHED_FEAT_SLEEPER_AVG          *1 |
+                SCHED_FEAT_SLEEPER_AVG          *0 |
                 SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                 SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                 SCHED_FEAT_START_DEBIT          *1 |
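
sysctl_sched_features is a bitmask of per-feature enable bits; spelling each constant multiplied by 1 or 0 is a compact way to write the default mask, so this hunk turns SLEEPER_AVG off by default while leaving it togglable at runtime. A sketch of the idiom, with bit values assumed (the defining enum is not part of this diff):

    #include <stdio.h>

    enum {
            SCHED_FEAT_FAIR_SLEEPERS        = 1,    /* assumed power-of-two bits */
            SCHED_FEAT_SLEEPER_AVG          = 2,
            SCHED_FEAT_SLEEPER_LOAD_AVG     = 4,
    };

    /* Multiplying a bit by 0 drops it from the default mask. */
    unsigned int sysctl_sched_features =
                    SCHED_FEAT_FAIR_SLEEPERS        *1 |
                    SCHED_FEAT_SLEEPER_AVG          *0 |
                    SCHED_FEAT_SLEEPER_LOAD_AVG     *1;

    int main(void)
    {
            printf("SLEEPER_AVG on? %d\n",
                   !!(sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG));  /* 0 */
            return 0;
    }
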
@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
         if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-                delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-                                        curr->load.weight, lw);
-                if (unlikely(delta > cfs_rq->sleeper_bonus))
-                        delta = cfs_rq->sleeper_bonus;
-
+                delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+                delta = calc_delta_mine(delta, curr->load.weight, lw);
+                delta = min((u64)delta, cfs_rq->sleeper_bonus);
                 cfs_rq->sleeper_bonus -= delta;
                 delta_mine -= delta;
         }
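
This hunk is the core of "sched: fix sleeper bonus". The old code could convert the entire accumulated sleeper_bonus in a single update; the new code first caps the raw time at delta_exec (this update's runtime), weights it, then clamps again so the subtraction can never drive sleeper_bonus negative. A standalone sketch of the clamping, where scale_delta() is a simplified stand-in for the kernel's calc_delta_mine():

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t min_u64(uint64_t a, uint64_t b)
    {
            return a < b ? a : b;
    }

    /* Scale 'delta' by weight/total, a simplified calc_delta_mine(). */
    static uint64_t scale_delta(uint64_t delta, unsigned weight, unsigned total)
    {
            return delta * weight / total;
    }

    static uint64_t pay_back_bonus(uint64_t *sleeper_bonus, uint64_t delta_exec,
                                   unsigned weight, unsigned total)
    {
            uint64_t delta;

            delta = min_u64(*sleeper_bonus, delta_exec);    /* cap at this update */
            delta = scale_delta(delta, weight, total);      /* weight the share */
            delta = min_u64(delta, *sleeper_bonus);         /* never underflow */
            *sleeper_bonus -= delta;
            return delta;
    }

    int main(void)
    {
            uint64_t bonus = 1000;

            printf("paid %llu, left %llu\n",
                   (unsigned long long)pay_back_bonus(&bonus, 300, 1, 2),
                   (unsigned long long)bonus);              /* paid 150, left 850 */
            return 0;
    }
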
@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
          * Track the amount of bonus we've given to sleepers:
          */
         cfs_rq->sleeper_bonus += delta_fair;
+        if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+                cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
 
         schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
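
The final hunk bounds the accumulator itself: however many sleepers wake at once, the banked bonus saturates at sysctl_sched_runtime_limit instead of growing without bound. A minimal sketch of the saturating add; the limit value here is an assumption, not taken from this diff:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t sysctl_sched_runtime_limit = 40000000ULL;  /* assumed value */

    static void add_bonus(uint64_t *sleeper_bonus, uint64_t delta_fair)
    {
            *sleeper_bonus += delta_fair;
            if (*sleeper_bonus > sysctl_sched_runtime_limit)
                    *sleeper_bonus = sysctl_sched_runtime_limit;
    }

    int main(void)
    {
            uint64_t bonus = 0;

            add_bonus(&bonus, 50000000ULL);     /* clamped to the limit */
            printf("%llu\n", (unsigned long long)bonus);
            return 0;
    }
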