 kernel/sched.c       | 18 +++++++-----------
 kernel/sched_debug.c |  2 +-
 kernel/sched_fair.c  |  2 +-
 3 files changed, 9 insertions(+), 13 deletions(-)
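Taken together, the change removes struct load_stat, which by this point
wrapped nothing but a single struct load_weight, embeds that load_weight
directly in struct rq, and shortens every rq->ls.load access to rq->load.
A minimal before/after sketch of the affected fields (the elisions are mine):

    /* before: one level of needless indirection */
    struct load_stat {
            struct load_weight load;
    };

    struct rq {
            /* ... */
            struct load_stat ls;            /* read as rq->ls.load.weight */
            /* ... */
    };

    /* after: the aggregate load_weight lives in the runqueue itself */
    struct rq {
            /* ... */
            struct load_weight load;        /* read as rq->load.weight */
            /* ... */
    };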
diff --git a/kernel/sched.c b/kernel/sched.c
index 198b07a..3a4ac0b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -171,10 +171,6 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};

-struct load_stat {
- struct load_weight load;
-};
-
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
@@ -236,7 +232,7 @@ struct rq {
#ifdef CONFIG_NO_HZ
unsigned char in_nohz_recently;
#endif
- struct load_stat ls; /* capture load from *all* tasks on this cpu */
+ struct load_weight load; /* capture load from *all* tasks on this cpu */
unsigned long nr_load_updates;
u64 nr_switches;
@@ -831,7 +827,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
* Update delta_exec, delta_fair fields for rq.
*
* delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
+ * total load (rq->load.weight) on the runqueue, while
* delta_exec advances at the same rate as wall-clock (provided
* cpu is not idle).
*
@@ -839,17 +835,17 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
* runqueue over any given interval. This (smoothened) load is used
* during load balance.
*
- * This function is called /before/ updating rq->ls.load
+ * This function is called /before/ updating rq->load
* and when switching tasks.
*/
static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
- update_load_add(&rq->ls.load, p->se.load.weight);
+ update_load_add(&rq->load, p->se.load.weight);
}

static inline void dec_load(struct rq *rq, const struct task_struct *p)
{
- update_load_sub(&rq->ls.load, p->se.load.weight);
+ update_load_sub(&rq->load, p->se.load.weight);
}
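
inc_load()/dec_load() keep the runqueue-wide weight in step as tasks enter and
leave; per the comment above, the fair clock then advances inversely to that
weight, roughly delta_fair = delta_exec * NICE_0_LOAD / rq->load.weight. The
helpers called here are not part of this diff; the sketch below is a
reconstruction of how update_load_add()/update_load_sub() behave in kernels of
this period, so treat the inv_weight detail as an assumption:

    struct load_weight {
            unsigned long weight;           /* sum of runnable task weights */
            unsigned long inv_weight;       /* cached reciprocal; 0 == stale */
    };

    static inline void update_load_add(struct load_weight *lw, unsigned long inc)
    {
            lw->weight += inc;
            lw->inv_weight = 0;             /* invalidate cached reciprocal */
    }

    static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
    {
            lw->weight -= dec;
            lw->inv_weight = 0;
    }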

static void inc_nr_running(struct task_struct *p, struct rq *rq)
@@ -996,7 +992,7 @@ inline int task_curr(const struct task_struct *p)
/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
- return cpu_rq(cpu)->ls.load.weight;
+ return cpu_rq(cpu)->load.weight;
}
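
weighted_cpuload() is the type == 0 shortcut the comment refers to: the full
source_load()/target_load() paths blend this instantaneous weight with the
decayed cpu_load[] history. A self-contained model of that relationship (the
struct and function names here are mine, not kernel API):

    #define CPU_LOAD_IDX_MAX 5

    struct cpu_sample {
            unsigned long weight;                      /* rq->load.weight */
            unsigned long cpu_load[CPU_LOAD_IDX_MAX];  /* decayed history */
    };

    /*
     * Low guess at a migration source's load: type 0 is the raw
     * weight (what weighted_cpuload() returns); higher types take
     * the smaller of the history entry and the current weight.
     */
    static unsigned long model_source_load(const struct cpu_sample *s, int type)
    {
            if (type == 0)
                    return s->weight;
            if (s->cpu_load[type - 1] < s->weight)
                    return s->cpu_load[type - 1];
            return s->weight;
    }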

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -1979,7 +1975,7 @@ unsigned long nr_active(void)
*/
static void update_cpu_load(struct rq *this_rq)
{
- unsigned long this_load = this_rq->ls.load.weight;
+ unsigned long this_load = this_rq->load.weight;
int i, scale;

this_rq->nr_load_updates++;
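
The remainder of update_cpu_load() (not shown in this hunk) folds this_load
into the cpu_load[] array of decaying averages, where index i reacts with a
time constant of roughly 2^i ticks. A standalone model of that loop; the
rounding detail is reconstructed from the same kernel rather than this diff:

    #define CPU_LOAD_IDX_MAX 5

    /* new = (old * (2^i - 1) + sample) / 2^i for each index i */
    static void fold_cpu_load(unsigned long cpu_load[CPU_LOAD_IDX_MAX],
                              unsigned long this_load)
    {
            int i, scale;

            for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                    unsigned long old_load = cpu_load[i];
                    unsigned long new_load = this_load;

                    /* round up while rising, so the average can actually
                     * reach the new level instead of stalling just below */
                    if (new_load > old_load)
                            new_load += scale - 1;
                    cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
            }
    }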
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 7a61706..62965f0 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -174,7 +174,7 @@ static void print_cpu(struct seq_file *m, int cpu)
P(nr_running);
SEQ_printf(m, " .%-30s: %lu\n", "load",
- rq->ls.load.weight);
+ rq->load.weight);
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
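
Only the expression feeding the debug line changes; given the " .%-30s: %lu\n"
format above, the /proc/sched_debug output keeps the same shape, e.g. (weight
value hypothetical, two nice-0 tasks runnable):

     .load                          : 2048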
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce79eb0..72f202a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,7 +652,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* least twice that of our own weight (i.e. dont track it
* when there are only lesser-weight tasks around):
*/
- if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
+ if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
se->slice_max = max(se->slice_max,
se->sum_exec_runtime - se->prev_sum_exec_runtime);
}
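
So the schedstats slice_max bookkeeping now tests rq->load.weight directly: a
maximum-slice sample is recorded only when the runqueue carried at least the
entity's own weight in competition. A small self-contained model (NICE_0_LOAD
is 1024 in this kernel's weight table; the function name is mine):

    #include <stdbool.h>

    #define NICE_0_LOAD 1024UL      /* weight of a nice-0 task */

    /* Track slice_max only if total rq weight >= 2x our own weight. */
    static bool slice_max_worth_tracking(unsigned long rq_weight,
                                         unsigned long se_weight)
    {
            return rq_weight >= 2 * se_weight;
    }

    /*
     * slice_max_worth_tracking(2 * NICE_0_LOAD, NICE_0_LOAD) -> true:
     * another nice-0 (or heavier) task ran alongside us.
     * slice_max_worth_tracking(NICE_0_LOAD + 15, NICE_0_LOAD) -> false:
     * only a lightweight nice-19 task (weight 15) kept us company.
     */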