-rw-r--r--	kernel/sched.c        | 18
-rw-r--r--	kernel/sched_debug.c  |  2
-rw-r--r--	kernel/sched_fair.c   |  2
3 files changed, 9 insertions, 13 deletions
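This change drops the one-member struct load_stat wrapper and embeds the struct load_weight directly in struct rq, so every access of the form rq->ls.load.weight becomes rq->load.weight. For reference, a sketch of the load type involved (field layout assumed from the scheduler headers of this era; it is not part of this diff):

	/* assumed layout, for reference only */
	struct load_weight {
		unsigned long weight, inv_weight;
	};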
diff --git a/kernel/sched.c b/kernel/sched.c
index 198b07a6d038..3a4ac0b75f2d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -171,10 +171,6 @@ struct rt_prio_array {
 	struct list_head queue[MAX_RT_PRIO];
 };
 
-struct load_stat {
-	struct load_weight load;
-};
-
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
@@ -236,7 +232,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
-	struct load_stat ls;	/* capture load from *all* tasks on this cpu */
+	struct load_weight load;	/* capture load from *all* tasks on this cpu */
 	unsigned long nr_load_updates;
 	u64 nr_switches;
 
@@ -831,7 +827,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  * Update delta_exec, delta_fair fields for rq.
  *
  * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
+ * total load (rq->load.weight) on the runqueue, while
  * delta_exec advances at the same rate as wall-clock (provided
  * cpu is not idle).
  *
@@ -839,17 +835,17 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  * runqueue over any given interval. This (smoothened) load is used
  * during load balance.
  *
- * This function is called /before/ updating rq->ls.load
+ * This function is called /before/ updating rq->load
  * and when switching tasks.
  */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
-	update_load_add(&rq->ls.load, p->se.load.weight);
+	update_load_add(&rq->load, p->se.load.weight);
 }
 
 static inline void dec_load(struct rq *rq, const struct task_struct *p)
 {
-	update_load_sub(&rq->ls.load, p->se.load.weight);
+	update_load_sub(&rq->load, p->se.load.weight);
 }
 
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
@@ -996,7 +992,7 @@ inline int task_curr(const struct task_struct *p)
 /* Used instead of source_load when we know the type == 0 */
 unsigned long weighted_cpuload(const int cpu)
 {
-	return cpu_rq(cpu)->ls.load.weight;
+	return cpu_rq(cpu)->load.weight;
 }
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -1979,7 +1975,7 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-	unsigned long this_load = this_rq->ls.load.weight;
+	unsigned long this_load = this_rq->load.weight;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
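The comment updated above describes the fair clock advancing at a rate inversely proportional to the total runqueue weight, with inc_load()/dec_load() keeping that weight in sync as tasks enter and leave the queue. A minimal sketch of the relationship and of the update_load_add()/update_load_sub() helpers called here (the helper bodies and the NICE_0_LOAD scaling are assumptions based on the surrounding code, not part of this diff):

	/* sketch: fair-clock progress for a wall-clock slice, at nice-0 granularity */
	static u64 sketch_delta_fair(u64 delta_exec, struct rq *rq)
	{
		return delta_exec * NICE_0_LOAD / rq->load.weight;
	}

	/* sketch: the helpers only adjust the aggregate weight on the runqueue */
	static inline void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
	}

	static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
	{
		lw->weight -= dec;
	}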
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 7a61706637c7..62965f0ae37c 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -174,7 +174,7 @@ static void print_cpu(struct seq_file *m, int cpu)
 
 	P(nr_running);
 	SEQ_printf(m, " .%-30s: %lu\n", "load",
-		   rq->ls.load.weight);
+		   rq->load.weight);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce79eb0f0660..72f202a8be96 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,7 +652,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
-	if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
+	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
 		se->slice_max = max(se->slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
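The guard changed above only records slice_max when the total runqueue weight is at least twice the entity's own weight. As a worked example (assuming the usual nice-0 weight of 1024): a nice-0 task needs rq_of(cfs_rq)->load.weight >= 2048, i.e. at least one other runnable task of equal or higher weight, before its longest-slice statistic is updated; a task running alone, or only alongside lower-weight tasks, leaves slice_max untouched.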