author     Yuyang Du <yuyang.du@intel.com>    2015-10-12 21:18:23 -0400
committer  Ingo Molnar <mingo@kernel.org>     2015-10-20 04:13:35 -0400
commit     3e386d56bafbb6d2540b49367444997fc671ea69 (patch)
tree       24e69c43e299f8319f61be60d01914b116f2f21d
parent     fde7d22e01aa0d252fc5c95fa11f0dac35a4dd59 (diff)
sched/fair: Update task group's load_avg after task migration
When cfs_rq->removed_load_avg is set (i.e. after a task has migrated away from
this cfs_rq), we need to update the cfs_rq's contribution to the group's load_avg.

This should not add much overhead to the tg update, because in most cases the
cfs_rq has already decayed its load_avg.
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1444699103-20272-2-git-send-email-yuyang.du@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/fair.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc62c5096e54..9a5e60fe721a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-	int decayed;
 	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed, removed = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed = 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return decayed;
+	return decayed || removed;
 }
 
 /* Update task and its cfs_rq load average */
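For context on why the return value matters: the caller, update_load_avg() (the
function headed by the trailing "Update task and its cfs_rq load average"
comment above), uses it to decide whether to propagate the change into the task
group's load_avg. The sketch below is not part of this patch; it is a rough
reconstruction of the caller in kernels of this era, and the exact arguments
passed to __update_load_avg() are assumptions.

/* Sketch of the caller that consumes update_cfs_rq_load_avg()'s return value.
 * Not part of this patch; argument details are assumed, not verified against
 * this exact tree.
 */
static inline void update_load_avg(struct sched_entity *se, int update_tg)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 now = cfs_rq_clock_task(cfs_rq);
	int cpu = cpu_of(rq_of(cfs_rq));

	/* Decay/accumulate this entity's own load average. */
	__update_load_avg(now, cpu, &se->avg,
			  se->on_rq * scale_load_down(se->load.weight),
			  cfs_rq->curr == se, NULL);

	/*
	 * With this patch, update_cfs_rq_load_avg() also returns nonzero when
	 * removed_load_avg has been folded in, so a task migrating away from
	 * this cfs_rq now triggers update_tg_load_avg() as well.
	 */
	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
		update_tg_load_avg(cfs_rq, 0);
}

Before the patch, only a decay of cfs_rq->avg would make the condition true, so
the group's load_avg could miss the load removed by a migration until the next
decay happened to fire.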