author	Yuyang Du <yuyang.du@intel.com>	2015-12-16 18:34:27 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-01-06 05:06:29 -0500
commit	0905f04eb21fc1c2e690bed5d0418a061d56c225 (patch)
tree	453158d4e26692cbd1244bf14fdc7dd2ffd9ac96 /kernel/sched
parent	5a1078043f844074cbd53981432778a8d5dd56e9 (diff)
sched/fair: Fix new task's load avg removed from source CPU in wake_up_new_task()
If a newly created task is selected to go to a different CPU in fork
balance when it wakes up for the first time, its load averages should
not be removed from the source CPU, since they were never added to it
in the first place. The same applies to a never-used group entity.

Fix it in remove_entity_load_avg(): when the entity's last_update_time
is 0, simply return. This precisely identifies the case in question,
because in all other migrations last_update_time is set to 0 only
after remove_entity_load_avg() has run.

Reported-by: Steve Muckle <steve.muckle@linaro.org>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
[peterz: cfs_rq_last_update_time]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20151216233427.GJ28098@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
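As an aside for readers unfamiliar with the load-tracking code, the following is a small, self-contained user-space sketch (plain C, not kernel code; the struct and field names merely mimic cfs_rq's removed_load_avg and sched_avg's last_update_time) of the accounting rule the patch enforces: an entity whose last_update_time is still 0 was never attached to the source cfs_rq, so nothing may be queued for removal there.

#include <stdio.h>

/* Toy model of the per-runqueue removed-load accounting (not kernel code). */
struct toy_cfs_rq {
	long removed_load_avg;	/* load queued for removal by departing tasks */
};

struct toy_entity {
	unsigned long long last_update_time;	/* 0 => never attached to a cfs_rq */
	long load_avg;
};

/* Mirrors the shape of the fixed remove_entity_load_avg(): skip
 * never-attached entities instead of charging a bogus removal. */
static void toy_remove_entity_load_avg(struct toy_cfs_rq *cfs_rq,
				       struct toy_entity *se)
{
	if (se->last_update_time == 0)
		return;	/* new task / unused group entity: nothing to remove */

	cfs_rq->removed_load_avg += se->load_avg;
}

int main(void)
{
	struct toy_cfs_rq src = { .removed_load_avg = 0 };
	struct toy_entity new_task = { .last_update_time = 0, .load_avg = 512 };

	/* Fork balance picks another CPU before the task ever ran here. */
	toy_remove_entity_load_avg(&src, &new_task);

	/* Without the check, 512 would wrongly be queued for removal. */
	printf("removed_load_avg on source cfs_rq: %ld\n", src.removed_load_avg);
	return 0;
}

Compiled and run, this prints 0 for the source runqueue; dropping the early return would instead queue 512 for removal from a runqueue that never carried that load.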
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	38
1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 93efb962c2e1..1926606ece80 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2900,27 +2900,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 last_update_time;
-
 #ifndef CONFIG_64BIT
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
 	u64 last_update_time_copy;
+	u64 last_update_time;
 
 	do {
 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
 		smp_rmb();
 		last_update_time = cfs_rq->avg.last_update_time;
 	} while (last_update_time != last_update_time_copy);
+
+	return last_update_time;
+}
 #else
-	last_update_time = cfs_rq->avg.last_update_time;
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->avg.last_update_time;
+}
 #endif
 
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 last_update_time;
+
+	/*
+	 * Newly created task or never used group entity should not be removed
+	 * from its (source) cfs_rq
+	 */
+	if (se->avg.last_update_time == 0)
+		return;
+
+	last_update_time = cfs_rq_last_update_time(cfs_rq);
+
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
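A note on the helper the patch factors out: on !CONFIG_64BIT kernels a 64-bit load of avg.last_update_time can tear, so the updater publishes load_last_update_time_copy behind an smp_wmb() and the reader in cfs_rq_last_update_time() re-reads until value and copy agree. A rough user-space analogue of that reader/writer pairing, with C11 atomics standing in for the kernel barriers (the names here are illustrative, not the kernel's), looks like this:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-ins for cfs_rq->avg.last_update_time and
 * cfs_rq->load_last_update_time_copy. */
static _Atomic uint64_t last_update_time;
static _Atomic uint64_t last_update_time_copy;

/* Reader retries until the value and its copy match, like the
 * !CONFIG_64BIT branch of cfs_rq_last_update_time(). */
static uint64_t read_last_update_time(void)
{
	uint64_t copy, val;

	do {
		copy = atomic_load_explicit(&last_update_time_copy,
					    memory_order_acquire);
		val = atomic_load_explicit(&last_update_time,
					    memory_order_acquire);
	} while (val != copy);

	return val;
}

int main(void)
{
	/* Writer side: publish the value first, then its copy
	 * (the kernel orders the two stores with smp_wmb()). */
	atomic_store_explicit(&last_update_time, 12345,
			      memory_order_release);
	atomic_store_explicit(&last_update_time_copy, 12345,
			      memory_order_release);

	return read_last_update_time() == 12345 ? 0 : 1;
}

On a 64-bit build the 64-bit load cannot tear, so the helper collapses to a plain read, which is why the patch provides the two #ifdef variants.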