author	Steve Muckle <steve.muckle@linaro.org>	2016-03-21 20:21:07 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-04-23 08:20:35 -0400
commit	21e96f88776deead303ecd30a17d1d7c2a1776e3 (patch)
tree	28ad0c820987a4dcd36ab6fd56f954d980f240c8 /kernel/sched
parent	1f621e028baf391f6684003e32e009bc934b750f (diff)
sched/fair: Move cpufreq hook to update_cfs_rq_load_avg()
The cpufreq hook should be called whenever the root cfs_rq utilization changes, so update_cfs_rq_load_avg() is a better place for it. The current location is not invoked in the enqueue_entity() or update_blocked_averages() paths.

Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Michael Turquette <mturquette@baylibre.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1458606068-7476-1-git-send-email-smuckle@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
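As an aside, the design point can be illustrated outside fair.c with a minimal userspace sketch in plain C. Every identifier below (toy_cfs_rq, toy_update_cfs_rq_load_avg, toy_freq_hook, and the caller names) is invented for illustration and is not kernel API; the sketch only mirrors the idea that the frequency hook fires in the single function where the root runqueue's utilization is recomputed, so every caller path reaches it without remembering to call the hook itself.

#include <stdio.h>

/* Toy stand-ins; all names are invented for illustration, not kernel API. */
struct toy_cfs_rq {
	unsigned long util_avg;		/* tracked utilization of the root rq */
	unsigned long capacity;		/* analogue of rq->cpu_capacity_orig */
};

/* Invented analogue of the cpufreq hook the patch relocates. */
static void toy_freq_hook(unsigned long util, unsigned long max)
{
	printf("freq hook: util=%lu max=%lu\n", util, max);
}

/*
 * Single update point, like update_cfs_rq_load_avg() after this patch:
 * it refreshes the average and notifies the hook, so every caller below
 * keeps the frequency side informed without doing anything extra.
 */
static int toy_update_cfs_rq_load_avg(struct toy_cfs_rq *cfs_rq,
				      unsigned long new_util)
{
	int decayed = (cfs_rq->util_avg != new_util);

	cfs_rq->util_avg = new_util;
	/* same clamping idea as min(sa->util_avg, max) in the patch */
	toy_freq_hook(new_util < cfs_rq->capacity ? new_util : cfs_rq->capacity,
		      cfs_rq->capacity);
	return decayed;
}

/* Caller paths standing in for enqueue and blocked-load decay. */
static void toy_enqueue_entity(struct toy_cfs_rq *cfs_rq)
{
	toy_update_cfs_rq_load_avg(cfs_rq, 700);
}

static void toy_update_blocked_averages(struct toy_cfs_rq *cfs_rq)
{
	toy_update_cfs_rq_load_avg(cfs_rq, 350);
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { .util_avg = 0, .capacity = 1024 };

	toy_enqueue_entity(&cfs_rq);		/* hook fires on enqueue */
	toy_update_blocked_averages(&cfs_rq);	/* and on blocked-load decay */
	return 0;
}

Compiled with any C99 compiler, the mock prints one hook invocation per update path, which is the property the patch restores for the enqueue_entity() and update_blocked_averages() paths.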
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	50
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e371f43fc80..6df80d47a525 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2878,7 +2878,9 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
+	struct rq *rq = rq_of(cfs_rq);
 	int decayed, removed = 0;
+	int cpu = cpu_of(rq);
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
@@ -2893,7 +2895,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
 	}
 
-	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+	decayed = __update_load_avg(now, cpu, sa,
 		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
 
 #ifndef CONFIG_64BIT
@@ -2901,28 +2903,6 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return decayed || removed;
-}
-
-/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 now = cfs_rq_clock_task(cfs_rq);
-	struct rq *rq = rq_of(cfs_rq);
-	int cpu = cpu_of(rq);
-
-	/*
-	 * Track task load average for carrying it to new CPU after migrated, and
-	 * track group sched_entity load average for task_h_load calc in migration
-	 */
-	__update_load_avg(now, cpu, &se->avg,
-		se->on_rq * scale_load_down(se->load.weight),
-		cfs_rq->curr == se, NULL);
-
-	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
-		update_tg_load_avg(cfs_rq, 0);
-
 	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
 		unsigned long max = rq->cpu_capacity_orig;
 
@@ -2943,8 +2923,30 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 		 * See cpu_util().
 		 */
 		cpufreq_update_util(rq_clock(rq),
-				    min(cfs_rq->avg.util_avg, max), max);
+				    min(sa->util_avg, max), max);
 	}
+
+	return decayed || removed;
+}
+
+/* Update task and its cfs_rq load average */
+static inline void update_load_avg(struct sched_entity *se, int update_tg)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 now = cfs_rq_clock_task(cfs_rq);
+	struct rq *rq = rq_of(cfs_rq);
+	int cpu = cpu_of(rq);
+
+	/*
+	 * Track task load average for carrying it to new CPU after migrated, and
+	 * track group sched_entity load average for task_h_load calc in migration
+	 */
+	__update_load_avg(now, cpu, &se->avg,
+		se->on_rq * scale_load_down(se->load.weight),
+		cfs_rq->curr == se, NULL);
+
+	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+		update_tg_load_avg(cfs_rq, 0);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
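For completeness, the cpufreq_update_util(time, util, max) call moved above feeds a callback that a cpufreq governor registers; the callback is expected to derive a frequency request from util relative to max. The snippet below is a hedged userspace mock of that contract, not the real registration API of this kernel series; the struct and function names, the proportional policy, and the 2 GHz limit are all hypothetical.

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace mock of a utilization-update callback.  The real hook hands
 * (time, util, max) to a governor-registered function; everything here
 * is illustrative only.
 */
struct toy_update_util_data {
	void (*func)(struct toy_update_util_data *data, uint64_t time_ns,
		     unsigned long util, unsigned long max);
	unsigned long max_freq_khz;	/* hypothetical policy limit */
};

static void toy_governor_update(struct toy_update_util_data *data,
				uint64_t time_ns, unsigned long util,
				unsigned long max)
{
	/* simple proportional policy: freq = max_freq * util / max */
	unsigned long target = data->max_freq_khz * util / max;

	printf("t=%llu ns util=%lu/%lu -> request %lu kHz\n",
	       (unsigned long long)time_ns, util, max, target);
}

int main(void)
{
	struct toy_update_util_data gov = {
		.func = toy_governor_update,
		.max_freq_khz = 2000000,	/* pretend 2 GHz CPU */
	};

	/* roughly what the scheduler-side call would hand to the callback */
	gov.func(&gov, 1000000, 768, 1024);	/* ~75% busy -> ~1.5 GHz */
	return 0;
}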