path: root/kernel/sched/fair.c
author	Peter Zijlstra <peterz@infradead.org>	2018-02-02 04:27:00 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-03-09 01:59:20 -0500
commit	ea14b57e8a181ac0561eba7a787e088f8c89f822 (patch)
tree	b7805b389ea5002bf24815e9ffccd45ba982223c /kernel/sched/fair.c
parent	00357f5ec5d67a52a175da6f29f85c2c19d59bc8 (diff)
sched/cpufreq: Provide migration hint
It was suggested that a migration hint might be useful for the CPU-freq governors.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
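[Editor's note: a minimal sketch, not part of this patch, of how a cpufreq governor's update hook could consume the new hint. The names example_update_util, example_update_util_data and example_start are hypothetical; struct update_util_data, cpufreq_add_update_util_hook() and the SCHED_CPUFREQ_MIGRATION flag are the existing interfaces this patch builds on.]

#include <linux/percpu.h>
#include <linux/sched/cpufreq.h>

/* Hypothetical per-CPU hook data; real governors embed this in their own state. */
static DEFINE_PER_CPU(struct update_util_data, example_update_util_data);

static void example_update_util(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	/*
	 * With this patch, SCHED_CPUFREQ_MIGRATION is passed when a task is
	 * enqueued on a new CPU after a migration.  A governor may use it to
	 * re-evaluate the frequency immediately instead of waiting for its
	 * normal rate limit to expire.
	 */
	if (flags & SCHED_CPUFREQ_MIGRATION) {
		/* e.g. bypass the rate limit and kick a frequency update */
	}
}

/* Registration, typically done when the governor starts on a CPU: */
static void example_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu,
				     &per_cpu(example_update_util_data, cpu),
				     example_update_util);
}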
Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 494d5db9a6cd..e8f5efe2936c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	 * For !fair tasks do:
 	 *
 	update_cfs_rq_load_avg(now, cfs_rq);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	switched_from_fair(rq, p);
 	 *
 	 * such that the next switched_to_fair() has the
@@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq) {
+	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq, 0);
+		cpufreq_update_util(rq, flags);
 	}
 }
 
@@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 #endif
 
 	if (decayed)
-		cfs_rq_util_change(cfs_rq);
+		cfs_rq_util_change(cfs_rq, 0);
 
 	return decayed;
 }
@@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, flags);
 }
 
 /**
@@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 /*
@@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
-		attach_entity_load_avg(cfs_rq, se);
+		/*
+		 * DO_ATTACH means we're here from enqueue_entity().
+		 * !last_update_time means we've passed through
+		 * migrate_task_rq_fair() indicating we migrated.
+		 *
+		 * IOW we're enqueueing a task on a new CPU.
+		 */
+		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed && (flags & UPDATE_TG))
@@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-	cfs_rq_util_change(cfs_rq);
+	cfs_rq_util_change(cfs_rq, 0);
 }
 
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se);
+	attach_entity_load_avg(cfs_rq, se, 0);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }