author		Juri Lelli <juri.lelli@arm.com>	2017-12-04 05:23:19 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-01-10 05:30:32 -0500
commit		e0367b12674bf4420870cd0237e3ebafb2ec9593
tree		5a7353c1283456ec521c98f7f4cf759ec396667d
parent		d4edd662ac1657126df7ffd74a278958b133a77d
sched/deadline: Move CPU frequency selection triggering points
Since SCHED_DEADLINE doesn't track a utilization signal (but reserves a
fraction of CPU bandwidth to tasks admitted to the system), there is no
point in evaluating frequency changes during each tick event.

Move frequency selection triggering points to where running_bw changes.

Co-authored-by: Claudio Scordino <claudio@evidence.eu.com>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@santannapisa.it>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: alessio.balsini@arm.com
Cc: bristot@redhat.com
Cc: dietmar.eggemann@arm.com
Cc: joelaf@google.com
Cc: juri.lelli@redhat.com
Cc: mathieu.poirier@linaro.org
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: rjw@rjwysocki.net
Cc: rostedt@goodmis.org
Cc: tkjos@android.com
Cc: tommaso.cucinotta@santannapisa.it
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/20171204102325.5110-3-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
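[Editor's note, not part of the commit: the reserved bandwidth itself acts
as the "utilization" signal for DL, so a governor only needs to re-evaluate
the frequency when running_bw changes. A minimal sketch of how a
schedutil-style governor could map running_bw to a frequency request;
BW_SHIFT/BW_UNIT mirror the fixed-point parameters in kernel/sched/sched.h,
and dl_freq_request() is a hypothetical helper name, not kernel code.]

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)

/* Hypothetical sketch: scale the CPU's maximum frequency by the
 * fraction of CPU bandwidth currently reserved by runnable DL tasks.
 * running_bw is a fixed-point fraction of one CPU (BW_SHIFT bits). */
static unsigned long dl_freq_request(unsigned long max_freq, u64 running_bw)
{
	return (unsigned long)(((u64)max_freq * running_bw) >> BW_SHIFT);
}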
 kernel/sched/deadline.c |  7
 kernel/sched/sched.h    | 12
 2 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4c666dbe5038..f584837b32e7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -86,6 +86,8 @@ void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 	dl_rq->running_bw += dl_bw;
 	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
 	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
 }
 
 static inline
@@ -98,6 +100,8 @@ void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
 	if (dl_rq->running_bw > old)
 		dl_rq->running_bw = 0;
+	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
 }
 
 static inline
@@ -1134,9 +1138,6 @@ static void update_curr_dl(struct rq *rq)
 		return;
 	}
 
-	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
-	cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
-
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
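[Editor's note: with this change, a frequency re-evaluation happens exactly
when a deadline task enters or leaves contention, instead of on every tick
in update_curr_dl(). A simplified illustration of the affected call sites
follows; the real enqueue_task_dl()/dequeue_task_dl() paths also handle the
0-lag "inactive" timer and migrations, so this is a sketch, not the patched
code.]

/* Sketch only: where running_bw (and hence the cpufreq kick) changes. */
static void enqueue_task_dl_sketch(struct rq *rq, struct task_struct *p)
{
	/* task becomes contending: reserve its bandwidth, kick cpufreq */
	add_running_bw(p->dl.dl_bw, &rq->dl);
	/* ... rbtree insertion elided ... */
}

static void dequeue_task_dl_sketch(struct rq *rq, struct task_struct *p)
{
	/* ... rbtree removal elided ... */
	/* task stops contending: release its bandwidth, kick cpufreq */
	sub_running_bw(p->dl.dl_bw, &rq->dl);
}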
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 136ab500daeb..863964fbcfd2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2055,14 +2055,14 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
  * The way cpufreq is currently arranged requires it to evaluate the CPU
  * performance state (frequency/voltage) on a regular basis to prevent it from
  * being stuck in a completely inadequate performance level for too long.
- * That is not guaranteed to happen if the updates are only triggered from CFS,
- * though, because they may not be coming in if RT or deadline tasks are active
- * all the time (or there are RT and DL tasks only).
+ * That is not guaranteed to happen if the updates are only triggered from CFS
+ * and DL, though, because they may not be coming in if only RT tasks are
+ * active all the time (or there are RT tasks only).
  *
- * As a workaround for that issue, this function is called by the RT and DL
- * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * As a workaround for that issue, this function is called periodically by the
+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
  * but that really is a band-aid. Going forward it should be replaced with
- * solutions targeted more specifically at RT and DL tasks.
+ * solutions targeted more specifically at RT tasks.
  */
 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 {
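[Editor's note: the hunk ends at the opening of cpufreq_update_util(),
whose body is unchanged by this patch. For context, in kernels of this
vintage it roughly dispatches to the per-CPU hook registered by the
governor, along these lines (reproduced from memory, verify against the
tree):]

static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	/* fetch the governor's per-CPU callback under RCU-sched */
	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}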