diff options
author | Viresh Kumar <viresh.kumar@linaro.org> | 2018-05-09 06:35:24 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2018-05-15 04:38:12 -0400 |
commit | ecd2884291261e3fddbc7651ee11a20d596bb514 (patch) | |
tree | 8fd4252e61cce2b673b4613de9fe3cf6e716c6ce /kernel/sched | |
parent | 1b04722c3b892033f143d056a2876f293a1adbcc (diff) |
cpufreq: schedutil: Don't set next_freq to UINT_MAX
The schedutil driver sets sg_policy->next_freq to UINT_MAX on certain
occasions to discard the cached value of next freq:
- In sugov_start(), when the schedutil governor is started for a group
of CPUs.
- And whenever we need to force a freq update before rate-limit
duration, which happens when:
- there is an update in cpufreq policy limits.
- Or when the utilization of DL scheduling class increases.
As a result, get_next_freq() doesn't return a cached next_freq value but
recalculates the next frequency instead.
But having special meaning for a particular value of frequency makes the
code less readable and error prone. We recently fixed a bug where the
UINT_MAX value was considered as valid frequency in
sugov_update_single().
All we need is a flag which can be used to discard the value of
sg_policy->next_freq, and we already have need_freq_update for that. Let's
reuse it instead of setting next_freq to UINT_MAX.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 18 |
1 file changed, 6 insertions, 12 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d7e5194a820d..2442decbfec7 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
@@ -95,15 +95,8 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) | |||
95 | if (sg_policy->work_in_progress) | 95 | if (sg_policy->work_in_progress) |
96 | return false; | 96 | return false; |
97 | 97 | ||
98 | if (unlikely(sg_policy->need_freq_update)) { | 98 | if (unlikely(sg_policy->need_freq_update)) |
99 | sg_policy->need_freq_update = false; | ||
100 | /* | ||
101 | * This happens when limits change, so forget the previous | ||
102 | * next_freq value and force an update. | ||
103 | */ | ||
104 | sg_policy->next_freq = UINT_MAX; | ||
105 | return true; | 99 | return true; |
106 | } | ||
107 | 100 | ||
108 | delta_ns = time - sg_policy->last_freq_update_time; | 101 | delta_ns = time - sg_policy->last_freq_update_time; |
109 | 102 | ||
@@ -165,8 +158,10 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, | |||
165 | 158 | ||
166 | freq = (freq + (freq >> 2)) * util / max; | 159 | freq = (freq + (freq >> 2)) * util / max; |
167 | 160 | ||
168 | if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX) | 161 | if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) |
169 | return sg_policy->next_freq; | 162 | return sg_policy->next_freq; |
163 | |||
164 | sg_policy->need_freq_update = false; | ||
170 | sg_policy->cached_raw_freq = freq; | 165 | sg_policy->cached_raw_freq = freq; |
171 | return cpufreq_driver_resolve_freq(policy, freq); | 166 | return cpufreq_driver_resolve_freq(policy, freq); |
172 | } | 167 | } |
@@ -305,8 +300,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, | |||
305 | * Do not reduce the frequency if the CPU has not been idle | 300 | * Do not reduce the frequency if the CPU has not been idle |
306 | * recently, as the reduction is likely to be premature then. | 301 | * recently, as the reduction is likely to be premature then. |
307 | */ | 302 | */ |
308 | if (busy && next_f < sg_policy->next_freq && | 303 | if (busy && next_f < sg_policy->next_freq) { |
309 | sg_policy->next_freq != UINT_MAX) { | ||
310 | next_f = sg_policy->next_freq; | 304 | next_f = sg_policy->next_freq; |
311 | 305 | ||
312 | /* Reset cached freq as next_freq has changed */ | 306 | /* Reset cached freq as next_freq has changed */ |
@@ -654,7 +648,7 @@ static int sugov_start(struct cpufreq_policy *policy) | |||
654 | 648 | ||
655 | sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; | 649 | sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; |
656 | sg_policy->last_freq_update_time = 0; | 650 | sg_policy->last_freq_update_time = 0; |
657 | sg_policy->next_freq = UINT_MAX; | 651 | sg_policy->next_freq = 0; |
658 | sg_policy->work_in_progress = false; | 652 | sg_policy->work_in_progress = false; |
659 | sg_policy->need_freq_update = false; | 653 | sg_policy->need_freq_update = false; |
660 | sg_policy->cached_raw_freq = 0; | 654 | sg_policy->cached_raw_freq = 0; |