Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  |  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 8f8de3d4d6b7..cd7cd489f739 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,7 @@ struct sugov_policy {
 	u64 last_freq_update_time;
 	s64 freq_update_delay_ns;
 	unsigned int next_freq;
+	unsigned int cached_raw_freq;
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
 
-	unsigned int cached_raw_freq;
 	unsigned long iowait_boost;
 	unsigned long iowait_boost_max;
 	u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
  * @util: Current CPU utilization.
  * @max: CPU capacity.
 *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
-				  unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+				  unsigned long util, unsigned long max)
 {
-	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned int freq = arch_scale_freq_invariant() ?
 				policy->cpuinfo.max_freq : policy->cur;
 
 	freq = (freq + (freq >> 2)) * util / max;
 
-	if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
 		return sg_policy->next_freq;
-	sg_cpu->cached_raw_freq = freq;
+	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	} else {
 		sugov_get_util(&util, &max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
-		next_f = get_next_freq(sg_cpu, util, max);
+		next_f = get_next_freq(sg_policy, util, max);
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 		sugov_iowait_boost(j_sg_cpu, &util, &max);
 	}
 
-	return get_next_freq(sg_cpu, util, max);
+	return get_next_freq(sg_policy, util, max);
 }
 
 static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->next_freq = UINT_MAX;
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
+	sg_policy->cached_raw_freq = 0;
 
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
 		sg_cpu->max = 0;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
 		sg_cpu->last_update = 0;
-		sg_cpu->cached_raw_freq = 0;
 		sg_cpu->iowait_boost = 0;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
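
For context, here is a minimal user-space sketch (not kernel code) of the calculation that get_next_freq() performs after this change: the raw target is roughly 1.25 * max_freq * util / max, and if that raw value is unchanged the previously resolved frequency is reused; the raw/resolved pair is exactly the state the hunks above move from struct sugov_cpu into struct sugov_policy. The names policy_state, resolve_freq() and compute_next_freq() below are hypothetical stand-ins for struct sugov_policy, cpufreq_driver_resolve_freq() and get_next_freq(), and a table step of 100 MHz is assumed purely for illustration.

#include <stdio.h>

/* Hypothetical per-policy state, standing in for struct sugov_policy. */
struct policy_state {
	unsigned int cached_raw_freq;	/* last raw (unresolved) target, kHz */
	unsigned int next_freq;		/* last frequency actually requested, kHz */
};

/*
 * Hypothetical stand-in for cpufreq_driver_resolve_freq(): snap the raw
 * target to an assumed 100 MHz frequency-table step.
 */
static unsigned int resolve_freq(unsigned int raw_khz)
{
	return (raw_khz / 100000) * 100000;
}

static unsigned int compute_next_freq(struct policy_state *ps,
				      unsigned int max_freq_khz,
				      unsigned long util, unsigned long max)
{
	/* freq = 1.25 * max_freq * util / max, as in get_next_freq(). */
	unsigned int freq = (max_freq_khz + (max_freq_khz >> 2)) * util / max;

	/*
	 * Same raw target as last time: reuse the resolved frequency.
	 * 0 means "nothing requested yet" here; the kernel uses UINT_MAX
	 * as that sentinel for sg_policy->next_freq.
	 */
	if (freq == ps->cached_raw_freq && ps->next_freq != 0)
		return ps->next_freq;

	ps->cached_raw_freq = freq;
	ps->next_freq = resolve_freq(freq);
	return ps->next_freq;
}

int main(void)
{
	struct policy_state ps = { 0, 0 };

	/* 50% utilization on a 2.0 GHz CPU: raw target is 1.25 GHz. */
	printf("%u kHz\n", compute_next_freq(&ps, 2000000, 512, 1024));
	/* Unchanged inputs: served from the cached raw/resolved pair. */
	printf("%u kHz\n", compute_next_freq(&ps, 2000000, 512, 1024));
	return 0;
}

Since next_freq is already per-policy state, keeping cached_raw_freq beside it in struct sugov_policy keeps both halves of that cache in one place, which is what the patch above does.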