author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-09-03 18:05:22 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-09-03 18:05:22 -0400
commit		08a10002bed151f6df201715adb80c1c5e7fe7ca (patch)
tree		a9d53ec2b6f5b1921a614f2d4bd70fb6c3a0d42f /kernel/sched/cpufreq_schedutil.c
parent		bd87c8fb9d2e420e5ddffad0cd1abcadfca75dbd (diff)
parent		c49cbc19b31e069cb344921c7286d7549767d10e (diff)
Merge branch 'pm-cpufreq-sched'
* pm-cpufreq-sched:
cpufreq: schedutil: Always process remote callback with slow switching
cpufreq: schedutil: Don't restrict kthread to related_cpus unnecessarily
cpufreq: Return 0 from ->fast_switch() on errors
cpufreq: Simplify cpufreq_can_do_remote_dvfs()
cpufreq: Process remote callbacks from any CPU if the platform permits
sched: cpufreq: Allow remote cpufreq callbacks
cpufreq: schedutil: Use unsigned int for iowait boost
cpufreq: schedutil: Make iowait boost more energy efficient
Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	86
1 file changed, 71 insertions(+), 15 deletions(-)
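
A note before the diff itself: the remote-callback commits merged above make sugov_should_update_freq() bail out early when a callback arrives on a CPU that cannot actually change the target CPU's frequency. The check it relies on, cpufreq_can_do_remote_dvfs(), amounts to "the driver allows DVFS from any CPU, or the CPU running the callback shares the cpufreq policy with the target". The following is a minimal userspace sketch of that decision, not the kernel's code; struct policy, the cpumask-as-bitmask and the example CPU numbers are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the few struct cpufreq_policy fields that matter here. */
struct policy {
	unsigned long cpus_mask;		/* CPUs covered by this policy */
	bool dvfs_possible_from_any_cpu;	/* driver allows remote DVFS */
	bool fast_switch_enabled;		/* frequency is set from scheduler context */
};

/* May the CPU running the callback change the frequency for this policy? */
static bool can_do_remote_dvfs(const struct policy *p, int this_cpu)
{
	return p->dvfs_possible_from_any_cpu ||
	       (p->cpus_mask & (1UL << this_cpu));
}

/* Mirrors the early return added to sugov_should_update_freq() below. */
static bool should_update_freq(const struct policy *p, int this_cpu)
{
	if (p->fast_switch_enabled && !can_do_remote_dvfs(p, this_cpu))
		return false;
	return true;	/* remaining rate-limit checks omitted */
}

int main(void)
{
	/* A fast-switching policy covering CPUs 0-3, remote DVFS not allowed. */
	struct policy p = { .cpus_mask = 0xf, .fast_switch_enabled = true };

	printf("callback on CPU 2: %d\n", should_update_freq(&p, 2));	/* 1 */
	printf("callback on CPU 5: %d\n", should_update_freq(&p, 5));	/* 0 */

	p.dvfs_possible_from_any_cpu = true;
	printf("callback on CPU 5, any-CPU DVFS: %d\n", should_update_freq(&p, 5));	/* 1 */
	return 0;
}
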
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 45fcf21ad685..9209d83ecdcf 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -52,9 +52,11 @@ struct sugov_policy {
 struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
+	unsigned int cpu;
 
-	unsigned long iowait_boost;
-	unsigned long iowait_boost_max;
+	bool iowait_boost_pending;
+	unsigned int iowait_boost;
+	unsigned int iowait_boost_max;
 	u64 last_update;
 
 	/* The fields below are only needed when sharing a policy. */
@@ -76,6 +78,26 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 {
 	s64 delta_ns;
 
+	/*
+	 * Since cpufreq_update_util() is called with rq->lock held for
+	 * the @target_cpu, our per-cpu data is fully serialized.
+	 *
+	 * However, drivers cannot in general deal with cross-cpu
+	 * requests, so while get_next_freq() will work, our
+	 * sugov_update_commit() call may not for the fast switching platforms.
+	 *
+	 * Hence stop here for remote requests if they aren't supported
+	 * by the hardware, as calculating the frequency is pointless if
+	 * we cannot in fact act on it.
+	 *
+	 * For the slow switching platforms, the kthread is always scheduled on
+	 * the right set of CPUs and any CPU can find the next frequency and
+	 * schedule the kthread.
+	 */
+	if (sg_policy->policy->fast_switch_enabled &&
+	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
+		return false;
+
 	if (sg_policy->work_in_progress)
 		return false;
 
@@ -106,7 +128,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 	if (policy->fast_switch_enabled) {
 		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (next_freq == CPUFREQ_ENTRY_INVALID)
+		if (!next_freq)
 			return;
 
 		policy->cur = next_freq;
@@ -154,12 +176,12 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long cfs_max;
 
-	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
 
 	*util = min(rq->cfs.avg.util_avg, cfs_max);
 	*max = cfs_max;
@@ -169,30 +191,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 				   unsigned int flags)
 {
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		if (sg_cpu->iowait_boost_pending)
+			return;
+
+		sg_cpu->iowait_boost_pending = true;
+
+		if (sg_cpu->iowait_boost) {
+			sg_cpu->iowait_boost <<= 1;
+			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		} else {
+			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+		}
 	} else if (sg_cpu->iowait_boost) {
 		s64 delta_ns = time - sg_cpu->last_update;
 
 		/* Clear iowait_boost if the CPU apprears to have been idle. */
-		if (delta_ns > TICK_NSEC)
+		if (delta_ns > TICK_NSEC) {
 			sg_cpu->iowait_boost = 0;
+			sg_cpu->iowait_boost_pending = false;
+		}
 	}
 }
 
 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 			       unsigned long *max)
 {
-	unsigned long boost_util = sg_cpu->iowait_boost;
-	unsigned long boost_max = sg_cpu->iowait_boost_max;
+	unsigned int boost_util, boost_max;
 
-	if (!boost_util)
+	if (!sg_cpu->iowait_boost)
 		return;
 
+	if (sg_cpu->iowait_boost_pending) {
+		sg_cpu->iowait_boost_pending = false;
+	} else {
+		sg_cpu->iowait_boost >>= 1;
+		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+			sg_cpu->iowait_boost = 0;
+			return;
+		}
+	}
+
+	boost_util = sg_cpu->iowait_boost;
+	boost_max = sg_cpu->iowait_boost_max;
+
 	if (*util * boost_max < *max * boost_util) {
 		*util = boost_util;
 		*max = boost_max;
 	}
-	sg_cpu->iowait_boost >>= 1;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -229,7 +275,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(&util, &max, sg_cpu->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 		/*
@@ -264,6 +310,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		delta_ns = time - j_sg_cpu->last_update;
 		if (delta_ns > TICK_NSEC) {
 			j_sg_cpu->iowait_boost = 0;
+			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
@@ -290,7 +337,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	unsigned long util, max;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
+	sugov_get_util(&util, &max, sg_cpu->cpu);
 
 	raw_spin_lock(&sg_policy->update_lock);
 
@@ -445,7 +492,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 	}
 
 	sg_policy->thread = thread;
-	kthread_bind_mask(thread, policy->related_cpus);
+
+	/* Kthread is bound to all CPUs by default */
+	if (!policy->dvfs_possible_from_any_cpu)
+		kthread_bind_mask(thread, policy->related_cpus);
+
 	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
 	mutex_init(&sg_policy->work_lock);
 
@@ -663,6 +714,11 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int __init sugov_register(void)
 {
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(sugov_cpu, cpu).cpu = cpu;
+
 	return cpufreq_register_governor(&schedutil_gov);
 }
 fs_initcall(sugov_register);
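
A note on the iowait boost rework merged above ("cpufreq: schedutil: Make iowait boost more energy efficient"): instead of jumping straight to iowait_boost_max on every SCHED_CPUFREQ_IOWAIT hint and then halving it, the boost now starts at the policy minimum, doubles on each further hint up to the maximum, and is halved on updates that arrive without the hint. The snippet below is a minimal userspace simulation of that ramp; the kHz values are made up, the idle-reset path (delta_ns > TICK_NSEC) is omitted, and the two helpers only mirror, in simplified form, sugov_set_iowait_boost() and sugov_iowait_boost().

#include <stdbool.h>
#include <stdio.h>

#define POLICY_MIN	 400000U	/* assumed policy->min, in kHz */
#define BOOST_MAX	2000000U	/* assumed iowait_boost_max, in kHz */

static unsigned int iowait_boost;
static bool iowait_boost_pending;

/* Simplified mirror of sugov_set_iowait_boost(): react to the iowait hint. */
static void set_iowait_boost(bool iowait_hint)
{
	if (!iowait_hint || iowait_boost_pending)
		return;

	iowait_boost_pending = true;

	if (iowait_boost) {
		iowait_boost <<= 1;			/* double on repeated hints */
		if (iowait_boost > BOOST_MAX)
			iowait_boost = BOOST_MAX;
	} else {
		iowait_boost = POLICY_MIN;		/* first hint: start small */
	}
}

/* Simplified mirror of sugov_iowait_boost(): consume and then decay the boost. */
static unsigned int apply_iowait_boost(void)
{
	if (!iowait_boost)
		return 0;

	if (iowait_boost_pending) {
		iowait_boost_pending = false;		/* fresh boost is used undecayed once */
	} else {
		iowait_boost >>= 1;			/* halve on later updates */
		if (iowait_boost < POLICY_MIN)
			iowait_boost = 0;
	}
	return iowait_boost;
}

int main(void)
{
	/* A burst of I/O wakeups followed by ordinary updates. */
	bool pattern[] = { true, true, true, true, false, false, false, false, false };

	for (unsigned int i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
		set_iowait_boost(pattern[i]);
		printf("iowait=%d  boost=%u kHz\n", pattern[i], apply_iowait_boost());
	}
	return 0;
}

The boost climbs 400000, 800000, 1600000, then caps at 2000000 while the iowait hints keep coming, and decays 1000000, 500000, 0 once they stop, which is the more gradual, energy-conscious behaviour the commit describes.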