author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-05 12:38:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-05 12:38:39 -0400
commit		3c89adb0d11117f64d5b501730be7fb2bf53a479 (patch)
tree		c259aec20deed6c0a0773cfda3a11f58ec9a077d /kernel/sched
parent		11e7c21880617b8383ad8ad059ae9a07e5abe68e (diff)
parent		a24e16b1310ce7f474aa0caca0e66f0f174c022f (diff)
Merge tag 'pm-4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These include a significant update of the generic power domains (genpd) and Operating Performance Points (OPP) frameworks, mostly related to the introduction of power domain performance levels, cpufreq updates (new driver for Qualcomm Kryo processors, updates of the existing drivers, some core fixes, schedutil governor improvements), PCI power management fixes, ACPI workaround for EC-based wakeup events handling on resume from suspend-to-idle, and major updates of the turbostat and pm-graph utilities.

  Specifics:

   - Introduce power domain performance levels into the generic power domains (genpd) and Operating Performance Points (OPP) frameworks (Viresh Kumar, Rajendra Nayak, Dan Carpenter).

   - Fix two issues in the runtime PM framework related to the initialization and removal of devices using device links (Ulf Hansson).

   - Clean up the initialization of drivers for devices in PM domains (Ulf Hansson, Geert Uytterhoeven).

   - Fix a cpufreq core issue related to the policy sysfs interface causing CPU online to fail for CPUs sharing one cpufreq policy in some situations (Tao Wang).

   - Make it possible to use platform-specific suspend/resume hooks in the cpufreq-dt driver and make the Armada 37xx DVFS use that feature (Viresh Kumar, Miquel Raynal).

   - Optimize policy transition notifications in cpufreq (Viresh Kumar).

   - Improve the iowait boost mechanism in the schedutil cpufreq governor (Patrick Bellasi).

   - Improve the handling of deferred frequency updates in the schedutil cpufreq governor (Joel Fernandes, Dietmar Eggemann, Rafael Wysocki, Viresh Kumar).

   - Add a new cpufreq driver for Qualcomm Kryo (Ilia Lin).

   - Fix and clean up some cpufreq drivers (Colin Ian King, Dmitry Osipenko, Doug Smythies, Luc Van Oostenryck, Simon Horman, Viresh Kumar).

   - Fix the handling of PCI devices with the DPM_SMART_SUSPEND flag set and update stale comments in the PCI core PM code (Rafael Wysocki).

   - Work around an issue related to the handling of EC-based wakeup events in the ACPI PM core during resume from suspend-to-idle if the EC has been put into the low-power mode (Rafael Wysocki).

   - Improve the handling of wakeup source objects in the PM core (Doug Berger, Mahendran Ganesh, Rafael Wysocki).

   - Update the driver core to prevent deferred probe from breaking suspend/resume ordering (Feng Kan).

   - Clean up the PM core somewhat (Bjorn Helgaas, Ulf Hansson, Rafael Wysocki).

   - Make the core suspend/resume code and cpufreq support the RT patch (Sebastian Andrzej Siewior, Thomas Gleixner).

   - Consolidate the PM QoS handling in cpuidle governors (Rafael Wysocki).

   - Fix a possible crash in the hibernation core (Tetsuo Handa).

   - Update the rockchip-io Adaptive Voltage Scaling (AVS) driver (David Wu).

   - Update the turbostat utility (fixes, cleanups, new CPU IDs, new command line options, built-in "Low Power Idle" counters support, new POLL and POLL% columns) and add an entry for it to MAINTAINERS (Len Brown, Artem Bityutskiy, Chen Yu, Laura Abbott, Matt Turner, Prarit Bhargava, Srinivas Pandruvada).

   - Update the pm-graph utility to version 5.1 (Todd Brandt).

   - Update the intel_pstate_tracer utility (Doug Smythies)"

* tag 'pm-4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (128 commits)
  tools/power turbostat: update version number
  tools/power turbostat: Add Node in output
  tools/power turbostat: add node information into turbostat calculations
  tools/power turbostat: remove num_ from cpu_topology struct
  tools/power turbostat: rename num_cores_per_pkg to num_cores_per_node
  tools/power turbostat: track thread ID in cpu_topology
  tools/power turbostat: Calculate additional node information for a package
  tools/power turbostat: Fix node and siblings lookup data
  tools/power turbostat: set max_num_cpus equal to the cpumask length
  tools/power turbostat: if --num_iterations, print for specific number of iterations
  tools/power turbostat: Add Cannon Lake support
  tools/power turbostat: delete duplicate #defines
  x86: msr-index.h: Correct SNB_C1/C3_AUTO_UNDEMOTE defines
  tools/power turbostat: Correct SNB_C1/C3_AUTO_UNDEMOTE defines
  tools/power turbostat: add POLL and POLL% column
  tools/power turbostat: Fix --hide Pk%pc10
  tools/power turbostat: Build-in "Low Power Idle" counters support
  tools/power turbostat: Don't make man pages executable
  tools/power turbostat: remove blank lines
  tools/power turbostat: a small C-states dump readability immprovement
  ...
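
The schedutil iowait-boost work referenced above (and reworked in the diff below) follows a simple policy: the first wakeup from IO starts the boost at the policy's minimum frequency, each further IO wakeup within a tick doubles it up to the maximum, and the boost is halved on every update that does not request it, dropping to zero once it falls below the minimum. The standalone C sketch below only models that doubling/halving behaviour; the constants and helper names are illustrative assumptions, not the kernel implementation.

/* Illustrative model of the schedutil iowait-boost policy.
 * Hypothetical constants and helpers; not kernel code. */
#include <stdio.h>

#define FREQ_MIN	400000U		/* assumed policy->min, in kHz */
#define FREQ_MAX	2000000U	/* assumed iowait_boost_max, in kHz */

static unsigned int iowait_boost;	/* 0 means "no boost active" */

/* Called when a task wakes up after IO (SCHED_CPUFREQ_IOWAIT). */
static void boost_request(void)
{
	if (!iowait_boost) {
		iowait_boost = FREQ_MIN;	/* first wakeup: start at min */
		return;
	}
	iowait_boost <<= 1;			/* frequent wakeups: double */
	if (iowait_boost > FREQ_MAX)
		iowait_boost = FREQ_MAX;
}

/* Called on updates with no new IO wakeup: decay the boost. */
static void boost_decay(void)
{
	iowait_boost >>= 1;
	if (iowait_boost < FREQ_MIN)
		iowait_boost = 0;		/* below min: disable */
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		boost_request();
		printf("request %d -> boost %u kHz\n", i + 1, iowait_boost);
	}
	for (int i = 0; i < 4; i++) {
		boost_decay();
		printf("decay   %d -> boost %u kHz\n", i + 1, iowait_boost);
	}
	return 0;
}

With the assumed constants this climbs 400000 -> 800000 -> 1600000 -> 2000000 kHz (clamped) and then decays 1000000 -> 500000 -> 0.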
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	262
1 file changed, 179 insertions(+), 83 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 28592b62b1d5..3cde46483f0a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -51,7 +51,7 @@ struct sugov_cpu {
 	bool iowait_boost_pending;
 	unsigned int iowait_boost;
 	unsigned int iowait_boost_max;
 	u64 last_update;
 
 	/* The fields below are only needed when sharing a policy: */
 	unsigned long util_cfs;
@@ -89,46 +89,52 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	 * schedule the kthread.
 	 */
 	if (sg_policy->policy->fast_switch_enabled &&
-	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
+	    !cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
-	if (sg_policy->work_in_progress)
-		return false;
-
-	if (unlikely(sg_policy->need_freq_update)) {
-		sg_policy->need_freq_update = false;
-		/*
-		 * This happens when limits change, so forget the previous
-		 * next_freq value and force an update.
-		 */
-		sg_policy->next_freq = UINT_MAX;
+	if (unlikely(sg_policy->need_freq_update))
 		return true;
-	}
 
 	delta_ns = time - sg_policy->last_freq_update_time;
 
 	return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
-static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
-				unsigned int next_freq)
+static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+				   unsigned int next_freq)
 {
-	struct cpufreq_policy *policy = sg_policy->policy;
-
 	if (sg_policy->next_freq == next_freq)
-		return;
+		return false;
 
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
-	if (policy->fast_switch_enabled) {
-		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (!next_freq)
-			return;
+	return true;
+}
 
-		policy->cur = next_freq;
-		trace_cpu_frequency(next_freq, smp_processor_id());
-	} else {
+static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+			      unsigned int next_freq)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+
+	if (!sugov_update_next_freq(sg_policy, time, next_freq))
+		return;
+
+	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+	if (!next_freq)
+		return;
+
+	policy->cur = next_freq;
+	trace_cpu_frequency(next_freq, smp_processor_id());
+}
+
+static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
+				  unsigned int next_freq)
+{
+	if (!sugov_update_next_freq(sg_policy, time, next_freq))
+		return;
+
+	if (!sg_policy->work_in_progress) {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
 	}
@@ -165,8 +171,10 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
 	freq = (freq + (freq >> 2)) * util / max;
 
-	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
 		return sg_policy->next_freq;
+
+	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
@@ -200,43 +208,120 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 	return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
 }
 
-static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)
+/**
+ * sugov_iowait_reset() - Reset the IO boost status of a CPU.
+ * @sg_cpu: the sugov data for the CPU to boost
+ * @time: the update time from the caller
+ * @set_iowait_boost: true if an IO boost has been requested
+ *
+ * The IO wait boost of a task is disabled after a tick since the last update
+ * of a CPU. If a new IO wait boost is requested after more then a tick, then
+ * we enable the boost starting from the minimum frequency, which improves
+ * energy efficiency by ignoring sporadic wakeups from IO.
+ */
+static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
+			       bool set_iowait_boost)
 {
-	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		if (sg_cpu->iowait_boost_pending)
-			return;
+	s64 delta_ns = time - sg_cpu->last_update;
 
-		sg_cpu->iowait_boost_pending = true;
+	/* Reset boost only if a tick has elapsed since last request */
+	if (delta_ns <= TICK_NSEC)
+		return false;
 
-		if (sg_cpu->iowait_boost) {
-			sg_cpu->iowait_boost <<= 1;
-			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
-				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
-		} else {
-			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
-		}
-	} else if (sg_cpu->iowait_boost) {
-		s64 delta_ns = time - sg_cpu->last_update;
+	sg_cpu->iowait_boost = set_iowait_boost
+			       ? sg_cpu->sg_policy->policy->min : 0;
+	sg_cpu->iowait_boost_pending = set_iowait_boost;
 
-		/* Clear iowait_boost if the CPU apprears to have been idle. */
-		if (delta_ns > TICK_NSEC) {
-			sg_cpu->iowait_boost = 0;
-			sg_cpu->iowait_boost_pending = false;
-		}
+	return true;
+}
+
+/**
+ * sugov_iowait_boost() - Updates the IO boost status of a CPU.
+ * @sg_cpu: the sugov data for the CPU to boost
+ * @time: the update time from the caller
+ * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
+ *
+ * Each time a task wakes up after an IO operation, the CPU utilization can be
+ * boosted to a certain utilization which doubles at each "frequent and
+ * successive" wakeup from IO, ranging from the utilization of the minimum
+ * OPP to the utilization of the maximum OPP.
+ * To keep doubling, an IO boost has to be requested at least once per tick,
+ * otherwise we restart from the utilization of the minimum OPP.
+ */
+static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+			       unsigned int flags)
+{
+	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
+
+	/* Reset boost if the CPU appears to have been idle enough */
+	if (sg_cpu->iowait_boost &&
+	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
+		return;
+
+	/* Boost only tasks waking up after IO */
+	if (!set_iowait_boost)
+		return;
+
+	/* Ensure boost doubles only one time at each request */
+	if (sg_cpu->iowait_boost_pending)
+		return;
+	sg_cpu->iowait_boost_pending = true;
+
+	/* Double the boost at each request */
+	if (sg_cpu->iowait_boost) {
+		sg_cpu->iowait_boost <<= 1;
+		if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+			sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		return;
 	}
+
+	/* First wakeup after IO: start with minimum boost */
+	sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
 }
 
-static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
-			       unsigned long *max)
+/**
+ * sugov_iowait_apply() - Apply the IO boost to a CPU.
+ * @sg_cpu: the sugov data for the cpu to boost
+ * @time: the update time from the caller
+ * @util: the utilization to (eventually) boost
+ * @max: the maximum value the utilization can be boosted to
+ *
+ * A CPU running a task which woken up after an IO operation can have its
+ * utilization boosted to speed up the completion of those IO operations.
+ * The IO boost value is increased each time a task wakes up from IO, in
+ * sugov_iowait_apply(), and it's instead decreased by this function,
+ * each time an increase has not been requested (!iowait_boost_pending).
+ *
+ * A CPU which also appears to have been idle for at least one tick has also
+ * its IO boost utilization reset.
+ *
+ * This mechanism is designed to boost high frequently IO waiting tasks, while
+ * being more conservative on tasks which does sporadic IO operations.
+ */
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+			       unsigned long *util, unsigned long *max)
 {
 	unsigned int boost_util, boost_max;
 
+	/* No boost currently required */
 	if (!sg_cpu->iowait_boost)
 		return;
 
+	/* Reset boost if the CPU appears to have been idle enough */
+	if (sugov_iowait_reset(sg_cpu, time, false))
+		return;
+
+	/*
+	 * An IO waiting task has just woken up:
+	 * allow to further double the boost value
+	 */
 	if (sg_cpu->iowait_boost_pending) {
 		sg_cpu->iowait_boost_pending = false;
 	} else {
+		/*
+		 * Otherwise: reduce the boost value and disable it when we
+		 * reach the minimum.
+		 */
 		sg_cpu->iowait_boost >>= 1;
 		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
 			sg_cpu->iowait_boost = 0;
@@ -244,9 +329,12 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 		}
 	}
 
+	/*
+	 * Apply the current boost value: a CPU is boosted only if its current
+	 * utilization is smaller then the current IO boost level.
+	 */
 	boost_util = sg_cpu->iowait_boost;
 	boost_max = sg_cpu->iowait_boost_max;
-
 	if (*util * boost_max < *max * boost_util) {
 		*util = boost_util;
 		*max = boost_max;
@@ -285,7 +373,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	unsigned int next_f;
 	bool busy;
 
-	sugov_set_iowait_boost(sg_cpu, time, flags);
+	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
@@ -298,21 +386,31 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
 	util = sugov_aggregate_util(sg_cpu);
-	sugov_iowait_boost(sg_cpu, &util, &max);
+	sugov_iowait_apply(sg_cpu, time, &util, &max);
 	next_f = get_next_freq(sg_policy, util, max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
 	 */
-	if (busy && next_f < sg_policy->next_freq &&
-	    sg_policy->next_freq != UINT_MAX) {
+	if (busy && next_f < sg_policy->next_freq) {
 		next_f = sg_policy->next_freq;
 
 		/* Reset cached freq as next_freq has changed */
 		sg_policy->cached_raw_freq = 0;
 	}
 
-	sugov_update_commit(sg_policy, time, next_f);
+	/*
+	 * This code runs under rq->lock for the target CPU, so it won't run
+	 * concurrently on two different CPUs for the same target and it is not
+	 * necessary to acquire the lock in the fast switch case.
+	 */
+	if (sg_policy->policy->fast_switch_enabled) {
+		sugov_fast_switch(sg_policy, time, next_f);
+	} else {
+		raw_spin_lock(&sg_policy->update_lock);
+		sugov_deferred_update(sg_policy, time, next_f);
+		raw_spin_unlock(&sg_policy->update_lock);
+	}
 }
 
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@@ -325,28 +423,12 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 	for_each_cpu(j, policy->cpus) {
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
-		s64 delta_ns;
 
 		sugov_get_util(j_sg_cpu);
-
-		/*
-		 * If the CFS CPU utilization was last updated before the
-		 * previous frequency update and the time elapsed between the
-		 * last update of the CPU utilization and the last frequency
-		 * update is long enough, reset iowait_boost and util_cfs, as
-		 * they are now probably stale. However, still consider the
-		 * CPU contribution if it has some DEADLINE utilization
-		 * (util_dl).
-		 */
-		delta_ns = time - j_sg_cpu->last_update;
-		if (delta_ns > TICK_NSEC) {
-			j_sg_cpu->iowait_boost = 0;
-			j_sg_cpu->iowait_boost_pending = false;
-		}
-
 		j_max = j_sg_cpu->max;
 		j_util = sugov_aggregate_util(j_sg_cpu);
-		sugov_iowait_boost(j_sg_cpu, &j_util, &j_max);
+		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
+
 		if (j_util * max > j_max * util) {
 			util = j_util;
 			max = j_max;
@@ -365,14 +447,18 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 
 	raw_spin_lock(&sg_policy->update_lock);
 
-	sugov_set_iowait_boost(sg_cpu, time, flags);
+	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
 
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
-		sugov_update_commit(sg_policy, time, next_f);
+
+		if (sg_policy->policy->fast_switch_enabled)
+			sugov_fast_switch(sg_policy, time, next_f);
+		else
+			sugov_deferred_update(sg_policy, time, next_f);
 	}
 
 	raw_spin_unlock(&sg_policy->update_lock);
@@ -381,13 +467,27 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 static void sugov_work(struct kthread_work *work)
 {
 	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+	unsigned int freq;
+	unsigned long flags;
+
+	/*
+	 * Hold sg_policy->update_lock shortly to handle the case where:
+	 * incase sg_policy->next_freq is read here, and then updated by
+	 * sugov_deferred_update() just before work_in_progress is set to false
+	 * here, we may miss queueing the new update.
+	 *
+	 * Note: If a work was queued after the update_lock is released,
+	 * sugov_work() will just be called again by kthread_work code; and the
+	 * request will be proceed before the sugov thread sleeps.
+	 */
+	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+	freq = sg_policy->next_freq;
+	sg_policy->work_in_progress = false;
+	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 
 	mutex_lock(&sg_policy->work_lock);
-	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
-				CPUFREQ_RELATION_L);
+	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
 	mutex_unlock(&sg_policy->work_lock);
-
-	sg_policy->work_in_progress = false;
 }
 
 static void sugov_irq_work(struct irq_work *irq_work)
@@ -510,11 +610,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 	}
 
 	sg_policy->thread = thread;
-
-	/* Kthread is bound to all CPUs by default */
-	if (!policy->dvfs_possible_from_any_cpu)
-		kthread_bind_mask(thread, policy->related_cpus);
-
+	kthread_bind_mask(thread, policy->related_cpus);
 	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
 	mutex_init(&sg_policy->work_lock);
 
@@ -657,7 +753,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 
 	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
 	sg_policy->last_freq_update_time = 0;
-	sg_policy->next_freq = UINT_MAX;
+	sg_policy->next_freq = 0;
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = 0;
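
For reference, the frequency computation left untouched in get_next_freq() above, freq = (freq + (freq >> 2)) * util / max, is simply next_freq = 1.25 * freq * util / max, where freq is the policy's reference (maximum or current) frequency. A small standalone sketch of that arithmetic, with made-up numbers rather than real policy data:

/* Worked example of the schedutil formula kept in get_next_freq() above:
 * next = 1.25 * freq * util / max. The inputs below are illustrative only. */
#include <stdio.h>

static unsigned int get_next_freq_raw(unsigned int freq, unsigned long util,
				      unsigned long max)
{
	/* freq + freq/4 == 1.25 * freq, then scale by util/max */
	return (freq + (freq >> 2)) * util / max;
}

int main(void)
{
	/* e.g. reference freq 2000000 kHz, util 512 of max 1024 -> 1250000 kHz */
	printf("%u kHz\n", get_next_freq_raw(2000000, 512, 1024));
	return 0;
}

With utilization at half of max, the request lands 25% above the proportional frequency (1.25 GHz on a 2 GHz reference), which gives the CPU some headroom before utilization saturates.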