author    Linus Torvalds <torvalds@linux-foundation.org>    2017-03-02 20:33:52 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-03-02 20:33:52 -0500
commit    080e4168c0834ccc853c48259e16a5c556c7ecba (patch)
tree      641a264718c6f1b8bf9525e4e4a073565044cd03 /drivers/base
parent    bbe08c0a43e2c5ee3a00de68c0e867a08a9aa990 (diff)
parent    9b5e9cb164ee93ae19c4c6593e8188a55481f78b (diff)
Merge tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management updates from Rafael Wysocki:
 "These fix two bugs introduced by recent power management updates (in
  the cpuidle menu governor and intel_pstate) and a few other issues,
  clean up things and remove unused code.

  Specifics:

   - Fix for a cpuidle menu governor problem that started to take an
     unnecessary spinlock after one of the recent updates and that did
     not play well with the RT patch (Rafael Wysocki).

   - Fix for the new intel_pstate operation mode switching feature
     added recently that did not reinitialize P-state limits properly
     when switching operation modes (Rafael Wysocki).

   - Removal of unused global notifiers from the PM QoS framework
     (Viresh Kumar).

   - Generic power domains framework update to make it handle
     asynchronous invocations of PM callbacks in the "noirq" phases of
     system suspend/hibernation correctly (Ulf Hansson).

   - Two hibernation core cleanups (Rafael Wysocki).

   - intel_idle cleanup related to the sysfs interface (Len Brown).

   - Off-by-one bug fix in the OPP (Operating Performance Points)
     framework (Andrzej Hajda).

   - OPP framework's documentation fix (Viresh Kumar).

   - cpufreq qoriq driver cleanup (Tang Yuantian).

   - Fixes for typos in comments in the device runtime PM framework
     (Christophe Jaillet)"

* tag 'pm-extra-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  intel_idle: stop exposing platform acronyms in sysfs
  cpufreq: intel_pstate: Fix limits issue with operation mode switching
  PM / hibernate: Define pr_fmt() and use pr_*() instead of printk()
  PM / hibernate: Untangle power_down()
  cpuidle: menu: Avoid taking spinlock for accessing QoS values
  PM / QoS: Remove global notifiers
  PM / runtime: Fix some typos
  cpufreq: qoriq: clean up unused code
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/domain.c    178
-rw-r--r--  drivers/base/power/opp/core.c    3
-rw-r--r--  drivers/base/power/qos.c        53
3 files changed, 100 insertions, 134 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3a75fb1b4126..e697dec9d25b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -274,6 +274,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 }
 
 /**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the releated device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+			   unsigned int depth)
+{
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
+	unsigned int not_suspended = 0;
+
+	/*
+	 * Do not try to power off the domain in the following situations:
+	 * (1) The domain is already in the "power off" state.
+	 * (2) System suspend is in progress.
+	 */
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->prepared_count > 0)
+		return 0;
+
+	if (atomic_read(&genpd->sd_count) > 0)
+		return -EBUSY;
+
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		enum pm_qos_flags_status stat;
+
+		stat = dev_pm_qos_flags(pdd->dev,
+					PM_QOS_FLAG_NO_POWER_OFF
+						| PM_QOS_FLAG_REMOTE_WAKEUP);
+		if (stat > PM_QOS_FLAGS_NONE)
+			return -EBUSY;
+
+		/*
+		 * Do not allow PM domain to be powered off, when an IRQ safe
+		 * device is part of a non-IRQ safe domain.
+		 */
+		if (!pm_runtime_suspended(pdd->dev) ||
+			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+			not_suspended++;
+	}
+
+	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+		return -EBUSY;
+
+	if (genpd->gov && genpd->gov->power_down_ok) {
+		if (!genpd->gov->power_down_ok(&genpd->domain))
+			return -EAGAIN;
+	}
+
+	if (genpd->power_off) {
+		int ret;
+
+		if (atomic_read(&genpd->sd_count) > 0)
+			return -EBUSY;
+
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call genpd_power_on() for the master yet after
+		 * incrementing it. In that case genpd_power_on() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the genpd_power_on() restore power for us (this shouldn't
+		 * happen very often).
+		 */
+		ret = _genpd_power_off(genpd, true);
+		if (ret)
+			return ret;
+	}
+
+	genpd->status = GPD_STATE_POWER_OFF;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
+	}
+
+	return 0;
+}
+
+/**
  * genpd_power_on - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  * @depth: nesting count for lockdep.
@@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 					&genpd->slave_links,
 					slave_node) {
 		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
 	}
 
 	return ret;
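The depth argument threaded through these calls exists for lockdep's benefit: each level of the master chain is locked with a distinct subclass via genpd_lock_nested(), so the recursive walk up nested domains is not misreported as a self-deadlock. A minimal kernel-style sketch of the pattern (walk_masters is a hypothetical helper, not part of this patch):

/* Hypothetical helper illustrating the depth + 1 locking pattern used
 * above: each recursion level takes the master's lock with a higher
 * lockdep subclass, mirroring what genpd_power_off() does. */
static void walk_masters(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_lock_nested(link->master, depth + 1);	/* subclass depth + 1 */
		walk_masters(link->master, depth + 1);		/* grandmasters: depth + 2 */
		genpd_unlock(link->master);
	}
}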
@@ -368,87 +457,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 }
 
 /**
- * genpd_power_off - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
-{
-	struct pm_domain_data *pdd;
-	struct gpd_link *link;
-	unsigned int not_suspended = 0;
-
-	/*
-	 * Do not try to power off the domain in the following situations:
-	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
-	 */
-	if (genpd->status == GPD_STATE_POWER_OFF
-	    || genpd->prepared_count > 0)
-		return 0;
-
-	if (atomic_read(&genpd->sd_count) > 0)
-		return -EBUSY;
-
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		enum pm_qos_flags_status stat;
-
-		stat = dev_pm_qos_flags(pdd->dev,
-					PM_QOS_FLAG_NO_POWER_OFF
-						| PM_QOS_FLAG_REMOTE_WAKEUP);
-		if (stat > PM_QOS_FLAGS_NONE)
-			return -EBUSY;
-
-		/*
-		 * Do not allow PM domain to be powered off, when an IRQ safe
-		 * device is part of a non-IRQ safe domain.
-		 */
-		if (!pm_runtime_suspended(pdd->dev) ||
-			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
-			not_suspended++;
-	}
-
-	if (not_suspended > 1 || (not_suspended == 1 && is_async))
-		return -EBUSY;
-
-	if (genpd->gov && genpd->gov->power_down_ok) {
-		if (!genpd->gov->power_down_ok(&genpd->domain))
-			return -EAGAIN;
-	}
-
-	if (genpd->power_off) {
-		int ret;
-
-		if (atomic_read(&genpd->sd_count) > 0)
-			return -EBUSY;
-
-		/*
-		 * If sd_count > 0 at this point, one of the subdomains hasn't
-		 * managed to call genpd_power_on() for the master yet after
-		 * incrementing it. In that case genpd_power_on() will wait
-		 * for us to drop the lock, so we can call .power_off() and let
-		 * the genpd_power_on() restore power for us (this shouldn't
-		 * happen very often).
-		 */
-		ret = _genpd_power_off(genpd, true);
-		if (ret)
-			return ret;
-	}
-
-	genpd->status = GPD_STATE_POWER_OFF;
-
-	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
-	}
-
-	return 0;
-}
-
-/**
  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  * @work: Work structure used for scheduling the execution of this function.
  */
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, true);
+	genpd_power_off(genpd, false, 0);
 	genpd_unlock(genpd);
 }
 
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, false);
+	genpd_power_off(genpd, true, 0);
 	genpd_unlock(genpd);
 
 	return 0;
@@ -658,7 +666,7 @@ err_poweroff:
 	if (!pm_runtime_is_irq_safe(dev) ||
 	    (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 		genpd_lock(genpd);
-		genpd_power_off(genpd, 0);
+		genpd_power_off(genpd, true, 0);
 		genpd_unlock(genpd);
 	}
 
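After the rename from is_async to one_dev_on, the call sites above read naturally: the ->runtime_suspend() and error paths pass true because the device being handled has not yet been marked RPM_SUSPENDED, while the scheduled work item passes false because every device must already be suspended by then. A standalone userspace model of the not_suspended gate (power_off_gate is a made-up name and -1 stands in for -EBUSY; this is a sketch of the check, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Models the gate in genpd_power_off(): one_dev_on permits exactly one
 * device that is not yet RPM_SUSPENDED, which is the case when the call
 * comes from the domain's own runtime-suspend path. */
static int power_off_gate(unsigned int not_suspended, bool one_dev_on)
{
	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -1;	/* keep the domain powered (-EBUSY) */
	return 0;		/* OK to power off */
}

int main(void)
{
	printf("work fn,    1 active: %d\n", power_off_gate(1, false)); /* -1 */
	printf("rt suspend, 1 active: %d\n", power_off_gate(1, true));  /*  0 */
	printf("rt suspend, 2 active: %d\n", power_off_gate(2, true));  /* -1 */
	return 0;
}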
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 91ec3232d630..dae61720b314 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	for (i = 0; reg = regulators[i], i < count; i++) {
+	for (i = 0; i < count; i++) {
+		reg = regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
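The off-by-one fixed here comes from the comma operator: in the old condition `reg = regulators[i], i < count`, the assignment is evaluated before the bound check on every test, so the final test reads regulators[count], one element past the end. A standalone demonstration with plain ints instead of struct regulator pointers, and an extra canary slot so the stray read stays in bounds and is observable:

#include <stdio.h>

int main(void)
{
	/* Three real entries plus a canary one slot past "count". */
	int regulators[4] = { 10, 20, 30, -1 };
	int count = 3;
	int reg, i;

	/* Buggy form: assigns from regulators[i] BEFORE testing i < count,
	 * so the final test (i == 3) reads the element past the end. */
	for (i = 0; reg = regulators[i], i < count; i++)
		printf("buggy: i=%d reg=%d\n", i, reg);
	printf("after buggy loop: reg=%d (read from regulators[count])\n", reg);

	/* Fixed form, as in the patch: check the index, then read. */
	for (i = 0; i < count; i++) {
		reg = regulators[i];
		printf("fixed: i=%d reg=%d\n", i, reg);
	}
	return 0;
}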
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index d888d9869b6a..f850daeffba4 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -17,12 +17,9 @@
  *
  * This QoS design is best effort based. Dependents register their QoS needs.
  * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- * . a per-device notification callback using the dev_pm_qos_*_notifier API.
- *   The notification chain data is stored in the per-device constraint
- *   data struct.
- * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- *   API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
  *
  * Note about the per-device constraint data struct allocation:
  * . The per-device constraints data struct ptr is tored into the device
@@ -49,8 +46,6 @@
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
 /**
  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
  * @dev: Device to check the PM QoS flags for.
@@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	lockdep_assert_held(&dev->power.lock);
 
-	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+	return dev_pm_qos_raw_read_value(dev);
 }
 
 /**
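The deleted body does not disappear: this series (see "cpuidle: menu: Avoid taking spinlock for accessing QoS values" in the merge above) moves it into dev_pm_qos_raw_read_value(), which callers such as the cpuidle menu governor can use without first taking dev->power.lock. A sketch of what that helper plausibly looks like, reconstructed from the lines removed here (the authoritative definition lives in the pm_qos headers):

/* Sketch based on the removed body above: a lockless read of the cached
 * resume-latency target, safe because pm_qos_read_value() is an atomic
 * read of the aggregated value. */
static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}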
@@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
  * @value: Value to assign to the QoS request.
  *
  * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
  */
 static int apply_constraint(struct dev_pm_qos_request *req,
 			    enum pm_qos_req_action action, s32 value)
@@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
 					   &req->data.pnode, action, value);
-		if (ret) {
-			value = pm_qos_read_value(&qos->resume_latency);
-			blocking_notifier_call_chain(&dev_pm_notifiers,
-						     (unsigned long)value,
-						     req);
-		}
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -536,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 
 /**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
-/**
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
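With the global chain gone, per-device notifiers are the only remaining watcher mechanism. A hedged sketch of how a driver would use the surviving API (signatures as they appear in this v4.11-era tree; my_qos_notify and the probe/remove helpers are made-up names):

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

/* Called whenever the aggregated resume-latency constraint changes for
 * the one device this block was registered against. */
static int my_qos_notify(struct notifier_block *nb, unsigned long value,
			 void *data)
{
	pr_info("resume latency target changed to %lu\n", value);
	return NOTIFY_DONE;
}

static struct notifier_block my_qos_nb = {
	.notifier_call = my_qos_notify,
};

static int my_driver_probe_qos(struct device *dev)
{
	/* Per-device registration; there is no global equivalent anymore. */
	return dev_pm_qos_add_notifier(dev, &my_qos_nb);
}

static void my_driver_remove_qos(struct device *dev)
{
	dev_pm_qos_remove_notifier(dev, &my_qos_nb);
}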