Diffstat (limited to 'drivers/base/power/runtime.c')
 drivers/base/power/runtime.c | 186
 1 file changed, 179 insertions(+), 7 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 5bd4daa93ef1..cd4e100a1362 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -9,7 +9,6 @@
 
 #include <linux/sched.h>
 #include <linux/pm_runtime.h>
-#include <linux/jiffies.h>
 #include "power.h"
 
 static int rpm_resume(struct device *dev, int rpmflags);
@@ -79,6 +78,53 @@ static void pm_runtime_cancel_pending(struct device *dev)
 	dev->power.request = RPM_REQ_NONE;
 }
 
+/**
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
+ *
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time.  If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
+ */
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+	int autosuspend_delay;
+	long elapsed;
+	unsigned long last_busy;
+	unsigned long expires = 0;
+
+	if (!dev->power.use_autosuspend)
+		goto out;
+
+	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	if (autosuspend_delay < 0)
+		goto out;
+
+	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	elapsed = jiffies - last_busy;
+	if (elapsed < 0)
+		goto out;	/* jiffies has wrapped around. */
+
+	/*
+	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
+	 * up to the nearest second.
+	 */
+	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+	if (autosuspend_delay >= 1000)
+		expires = round_jiffies(expires);
+	expires += !expires;
+	if (elapsed >= expires - last_busy)
+		expires = 0;	/* Already expired. */
+
+ out:
+	return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+
 /**
  * rpm_check_suspend_allowed - Test whether a device may be suspended.
  * @dev: Device to test.
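
Because the helper is exported, a subsystem or driver can re-check the expiration from its own ->runtime_suspend() callback and back off if the device was marked busy in the meantime. A minimal sketch of that pattern (the foo_runtime_suspend() callback and its power-down step are illustrative, not part of this patch):

	#include <linux/pm_runtime.h>

	/* Illustrative callback: bail out if the autosuspend delay was rearmed. */
	static int foo_runtime_suspend(struct device *dev)
	{
		if (pm_runtime_autosuspend_expiration(dev) != 0)
			return -EAGAIN;	/* device was busy again; retry later */

		/* ... put the hardware into a low-power state ... */
		return 0;
	}
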
@@ -234,6 +280,32 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	if (retval)
 		goto out;
 
+	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
+	if ((rpmflags & RPM_AUTO)
+	    && dev->power.runtime_status != RPM_SUSPENDING) {
+		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+		if (expires != 0) {
+			/* Pending requests need to be canceled. */
+			dev->power.request = RPM_REQ_NONE;
+
+			/*
+			 * Optimization: If the timer is already running and is
+			 * set to expire at or before the autosuspend delay,
+			 * avoid the overhead of resetting it.  Just let it
+			 * expire; pm_suspend_timer_fn() will take care of the
+			 * rest.
+			 */
+			if (!(dev->power.timer_expires && time_before_eq(
+			    dev->power.timer_expires, expires))) {
+				dev->power.timer_expires = expires;
+				mod_timer(&dev->power.suspend_timer, expires);
+			}
+			dev->power.timer_autosuspends = 1;
+			goto out;
+		}
+	}
+
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
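
For context, this RPM_AUTO branch is normally entered from a driver's idle path, which records recent activity and then drops its usage count asynchronously. A hedged sketch of that calling side, assuming the pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() helpers introduced alongside this patch in include/linux/pm_runtime.h (foo_io_complete() is illustrative):

	/* Illustrative idle path: record activity, then request an autosuspend. */
	static void foo_io_complete(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);		/* updates dev->power.last_busy */
		pm_runtime_put_autosuspend(dev);	/* async suspend with RPM_AUTO set */
	}
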
@@ -268,7 +340,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	/* Carry out an asynchronous or a synchronous suspend. */
 	if (rpmflags & RPM_ASYNC) {
-		dev->power.request = RPM_REQ_SUSPEND;
+		dev->power.request = (rpmflags & RPM_AUTO) ?
+		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 		if (!dev->power.request_pending) {
 			dev->power.request_pending = true;
 			queue_work(pm_wq, &dev->power.work);
@@ -383,8 +456,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	if (retval)
 		goto out;
 
-	/* Other scheduled or pending requests need to be canceled. */
-	pm_runtime_cancel_pending(dev);
+	/*
+	 * Other scheduled or pending requests need to be canceled.  Small
+	 * optimization: If an autosuspend timer is running, leave it running
+	 * rather than cancelling it now only to restart it again in the near
+	 * future.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
 
 	if (dev->power.runtime_status == RPM_ACTIVE) {
 		retval = 1;
@@ -568,6 +648,9 @@ static void pm_runtime_work(struct work_struct *work)
 	case RPM_REQ_SUSPEND:
 		rpm_suspend(dev, RPM_NOWAIT);
 		break;
+	case RPM_REQ_AUTOSUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
+		break;
 	case RPM_REQ_RESUME:
 		rpm_resume(dev, RPM_NOWAIT);
 		break;
@@ -595,7 +678,8 @@ static void pm_suspend_timer_fn(unsigned long data)
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		rpm_suspend(dev, RPM_ASYNC);
+		rpm_suspend(dev, dev->power.timer_autosuspends ?
+		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -627,6 +711,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 
 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
 	dev->power.timer_expires += !dev->power.timer_expires;
+	dev->power.timer_autosuspends = 0;
 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 
  out:
@@ -670,7 +755,9 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
- * Carry out a suspend, either synchronous or asynchronous.
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out a suspend,
+ * either synchronous or asynchronous.
  *
  * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
@@ -679,6 +766,11 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -980,7 +1072,7 @@ void pm_runtime_allow(struct device *dev)
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		rpm_idle(dev, 0);
+		rpm_idle(dev, RPM_AUTO);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1007,6 +1099,86 @@ void pm_runtime_no_callbacks(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
 
 /**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it.  Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+	int delay = dev->power.autosuspend_delay;
+
+	/* Should runtime suspend be prevented now? */
+	if (dev->power.use_autosuspend && delay < 0) {
+
+		/* If it used to be allowed then prevent it. */
+		if (!old_use || old_delay >= 0) {
+			atomic_inc(&dev->power.usage_count);
+			rpm_resume(dev, 0);
+		}
+	}
+
+	/* Runtime suspend should be allowed now. */
+	else {
+
+		/* If it used to be prevented then allow it. */
+		if (old_use && old_delay < 0)
+			atomic_dec(&dev->power.usage_count);
+
+		/* Maybe we can autosuspend now. */
+		rpm_idle(dev, RPM_AUTO);
+	}
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value.  If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
+ * changes the other way, allow run-time suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.autosuspend_delay = delay;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.use_autosuspend = use;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
+
+/**
  * pm_runtime_init - Initialize run-time PM fields in given device object.
  * @dev: Device object to initialize.
  */
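
Taken together, a driver would typically opt in to autosuspend once at probe time and let the core handle the rest. A minimal sketch, assuming a hypothetical foo device and the pm_runtime_use_autosuspend() and pm_runtime_enable() wrappers from include/linux/pm_runtime.h:

	#include <linux/pm_runtime.h>

	/* Illustrative probe: suspend automatically after 2 seconds of inactivity. */
	static int foo_probe(struct device *dev)
	{
		/* ... device-specific initialization ... */

		pm_runtime_set_autosuspend_delay(dev, 2000);	/* delay in milliseconds */
		pm_runtime_use_autosuspend(dev);		/* sets power.use_autosuspend */
		pm_runtime_enable(dev);
		return 0;
	}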