Diffstat (limited to 'drivers/base/power/runtime.c')
-rw-r--r--  drivers/base/power/runtime.c | 998
1 file changed, 549 insertions(+), 449 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b78c401ffa73..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -2,17 +2,55 @@
  * drivers/base/power/runtime.c - Helper functions for device run-time PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
  *
  * This file is released under the GPLv2.
  */
 
 #include <linux/sched.h>
 #include <linux/pm_runtime.h>
-#include <linux/jiffies.h>
+#include "power.h"
 
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
-static int __pm_request_idle(struct device *dev);
-static int __pm_request_resume(struct device *dev);
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
 
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
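For orientation: the two jiffies counters maintained by update_pm_runtime_accounting() are what tools such as PowerTOP ultimately read. A minimal sketch of the conversion to milliseconds (illustrative only; the real reporting lives in the runtime PM sysfs attributes, and this helper name is hypothetical):

	/* Hypothetical helper, not part of this patch: derive the cumulative
	 * active time in milliseconds from the counter updated above. */
	static u64 example_runtime_active_ms(struct device *dev)
	{
		return jiffies_to_msecs(dev->power.active_jiffies);
	}
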
@@ -40,62 +78,156 @@ static void pm_runtime_cancel_pending(struct device *dev)
 	dev->power.request = RPM_REQ_NONE;
 }
 
-/**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+/*
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
  *
- * This function must be called under dev->power.lock with interrupts disabled.
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time.  If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
  */
-static int __pm_runtime_idle(struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+	int autosuspend_delay;
+	long elapsed;
+	unsigned long last_busy;
+	unsigned long expires = 0;
+
+	if (!dev->power.use_autosuspend)
+		goto out;
+
+	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	if (autosuspend_delay < 0)
+		goto out;
+
+	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	elapsed = jiffies - last_busy;
+	if (elapsed < 0)
+		goto out;	/* jiffies has wrapped around. */
+
+	/*
+	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
+	 * up to the nearest second.
+	 */
+	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+	if (autosuspend_delay >= 1000)
+		expires = round_jiffies(expires);
+	expires += !expires;
+	if (elapsed >= expires - last_busy)
+		expires = 0;	/* Already expired. */
+
+ out:
+	return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
 {
 	int retval = 0;
 
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
-	else if (dev->power.idle_notification)
-		retval = -EINPROGRESS;
 	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status != RPM_ACTIVE)
+	    || dev->power.disable_depth > 0)
 		retval = -EAGAIN;
 	else if (!pm_children_suspended(dev))
 		retval = -EBUSY;
+
+	/* Pending resume requests take precedence over suspends. */
+	else if ((dev->power.deferred_resume
+			&& dev->power.runtime_status == RPM_SUSPENDING)
+	    || (dev->power.request_pending
+			&& dev->power.request == RPM_REQ_RESUME))
+		retval = -EAGAIN;
+	else if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+
+	return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's run-time PM status allows it to be suspended.  If
+ * another idle notification has been started earlier, return immediately.  If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+	int (*callback)(struct device *);
+	int retval;
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
+	else if (dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+
+	/*
+	 * Any pending request other than an idle notification takes
+	 * precedence over us, except that the timer may be running.
+	 */
+	else if (dev->power.request_pending &&
+	    dev->power.request > RPM_REQ_IDLE)
+		retval = -EAGAIN;
+
+	/* Act as though RPM_NOWAIT is always set. */
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
 	if (retval)
 		goto out;
 
-	if (dev->power.request_pending) {
-		/*
-		 * If an idle notification request is pending, cancel it. Any
-		 * other pending request takes precedence over us.
-		 */
-		if (dev->power.request == RPM_REQ_IDLE) {
-			dev->power.request = RPM_REQ_NONE;
-		} else if (dev->power.request != RPM_REQ_NONE) {
-			retval = -EAGAIN;
-			goto out;
+	/* Pending requests need to be canceled. */
+	dev->power.request = RPM_REQ_NONE;
+
+	if (dev->power.no_callbacks) {
+		/* Assume ->runtime_idle() callback would have suspended. */
+		retval = rpm_suspend(dev, rpmflags);
+		goto out;
+	}
+
+	/* Carry out an asynchronous or a synchronous idle notification. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_IDLE;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
 		}
+		goto out;
 	}
 
 	dev->power.idle_notification = true;
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->bus->pm->runtime_idle(dev);
-
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->type->pm->runtime_idle(dev);
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_idle;
+	else if (dev->type && dev->type->pm)
+		callback = dev->type->pm->runtime_idle;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_idle;
+	else if (dev->bus && dev->bus->pm)
+		callback = dev->bus->pm->runtime_idle;
+	else
+		callback = NULL;
 
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_idle) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		dev->class->pm->runtime_idle(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
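The autosuspend helpers above are driven from the device-driver side. A minimal sketch of the intended usage, assuming the wrapper functions this series adds to include/linux/pm_runtime.h (the foo_* names are hypothetical):

	/* Hypothetical driver: keep the device powered for 2 s after the
	 * last I/O, then let the autosuspend timer take it down. */
	static int foo_probe(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 2000);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
		return 0;
	}

	static void foo_io_done(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);		/* refreshes power.last_busy */
		pm_runtime_put_autosuspend(dev);	/* RPM_GET_PUT | RPM_ASYNC | RPM_AUTO */
	}
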
@@ -108,113 +240,102 @@ static int __pm_runtime_idle(struct device *dev)
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
  */
-int pm_runtime_idle(struct device *dev)
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
 
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-
-/**
- * update_pm_runtime_accounting - Update the time accounting of power states
- * @dev: Device to update the accounting for
- *
- * In order to be able to have time accounting of the various power states
- * (as used by programs such as PowerTOP to show the effectiveness of runtime
- * PM), we need to track the time spent in each state.
- * update_pm_runtime_accounting must be called each time before the
- * runtime_status field is updated, to account the time in the old state
- * correctly.
- */
-void update_pm_runtime_accounting(struct device *dev)
-{
-	unsigned long now = jiffies;
-	int delta;
-
-	delta = now - dev->power.accounting_timestamp;
-
-	if (delta < 0)
-		delta = 0;
+	if (!cb)
+		return -ENOSYS;
 
-	dev->power.accounting_timestamp = now;
-
-	if (dev->power.disable_depth > 0)
-		return;
+	if (dev->power.irq_safe) {
+		retval = cb(dev);
+	} else {
+		spin_unlock_irq(&dev->power.lock);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
-	else
-		dev->power.active_jiffies += delta;
-}
+		retval = cb(dev);
 
-static void __update_runtime_status(struct device *dev, enum rpm_status status)
-{
-	update_pm_runtime_accounting(dev);
-	dev->power.runtime_status = status;
+		spin_lock_irq(&dev->power.lock);
+	}
+	dev->power.runtime_error = retval;
+	return retval;
 }
 
 /**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
- * scheduled, cancel it.
+ * Check if the device's run-time PM status allows it to be suspended.  If
+ * another suspend has been started earlier, either return immediately or wait
+ * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
+ * pending idle notification.  If the RPM_ASYNC flag is set then queue a
+ * suspend request; otherwise run the ->runtime_suspend() callback directly.
+ * If a deferred resume was requested while the callback was running then carry
+ * it out; otherwise send an idle notification for the device (if the suspend
+ * failed) or for its parent (if the suspend succeeded).
  *
 * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
-	bool notify = false;
-	int retval = 0;
+	int retval;
 
-	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
+	retval = rpm_check_suspend_allowed(dev);
 
-	/* Pending resume requests take precedence over us. */
-	if (dev->power.request_pending
-	    && dev->power.request == RPM_REQ_RESUME) {
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+	else if (dev->power.runtime_status == RPM_RESUMING &&
+	    !(rpmflags & RPM_ASYNC))
 		retval = -EAGAIN;
+	if (retval)
 		goto out;
+
+	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
+	if ((rpmflags & RPM_AUTO)
+	    && dev->power.runtime_status != RPM_SUSPENDING) {
+		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+		if (expires != 0) {
+			/* Pending requests need to be canceled. */
+			dev->power.request = RPM_REQ_NONE;
+
+			/*
+			 * Optimization: If the timer is already running and is
+			 * set to expire at or before the autosuspend delay,
+			 * avoid the overhead of resetting it.  Just let it
+			 * expire; pm_suspend_timer_fn() will take care of the
+			 * rest.
+			 */
+			if (!(dev->power.timer_expires && time_before_eq(
+			    dev->power.timer_expires, expires))) {
+				dev->power.timer_expires = expires;
+				mod_timer(&dev->power.suspend_timer, expires);
+			}
+			dev->power.timer_autosuspends = 1;
+			goto out;
+		}
 	}
 
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING
-	    || dev->power.disable_depth > 0
-	    || atomic_read(&dev->power.usage_count) > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		goto out;
-
 	if (dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			retval = -EINPROGRESS;
 			goto out;
 		}
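Note the power.irq_safe branch in rpm_callback(): when the flag is set, the callback runs with dev->power.lock held and interrupts disabled, so it must not sleep. A sketch of what such a callback is restricted to (hypothetical driver; struct foo and the FOO_* register names are made up):

	struct foo { void __iomem *base; };	/* hypothetical driver state */

	/* Hypothetical irq-safe callback: register pokes only, no sleeping
	 * calls, because rpm_callback() keeps the spinlock held around it. */
	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		writel(FOO_CLK_GATE, foo->base + FOO_PM_REG);
		return 0;
	}
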
@@ -236,46 +357,44 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
+
+	/* Carry out an asynchronous or a synchronous suspend. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = (rpmflags & RPM_AUTO) ?
+		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		goto out;
+	}
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->class->pm->runtime_suspend(dev);
+	__update_runtime_status(dev, RPM_SUSPENDING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_suspend;
+	else if (dev->type && dev->type->pm)
+		callback = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_suspend;
+	else if (dev->bus && dev->bus->pm)
+		callback = dev->bus->pm->runtime_suspend;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			if (dev->power.timer_expires == 0)
-				notify = true;
+		dev->power.deferred_resume = 0;
+		if (retval == -EAGAIN || retval == -EBUSY)
 			dev->power.runtime_error = 0;
-		} else {
+		else
 			pm_runtime_cancel_pending(dev);
-		}
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
 
@@ -287,89 +406,86 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
 
-	if (notify)
-		__pm_runtime_idle(dev);
-
-	if (parent && !parent->power.ignore_children) {
-		spin_unlock_irq(&dev->power.lock);
+	/* Maybe the parent is now able to suspend. */
+	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+		spin_unlock(&dev->power.lock);
 
-		pm_request_idle(parent);
+		spin_lock(&parent->power.lock);
+		rpm_idle(parent, RPM_ASYNC);
+		spin_unlock(&parent->power.lock);
 
-		spin_lock_irq(&dev->power.lock);
+		spin_lock(&dev->power.lock);
 	}
 
  out:
-	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
 
 /**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, false);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * Check if the device's run-time PM status allows it to be resumed.  Cancel
+ * any scheduled or pending requests.  If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
+ * parallel with this function, either tell the other process to resume after
+ * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
+ * flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly.  Queue an idle notification for the
+ * device if the resume succeeded.
  *
 * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
 	int retval = 0;
 
-	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
+	if (dev->power.runtime_error)
 		retval = -EINVAL;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval)
 		goto out;
-	}
 
-	pm_runtime_cancel_pending(dev);
+	/*
+	 * Other scheduled or pending requests need to be canceled.  Small
+	 * optimization: If an autosuspend timer is running, leave it running
+	 * rather than cancelling it now only to restart it again in the near
+	 * future.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
 
-	if (dev->power.runtime_status == RPM_ACTIVE)
+	if (dev->power.runtime_status == RPM_ACTIVE) {
 		retval = 1;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval)
 		goto out;
+	}
 
 	if (dev->power.runtime_status == RPM_RESUMING
 	    || dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			if (dev->power.runtime_status == RPM_SUSPENDING)
 				dev->power.deferred_resume = true;
-			retval = -EINPROGRESS;
+			else
+				retval = -EINPROGRESS;
 			goto out;
 		}
@@ -391,12 +507,43 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
+	/*
+	 * See if we can skip waking up the parent.  This is safe only if
+	 * power.no_callbacks is set, because otherwise we don't know whether
+	 * the resume will actually succeed.
+	 */
+	if (dev->power.no_callbacks && !parent && dev->parent) {
+		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+		if (dev->parent->power.disable_depth > 0
+		    || dev->parent->power.ignore_children
+		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
+			atomic_inc(&dev->parent->power.child_count);
+			spin_unlock(&dev->parent->power.lock);
+			goto no_callback;	/* Assume success. */
+		}
+		spin_unlock(&dev->parent->power.lock);
+	}
+
+	/* Carry out an asynchronous or a synchronous resume. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_RESUME;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		retval = 0;
+		goto out;
+	}
+
 	if (!parent && dev->parent) {
 		/*
-		 * Increment the parent's resume counter and resume it if
-		 * necessary.
+		 * Increment the parent's usage counter and resume it if
+		 * necessary.  Not needed if dev is irq-safe; then the
+		 * parent is permanently resumed.
 		 */
 		parent = dev->parent;
+		if (dev->power.irq_safe)
+			goto skip_parent;
 		spin_unlock(&dev->power.lock);
 
 		pm_runtime_get_noresume(parent);
@@ -408,7 +555,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	 */
 	if (!parent->power.disable_depth
 	    && !parent->power.ignore_children) {
-		__pm_runtime_resume(parent, false);
+		rpm_resume(parent, 0);
 		if (parent->power.runtime_status != RPM_ACTIVE)
 			retval = -EBUSY;
 	}
@@ -419,40 +566,30 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 			goto out;
 		goto repeat;
 	}
+ skip_parent:
 
-	__update_runtime_status(dev, RPM_RESUMING);
-
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_resume(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_resume(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
 
-		retval = dev->class->pm->runtime_resume(dev);
+	__update_runtime_status(dev, RPM_RESUMING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_resume;
+	else if (dev->type && dev->type->pm)
+		callback = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_resume;
+	else if (dev->bus && dev->bus->pm)
+		callback = dev->bus->pm->runtime_resume;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
@@ -460,10 +597,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (!retval)
-		__pm_request_idle(dev);
+		rpm_idle(dev, RPM_ASYNC);
 
  out:
-	if (parent) {
+	if (parent && !dev->power.irq_safe) {
 		spin_unlock_irq(&dev->power.lock);
 
 		pm_runtime_put(parent);
@@ -471,28 +608,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		spin_lock_irq(&dev->power.lock);
 	}
 
-	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
 
 /**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, false);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
-
-/**
  * pm_runtime_work - Universal run-time PM work function.
  * @work: Work structure used for scheduling the execution of this function.
  *
@@ -517,13 +638,16 @@ static void pm_runtime_work(struct work_struct *work)
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev);
+		rpm_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, true);
+		rpm_suspend(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_AUTOSUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, true);
+		rpm_resume(dev, RPM_NOWAIT);
 		break;
 	}
 
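Everything queued with RPM_ASYNC funnels through pm_runtime_work() above, which re-runs the operation with RPM_NOWAIT so the pm_wq worker never blocks waiting on a concurrent state transition. A typical producer of such requests (hedged sketch; pm_request_resume() is the wrapper for __pm_runtime_resume(dev, RPM_ASYNC) in this series):

	/* Hypothetical interrupt handler: safe in atomic context because the
	 * resume is only queued here and actually runs later from pm_wq. */
	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct device *dev = data;

		pm_request_resume(dev);
		return IRQ_HANDLED;
	}
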
@@ -532,117 +656,10 @@ static void pm_runtime_work(struct work_struct *work)
 }
 
 /**
- * __pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- *
- * Check if the device's run-time PM status is correct for suspending the device
- * and queue up a request to run __pm_runtime_idle() for it.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_idle(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		retval = -EINVAL;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status == RPM_SUSPENDED
-	    || dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		return retval;
-
-	if (dev->power.request_pending) {
-		/* Any requests other then RPM_REQ_IDLE take precedence. */
-		if (dev->power.request == RPM_REQ_NONE)
-			dev->power.request = RPM_REQ_IDLE;
-		else if (dev->power.request != RPM_REQ_IDLE)
-			retval = -EAGAIN;
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_IDLE;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return retval;
-}
-
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_idle(dev);
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
-
-/**
- * __pm_request_suspend - Submit a suspend request for given device.
- * @dev: Device to suspend.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_suspend(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EINPROGRESS;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but we can
-		 * overtake any other pending request.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME)
-			retval = -EAGAIN;
-		else if (dev->power.request != RPM_REQ_SUSPEND)
-			dev->power.request = retval ?
-			    RPM_REQ_NONE : RPM_REQ_SUSPEND;
-		return retval;
-	} else if (retval) {
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_SUSPEND;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return 0;
-}
-
-/**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  * @data: Device pointer passed by pm_schedule_suspend().
  *
- * Check if the time is right and execute __pm_request_suspend() in that case.
+ * Check if the time is right and queue a suspend request.
  */
 static void pm_suspend_timer_fn(unsigned long data)
 {
@@ -656,7 +673,8 @@ static void pm_suspend_timer_fn(unsigned long data)
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_request_suspend(dev);
+		rpm_suspend(dev, dev->power.timer_autosuspends ?
+		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -670,47 +688,25 @@ static void pm_suspend_timer_fn(unsigned long data)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
-	int retval = 0;
+	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
-
 	if (!delay) {
-		retval = __pm_request_suspend(dev);
+		retval = rpm_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
 
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but any
-		 * other pending requests have to be canceled.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME) {
-			retval = -EAGAIN;
-			goto out;
-		}
-		dev->power.request = RPM_REQ_NONE;
-	}
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
+	retval = rpm_check_suspend_allowed(dev);
 	if (retval)
 		goto out;
 
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-	if (!dev->power.timer_expires)
-		dev->power.timer_expires = 1;
+	dev->power.timer_expires += !dev->power.timer_expires;
+	dev->power.timer_autosuspends = 0;
 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 
  out:
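Callers of pm_schedule_suspend() are unaffected by the rework; the delay is still expressed in milliseconds. For example (sketch):

	/* Ask for a suspend in 5 s; a negative return means it was refused. */
	int ret = pm_schedule_suspend(dev, 5000);
	if (ret < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
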
@@ -721,103 +717,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
  *
- * This function must be called under dev->power.lock with interrupts disabled.
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-static int __pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_ACTIVE)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING)
-		retval = -EINPROGRESS;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
+	unsigned long flags;
+	int retval;
 
-	if (dev->power.runtime_status == RPM_SUSPENDING) {
-		dev->power.deferred_resume = true;
-		return retval;
-	}
-	if (dev->power.request_pending) {
-		/* If non-resume request is pending, we can overtake it. */
-		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
-		return retval;
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
 	}
-	if (retval)
-		return retval;
 
-	dev->power.request = RPM_REQ_RESUME;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_resume(dev);
+	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
- * @sync: If set and the device is suspended, resume it synchronously.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
+ * carry out a resume, either synchronous or asynchronous.
  *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the value of @sync.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, bool sync)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
+	unsigned long flags;
 	int retval;
 
-	atomic_inc(&dev->power.usage_count);
-	retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
-
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @sync: If the device's bus type is to be notified, do that synchronously.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the value of @sync.
- */
-int __pm_runtime_put(struct device *dev, bool sync)
-{
-	int retval = 0;
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
 
-	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
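With these three entry points in place, the public helpers reduce to flag combinations. For reference, a few of the static inline wrappers as defined by this series in include/linux/pm_runtime.h (reproduced here for orientation; the header is authoritative):

	static inline int pm_runtime_get_sync(struct device *dev)
	{
		return __pm_runtime_resume(dev, RPM_GET_PUT);
	}

	static inline int pm_runtime_put(struct device *dev)
	{
		return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
	}

	static inline int pm_runtime_put_autosuspend(struct device *dev)
	{
		return __pm_runtime_suspend(dev,
		    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
	}
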
@@ -968,7 +949,7 @@ int pm_runtime_barrier(struct device *dev)
 
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 		retval = 1;
 	}
 
@@ -1017,7 +998,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 	 */
 	pm_runtime_get_noresume(dev);
 
-	__pm_runtime_resume(dev, false);
+	rpm_resume(dev, 0);
 
 	pm_runtime_put_noidle(dev);
 	}
@@ -1065,7 +1046,7 @@ void pm_runtime_forbid(struct device *dev)
 
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, false);
+	rpm_resume(dev, 0);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1086,7 +1067,7 @@ void pm_runtime_allow(struct device *dev)
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev);
+		rpm_idle(dev, RPM_AUTO);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1094,13 +1075,130 @@ void pm_runtime_allow(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_allow);
 
 /**
+ * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no run-time PM
+ * callbacks of its own.  The run-time sysfs attributes will be removed.
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.no_callbacks = 1;
+	spin_unlock_irq(&dev->power.lock);
+	if (device_is_registered(dev))
+		rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled.  It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+	spin_lock_irq(&dev->power.lock);
+	dev->power.irq_safe = 1;
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it.  Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+	int delay = dev->power.autosuspend_delay;
+
+	/* Should runtime suspend be prevented now? */
+	if (dev->power.use_autosuspend && delay < 0) {
+
+		/* If it used to be allowed then prevent it. */
+		if (!old_use || old_delay >= 0) {
+			atomic_inc(&dev->power.usage_count);
+			rpm_resume(dev, 0);
+		}
+	}
+
+	/* Runtime suspend should be allowed now. */
+	else {
+
+		/* If it used to be prevented then allow it. */
+		if (old_use && old_delay < 0)
+			atomic_dec(&dev->power.usage_count);
+
+		/* Maybe we can autosuspend now. */
+		rpm_idle(dev, RPM_AUTO);
+	}
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value.  If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
+ * changes the other way, allow run-time suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.autosuspend_delay = delay;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.use_autosuspend = use;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
+
+/**
  * pm_runtime_init - Initialize run-time PM fields in given device object.
  * @dev: Device object to initialize.
  */
 void pm_runtime_init(struct device *dev)
 {
-	spin_lock_init(&dev->power.lock);
-
 	dev->power.runtime_status = RPM_SUSPENDED;
 	dev->power.idle_notification = false;
 
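The two new flags added above are meant to be set once at probe time, before runtime PM is enabled for the device. A hedged sketch (hypothetical driver):

	/* Hypothetical probe: a device that must be resumed from atomic
	 * context; its parent stays permanently resumed as a result. */
	static int foo_child_probe(struct device *dev)
	{
		pm_runtime_irq_safe(dev);	/* callbacks run under the spinlock */
		pm_runtime_enable(dev);
		return 0;
	}

For a device with no callbacks of its own, calling pm_runtime_no_callbacks(dev) at the same point lets rpm_idle() and rpm_suspend() take the no_callback fast paths shown earlier.
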
@@ -1137,4 +1235,6 @@ void pm_runtime_remove(struct device *dev)
 	/* Change the status back to 'suspended' to match the initial status. */
 	if (dev->power.runtime_status == RPM_ACTIVE)
 		pm_runtime_set_suspended(dev);
+	if (dev->power.irq_safe && dev->parent)
+		pm_runtime_put_sync(dev->parent);
 }