Diffstat (limited to 'drivers')
 drivers/base/dd.c            |   11 +
 drivers/base/power/Makefile  |    1 +
 drivers/base/power/main.c    |   22 +-
 drivers/base/power/power.h   |   31 ++-
 drivers/base/power/runtime.c | 1011 ++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 1068 insertions(+), 8 deletions(-)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0106875f01d..7b34b3a48f67 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -23,6 +23,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -202,7 +203,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+	pm_runtime_get_noresume(dev);
+	pm_runtime_barrier(dev);
 	ret = really_probe(dev, drv);
+	pm_runtime_put_sync(dev);
 
 	return ret;
 }
@@ -245,7 +249,9 @@ int device_attach(struct device *dev)
 			ret = 0;
 		}
 	} else {
+		pm_runtime_get_noresume(dev);
 		ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
+		pm_runtime_put_sync(dev);
 	}
 	up(&dev->sem);
 	return ret;
@@ -306,6 +312,9 @@ static void __device_release_driver(struct device *dev)
 
 	drv = dev->driver;
 	if (drv) {
+		pm_runtime_get_noresume(dev);
+		pm_runtime_barrier(dev);
+
 		driver_sysfs_remove(dev);
 
 		if (dev->bus)
@@ -324,6 +333,8 @@ static void __device_release_driver(struct device *dev)
 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 						     BUS_NOTIFY_UNBOUND_DRIVER,
 						     dev);
+
+		pm_runtime_put_sync(dev);
 	}
 }
 
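The three calls added around really_probe() form the bracket this patch uses to keep run-time PM out of the way of driver binding and unbinding: take a usage-count reference without waking the device, flush any requests already queued on pm_wq, do the work, then drop the reference with a synchronous idle check. A minimal sketch of that discipline outside the driver core (do_bind_work() is a hypothetical stand-in for really_probe()):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical stand-in for really_probe(). */
static int do_bind_work(struct device *dev)
{
	return 0;
}

static int bind_with_runtime_pm_blocked(struct device *dev)
{
	int ret;

	pm_runtime_get_noresume(dev);	/* block runtime suspend, don't resume */
	pm_runtime_barrier(dev);	/* flush requests pending on pm_wq */

	ret = do_bind_work(dev);

	pm_runtime_put_sync(dev);	/* drop the reference, idle-check now */
	return ret;
}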
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 911208b89259..3ce3519e8f30 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_PM)	+= sysfs.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b1a786b7dec..86990011277b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/resume-trace.h>
 #include <linux/rwsem.h>
 #include <linux/interrupt.h>
@@ -49,6 +50,16 @@ static DEFINE_MUTEX(dpm_list_mtx);
 static bool transition_started;
 
 /**
+ * device_pm_init - Initialize the PM-related part of a device object
+ * @dev: Device object being initialized.
+ */
+void device_pm_init(struct device *dev)
+{
+	dev->power.status = DPM_ON;
+	pm_runtime_init(dev);
+}
+
+/**
  * device_pm_lock - lock the list of active devices used by the PM core
  */
 void device_pm_lock(void)
@@ -105,6 +116,7 @@ void device_pm_remove(struct device *dev)
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	pm_runtime_remove(dev);
 }
 
 /**
@@ -512,6 +524,7 @@ static void dpm_complete(pm_message_t state)
 		mutex_unlock(&dpm_list_mtx);
 
 		device_complete(dev, state);
+		pm_runtime_put_noidle(dev);
 
 		mutex_lock(&dpm_list_mtx);
 	}
@@ -757,7 +770,14 @@ static int dpm_prepare(pm_message_t state)
 		dev->power.status = DPM_PREPARING;
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_prepare(dev, state);
+		pm_runtime_get_noresume(dev);
+		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
+			/* Wake-up requested during system sleep transition. */
+			pm_runtime_put_noidle(dev);
+			error = -EBUSY;
+		} else {
+			error = device_prepare(dev, state);
+		}
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
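pm_runtime_barrier() returns 1 only when it found a pending resume request and had to wake the device up; for a wakeup-capable device, dpm_prepare() now treats that as a wake-up event racing with the sleep transition and aborts the transition with -EBUSY. A condensed sketch of just that decision (prepare_cb is a hypothetical stand-in for the static device_prepare() in main.c):

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>

static int prepare_or_abort(struct device *dev, pm_message_t state,
			    int (*prepare_cb)(struct device *, pm_message_t))
{
	int error;

	pm_runtime_get_noresume(dev);	/* reference held until dpm_complete() */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
		/* Wake-up requested during system sleep transition. */
		pm_runtime_put_noidle(dev);
		error = -EBUSY;
	} else {
		error = prepare_cb(dev, state);
	}
	return error;
}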
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c7cb4fc3735c..b8fa1aa5225a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,7 +1,14 @@
-static inline void device_pm_init(struct device *dev)
-{
-	dev->power.status = DPM_ON;
-}
+#ifdef CONFIG_PM_RUNTIME
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+
+#else /* !CONFIG_PM_RUNTIME */
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+#endif /* !CONFIG_PM_RUNTIME */
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -16,23 +23,33 @@ static inline struct device *to_device(struct list_head *entry)
 	return container_of(entry, struct device, power.entry);
 }
 
+extern void device_pm_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
 extern void device_pm_move_after(struct device *, struct device *);
 extern void device_pm_move_last(struct device *);
 
-#else /* CONFIG_PM_SLEEP */
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+	pm_runtime_init(dev);
+}
+
+static inline void device_pm_remove(struct device *dev)
+{
+	pm_runtime_remove(dev);
+}
 
 static inline void device_pm_add(struct device *dev) {}
-static inline void device_pm_remove(struct device *dev) {}
 static inline void device_pm_move_before(struct device *deva,
 					 struct device *devb) {}
 static inline void device_pm_move_after(struct device *deva,
 					struct device *devb) {}
 static inline void device_pm_move_last(struct device *dev) {}
 
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM
 
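The net effect of the header changes: device_pm_init() always reaches pm_runtime_init(), whichever combination of CONFIG_PM_SLEEP and CONFIG_PM_RUNTIME is set, and collapses to a no-op only when both are off. A userspace-compilable sketch of that routing (a plain C mock-up, not kernel code; the config macros stand in for the Kconfig symbols):

#include <stdio.h>

#define CONFIG_PM_SLEEP   1	/* toggle these to mimic the kernel config */
#define CONFIG_PM_RUNTIME 1

struct device { int unused; };

#if CONFIG_PM_RUNTIME
static void pm_runtime_init(struct device *dev)
{
	(void)dev;
	puts("runtime PM fields initialized");
}
#else
static void pm_runtime_init(struct device *dev) { (void)dev; }	/* stub, as in power.h */
#endif

#if CONFIG_PM_SLEEP
/* out of line in main.c in the real tree */
static void device_pm_init(struct device *dev)
{
	puts("dev->power.status = DPM_ON");	/* system-sleep state */
	pm_runtime_init(dev);
}
#else
static void device_pm_init(struct device *dev)
{
	pm_runtime_init(dev);	/* no system-sleep state to track */
}
#endif

int main(void)
{
	struct device dev;

	device_pm_init(&dev);
	return 0;
}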
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 000000000000..38556f6cc22d
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1011 @@
+/*
+ * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ *
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/jiffies.h>
+
+static int __pm_runtime_resume(struct device *dev, bool from_wq);
+static int __pm_request_idle(struct device *dev);
+static int __pm_request_resume(struct device *dev);
+
+/**
+ * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_deactivate_timer(struct device *dev)
+{
+	if (dev->power.timer_expires > 0) {
+		del_timer(&dev->power.suspend_timer);
+		dev->power.timer_expires = 0;
+	}
+}
+
+/**
+ * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_cancel_pending(struct device *dev)
+{
+	pm_runtime_deactivate_timer(dev);
+	/*
+	 * In case there's a request pending, make sure its work function will
+	 * return without doing anything.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+}
+
+/**
+ * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_runtime_idle(struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int retval = 0;
+
+	dev_dbg(dev, "__pm_runtime_idle()!\n");
+
+	if (dev->power.runtime_error)
+		retval = -EINVAL;
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
+	else if (atomic_read(&dev->power.usage_count) > 0
+	    || dev->power.disable_depth > 0
+	    || dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+	else if (!pm_children_suspended(dev))
+		retval = -EBUSY;
+	if (retval)
+		goto out;
+
+	if (dev->power.request_pending) {
+		/*
+		 * If an idle notification request is pending, cancel it. Any
+		 * other pending request takes precedence over us.
+		 */
+		if (dev->power.request == RPM_REQ_IDLE) {
+			dev->power.request = RPM_REQ_NONE;
+		} else if (dev->power.request != RPM_REQ_NONE) {
+			retval = -EAGAIN;
+			goto out;
+		}
+	}
+
+	dev->power.idle_notification = true;
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
+		spin_unlock_irq(&dev->power.lock);
+
+		dev->bus->pm->runtime_idle(dev);
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
+	dev->power.idle_notification = false;
+	wake_up_all(&dev->power.wait_queue);
+
+ out:
+	dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);
+
+	return retval;
+}
+
+/**
+ * pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ */
+int pm_runtime_idle(struct device *dev)
+{
+	int retval;
+
+	spin_lock_irq(&dev->power.lock);
+	retval = __pm_runtime_idle(dev);
+	spin_unlock_irq(&dev->power.lock);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * @dev: Device to suspend.
+ * @from_wq: If set, the function has been called via pm_wq.
+ *
+ * Check if the device can be suspended and run the ->runtime_suspend() callback
+ * provided by its bus type. If another suspend has been started earlier, wait
+ * for it to finish. If an idle notification or suspend request is pending or
+ * scheduled, cancel it.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+int __pm_runtime_suspend(struct device *dev, bool from_wq)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	struct device *parent = NULL;
+	bool notify = false;
+	int retval = 0;
+
+	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
+		from_wq ? " from workqueue" : "");
+
+ repeat:
+	if (dev->power.runtime_error) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	/* Pending resume requests take precedence over us. */
+	if (dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		retval = -EAGAIN;
+		goto out;
+	}
+
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+	else if (dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.disable_depth > 0
+	    || atomic_read(&dev->power.usage_count) > 0)
+		retval = -EAGAIN;
+	else if (!pm_children_suspended(dev))
+		retval = -EBUSY;
+	if (retval)
+		goto out;
+
+	if (dev->power.runtime_status == RPM_SUSPENDING) {
+		DEFINE_WAIT(wait);
+
+		if (from_wq) {
+			retval = -EINPROGRESS;
+			goto out;
+		}
+
+		/* Wait for the other suspend running in parallel with us. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_SUSPENDING)
+				break;
+
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+		goto repeat;
+	}
+
+	dev->power.runtime_status = RPM_SUSPENDING;
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
+		spin_unlock_irq(&dev->power.lock);
+
+		retval = dev->bus->pm->runtime_suspend(dev);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.runtime_error = retval;
+	} else {
+		retval = -ENOSYS;
+	}
+
+	if (retval) {
+		dev->power.runtime_status = RPM_ACTIVE;
+		pm_runtime_cancel_pending(dev);
+		dev->power.deferred_resume = false;
+
+		if (retval == -EAGAIN || retval == -EBUSY) {
+			notify = true;
+			dev->power.runtime_error = 0;
+		}
+	} else {
+		dev->power.runtime_status = RPM_SUSPENDED;
+
+		if (dev->parent) {
+			parent = dev->parent;
+			atomic_add_unless(&parent->power.child_count, -1, 0);
+		}
+	}
+	wake_up_all(&dev->power.wait_queue);
+
+	if (dev->power.deferred_resume) {
+		dev->power.deferred_resume = false;
+		__pm_runtime_resume(dev, false);
+		retval = -EAGAIN;
+		goto out;
+	}
+
+	if (notify)
+		__pm_runtime_idle(dev);
+
+	if (parent && !parent->power.ignore_children) {
+		spin_unlock_irq(&dev->power.lock);
+
+		pm_request_idle(parent);
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
+ out:
+	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
+
+	return retval;
+}
+
+/**
+ * pm_runtime_suspend - Carry out run-time suspend of given device.
+ * @dev: Device to suspend.
+ */
+int pm_runtime_suspend(struct device *dev)
+{
+	int retval;
+
+	spin_lock_irq(&dev->power.lock);
+	retval = __pm_runtime_suspend(dev, false);
+	spin_unlock_irq(&dev->power.lock);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspend);
+
+/**
+ * __pm_runtime_resume - Carry out run-time resume of given device.
+ * @dev: Device to resume.
+ * @from_wq: If set, the function has been called via pm_wq.
+ *
+ * Check if the device can be woken up and run the ->runtime_resume() callback
+ * provided by its bus type. If another resume has been started earlier, wait
+ * for it to finish. If there's a suspend running in parallel with this
+ * function, wait for it to finish and resume the device. Cancel any scheduled
+ * or pending requests.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+int __pm_runtime_resume(struct device *dev, bool from_wq)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	struct device *parent = NULL;
+	int retval = 0;
+
+	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
+		from_wq ? " from workqueue" : "");
+
+ repeat:
+	if (dev->power.runtime_error) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	pm_runtime_cancel_pending(dev);
+
+	if (dev->power.runtime_status == RPM_ACTIVE)
+		retval = 1;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval)
+		goto out;
+
+	if (dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.runtime_status == RPM_SUSPENDING) {
+		DEFINE_WAIT(wait);
+
+		if (from_wq) {
+			if (dev->power.runtime_status == RPM_SUSPENDING)
+				dev->power.deferred_resume = true;
+			retval = -EINPROGRESS;
+			goto out;
+		}
+
+		/* Wait for the operation carried out in parallel with us. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_RESUMING
+			    && dev->power.runtime_status != RPM_SUSPENDING)
+				break;
+
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+		goto repeat;
+	}
+
+	if (!parent && dev->parent) {
+		/*
+		 * Increment the parent's resume counter and resume it if
+		 * necessary.
+		 */
+		parent = dev->parent;
+		spin_unlock_irq(&dev->power.lock);
+
+		pm_runtime_get_noresume(parent);
+
+		spin_lock_irq(&parent->power.lock);
+		/*
+		 * We can resume if the parent's run-time PM is disabled or it
+		 * is set to ignore children.
+		 */
+		if (!parent->power.disable_depth
+		    && !parent->power.ignore_children) {
+			__pm_runtime_resume(parent, false);
+			if (parent->power.runtime_status != RPM_ACTIVE)
+				retval = -EBUSY;
+		}
+		spin_unlock_irq(&parent->power.lock);
+
+		spin_lock_irq(&dev->power.lock);
+		if (retval)
+			goto out;
+		goto repeat;
+	}
+
+	dev->power.runtime_status = RPM_RESUMING;
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
+		spin_unlock_irq(&dev->power.lock);
+
+		retval = dev->bus->pm->runtime_resume(dev);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.runtime_error = retval;
+	} else {
+		retval = -ENOSYS;
+	}
+
+	if (retval) {
+		dev->power.runtime_status = RPM_SUSPENDED;
+		pm_runtime_cancel_pending(dev);
+	} else {
+		dev->power.runtime_status = RPM_ACTIVE;
+		if (parent)
+			atomic_inc(&parent->power.child_count);
+	}
+	wake_up_all(&dev->power.wait_queue);
+
+	if (!retval)
+		__pm_request_idle(dev);
+
+ out:
+	if (parent) {
+		spin_unlock_irq(&dev->power.lock);
+
+		pm_runtime_put(parent);
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
+	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+
+	return retval;
+}
+
+/**
+ * pm_runtime_resume - Carry out run-time resume of given device.
+ * @dev: Device to resume.
+ */
+int pm_runtime_resume(struct device *dev)
+{
+	int retval;
+
+	spin_lock_irq(&dev->power.lock);
+	retval = __pm_runtime_resume(dev, false);
+	spin_unlock_irq(&dev->power.lock);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_resume);
+
+/**
+ * pm_runtime_work - Universal run-time PM work function.
+ * @work: Work structure used for scheduling the execution of this function.
+ *
+ * Use @work to get the device object the work is to be done for, determine what
+ * is to be done and execute the appropriate run-time PM function.
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+	struct device *dev = container_of(work, struct device, power.work);
+	enum rpm_request req;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (!dev->power.request_pending)
+		goto out;
+
+	req = dev->power.request;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.request_pending = false;
+
+	switch (req) {
+	case RPM_REQ_NONE:
+		break;
+	case RPM_REQ_IDLE:
+		__pm_runtime_idle(dev);
+		break;
+	case RPM_REQ_SUSPEND:
+		__pm_runtime_suspend(dev, true);
+		break;
+	case RPM_REQ_RESUME:
+		__pm_runtime_resume(dev, true);
+		break;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * __pm_request_idle - Submit an idle notification request for given device.
+ * @dev: Device to handle.
+ *
+ * Check if the device's run-time PM status is correct for suspending the device
+ * and queue up a request to run __pm_runtime_idle() for it.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_idle(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev->power.runtime_error)
+		retval = -EINVAL;
+	else if (atomic_read(&dev->power.usage_count) > 0
+	    || dev->power.disable_depth > 0
+	    || dev->power.runtime_status == RPM_SUSPENDED
+	    || dev->power.runtime_status == RPM_SUSPENDING)
+		retval = -EAGAIN;
+	else if (!pm_children_suspended(dev))
+		retval = -EBUSY;
+	if (retval)
+		return retval;
+
+	if (dev->power.request_pending) {
+		/* Any requests other than RPM_REQ_IDLE take precedence. */
+		if (dev->power.request == RPM_REQ_NONE)
+			dev->power.request = RPM_REQ_IDLE;
+		else if (dev->power.request != RPM_REQ_IDLE)
+			retval = -EAGAIN;
+		return retval;
+	}
+
+	dev->power.request = RPM_REQ_IDLE;
+	dev->power.request_pending = true;
+	queue_work(pm_wq, &dev->power.work);
+
+	return retval;
+}
+
+/**
+ * pm_request_idle - Submit an idle notification request for given device.
+ * @dev: Device to handle.
+ */
+int pm_request_idle(struct device *dev)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = __pm_request_idle(dev);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_request_idle);
+
+/**
+ * __pm_request_suspend - Submit a suspend request for given device.
+ * @dev: Device to suspend.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_suspend(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev->power.runtime_error)
+		return -EINVAL;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+	else if (atomic_read(&dev->power.usage_count) > 0
+	    || dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	else if (dev->power.runtime_status == RPM_SUSPENDING)
+		retval = -EINPROGRESS;
+	else if (!pm_children_suspended(dev))
+		retval = -EBUSY;
+	if (retval < 0)
+		return retval;
+
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		/*
+		 * Pending resume requests take precedence over us, but we can
+		 * overtake any other pending request.
+		 */
+		if (dev->power.request == RPM_REQ_RESUME)
+			retval = -EAGAIN;
+		else if (dev->power.request != RPM_REQ_SUSPEND)
+			dev->power.request = retval ?
+						RPM_REQ_NONE : RPM_REQ_SUSPEND;
+		return retval;
+	} else if (retval) {
+		return retval;
+	}
+
+	dev->power.request = RPM_REQ_SUSPEND;
+	dev->power.request_pending = true;
+	queue_work(pm_wq, &dev->power.work);
+
+	return 0;
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @data: Device pointer passed by pm_schedule_suspend().
+ *
+ * Check if the time is right and execute __pm_request_suspend() in that case.
+ */
+static void pm_suspend_timer_fn(unsigned long data)
+{
+	struct device *dev = (struct device *)data;
+	unsigned long flags;
+	unsigned long expires;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	expires = dev->power.timer_expires;
+	/* If 'expires' is after 'jiffies' we've been called too early. */
+	if (expires > 0 && !time_after(expires, jiffies)) {
+		dev->power.timer_expires = 0;
+		__pm_request_suspend(dev);
+	}
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (dev->power.runtime_error) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (!delay) {
+		retval = __pm_request_suspend(dev);
+		goto out;
+	}
+
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		/*
+		 * Pending resume requests take precedence over us, but any
+		 * other pending requests have to be canceled.
+		 */
+		if (dev->power.request == RPM_REQ_RESUME) {
+			retval = -EAGAIN;
+			goto out;
+		}
+		dev->power.request = RPM_REQ_NONE;
+	}
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+	else if (dev->power.runtime_status == RPM_SUSPENDING)
+		retval = -EINPROGRESS;
+	else if (atomic_read(&dev->power.usage_count) > 0
+	    || dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	else if (!pm_children_suspended(dev))
+		retval = -EBUSY;
+	if (retval)
+		goto out;
+
+	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
+
+/**
+ * __pm_request_resume - Submit a resume request for given device.
+ * @dev: Device to resume.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_resume(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev->power.runtime_error)
+		return -EINVAL;
+
+	if (dev->power.runtime_status == RPM_ACTIVE)
+		retval = 1;
+	else if (dev->power.runtime_status == RPM_RESUMING)
+		retval = -EINPROGRESS;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval < 0)
+		return retval;
+
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		/* If a non-resume request is pending, we can overtake it. */
+		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
+		return retval;
+	} else if (retval) {
+		return retval;
+	}
+
+	dev->power.request = RPM_REQ_RESUME;
+	dev->power.request_pending = true;
+	queue_work(pm_wq, &dev->power.work);
+
+	return retval;
+}
+
+/**
+ * pm_request_resume - Submit a resume request for given device.
+ * @dev: Device to resume.
+ */
+int pm_request_resume(struct device *dev)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = __pm_request_resume(dev);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_request_resume);
+
+/**
+ * __pm_runtime_get - Reference count a device and wake it up, if necessary.
+ * @dev: Device to handle.
+ * @sync: If set and the device is suspended, resume it synchronously.
+ *
+ * Increment the usage count of the device and if it was zero previously,
+ * resume it or submit a resume request for it, depending on the value of @sync.
+ */
+int __pm_runtime_get(struct device *dev, bool sync)
+{
+	int retval = 1;
+
+	if (atomic_add_return(1, &dev->power.usage_count) == 1)
+		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_get);
+
+/**
+ * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
+ * @dev: Device to handle.
+ * @sync: If the device's bus type is to be notified, do that synchronously.
+ *
+ * Decrement the usage count of the device and if it reaches zero, carry out a
+ * synchronous idle notification or submit an idle notification request for it,
+ * depending on the value of @sync.
+ */
+int __pm_runtime_put(struct device *dev, bool sync)
+{
+	int retval = 0;
+
+	if (atomic_dec_and_test(&dev->power.usage_count))
+		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_put);
+
+/**
+ * __pm_runtime_set_status - Set run-time PM status of a device.
+ * @dev: Device to handle.
+ * @status: New run-time PM status of the device.
+ *
+ * If run-time PM of the device is disabled or its power.runtime_error field is
+ * different from zero, the status may be changed either to RPM_ACTIVE, or to
+ * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+ * However, if the device has a parent and the parent is not active, and the
+ * parent's power.ignore_children flag is unset, the device's status cannot be
+ * set to RPM_ACTIVE, so -EBUSY is returned in that case.
+ *
+ * If successful, __pm_runtime_set_status() clears the power.runtime_error field
+ * and the device parent's counter of unsuspended children is modified to
+ * reflect the new status. If the new status is RPM_SUSPENDED, an idle
+ * notification request for the parent is submitted.
+ */
+int __pm_runtime_set_status(struct device *dev, unsigned int status)
+{
+	struct device *parent = dev->parent;
+	unsigned long flags;
+	bool notify_parent = false;
+	int error = 0;
+
+	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!dev->power.runtime_error && !dev->power.disable_depth) {
+		error = -EAGAIN;
+		goto out;
+	}
+
+	if (dev->power.runtime_status == status)
+		goto out_set;
+
+	if (status == RPM_SUSPENDED) {
+		/* It always is possible to set the status to 'suspended'. */
+		if (parent) {
+			atomic_add_unless(&parent->power.child_count, -1, 0);
+			notify_parent = !parent->power.ignore_children;
+		}
+		goto out_set;
+	}
+
+	if (parent) {
+		spin_lock_irq(&parent->power.lock);
+
+		/*
+		 * It is invalid to put an active child under a parent that is
+		 * not active, has run-time PM enabled and the
+		 * 'power.ignore_children' flag unset.
+		 */
+		if (!parent->power.disable_depth
+		    && !parent->power.ignore_children
+		    && parent->power.runtime_status != RPM_ACTIVE) {
+			error = -EBUSY;
+		} else {
+			if (dev->power.runtime_status == RPM_SUSPENDED)
+				atomic_inc(&parent->power.child_count);
+		}
+
+		spin_unlock_irq(&parent->power.lock);
+
+		if (error)
+			goto out;
+	}
+
+ out_set:
+	dev->power.runtime_status = status;
+	dev->power.runtime_error = 0;
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (notify_parent)
+		pm_request_idle(parent);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * run-time PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		dev->power.request = RPM_REQ_NONE;
+		spin_unlock_irq(&dev->power.lock);
+
+		cancel_work_sync(&dev->power.work);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.request_pending = false;
+	}
+
+	if (dev->power.runtime_status == RPM_SUSPENDING
+	    || dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.idle_notification) {
+		DEFINE_WAIT(wait);
+
+		/* Suspend, wake-up or idle notification in progress. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_SUSPENDING
+			    && dev->power.runtime_status != RPM_RESUMING
+			    && !dev->power.idle_notification)
+				break;
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+	}
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter and
+ * if there's a pending resume request for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all run-time PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1, if there was a resume request pending and the device had to be woken up,
+ * 0, otherwise
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+	int retval = 0;
+
+	pm_runtime_get_noresume(dev);
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		__pm_runtime_resume(dev, false);
+		retval = 1;
+	}
+
+	__pm_runtime_barrier(dev);
+
+	spin_unlock_irq(&dev->power.lock);
+	pm_runtime_put_noidle(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+
+/**
+ * __pm_runtime_disable - Disable run-time PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending run-time PM requests for the device and wait for all
+ * operations in progress to complete. The device can be either active or
+ * suspended after its run-time PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its run-time PM.
+ */
+void __pm_runtime_disable(struct device *dev, bool check_resume)
+{
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.disable_depth > 0) {
+		dev->power.disable_depth++;
+		goto out;
+	}
+
+	/*
+	 * Wake up the device if there's a resume request pending, because that
+	 * means there probably is some I/O to process and disabling run-time PM
+	 * shouldn't prevent the device from processing the I/O.
+	 */
+	if (check_resume && dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		/*
+		 * Prevent suspends and idle notifications from being carried
+		 * out after we have woken up the device.
+		 */
+		pm_runtime_get_noresume(dev);
+
+		__pm_runtime_resume(dev, false);
+
+		pm_runtime_put_noidle(dev);
+	}
+
+	if (!dev->power.disable_depth++)
+		__pm_runtime_barrier(dev);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+
+/**
+ * pm_runtime_enable - Enable run-time PM of a device.
+ * @dev: Device to handle.
+ */
+void pm_runtime_enable(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (dev->power.disable_depth > 0)
+		dev->power.disable_depth--;
+	else
+		dev_warn(dev, "Unbalanced %s!\n", __func__);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_enable);
+
+/**
+ * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * @dev: Device object to initialize.
+ */
+void pm_runtime_init(struct device *dev)
+{
+	spin_lock_init(&dev->power.lock);
+
+	dev->power.runtime_status = RPM_SUSPENDED;
+	dev->power.idle_notification = false;
+
+	dev->power.disable_depth = 1;
+	atomic_set(&dev->power.usage_count, 0);
+
+	dev->power.runtime_error = 0;
+
+	atomic_set(&dev->power.child_count, 0);
+	pm_suspend_ignore_children(dev, false);
+
+	dev->power.request_pending = false;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.deferred_resume = false;
+	INIT_WORK(&dev->power.work, pm_runtime_work);
+
+	dev->power.timer_expires = 0;
+	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
+			(unsigned long)dev);
+
+	init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+	__pm_runtime_disable(dev, false);
+
+	/* Change the status back to 'suspended' to match the initial status. */
+	if (dev->power.runtime_status == RPM_ACTIVE)
+		pm_runtime_set_suspended(dev);
+}
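Taken together with the dd.c and main.c hooks above, a bus type opts into this framework by filling in the runtime callbacks of its struct dev_pm_ops, and drivers then bracket hardware access with get/put calls. A hedged sketch of the intended usage (all foo_* names are hypothetical; error handling trimmed):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Bus-side callbacks: the core invokes these through dev->bus->pm. */
static int foo_runtime_suspend(struct device *dev)
{
	/* Put the hardware into a low-power state; 0 on success. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power; 0 on success. */
	return 0;
}

static int foo_runtime_idle(struct device *dev)
{
	/* All checks passed (usage count 0, children suspended): suspend. */
	return pm_runtime_suspend(dev);
}

/* Would be assigned to the hypothetical foo_bus_type.pm. */
static const struct dev_pm_ops foo_bus_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
	.runtime_idle	 = foo_runtime_idle,
};

/* Driver side: devices start out RPM_SUSPENDED with disable_depth == 1,
 * so a probe routine whose hardware is already powered declares that
 * before enabling. */
static int foo_probe(struct device *dev)
{
	int error = pm_runtime_set_active(dev);

	if (error)
		return error;
	pm_runtime_enable(dev);	/* balances the initial disable_depth of 1 */
	return 0;
}

/* Hold a usage-count reference across any access to the hardware. */
static int foo_do_io(struct device *dev)
{
	int error = pm_runtime_get_sync(dev);	/* resumes if suspended */

	if (error < 0) {
		pm_runtime_put_noidle(dev);
		return error;
	}

	/* ... program the device here ... */

	pm_runtime_put(dev);	/* queues an idle notification at count 0 */
	return 0;
}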