diff options
-rw-r--r-- | drivers/base/power/runtime.c | 75 | ||||
-rw-r--r-- | include/linux/pm_runtime.h | 23 |
2 files changed, 54 insertions, 44 deletions
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index ec08f1ae63f1..0c1db879544b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/pm_runtime.h> | 10 | #include <linux/pm_runtime.h> |
11 | #include <linux/jiffies.h> | 11 | #include <linux/jiffies.h> |
12 | 12 | ||
13 | static int __pm_runtime_resume(struct device *dev, bool from_wq); | 13 | static int __pm_runtime_resume(struct device *dev, int rpmflags); |
14 | static int __pm_request_idle(struct device *dev); | 14 | static int __pm_request_idle(struct device *dev); |
15 | static int __pm_request_resume(struct device *dev); | 15 | static int __pm_request_resume(struct device *dev); |
16 | 16 | ||
@@ -164,24 +164,24 @@ EXPORT_SYMBOL_GPL(pm_runtime_idle); | |||
164 | /** | 164 | /** |
165 | * __pm_runtime_suspend - Carry out run-time suspend of given device. | 165 | * __pm_runtime_suspend - Carry out run-time suspend of given device. |
166 | * @dev: Device to suspend. | 166 | * @dev: Device to suspend. |
167 | * @from_wq: If set, the function has been called via pm_wq. | 167 | * @rpmflags: Flag bits. |
168 | * | 168 | * |
169 | * Check if the device can be suspended and run the ->runtime_suspend() callback | 169 | * Check if the device can be suspended and run the ->runtime_suspend() callback |
170 | * provided by its bus type. If another suspend has been started earlier, wait | 170 | * provided by its bus type. If another suspend has been started earlier, |
171 | * for it to finish. If an idle notification or suspend request is pending or | 171 | * either return immediately or wait for it to finish, depending on the |
172 | * RPM_NOWAIT flag. If an idle notification or suspend request is pending or | ||
172 | * scheduled, cancel it. | 173 | * scheduled, cancel it. |
173 | * | 174 | * |
174 | * This function must be called under dev->power.lock with interrupts disabled. | 175 | * This function must be called under dev->power.lock with interrupts disabled. |
175 | */ | 176 | */ |
176 | int __pm_runtime_suspend(struct device *dev, bool from_wq) | 177 | static int __pm_runtime_suspend(struct device *dev, int rpmflags) |
177 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 178 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
178 | { | 179 | { |
179 | struct device *parent = NULL; | 180 | struct device *parent = NULL; |
180 | bool notify = false; | 181 | bool notify = false; |
181 | int retval = 0; | 182 | int retval = 0; |
182 | 183 | ||
183 | dev_dbg(dev, "__pm_runtime_suspend()%s!\n", | 184 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
184 | from_wq ? " from workqueue" : ""); | ||
185 | 185 | ||
186 | repeat: | 186 | repeat: |
187 | if (dev->power.runtime_error) { | 187 | if (dev->power.runtime_error) { |
@@ -213,7 +213,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
213 | if (dev->power.runtime_status == RPM_SUSPENDING) { | 213 | if (dev->power.runtime_status == RPM_SUSPENDING) { |
214 | DEFINE_WAIT(wait); | 214 | DEFINE_WAIT(wait); |
215 | 215 | ||
216 | if (from_wq) { | 216 | if (rpmflags & RPM_NOWAIT) { |
217 | retval = -EINPROGRESS; | 217 | retval = -EINPROGRESS; |
218 | goto out; | 218 | goto out; |
219 | } | 219 | } |
@@ -286,7 +286,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
286 | wake_up_all(&dev->power.wait_queue); | 286 | wake_up_all(&dev->power.wait_queue); |
287 | 287 | ||
288 | if (dev->power.deferred_resume) { | 288 | if (dev->power.deferred_resume) { |
289 | __pm_runtime_resume(dev, false); | 289 | __pm_runtime_resume(dev, 0); |
290 | retval = -EAGAIN; | 290 | retval = -EAGAIN; |
291 | goto out; | 291 | goto out; |
292 | } | 292 | } |
@@ -303,7 +303,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
303 | } | 303 | } |
304 | 304 | ||
305 | out: | 305 | out: |
306 | dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); | 306 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
307 | 307 | ||
308 | return retval; | 308 | return retval; |
309 | } | 309 | } |
@@ -317,7 +317,7 @@ int pm_runtime_suspend(struct device *dev) | |||
317 | int retval; | 317 | int retval; |
318 | 318 | ||
319 | spin_lock_irq(&dev->power.lock); | 319 | spin_lock_irq(&dev->power.lock); |
320 | retval = __pm_runtime_suspend(dev, false); | 320 | retval = __pm_runtime_suspend(dev, 0); |
321 | spin_unlock_irq(&dev->power.lock); | 321 | spin_unlock_irq(&dev->power.lock); |
322 | 322 | ||
323 | return retval; | 323 | return retval; |
@@ -327,24 +327,25 @@ EXPORT_SYMBOL_GPL(pm_runtime_suspend); | |||
327 | /** | 327 | /** |
328 | * __pm_runtime_resume - Carry out run-time resume of given device. | 328 | * __pm_runtime_resume - Carry out run-time resume of given device. |
329 | * @dev: Device to resume. | 329 | * @dev: Device to resume. |
330 | * @from_wq: If set, the function has been called via pm_wq. | 330 | * @rpmflags: Flag bits. |
331 | * | 331 | * |
332 | * Check if the device can be woken up and run the ->runtime_resume() callback | 332 | * Check if the device can be woken up and run the ->runtime_resume() callback |
333 | * provided by its bus type. If another resume has been started earlier, wait | 333 | * provided by its bus type. If another resume has been started earlier, |
334 | for it to finish. If there's a suspend running in parallel with this | 334 | either return immediately or wait for it to finish, depending on the |
335 | * function, wait for it to finish and resume the device. Cancel any scheduled | 335 | * RPM_NOWAIT flag. If there's a suspend running in parallel with this |
336 | * or pending requests. | 336 | * function, either tell the other process to resume after suspending |
337 | * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT | ||
338 | * flag. Cancel any scheduled or pending requests. | ||
337 | * | 339 | * |
338 | * This function must be called under dev->power.lock with interrupts disabled. | 340 | * This function must be called under dev->power.lock with interrupts disabled. |
339 | */ | 341 | */ |
340 | int __pm_runtime_resume(struct device *dev, bool from_wq) | 342 | static int __pm_runtime_resume(struct device *dev, int rpmflags) |
341 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 343 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
342 | { | 344 | { |
343 | struct device *parent = NULL; | 345 | struct device *parent = NULL; |
344 | int retval = 0; | 346 | int retval = 0; |
345 | 347 | ||
346 | dev_dbg(dev, "__pm_runtime_resume()%s!\n", | 348 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
347 | from_wq ? " from workqueue" : ""); | ||
348 | 349 | ||
349 | repeat: | 350 | repeat: |
350 | if (dev->power.runtime_error) { | 351 | if (dev->power.runtime_error) { |
@@ -365,7 +366,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
365 | || dev->power.runtime_status == RPM_SUSPENDING) { | 366 | || dev->power.runtime_status == RPM_SUSPENDING) { |
366 | DEFINE_WAIT(wait); | 367 | DEFINE_WAIT(wait); |
367 | 368 | ||
368 | if (from_wq) { | 369 | if (rpmflags & RPM_NOWAIT) { |
369 | if (dev->power.runtime_status == RPM_SUSPENDING) | 370 | if (dev->power.runtime_status == RPM_SUSPENDING) |
370 | dev->power.deferred_resume = true; | 371 | dev->power.deferred_resume = true; |
371 | retval = -EINPROGRESS; | 372 | retval = -EINPROGRESS; |
@@ -407,7 +408,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
407 | */ | 408 | */ |
408 | if (!parent->power.disable_depth | 409 | if (!parent->power.disable_depth |
409 | && !parent->power.ignore_children) { | 410 | && !parent->power.ignore_children) { |
410 | __pm_runtime_resume(parent, false); | 411 | __pm_runtime_resume(parent, 0); |
411 | if (parent->power.runtime_status != RPM_ACTIVE) | 412 | if (parent->power.runtime_status != RPM_ACTIVE) |
412 | retval = -EBUSY; | 413 | retval = -EBUSY; |
413 | } | 414 | } |
@@ -470,7 +471,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
470 | spin_lock_irq(&dev->power.lock); | 471 | spin_lock_irq(&dev->power.lock); |
471 | } | 472 | } |
472 | 473 | ||
473 | dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); | 474 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
474 | 475 | ||
475 | return retval; | 476 | return retval; |
476 | } | 477 | } |
@@ -484,7 +485,7 @@ int pm_runtime_resume(struct device *dev) | |||
484 | int retval; | 485 | int retval; |
485 | 486 | ||
486 | spin_lock_irq(&dev->power.lock); | 487 | spin_lock_irq(&dev->power.lock); |
487 | retval = __pm_runtime_resume(dev, false); | 488 | retval = __pm_runtime_resume(dev, 0); |
488 | spin_unlock_irq(&dev->power.lock); | 489 | spin_unlock_irq(&dev->power.lock); |
489 | 490 | ||
490 | return retval; | 491 | return retval; |
@@ -519,10 +520,10 @@ static void pm_runtime_work(struct work_struct *work) | |||
519 | __pm_runtime_idle(dev); | 520 | __pm_runtime_idle(dev); |
520 | break; | 521 | break; |
521 | case RPM_REQ_SUSPEND: | 522 | case RPM_REQ_SUSPEND: |
522 | __pm_runtime_suspend(dev, true); | 523 | __pm_runtime_suspend(dev, RPM_NOWAIT); |
523 | break; | 524 | break; |
524 | case RPM_REQ_RESUME: | 525 | case RPM_REQ_RESUME: |
525 | __pm_runtime_resume(dev, true); | 526 | __pm_runtime_resume(dev, RPM_NOWAIT); |
526 | break; | 527 | break; |
527 | } | 528 | } |
528 | 529 | ||
@@ -782,17 +783,18 @@ EXPORT_SYMBOL_GPL(pm_request_resume); | |||
782 | /** | 783 | /** |
783 | * __pm_runtime_get - Reference count a device and wake it up, if necessary. | 784 | * __pm_runtime_get - Reference count a device and wake it up, if necessary. |
784 | * @dev: Device to handle. | 785 | * @dev: Device to handle. |
785 | * @sync: If set and the device is suspended, resume it synchronously. | 786 | * @rpmflags: Flag bits. |
786 | * | 787 | * |
787 | * Increment the usage count of the device and resume it or submit a resume | 788 | * Increment the usage count of the device and resume it or submit a resume |
788 | * request for it, depending on the value of @sync. | 789 | * request for it, depending on the RPM_ASYNC flag bit. |
789 | */ | 790 | */ |
790 | int __pm_runtime_get(struct device *dev, bool sync) | 791 | int __pm_runtime_get(struct device *dev, int rpmflags) |
791 | { | 792 | { |
792 | int retval; | 793 | int retval; |
793 | 794 | ||
794 | atomic_inc(&dev->power.usage_count); | 795 | atomic_inc(&dev->power.usage_count); |
795 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 796 | retval = (rpmflags & RPM_ASYNC) ? |
797 | pm_request_resume(dev) : pm_runtime_resume(dev); | ||
796 | 798 | ||
797 | return retval; | 799 | return retval; |
798 | } | 800 | } |
@@ -801,18 +803,19 @@ EXPORT_SYMBOL_GPL(__pm_runtime_get); | |||
801 | /** | 803 | /** |
802 | * __pm_runtime_put - Decrement the device's usage counter and notify its bus. | 804 | * __pm_runtime_put - Decrement the device's usage counter and notify its bus. |
803 | * @dev: Device to handle. | 805 | * @dev: Device to handle. |
804 | * @sync: If the device's bus type is to be notified, do that synchronously. | 806 | * @rpmflags: Flag bits. |
805 | * | 807 | * |
806 | * Decrement the usage count of the device and if it reaches zero, carry out a | 808 | * Decrement the usage count of the device and if it reaches zero, carry out a |
807 | * synchronous idle notification or submit an idle notification request for it, | 809 | * synchronous idle notification or submit an idle notification request for it, |
808 | * depending on the value of @sync. | 810 | * depending on the RPM_ASYNC flag bit. |
809 | */ | 811 | */ |
810 | int __pm_runtime_put(struct device *dev, bool sync) | 812 | int __pm_runtime_put(struct device *dev, int rpmflags) |
811 | { | 813 | { |
812 | int retval = 0; | 814 | int retval = 0; |
813 | 815 | ||
814 | if (atomic_dec_and_test(&dev->power.usage_count)) | 816 | if (atomic_dec_and_test(&dev->power.usage_count)) |
815 | retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); | 817 | retval = (rpmflags & RPM_ASYNC) ? |
818 | pm_request_idle(dev) : pm_runtime_idle(dev); | ||
816 | 819 | ||
817 | return retval; | 820 | return retval; |
818 | } | 821 | } |
@@ -967,7 +970,7 @@ int pm_runtime_barrier(struct device *dev) | |||
967 | 970 | ||
968 | if (dev->power.request_pending | 971 | if (dev->power.request_pending |
969 | && dev->power.request == RPM_REQ_RESUME) { | 972 | && dev->power.request == RPM_REQ_RESUME) { |
970 | __pm_runtime_resume(dev, false); | 973 | __pm_runtime_resume(dev, 0); |
971 | retval = 1; | 974 | retval = 1; |
972 | } | 975 | } |
973 | 976 | ||
@@ -1016,7 +1019,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) | |||
1016 | */ | 1019 | */ |
1017 | pm_runtime_get_noresume(dev); | 1020 | pm_runtime_get_noresume(dev); |
1018 | 1021 | ||
1019 | __pm_runtime_resume(dev, false); | 1022 | __pm_runtime_resume(dev, 0); |
1020 | 1023 | ||
1021 | pm_runtime_put_noidle(dev); | 1024 | pm_runtime_put_noidle(dev); |
1022 | } | 1025 | } |
@@ -1064,7 +1067,7 @@ void pm_runtime_forbid(struct device *dev) | |||
1064 | 1067 | ||
1065 | dev->power.runtime_auto = false; | 1068 | dev->power.runtime_auto = false; |
1066 | atomic_inc(&dev->power.usage_count); | 1069 | atomic_inc(&dev->power.usage_count); |
1067 | __pm_runtime_resume(dev, false); | 1070 | __pm_runtime_resume(dev, 0); |
1068 | 1071 | ||
1069 | out: | 1072 | out: |
1070 | spin_unlock_irq(&dev->power.lock); | 1073 | spin_unlock_irq(&dev->power.lock); |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6e81888c6222..c030cac59aac 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -12,6 +12,11 @@ | |||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
14 | 14 | ||
15 | /* Runtime PM flag argument bits */ | ||
16 | #define RPM_ASYNC 0x01 /* Request is asynchronous */ | ||
17 | #define RPM_NOWAIT 0x02 /* Don't wait for concurrent | ||
18 | state change */ | ||
19 | |||
15 | #ifdef CONFIG_PM_RUNTIME | 20 | #ifdef CONFIG_PM_RUNTIME |
16 | 21 | ||
17 | extern struct workqueue_struct *pm_wq; | 22 | extern struct workqueue_struct *pm_wq; |
@@ -22,8 +27,8 @@ extern int pm_runtime_resume(struct device *dev); | |||
22 | extern int pm_request_idle(struct device *dev); | 27 | extern int pm_request_idle(struct device *dev); |
23 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); | 28 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); |
24 | extern int pm_request_resume(struct device *dev); | 29 | extern int pm_request_resume(struct device *dev); |
25 | extern int __pm_runtime_get(struct device *dev, bool sync); | 30 | extern int __pm_runtime_get(struct device *dev, int rpmflags); |
26 | extern int __pm_runtime_put(struct device *dev, bool sync); | 31 | extern int __pm_runtime_put(struct device *dev, int rpmflags); |
27 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); | 32 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); |
28 | extern int pm_runtime_barrier(struct device *dev); | 33 | extern int pm_runtime_barrier(struct device *dev); |
29 | extern void pm_runtime_enable(struct device *dev); | 34 | extern void pm_runtime_enable(struct device *dev); |
@@ -81,8 +86,10 @@ static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
81 | return -ENOSYS; | 86 | return -ENOSYS; |
82 | } | 87 | } |
83 | static inline int pm_request_resume(struct device *dev) { return 0; } | 88 | static inline int pm_request_resume(struct device *dev) { return 0; } |
84 | static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; } | 89 | static inline int __pm_runtime_get(struct device *dev, int rpmflags) |
85 | static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; } | 90 | { return 1; } |
91 | static inline int __pm_runtime_put(struct device *dev, int rpmflags) | ||
92 | { return 0; } | ||
86 | static inline int __pm_runtime_set_status(struct device *dev, | 93 | static inline int __pm_runtime_set_status(struct device *dev, |
87 | unsigned int status) { return 0; } | 94 | unsigned int status) { return 0; } |
88 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } | 95 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } |
@@ -107,22 +114,22 @@ static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | |||
107 | 114 | ||
108 | static inline int pm_runtime_get(struct device *dev) | 115 | static inline int pm_runtime_get(struct device *dev) |
109 | { | 116 | { |
110 | return __pm_runtime_get(dev, false); | 117 | return __pm_runtime_get(dev, RPM_ASYNC); |
111 | } | 118 | } |
112 | 119 | ||
113 | static inline int pm_runtime_get_sync(struct device *dev) | 120 | static inline int pm_runtime_get_sync(struct device *dev) |
114 | { | 121 | { |
115 | return __pm_runtime_get(dev, true); | 122 | return __pm_runtime_get(dev, 0); |
116 | } | 123 | } |
117 | 124 | ||
118 | static inline int pm_runtime_put(struct device *dev) | 125 | static inline int pm_runtime_put(struct device *dev) |
119 | { | 126 | { |
120 | return __pm_runtime_put(dev, false); | 127 | return __pm_runtime_put(dev, RPM_ASYNC); |
121 | } | 128 | } |
122 | 129 | ||
123 | static inline int pm_runtime_put_sync(struct device *dev) | 130 | static inline int pm_runtime_put_sync(struct device *dev) |
124 | { | 131 | { |
125 | return __pm_runtime_put(dev, true); | 132 | return __pm_runtime_put(dev, 0); |
126 | } | 133 | } |
127 | 134 | ||
128 | static inline int pm_runtime_set_active(struct device *dev) | 135 | static inline int pm_runtime_set_active(struct device *dev) |