aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base/power/runtime.c
diff options
context:
space:
mode:
authorAlan Stern <stern@rowland.harvard.edu>2010-09-25 17:34:54 -0400
committerRafael J. Wysocki <rjw@sisk.pl>2010-10-16 19:57:44 -0400
commit3f9af0513ae5b1f185302c2d0ba656640926d970 (patch)
tree5f99b98aca875a812f444721a88a3fc3de842538 /drivers/base/power/runtime.c
parent4769373ca2c8d0b999749a070c48fd8648888831 (diff)
PM / Runtime: Replace boolean arguments with bitflags
The "from_wq" argument in __pm_runtime_suspend() and __pm_runtime_resume() supposedly indicates whether or not the function was called by the PM workqueue thread, but in fact it isn't always used this way. It really indicates whether or not the function should return early if the requested operation is already in progress. Along with this badly-named boolean argument, later patches in this series will add several other boolean arguments to these functions and others. Therefore this patch (as1422) begins the conversion process by replacing from_wq with a bitflag argument. The same bitflags are also used in __pm_runtime_get() and __pm_runtime_put(), where they indicate whether or not the operation should be asynchronous. Signed-off-by: Alan Stern <stern@rowland.harvard.edu> Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'drivers/base/power/runtime.c')
-rw-r--r--drivers/base/power/runtime.c75
1 files changed, 39 insertions, 36 deletions
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ec08f1ae63f1..0c1db879544b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -10,7 +10,7 @@
10#include <linux/pm_runtime.h> 10#include <linux/pm_runtime.h>
11#include <linux/jiffies.h> 11#include <linux/jiffies.h>
12 12
13static int __pm_runtime_resume(struct device *dev, bool from_wq); 13static int __pm_runtime_resume(struct device *dev, int rpmflags);
14static int __pm_request_idle(struct device *dev); 14static int __pm_request_idle(struct device *dev);
15static int __pm_request_resume(struct device *dev); 15static int __pm_request_resume(struct device *dev);
16 16
@@ -164,24 +164,24 @@ EXPORT_SYMBOL_GPL(pm_runtime_idle);
164/** 164/**
165 * __pm_runtime_suspend - Carry out run-time suspend of given device. 165 * __pm_runtime_suspend - Carry out run-time suspend of given device.
166 * @dev: Device to suspend. 166 * @dev: Device to suspend.
167 * @from_wq: If set, the function has been called via pm_wq. 167 * @rpmflags: Flag bits.
168 * 168 *
169 * Check if the device can be suspended and run the ->runtime_suspend() callback 169 * Check if the device can be suspended and run the ->runtime_suspend() callback
170 * provided by its bus type. If another suspend has been started earlier, wait 170 * provided by its bus type. If another suspend has been started earlier,
171 * for it to finish. If an idle notification or suspend request is pending or 171 * either return immediately or wait for it to finish, depending on the
172 * RPM_NOWAIT flag. If an idle notification or suspend request is pending or
172 * scheduled, cancel it. 173 * scheduled, cancel it.
173 * 174 *
174 * This function must be called under dev->power.lock with interrupts disabled. 175 * This function must be called under dev->power.lock with interrupts disabled.
175 */ 176 */
176int __pm_runtime_suspend(struct device *dev, bool from_wq) 177static int __pm_runtime_suspend(struct device *dev, int rpmflags)
177 __releases(&dev->power.lock) __acquires(&dev->power.lock) 178 __releases(&dev->power.lock) __acquires(&dev->power.lock)
178{ 179{
179 struct device *parent = NULL; 180 struct device *parent = NULL;
180 bool notify = false; 181 bool notify = false;
181 int retval = 0; 182 int retval = 0;
182 183
183 dev_dbg(dev, "__pm_runtime_suspend()%s!\n", 184 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
184 from_wq ? " from workqueue" : "");
185 185
186 repeat: 186 repeat:
187 if (dev->power.runtime_error) { 187 if (dev->power.runtime_error) {
@@ -213,7 +213,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
213 if (dev->power.runtime_status == RPM_SUSPENDING) { 213 if (dev->power.runtime_status == RPM_SUSPENDING) {
214 DEFINE_WAIT(wait); 214 DEFINE_WAIT(wait);
215 215
216 if (from_wq) { 216 if (rpmflags & RPM_NOWAIT) {
217 retval = -EINPROGRESS; 217 retval = -EINPROGRESS;
218 goto out; 218 goto out;
219 } 219 }
@@ -286,7 +286,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
286 wake_up_all(&dev->power.wait_queue); 286 wake_up_all(&dev->power.wait_queue);
287 287
288 if (dev->power.deferred_resume) { 288 if (dev->power.deferred_resume) {
289 __pm_runtime_resume(dev, false); 289 __pm_runtime_resume(dev, 0);
290 retval = -EAGAIN; 290 retval = -EAGAIN;
291 goto out; 291 goto out;
292 } 292 }
@@ -303,7 +303,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
303 } 303 }
304 304
305 out: 305 out:
306 dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); 306 dev_dbg(dev, "%s returns %d\n", __func__, retval);
307 307
308 return retval; 308 return retval;
309} 309}
@@ -317,7 +317,7 @@ int pm_runtime_suspend(struct device *dev)
317 int retval; 317 int retval;
318 318
319 spin_lock_irq(&dev->power.lock); 319 spin_lock_irq(&dev->power.lock);
320 retval = __pm_runtime_suspend(dev, false); 320 retval = __pm_runtime_suspend(dev, 0);
321 spin_unlock_irq(&dev->power.lock); 321 spin_unlock_irq(&dev->power.lock);
322 322
323 return retval; 323 return retval;
@@ -327,24 +327,25 @@ EXPORT_SYMBOL_GPL(pm_runtime_suspend);
327/** 327/**
328 * __pm_runtime_resume - Carry out run-time resume of given device. 328 * __pm_runtime_resume - Carry out run-time resume of given device.
329 * @dev: Device to resume. 329 * @dev: Device to resume.
330 * @from_wq: If set, the function has been called via pm_wq. 330 * @rpmflags: Flag bits.
331 * 331 *
332 * Check if the device can be woken up and run the ->runtime_resume() callback 332 * Check if the device can be woken up and run the ->runtime_resume() callback
333 * provided by its bus type. If another resume has been started earlier, wait 333 * provided by its bus type. If another resume has been started earlier,
334 * for it to finish. If there's a suspend running in parallel with this 334 * either return immediately or wait for it to finish, depending on the
335 * function, wait for it to finish and resume the device. Cancel any scheduled 335 * RPM_NOWAIT flag. If there's a suspend running in parallel with this
336 * or pending requests. 336 * function, either tell the other process to resume after suspending
337 * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT
338 * flag. Cancel any scheduled or pending requests.
337 * 339 *
338 * This function must be called under dev->power.lock with interrupts disabled. 340 * This function must be called under dev->power.lock with interrupts disabled.
339 */ 341 */
340int __pm_runtime_resume(struct device *dev, bool from_wq) 342static int __pm_runtime_resume(struct device *dev, int rpmflags)
341 __releases(&dev->power.lock) __acquires(&dev->power.lock) 343 __releases(&dev->power.lock) __acquires(&dev->power.lock)
342{ 344{
343 struct device *parent = NULL; 345 struct device *parent = NULL;
344 int retval = 0; 346 int retval = 0;
345 347
346 dev_dbg(dev, "__pm_runtime_resume()%s!\n", 348 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
347 from_wq ? " from workqueue" : "");
348 349
349 repeat: 350 repeat:
350 if (dev->power.runtime_error) { 351 if (dev->power.runtime_error) {
@@ -365,7 +366,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
365 || dev->power.runtime_status == RPM_SUSPENDING) { 366 || dev->power.runtime_status == RPM_SUSPENDING) {
366 DEFINE_WAIT(wait); 367 DEFINE_WAIT(wait);
367 368
368 if (from_wq) { 369 if (rpmflags & RPM_NOWAIT) {
369 if (dev->power.runtime_status == RPM_SUSPENDING) 370 if (dev->power.runtime_status == RPM_SUSPENDING)
370 dev->power.deferred_resume = true; 371 dev->power.deferred_resume = true;
371 retval = -EINPROGRESS; 372 retval = -EINPROGRESS;
@@ -407,7 +408,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
407 */ 408 */
408 if (!parent->power.disable_depth 409 if (!parent->power.disable_depth
409 && !parent->power.ignore_children) { 410 && !parent->power.ignore_children) {
410 __pm_runtime_resume(parent, false); 411 __pm_runtime_resume(parent, 0);
411 if (parent->power.runtime_status != RPM_ACTIVE) 412 if (parent->power.runtime_status != RPM_ACTIVE)
412 retval = -EBUSY; 413 retval = -EBUSY;
413 } 414 }
@@ -470,7 +471,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
470 spin_lock_irq(&dev->power.lock); 471 spin_lock_irq(&dev->power.lock);
471 } 472 }
472 473
473 dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); 474 dev_dbg(dev, "%s returns %d\n", __func__, retval);
474 475
475 return retval; 476 return retval;
476} 477}
@@ -484,7 +485,7 @@ int pm_runtime_resume(struct device *dev)
484 int retval; 485 int retval;
485 486
486 spin_lock_irq(&dev->power.lock); 487 spin_lock_irq(&dev->power.lock);
487 retval = __pm_runtime_resume(dev, false); 488 retval = __pm_runtime_resume(dev, 0);
488 spin_unlock_irq(&dev->power.lock); 489 spin_unlock_irq(&dev->power.lock);
489 490
490 return retval; 491 return retval;
@@ -519,10 +520,10 @@ static void pm_runtime_work(struct work_struct *work)
519 __pm_runtime_idle(dev); 520 __pm_runtime_idle(dev);
520 break; 521 break;
521 case RPM_REQ_SUSPEND: 522 case RPM_REQ_SUSPEND:
522 __pm_runtime_suspend(dev, true); 523 __pm_runtime_suspend(dev, RPM_NOWAIT);
523 break; 524 break;
524 case RPM_REQ_RESUME: 525 case RPM_REQ_RESUME:
525 __pm_runtime_resume(dev, true); 526 __pm_runtime_resume(dev, RPM_NOWAIT);
526 break; 527 break;
527 } 528 }
528 529
@@ -782,17 +783,18 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
782/** 783/**
783 * __pm_runtime_get - Reference count a device and wake it up, if necessary. 784 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
784 * @dev: Device to handle. 785 * @dev: Device to handle.
785 * @sync: If set and the device is suspended, resume it synchronously. 786 * @rpmflags: Flag bits.
786 * 787 *
787 * Increment the usage count of the device and resume it or submit a resume 788 * Increment the usage count of the device and resume it or submit a resume
788 * request for it, depending on the value of @sync. 789 * request for it, depending on the RPM_ASYNC flag bit.
789 */ 790 */
790int __pm_runtime_get(struct device *dev, bool sync) 791int __pm_runtime_get(struct device *dev, int rpmflags)
791{ 792{
792 int retval; 793 int retval;
793 794
794 atomic_inc(&dev->power.usage_count); 795 atomic_inc(&dev->power.usage_count);
795 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); 796 retval = (rpmflags & RPM_ASYNC) ?
797 pm_request_resume(dev) : pm_runtime_resume(dev);
796 798
797 return retval; 799 return retval;
798} 800}
@@ -801,18 +803,19 @@ EXPORT_SYMBOL_GPL(__pm_runtime_get);
801/** 803/**
802 * __pm_runtime_put - Decrement the device's usage counter and notify its bus. 804 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
803 * @dev: Device to handle. 805 * @dev: Device to handle.
804 * @sync: If the device's bus type is to be notified, do that synchronously. 806 * @rpmflags: Flag bits.
805 * 807 *
806 * Decrement the usage count of the device and if it reaches zero, carry out a 808 * Decrement the usage count of the device and if it reaches zero, carry out a
807 * synchronous idle notification or submit an idle notification request for it, 809 * synchronous idle notification or submit an idle notification request for it,
808 * depending on the value of @sync. 810 * depending on the RPM_ASYNC flag bit.
809 */ 811 */
810int __pm_runtime_put(struct device *dev, bool sync) 812int __pm_runtime_put(struct device *dev, int rpmflags)
811{ 813{
812 int retval = 0; 814 int retval = 0;
813 815
814 if (atomic_dec_and_test(&dev->power.usage_count)) 816 if (atomic_dec_and_test(&dev->power.usage_count))
815 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); 817 retval = (rpmflags & RPM_ASYNC) ?
818 pm_request_idle(dev) : pm_runtime_idle(dev);
816 819
817 return retval; 820 return retval;
818} 821}
@@ -967,7 +970,7 @@ int pm_runtime_barrier(struct device *dev)
967 970
968 if (dev->power.request_pending 971 if (dev->power.request_pending
969 && dev->power.request == RPM_REQ_RESUME) { 972 && dev->power.request == RPM_REQ_RESUME) {
970 __pm_runtime_resume(dev, false); 973 __pm_runtime_resume(dev, 0);
971 retval = 1; 974 retval = 1;
972 } 975 }
973 976
@@ -1016,7 +1019,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
1016 */ 1019 */
1017 pm_runtime_get_noresume(dev); 1020 pm_runtime_get_noresume(dev);
1018 1021
1019 __pm_runtime_resume(dev, false); 1022 __pm_runtime_resume(dev, 0);
1020 1023
1021 pm_runtime_put_noidle(dev); 1024 pm_runtime_put_noidle(dev);
1022 } 1025 }
@@ -1064,7 +1067,7 @@ void pm_runtime_forbid(struct device *dev)
1064 1067
1065 dev->power.runtime_auto = false; 1068 dev->power.runtime_auto = false;
1066 atomic_inc(&dev->power.usage_count); 1069 atomic_inc(&dev->power.usage_count);
1067 __pm_runtime_resume(dev, false); 1070 __pm_runtime_resume(dev, 0);
1068 1071
1069 out: 1072 out:
1070 spin_unlock_irq(&dev->power.lock); 1073 spin_unlock_irq(&dev->power.lock);