author     Alan Stern <stern@rowland.harvard.edu>    2010-09-25 17:35:07 -0400
committer  Rafael J. Wysocki <rjw@sisk.pl>           2010-10-16 19:57:47 -0400
commit     140a6c945211ee911dec776fafa52e03a7d7bb9a
tree       22b5d4bf530d95a890daef41d92051fca043ca26 /drivers/base
parent     1bfee5bc86fdaecc912e06080583eddab7263df2
PM / Runtime: Combine runtime PM entry points
This patch (as1424) combines the various public entry points for the
runtime PM routines into three simple functions: one for idle, one for
suspend, and one for resume. A new bitflag specifies whether to
increment or decrement the usage_count field.
The new entry points are named __pm_runtime_idle,
__pm_runtime_suspend, and __pm_runtime_resume, to reflect that they
are trampolines. Simultaneously, the corresponding internal routines
are renamed to rpm_idle, rpm_suspend, and rpm_resume.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
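The three entry points take an rpmflags argument: RPM_ASYNC selects an
asynchronous request, and the new RPM_GET_PUT bit folds the usage_count
increment or decrement into the call. The header side of the change is not
part of the diff below, which is limited to drivers/base; as an illustrative
sketch only, assuming the usual static-inline wrapper pattern in
include/linux/pm_runtime.h, the old public names map onto the trampolines
roughly like this:

/* Sketch only -- the real wrappers live in include/linux/pm_runtime.h,
 * which this drivers/base-limited diff does not show. */
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);

static inline int pm_runtime_idle(struct device *dev)
{
        return __pm_runtime_idle(dev, 0);
}

static inline int pm_runtime_suspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_resume(struct device *dev)
{
        return __pm_runtime_resume(dev, 0);
}

static inline int pm_request_resume(struct device *dev)
{
        /* Asynchronous: queue the resume instead of running it here. */
        return __pm_runtime_resume(dev, RPM_ASYNC);
}

static inline int pm_runtime_get_sync(struct device *dev)
{
        /* Bump usage_count, then resume synchronously. */
        return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
        /* Drop usage_count; on zero, queue an idle notification. */
        return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}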
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/runtime.c | 174
1 file changed, 64 insertions(+), 110 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index d7b5d84c235c..ed227b7c1bb5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -11,7 +11,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/jiffies.h>
 
-static int __pm_runtime_resume(struct device *dev, int rpmflags);
+static int rpm_resume(struct device *dev, int rpmflags);
 
 /**
  * update_pm_runtime_accounting - Update the time accounting of power states
@@ -107,7 +107,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
 
 
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * rpm_idle - Notify device bus type if the device can be suspended.
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
  *
@@ -118,7 +118,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_idle(struct device *dev, int rpmflags)
+static int rpm_idle(struct device *dev, int rpmflags)
         __releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
         int retval;
@@ -189,23 +189,7 @@ static int __pm_runtime_idle(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- */
-int pm_runtime_idle(struct device *dev)
-{
-        int retval;
-
-        spin_lock_irq(&dev->power.lock);
-        retval = __pm_runtime_idle(dev, 0);
-        spin_unlock_irq(&dev->power.lock);
-
-        return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-/**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
@@ -220,7 +204,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_idle);
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_suspend(struct device *dev, int rpmflags)
+static int rpm_suspend(struct device *dev, int rpmflags)
         __releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
         struct device *parent = NULL;
@@ -332,13 +316,13 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
         wake_up_all(&dev->power.wait_queue);
 
         if (dev->power.deferred_resume) {
-                __pm_runtime_resume(dev, 0);
+                rpm_resume(dev, 0);
                 retval = -EAGAIN;
                 goto out;
         }
 
         if (notify)
-                __pm_runtime_idle(dev, 0);
+                rpm_idle(dev, 0);
 
         if (parent && !parent->power.ignore_children) {
                 spin_unlock_irq(&dev->power.lock);
@@ -355,23 +339,7 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-        int retval;
-
-        spin_lock_irq(&dev->power.lock);
-        retval = __pm_runtime_suspend(dev, 0);
-        spin_unlock_irq(&dev->power.lock);
-
-        return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
@@ -387,7 +355,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_suspend);
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_resume(struct device *dev, int rpmflags)
+static int rpm_resume(struct device *dev, int rpmflags)
         __releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
         struct device *parent = NULL;
@@ -469,7 +437,7 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
                  */
                 if (!parent->power.disable_depth
                     && !parent->power.ignore_children) {
-                        __pm_runtime_resume(parent, 0);
+                        rpm_resume(parent, 0);
                         if (parent->power.runtime_status != RPM_ACTIVE)
                                 retval = -EBUSY;
                 }
@@ -521,7 +489,7 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
         wake_up_all(&dev->power.wait_queue);
 
         if (!retval)
-                __pm_runtime_idle(dev, RPM_ASYNC);
+                rpm_idle(dev, RPM_ASYNC);
 
  out:
         if (parent) {
@@ -538,22 +506,6 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-        int retval;
-
-        spin_lock_irq(&dev->power.lock);
-        retval = __pm_runtime_resume(dev, 0);
-        spin_unlock_irq(&dev->power.lock);
-
-        return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
-
-/**
  * pm_runtime_work - Universal run-time PM work function.
  * @work: Work structure used for scheduling the execution of this function.
  *
@@ -578,13 +530,13 @@ static void pm_runtime_work(struct work_struct *work)
         case RPM_REQ_NONE:
                 break;
         case RPM_REQ_IDLE:
-                __pm_runtime_idle(dev, RPM_NOWAIT);
+                rpm_idle(dev, RPM_NOWAIT);
                 break;
         case RPM_REQ_SUSPEND:
-                __pm_runtime_suspend(dev, RPM_NOWAIT);
+                rpm_suspend(dev, RPM_NOWAIT);
                 break;
         case RPM_REQ_RESUME:
-                __pm_runtime_resume(dev, RPM_NOWAIT);
+                rpm_resume(dev, RPM_NOWAIT);
                 break;
         }
 
@@ -593,23 +545,6 @@ static void pm_runtime_work(struct work_struct *work)
 }
 
 /**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-        unsigned long flags;
-        int retval;
-
-        spin_lock_irqsave(&dev->power.lock, flags);
-        retval = __pm_runtime_idle(dev, RPM_ASYNC);
-        spin_unlock_irqrestore(&dev->power.lock, flags);
-
-        return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
-
-/**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  * @data: Device pointer passed by pm_schedule_suspend().
  *
@@ -627,7 +562,7 @@ static void pm_suspend_timer_fn(unsigned long data)
         /* If 'expire' is after 'jiffies' we've been called too early. */
         if (expires > 0 && !time_after(expires, jiffies)) {
                 dev->power.timer_expires = 0;
-                __pm_runtime_suspend(dev, RPM_ASYNC);
+                rpm_suspend(dev, RPM_ASYNC);
         }
 
         spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -646,7 +581,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
         spin_lock_irqsave(&dev->power.lock, flags);
 
         if (!delay) {
-                retval = __pm_runtime_suspend(dev, RPM_ASYNC);
+                retval = rpm_suspend(dev, RPM_ASYNC);
                 goto out;
         }
 
@@ -669,62 +604,81 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
         unsigned long flags;
         int retval;
 
+        if (rpmflags & RPM_GET_PUT) {
+                if (!atomic_dec_and_test(&dev->power.usage_count))
+                        return 0;
+        }
+
         spin_lock_irqsave(&dev->power.lock, flags);
-        retval = __pm_runtime_resume(dev, RPM_ASYNC);
+        retval = rpm_idle(dev, rpmflags);
         spin_unlock_irqrestore(&dev->power.lock, flags);
 
         return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 
 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the RPM_ASYNC flag bit.
+ * Carry out a suspend, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, int rpmflags)
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
+        unsigned long flags;
         int retval;
 
-        atomic_inc(&dev->power.usage_count);
-        retval = (rpmflags & RPM_ASYNC) ?
-            pm_request_resume(dev) : pm_runtime_resume(dev);
+        spin_lock_irqsave(&dev->power.lock, flags);
+        retval = rpm_suspend(dev, rpmflags);
+        spin_unlock_irqrestore(&dev->power.lock, flags);
 
         return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
  * @rpmflags: Flag bits.
 *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the RPM_ASYNC flag bit.
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_put(struct device *dev, int rpmflags)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
-        int retval = 0;
+        unsigned long flags;
+        int retval;
 
-        if (atomic_dec_and_test(&dev->power.usage_count))
-                retval = (rpmflags & RPM_ASYNC) ?
-                    pm_request_idle(dev) : pm_runtime_idle(dev);
+        if (rpmflags & RPM_GET_PUT)
+                atomic_inc(&dev->power.usage_count);
+
+        spin_lock_irqsave(&dev->power.lock, flags);
+        retval = rpm_resume(dev, rpmflags);
+        spin_unlock_irqrestore(&dev->power.lock, flags);
 
         return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
@@ -875,7 +829,7 @@ int pm_runtime_barrier(struct device *dev)
 
         if (dev->power.request_pending
             && dev->power.request == RPM_REQ_RESUME) {
-                __pm_runtime_resume(dev, 0);
+                rpm_resume(dev, 0);
                 retval = 1;
         }
 
@@ -924,7 +878,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
                  */
                 pm_runtime_get_noresume(dev);
 
-                __pm_runtime_resume(dev, 0);
+                rpm_resume(dev, 0);
 
                 pm_runtime_put_noidle(dev);
         }
@@ -972,7 +926,7 @@ void pm_runtime_forbid(struct device *dev)
 
         dev->power.runtime_auto = false;
         atomic_inc(&dev->power.usage_count);
-        __pm_runtime_resume(dev, 0);
+        rpm_resume(dev, 0);
 
  out:
         spin_unlock_irq(&dev->power.lock);
@@ -993,7 +947,7 @@ void pm_runtime_allow(struct device *dev)
 
         dev->power.runtime_auto = true;
         if (atomic_dec_and_test(&dev->power.usage_count))
-                __pm_runtime_idle(dev, 0);
+                rpm_idle(dev, 0);
 
  out:
         spin_unlock_irq(&dev->power.lock);
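For context, a hypothetical caller of the combined entry points, showing the
RPM_GET_PUT and RPM_ASYNC semantics described in the kernel-doc comments
above. This is a sketch only; foo_do_transfer and its device are invented and
not part of this patch.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Take a usage reference and resume synchronously, do some work, then drop
 * the reference with an asynchronous idle notification.  RPM_GET_PUT folds
 * the usage_count update into the call; RPM_ASYNC queues the request on the
 * PM workqueue instead of running it in the caller's context. */
static int foo_do_transfer(struct device *dev)
{
        int ret;

        /* Equivalent to pm_runtime_get_sync(dev). */
        ret = __pm_runtime_resume(dev, RPM_GET_PUT);
        if (ret < 0) {
                /* Resume failed; drop the reference taken above. */
                __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
                return ret;
        }

        /* ... the device is RPM_ACTIVE here; perform the actual I/O ... */

        /* Equivalent to pm_runtime_put(dev). */
        __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
        return 0;
}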