author    Colin Cross <ccross@android.com>    2011-08-08 17:39:36 -0400
committer Rafael J. Wysocki <rjw@sisk.pl>    2011-08-14 07:35:52 -0400
commit    311aab73d273eb22be976055f6cab224f7279d5e (patch)
tree      14ef310eb77cac8d85c85b3ed006713e4602ec8f /drivers/base
parent    17f2ae7f677f023997e02fd2ebabd90ea2a0390d (diff)
PM / Runtime: Add might_sleep() to runtime PM functions
Some of the entry points to pm runtime are not safe to call in atomic context unless pm_runtime_irq_safe() has been called. Inspecting the code, it is not immediately obvious that the functions sleep at all, as they run inside a spin_lock_irqsave, but under some conditions they can drop the lock and turn on irqs.

If a driver incorrectly calls the pm_runtime APIs, it can cause sleeping and irq processing when it expects to stay in atomic context. Add might_sleep_if to the majority of the __pm_runtime_* entry points to enforce correct usage.

Add pm_runtime_put_sync_autosuspend to the list of functions that can be called in atomic context.

Signed-off-by: Colin Cross <ccross@android.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
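For context, a minimal sketch of the usage pattern this check enforces, assuming a hypothetical driver (the foo_* names and IRQ wiring are illustrative, not part of this patch): a driver that wants to make synchronous runtime PM calls from atomic context must first declare the device IRQ-safe.

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe: pm_runtime_irq_safe() tells the runtime PM core
 * that this device's callbacks do not sleep, so synchronous calls from
 * atomic context will not trip the new might_sleep_if() checks. */
static int foo_probe(struct device *dev)
{
	pm_runtime_irq_safe(dev);
	pm_runtime_enable(dev);
	return 0;
}

/* Hypothetical interrupt handler: legal only because the device was
 * marked IRQ-safe above; without that, pm_runtime_get_sync() may drop
 * the dev->power lock, enable irqs and sleep. */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);
	/* ... service the hardware ... */
	pm_runtime_put_sync_autosuspend(dev);
	return IRQ_HANDLED;
}

The asynchronous variants such as pm_runtime_get() and pm_runtime_put() remain safe from atomic context either way, since they pass RPM_ASYNC and only queue work.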
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/runtime.c  15
1 file changed, 12 insertions, 3 deletions
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b807..04e18abb50b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -732,13 +732,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
  * return immediately if it is larger than zero. Then carry out an idle
  * notification, either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
@@ -761,13 +764,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
  * return immediately if it is larger than zero. Then carry out a suspend,
  * either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
@@ -789,13 +795,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
  * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
  * carry out a resume, either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
 
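For readers unfamiliar with the annotation: might_sleep_if() changes no runtime behaviour on its own. It is a debugging check that reduces to roughly the following (a paraphrase of the kernel header, not a verbatim quote) and only produces the "BUG: sleeping function called from invalid context" splat when sleep-in-atomic debugging (CONFIG_DEBUG_ATOMIC_SLEEP) is enabled.

/* Paraphrased sketch of the annotation, not copied from
 * include/linux/kernel.h: when atomic-sleep debugging is enabled,
 * might_sleep() warns if the caller is in atomic context; otherwise
 * the check compiles down to (at most) a voluntary preemption point. */
#define might_sleep_if(cond)	do { if (cond) might_sleep(); } while (0)

In this patch the condition is !(rpmflags & RPM_ASYNC) && !dev->power.irq_safe, so the warning fires only for synchronous calls on devices that have not been marked IRQ-safe, which are exactly the paths that may drop dev->power.lock and sleep.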