Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/dd.c                |   11
-rw-r--r--  drivers/base/platform.c          |   84
-rw-r--r--  drivers/base/power/Makefile      |    1
-rw-r--r--  drivers/base/power/main.c        |   30
-rw-r--r--  drivers/base/power/power.h       |   31
-rw-r--r--  drivers/base/power/runtime.c     | 1011
-rw-r--r--  drivers/dma/dw_dmac.c            |   15
-rw-r--r--  drivers/dma/txx9dmac.c           |   15
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c     |   25
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c |   25
-rw-r--r--  drivers/pci/pci-driver.c         |   16
-rw-r--r--  drivers/usb/musb/musb_core.c     |   18
12 files changed, 1184 insertions(+), 98 deletions(-)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0106875f01d..7b34b3a48f67 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -23,6 +23,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -202,7 +203,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+	pm_runtime_get_noresume(dev);
+	pm_runtime_barrier(dev);
 	ret = really_probe(dev, drv);
+	pm_runtime_put_sync(dev);
 
 	return ret;
 }
@@ -245,7 +249,9 @@ int device_attach(struct device *dev)
 			ret = 0;
 		}
 	} else {
+		pm_runtime_get_noresume(dev);
 		ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
+		pm_runtime_put_sync(dev);
 	}
 	up(&dev->sem);
 	return ret;
@@ -306,6 +312,9 @@ static void __device_release_driver(struct device *dev)
 
 	drv = dev->driver;
 	if (drv) {
+		pm_runtime_get_noresume(dev);
+		pm_runtime_barrier(dev);
+
 		driver_sysfs_remove(dev);
 
 		if (dev->bus)
@@ -324,6 +333,8 @@ static void __device_release_driver(struct device *dev)
 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 						     BUS_NOTIFY_UNBOUND_DRIVER,
 						     dev);
+
+		pm_runtime_put_sync(dev);
 	}
 }
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 456594bd97bc..0f7d434ce983 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -17,6 +17,7 @@
 #include <linux/bootmem.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 
@@ -625,30 +626,6 @@ static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
 	return ret;
 }
 
-static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
-{
-	struct platform_driver *pdrv = to_platform_driver(dev->driver);
-	struct platform_device *pdev = to_platform_device(dev);
-	int ret = 0;
-
-	if (dev->driver && pdrv->suspend_late)
-		ret = pdrv->suspend_late(pdev, mesg);
-
-	return ret;
-}
-
-static int platform_legacy_resume_early(struct device *dev)
-{
-	struct platform_driver *pdrv = to_platform_driver(dev->driver);
-	struct platform_device *pdev = to_platform_device(dev);
-	int ret = 0;
-
-	if (dev->driver && pdrv->resume_early)
-		ret = pdrv->resume_early(pdev);
-
-	return ret;
-}
-
 static int platform_legacy_resume(struct device *dev)
 {
 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
@@ -680,6 +657,13 @@ static void platform_pm_complete(struct device *dev)
 		drv->pm->complete(dev);
 }
 
+#else /* !CONFIG_PM_SLEEP */
+
+#define platform_pm_prepare		NULL
+#define platform_pm_complete		NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_SUSPEND
 
 static int platform_pm_suspend(struct device *dev)
@@ -711,8 +695,6 @@ static int platform_pm_suspend_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->suspend_noirq)
 			ret = drv->pm->suspend_noirq(dev);
-	} else {
-		ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
 	}
 
 	return ret;
@@ -747,8 +729,6 @@ static int platform_pm_resume_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->resume_noirq)
 			ret = drv->pm->resume_noirq(dev);
-	} else {
-		ret = platform_legacy_resume_early(dev);
 	}
 
 	return ret;
@@ -794,8 +774,6 @@ static int platform_pm_freeze_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->freeze_noirq)
 			ret = drv->pm->freeze_noirq(dev);
-	} else {
-		ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
 	}
 
 	return ret;
@@ -830,8 +808,6 @@ static int platform_pm_thaw_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->thaw_noirq)
 			ret = drv->pm->thaw_noirq(dev);
-	} else {
-		ret = platform_legacy_resume_early(dev);
 	}
 
 	return ret;
@@ -866,8 +842,6 @@ static int platform_pm_poweroff_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->poweroff_noirq)
 			ret = drv->pm->poweroff_noirq(dev);
-	} else {
-		ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
 	}
 
 	return ret;
@@ -902,8 +876,6 @@ static int platform_pm_restore_noirq(struct device *dev)
 	if (drv->pm) {
 		if (drv->pm->restore_noirq)
 			ret = drv->pm->restore_noirq(dev);
-	} else {
-		ret = platform_legacy_resume_early(dev);
 	}
 
 	return ret;
@@ -922,7 +894,32 @@ static int platform_pm_restore_noirq(struct device *dev)
 
 #endif /* !CONFIG_HIBERNATION */
 
-static struct dev_pm_ops platform_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak platform_pm_runtime_suspend(struct device *dev)
+{
+	return -ENOSYS;
+};
+
+int __weak platform_pm_runtime_resume(struct device *dev)
+{
+	return -ENOSYS;
+};
+
+int __weak platform_pm_runtime_idle(struct device *dev)
+{
+	return -ENOSYS;
+};
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define platform_pm_runtime_suspend NULL
+#define platform_pm_runtime_resume NULL
+#define platform_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops platform_dev_pm_ops = {
 	.prepare = platform_pm_prepare,
 	.complete = platform_pm_complete,
 	.suspend = platform_pm_suspend,
@@ -937,22 +934,17 @@ static struct dev_pm_ops platform_dev_pm_ops = {
 	.thaw_noirq = platform_pm_thaw_noirq,
 	.poweroff_noirq = platform_pm_poweroff_noirq,
 	.restore_noirq = platform_pm_restore_noirq,
+	.runtime_suspend = platform_pm_runtime_suspend,
+	.runtime_resume = platform_pm_runtime_resume,
+	.runtime_idle = platform_pm_runtime_idle,
 };
 
-#define PLATFORM_PM_OPS_PTR	(&platform_dev_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define PLATFORM_PM_OPS_PTR	NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
 struct bus_type platform_bus_type = {
 	.name		= "platform",
 	.dev_attrs	= platform_dev_attrs,
 	.match		= platform_match,
 	.uevent		= platform_uevent,
-	.pm		= PLATFORM_PM_OPS_PTR,
+	.pm		= &platform_dev_pm_ops,
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 911208b89259..3ce3519e8f30 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_PM)	+= sysfs.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 58a3e572f2c9..86990011277b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/resume-trace.h>
 #include <linux/rwsem.h>
 #include <linux/interrupt.h>
@@ -49,6 +50,16 @@ static DEFINE_MUTEX(dpm_list_mtx);
 static bool transition_started;
 
 /**
+ * device_pm_init - Initialize the PM-related part of a device object
+ * @dev: Device object being initialized.
+ */
+void device_pm_init(struct device *dev)
+{
+	dev->power.status = DPM_ON;
+	pm_runtime_init(dev);
+}
+
+/**
  * device_pm_lock - lock the list of active devices used by the PM core
  */
 void device_pm_lock(void)
@@ -105,6 +116,7 @@ void device_pm_remove(struct device *dev)
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	pm_runtime_remove(dev);
 }
 
 /**
@@ -157,8 +169,9 @@ void device_pm_move_last(struct device *dev)
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  */
-static int pm_op(struct device *dev, struct dev_pm_ops *ops,
-		 pm_message_t state)
+static int pm_op(struct device *dev,
+		 const struct dev_pm_ops *ops,
+		 pm_message_t state)
 {
 	int error = 0;
 
@@ -220,7 +233,8 @@ static int pm_op(struct device *dev, struct dev_pm_ops *ops,
  * The operation is executed with interrupts disabled by the only remaining
  * functional CPU in the system.
  */
-static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops,
+static int pm_noirq_op(struct device *dev,
+			const struct dev_pm_ops *ops,
 			pm_message_t state)
 {
 	int error = 0;
@@ -510,6 +524,7 @@ static void dpm_complete(pm_message_t state)
 		mutex_unlock(&dpm_list_mtx);
 
 		device_complete(dev, state);
+		pm_runtime_put_noidle(dev);
 
 		mutex_lock(&dpm_list_mtx);
 	}
@@ -755,7 +770,14 @@ static int dpm_prepare(pm_message_t state)
 		dev->power.status = DPM_PREPARING;
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_prepare(dev, state);
+		pm_runtime_get_noresume(dev);
+		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
+			/* Wake-up requested during system sleep transition. */
+			pm_runtime_put_noidle(dev);
+			error = -EBUSY;
+		} else {
+			error = device_prepare(dev, state);
+		}
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c7cb4fc3735c..b8fa1aa5225a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,7 +1,14 @@
-static inline void device_pm_init(struct device *dev)
-{
-	dev->power.status = DPM_ON;
-}
+#ifdef CONFIG_PM_RUNTIME
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+
+#else /* !CONFIG_PM_RUNTIME */
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+#endif /* !CONFIG_PM_RUNTIME */
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -16,23 +23,33 @@ static inline struct device *to_device(struct list_head *entry)
 	return container_of(entry, struct device, power.entry);
 }
 
+extern void device_pm_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
 extern void device_pm_move_after(struct device *, struct device *);
 extern void device_pm_move_last(struct device *);
 
-#else /* CONFIG_PM_SLEEP */
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+	pm_runtime_init(dev);
+}
+
+static inline void device_pm_remove(struct device *dev)
+{
+	pm_runtime_remove(dev);
+}
 
 static inline void device_pm_add(struct device *dev) {}
-static inline void device_pm_remove(struct device *dev) {}
 static inline void device_pm_move_before(struct device *deva,
 					 struct device *devb) {}
 static inline void device_pm_move_after(struct device *deva,
 					 struct device *devb) {}
 static inline void device_pm_move_last(struct device *dev) {}
 
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 000000000000..38556f6cc22d
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1011 @@
1/*
2 * drivers/base/power/runtime.c - Helper functions for device run-time PM
3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/sched.h>
10#include <linux/pm_runtime.h>
11#include <linux/jiffies.h>
12
13static int __pm_runtime_resume(struct device *dev, bool from_wq);
14static int __pm_request_idle(struct device *dev);
15static int __pm_request_resume(struct device *dev);
16
17/**
18 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
19 * @dev: Device to handle.
20 */
21static void pm_runtime_deactivate_timer(struct device *dev)
22{
23 if (dev->power.timer_expires > 0) {
24 del_timer(&dev->power.suspend_timer);
25 dev->power.timer_expires = 0;
26 }
27}
28
29/**
30 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
31 * @dev: Device to handle.
32 */
33static void pm_runtime_cancel_pending(struct device *dev)
34{
35 pm_runtime_deactivate_timer(dev);
36 /*
37 * In case there's a request pending, make sure its work function will
38 * return without doing anything.
39 */
40 dev->power.request = RPM_REQ_NONE;
41}
42
43/**
44 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
45 * @dev: Device to notify the bus type about.
46 *
47 * This function must be called under dev->power.lock with interrupts disabled.
48 */
49static int __pm_runtime_idle(struct device *dev)
50 __releases(&dev->power.lock) __acquires(&dev->power.lock)
51{
52 int retval = 0;
53
54 dev_dbg(dev, "__pm_runtime_idle()!\n");
55
56 if (dev->power.runtime_error)
57 retval = -EINVAL;
58 else if (dev->power.idle_notification)
59 retval = -EINPROGRESS;
60 else if (atomic_read(&dev->power.usage_count) > 0
61 || dev->power.disable_depth > 0
62 || dev->power.runtime_status != RPM_ACTIVE)
63 retval = -EAGAIN;
64 else if (!pm_children_suspended(dev))
65 retval = -EBUSY;
66 if (retval)
67 goto out;
68
69 if (dev->power.request_pending) {
70 /*
71 * If an idle notification request is pending, cancel it. Any
72 * other pending request takes precedence over us.
73 */
74 if (dev->power.request == RPM_REQ_IDLE) {
75 dev->power.request = RPM_REQ_NONE;
76 } else if (dev->power.request != RPM_REQ_NONE) {
77 retval = -EAGAIN;
78 goto out;
79 }
80 }
81
82 dev->power.idle_notification = true;
83
84 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
85 spin_unlock_irq(&dev->power.lock);
86
87 dev->bus->pm->runtime_idle(dev);
88
89 spin_lock_irq(&dev->power.lock);
90 }
91
92 dev->power.idle_notification = false;
93 wake_up_all(&dev->power.wait_queue);
94
95 out:
96 dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);
97
98 return retval;
99}
100
101/**
102 * pm_runtime_idle - Notify device bus type if the device can be suspended.
103 * @dev: Device to notify the bus type about.
104 */
105int pm_runtime_idle(struct device *dev)
106{
107 int retval;
108
109 spin_lock_irq(&dev->power.lock);
110 retval = __pm_runtime_idle(dev);
111 spin_unlock_irq(&dev->power.lock);
112
113 return retval;
114}
115EXPORT_SYMBOL_GPL(pm_runtime_idle);
116
117/**
118 * __pm_runtime_suspend - Carry out run-time suspend of given device.
119 * @dev: Device to suspend.
120 * @from_wq: If set, the function has been called via pm_wq.
121 *
122 * Check if the device can be suspended and run the ->runtime_suspend() callback
123 * provided by its bus type. If another suspend has been started earlier, wait
124 * for it to finish. If an idle notification or suspend request is pending or
125 * scheduled, cancel it.
126 *
127 * This function must be called under dev->power.lock with interrupts disabled.
128 */
129int __pm_runtime_suspend(struct device *dev, bool from_wq)
130 __releases(&dev->power.lock) __acquires(&dev->power.lock)
131{
132 struct device *parent = NULL;
133 bool notify = false;
134 int retval = 0;
135
136 dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
137 from_wq ? " from workqueue" : "");
138
139 repeat:
140 if (dev->power.runtime_error) {
141 retval = -EINVAL;
142 goto out;
143 }
144
145 /* Pending resume requests take precedence over us. */
146 if (dev->power.request_pending
147 && dev->power.request == RPM_REQ_RESUME) {
148 retval = -EAGAIN;
149 goto out;
150 }
151
152 /* Other scheduled or pending requests need to be canceled. */
153 pm_runtime_cancel_pending(dev);
154
155 if (dev->power.runtime_status == RPM_SUSPENDED)
156 retval = 1;
157 else if (dev->power.runtime_status == RPM_RESUMING
158 || dev->power.disable_depth > 0
159 || atomic_read(&dev->power.usage_count) > 0)
160 retval = -EAGAIN;
161 else if (!pm_children_suspended(dev))
162 retval = -EBUSY;
163 if (retval)
164 goto out;
165
166 if (dev->power.runtime_status == RPM_SUSPENDING) {
167 DEFINE_WAIT(wait);
168
169 if (from_wq) {
170 retval = -EINPROGRESS;
171 goto out;
172 }
173
174 /* Wait for the other suspend running in parallel with us. */
175 for (;;) {
176 prepare_to_wait(&dev->power.wait_queue, &wait,
177 TASK_UNINTERRUPTIBLE);
178 if (dev->power.runtime_status != RPM_SUSPENDING)
179 break;
180
181 spin_unlock_irq(&dev->power.lock);
182
183 schedule();
184
185 spin_lock_irq(&dev->power.lock);
186 }
187 finish_wait(&dev->power.wait_queue, &wait);
188 goto repeat;
189 }
190
191 dev->power.runtime_status = RPM_SUSPENDING;
192
193 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
194 spin_unlock_irq(&dev->power.lock);
195
196 retval = dev->bus->pm->runtime_suspend(dev);
197
198 spin_lock_irq(&dev->power.lock);
199 dev->power.runtime_error = retval;
200 } else {
201 retval = -ENOSYS;
202 }
203
204 if (retval) {
205 dev->power.runtime_status = RPM_ACTIVE;
206 pm_runtime_cancel_pending(dev);
207 dev->power.deferred_resume = false;
208
209 if (retval == -EAGAIN || retval == -EBUSY) {
210 notify = true;
211 dev->power.runtime_error = 0;
212 }
213 } else {
214 dev->power.runtime_status = RPM_SUSPENDED;
215
216 if (dev->parent) {
217 parent = dev->parent;
218 atomic_add_unless(&parent->power.child_count, -1, 0);
219 }
220 }
221 wake_up_all(&dev->power.wait_queue);
222
223 if (dev->power.deferred_resume) {
224 dev->power.deferred_resume = false;
225 __pm_runtime_resume(dev, false);
226 retval = -EAGAIN;
227 goto out;
228 }
229
230 if (notify)
231 __pm_runtime_idle(dev);
232
233 if (parent && !parent->power.ignore_children) {
234 spin_unlock_irq(&dev->power.lock);
235
236 pm_request_idle(parent);
237
238 spin_lock_irq(&dev->power.lock);
239 }
240
241 out:
242 dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
243
244 return retval;
245}
246
247/**
248 * pm_runtime_suspend - Carry out run-time suspend of given device.
249 * @dev: Device to suspend.
250 */
251int pm_runtime_suspend(struct device *dev)
252{
253 int retval;
254
255 spin_lock_irq(&dev->power.lock);
256 retval = __pm_runtime_suspend(dev, false);
257 spin_unlock_irq(&dev->power.lock);
258
259 return retval;
260}
261EXPORT_SYMBOL_GPL(pm_runtime_suspend);
262
263/**
264 * __pm_runtime_resume - Carry out run-time resume of given device.
265 * @dev: Device to resume.
266 * @from_wq: If set, the function has been called via pm_wq.
267 *
268 * Check if the device can be woken up and run the ->runtime_resume() callback
269 * provided by its bus type. If another resume has been started earlier, wait
270 * for it to finish. If there's a suspend running in parallel with this
271 * function, wait for it to finish and resume the device. Cancel any scheduled
272 * or pending requests.
273 *
274 * This function must be called under dev->power.lock with interrupts disabled.
275 */
276int __pm_runtime_resume(struct device *dev, bool from_wq)
277 __releases(&dev->power.lock) __acquires(&dev->power.lock)
278{
279 struct device *parent = NULL;
280 int retval = 0;
281
282 dev_dbg(dev, "__pm_runtime_resume()%s!\n",
283 from_wq ? " from workqueue" : "");
284
285 repeat:
286 if (dev->power.runtime_error) {
287 retval = -EINVAL;
288 goto out;
289 }
290
291 pm_runtime_cancel_pending(dev);
292
293 if (dev->power.runtime_status == RPM_ACTIVE)
294 retval = 1;
295 else if (dev->power.disable_depth > 0)
296 retval = -EAGAIN;
297 if (retval)
298 goto out;
299
300 if (dev->power.runtime_status == RPM_RESUMING
301 || dev->power.runtime_status == RPM_SUSPENDING) {
302 DEFINE_WAIT(wait);
303
304 if (from_wq) {
305 if (dev->power.runtime_status == RPM_SUSPENDING)
306 dev->power.deferred_resume = true;
307 retval = -EINPROGRESS;
308 goto out;
309 }
310
311 /* Wait for the operation carried out in parallel with us. */
312 for (;;) {
313 prepare_to_wait(&dev->power.wait_queue, &wait,
314 TASK_UNINTERRUPTIBLE);
315 if (dev->power.runtime_status != RPM_RESUMING
316 && dev->power.runtime_status != RPM_SUSPENDING)
317 break;
318
319 spin_unlock_irq(&dev->power.lock);
320
321 schedule();
322
323 spin_lock_irq(&dev->power.lock);
324 }
325 finish_wait(&dev->power.wait_queue, &wait);
326 goto repeat;
327 }
328
329 if (!parent && dev->parent) {
330 /*
331 * Increment the parent's resume counter and resume it if
332 * necessary.
333 */
334 parent = dev->parent;
335 spin_unlock_irq(&dev->power.lock);
336
337 pm_runtime_get_noresume(parent);
338
339 spin_lock_irq(&parent->power.lock);
340 /*
341 * We can resume if the parent's run-time PM is disabled or it
342 * is set to ignore children.
343 */
344 if (!parent->power.disable_depth
345 && !parent->power.ignore_children) {
346 __pm_runtime_resume(parent, false);
347 if (parent->power.runtime_status != RPM_ACTIVE)
348 retval = -EBUSY;
349 }
350 spin_unlock_irq(&parent->power.lock);
351
352 spin_lock_irq(&dev->power.lock);
353 if (retval)
354 goto out;
355 goto repeat;
356 }
357
358 dev->power.runtime_status = RPM_RESUMING;
359
360 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
361 spin_unlock_irq(&dev->power.lock);
362
363 retval = dev->bus->pm->runtime_resume(dev);
364
365 spin_lock_irq(&dev->power.lock);
366 dev->power.runtime_error = retval;
367 } else {
368 retval = -ENOSYS;
369 }
370
371 if (retval) {
372 dev->power.runtime_status = RPM_SUSPENDED;
373 pm_runtime_cancel_pending(dev);
374 } else {
375 dev->power.runtime_status = RPM_ACTIVE;
376 if (parent)
377 atomic_inc(&parent->power.child_count);
378 }
379 wake_up_all(&dev->power.wait_queue);
380
381 if (!retval)
382 __pm_request_idle(dev);
383
384 out:
385 if (parent) {
386 spin_unlock_irq(&dev->power.lock);
387
388 pm_runtime_put(parent);
389
390 spin_lock_irq(&dev->power.lock);
391 }
392
393 dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
394
395 return retval;
396}
397
398/**
399 * pm_runtime_resume - Carry out run-time resume of given device.
400 * @dev: Device to suspend.
401 */
402int pm_runtime_resume(struct device *dev)
403{
404 int retval;
405
406 spin_lock_irq(&dev->power.lock);
407 retval = __pm_runtime_resume(dev, false);
408 spin_unlock_irq(&dev->power.lock);
409
410 return retval;
411}
412EXPORT_SYMBOL_GPL(pm_runtime_resume);
413
414/**
415 * pm_runtime_work - Universal run-time PM work function.
416 * @work: Work structure used for scheduling the execution of this function.
417 *
418 * Use @work to get the device object the work is to be done for, determine what
419 * is to be done and execute the appropriate run-time PM function.
420 */
421static void pm_runtime_work(struct work_struct *work)
422{
423 struct device *dev = container_of(work, struct device, power.work);
424 enum rpm_request req;
425
426 spin_lock_irq(&dev->power.lock);
427
428 if (!dev->power.request_pending)
429 goto out;
430
431 req = dev->power.request;
432 dev->power.request = RPM_REQ_NONE;
433 dev->power.request_pending = false;
434
435 switch (req) {
436 case RPM_REQ_NONE:
437 break;
438 case RPM_REQ_IDLE:
439 __pm_runtime_idle(dev);
440 break;
441 case RPM_REQ_SUSPEND:
442 __pm_runtime_suspend(dev, true);
443 break;
444 case RPM_REQ_RESUME:
445 __pm_runtime_resume(dev, true);
446 break;
447 }
448
449 out:
450 spin_unlock_irq(&dev->power.lock);
451}
452
453/**
454 * __pm_request_idle - Submit an idle notification request for given device.
455 * @dev: Device to handle.
456 *
457 * Check if the device's run-time PM status is correct for suspending the device
458 * and queue up a request to run __pm_runtime_idle() for it.
459 *
460 * This function must be called under dev->power.lock with interrupts disabled.
461 */
462static int __pm_request_idle(struct device *dev)
463{
464 int retval = 0;
465
466 if (dev->power.runtime_error)
467 retval = -EINVAL;
468 else if (atomic_read(&dev->power.usage_count) > 0
469 || dev->power.disable_depth > 0
470 || dev->power.runtime_status == RPM_SUSPENDED
471 || dev->power.runtime_status == RPM_SUSPENDING)
472 retval = -EAGAIN;
473 else if (!pm_children_suspended(dev))
474 retval = -EBUSY;
475 if (retval)
476 return retval;
477
478 if (dev->power.request_pending) {
479		/* Any requests other than RPM_REQ_IDLE take precedence. */
480 if (dev->power.request == RPM_REQ_NONE)
481 dev->power.request = RPM_REQ_IDLE;
482 else if (dev->power.request != RPM_REQ_IDLE)
483 retval = -EAGAIN;
484 return retval;
485 }
486
487 dev->power.request = RPM_REQ_IDLE;
488 dev->power.request_pending = true;
489 queue_work(pm_wq, &dev->power.work);
490
491 return retval;
492}
493
494/**
495 * pm_request_idle - Submit an idle notification request for given device.
496 * @dev: Device to handle.
497 */
498int pm_request_idle(struct device *dev)
499{
500 unsigned long flags;
501 int retval;
502
503 spin_lock_irqsave(&dev->power.lock, flags);
504 retval = __pm_request_idle(dev);
505 spin_unlock_irqrestore(&dev->power.lock, flags);
506
507 return retval;
508}
509EXPORT_SYMBOL_GPL(pm_request_idle);
510
511/**
512 * __pm_request_suspend - Submit a suspend request for given device.
513 * @dev: Device to suspend.
514 *
515 * This function must be called under dev->power.lock with interrupts disabled.
516 */
517static int __pm_request_suspend(struct device *dev)
518{
519 int retval = 0;
520
521 if (dev->power.runtime_error)
522 return -EINVAL;
523
524 if (dev->power.runtime_status == RPM_SUSPENDED)
525 retval = 1;
526 else if (atomic_read(&dev->power.usage_count) > 0
527 || dev->power.disable_depth > 0)
528 retval = -EAGAIN;
529 else if (dev->power.runtime_status == RPM_SUSPENDING)
530 retval = -EINPROGRESS;
531 else if (!pm_children_suspended(dev))
532 retval = -EBUSY;
533 if (retval < 0)
534 return retval;
535
536 pm_runtime_deactivate_timer(dev);
537
538 if (dev->power.request_pending) {
539 /*
540 * Pending resume requests take precedence over us, but we can
541 * overtake any other pending request.
542 */
543 if (dev->power.request == RPM_REQ_RESUME)
544 retval = -EAGAIN;
545 else if (dev->power.request != RPM_REQ_SUSPEND)
546 dev->power.request = retval ?
547 RPM_REQ_NONE : RPM_REQ_SUSPEND;
548 return retval;
549 } else if (retval) {
550 return retval;
551 }
552
553 dev->power.request = RPM_REQ_SUSPEND;
554 dev->power.request_pending = true;
555 queue_work(pm_wq, &dev->power.work);
556
557 return 0;
558}
559
560/**
561 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
562 * @data: Device pointer passed by pm_schedule_suspend().
563 *
564 * Check if the time is right and execute __pm_request_suspend() in that case.
565 */
566static void pm_suspend_timer_fn(unsigned long data)
567{
568 struct device *dev = (struct device *)data;
569 unsigned long flags;
570 unsigned long expires;
571
572 spin_lock_irqsave(&dev->power.lock, flags);
573
574 expires = dev->power.timer_expires;
575 /* If 'expire' is after 'jiffies' we've been called too early. */
576 if (expires > 0 && !time_after(expires, jiffies)) {
577 dev->power.timer_expires = 0;
578 __pm_request_suspend(dev);
579 }
580
581 spin_unlock_irqrestore(&dev->power.lock, flags);
582}
583
584/**
585 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
586 * @dev: Device to suspend.
587 * @delay: Time to wait before submitting a suspend request, in milliseconds.
588 */
589int pm_schedule_suspend(struct device *dev, unsigned int delay)
590{
591 unsigned long flags;
592 int retval = 0;
593
594 spin_lock_irqsave(&dev->power.lock, flags);
595
596 if (dev->power.runtime_error) {
597 retval = -EINVAL;
598 goto out;
599 }
600
601 if (!delay) {
602 retval = __pm_request_suspend(dev);
603 goto out;
604 }
605
606 pm_runtime_deactivate_timer(dev);
607
608 if (dev->power.request_pending) {
609 /*
610 * Pending resume requests take precedence over us, but any
611 * other pending requests have to be canceled.
612 */
613 if (dev->power.request == RPM_REQ_RESUME) {
614 retval = -EAGAIN;
615 goto out;
616 }
617 dev->power.request = RPM_REQ_NONE;
618 }
619
620 if (dev->power.runtime_status == RPM_SUSPENDED)
621 retval = 1;
622 else if (dev->power.runtime_status == RPM_SUSPENDING)
623 retval = -EINPROGRESS;
624 else if (atomic_read(&dev->power.usage_count) > 0
625 || dev->power.disable_depth > 0)
626 retval = -EAGAIN;
627 else if (!pm_children_suspended(dev))
628 retval = -EBUSY;
629 if (retval)
630 goto out;
631
632 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
633 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
634
635 out:
636 spin_unlock_irqrestore(&dev->power.lock, flags);
637
638 return retval;
639}
640EXPORT_SYMBOL_GPL(pm_schedule_suspend);
641
642/**
643 * pm_request_resume - Submit a resume request for given device.
644 * @dev: Device to resume.
645 *
646 * This function must be called under dev->power.lock with interrupts disabled.
647 */
648static int __pm_request_resume(struct device *dev)
649{
650 int retval = 0;
651
652 if (dev->power.runtime_error)
653 return -EINVAL;
654
655 if (dev->power.runtime_status == RPM_ACTIVE)
656 retval = 1;
657 else if (dev->power.runtime_status == RPM_RESUMING)
658 retval = -EINPROGRESS;
659 else if (dev->power.disable_depth > 0)
660 retval = -EAGAIN;
661 if (retval < 0)
662 return retval;
663
664 pm_runtime_deactivate_timer(dev);
665
666 if (dev->power.request_pending) {
667 /* If non-resume request is pending, we can overtake it. */
668 dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
669 return retval;
670 } else if (retval) {
671 return retval;
672 }
673
674 dev->power.request = RPM_REQ_RESUME;
675 dev->power.request_pending = true;
676 queue_work(pm_wq, &dev->power.work);
677
678 return retval;
679}
680
681/**
682 * pm_request_resume - Submit a resume request for given device.
683 * @dev: Device to resume.
684 */
685int pm_request_resume(struct device *dev)
686{
687 unsigned long flags;
688 int retval;
689
690 spin_lock_irqsave(&dev->power.lock, flags);
691 retval = __pm_request_resume(dev);
692 spin_unlock_irqrestore(&dev->power.lock, flags);
693
694 return retval;
695}
696EXPORT_SYMBOL_GPL(pm_request_resume);
697
698/**
699 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
700 * @dev: Device to handle.
701 * @sync: If set and the device is suspended, resume it synchronously.
702 *
703 * Increment the usage count of the device and if it was zero previously,
704 * resume it or submit a resume request for it, depending on the value of @sync.
705 */
706int __pm_runtime_get(struct device *dev, bool sync)
707{
708 int retval = 1;
709
710 if (atomic_add_return(1, &dev->power.usage_count) == 1)
711 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
712
713 return retval;
714}
715EXPORT_SYMBOL_GPL(__pm_runtime_get);
716
717/**
718 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
719 * @dev: Device to handle.
720 * @sync: If the device's bus type is to be notified, do that synchronously.
721 *
722 * Decrement the usage count of the device and if it reaches zero, carry out a
723 * synchronous idle notification or submit an idle notification request for it,
724 * depending on the value of @sync.
725 */
726int __pm_runtime_put(struct device *dev, bool sync)
727{
728 int retval = 0;
729
730 if (atomic_dec_and_test(&dev->power.usage_count))
731 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
732
733 return retval;
734}
735EXPORT_SYMBOL_GPL(__pm_runtime_put);
736
737/**
738 * __pm_runtime_set_status - Set run-time PM status of a device.
739 * @dev: Device to handle.
740 * @status: New run-time PM status of the device.
741 *
742 * If run-time PM of the device is disabled or its power.runtime_error field is
743 * different from zero, the status may be changed either to RPM_ACTIVE, or to
744 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
745 * However, if the device has a parent and the parent is not active, and the
746 * parent's power.ignore_children flag is unset, the device's status cannot be
747 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
748 *
749 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
750 * and the device parent's counter of unsuspended children is modified to
751 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
752 * notification request for the parent is submitted.
753 */
754int __pm_runtime_set_status(struct device *dev, unsigned int status)
755{
756 struct device *parent = dev->parent;
757 unsigned long flags;
758 bool notify_parent = false;
759 int error = 0;
760
761 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
762 return -EINVAL;
763
764 spin_lock_irqsave(&dev->power.lock, flags);
765
766 if (!dev->power.runtime_error && !dev->power.disable_depth) {
767 error = -EAGAIN;
768 goto out;
769 }
770
771 if (dev->power.runtime_status == status)
772 goto out_set;
773
774 if (status == RPM_SUSPENDED) {
775 /* It always is possible to set the status to 'suspended'. */
776 if (parent) {
777 atomic_add_unless(&parent->power.child_count, -1, 0);
778 notify_parent = !parent->power.ignore_children;
779 }
780 goto out_set;
781 }
782
783 if (parent) {
784 spin_lock_irq(&parent->power.lock);
785
786 /*
787 * It is invalid to put an active child under a parent that is
788 * not active, has run-time PM enabled and the
789 * 'power.ignore_children' flag unset.
790 */
791 if (!parent->power.disable_depth
792 && !parent->power.ignore_children
793 && parent->power.runtime_status != RPM_ACTIVE) {
794 error = -EBUSY;
795 } else {
796 if (dev->power.runtime_status == RPM_SUSPENDED)
797 atomic_inc(&parent->power.child_count);
798 }
799
800 spin_unlock_irq(&parent->power.lock);
801
802 if (error)
803 goto out;
804 }
805
806 out_set:
807 dev->power.runtime_status = status;
808 dev->power.runtime_error = 0;
809 out:
810 spin_unlock_irqrestore(&dev->power.lock, flags);
811
812 if (notify_parent)
813 pm_request_idle(parent);
814
815 return error;
816}
817EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
818
819/**
820 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
821 * @dev: Device to handle.
822 *
823 * Flush all pending requests for the device from pm_wq and wait for all
824 * run-time PM operations involving the device in progress to complete.
825 *
826 * Should be called under dev->power.lock with interrupts disabled.
827 */
828static void __pm_runtime_barrier(struct device *dev)
829{
830 pm_runtime_deactivate_timer(dev);
831
832 if (dev->power.request_pending) {
833 dev->power.request = RPM_REQ_NONE;
834 spin_unlock_irq(&dev->power.lock);
835
836 cancel_work_sync(&dev->power.work);
837
838 spin_lock_irq(&dev->power.lock);
839 dev->power.request_pending = false;
840 }
841
842 if (dev->power.runtime_status == RPM_SUSPENDING
843 || dev->power.runtime_status == RPM_RESUMING
844 || dev->power.idle_notification) {
845 DEFINE_WAIT(wait);
846
847 /* Suspend, wake-up or idle notification in progress. */
848 for (;;) {
849 prepare_to_wait(&dev->power.wait_queue, &wait,
850 TASK_UNINTERRUPTIBLE);
851 if (dev->power.runtime_status != RPM_SUSPENDING
852 && dev->power.runtime_status != RPM_RESUMING
853 && !dev->power.idle_notification)
854 break;
855 spin_unlock_irq(&dev->power.lock);
856
857 schedule();
858
859 spin_lock_irq(&dev->power.lock);
860 }
861 finish_wait(&dev->power.wait_queue, &wait);
862 }
863}
864
865/**
866 * pm_runtime_barrier - Flush pending requests and wait for completions.
867 * @dev: Device to handle.
868 *
869 * Prevent the device from being suspended by incrementing its usage counter and
870 * if there's a pending resume request for the device, wake the device up.
871 * Next, make sure that all pending requests for the device have been flushed
872 * from pm_wq and wait for all run-time PM operations involving the device in
873 * progress to complete.
874 *
875 * Return value:
876 * 1, if there was a resume request pending and the device had to be woken up,
877 * 0, otherwise
878 */
879int pm_runtime_barrier(struct device *dev)
880{
881 int retval = 0;
882
883 pm_runtime_get_noresume(dev);
884 spin_lock_irq(&dev->power.lock);
885
886 if (dev->power.request_pending
887 && dev->power.request == RPM_REQ_RESUME) {
888 __pm_runtime_resume(dev, false);
889 retval = 1;
890 }
891
892 __pm_runtime_barrier(dev);
893
894 spin_unlock_irq(&dev->power.lock);
895 pm_runtime_put_noidle(dev);
896
897 return retval;
898}
899EXPORT_SYMBOL_GPL(pm_runtime_barrier);
900
901/**
902 * __pm_runtime_disable - Disable run-time PM of a device.
903 * @dev: Device to handle.
904 * @check_resume: If set, check if there's a resume request for the device.
905 *
906 * Increment power.disable_depth for the device and if was zero previously,
907 * cancel all pending run-time PM requests for the device and wait for all
908 * operations in progress to complete. The device can be either active or
909 * suspended after its run-time PM has been disabled.
910 *
911 * If @check_resume is set and there's a resume request pending when
912 * __pm_runtime_disable() is called and power.disable_depth is zero, the
913 * function will wake up the device before disabling its run-time PM.
914 */
915void __pm_runtime_disable(struct device *dev, bool check_resume)
916{
917 spin_lock_irq(&dev->power.lock);
918
919 if (dev->power.disable_depth > 0) {
920 dev->power.disable_depth++;
921 goto out;
922 }
923
924 /*
925 * Wake up the device if there's a resume request pending, because that
926 * means there probably is some I/O to process and disabling run-time PM
927 * shouldn't prevent the device from processing the I/O.
928 */
929 if (check_resume && dev->power.request_pending
930 && dev->power.request == RPM_REQ_RESUME) {
931 /*
932 * Prevent suspends and idle notifications from being carried
933 * out after we have woken up the device.
934 */
935 pm_runtime_get_noresume(dev);
936
937 __pm_runtime_resume(dev, false);
938
939 pm_runtime_put_noidle(dev);
940 }
941
942 if (!dev->power.disable_depth++)
943 __pm_runtime_barrier(dev);
944
945 out:
946 spin_unlock_irq(&dev->power.lock);
947}
948EXPORT_SYMBOL_GPL(__pm_runtime_disable);
949
950/**
951 * pm_runtime_enable - Enable run-time PM of a device.
952 * @dev: Device to handle.
953 */
954void pm_runtime_enable(struct device *dev)
955{
956 unsigned long flags;
957
958 spin_lock_irqsave(&dev->power.lock, flags);
959
960 if (dev->power.disable_depth > 0)
961 dev->power.disable_depth--;
962 else
963 dev_warn(dev, "Unbalanced %s!\n", __func__);
964
965 spin_unlock_irqrestore(&dev->power.lock, flags);
966}
967EXPORT_SYMBOL_GPL(pm_runtime_enable);
968
969/**
970 * pm_runtime_init - Initialize run-time PM fields in given device object.
971 * @dev: Device object to initialize.
972 */
973void pm_runtime_init(struct device *dev)
974{
975 spin_lock_init(&dev->power.lock);
976
977 dev->power.runtime_status = RPM_SUSPENDED;
978 dev->power.idle_notification = false;
979
980 dev->power.disable_depth = 1;
981 atomic_set(&dev->power.usage_count, 0);
982
983 dev->power.runtime_error = 0;
984
985 atomic_set(&dev->power.child_count, 0);
986 pm_suspend_ignore_children(dev, false);
987
988 dev->power.request_pending = false;
989 dev->power.request = RPM_REQ_NONE;
990 dev->power.deferred_resume = false;
991 INIT_WORK(&dev->power.work, pm_runtime_work);
992
993 dev->power.timer_expires = 0;
994 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
995 (unsigned long)dev);
996
997 init_waitqueue_head(&dev->power.wait_queue);
998}
999
1000/**
1001 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1002 * @dev: Device object being removed from device hierarchy.
1003 */
1004void pm_runtime_remove(struct device *dev)
1005{
1006 __pm_runtime_disable(dev, false);
1007
1008 /* Change the status back to 'suspended' to match the initial status. */
1009 if (dev->power.runtime_status == RPM_ACTIVE)
1010 pm_runtime_set_suspended(dev);
1011}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 98c9a847bf51..933c143b6a74 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1399,8 +1399,9 @@ static void dw_shutdown(struct platform_device *pdev)
 	clk_disable(dw->clk);
 }
 
-static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int dw_suspend_noirq(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	dw_dma_off(platform_get_drvdata(pdev));
@@ -1408,23 +1409,27 @@ static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
 	return 0;
 }
 
-static int dw_resume_early(struct platform_device *pdev)
+static int dw_resume_noirq(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
 	clk_enable(dw->clk);
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 	return 0;
-
 }
 
+static struct dev_pm_ops dw_dev_pm_ops = {
+	.suspend_noirq = dw_suspend_noirq,
+	.resume_noirq = dw_resume_noirq,
+};
+
 static struct platform_driver dw_driver = {
 	.remove		= __exit_p(dw_remove),
 	.shutdown	= dw_shutdown,
-	.suspend_late	= dw_suspend_late,
-	.resume_early	= dw_resume_early,
 	.driver = {
 		.name	= "dw_dmac",
+		.pm	= &dw_dev_pm_ops,
 	},
 };
 
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 88dab52926f4..7837930146a4 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1291,17 +1291,18 @@ static void txx9dmac_shutdown(struct platform_device *pdev)
 	txx9dmac_off(ddev);
 }
 
-static int txx9dmac_suspend_late(struct platform_device *pdev,
-				 pm_message_t mesg)
+static int txx9dmac_suspend_noirq(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 
 	txx9dmac_off(ddev);
 	return 0;
 }
 
-static int txx9dmac_resume_early(struct platform_device *pdev)
+static int txx9dmac_resume_noirq(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
 	u32 mcr;
@@ -1314,6 +1315,11 @@ static int txx9dmac_resume_early(struct platform_device *pdev)
 
 }
 
+static struct dev_pm_ops txx9dmac_dev_pm_ops = {
+	.suspend_noirq = txx9dmac_suspend_noirq,
+	.resume_noirq = txx9dmac_resume_noirq,
+};
+
 static struct platform_driver txx9dmac_chan_driver = {
 	.remove		= __exit_p(txx9dmac_chan_remove),
 	.driver		= {
@@ -1324,10 +1330,9 @@ static struct platform_driver txx9dmac_chan_driver = {
 static struct platform_driver txx9dmac_driver = {
 	.remove		= __exit_p(txx9dmac_remove),
 	.shutdown	= txx9dmac_shutdown,
-	.suspend_late	= txx9dmac_suspend_late,
-	.resume_early	= txx9dmac_resume_early,
 	.driver		= {
 		.name	= "txx9dmac",
+		.pm	= &txx9dmac_dev_pm_ops,
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 762e1e530882..049555777f67 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1134,35 +1134,44 @@ static int __exit i2c_pxa_remove(struct platform_device *dev)
 }
 
 #ifdef CONFIG_PM
-static int i2c_pxa_suspend_late(struct platform_device *dev, pm_message_t state)
+static int i2c_pxa_suspend_noirq(struct device *dev)
 {
-	struct pxa_i2c *i2c = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pxa_i2c *i2c = platform_get_drvdata(pdev);
+
 	clk_disable(i2c->clk);
+
 	return 0;
 }
 
-static int i2c_pxa_resume_early(struct platform_device *dev)
+static int i2c_pxa_resume_noirq(struct device *dev)
 {
-	struct pxa_i2c *i2c = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pxa_i2c *i2c = platform_get_drvdata(pdev);
 
 	clk_enable(i2c->clk);
 	i2c_pxa_reset(i2c);
 
 	return 0;
 }
+
+static struct dev_pm_ops i2c_pxa_dev_pm_ops = {
+	.suspend_noirq = i2c_pxa_suspend_noirq,
+	.resume_noirq = i2c_pxa_resume_noirq,
+};
+
+#define I2C_PXA_DEV_PM_OPS (&i2c_pxa_dev_pm_ops)
 #else
-#define i2c_pxa_suspend_late NULL
-#define i2c_pxa_resume_early NULL
+#define I2C_PXA_DEV_PM_OPS NULL
 #endif
 
 static struct platform_driver i2c_pxa_driver = {
 	.probe		= i2c_pxa_probe,
 	.remove		= __exit_p(i2c_pxa_remove),
-	.suspend_late	= i2c_pxa_suspend_late,
-	.resume_early	= i2c_pxa_resume_early,
 	.driver		= {
 		.name	= "pxa2xx-i2c",
 		.owner	= THIS_MODULE,
+		.pm	= I2C_PXA_DEV_PM_OPS,
 	},
 	.id_table	= i2c_pxa_id_table,
 };
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 20bb0ceb027b..96aafb91b69a 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -946,17 +946,20 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int s3c24xx_i2c_suspend_late(struct platform_device *dev,
-				    pm_message_t msg)
+static int s3c24xx_i2c_suspend_noirq(struct device *dev)
 {
-	struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+
 	i2c->suspended = 1;
+
 	return 0;
 }
 
-static int s3c24xx_i2c_resume(struct platform_device *dev)
+static int s3c24xx_i2c_resume(struct device *dev)
 {
-	struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
 	i2c->suspended = 0;
 	s3c24xx_i2c_init(i2c);
@@ -964,9 +967,14 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
 	return 0;
 }
 
+static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+	.suspend_noirq = s3c24xx_i2c_suspend_noirq,
+	.resume = s3c24xx_i2c_resume,
+};
+
+#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
 #else
-#define s3c24xx_i2c_suspend_late NULL
-#define s3c24xx_i2c_resume NULL
+#define S3C24XX_DEV_PM_OPS NULL
 #endif
 
 /* device driver for platform bus bits */
@@ -985,12 +993,11 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
 static struct platform_driver s3c24xx_i2c_driver = {
 	.probe		= s3c24xx_i2c_probe,
 	.remove		= s3c24xx_i2c_remove,
-	.suspend_late	= s3c24xx_i2c_suspend_late,
-	.resume		= s3c24xx_i2c_resume,
 	.id_table	= s3c24xx_driver_ids,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s3c-i2c",
+		.pm	= S3C24XX_DEV_PM_OPS,
 	},
 };
 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f99bc7f089f1..a7eb7277b106 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -575,7 +575,7 @@ static void pci_pm_complete(struct device *dev)
 static int pci_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -613,7 +613,7 @@ static int pci_pm_suspend(struct device *dev)
 static int pci_pm_suspend_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -672,7 +672,7 @@ static int pci_pm_resume_noirq(struct device *dev)
 static int pci_pm_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -711,7 +711,7 @@ static int pci_pm_resume(struct device *dev)
 static int pci_pm_freeze(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_FREEZE);
@@ -780,7 +780,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
 static int pci_pm_thaw(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	if (pci_has_legacy_pm_support(pci_dev))
@@ -799,7 +799,7 @@ static int pci_pm_thaw(struct device *dev)
 static int pci_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_HIBERNATE);
@@ -872,7 +872,7 @@ static int pci_pm_restore_noirq(struct device *dev)
 static int pci_pm_restore(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
 	/*
@@ -910,7 +910,7 @@ static int pci_pm_restore(struct device *dev)
 
 #endif /* !CONFIG_HIBERNATION */
 
-struct dev_pm_ops pci_dev_pm_ops = {
+const struct dev_pm_ops pci_dev_pm_ops = {
 	.prepare = pci_pm_prepare,
 	.complete = pci_pm_complete,
 	.suspend = pci_pm_suspend,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c7c1ca0494cd..1d26beddf2ca 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2167,8 +2167,9 @@ static int __devexit musb_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+static int musb_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	unsigned long	flags;
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
@@ -2195,8 +2196,9 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
 	return 0;
 }
 
-static int musb_resume_early(struct platform_device *pdev)
+static int musb_resume_noirq(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
 	if (!musb->clock)
@@ -2214,9 +2216,14 @@ static int musb_resume_early(struct platform_device *pdev)
 	return 0;
 }
 
+static struct dev_pm_ops musb_dev_pm_ops = {
+	.suspend	= musb_suspend,
+	.resume_noirq	= musb_resume_noirq,
+};
+
+#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
 #else
-#define	musb_suspend	NULL
-#define	musb_resume_early	NULL
+#define	MUSB_DEV_PM_OPS	NULL
 #endif
 
 static struct platform_driver musb_driver = {
@@ -2224,11 +2231,10 @@ static struct platform_driver musb_driver = {
 		.name		= (char *)musb_driver_name,
 		.bus		= &platform_bus_type,
 		.owner		= THIS_MODULE,
+		.pm		= MUSB_DEV_PM_OPS,
 	},
 	.remove		= __devexit_p(musb_remove),
 	.shutdown	= musb_shutdown,
-	.suspend	= musb_suspend,
-	.resume_early	= musb_resume_early,
 };
 
 /*-------------------------------------------------------------------------*/
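
For orientation only, and not part of the patch above: a minimal sketch of how a driver might use the run-time PM helpers exported by the new drivers/base/power/runtime.c. The foo_* names are hypothetical; pm_runtime_set_active(), pm_runtime_enable() and pm_runtime_disable() are assumed to be the wrappers from the include/linux/pm_runtime.h half of this change, which falls outside the drivers/ diffstat shown here.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * The device is left powered up by firmware, so mark it RPM_ACTIVE
	 * (wrapper around __pm_runtime_set_status() exported above) before
	 * dropping the initial disable_depth with pm_runtime_enable().
	 */
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;

	/* I/O arrived: queue a resume request; pm_request_resume() is IRQ-safe. */
	pm_request_resume(dev);
	return IRQ_HANDLED;
}

static void foo_all_requests_done(struct device *dev)
{
	/* Nothing left to do: ask the core to submit a suspend request in 100 ms. */
	pm_schedule_suspend(dev, 100);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Wrapper around __pm_runtime_disable(dev, true) exported above. */
	pm_runtime_disable(&pdev->dev);
	return 0;
}

Whether the device actually powers down is up to its bus type: with this patch the platform bus routes ->runtime_suspend()/->runtime_resume()/->runtime_idle() to the __weak platform_pm_runtime_* functions added to platform.c, which return -ENOSYS unless the architecture overrides them.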