-rw-r--r--  Documentation/power/runtime_pm.txt    |  378
-rw-r--r--  arch/arm/include/asm/device.h         |    3
-rw-r--r--  arch/arm/plat-omap/debug-leds.c       |   11
-rw-r--r--  arch/arm/plat-omap/gpio.c             |   14
-rw-r--r--  arch/ia64/include/asm/device.h        |    3
-rw-r--r--  arch/microblaze/include/asm/device.h  |    3
-rw-r--r--  arch/powerpc/include/asm/device.h     |    3
-rw-r--r--  arch/sparc/include/asm/device.h       |    3
-rw-r--r--  arch/x86/include/asm/device.h         |    3
-rw-r--r--  drivers/base/dd.c                     |   11
-rw-r--r--  drivers/base/platform.c               |   84
-rw-r--r--  drivers/base/power/Makefile           |    1
-rw-r--r--  drivers/base/power/main.c             |   30
-rw-r--r--  drivers/base/power/power.h            |   31
-rw-r--r--  drivers/base/power/runtime.c          | 1011
-rw-r--r--  drivers/dma/dw_dmac.c                 |   15
-rw-r--r--  drivers/dma/txx9dmac.c                |   15
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c          |   25
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c      |   25
-rw-r--r--  drivers/pci/pci-driver.c              |   16
-rw-r--r--  drivers/usb/musb/musb_core.c          |   18
-rw-r--r--  include/asm-generic/device.h          |    3
-rw-r--r--  include/linux/device.h                |    9
-rw-r--r--  include/linux/platform_device.h       |    5
-rw-r--r--  include/linux/pm.h                    |  101
-rw-r--r--  include/linux/pm_runtime.h            |  114
-rw-r--r--  kernel/power/Kconfig                  |   14
-rw-r--r--  kernel/power/main.c                   |   17
28 files changed, 1851 insertions(+), 115 deletions(-)
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
new file mode 100644
index 000000000000..f49a33b704d2
--- /dev/null
+++ b/Documentation/power/runtime_pm.txt
@@ -0,0 +1,378 @@
1Run-time Power Management Framework for I/O Devices
2
3(C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
4
51. Introduction
6
7Support for run-time power management (run-time PM) of I/O devices is provided
8at the power management core (PM core) level by means of:
9
10* The power management workqueue pm_wq in which bus types and device drivers can
11 put their PM-related work items. It is strongly recommended that pm_wq be
12 used for queuing all work items related to run-time PM, because this allows
13 them to be synchronized with system-wide power transitions (suspend to RAM,
14 hibernation and resume from system sleep states). pm_wq is declared in
15 include/linux/pm_runtime.h and defined in kernel/power/main.c.
16
17* A number of run-time PM fields in the 'power' member of 'struct device' (which
18 is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can
19 be used for synchronizing run-time PM operations with one another.
20
21* Three device run-time PM callbacks in 'struct dev_pm_ops' (defined in
22 include/linux/pm.h).
23
24* A set of helper functions defined in drivers/base/power/runtime.c that can be
25 used for carrying out run-time PM operations in such a way that the
26 synchronization between them is taken care of by the PM core. Bus types and
27 device drivers are encouraged to use these functions.
28
29The run-time PM callbacks present in 'struct dev_pm_ops', the device run-time PM
30fields of 'struct dev_pm_info' and the core helper functions provided for
31run-time PM are described below.
32
332. Device Run-time PM Callbacks
34
35There are three device run-time PM callbacks defined in 'struct dev_pm_ops':
36
37struct dev_pm_ops {
38 ...
39 int (*runtime_suspend)(struct device *dev);
40 int (*runtime_resume)(struct device *dev);
41 void (*runtime_idle)(struct device *dev);
42 ...
43};
44
45The ->runtime_suspend() callback is executed by the PM core for the bus type of
46the device being suspended. The bus type's callback is then _entirely_
47_responsible_ for handling the device as appropriate, which may, but need not,
48include executing the device driver's own ->runtime_suspend() callback (from the
49PM core's point of view it is not necessary to implement a ->runtime_suspend()
50callback in a device driver as long as the bus type's ->runtime_suspend() knows
51what to do to handle the device).
52
53 * Once the bus type's ->runtime_suspend() callback has completed successfully
54 for a given device, the PM core regards the device as suspended, which need
55 not mean that the device has been put into a low power state. It is
56 supposed to mean, however, that the device will not process data and will
57 not communicate with the CPU(s) and RAM until its bus type's
58 ->runtime_resume() callback is executed for it. The run-time PM status of
59 a device after successful execution of its bus type's ->runtime_suspend()
60 callback is 'suspended'.
61
62 * If the bus type's ->runtime_suspend() callback returns -EBUSY or -EAGAIN,
63 the device's run-time PM status is supposed to be 'active', which means that
64 the device _must_ be fully operational afterwards.
65
66 * If the bus type's ->runtime_suspend() callback returns an error code
67 different from -EBUSY or -EAGAIN, the PM core regards this as a fatal
68 error and will refuse to run the helper functions described in Section 4
69 for the device, until its status is directly set either to 'active'
70 or to 'suspended' (the PM core provides special helper functions for this
71 purpose).
72
73In particular, if the driver requires remote wakeup capability for proper
74functioning and device_may_wakeup() returns 'false' for the device, then
75->runtime_suspend() should return -EBUSY. On the other hand, if
76device_may_wakeup() returns 'true' for the device and the device is put
77into a low power state during the execution of its bus type's
78->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism
79allowing the device to request a change of its power state, such as PCI PME)
80will be enabled for the device. Generally, remote wake-up should be enabled
81for all input devices put into a low power state at run time.
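
For illustration only, a bus type's ->runtime_suspend() might follow the pattern
sketched below.  The foo_bus_* helpers are hypothetical names made up for this
example; only device_may_wakeup() and the return value conventions described
above come from the framework.

static int foo_bus_runtime_suspend(struct device *dev)
{
	if (foo_bus_device_is_busy(dev))
		return -EBUSY;		/* stay 'active', retry later */

	if (device_may_wakeup(dev))
		foo_bus_enable_remote_wakeup(dev);
	else if (foo_bus_device_needs_remote_wakeup(dev))
		return -EBUSY;		/* remote wake-up not allowed, stay 'active' */

	foo_bus_put_device_into_low_power_state(dev);
	return 0;			/* the status becomes 'suspended' */
}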
82
83The ->runtime_resume() callback is executed by the PM core for the bus type of
84the device being woken up. The bus type's callback is then _entirely_
85_responsible_ for handling the device as appropriate, which may, but need not,
86include executing the device driver's own ->runtime_resume() callback (from the
87PM core's point of view it is not necessary to implement a ->runtime_resume()
88callback in a device driver as long as the bus type's ->runtime_resume() knows
89what to do to handle the device).
90
91 * Once the bus type's ->runtime_resume() callback has completed successfully,
92 the PM core regards the device as fully operational, which means that the
93 device _must_ be able to complete I/O operations as needed. The run-time
94 PM status of the device is then 'active'.
95
96 * If the bus type's ->runtime_resume() callback returns an error code, the PM
97 core regards this as a fatal error and will refuse to run the helper
98 functions described in Section 4 for the device, until its status is
99 directly set either to 'active' or to 'suspended' (the PM core provides
100 special helper functions for this purpose).
101
102The ->runtime_idle() callback is executed by the PM core for the bus type of
103a given device whenever the device appears to be idle, which is indicated to the
104PM core by two counters, the device's usage counter and the counter of 'active'
105children of the device.
106
107 * If any of these counters is decreased using a helper function provided by
108 the PM core and it turns out to be equal to zero, the other counter is
109 checked. If that counter is also equal to zero, the PM core executes the
110 device bus type's ->runtime_idle() callback (with the device as an
111 argument).
112
113The action performed by a bus type's ->runtime_idle() callback is totally
114dependent on the bus type in question, but the expected and recommended action
115is to check if the device can be suspended (i.e. if all of the conditions
116necessary for suspending the device are satisfied) and to queue up a suspend
117request for the device in that case.
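
As an illustration, such a ->runtime_idle() callback might look like the sketch
below, where foo_bus_device_may_suspend() is a hypothetical check and
pm_schedule_suspend() is the helper described in Section 4 (a zero delay causes
the suspend request to be queued up immediately).

static void foo_bus_runtime_idle(struct device *dev)
{
	if (foo_bus_device_may_suspend(dev))
		pm_schedule_suspend(dev, 0);	/* queue up a suspend request */
}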
118
119The helper functions provided by the PM core, described in Section 4, guarantee
120that the following constraints are met with respect to the bus type's run-time
121PM callbacks:
122
123(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
124 ->runtime_suspend() in parallel with ->runtime_resume() or with another
125 instance of ->runtime_suspend() for the same device) with the exception that
126 ->runtime_suspend() or ->runtime_resume() can be executed in parallel with
127 ->runtime_idle() (although ->runtime_idle() will not be started while any
128 of the other callbacks is being executed for the same device).
129
130(2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active'
131 devices (i.e. the PM core will only execute ->runtime_idle() or
132 ->runtime_suspend() for the devices the run-time PM status of which is
133 'active').
134
135(3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device
136 the usage counter of which is equal to zero _and_ either the counter of
137 'active' children of which is equal to zero, or the 'power.ignore_children'
138 flag of which is set.
139
140(4) ->runtime_resume() can only be executed for 'suspended' devices (i.e. the
141 PM core will only execute ->runtime_resume() for the devices the run-time
142 PM status of which is 'suspended').
143
144Additionally, the helper functions provided by the PM core obey the following
145rules:
146
147 * If ->runtime_suspend() is about to be executed or there's a pending request
148 to execute it, ->runtime_idle() will not be executed for the same device.
149
150 * A request to execute or to schedule the execution of ->runtime_suspend()
151 will cancel any pending requests to execute ->runtime_idle() for the same
152 device.
153
154 * If ->runtime_resume() is about to be executed or there's a pending request
155 to execute it, the other callbacks will not be executed for the same device.
156
157 * A request to execute ->runtime_resume() will cancel any pending or
158 scheduled requests to execute the other callbacks for the same device.
159
1603. Run-time PM Device Fields
161
162The following device run-time PM fields are present in 'struct dev_pm_info', as
163defined in include/linux/pm.h:
164
165 struct timer_list suspend_timer;
166 - timer used for scheduling (delayed) suspend request
167
168 unsigned long timer_expires;
169 - timer expiration time, in jiffies (if this is different from zero, the
170 timer is running and will expire at that time, otherwise the timer is not
171 running)
172
173 struct work_struct work;
174 - work structure used for queuing up requests (i.e. work items in pm_wq)
175
176 wait_queue_head_t wait_queue;
177 - wait queue used if any of the helper functions needs to wait for another
178 one to complete
179
180 spinlock_t lock;
181 - lock used for synchronisation
182
183 atomic_t usage_count;
184 - the usage counter of the device
185
186 atomic_t child_count;
187 - the count of 'active' children of the device
188
189 unsigned int ignore_children;
190 - if set, the value of child_count is ignored (but still updated)
191
192 unsigned int disable_depth;
193 - used for disabling the helper functions (they work normally if this is
194 equal to zero); its initial value is 1 (i.e. run-time PM is
195 initially disabled for all devices)
196
197 unsigned int runtime_error;
198 - if set, there was a fatal error (one of the callbacks returned an error
199 code as described in Section 2), so the helper functions will not work until
200 this flag is cleared; this is the error code returned by the failing
201 callback
202
203 unsigned int idle_notification;
204 - if set, ->runtime_idle() is being executed
205
206 unsigned int request_pending;
207 - if set, there's a pending request (i.e. a work item queued up into pm_wq)
208
209 enum rpm_request request;
210 - type of request that's pending (valid if request_pending is set)
211
212 unsigned int deferred_resume;
213 - set if ->runtime_resume() is about to be run while ->runtime_suspend() is
214 being executed for that device and it is not practical to wait for the
215 suspend to complete; means "start a resume as soon as you've suspended"
216
217 enum rpm_status runtime_status;
218 - the run-time PM status of the device; this field's initial value is
219 RPM_SUSPENDED, which means that each device is initially regarded by the
220 PM core as 'suspended', regardless of its real hardware status
221
222All of the above fields are members of the 'power' member of 'struct device'.
223
2244. Run-time PM Device Helper Functions
225
226The following run-time PM helper functions are defined in
227drivers/base/power/runtime.c and include/linux/pm_runtime.h:
228
229 void pm_runtime_init(struct device *dev);
230 - initialize the device run-time PM fields in 'struct dev_pm_info'
231
232 void pm_runtime_remove(struct device *dev);
233 - make sure that the run-time PM of the device will be disabled after
234 removing the device from the device hierarchy
235
236 int pm_runtime_idle(struct device *dev);
237 - execute ->runtime_idle() for the device's bus type; returns 0 on success
238 or error code on failure, where -EINPROGRESS means that ->runtime_idle()
239 is already being executed
240
241 int pm_runtime_suspend(struct device *dev);
242 - execute ->runtime_suspend() for the device's bus type; returns 0 on
243 success, 1 if the device's run-time PM status was already 'suspended', or
244 error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
245 to suspend the device again in future
246
247 int pm_runtime_resume(struct device *dev);
248 - execute ->runtime_resume() for the device's bus type; returns 0 on
249 success, 1 if the device's run-time PM status was already 'active' or
250 error code on failure, where -EAGAIN means it may be safe to attempt to
251 resume the device again in future, but 'power.runtime_error' should be
252 checked additionally
253
254 int pm_request_idle(struct device *dev);
255 - submit a request to execute ->runtime_idle() for the device's bus type
256 (the request is represented by a work item in pm_wq); returns 0 on success
257 or error code if the request has not been queued up
258
259 int pm_schedule_suspend(struct device *dev, unsigned int delay);
260 - schedule the execution of ->runtime_suspend() for the device's bus type
261 in future, where 'delay' is the time to wait before queuing up a suspend
262 work item in pm_wq, in milliseconds (if 'delay' is zero, the work item is
263 queued up immediately); returns 0 on success, 1 if the device's PM
264 run-time status was already 'suspended', or error code if the request
265 hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
266 ->runtime_suspend() is already scheduled and not yet expired, the new
267 value of 'delay' will be used as the time to wait
268
269 int pm_request_resume(struct device *dev);
270 - submit a request to execute ->runtime_resume() for the device's bus type
271 (the request is represented by a work item in pm_wq); returns 0 on
272 success, 1 if the device's run-time PM status was already 'active', or
273 error code if the request hasn't been queued up
274
275 void pm_runtime_get_noresume(struct device *dev);
276 - increment the device's usage counter
277
278 int pm_runtime_get(struct device *dev);
279 - increment the device's usage counter, run pm_request_resume(dev) and
280 return its result
281
282 int pm_runtime_get_sync(struct device *dev);
283 - increment the device's usage counter, run pm_runtime_resume(dev) and
284 return its result
285
286 void pm_runtime_put_noidle(struct device *dev);
287 - decrement the device's usage counter
288
289 int pm_runtime_put(struct device *dev);
290 - decrement the device's usage counter, run pm_request_idle(dev) and return
291 its result
292
293 int pm_runtime_put_sync(struct device *dev);
294 - decrement the device's usage counter, run pm_runtime_idle(dev) and return
295 its result
296
297 void pm_runtime_enable(struct device *dev);
298 - enable the run-time PM helper functions to run the device bus type's
299 run-time PM callbacks described in Section 2
300
301 int pm_runtime_disable(struct device *dev);
302 - prevent the run-time PM helper functions from running the device bus
303 type's run-time PM callbacks and make sure that all of the pending run-time
304 PM operations on the device are either completed or canceled; returns
305 1 if there was a resume request pending and it was necessary to execute
306 ->runtime_resume() for the device's bus type to satisfy that request,
307 otherwise 0 is returned
308
309 void pm_suspend_ignore_children(struct device *dev, bool enable);
310 - set/unset the power.ignore_children flag of the device
311
312 int pm_runtime_set_active(struct device *dev);
313 - clear the device's 'power.runtime_error' flag, set the device's run-time
314 PM status to 'active' and update its parent's counter of 'active'
315 children as appropriate (it is only valid to use this function if
316 'power.runtime_error' is set or 'power.disable_depth' is greater than
317 zero); it will fail and return an error code if the device has a parent
318 which is not active and the 'power.ignore_children' flag of which is unset
319
320 void pm_runtime_set_suspended(struct device *dev);
321 - clear the device's 'power.runtime_error' flag, set the device's run-time
322 PM status to 'suspended' and update its parent's counter of 'active'
323 children as appropriate (it is only valid to use this function if
324 'power.runtime_error' is set or 'power.disable_depth' is greater than
325 zero)
326
327It is safe to execute the following helper functions from interrupt context:
328
329pm_request_idle()
330pm_schedule_suspend()
331pm_request_resume()
332pm_runtime_get_noresume()
333pm_runtime_get()
334pm_runtime_put_noidle()
335pm_runtime_put()
336pm_suspend_ignore_children()
337pm_runtime_set_active()
338pm_runtime_set_suspended()
339pm_runtime_enable()
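
To illustrate how the helpers above fit together, a driver's I/O entry point
might bracket hardware access as in the sketch below.  The foo_* names are
hypothetical; pm_runtime_get_sync() and pm_runtime_put() are the helpers
described in this section.

static int foo_do_io(struct device *dev, struct foo_request *req)
{
	int error = pm_runtime_get_sync(dev);	/* usage counter++, resume if needed */

	if (error < 0)
		goto out;

	error = foo_hw_transfer(dev, req);	/* the device is 'active' here */

 out:
	pm_runtime_put(dev);	/* usage counter--, may queue ->runtime_idle() */
	return error;
}

Since pm_runtime_get_sync() is not among the helpers listed above as safe to
execute from interrupt context, a pattern like this is only suitable for
process context.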
340
3415. Run-time PM Initialization, Device Probing and Removal
342
343Initially, run-time PM is disabled for all devices, which means that the
344majority of the run-time PM helper functions described in Section 4 will return
345-EAGAIN until pm_runtime_enable() is called for the device.
346
347In addition to that, the initial run-time PM status of all devices is
348'suspended', but it need not reflect the actual physical state of the device.
349Thus, if the device is initially active (i.e. it is able to process I/O), its
350run-time PM status must be changed to 'active', with the help of
351pm_runtime_set_active(), before pm_runtime_enable() is called for the device.
352
353However, if the device has a parent and the parent's run-time PM is enabled,
354calling pm_runtime_set_active() for the device will affect the parent, unless
355the parent's 'power.ignore_children' flag is set. Namely, in that case the
356parent won't be able to suspend at run time, using the PM core's helper
357functions, as long as the child's status is 'active', even if the child's
358run-time PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
359the child yet or pm_runtime_disable() has been called for it). For this reason,
360once pm_runtime_set_active() has been called for the device, pm_runtime_enable()
361should be called for it too as soon as reasonably possible or its run-time PM
362status should be changed back to 'suspended' with the help of
363pm_runtime_set_suspended().
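
For instance, a ->probe() routine for a device that is already powered up when
probing starts might use the sequence sketched below, where foo_hw_init() is a
hypothetical driver-specific helper:

static int foo_probe(struct platform_device *pdev)
{
	int error = foo_hw_init(pdev);		/* the hardware is active here */

	if (error)
		return error;

	/* May fail if the parent is not 'active'; see Section 4. */
	error = pm_runtime_set_active(&pdev->dev);
	if (error)
		return error;

	pm_runtime_enable(&pdev->dev);
	return 0;
}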
364
365If the default initial run-time PM status of the device (i.e. 'suspended')
366reflects the actual state of the device, its bus type's or its driver's
367->probe() callback will likely need to wake it up using one of the PM core's
368helper functions described in Section 4. In that case, pm_runtime_resume()
369should be used. Of course, for this purpose the device's run-time PM has to be
370enabled earlier by calling pm_runtime_enable().
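
Conversely, if the device really is powered down when ->probe() starts, the
sequence might look like the sketch below, assuming that the device's bus type
(or, for platform devices, the platform code behind the weak
platform_pm_runtime_* hooks added along with this framework) implements the
run-time PM callbacks; foo_hw_init() is again hypothetical:

static int foo_probe(struct platform_device *pdev)
{
	int error;

	pm_runtime_enable(&pdev->dev);		/* the status is still 'suspended' */

	error = pm_runtime_resume(&pdev->dev);	/* runs ->runtime_resume() */
	if (error < 0) {
		pm_runtime_disable(&pdev->dev);
		return error;
	}

	return foo_hw_init(pdev);
}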
371
372If the device bus type's or driver's ->probe() or ->remove() callback runs
373pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
374they will fail, returning -EAGAIN, because the device's usage counter is
375incremented by the core before executing ->probe() and ->remove(). Still, it
376may be desirable to suspend the device as soon as ->probe() or ->remove() has
377finished, so the PM core uses pm_runtime_put_sync() to invoke the device bus
378type's ->runtime_idle() callback at that time.
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index c61642b40603..9f390ce335cb 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -12,4 +12,7 @@ struct dev_archdata {
12#endif 12#endif
13}; 13};
14 14
15struct pdev_archdata {
16};
17
15#endif 18#endif
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
index be4eefda4767..9395898dd49a 100644
--- a/arch/arm/plat-omap/debug-leds.c
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -281,24 +281,27 @@ static int /* __init */ fpga_probe(struct platform_device *pdev)
281 return 0; 281 return 0;
282} 282}
283 283
284static int fpga_suspend_late(struct platform_device *pdev, pm_message_t mesg) 284static int fpga_suspend_noirq(struct device *dev)
285{ 285{
286 __raw_writew(~0, &fpga->leds); 286 __raw_writew(~0, &fpga->leds);
287 return 0; 287 return 0;
288} 288}
289 289
290static int fpga_resume_early(struct platform_device *pdev) 290static int fpga_resume_noirq(struct device *dev)
291{ 291{
292 __raw_writew(~hw_led_state, &fpga->leds); 292 __raw_writew(~hw_led_state, &fpga->leds);
293 return 0; 293 return 0;
294} 294}
295 295
296static struct dev_pm_ops fpga_dev_pm_ops = {
297 .suspend_noirq = fpga_suspend_noirq,
298 .resume_noirq = fpga_resume_noirq,
299};
296 300
297static struct platform_driver led_driver = { 301static struct platform_driver led_driver = {
298 .driver.name = "omap_dbg_led", 302 .driver.name = "omap_dbg_led",
303 .driver.pm = &fpga_dev_pm_ops,
299 .probe = fpga_probe, 304 .probe = fpga_probe,
300 .suspend_late = fpga_suspend_late,
301 .resume_early = fpga_resume_early,
302}; 305};
303 306
304static int __init fpga_init(void) 307static int __init fpga_init(void)
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 9298bc0ab171..50b19a3027bc 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1315,8 +1315,9 @@ static struct irq_chip mpuio_irq_chip = {
1315 1315
1316#include <linux/platform_device.h> 1316#include <linux/platform_device.h>
1317 1317
1318static int omap_mpuio_suspend_late(struct platform_device *pdev, pm_message_t mesg) 1318static int omap_mpuio_suspend_noirq(struct device *dev)
1319{ 1319{
1320 struct platform_device *pdev = to_platform_device(dev);
1320 struct gpio_bank *bank = platform_get_drvdata(pdev); 1321 struct gpio_bank *bank = platform_get_drvdata(pdev);
1321 void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT; 1322 void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
1322 unsigned long flags; 1323 unsigned long flags;
@@ -1329,8 +1330,9 @@ static int omap_mpuio_suspend_late(struct platform_device *pdev, pm_message_t me
1329 return 0; 1330 return 0;
1330} 1331}
1331 1332
1332static int omap_mpuio_resume_early(struct platform_device *pdev) 1333static int omap_mpuio_resume_noirq(struct device *dev)
1333{ 1334{
1335 struct platform_device *pdev = to_platform_device(dev);
1334 struct gpio_bank *bank = platform_get_drvdata(pdev); 1336 struct gpio_bank *bank = platform_get_drvdata(pdev);
1335 void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT; 1337 void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
1336 unsigned long flags; 1338 unsigned long flags;
@@ -1342,14 +1344,18 @@ static int omap_mpuio_resume_early(struct platform_device *pdev)
1342 return 0; 1344 return 0;
1343} 1345}
1344 1346
1347static struct dev_pm_ops omap_mpuio_dev_pm_ops = {
1348 .suspend_noirq = omap_mpuio_suspend_noirq,
1349 .resume_noirq = omap_mpuio_resume_noirq,
1350};
1351
1345/* use platform_driver for this, now that there's no longer any 1352/* use platform_driver for this, now that there's no longer any
1346 * point to sys_device (other than not disturbing old code). 1353 * point to sys_device (other than not disturbing old code).
1347 */ 1354 */
1348static struct platform_driver omap_mpuio_driver = { 1355static struct platform_driver omap_mpuio_driver = {
1349 .suspend_late = omap_mpuio_suspend_late,
1350 .resume_early = omap_mpuio_resume_early,
1351 .driver = { 1356 .driver = {
1352 .name = "mpuio", 1357 .name = "mpuio",
1358 .pm = &omap_mpuio_dev_pm_ops,
1353 }, 1359 },
1354}; 1360};
1355 1361
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 41ab85d66f33..d66d446b127c 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -15,4 +15,7 @@ struct dev_archdata {
15#endif 15#endif
16}; 16};
17 17
18struct pdev_archdata {
19};
20
18#endif /* _ASM_IA64_DEVICE_H */ 21#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index c042830793ed..30286db27c1c 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -16,6 +16,9 @@ struct dev_archdata {
16 struct device_node *of_node; 16 struct device_node *of_node;
17}; 17};
18 18
19struct pdev_archdata {
20};
21
19#endif /* _ASM_MICROBLAZE_DEVICE_H */ 22#endif /* _ASM_MICROBLAZE_DEVICE_H */
20 23
21 24
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 7d2277cef09a..e3e06e0f7fc0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -30,4 +30,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
30 return ad->of_node; 30 return ad->of_node;
31} 31}
32 32
33struct pdev_archdata {
34};
35
33#endif /* _ASM_POWERPC_DEVICE_H */ 36#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index 3702e087df2c..f3b85b6b0b76 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -32,4 +32,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
32 return ad->prom_node; 32 return ad->prom_node;
33} 33}
34 34
35struct pdev_archdata {
36};
37
35#endif /* _ASM_SPARC_DEVICE_H */ 38#endif /* _ASM_SPARC_DEVICE_H */
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 4994a20acbcb..cee34e9ca45b 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -13,4 +13,7 @@ struct dma_map_ops *dma_ops;
13#endif 13#endif
14}; 14};
15 15
16struct pdev_archdata {
17};
18
16#endif /* _ASM_X86_DEVICE_H */ 19#endif /* _ASM_X86_DEVICE_H */
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0106875f01d..7b34b3a48f67 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -23,6 +23,7 @@
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/wait.h> 24#include <linux/wait.h>
25#include <linux/async.h> 25#include <linux/async.h>
26#include <linux/pm_runtime.h>
26 27
27#include "base.h" 28#include "base.h"
28#include "power/power.h" 29#include "power/power.h"
@@ -202,7 +203,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
202 pr_debug("bus: '%s': %s: matched device %s with driver %s\n", 203 pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
203 drv->bus->name, __func__, dev_name(dev), drv->name); 204 drv->bus->name, __func__, dev_name(dev), drv->name);
204 205
206 pm_runtime_get_noresume(dev);
207 pm_runtime_barrier(dev);
205 ret = really_probe(dev, drv); 208 ret = really_probe(dev, drv);
209 pm_runtime_put_sync(dev);
206 210
207 return ret; 211 return ret;
208} 212}
@@ -245,7 +249,9 @@ int device_attach(struct device *dev)
245 ret = 0; 249 ret = 0;
246 } 250 }
247 } else { 251 } else {
252 pm_runtime_get_noresume(dev);
248 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); 253 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
254 pm_runtime_put_sync(dev);
249 } 255 }
250 up(&dev->sem); 256 up(&dev->sem);
251 return ret; 257 return ret;
@@ -306,6 +312,9 @@ static void __device_release_driver(struct device *dev)
306 312
307 drv = dev->driver; 313 drv = dev->driver;
308 if (drv) { 314 if (drv) {
315 pm_runtime_get_noresume(dev);
316 pm_runtime_barrier(dev);
317
309 driver_sysfs_remove(dev); 318 driver_sysfs_remove(dev);
310 319
311 if (dev->bus) 320 if (dev->bus)
@@ -324,6 +333,8 @@ static void __device_release_driver(struct device *dev)
324 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 333 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
325 BUS_NOTIFY_UNBOUND_DRIVER, 334 BUS_NOTIFY_UNBOUND_DRIVER,
326 dev); 335 dev);
336
337 pm_runtime_put_sync(dev);
327 } 338 }
328} 339}
329 340
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 456594bd97bc..0f7d434ce983 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -17,6 +17,7 @@
17#include <linux/bootmem.h> 17#include <linux/bootmem.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/pm_runtime.h>
20 21
21#include "base.h" 22#include "base.h"
22 23
@@ -625,30 +626,6 @@ static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
625 return ret; 626 return ret;
626} 627}
627 628
628static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
629{
630 struct platform_driver *pdrv = to_platform_driver(dev->driver);
631 struct platform_device *pdev = to_platform_device(dev);
632 int ret = 0;
633
634 if (dev->driver && pdrv->suspend_late)
635 ret = pdrv->suspend_late(pdev, mesg);
636
637 return ret;
638}
639
640static int platform_legacy_resume_early(struct device *dev)
641{
642 struct platform_driver *pdrv = to_platform_driver(dev->driver);
643 struct platform_device *pdev = to_platform_device(dev);
644 int ret = 0;
645
646 if (dev->driver && pdrv->resume_early)
647 ret = pdrv->resume_early(pdev);
648
649 return ret;
650}
651
652static int platform_legacy_resume(struct device *dev) 629static int platform_legacy_resume(struct device *dev)
653{ 630{
654 struct platform_driver *pdrv = to_platform_driver(dev->driver); 631 struct platform_driver *pdrv = to_platform_driver(dev->driver);
@@ -680,6 +657,13 @@ static void platform_pm_complete(struct device *dev)
680 drv->pm->complete(dev); 657 drv->pm->complete(dev);
681} 658}
682 659
660#else /* !CONFIG_PM_SLEEP */
661
662#define platform_pm_prepare NULL
663#define platform_pm_complete NULL
664
665#endif /* !CONFIG_PM_SLEEP */
666
683#ifdef CONFIG_SUSPEND 667#ifdef CONFIG_SUSPEND
684 668
685static int platform_pm_suspend(struct device *dev) 669static int platform_pm_suspend(struct device *dev)
@@ -711,8 +695,6 @@ static int platform_pm_suspend_noirq(struct device *dev)
711 if (drv->pm) { 695 if (drv->pm) {
712 if (drv->pm->suspend_noirq) 696 if (drv->pm->suspend_noirq)
713 ret = drv->pm->suspend_noirq(dev); 697 ret = drv->pm->suspend_noirq(dev);
714 } else {
715 ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
716 } 698 }
717 699
718 return ret; 700 return ret;
@@ -747,8 +729,6 @@ static int platform_pm_resume_noirq(struct device *dev)
747 if (drv->pm) { 729 if (drv->pm) {
748 if (drv->pm->resume_noirq) 730 if (drv->pm->resume_noirq)
749 ret = drv->pm->resume_noirq(dev); 731 ret = drv->pm->resume_noirq(dev);
750 } else {
751 ret = platform_legacy_resume_early(dev);
752 } 732 }
753 733
754 return ret; 734 return ret;
@@ -794,8 +774,6 @@ static int platform_pm_freeze_noirq(struct device *dev)
794 if (drv->pm) { 774 if (drv->pm) {
795 if (drv->pm->freeze_noirq) 775 if (drv->pm->freeze_noirq)
796 ret = drv->pm->freeze_noirq(dev); 776 ret = drv->pm->freeze_noirq(dev);
797 } else {
798 ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
799 } 777 }
800 778
801 return ret; 779 return ret;
@@ -830,8 +808,6 @@ static int platform_pm_thaw_noirq(struct device *dev)
830 if (drv->pm) { 808 if (drv->pm) {
831 if (drv->pm->thaw_noirq) 809 if (drv->pm->thaw_noirq)
832 ret = drv->pm->thaw_noirq(dev); 810 ret = drv->pm->thaw_noirq(dev);
833 } else {
834 ret = platform_legacy_resume_early(dev);
835 } 811 }
836 812
837 return ret; 813 return ret;
@@ -866,8 +842,6 @@ static int platform_pm_poweroff_noirq(struct device *dev)
866 if (drv->pm) { 842 if (drv->pm) {
867 if (drv->pm->poweroff_noirq) 843 if (drv->pm->poweroff_noirq)
868 ret = drv->pm->poweroff_noirq(dev); 844 ret = drv->pm->poweroff_noirq(dev);
869 } else {
870 ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
871 } 845 }
872 846
873 return ret; 847 return ret;
@@ -902,8 +876,6 @@ static int platform_pm_restore_noirq(struct device *dev)
902 if (drv->pm) { 876 if (drv->pm) {
903 if (drv->pm->restore_noirq) 877 if (drv->pm->restore_noirq)
904 ret = drv->pm->restore_noirq(dev); 878 ret = drv->pm->restore_noirq(dev);
905 } else {
906 ret = platform_legacy_resume_early(dev);
907 } 879 }
908 880
909 return ret; 881 return ret;
@@ -922,7 +894,32 @@ static int platform_pm_restore_noirq(struct device *dev)
922 894
923#endif /* !CONFIG_HIBERNATION */ 895#endif /* !CONFIG_HIBERNATION */
924 896
925static struct dev_pm_ops platform_dev_pm_ops = { 897#ifdef CONFIG_PM_RUNTIME
898
899int __weak platform_pm_runtime_suspend(struct device *dev)
900{
901 return -ENOSYS;
902};
903
904int __weak platform_pm_runtime_resume(struct device *dev)
905{
906 return -ENOSYS;
907};
908
909int __weak platform_pm_runtime_idle(struct device *dev)
910{
911 return -ENOSYS;
912};
913
914#else /* !CONFIG_PM_RUNTIME */
915
916#define platform_pm_runtime_suspend NULL
917#define platform_pm_runtime_resume NULL
918#define platform_pm_runtime_idle NULL
919
920#endif /* !CONFIG_PM_RUNTIME */
921
922static const struct dev_pm_ops platform_dev_pm_ops = {
926 .prepare = platform_pm_prepare, 923 .prepare = platform_pm_prepare,
927 .complete = platform_pm_complete, 924 .complete = platform_pm_complete,
928 .suspend = platform_pm_suspend, 925 .suspend = platform_pm_suspend,
@@ -937,22 +934,17 @@ static struct dev_pm_ops platform_dev_pm_ops = {
937 .thaw_noirq = platform_pm_thaw_noirq, 934 .thaw_noirq = platform_pm_thaw_noirq,
938 .poweroff_noirq = platform_pm_poweroff_noirq, 935 .poweroff_noirq = platform_pm_poweroff_noirq,
939 .restore_noirq = platform_pm_restore_noirq, 936 .restore_noirq = platform_pm_restore_noirq,
937 .runtime_suspend = platform_pm_runtime_suspend,
938 .runtime_resume = platform_pm_runtime_resume,
939 .runtime_idle = platform_pm_runtime_idle,
940}; 940};
941 941
942#define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops)
943
944#else /* !CONFIG_PM_SLEEP */
945
946#define PLATFORM_PM_OPS_PTR NULL
947
948#endif /* !CONFIG_PM_SLEEP */
949
950struct bus_type platform_bus_type = { 942struct bus_type platform_bus_type = {
951 .name = "platform", 943 .name = "platform",
952 .dev_attrs = platform_dev_attrs, 944 .dev_attrs = platform_dev_attrs,
953 .match = platform_match, 945 .match = platform_match,
954 .uevent = platform_uevent, 946 .uevent = platform_uevent,
955 .pm = PLATFORM_PM_OPS_PTR, 947 .pm = &platform_dev_pm_ops,
956}; 948};
957EXPORT_SYMBOL_GPL(platform_bus_type); 949EXPORT_SYMBOL_GPL(platform_bus_type);
958 950
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 911208b89259..3ce3519e8f30 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_PM) += sysfs.o 1obj-$(CONFIG_PM) += sysfs.o
2obj-$(CONFIG_PM_SLEEP) += main.o 2obj-$(CONFIG_PM_SLEEP) += main.o
3obj-$(CONFIG_PM_RUNTIME) += runtime.o
3obj-$(CONFIG_PM_TRACE_RTC) += trace.o 4obj-$(CONFIG_PM_TRACE_RTC) += trace.o
4 5
5ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 6ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 58a3e572f2c9..86990011277b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -21,6 +21,7 @@
21#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/pm.h> 23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
24#include <linux/resume-trace.h> 25#include <linux/resume-trace.h>
25#include <linux/rwsem.h> 26#include <linux/rwsem.h>
26#include <linux/interrupt.h> 27#include <linux/interrupt.h>
@@ -49,6 +50,16 @@ static DEFINE_MUTEX(dpm_list_mtx);
49static bool transition_started; 50static bool transition_started;
50 51
51/** 52/**
53 * device_pm_init - Initialize the PM-related part of a device object
54 * @dev: Device object being initialized.
55 */
56void device_pm_init(struct device *dev)
57{
58 dev->power.status = DPM_ON;
59 pm_runtime_init(dev);
60}
61
62/**
52 * device_pm_lock - lock the list of active devices used by the PM core 63 * device_pm_lock - lock the list of active devices used by the PM core
53 */ 64 */
54void device_pm_lock(void) 65void device_pm_lock(void)
@@ -105,6 +116,7 @@ void device_pm_remove(struct device *dev)
105 mutex_lock(&dpm_list_mtx); 116 mutex_lock(&dpm_list_mtx);
106 list_del_init(&dev->power.entry); 117 list_del_init(&dev->power.entry);
107 mutex_unlock(&dpm_list_mtx); 118 mutex_unlock(&dpm_list_mtx);
119 pm_runtime_remove(dev);
108} 120}
109 121
110/** 122/**
@@ -157,8 +169,9 @@ void device_pm_move_last(struct device *dev)
157 * @ops: PM operations to choose from. 169 * @ops: PM operations to choose from.
158 * @state: PM transition of the system being carried out. 170 * @state: PM transition of the system being carried out.
159 */ 171 */
160static int pm_op(struct device *dev, struct dev_pm_ops *ops, 172static int pm_op(struct device *dev,
161 pm_message_t state) 173 const struct dev_pm_ops *ops,
174 pm_message_t state)
162{ 175{
163 int error = 0; 176 int error = 0;
164 177
@@ -220,7 +233,8 @@ static int pm_op(struct device *dev, struct dev_pm_ops *ops,
220 * The operation is executed with interrupts disabled by the only remaining 233 * The operation is executed with interrupts disabled by the only remaining
221 * functional CPU in the system. 234 * functional CPU in the system.
222 */ 235 */
223static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops, 236static int pm_noirq_op(struct device *dev,
237 const struct dev_pm_ops *ops,
224 pm_message_t state) 238 pm_message_t state)
225{ 239{
226 int error = 0; 240 int error = 0;
@@ -510,6 +524,7 @@ static void dpm_complete(pm_message_t state)
510 mutex_unlock(&dpm_list_mtx); 524 mutex_unlock(&dpm_list_mtx);
511 525
512 device_complete(dev, state); 526 device_complete(dev, state);
527 pm_runtime_put_noidle(dev);
513 528
514 mutex_lock(&dpm_list_mtx); 529 mutex_lock(&dpm_list_mtx);
515 } 530 }
@@ -755,7 +770,14 @@ static int dpm_prepare(pm_message_t state)
755 dev->power.status = DPM_PREPARING; 770 dev->power.status = DPM_PREPARING;
756 mutex_unlock(&dpm_list_mtx); 771 mutex_unlock(&dpm_list_mtx);
757 772
758 error = device_prepare(dev, state); 773 pm_runtime_get_noresume(dev);
774 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
775 /* Wake-up requested during system sleep transition. */
776 pm_runtime_put_noidle(dev);
777 error = -EBUSY;
778 } else {
779 error = device_prepare(dev, state);
780 }
759 781
760 mutex_lock(&dpm_list_mtx); 782 mutex_lock(&dpm_list_mtx);
761 if (error) { 783 if (error) {
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c7cb4fc3735c..b8fa1aa5225a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,7 +1,14 @@
1static inline void device_pm_init(struct device *dev) 1#ifdef CONFIG_PM_RUNTIME
2{ 2
3 dev->power.status = DPM_ON; 3extern void pm_runtime_init(struct device *dev);
4} 4extern void pm_runtime_remove(struct device *dev);
5
6#else /* !CONFIG_PM_RUNTIME */
7
8static inline void pm_runtime_init(struct device *dev) {}
9static inline void pm_runtime_remove(struct device *dev) {}
10
11#endif /* !CONFIG_PM_RUNTIME */
5 12
6#ifdef CONFIG_PM_SLEEP 13#ifdef CONFIG_PM_SLEEP
7 14
@@ -16,23 +23,33 @@ static inline struct device *to_device(struct list_head *entry)
16 return container_of(entry, struct device, power.entry); 23 return container_of(entry, struct device, power.entry);
17} 24}
18 25
26extern void device_pm_init(struct device *dev);
19extern void device_pm_add(struct device *); 27extern void device_pm_add(struct device *);
20extern void device_pm_remove(struct device *); 28extern void device_pm_remove(struct device *);
21extern void device_pm_move_before(struct device *, struct device *); 29extern void device_pm_move_before(struct device *, struct device *);
22extern void device_pm_move_after(struct device *, struct device *); 30extern void device_pm_move_after(struct device *, struct device *);
23extern void device_pm_move_last(struct device *); 31extern void device_pm_move_last(struct device *);
24 32
25#else /* CONFIG_PM_SLEEP */ 33#else /* !CONFIG_PM_SLEEP */
34
35static inline void device_pm_init(struct device *dev)
36{
37 pm_runtime_init(dev);
38}
39
40static inline void device_pm_remove(struct device *dev)
41{
42 pm_runtime_remove(dev);
43}
26 44
27static inline void device_pm_add(struct device *dev) {} 45static inline void device_pm_add(struct device *dev) {}
28static inline void device_pm_remove(struct device *dev) {}
29static inline void device_pm_move_before(struct device *deva, 46static inline void device_pm_move_before(struct device *deva,
30 struct device *devb) {} 47 struct device *devb) {}
31static inline void device_pm_move_after(struct device *deva, 48static inline void device_pm_move_after(struct device *deva,
32 struct device *devb) {} 49 struct device *devb) {}
33static inline void device_pm_move_last(struct device *dev) {} 50static inline void device_pm_move_last(struct device *dev) {}
34 51
35#endif 52#endif /* !CONFIG_PM_SLEEP */
36 53
37#ifdef CONFIG_PM 54#ifdef CONFIG_PM
38 55
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 000000000000..38556f6cc22d
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1011 @@
1/*
2 * drivers/base/power/runtime.c - Helper functions for device run-time PM
3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/sched.h>
10#include <linux/pm_runtime.h>
11#include <linux/jiffies.h>
12
13static int __pm_runtime_resume(struct device *dev, bool from_wq);
14static int __pm_request_idle(struct device *dev);
15static int __pm_request_resume(struct device *dev);
16
17/**
18 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
19 * @dev: Device to handle.
20 */
21static void pm_runtime_deactivate_timer(struct device *dev)
22{
23 if (dev->power.timer_expires > 0) {
24 del_timer(&dev->power.suspend_timer);
25 dev->power.timer_expires = 0;
26 }
27}
28
29/**
30 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
31 * @dev: Device to handle.
32 */
33static void pm_runtime_cancel_pending(struct device *dev)
34{
35 pm_runtime_deactivate_timer(dev);
36 /*
37 * In case there's a request pending, make sure its work function will
38 * return without doing anything.
39 */
40 dev->power.request = RPM_REQ_NONE;
41}
42
43/**
44 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
45 * @dev: Device to notify the bus type about.
46 *
47 * This function must be called under dev->power.lock with interrupts disabled.
48 */
49static int __pm_runtime_idle(struct device *dev)
50 __releases(&dev->power.lock) __acquires(&dev->power.lock)
51{
52 int retval = 0;
53
54 dev_dbg(dev, "__pm_runtime_idle()!\n");
55
56 if (dev->power.runtime_error)
57 retval = -EINVAL;
58 else if (dev->power.idle_notification)
59 retval = -EINPROGRESS;
60 else if (atomic_read(&dev->power.usage_count) > 0
61 || dev->power.disable_depth > 0
62 || dev->power.runtime_status != RPM_ACTIVE)
63 retval = -EAGAIN;
64 else if (!pm_children_suspended(dev))
65 retval = -EBUSY;
66 if (retval)
67 goto out;
68
69 if (dev->power.request_pending) {
70 /*
71 * If an idle notification request is pending, cancel it. Any
72 * other pending request takes precedence over us.
73 */
74 if (dev->power.request == RPM_REQ_IDLE) {
75 dev->power.request = RPM_REQ_NONE;
76 } else if (dev->power.request != RPM_REQ_NONE) {
77 retval = -EAGAIN;
78 goto out;
79 }
80 }
81
82 dev->power.idle_notification = true;
83
84 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
85 spin_unlock_irq(&dev->power.lock);
86
87 dev->bus->pm->runtime_idle(dev);
88
89 spin_lock_irq(&dev->power.lock);
90 }
91
92 dev->power.idle_notification = false;
93 wake_up_all(&dev->power.wait_queue);
94
95 out:
96 dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);
97
98 return retval;
99}
100
101/**
102 * pm_runtime_idle - Notify device bus type if the device can be suspended.
103 * @dev: Device to notify the bus type about.
104 */
105int pm_runtime_idle(struct device *dev)
106{
107 int retval;
108
109 spin_lock_irq(&dev->power.lock);
110 retval = __pm_runtime_idle(dev);
111 spin_unlock_irq(&dev->power.lock);
112
113 return retval;
114}
115EXPORT_SYMBOL_GPL(pm_runtime_idle);
116
117/**
118 * __pm_runtime_suspend - Carry out run-time suspend of given device.
119 * @dev: Device to suspend.
120 * @from_wq: If set, the function has been called via pm_wq.
121 *
122 * Check if the device can be suspended and run the ->runtime_suspend() callback
123 * provided by its bus type. If another suspend has been started earlier, wait
124 * for it to finish. If an idle notification or suspend request is pending or
125 * scheduled, cancel it.
126 *
127 * This function must be called under dev->power.lock with interrupts disabled.
128 */
129int __pm_runtime_suspend(struct device *dev, bool from_wq)
130 __releases(&dev->power.lock) __acquires(&dev->power.lock)
131{
132 struct device *parent = NULL;
133 bool notify = false;
134 int retval = 0;
135
136 dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
137 from_wq ? " from workqueue" : "");
138
139 repeat:
140 if (dev->power.runtime_error) {
141 retval = -EINVAL;
142 goto out;
143 }
144
145 /* Pending resume requests take precedence over us. */
146 if (dev->power.request_pending
147 && dev->power.request == RPM_REQ_RESUME) {
148 retval = -EAGAIN;
149 goto out;
150 }
151
152 /* Other scheduled or pending requests need to be canceled. */
153 pm_runtime_cancel_pending(dev);
154
155 if (dev->power.runtime_status == RPM_SUSPENDED)
156 retval = 1;
157 else if (dev->power.runtime_status == RPM_RESUMING
158 || dev->power.disable_depth > 0
159 || atomic_read(&dev->power.usage_count) > 0)
160 retval = -EAGAIN;
161 else if (!pm_children_suspended(dev))
162 retval = -EBUSY;
163 if (retval)
164 goto out;
165
166 if (dev->power.runtime_status == RPM_SUSPENDING) {
167 DEFINE_WAIT(wait);
168
169 if (from_wq) {
170 retval = -EINPROGRESS;
171 goto out;
172 }
173
174 /* Wait for the other suspend running in parallel with us. */
175 for (;;) {
176 prepare_to_wait(&dev->power.wait_queue, &wait,
177 TASK_UNINTERRUPTIBLE);
178 if (dev->power.runtime_status != RPM_SUSPENDING)
179 break;
180
181 spin_unlock_irq(&dev->power.lock);
182
183 schedule();
184
185 spin_lock_irq(&dev->power.lock);
186 }
187 finish_wait(&dev->power.wait_queue, &wait);
188 goto repeat;
189 }
190
191 dev->power.runtime_status = RPM_SUSPENDING;
192
193 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
194 spin_unlock_irq(&dev->power.lock);
195
196 retval = dev->bus->pm->runtime_suspend(dev);
197
198 spin_lock_irq(&dev->power.lock);
199 dev->power.runtime_error = retval;
200 } else {
201 retval = -ENOSYS;
202 }
203
204 if (retval) {
205 dev->power.runtime_status = RPM_ACTIVE;
206 pm_runtime_cancel_pending(dev);
207 dev->power.deferred_resume = false;
208
209 if (retval == -EAGAIN || retval == -EBUSY) {
210 notify = true;
211 dev->power.runtime_error = 0;
212 }
213 } else {
214 dev->power.runtime_status = RPM_SUSPENDED;
215
216 if (dev->parent) {
217 parent = dev->parent;
218 atomic_add_unless(&parent->power.child_count, -1, 0);
219 }
220 }
221 wake_up_all(&dev->power.wait_queue);
222
223 if (dev->power.deferred_resume) {
224 dev->power.deferred_resume = false;
225 __pm_runtime_resume(dev, false);
226 retval = -EAGAIN;
227 goto out;
228 }
229
230 if (notify)
231 __pm_runtime_idle(dev);
232
233 if (parent && !parent->power.ignore_children) {
234 spin_unlock_irq(&dev->power.lock);
235
236 pm_request_idle(parent);
237
238 spin_lock_irq(&dev->power.lock);
239 }
240
241 out:
242 dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
243
244 return retval;
245}
246
247/**
248 * pm_runtime_suspend - Carry out run-time suspend of given device.
249 * @dev: Device to suspend.
250 */
251int pm_runtime_suspend(struct device *dev)
252{
253 int retval;
254
255 spin_lock_irq(&dev->power.lock);
256 retval = __pm_runtime_suspend(dev, false);
257 spin_unlock_irq(&dev->power.lock);
258
259 return retval;
260}
261EXPORT_SYMBOL_GPL(pm_runtime_suspend);
262
263/**
264 * __pm_runtime_resume - Carry out run-time resume of given device.
265 * @dev: Device to resume.
266 * @from_wq: If set, the function has been called via pm_wq.
267 *
268 * Check if the device can be woken up and run the ->runtime_resume() callback
269 * provided by its bus type. If another resume has been started earlier, wait
270 * for it to finish. If there's a suspend running in parallel with this
271 * function, wait for it to finish and resume the device. Cancel any scheduled
272 * or pending requests.
273 *
274 * This function must be called under dev->power.lock with interrupts disabled.
275 */
276int __pm_runtime_resume(struct device *dev, bool from_wq)
277 __releases(&dev->power.lock) __acquires(&dev->power.lock)
278{
279 struct device *parent = NULL;
280 int retval = 0;
281
282 dev_dbg(dev, "__pm_runtime_resume()%s!\n",
283 from_wq ? " from workqueue" : "");
284
285 repeat:
286 if (dev->power.runtime_error) {
287 retval = -EINVAL;
288 goto out;
289 }
290
291 pm_runtime_cancel_pending(dev);
292
293 if (dev->power.runtime_status == RPM_ACTIVE)
294 retval = 1;
295 else if (dev->power.disable_depth > 0)
296 retval = -EAGAIN;
297 if (retval)
298 goto out;
299
300 if (dev->power.runtime_status == RPM_RESUMING
301 || dev->power.runtime_status == RPM_SUSPENDING) {
302 DEFINE_WAIT(wait);
303
304 if (from_wq) {
305 if (dev->power.runtime_status == RPM_SUSPENDING)
306 dev->power.deferred_resume = true;
307 retval = -EINPROGRESS;
308 goto out;
309 }
310
311 /* Wait for the operation carried out in parallel with us. */
312 for (;;) {
313 prepare_to_wait(&dev->power.wait_queue, &wait,
314 TASK_UNINTERRUPTIBLE);
315 if (dev->power.runtime_status != RPM_RESUMING
316 && dev->power.runtime_status != RPM_SUSPENDING)
317 break;
318
319 spin_unlock_irq(&dev->power.lock);
320
321 schedule();
322
323 spin_lock_irq(&dev->power.lock);
324 }
325 finish_wait(&dev->power.wait_queue, &wait);
326 goto repeat;
327 }
328
329 if (!parent && dev->parent) {
330 /*
331 * Increment the parent's resume counter and resume it if
332 * necessary.
333 */
334 parent = dev->parent;
335 spin_unlock_irq(&dev->power.lock);
336
337 pm_runtime_get_noresume(parent);
338
339 spin_lock_irq(&parent->power.lock);
340 /*
341 * We can resume if the parent's run-time PM is disabled or it
342 * is set to ignore children.
343 */
344 if (!parent->power.disable_depth
345 && !parent->power.ignore_children) {
346 __pm_runtime_resume(parent, false);
347 if (parent->power.runtime_status != RPM_ACTIVE)
348 retval = -EBUSY;
349 }
350 spin_unlock_irq(&parent->power.lock);
351
352 spin_lock_irq(&dev->power.lock);
353 if (retval)
354 goto out;
355 goto repeat;
356 }
357
358 dev->power.runtime_status = RPM_RESUMING;
359
360 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
361 spin_unlock_irq(&dev->power.lock);
362
363 retval = dev->bus->pm->runtime_resume(dev);
364
365 spin_lock_irq(&dev->power.lock);
366 dev->power.runtime_error = retval;
367 } else {
368 retval = -ENOSYS;
369 }
370
371 if (retval) {
372 dev->power.runtime_status = RPM_SUSPENDED;
373 pm_runtime_cancel_pending(dev);
374 } else {
375 dev->power.runtime_status = RPM_ACTIVE;
376 if (parent)
377 atomic_inc(&parent->power.child_count);
378 }
379 wake_up_all(&dev->power.wait_queue);
380
381 if (!retval)
382 __pm_request_idle(dev);
383
384 out:
385 if (parent) {
386 spin_unlock_irq(&dev->power.lock);
387
388 pm_runtime_put(parent);
389
390 spin_lock_irq(&dev->power.lock);
391 }
392
393 dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
394
395 return retval;
396}
397
398/**
399 * pm_runtime_resume - Carry out run-time resume of given device.
400 * @dev: Device to resume.
401 */
402int pm_runtime_resume(struct device *dev)
403{
404 int retval;
405
406 spin_lock_irq(&dev->power.lock);
407 retval = __pm_runtime_resume(dev, false);
408 spin_unlock_irq(&dev->power.lock);
409
410 return retval;
411}
412EXPORT_SYMBOL_GPL(pm_runtime_resume);
413
414/**
415 * pm_runtime_work - Universal run-time PM work function.
416 * @work: Work structure used for scheduling the execution of this function.
417 *
418 * Use @work to get the device object the work is to be done for, determine what
419 * is to be done and execute the appropriate run-time PM function.
420 */
421static void pm_runtime_work(struct work_struct *work)
422{
423 struct device *dev = container_of(work, struct device, power.work);
424 enum rpm_request req;
425
426 spin_lock_irq(&dev->power.lock);
427
428 if (!dev->power.request_pending)
429 goto out;
430
431 req = dev->power.request;
432 dev->power.request = RPM_REQ_NONE;
433 dev->power.request_pending = false;
434
435 switch (req) {
436 case RPM_REQ_NONE:
437 break;
438 case RPM_REQ_IDLE:
439 __pm_runtime_idle(dev);
440 break;
441 case RPM_REQ_SUSPEND:
442 __pm_runtime_suspend(dev, true);
443 break;
444 case RPM_REQ_RESUME:
445 __pm_runtime_resume(dev, true);
446 break;
447 }
448
449 out:
450 spin_unlock_irq(&dev->power.lock);
451}
452
453/**
454 * __pm_request_idle - Submit an idle notification request for given device.
455 * @dev: Device to handle.
456 *
457 * Check if the device's run-time PM status is correct for suspending the device
458 * and queue up a request to run __pm_runtime_idle() for it.
459 *
460 * This function must be called under dev->power.lock with interrupts disabled.
461 */
462static int __pm_request_idle(struct device *dev)
463{
464 int retval = 0;
465
466 if (dev->power.runtime_error)
467 retval = -EINVAL;
468 else if (atomic_read(&dev->power.usage_count) > 0
469 || dev->power.disable_depth > 0
470 || dev->power.runtime_status == RPM_SUSPENDED
471 || dev->power.runtime_status == RPM_SUSPENDING)
472 retval = -EAGAIN;
473 else if (!pm_children_suspended(dev))
474 retval = -EBUSY;
475 if (retval)
476 return retval;
477
478 if (dev->power.request_pending) {
479		/* Any requests other than RPM_REQ_IDLE take precedence. */
480 if (dev->power.request == RPM_REQ_NONE)
481 dev->power.request = RPM_REQ_IDLE;
482 else if (dev->power.request != RPM_REQ_IDLE)
483 retval = -EAGAIN;
484 return retval;
485 }
486
487 dev->power.request = RPM_REQ_IDLE;
488 dev->power.request_pending = true;
489 queue_work(pm_wq, &dev->power.work);
490
491 return retval;
492}
493
494/**
495 * pm_request_idle - Submit an idle notification request for given device.
496 * @dev: Device to handle.
497 */
498int pm_request_idle(struct device *dev)
499{
500 unsigned long flags;
501 int retval;
502
503 spin_lock_irqsave(&dev->power.lock, flags);
504 retval = __pm_request_idle(dev);
505 spin_unlock_irqrestore(&dev->power.lock, flags);
506
507 return retval;
508}
509EXPORT_SYMBOL_GPL(pm_request_idle);
510
511/**
512 * __pm_request_suspend - Submit a suspend request for given device.
513 * @dev: Device to suspend.
514 *
515 * This function must be called under dev->power.lock with interrupts disabled.
516 */
517static int __pm_request_suspend(struct device *dev)
518{
519 int retval = 0;
520
521 if (dev->power.runtime_error)
522 return -EINVAL;
523
524 if (dev->power.runtime_status == RPM_SUSPENDED)
525 retval = 1;
526 else if (atomic_read(&dev->power.usage_count) > 0
527 || dev->power.disable_depth > 0)
528 retval = -EAGAIN;
529 else if (dev->power.runtime_status == RPM_SUSPENDING)
530 retval = -EINPROGRESS;
531 else if (!pm_children_suspended(dev))
532 retval = -EBUSY;
533 if (retval < 0)
534 return retval;
535
536 pm_runtime_deactivate_timer(dev);
537
538 if (dev->power.request_pending) {
539 /*
540 * Pending resume requests take precedence over us, but we can
541 * overtake any other pending request.
542 */
543 if (dev->power.request == RPM_REQ_RESUME)
544 retval = -EAGAIN;
545 else if (dev->power.request != RPM_REQ_SUSPEND)
546 dev->power.request = retval ?
547 RPM_REQ_NONE : RPM_REQ_SUSPEND;
548 return retval;
549 } else if (retval) {
550 return retval;
551 }
552
553 dev->power.request = RPM_REQ_SUSPEND;
554 dev->power.request_pending = true;
555 queue_work(pm_wq, &dev->power.work);
556
557 return 0;
558}
559
560/**
561 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
562 * @data: Device pointer passed by pm_schedule_suspend().
563 *
564 * Check if the time is right and execute __pm_request_suspend() in that case.
565 */
566static void pm_suspend_timer_fn(unsigned long data)
567{
568 struct device *dev = (struct device *)data;
569 unsigned long flags;
570 unsigned long expires;
571
572 spin_lock_irqsave(&dev->power.lock, flags);
573
574 expires = dev->power.timer_expires;
575	/* If 'expires' is after 'jiffies', we've been called too early. */
576 if (expires > 0 && !time_after(expires, jiffies)) {
577 dev->power.timer_expires = 0;
578 __pm_request_suspend(dev);
579 }
580
581 spin_unlock_irqrestore(&dev->power.lock, flags);
582}
583
584/**
585 * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
586 * @dev: Device to suspend.
587 * @delay: Time to wait before submitting a suspend request, in milliseconds.
588 */
589int pm_schedule_suspend(struct device *dev, unsigned int delay)
590{
591 unsigned long flags;
592 int retval = 0;
593
594 spin_lock_irqsave(&dev->power.lock, flags);
595
596 if (dev->power.runtime_error) {
597 retval = -EINVAL;
598 goto out;
599 }
600
601 if (!delay) {
602 retval = __pm_request_suspend(dev);
603 goto out;
604 }
605
606 pm_runtime_deactivate_timer(dev);
607
608 if (dev->power.request_pending) {
609 /*
610 * Pending resume requests take precedence over us, but any
611 * other pending requests have to be canceled.
612 */
613 if (dev->power.request == RPM_REQ_RESUME) {
614 retval = -EAGAIN;
615 goto out;
616 }
617 dev->power.request = RPM_REQ_NONE;
618 }
619
620 if (dev->power.runtime_status == RPM_SUSPENDED)
621 retval = 1;
622 else if (dev->power.runtime_status == RPM_SUSPENDING)
623 retval = -EINPROGRESS;
624 else if (atomic_read(&dev->power.usage_count) > 0
625 || dev->power.disable_depth > 0)
626 retval = -EAGAIN;
627 else if (!pm_children_suspended(dev))
628 retval = -EBUSY;
629 if (retval)
630 goto out;
631
632 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
633 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
634
635 out:
636 spin_unlock_irqrestore(&dev->power.lock, flags);
637
638 return retval;
639}
640EXPORT_SYMBOL_GPL(pm_schedule_suspend);
641
642/**
643 * __pm_request_resume - Submit a resume request for given device.
644 * @dev: Device to resume.
645 *
646 * This function must be called under dev->power.lock with interrupts disabled.
647 */
648static int __pm_request_resume(struct device *dev)
649{
650 int retval = 0;
651
652 if (dev->power.runtime_error)
653 return -EINVAL;
654
655 if (dev->power.runtime_status == RPM_ACTIVE)
656 retval = 1;
657 else if (dev->power.runtime_status == RPM_RESUMING)
658 retval = -EINPROGRESS;
659 else if (dev->power.disable_depth > 0)
660 retval = -EAGAIN;
661 if (retval < 0)
662 return retval;
663
664 pm_runtime_deactivate_timer(dev);
665
666 if (dev->power.request_pending) {
667		/* If a non-resume request is pending, we can overtake it. */
668 dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
669 return retval;
670 } else if (retval) {
671 return retval;
672 }
673
674 dev->power.request = RPM_REQ_RESUME;
675 dev->power.request_pending = true;
676 queue_work(pm_wq, &dev->power.work);
677
678 return retval;
679}
680
681/**
682 * pm_request_resume - Submit a resume request for given device.
683 * @dev: Device to resume.
684 */
685int pm_request_resume(struct device *dev)
686{
687 unsigned long flags;
688 int retval;
689
690 spin_lock_irqsave(&dev->power.lock, flags);
691 retval = __pm_request_resume(dev);
692 spin_unlock_irqrestore(&dev->power.lock, flags);
693
694 return retval;
695}
696EXPORT_SYMBOL_GPL(pm_request_resume);
697
698/**
699 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
700 * @dev: Device to handle.
701 * @sync: If set and the device is suspended, resume it synchronously.
702 *
703 * Increment the usage count of the device and if it was zero previously,
704 * resume it or submit a resume request for it, depending on the value of @sync.
705 */
706int __pm_runtime_get(struct device *dev, bool sync)
707{
708 int retval = 1;
709
710 if (atomic_add_return(1, &dev->power.usage_count) == 1)
711 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
712
713 return retval;
714}
715EXPORT_SYMBOL_GPL(__pm_runtime_get);
716
717/**
718 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
719 * @dev: Device to handle.
720 * @sync: If the device's bus type is to be notified, do that synchronously.
721 *
722 * Decrement the usage count of the device and if it reaches zero, carry out a
723 * synchronous idle notification or submit an idle notification request for it,
724 * depending on the value of @sync.
725 */
726int __pm_runtime_put(struct device *dev, bool sync)
727{
728 int retval = 0;
729
730 if (atomic_dec_and_test(&dev->power.usage_count))
731 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
732
733 return retval;
734}
735EXPORT_SYMBOL_GPL(__pm_runtime_put);
736
737/**
738 * __pm_runtime_set_status - Set run-time PM status of a device.
739 * @dev: Device to handle.
740 * @status: New run-time PM status of the device.
741 *
742 * If run-time PM of the device is disabled or its power.runtime_error field is
743 * different from zero, the status may be changed either to RPM_ACTIVE, or to
744 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
745 * However, if the device has a parent and the parent is not active, and the
746 * parent's power.ignore_children flag is unset, the device's status cannot be
747 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
748 *
749 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
750 * and the device parent's counter of unsuspended children is modified to
751 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
752 * notification request for the parent is submitted.
753 */
754int __pm_runtime_set_status(struct device *dev, unsigned int status)
755{
756 struct device *parent = dev->parent;
757 unsigned long flags;
758 bool notify_parent = false;
759 int error = 0;
760
761 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
762 return -EINVAL;
763
764 spin_lock_irqsave(&dev->power.lock, flags);
765
766 if (!dev->power.runtime_error && !dev->power.disable_depth) {
767 error = -EAGAIN;
768 goto out;
769 }
770
771 if (dev->power.runtime_status == status)
772 goto out_set;
773
774 if (status == RPM_SUSPENDED) {
775		/* It is always possible to set the status to 'suspended'. */
776 if (parent) {
777 atomic_add_unless(&parent->power.child_count, -1, 0);
778 notify_parent = !parent->power.ignore_children;
779 }
780 goto out_set;
781 }
782
783 if (parent) {
784 spin_lock_irq(&parent->power.lock);
785
786 /*
787		 * It is invalid to put an active child under a parent that is
788		 * not active, has run-time PM enabled, and has the
789		 * 'power.ignore_children' flag unset.
790 */
791 if (!parent->power.disable_depth
792 && !parent->power.ignore_children
793 && parent->power.runtime_status != RPM_ACTIVE) {
794 error = -EBUSY;
795 } else {
796 if (dev->power.runtime_status == RPM_SUSPENDED)
797 atomic_inc(&parent->power.child_count);
798 }
799
800 spin_unlock_irq(&parent->power.lock);
801
802 if (error)
803 goto out;
804 }
805
806 out_set:
807 dev->power.runtime_status = status;
808 dev->power.runtime_error = 0;
809 out:
810 spin_unlock_irqrestore(&dev->power.lock, flags);
811
812 if (notify_parent)
813 pm_request_idle(parent);
814
815 return error;
816}
817EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
818
819/**
820 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
821 * @dev: Device to handle.
822 *
823 * Flush all pending requests for the device from pm_wq and wait for all
824 * run-time PM operations involving the device in progress to complete.
825 *
826 * Should be called under dev->power.lock with interrupts disabled.
827 */
828static void __pm_runtime_barrier(struct device *dev)
829{
830 pm_runtime_deactivate_timer(dev);
831
832 if (dev->power.request_pending) {
833 dev->power.request = RPM_REQ_NONE;
834 spin_unlock_irq(&dev->power.lock);
835
836 cancel_work_sync(&dev->power.work);
837
838 spin_lock_irq(&dev->power.lock);
839 dev->power.request_pending = false;
840 }
841
842 if (dev->power.runtime_status == RPM_SUSPENDING
843 || dev->power.runtime_status == RPM_RESUMING
844 || dev->power.idle_notification) {
845 DEFINE_WAIT(wait);
846
847 /* Suspend, wake-up or idle notification in progress. */
848 for (;;) {
849 prepare_to_wait(&dev->power.wait_queue, &wait,
850 TASK_UNINTERRUPTIBLE);
851 if (dev->power.runtime_status != RPM_SUSPENDING
852 && dev->power.runtime_status != RPM_RESUMING
853 && !dev->power.idle_notification)
854 break;
855 spin_unlock_irq(&dev->power.lock);
856
857 schedule();
858
859 spin_lock_irq(&dev->power.lock);
860 }
861 finish_wait(&dev->power.wait_queue, &wait);
862 }
863}
864
865/**
866 * pm_runtime_barrier - Flush pending requests and wait for completions.
867 * @dev: Device to handle.
868 *
869 * Prevent the device from being suspended by incrementing its usage counter
870 * and, if there's a pending resume request for the device, wake it up.
871 * Next, make sure that all pending requests for the device have been flushed
872 * from pm_wq and wait for all run-time PM operations involving the device in
873 * progress to complete.
874 *
875 * Return value:
876 * 1, if there was a resume request pending and the device had to be woken up,
877 * 0, otherwise
878 */
879int pm_runtime_barrier(struct device *dev)
880{
881 int retval = 0;
882
883 pm_runtime_get_noresume(dev);
884 spin_lock_irq(&dev->power.lock);
885
886 if (dev->power.request_pending
887 && dev->power.request == RPM_REQ_RESUME) {
888 __pm_runtime_resume(dev, false);
889 retval = 1;
890 }
891
892 __pm_runtime_barrier(dev);
893
894 spin_unlock_irq(&dev->power.lock);
895 pm_runtime_put_noidle(dev);
896
897 return retval;
898}
899EXPORT_SYMBOL_GPL(pm_runtime_barrier);
900
901/**
902 * __pm_runtime_disable - Disable run-time PM of a device.
903 * @dev: Device to handle.
904 * @check_resume: If set, check if there's a resume request for the device.
905 *
906 * Increment power.disable_depth for the device and, if it was zero previously,
907 * cancel all pending run-time PM requests for the device and wait for all
908 * operations in progress to complete. The device can be either active or
909 * suspended after its run-time PM has been disabled.
910 *
911 * If @check_resume is set and there's a resume request pending when
912 * __pm_runtime_disable() is called and power.disable_depth is zero, the
913 * function will wake up the device before disabling its run-time PM.
914 */
915void __pm_runtime_disable(struct device *dev, bool check_resume)
916{
917 spin_lock_irq(&dev->power.lock);
918
919 if (dev->power.disable_depth > 0) {
920 dev->power.disable_depth++;
921 goto out;
922 }
923
924 /*
925 * Wake up the device if there's a resume request pending, because that
926 * means there probably is some I/O to process and disabling run-time PM
927 * shouldn't prevent the device from processing the I/O.
928 */
929 if (check_resume && dev->power.request_pending
930 && dev->power.request == RPM_REQ_RESUME) {
931 /*
932 * Prevent suspends and idle notifications from being carried
933 * out after we have woken up the device.
934 */
935 pm_runtime_get_noresume(dev);
936
937 __pm_runtime_resume(dev, false);
938
939 pm_runtime_put_noidle(dev);
940 }
941
942 if (!dev->power.disable_depth++)
943 __pm_runtime_barrier(dev);
944
945 out:
946 spin_unlock_irq(&dev->power.lock);
947}
948EXPORT_SYMBOL_GPL(__pm_runtime_disable);
949
950/**
951 * pm_runtime_enable - Enable run-time PM of a device.
952 * @dev: Device to handle.
953 */
954void pm_runtime_enable(struct device *dev)
955{
956 unsigned long flags;
957
958 spin_lock_irqsave(&dev->power.lock, flags);
959
960 if (dev->power.disable_depth > 0)
961 dev->power.disable_depth--;
962 else
963 dev_warn(dev, "Unbalanced %s!\n", __func__);
964
965 spin_unlock_irqrestore(&dev->power.lock, flags);
966}
967EXPORT_SYMBOL_GPL(pm_runtime_enable);
968
969/**
970 * pm_runtime_init - Initialize run-time PM fields in given device object.
971 * @dev: Device object to initialize.
972 */
973void pm_runtime_init(struct device *dev)
974{
975 spin_lock_init(&dev->power.lock);
976
977 dev->power.runtime_status = RPM_SUSPENDED;
978 dev->power.idle_notification = false;
979
980 dev->power.disable_depth = 1;
981 atomic_set(&dev->power.usage_count, 0);
982
983 dev->power.runtime_error = 0;
984
985 atomic_set(&dev->power.child_count, 0);
986 pm_suspend_ignore_children(dev, false);
987
988 dev->power.request_pending = false;
989 dev->power.request = RPM_REQ_NONE;
990 dev->power.deferred_resume = false;
991 INIT_WORK(&dev->power.work, pm_runtime_work);
992
993 dev->power.timer_expires = 0;
994 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
995 (unsigned long)dev);
996
997 init_waitqueue_head(&dev->power.wait_queue);
998}
999
1000/**
1001 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1002 * @dev: Device object being removed from device hierarchy.
1003 */
1004void pm_runtime_remove(struct device *dev)
1005{
1006 __pm_runtime_disable(dev, false);
1007
1008 /* Change the status back to 'suspended' to match the initial status. */
1009 if (dev->power.runtime_status == RPM_ACTIVE)
1010 pm_runtime_set_suspended(dev);
1011}
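
pm_runtime_init() above leaves every new device in RPM_SUSPENDED with disable_depth set to 1, so whoever owns the device has to declare its real state and enable run-time PM explicitly. As a rough sketch only (not part of this patch; the foo_* names are invented), a platform driver whose hardware is already powered when probe() runs might pair the helpers like this:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* The hardware is up, so record that before enabling run-time PM. */
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* Balances pm_runtime_enable() and cancels pending requests. */
	pm_runtime_disable(&pdev->dev);

	return 0;
}
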
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 98c9a847bf51..933c143b6a74 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1399,8 +1399,9 @@ static void dw_shutdown(struct platform_device *pdev)
1399 clk_disable(dw->clk); 1399 clk_disable(dw->clk);
1400} 1400}
1401 1401
1402static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg) 1402static int dw_suspend_noirq(struct device *dev)
1403{ 1403{
1404 struct platform_device *pdev = to_platform_device(dev);
1404 struct dw_dma *dw = platform_get_drvdata(pdev); 1405 struct dw_dma *dw = platform_get_drvdata(pdev);
1405 1406
1406 dw_dma_off(platform_get_drvdata(pdev)); 1407 dw_dma_off(platform_get_drvdata(pdev));
@@ -1408,23 +1409,27 @@ static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1408 return 0; 1409 return 0;
1409} 1410}
1410 1411
1411static int dw_resume_early(struct platform_device *pdev) 1412static int dw_resume_noirq(struct device *dev)
1412{ 1413{
1414 struct platform_device *pdev = to_platform_device(dev);
1413 struct dw_dma *dw = platform_get_drvdata(pdev); 1415 struct dw_dma *dw = platform_get_drvdata(pdev);
1414 1416
1415 clk_enable(dw->clk); 1417 clk_enable(dw->clk);
1416 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1418 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1417 return 0; 1419 return 0;
1418
1419} 1420}
1420 1421
1422static struct dev_pm_ops dw_dev_pm_ops = {
1423 .suspend_noirq = dw_suspend_noirq,
1424 .resume_noirq = dw_resume_noirq,
1425};
1426
1421static struct platform_driver dw_driver = { 1427static struct platform_driver dw_driver = {
1422 .remove = __exit_p(dw_remove), 1428 .remove = __exit_p(dw_remove),
1423 .shutdown = dw_shutdown, 1429 .shutdown = dw_shutdown,
1424 .suspend_late = dw_suspend_late,
1425 .resume_early = dw_resume_early,
1426 .driver = { 1430 .driver = {
1427 .name = "dw_dmac", 1431 .name = "dw_dmac",
1432 .pm = &dw_dev_pm_ops,
1428 }, 1433 },
1429}; 1434};
1430 1435
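
The dw_dmac hunk above, and the txx9dmac, i2c-pxa, i2c-s3c2410 and musb hunks that follow, all apply the same conversion: the legacy platform_driver ->suspend_late()/->resume_early() methods are dropped and equivalent dev_pm_ops callbacks (mostly .suspend_noirq/.resume_noirq) are wired up through driver.pm, taking a struct device instead of a platform_device. A skeleton of the pattern for a hypothetical driver (the bar_* names, struct bar_chip and its clk field are invented) might look like this:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct bar_chip {
	struct clk *clk;
};

static int bar_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bar_chip *chip = platform_get_drvdata(pdev);

	/* Whatever bar_suspend_late(pdev, mesg) used to do. */
	clk_disable(chip->clk);
	return 0;
}

static int bar_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bar_chip *chip = platform_get_drvdata(pdev);

	/* Whatever bar_resume_early(pdev) used to do. */
	clk_enable(chip->clk);
	return 0;
}

static struct dev_pm_ops bar_dev_pm_ops = {
	.suspend_noirq	= bar_suspend_noirq,
	.resume_noirq	= bar_resume_noirq,
};

static struct platform_driver bar_driver = {
	.driver = {
		.name	= "bar",
		.pm	= &bar_dev_pm_ops,
	},
};

The noirq callbacks run at roughly the point in the suspend/resume sequence that suspend_late()/resume_early() used to, which is why the existing bodies can usually move over unchanged.
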
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 88dab52926f4..7837930146a4 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1291,17 +1291,18 @@ static void txx9dmac_shutdown(struct platform_device *pdev)
1291 txx9dmac_off(ddev); 1291 txx9dmac_off(ddev);
1292} 1292}
1293 1293
1294static int txx9dmac_suspend_late(struct platform_device *pdev, 1294static int txx9dmac_suspend_noirq(struct device *dev)
1295 pm_message_t mesg)
1296{ 1295{
1296 struct platform_device *pdev = to_platform_device(dev);
1297 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1297 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1298 1298
1299 txx9dmac_off(ddev); 1299 txx9dmac_off(ddev);
1300 return 0; 1300 return 0;
1301} 1301}
1302 1302
1303static int txx9dmac_resume_early(struct platform_device *pdev) 1303static int txx9dmac_resume_noirq(struct device *dev)
1304{ 1304{
1305 struct platform_device *pdev = to_platform_device(dev);
1305 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1306 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1306 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; 1307 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1307 u32 mcr; 1308 u32 mcr;
@@ -1314,6 +1315,11 @@ static int txx9dmac_resume_early(struct platform_device *pdev)
1314 1315
1315} 1316}
1316 1317
1318static struct dev_pm_ops txx9dmac_dev_pm_ops = {
1319 .suspend_noirq = txx9dmac_suspend_noirq,
1320 .resume_noirq = txx9dmac_resume_noirq,
1321};
1322
1317static struct platform_driver txx9dmac_chan_driver = { 1323static struct platform_driver txx9dmac_chan_driver = {
1318 .remove = __exit_p(txx9dmac_chan_remove), 1324 .remove = __exit_p(txx9dmac_chan_remove),
1319 .driver = { 1325 .driver = {
@@ -1324,10 +1330,9 @@ static struct platform_driver txx9dmac_chan_driver = {
1324static struct platform_driver txx9dmac_driver = { 1330static struct platform_driver txx9dmac_driver = {
1325 .remove = __exit_p(txx9dmac_remove), 1331 .remove = __exit_p(txx9dmac_remove),
1326 .shutdown = txx9dmac_shutdown, 1332 .shutdown = txx9dmac_shutdown,
1327 .suspend_late = txx9dmac_suspend_late,
1328 .resume_early = txx9dmac_resume_early,
1329 .driver = { 1333 .driver = {
1330 .name = "txx9dmac", 1334 .name = "txx9dmac",
1335 .pm = &txx9dmac_dev_pm_ops,
1331 }, 1336 },
1332}; 1337};
1333 1338
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 762e1e530882..049555777f67 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1134,35 +1134,44 @@ static int __exit i2c_pxa_remove(struct platform_device *dev)
1134} 1134}
1135 1135
1136#ifdef CONFIG_PM 1136#ifdef CONFIG_PM
1137static int i2c_pxa_suspend_late(struct platform_device *dev, pm_message_t state) 1137static int i2c_pxa_suspend_noirq(struct device *dev)
1138{ 1138{
1139 struct pxa_i2c *i2c = platform_get_drvdata(dev); 1139 struct platform_device *pdev = to_platform_device(dev);
1140 struct pxa_i2c *i2c = platform_get_drvdata(pdev);
1141
1140 clk_disable(i2c->clk); 1142 clk_disable(i2c->clk);
1143
1141 return 0; 1144 return 0;
1142} 1145}
1143 1146
1144static int i2c_pxa_resume_early(struct platform_device *dev) 1147static int i2c_pxa_resume_noirq(struct device *dev)
1145{ 1148{
1146 struct pxa_i2c *i2c = platform_get_drvdata(dev); 1149 struct platform_device *pdev = to_platform_device(dev);
1150 struct pxa_i2c *i2c = platform_get_drvdata(pdev);
1147 1151
1148 clk_enable(i2c->clk); 1152 clk_enable(i2c->clk);
1149 i2c_pxa_reset(i2c); 1153 i2c_pxa_reset(i2c);
1150 1154
1151 return 0; 1155 return 0;
1152} 1156}
1157
1158static struct dev_pm_ops i2c_pxa_dev_pm_ops = {
1159 .suspend_noirq = i2c_pxa_suspend_noirq,
1160 .resume_noirq = i2c_pxa_resume_noirq,
1161};
1162
1163#define I2C_PXA_DEV_PM_OPS (&i2c_pxa_dev_pm_ops)
1153#else 1164#else
1154#define i2c_pxa_suspend_late NULL 1165#define I2C_PXA_DEV_PM_OPS NULL
1155#define i2c_pxa_resume_early NULL
1156#endif 1166#endif
1157 1167
1158static struct platform_driver i2c_pxa_driver = { 1168static struct platform_driver i2c_pxa_driver = {
1159 .probe = i2c_pxa_probe, 1169 .probe = i2c_pxa_probe,
1160 .remove = __exit_p(i2c_pxa_remove), 1170 .remove = __exit_p(i2c_pxa_remove),
1161 .suspend_late = i2c_pxa_suspend_late,
1162 .resume_early = i2c_pxa_resume_early,
1163 .driver = { 1171 .driver = {
1164 .name = "pxa2xx-i2c", 1172 .name = "pxa2xx-i2c",
1165 .owner = THIS_MODULE, 1173 .owner = THIS_MODULE,
1174 .pm = I2C_PXA_DEV_PM_OPS,
1166 }, 1175 },
1167 .id_table = i2c_pxa_id_table, 1176 .id_table = i2c_pxa_id_table,
1168}; 1177};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 20bb0ceb027b..96aafb91b69a 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -946,17 +946,20 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
946} 946}
947 947
948#ifdef CONFIG_PM 948#ifdef CONFIG_PM
949static int s3c24xx_i2c_suspend_late(struct platform_device *dev, 949static int s3c24xx_i2c_suspend_noirq(struct device *dev)
950 pm_message_t msg)
951{ 950{
952 struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); 951 struct platform_device *pdev = to_platform_device(dev);
952 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
953
953 i2c->suspended = 1; 954 i2c->suspended = 1;
955
954 return 0; 956 return 0;
955} 957}
956 958
957static int s3c24xx_i2c_resume(struct platform_device *dev) 959static int s3c24xx_i2c_resume(struct device *dev)
958{ 960{
959 struct s3c24xx_i2c *i2c = platform_get_drvdata(dev); 961 struct platform_device *pdev = to_platform_device(dev);
962 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
960 963
961 i2c->suspended = 0; 964 i2c->suspended = 0;
962 s3c24xx_i2c_init(i2c); 965 s3c24xx_i2c_init(i2c);
@@ -964,9 +967,14 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
964 return 0; 967 return 0;
965} 968}
966 969
970static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
971 .suspend_noirq = s3c24xx_i2c_suspend_noirq,
972 .resume = s3c24xx_i2c_resume,
973};
974
975#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
967#else 976#else
968#define s3c24xx_i2c_suspend_late NULL 977#define S3C24XX_DEV_PM_OPS NULL
969#define s3c24xx_i2c_resume NULL
970#endif 978#endif
971 979
972/* device driver for platform bus bits */ 980/* device driver for platform bus bits */
@@ -985,12 +993,11 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
985static struct platform_driver s3c24xx_i2c_driver = { 993static struct platform_driver s3c24xx_i2c_driver = {
986 .probe = s3c24xx_i2c_probe, 994 .probe = s3c24xx_i2c_probe,
987 .remove = s3c24xx_i2c_remove, 995 .remove = s3c24xx_i2c_remove,
988 .suspend_late = s3c24xx_i2c_suspend_late,
989 .resume = s3c24xx_i2c_resume,
990 .id_table = s3c24xx_driver_ids, 996 .id_table = s3c24xx_driver_ids,
991 .driver = { 997 .driver = {
992 .owner = THIS_MODULE, 998 .owner = THIS_MODULE,
993 .name = "s3c-i2c", 999 .name = "s3c-i2c",
1000 .pm = S3C24XX_DEV_PM_OPS,
994 }, 1001 },
995}; 1002};
996 1003
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f99bc7f089f1..a7eb7277b106 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -575,7 +575,7 @@ static void pci_pm_complete(struct device *dev)
575static int pci_pm_suspend(struct device *dev) 575static int pci_pm_suspend(struct device *dev)
576{ 576{
577 struct pci_dev *pci_dev = to_pci_dev(dev); 577 struct pci_dev *pci_dev = to_pci_dev(dev);
578 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 578 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
579 579
580 if (pci_has_legacy_pm_support(pci_dev)) 580 if (pci_has_legacy_pm_support(pci_dev))
581 return pci_legacy_suspend(dev, PMSG_SUSPEND); 581 return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -613,7 +613,7 @@ static int pci_pm_suspend(struct device *dev)
613static int pci_pm_suspend_noirq(struct device *dev) 613static int pci_pm_suspend_noirq(struct device *dev)
614{ 614{
615 struct pci_dev *pci_dev = to_pci_dev(dev); 615 struct pci_dev *pci_dev = to_pci_dev(dev);
616 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 616 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
617 617
618 if (pci_has_legacy_pm_support(pci_dev)) 618 if (pci_has_legacy_pm_support(pci_dev))
619 return pci_legacy_suspend_late(dev, PMSG_SUSPEND); 619 return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -672,7 +672,7 @@ static int pci_pm_resume_noirq(struct device *dev)
672static int pci_pm_resume(struct device *dev) 672static int pci_pm_resume(struct device *dev)
673{ 673{
674 struct pci_dev *pci_dev = to_pci_dev(dev); 674 struct pci_dev *pci_dev = to_pci_dev(dev);
675 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 675 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
676 int error = 0; 676 int error = 0;
677 677
678 /* 678 /*
@@ -711,7 +711,7 @@ static int pci_pm_resume(struct device *dev)
711static int pci_pm_freeze(struct device *dev) 711static int pci_pm_freeze(struct device *dev)
712{ 712{
713 struct pci_dev *pci_dev = to_pci_dev(dev); 713 struct pci_dev *pci_dev = to_pci_dev(dev);
714 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 714 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
715 715
716 if (pci_has_legacy_pm_support(pci_dev)) 716 if (pci_has_legacy_pm_support(pci_dev))
717 return pci_legacy_suspend(dev, PMSG_FREEZE); 717 return pci_legacy_suspend(dev, PMSG_FREEZE);
@@ -780,7 +780,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
780static int pci_pm_thaw(struct device *dev) 780static int pci_pm_thaw(struct device *dev)
781{ 781{
782 struct pci_dev *pci_dev = to_pci_dev(dev); 782 struct pci_dev *pci_dev = to_pci_dev(dev);
783 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 783 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
784 int error = 0; 784 int error = 0;
785 785
786 if (pci_has_legacy_pm_support(pci_dev)) 786 if (pci_has_legacy_pm_support(pci_dev))
@@ -799,7 +799,7 @@ static int pci_pm_thaw(struct device *dev)
799static int pci_pm_poweroff(struct device *dev) 799static int pci_pm_poweroff(struct device *dev)
800{ 800{
801 struct pci_dev *pci_dev = to_pci_dev(dev); 801 struct pci_dev *pci_dev = to_pci_dev(dev);
802 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 802 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
803 803
804 if (pci_has_legacy_pm_support(pci_dev)) 804 if (pci_has_legacy_pm_support(pci_dev))
805 return pci_legacy_suspend(dev, PMSG_HIBERNATE); 805 return pci_legacy_suspend(dev, PMSG_HIBERNATE);
@@ -872,7 +872,7 @@ static int pci_pm_restore_noirq(struct device *dev)
872static int pci_pm_restore(struct device *dev) 872static int pci_pm_restore(struct device *dev)
873{ 873{
874 struct pci_dev *pci_dev = to_pci_dev(dev); 874 struct pci_dev *pci_dev = to_pci_dev(dev);
875 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 875 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
876 int error = 0; 876 int error = 0;
877 877
878 /* 878 /*
@@ -910,7 +910,7 @@ static int pci_pm_restore(struct device *dev)
910 910
911#endif /* !CONFIG_HIBERNATION */ 911#endif /* !CONFIG_HIBERNATION */
912 912
913struct dev_pm_ops pci_dev_pm_ops = { 913const struct dev_pm_ops pci_dev_pm_ops = {
914 .prepare = pci_pm_prepare, 914 .prepare = pci_pm_prepare,
915 .complete = pci_pm_complete, 915 .complete = pci_pm_complete,
916 .suspend = pci_pm_suspend, 916 .suspend = pci_pm_suspend,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c7c1ca0494cd..1d26beddf2ca 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2167,8 +2167,9 @@ static int __devexit musb_remove(struct platform_device *pdev)
2167 2167
2168#ifdef CONFIG_PM 2168#ifdef CONFIG_PM
2169 2169
2170static int musb_suspend(struct platform_device *pdev, pm_message_t message) 2170static int musb_suspend(struct device *dev)
2171{ 2171{
2172 struct platform_device *pdev = to_platform_device(dev);
2172 unsigned long flags; 2173 unsigned long flags;
2173 struct musb *musb = dev_to_musb(&pdev->dev); 2174 struct musb *musb = dev_to_musb(&pdev->dev);
2174 2175
@@ -2195,8 +2196,9 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
2195 return 0; 2196 return 0;
2196} 2197}
2197 2198
2198static int musb_resume_early(struct platform_device *pdev) 2199static int musb_resume_noirq(struct device *dev)
2199{ 2200{
2201 struct platform_device *pdev = to_platform_device(dev);
2200 struct musb *musb = dev_to_musb(&pdev->dev); 2202 struct musb *musb = dev_to_musb(&pdev->dev);
2201 2203
2202 if (!musb->clock) 2204 if (!musb->clock)
@@ -2214,9 +2216,14 @@ static int musb_resume_early(struct platform_device *pdev)
2214 return 0; 2216 return 0;
2215} 2217}
2216 2218
2219static struct dev_pm_ops musb_dev_pm_ops = {
2220 .suspend = musb_suspend,
2221 .resume_noirq = musb_resume_noirq,
2222};
2223
2224#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2217#else 2225#else
2218#define musb_suspend NULL 2226#define MUSB_DEV_PM_OPS NULL
2219#define musb_resume_early NULL
2220#endif 2227#endif
2221 2228
2222static struct platform_driver musb_driver = { 2229static struct platform_driver musb_driver = {
@@ -2224,11 +2231,10 @@ static struct platform_driver musb_driver = {
2224 .name = (char *)musb_driver_name, 2231 .name = (char *)musb_driver_name,
2225 .bus = &platform_bus_type, 2232 .bus = &platform_bus_type,
2226 .owner = THIS_MODULE, 2233 .owner = THIS_MODULE,
2234 .pm = MUSB_DEV_PM_OPS,
2227 }, 2235 },
2228 .remove = __devexit_p(musb_remove), 2236 .remove = __devexit_p(musb_remove),
2229 .shutdown = musb_shutdown, 2237 .shutdown = musb_shutdown,
2230 .suspend = musb_suspend,
2231 .resume_early = musb_resume_early,
2232}; 2238};
2233 2239
2234/*-------------------------------------------------------------------------*/ 2240/*-------------------------------------------------------------------------*/
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
index c17c9600f220..d7c76bba640d 100644
--- a/include/asm-generic/device.h
+++ b/include/asm-generic/device.h
@@ -9,4 +9,7 @@
9struct dev_archdata { 9struct dev_archdata {
10}; 10};
11 11
12struct pdev_archdata {
13};
14
12#endif /* _ASM_GENERIC_DEVICE_H */ 15#endif /* _ASM_GENERIC_DEVICE_H */
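
The empty struct pdev_archdata added here is the generic fallback; an architecture that needs per-platform-device data can provide its own definition in its asm/device.h instead. Purely as an illustration (the field below is invented, not taken from any architecture in this patch):

/* Hypothetical <asm/device.h> override of the generic definition above. */
struct pdev_archdata {
	u64 dma_mask;
};
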
diff --git a/include/linux/device.h b/include/linux/device.h
index aebb81036db2..a28642975053 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -62,7 +62,7 @@ struct bus_type {
62 int (*suspend)(struct device *dev, pm_message_t state); 62 int (*suspend)(struct device *dev, pm_message_t state);
63 int (*resume)(struct device *dev); 63 int (*resume)(struct device *dev);
64 64
65 struct dev_pm_ops *pm; 65 const struct dev_pm_ops *pm;
66 66
67 struct bus_type_private *p; 67 struct bus_type_private *p;
68}; 68};
@@ -132,7 +132,7 @@ struct device_driver {
132 int (*resume) (struct device *dev); 132 int (*resume) (struct device *dev);
133 struct attribute_group **groups; 133 struct attribute_group **groups;
134 134
135 struct dev_pm_ops *pm; 135 const struct dev_pm_ops *pm;
136 136
137 struct driver_private *p; 137 struct driver_private *p;
138}; 138};
@@ -200,7 +200,8 @@ struct class {
200 int (*suspend)(struct device *dev, pm_message_t state); 200 int (*suspend)(struct device *dev, pm_message_t state);
201 int (*resume)(struct device *dev); 201 int (*resume)(struct device *dev);
202 202
203 struct dev_pm_ops *pm; 203 const struct dev_pm_ops *pm;
204
204 struct class_private *p; 205 struct class_private *p;
205}; 206};
206 207
@@ -291,7 +292,7 @@ struct device_type {
291 char *(*nodename)(struct device *dev); 292 char *(*nodename)(struct device *dev);
292 void (*release)(struct device *dev); 293 void (*release)(struct device *dev);
293 294
294 struct dev_pm_ops *pm; 295 const struct dev_pm_ops *pm;
295}; 296};
296 297
297/* interface for exporting device attributes */ 298/* interface for exporting device attributes */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 8dc5123b6305..3c6675c2444b 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -22,6 +22,9 @@ struct platform_device {
22 struct resource * resource; 22 struct resource * resource;
23 23
24 struct platform_device_id *id_entry; 24 struct platform_device_id *id_entry;
25
26 /* arch specific additions */
27 struct pdev_archdata archdata;
25}; 28};
26 29
27#define platform_get_device_id(pdev) ((pdev)->id_entry) 30#define platform_get_device_id(pdev) ((pdev)->id_entry)
@@ -57,8 +60,6 @@ struct platform_driver {
57 int (*remove)(struct platform_device *); 60 int (*remove)(struct platform_device *);
58 void (*shutdown)(struct platform_device *); 61 void (*shutdown)(struct platform_device *);
59 int (*suspend)(struct platform_device *, pm_message_t state); 62 int (*suspend)(struct platform_device *, pm_message_t state);
60 int (*suspend_late)(struct platform_device *, pm_message_t state);
61 int (*resume_early)(struct platform_device *);
62 int (*resume)(struct platform_device *); 63 int (*resume)(struct platform_device *);
63 struct device_driver driver; 64 struct device_driver driver;
64 struct platform_device_id *id_table; 65 struct platform_device_id *id_table;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index b3f74764a586..2b6e20df0e52 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -22,6 +22,10 @@
22#define _LINUX_PM_H 22#define _LINUX_PM_H
23 23
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/workqueue.h>
26#include <linux/spinlock.h>
27#include <linux/wait.h>
28#include <linux/timer.h>
25 29
26/* 30/*
27 * Callbacks for platform drivers to implement. 31 * Callbacks for platform drivers to implement.
@@ -165,6 +169,28 @@ typedef struct pm_message {
165 * It is allowed to unregister devices while the above callbacks are being 169 * It is allowed to unregister devices while the above callbacks are being
166 * executed. However, it is not allowed to unregister a device from within any 170 * executed. However, it is not allowed to unregister a device from within any
167 * of its own callbacks. 171 * of its own callbacks.
172 *
173 * There also are the following callbacks related to run-time power management
174 * of devices:
175 *
176 * @runtime_suspend: Prepare the device for a condition in which it won't be
177 * able to communicate with the CPU(s) and RAM due to power management.
178 * This need not mean that the device should be put into a low power state.
179 * For example, if the device is behind a link which is about to be turned
180 * off, the device may remain at full power. If the device does go to low
181 * power and if device_may_wakeup(dev) is true, remote wake-up (i.e., a
182 * hardware mechanism allowing the device to request a change of its power
183 * state, such as PCI PME) should be enabled for it.
184 *
185 * @runtime_resume: Put the device into the fully active state in response to a
186 * wake-up event generated by hardware or at the request of software. If
187 * necessary, put the device into the full power state and restore its
188 * registers, so that it is fully operational.
189 *
190 * @runtime_idle: Device appears to be inactive and it might be put into a low
191 * power state if all of the necessary conditions are satisfied. Check
192 * these conditions and handle the device as appropriate, possibly queueing
193 * a suspend request for it. The return value is ignored by the PM core.
168 */ 194 */
169 195
170struct dev_pm_ops { 196struct dev_pm_ops {
@@ -182,6 +208,9 @@ struct dev_pm_ops {
182 int (*thaw_noirq)(struct device *dev); 208 int (*thaw_noirq)(struct device *dev);
183 int (*poweroff_noirq)(struct device *dev); 209 int (*poweroff_noirq)(struct device *dev);
184 int (*restore_noirq)(struct device *dev); 210 int (*restore_noirq)(struct device *dev);
211 int (*runtime_suspend)(struct device *dev);
212 int (*runtime_resume)(struct device *dev);
213 int (*runtime_idle)(struct device *dev);
185}; 214};
186 215
187/** 216/**
@@ -315,14 +344,80 @@ enum dpm_state {
315 DPM_OFF_IRQ, 344 DPM_OFF_IRQ,
316}; 345};
317 346
347/**
348 * Device run-time power management status.
349 *
350 * These status labels are used internally by the PM core to indicate the
351 * current status of a device with respect to the PM core operations. They do
352 * not reflect the actual power state of the device or its status as seen by the
353 * driver.
354 *
355 * RPM_ACTIVE Device is fully operational. Indicates that the device
356 * bus type's ->runtime_resume() callback has completed
357 * successfully.
358 *
359 * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has
360 * completed successfully. The device is regarded as
361 * suspended.
362 *
363 * RPM_RESUMING Device bus type's ->runtime_resume() callback is being
364 * executed.
365 *
366 * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being
367 * executed.
368 */
369
370enum rpm_status {
371 RPM_ACTIVE = 0,
372 RPM_RESUMING,
373 RPM_SUSPENDED,
374 RPM_SUSPENDING,
375};
376
377/**
378 * Device run-time power management request types.
379 *
380 * RPM_REQ_NONE Do nothing.
381 *
382 * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback
383 *
384 * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback
385 *
386 * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback
387 */
388
389enum rpm_request {
390 RPM_REQ_NONE = 0,
391 RPM_REQ_IDLE,
392 RPM_REQ_SUSPEND,
393 RPM_REQ_RESUME,
394};
395
318struct dev_pm_info { 396struct dev_pm_info {
319 pm_message_t power_state; 397 pm_message_t power_state;
320 unsigned can_wakeup:1; 398 unsigned int can_wakeup:1;
321 unsigned should_wakeup:1; 399 unsigned int should_wakeup:1;
322 enum dpm_state status; /* Owned by the PM core */ 400 enum dpm_state status; /* Owned by the PM core */
323#ifdef CONFIG_PM_SLEEP 401#ifdef CONFIG_PM_SLEEP
324 struct list_head entry; 402 struct list_head entry;
325#endif 403#endif
404#ifdef CONFIG_PM_RUNTIME
405 struct timer_list suspend_timer;
406 unsigned long timer_expires;
407 struct work_struct work;
408 wait_queue_head_t wait_queue;
409 spinlock_t lock;
410 atomic_t usage_count;
411 atomic_t child_count;
412 unsigned int disable_depth:3;
413 unsigned int ignore_children:1;
414 unsigned int idle_notification:1;
415 unsigned int request_pending:1;
416 unsigned int deferred_resume:1;
417 enum rpm_request request;
418 enum rpm_status runtime_status;
419 int runtime_error;
420#endif
326}; 421};
327 422
328/* 423/*
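
The comment block above says what the three new runtime callbacks are expected to do, but the hunk carries no example. Purely as a sketch (the foo_bus_* names and the 100 ms delay are invented, and real callbacks would do hardware-specific work), code supplying a dev_pm_ops for devices that can simply be powered down while unused might fill them in along these lines:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_bus_runtime_suspend(struct device *dev)
{
	/*
	 * Put the device into a low-power state; returning 0 lets the
	 * PM core mark it RPM_SUSPENDED.
	 */
	return 0;
}

static int foo_bus_runtime_resume(struct device *dev)
{
	/*
	 * Restore full power; returning 0 lets the PM core mark the
	 * device RPM_ACTIVE again.
	 */
	return 0;
}

static int foo_bus_runtime_idle(struct device *dev)
{
	/* The device looks unused: ask for a suspend 100 ms from now. */
	pm_schedule_suspend(dev, 100);
	return 0;
}

static struct dev_pm_ops foo_bus_pm_ops = {
	.runtime_suspend	= foo_bus_runtime_suspend,
	.runtime_resume		= foo_bus_runtime_resume,
	.runtime_idle		= foo_bus_runtime_idle,
};
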
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
new file mode 100644
index 000000000000..44087044910f
--- /dev/null
+++ b/include/linux/pm_runtime.h
@@ -0,0 +1,114 @@
1/*
2 * pm_runtime.h - Device run-time power management helper functions.
3 *
4 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
5 *
6 * This file is released under the GPLv2.
7 */
8
9#ifndef _LINUX_PM_RUNTIME_H
10#define _LINUX_PM_RUNTIME_H
11
12#include <linux/device.h>
13#include <linux/pm.h>
14
15#ifdef CONFIG_PM_RUNTIME
16
17extern struct workqueue_struct *pm_wq;
18
19extern int pm_runtime_idle(struct device *dev);
20extern int pm_runtime_suspend(struct device *dev);
21extern int pm_runtime_resume(struct device *dev);
22extern int pm_request_idle(struct device *dev);
23extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
24extern int pm_request_resume(struct device *dev);
25extern int __pm_runtime_get(struct device *dev, bool sync);
26extern int __pm_runtime_put(struct device *dev, bool sync);
27extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
28extern int pm_runtime_barrier(struct device *dev);
29extern void pm_runtime_enable(struct device *dev);
30extern void __pm_runtime_disable(struct device *dev, bool check_resume);
31
32static inline bool pm_children_suspended(struct device *dev)
33{
34 return dev->power.ignore_children
35 || !atomic_read(&dev->power.child_count);
36}
37
38static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
39{
40 dev->power.ignore_children = enable;
41}
42
43static inline void pm_runtime_get_noresume(struct device *dev)
44{
45 atomic_inc(&dev->power.usage_count);
46}
47
48static inline void pm_runtime_put_noidle(struct device *dev)
49{
50 atomic_add_unless(&dev->power.usage_count, -1, 0);
51}
52
53#else /* !CONFIG_PM_RUNTIME */
54
55static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; }
56static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; }
57static inline int pm_runtime_resume(struct device *dev) { return 0; }
58static inline int pm_request_idle(struct device *dev) { return -ENOSYS; }
59static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
60{
61 return -ENOSYS;
62}
63static inline int pm_request_resume(struct device *dev) { return 0; }
64static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; }
65static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; }
66static inline int __pm_runtime_set_status(struct device *dev,
67 unsigned int status) { return 0; }
68static inline int pm_runtime_barrier(struct device *dev) { return 0; }
69static inline void pm_runtime_enable(struct device *dev) {}
70static inline void __pm_runtime_disable(struct device *dev, bool c) {}
71
72static inline bool pm_children_suspended(struct device *dev) { return false; }
73static inline void pm_suspend_ignore_children(struct device *dev, bool en) {}
74static inline void pm_runtime_get_noresume(struct device *dev) {}
75static inline void pm_runtime_put_noidle(struct device *dev) {}
76
77#endif /* !CONFIG_PM_RUNTIME */
78
79static inline int pm_runtime_get(struct device *dev)
80{
81 return __pm_runtime_get(dev, false);
82}
83
84static inline int pm_runtime_get_sync(struct device *dev)
85{
86 return __pm_runtime_get(dev, true);
87}
88
89static inline int pm_runtime_put(struct device *dev)
90{
91 return __pm_runtime_put(dev, false);
92}
93
94static inline int pm_runtime_put_sync(struct device *dev)
95{
96 return __pm_runtime_put(dev, true);
97}
98
99static inline int pm_runtime_set_active(struct device *dev)
100{
101 return __pm_runtime_set_status(dev, RPM_ACTIVE);
102}
103
104static inline void pm_runtime_set_suspended(struct device *dev)
105{
106 __pm_runtime_set_status(dev, RPM_SUSPENDED);
107}
108
109static inline void pm_runtime_disable(struct device *dev)
110{
111 __pm_runtime_disable(dev, true);
112}
113
114#endif
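
The header declares the get/put wrappers but carries no usage example. A minimal sketch, assuming the device's bus type implements the runtime callbacks (foo_xfer() and its arguments are invented), wraps an I/O path like this:

#include <linux/types.h>
#include <linux/pm_runtime.h>

static int foo_xfer(struct device *dev, void *buf, size_t len)
{
	int ret;

	/* Take a usage reference; resume the device if it was suspended. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Resume failed: drop the reference taken above. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... program the hardware using buf and len ... */
	ret = 0;

	/* Drop the reference; at zero an idle notification gets queued. */
	pm_runtime_put(dev);

	return ret;
}
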
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 72067cbdb37f..91e09d3b2eb2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -208,3 +208,17 @@ config APM_EMULATION
208 random kernel OOPSes or reboots that don't seem to be related to 208 random kernel OOPSes or reboots that don't seem to be related to
209 anything, try disabling/enabling this option (or disabling/enabling 209 anything, try disabling/enabling this option (or disabling/enabling
210 APM in your BIOS). 210 APM in your BIOS).
211
212config PM_RUNTIME
213 bool "Run-time PM core functionality"
214 depends on PM
215 ---help---
216 Enable functionality allowing I/O devices to be put into energy-saving
217 (low power) states at run time (or autosuspended) after a specified
218 period of inactivity and woken up in response to a hardware-generated
219 wake-up event or a driver's request.
220
221 Hardware support is generally required for this functionality to work
222 and the bus type drivers of the buses the devices are on are
223 responsible for the actual handling of the autosuspend requests and
224 wake-up events.
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f710e36930cc..347d2cc88cd0 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -11,6 +11,7 @@
11#include <linux/kobject.h> 11#include <linux/kobject.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/resume-trace.h> 13#include <linux/resume-trace.h>
14#include <linux/workqueue.h>
14 15
15#include "power.h" 16#include "power.h"
16 17
@@ -217,8 +218,24 @@ static struct attribute_group attr_group = {
217 .attrs = g, 218 .attrs = g,
218}; 219};
219 220
221#ifdef CONFIG_PM_RUNTIME
222struct workqueue_struct *pm_wq;
223
224static int __init pm_start_workqueue(void)
225{
226 pm_wq = create_freezeable_workqueue("pm");
227
228 return pm_wq ? 0 : -ENOMEM;
229}
230#else
231static inline int pm_start_workqueue(void) { return 0; }
232#endif
233
220static int __init pm_init(void) 234static int __init pm_init(void)
221{ 235{
236 int error = pm_start_workqueue();
237 if (error)
238 return error;
222 power_kobj = kobject_create_and_add("power", NULL); 239 power_kobj = kobject_create_and_add("power", NULL);
223 if (!power_kobj) 240 if (!power_kobj)
224 return -ENOMEM; 241 return -ENOMEM;
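
The pm_wq created above is a freezable workqueue and is made visible to the rest of the tree through the extern declaration in include/linux/pm_runtime.h. As an illustrative sketch only (the foo_pm_* names are invented), bus or driver code could queue its own deferred PM work on it, so that the work item is frozen with the workqueue and does not run in the middle of a system sleep transition:

#include <linux/workqueue.h>
#include <linux/pm_runtime.h>

static void foo_pm_work_fn(struct work_struct *work)
{
	/* Deferred power management work goes here. */
}

static DECLARE_WORK(foo_pm_work, foo_pm_work_fn);

/* Called from wherever the deferred work needs to be kicked off. */
static void foo_pm_kick(void)
{
	queue_work(pm_wq, &foo_pm_work);
}
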