aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-07-22 19:01:57 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-07-22 19:01:57 -0400
commit431bf99d26157d56689e5de65bd27ce9f077fc3f (patch)
treeb15e357039956fcdd0e0e6177d2fc99bb3cfa822
parent72f96e0e38d7e29ba16dcfd824ecaebe38b8293e (diff)
parent7ae033cc0dfce68d8e0c83aca60837cf2bf0d2e6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6: (51 commits) PM: Improve error code of pm_notifier_call_chain() PM: Add "RTC" to PM trace time stamps to avoid confusion PM / Suspend: Export suspend_set_ops, suspend_valid_only_mem PM / Suspend: Add .suspend_again() callback to suspend_ops PM / OPP: Introduce function to free cpufreq table ARM / shmobile: Return -EBUSY from A4LC power off if A3RV is active PM / Domains: Take .power_off() error code into account ARM / shmobile: Use genpd_queue_power_off_work() ARM / shmobile: Use pm_genpd_poweroff_unused() PM / Domains: Introduce function to power off all unused PM domains OMAP: PM: disable idle on suspend for GPIO and UART OMAP: PM: omap_device: add API to disable idle on suspend OMAP: PM: omap_device: add system PM methods for PM domain handling OMAP: PM: omap_device: conditionally use PM domain runtime helpers PM / Runtime: Add new helper function: pm_runtime_status_suspended() PM / Domains: Queue up power off work only if it is not pending PM / Domains: Improve handling of wakeup devices during system suspend PM / Domains: Do not restore all devices on power off error PM / Domains: Allow callbacks to execute all runtime PM helpers PM / Domains: Do not execute device callbacks under locks ...
-rw-r--r--Documentation/power/devices.txt14
-rw-r--r--Documentation/power/opp.txt2
-rw-r--r--Documentation/power/runtime_pm.txt229
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/mach-omap1/pm_bus.c14
-rw-r--r--arch/arm/mach-omap2/gpio.c2
-rw-r--r--arch/arm/mach-omap2/serial.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c5
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c5
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h29
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c160
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c22
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c11
-rw-r--r--arch/arm/plat-omap/include/plat/omap_device.h9
-rw-r--r--arch/arm/plat-omap/omap_device.c53
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm_runtime.c6
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/clock_ops.c234
-rw-r--r--drivers/base/power/domain.c1273
-rw-r--r--drivers/base/power/generic_ops.c98
-rw-r--r--drivers/base/power/main.c65
-rw-r--r--drivers/base/power/opp.c17
-rw-r--r--drivers/base/power/runtime.c89
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/pci/pci-driver.c18
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/scsi/scsi_pm.c8
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/opp.h8
-rw-r--r--include/linux/pm.h10
-rw-r--r--include/linux/pm_domain.h108
-rw-r--r--include/linux/pm_runtime.h38
-rw-r--r--include/linux/suspend.h8
-rw-r--r--kernel/power/Kconfig8
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/suspend.c20
40 files changed, 2274 insertions, 324 deletions
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 64565aac6e40..3384d5996be2 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -506,8 +506,8 @@ routines. Nevertheless, different callback pointers are used in case there is a
506situation where it actually matters. 506situation where it actually matters.
507 507
508 508
509Device Power Domains 509Device Power Management Domains
510-------------------- 510-------------------------------
511Sometimes devices share reference clocks or other power resources. In those 511Sometimes devices share reference clocks or other power resources. In those
512cases it generally is not possible to put devices into low-power states 512cases it generally is not possible to put devices into low-power states
513individually. Instead, a set of devices sharing a power resource can be put 513individually. Instead, a set of devices sharing a power resource can be put
@@ -516,8 +516,8 @@ power resource. Of course, they also need to be put into the full-power state
516together, by turning the shared power resource on. A set of devices with this 516together, by turning the shared power resource on. A set of devices with this
517property is often referred to as a power domain. 517property is often referred to as a power domain.
518 518
519Support for power domains is provided through the pwr_domain field of struct 519Support for power domains is provided through the pm_domain field of struct
520device. This field is a pointer to an object of type struct dev_power_domain, 520device. This field is a pointer to an object of type struct dev_pm_domain,
521defined in include/linux/pm.h, providing a set of power management callbacks 521defined in include/linux/pm.h, providing a set of power management callbacks
522analogous to the subsystem-level and device driver callbacks that are executed 522analogous to the subsystem-level and device driver callbacks that are executed
523for the given device during all power transitions, instead of the respective 523for the given device during all power transitions, instead of the respective
@@ -604,7 +604,7 @@ state temporarily, for example so that its system wakeup capability can be
604disabled. This all depends on the hardware and the design of the subsystem and 604disabled. This all depends on the hardware and the design of the subsystem and
605device driver in question. 605device driver in question.
606 606
607During system-wide resume from a sleep state it's best to put devices into the 607During system-wide resume from a sleep state it's easiest to put devices into
608full-power state, as explained in Documentation/power/runtime_pm.txt. Refer to 608the full-power state, as explained in Documentation/power/runtime_pm.txt. Refer
609that document for more information regarding this particular issue as well as 609to that document for more information regarding this particular issue as well as
610for information on the device runtime power management framework in general. 610for information on the device runtime power management framework in general.
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 5ae70a12c1e2..3035d00757ad 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -321,6 +321,8 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
321 addition to CONFIG_PM as power management feature is required to 321 addition to CONFIG_PM as power management feature is required to
322 dynamically scale voltage and frequency in a system. 322 dynamically scale voltage and frequency in a system.
323 323
324opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
325
3247. Data Structures 3267. Data Structures
325================== 327==================
326Typically an SoC contains multiple voltage domains which are variable. Each 328Typically an SoC contains multiple voltage domains which are variable. Each
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index b24875b1ced5..14dd3c6ad97e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -1,39 +1,39 @@
1Run-time Power Management Framework for I/O Devices 1Runtime Power Management Framework for I/O Devices
2 2
3(C) 2009-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 3(C) 2009-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
4(C) 2010 Alan Stern <stern@rowland.harvard.edu> 4(C) 2010 Alan Stern <stern@rowland.harvard.edu>
5 5
61. Introduction 61. Introduction
7 7
8Support for run-time power management (run-time PM) of I/O devices is provided 8Support for runtime power management (runtime PM) of I/O devices is provided
9at the power management core (PM core) level by means of: 9at the power management core (PM core) level by means of:
10 10
11* The power management workqueue pm_wq in which bus types and device drivers can 11* The power management workqueue pm_wq in which bus types and device drivers can
12 put their PM-related work items. It is strongly recommended that pm_wq be 12 put their PM-related work items. It is strongly recommended that pm_wq be
13 used for queuing all work items related to run-time PM, because this allows 13 used for queuing all work items related to runtime PM, because this allows
14 them to be synchronized with system-wide power transitions (suspend to RAM, 14 them to be synchronized with system-wide power transitions (suspend to RAM,
15 hibernation and resume from system sleep states). pm_wq is declared in 15 hibernation and resume from system sleep states). pm_wq is declared in
16 include/linux/pm_runtime.h and defined in kernel/power/main.c. 16 include/linux/pm_runtime.h and defined in kernel/power/main.c.
17 17
18* A number of run-time PM fields in the 'power' member of 'struct device' (which 18* A number of runtime PM fields in the 'power' member of 'struct device' (which
19 is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can 19 is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can
20 be used for synchronizing run-time PM operations with one another. 20 be used for synchronizing runtime PM operations with one another.
21 21
22* Three device run-time PM callbacks in 'struct dev_pm_ops' (defined in 22* Three device runtime PM callbacks in 'struct dev_pm_ops' (defined in
23 include/linux/pm.h). 23 include/linux/pm.h).
24 24
25* A set of helper functions defined in drivers/base/power/runtime.c that can be 25* A set of helper functions defined in drivers/base/power/runtime.c that can be
26 used for carrying out run-time PM operations in such a way that the 26 used for carrying out runtime PM operations in such a way that the
27 synchronization between them is taken care of by the PM core. Bus types and 27 synchronization between them is taken care of by the PM core. Bus types and
28 device drivers are encouraged to use these functions. 28 device drivers are encouraged to use these functions.
29 29
30The run-time PM callbacks present in 'struct dev_pm_ops', the device run-time PM 30The runtime PM callbacks present in 'struct dev_pm_ops', the device runtime PM
31fields of 'struct dev_pm_info' and the core helper functions provided for 31fields of 'struct dev_pm_info' and the core helper functions provided for
32run-time PM are described below. 32runtime PM are described below.
33 33
342. Device Run-time PM Callbacks 342. Device Runtime PM Callbacks
35 35
36There are three device run-time PM callbacks defined in 'struct dev_pm_ops': 36There are three device runtime PM callbacks defined in 'struct dev_pm_ops':
37 37
38struct dev_pm_ops { 38struct dev_pm_ops {
39 ... 39 ...
@@ -72,11 +72,11 @@ knows what to do to handle the device).
72 not mean that the device has been put into a low power state. It is 72 not mean that the device has been put into a low power state. It is
73 supposed to mean, however, that the device will not process data and will 73 supposed to mean, however, that the device will not process data and will
74 not communicate with the CPU(s) and RAM until the subsystem-level resume 74 not communicate with the CPU(s) and RAM until the subsystem-level resume
75 callback is executed for it. The run-time PM status of a device after 75 callback is executed for it. The runtime PM status of a device after
76 successful execution of the subsystem-level suspend callback is 'suspended'. 76 successful execution of the subsystem-level suspend callback is 'suspended'.
77 77
78 * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN, 78 * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
79 the device's run-time PM status is 'active', which means that the device 79 the device's runtime PM status is 'active', which means that the device
80 _must_ be fully operational afterwards. 80 _must_ be fully operational afterwards.
81 81
82 * If the subsystem-level suspend callback returns an error code different 82 * If the subsystem-level suspend callback returns an error code different
@@ -104,7 +104,7 @@ the device).
104 104
105 * Once the subsystem-level resume callback has completed successfully, the PM 105 * Once the subsystem-level resume callback has completed successfully, the PM
106 core regards the device as fully operational, which means that the device 106 core regards the device as fully operational, which means that the device
107 _must_ be able to complete I/O operations as needed. The run-time PM status 107 _must_ be able to complete I/O operations as needed. The runtime PM status
108 of the device is then 'active'. 108 of the device is then 'active'.
109 109
110 * If the subsystem-level resume callback returns an error code, the PM core 110 * If the subsystem-level resume callback returns an error code, the PM core
@@ -130,7 +130,7 @@ device in that case. The value returned by this callback is ignored by the PM
130core. 130core.
131 131
132The helper functions provided by the PM core, described in Section 4, guarantee 132The helper functions provided by the PM core, described in Section 4, guarantee
133that the following constraints are met with respect to the bus type's run-time 133that the following constraints are met with respect to the bus type's runtime
134PM callbacks: 134PM callbacks:
135 135
136(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute 136(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
@@ -142,7 +142,7 @@ PM callbacks:
142 142
143(2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active' 143(2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active'
144 devices (i.e. the PM core will only execute ->runtime_idle() or 144 devices (i.e. the PM core will only execute ->runtime_idle() or
145 ->runtime_suspend() for the devices the run-time PM status of which is 145 ->runtime_suspend() for the devices the runtime PM status of which is
146 'active'). 146 'active').
147 147
148(3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device 148(3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device
@@ -151,7 +151,7 @@ PM callbacks:
151 flag of which is set. 151 flag of which is set.
152 152
153(4) ->runtime_resume() can only be executed for 'suspended' devices (i.e. the 153(4) ->runtime_resume() can only be executed for 'suspended' devices (i.e. the
154 PM core will only execute ->runtime_resume() for the devices the run-time 154 PM core will only execute ->runtime_resume() for the devices the runtime
155 PM status of which is 'suspended'). 155 PM status of which is 'suspended').
156 156
157Additionally, the helper functions provided by the PM core obey the following 157Additionally, the helper functions provided by the PM core obey the following
@@ -171,9 +171,9 @@ rules:
171 scheduled requests to execute the other callbacks for the same device, 171 scheduled requests to execute the other callbacks for the same device,
172 except for scheduled autosuspends. 172 except for scheduled autosuspends.
173 173
1743. Run-time PM Device Fields 1743. Runtime PM Device Fields
175 175
176The following device run-time PM fields are present in 'struct dev_pm_info', as 176The following device runtime PM fields are present in 'struct dev_pm_info', as
177defined in include/linux/pm.h: 177defined in include/linux/pm.h:
178 178
179 struct timer_list suspend_timer; 179 struct timer_list suspend_timer;
@@ -205,7 +205,7 @@ defined in include/linux/pm.h:
205 205
206 unsigned int disable_depth; 206 unsigned int disable_depth;
207 - used for disabling the helper funcions (they work normally if this is 207 - used for disabling the helper funcions (they work normally if this is
208 equal to zero); the initial value of it is 1 (i.e. run-time PM is 208 equal to zero); the initial value of it is 1 (i.e. runtime PM is
209 initially disabled for all devices) 209 initially disabled for all devices)
210 210
211 unsigned int runtime_error; 211 unsigned int runtime_error;
@@ -229,10 +229,10 @@ defined in include/linux/pm.h:
229 suspend to complete; means "start a resume as soon as you've suspended" 229 suspend to complete; means "start a resume as soon as you've suspended"
230 230
231 unsigned int run_wake; 231 unsigned int run_wake;
232 - set if the device is capable of generating run-time wake-up events 232 - set if the device is capable of generating runtime wake-up events
233 233
234 enum rpm_status runtime_status; 234 enum rpm_status runtime_status;
235 - the run-time PM status of the device; this field's initial value is 235 - the runtime PM status of the device; this field's initial value is
236 RPM_SUSPENDED, which means that each device is initially regarded by the 236 RPM_SUSPENDED, which means that each device is initially regarded by the
237 PM core as 'suspended', regardless of its real hardware status 237 PM core as 'suspended', regardless of its real hardware status
238 238
@@ -243,7 +243,7 @@ defined in include/linux/pm.h:
243 and pm_runtime_forbid() helper functions 243 and pm_runtime_forbid() helper functions
244 244
245 unsigned int no_callbacks; 245 unsigned int no_callbacks;
246 - indicates that the device does not use the run-time PM callbacks (see 246 - indicates that the device does not use the runtime PM callbacks (see
247 Section 8); it may be modified only by the pm_runtime_no_callbacks() 247 Section 8); it may be modified only by the pm_runtime_no_callbacks()
248 helper function 248 helper function
249 249
@@ -270,16 +270,16 @@ defined in include/linux/pm.h:
270 270
271All of the above fields are members of the 'power' member of 'struct device'. 271All of the above fields are members of the 'power' member of 'struct device'.
272 272
2734. Run-time PM Device Helper Functions 2734. Runtime PM Device Helper Functions
274 274
275The following run-time PM helper functions are defined in 275The following runtime PM helper functions are defined in
276drivers/base/power/runtime.c and include/linux/pm_runtime.h: 276drivers/base/power/runtime.c and include/linux/pm_runtime.h:
277 277
278 void pm_runtime_init(struct device *dev); 278 void pm_runtime_init(struct device *dev);
279 - initialize the device run-time PM fields in 'struct dev_pm_info' 279 - initialize the device runtime PM fields in 'struct dev_pm_info'
280 280
281 void pm_runtime_remove(struct device *dev); 281 void pm_runtime_remove(struct device *dev);
282 - make sure that the run-time PM of the device will be disabled after 282 - make sure that the runtime PM of the device will be disabled after
283 removing the device from device hierarchy 283 removing the device from device hierarchy
284 284
285 int pm_runtime_idle(struct device *dev); 285 int pm_runtime_idle(struct device *dev);
@@ -289,9 +289,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
289 289
290 int pm_runtime_suspend(struct device *dev); 290 int pm_runtime_suspend(struct device *dev);
291 - execute the subsystem-level suspend callback for the device; returns 0 on 291 - execute the subsystem-level suspend callback for the device; returns 0 on
292 success, 1 if the device's run-time PM status was already 'suspended', or 292 success, 1 if the device's runtime PM status was already 'suspended', or
293 error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt 293 error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
294 to suspend the device again in future 294 to suspend the device again in future and -EACCES means that
295 'power.disable_depth' is different from 0
295 296
296 int pm_runtime_autosuspend(struct device *dev); 297 int pm_runtime_autosuspend(struct device *dev);
297 - same as pm_runtime_suspend() except that the autosuspend delay is taken 298 - same as pm_runtime_suspend() except that the autosuspend delay is taken
@@ -301,10 +302,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
301 302
302 int pm_runtime_resume(struct device *dev); 303 int pm_runtime_resume(struct device *dev);
303 - execute the subsystem-level resume callback for the device; returns 0 on 304 - execute the subsystem-level resume callback for the device; returns 0 on
304 success, 1 if the device's run-time PM status was already 'active' or 305 success, 1 if the device's runtime PM status was already 'active' or
305 error code on failure, where -EAGAIN means it may be safe to attempt to 306 error code on failure, where -EAGAIN means it may be safe to attempt to
306 resume the device again in future, but 'power.runtime_error' should be 307 resume the device again in future, but 'power.runtime_error' should be
307 checked additionally 308 checked additionally, and -EACCES means that 'power.disable_depth' is
309 different from 0
308 310
309 int pm_request_idle(struct device *dev); 311 int pm_request_idle(struct device *dev);
310 - submit a request to execute the subsystem-level idle callback for the 312 - submit a request to execute the subsystem-level idle callback for the
@@ -321,7 +323,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
321 device in future, where 'delay' is the time to wait before queuing up a 323 device in future, where 'delay' is the time to wait before queuing up a
322 suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work 324 suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work
323 item is queued up immediately); returns 0 on success, 1 if the device's PM 325 item is queued up immediately); returns 0 on success, 1 if the device's PM
324 run-time status was already 'suspended', or error code if the request 326 runtime status was already 'suspended', or error code if the request
325 hasn't been scheduled (or queued up if 'delay' is 0); if the execution of 327 hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
326 ->runtime_suspend() is already scheduled and not yet expired, the new 328 ->runtime_suspend() is already scheduled and not yet expired, the new
327 value of 'delay' will be used as the time to wait 329 value of 'delay' will be used as the time to wait
@@ -329,7 +331,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
329 int pm_request_resume(struct device *dev); 331 int pm_request_resume(struct device *dev);
330 - submit a request to execute the subsystem-level resume callback for the 332 - submit a request to execute the subsystem-level resume callback for the
331 device (the request is represented by a work item in pm_wq); returns 0 on 333 device (the request is represented by a work item in pm_wq); returns 0 on
332 success, 1 if the device's run-time PM status was already 'active', or 334 success, 1 if the device's runtime PM status was already 'active', or
333 error code if the request hasn't been queued up 335 error code if the request hasn't been queued up
334 336
335 void pm_runtime_get_noresume(struct device *dev); 337 void pm_runtime_get_noresume(struct device *dev);
@@ -367,22 +369,32 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
367 pm_runtime_autosuspend(dev) and return its result 369 pm_runtime_autosuspend(dev) and return its result
368 370
369 void pm_runtime_enable(struct device *dev); 371 void pm_runtime_enable(struct device *dev);
370 - enable the run-time PM helper functions to run the device bus type's 372 - decrement the device's 'power.disable_depth' field; if that field is equal
371 run-time PM callbacks described in Section 2 373 to zero, the runtime PM helper functions can execute subsystem-level
374 callbacks described in Section 2 for the device
372 375
373 int pm_runtime_disable(struct device *dev); 376 int pm_runtime_disable(struct device *dev);
374 - prevent the run-time PM helper functions from running subsystem-level 377 - increment the device's 'power.disable_depth' field (if the value of that
375 run-time PM callbacks for the device, make sure that all of the pending 378 field was previously zero, this prevents subsystem-level runtime PM
376 run-time PM operations on the device are either completed or canceled; 379 callbacks from being run for the device), make sure that all of the pending
380 runtime PM operations on the device are either completed or canceled;
377 returns 1 if there was a resume request pending and it was necessary to 381 returns 1 if there was a resume request pending and it was necessary to
378 execute the subsystem-level resume callback for the device to satisfy that 382 execute the subsystem-level resume callback for the device to satisfy that
379 request, otherwise 0 is returned 383 request, otherwise 0 is returned
380 384
385 int pm_runtime_barrier(struct device *dev);
386 - check if there's a resume request pending for the device and resume it
387 (synchronously) in that case, cancel any other pending runtime PM requests
388 regarding it and wait for all runtime PM operations on it in progress to
389 complete; returns 1 if there was a resume request pending and it was
390 necessary to execute the subsystem-level resume callback for the device to
391 satisfy that request, otherwise 0 is returned
392
381 void pm_suspend_ignore_children(struct device *dev, bool enable); 393 void pm_suspend_ignore_children(struct device *dev, bool enable);
382 - set/unset the power.ignore_children flag of the device 394 - set/unset the power.ignore_children flag of the device
383 395
384 int pm_runtime_set_active(struct device *dev); 396 int pm_runtime_set_active(struct device *dev);
385 - clear the device's 'power.runtime_error' flag, set the device's run-time 397 - clear the device's 'power.runtime_error' flag, set the device's runtime
386 PM status to 'active' and update its parent's counter of 'active' 398 PM status to 'active' and update its parent's counter of 'active'
387 children as appropriate (it is only valid to use this function if 399 children as appropriate (it is only valid to use this function if
388 'power.runtime_error' is set or 'power.disable_depth' is greater than 400 'power.runtime_error' is set or 'power.disable_depth' is greater than
@@ -390,7 +402,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
390 which is not active and the 'power.ignore_children' flag of which is unset 402 which is not active and the 'power.ignore_children' flag of which is unset
391 403
392 void pm_runtime_set_suspended(struct device *dev); 404 void pm_runtime_set_suspended(struct device *dev);
393 - clear the device's 'power.runtime_error' flag, set the device's run-time 405 - clear the device's 'power.runtime_error' flag, set the device's runtime
394 PM status to 'suspended' and update its parent's counter of 'active' 406 PM status to 'suspended' and update its parent's counter of 'active'
395 children as appropriate (it is only valid to use this function if 407 children as appropriate (it is only valid to use this function if
396 'power.runtime_error' is set or 'power.disable_depth' is greater than 408 'power.runtime_error' is set or 'power.disable_depth' is greater than
@@ -400,6 +412,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
400 - return true if the device's runtime PM status is 'suspended' and its 412 - return true if the device's runtime PM status is 'suspended' and its
401 'power.disable_depth' field is equal to zero, or false otherwise 413 'power.disable_depth' field is equal to zero, or false otherwise
402 414
415 bool pm_runtime_status_suspended(struct device *dev);
416 - return true if the device's runtime PM status is 'suspended'
417
403 void pm_runtime_allow(struct device *dev); 418 void pm_runtime_allow(struct device *dev);
404 - set the power.runtime_auto flag for the device and decrease its usage 419 - set the power.runtime_auto flag for the device and decrease its usage
405 counter (used by the /sys/devices/.../power/control interface to 420 counter (used by the /sys/devices/.../power/control interface to
@@ -411,7 +426,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
411 effectively prevent the device from being power managed at run time) 426 effectively prevent the device from being power managed at run time)
412 427
413 void pm_runtime_no_callbacks(struct device *dev); 428 void pm_runtime_no_callbacks(struct device *dev);
414 - set the power.no_callbacks flag for the device and remove the run-time 429 - set the power.no_callbacks flag for the device and remove the runtime
415 PM attributes from /sys/devices/.../power (or prevent them from being 430 PM attributes from /sys/devices/.../power (or prevent them from being
416 added when the device is registered) 431 added when the device is registered)
417 432
@@ -431,7 +446,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
431 446
432 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); 447 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
433 - set the power.autosuspend_delay value to 'delay' (expressed in 448 - set the power.autosuspend_delay value to 'delay' (expressed in
434 milliseconds); if 'delay' is negative then run-time suspends are 449 milliseconds); if 'delay' is negative then runtime suspends are
435 prevented 450 prevented
436 451
437 unsigned long pm_runtime_autosuspend_expiration(struct device *dev); 452 unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
@@ -470,35 +485,35 @@ pm_runtime_resume()
470pm_runtime_get_sync() 485pm_runtime_get_sync()
471pm_runtime_put_sync_suspend() 486pm_runtime_put_sync_suspend()
472 487
4735. Run-time PM Initialization, Device Probing and Removal 4885. Runtime PM Initialization, Device Probing and Removal
474 489
475Initially, the run-time PM is disabled for all devices, which means that the 490Initially, the runtime PM is disabled for all devices, which means that the
476majority of the run-time PM helper funtions described in Section 4 will return 491majority of the runtime PM helper funtions described in Section 4 will return
477-EAGAIN until pm_runtime_enable() is called for the device. 492-EAGAIN until pm_runtime_enable() is called for the device.
478 493
479In addition to that, the initial run-time PM status of all devices is 494In addition to that, the initial runtime PM status of all devices is
480'suspended', but it need not reflect the actual physical state of the device. 495'suspended', but it need not reflect the actual physical state of the device.
481Thus, if the device is initially active (i.e. it is able to process I/O), its 496Thus, if the device is initially active (i.e. it is able to process I/O), its
482run-time PM status must be changed to 'active', with the help of 497runtime PM status must be changed to 'active', with the help of
483pm_runtime_set_active(), before pm_runtime_enable() is called for the device. 498pm_runtime_set_active(), before pm_runtime_enable() is called for the device.
484 499
485However, if the device has a parent and the parent's run-time PM is enabled, 500However, if the device has a parent and the parent's runtime PM is enabled,
486calling pm_runtime_set_active() for the device will affect the parent, unless 501calling pm_runtime_set_active() for the device will affect the parent, unless
487the parent's 'power.ignore_children' flag is set. Namely, in that case the 502the parent's 'power.ignore_children' flag is set. Namely, in that case the
488parent won't be able to suspend at run time, using the PM core's helper 503parent won't be able to suspend at run time, using the PM core's helper
489functions, as long as the child's status is 'active', even if the child's 504functions, as long as the child's status is 'active', even if the child's
490run-time PM is still disabled (i.e. pm_runtime_enable() hasn't been called for 505runtime PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
491the child yet or pm_runtime_disable() has been called for it). For this reason, 506the child yet or pm_runtime_disable() has been called for it). For this reason,
492once pm_runtime_set_active() has been called for the device, pm_runtime_enable() 507once pm_runtime_set_active() has been called for the device, pm_runtime_enable()
493should be called for it too as soon as reasonably possible or its run-time PM 508should be called for it too as soon as reasonably possible or its runtime PM
494status should be changed back to 'suspended' with the help of 509status should be changed back to 'suspended' with the help of
495pm_runtime_set_suspended(). 510pm_runtime_set_suspended().
496 511
497If the default initial run-time PM status of the device (i.e. 'suspended') 512If the default initial runtime PM status of the device (i.e. 'suspended')
498reflects the actual state of the device, its bus type's or its driver's 513reflects the actual state of the device, its bus type's or its driver's
499->probe() callback will likely need to wake it up using one of the PM core's 514->probe() callback will likely need to wake it up using one of the PM core's
500helper functions described in Section 4. In that case, pm_runtime_resume() 515helper functions described in Section 4. In that case, pm_runtime_resume()
501should be used. Of course, for this purpose the device's run-time PM has to be 516should be used. Of course, for this purpose the device's runtime PM has to be
502enabled earlier by calling pm_runtime_enable(). 517enabled earlier by calling pm_runtime_enable().
503 518
504If the device bus type's or driver's ->probe() callback runs 519If the device bus type's or driver's ->probe() callback runs
@@ -529,33 +544,33 @@ The user space can effectively disallow the driver of the device to power manage
529it at run time by changing the value of its /sys/devices/.../power/control 544it at run time by changing the value of its /sys/devices/.../power/control
530attribute to "on", which causes pm_runtime_forbid() to be called. In principle, 545attribute to "on", which causes pm_runtime_forbid() to be called. In principle,
531this mechanism may also be used by the driver to effectively turn off the 546this mechanism may also be used by the driver to effectively turn off the
532run-time power management of the device until the user space turns it on. 547runtime power management of the device until the user space turns it on.
533Namely, during the initialization the driver can make sure that the run-time PM 548Namely, during the initialization the driver can make sure that the runtime PM
534status of the device is 'active' and call pm_runtime_forbid(). It should be 549status of the device is 'active' and call pm_runtime_forbid(). It should be
535noted, however, that if the user space has already intentionally changed the 550noted, however, that if the user space has already intentionally changed the
536value of /sys/devices/.../power/control to "auto" to allow the driver to power 551value of /sys/devices/.../power/control to "auto" to allow the driver to power
537manage the device at run time, the driver may confuse it by using 552manage the device at run time, the driver may confuse it by using
538pm_runtime_forbid() this way. 553pm_runtime_forbid() this way.
539 554
5406. Run-time PM and System Sleep 5556. Runtime PM and System Sleep
541 556
542Run-time PM and system sleep (i.e., system suspend and hibernation, also known 557Runtime PM and system sleep (i.e., system suspend and hibernation, also known
543as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of 558as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of
544ways. If a device is active when a system sleep starts, everything is 559ways. If a device is active when a system sleep starts, everything is
545straightforward. But what should happen if the device is already suspended? 560straightforward. But what should happen if the device is already suspended?
546 561
547The device may have different wake-up settings for run-time PM and system sleep. 562The device may have different wake-up settings for runtime PM and system sleep.
548For example, remote wake-up may be enabled for run-time suspend but disallowed 563For example, remote wake-up may be enabled for runtime suspend but disallowed
549for system sleep (device_may_wakeup(dev) returns 'false'). When this happens, 564for system sleep (device_may_wakeup(dev) returns 'false'). When this happens,
550the subsystem-level system suspend callback is responsible for changing the 565the subsystem-level system suspend callback is responsible for changing the
551device's wake-up setting (it may leave that to the device driver's system 566device's wake-up setting (it may leave that to the device driver's system
552suspend routine). It may be necessary to resume the device and suspend it again 567suspend routine). It may be necessary to resume the device and suspend it again
553in order to do so. The same is true if the driver uses different power levels 568in order to do so. The same is true if the driver uses different power levels
554or other settings for run-time suspend and system sleep. 569or other settings for runtime suspend and system sleep.
555 570
556During system resume, devices generally should be brought back to full power, 571During system resume, the simplest approach is to bring all devices back to full
557even if they were suspended before the system sleep began. There are several 572power, even if they had been suspended before the system suspend began. There
558reasons for this, including: 573are several reasons for this, including:
559 574
560 * The device might need to switch power levels, wake-up settings, etc. 575 * The device might need to switch power levels, wake-up settings, etc.
561 576
@@ -570,18 +585,50 @@ reasons for this, including:
570 * The device might need to be reset. 585 * The device might need to be reset.
571 586
572 * Even though the device was suspended, if its usage counter was > 0 then most 587 * Even though the device was suspended, if its usage counter was > 0 then most
573 likely it would need a run-time resume in the near future anyway. 588 likely it would need a runtime resume in the near future anyway.
574
575 * Always going back to full power is simplest.
576 589
577If the device was suspended before the sleep began, then its run-time PM status 590If the device had been suspended before the system suspend began and it's
578will have to be updated to reflect the actual post-system sleep status. The way 591brought back to full power during resume, then its runtime PM status will have
579to do this is: 592to be updated to reflect the actual post-system sleep status. The way to do
593this is:
580 594
581 pm_runtime_disable(dev); 595 pm_runtime_disable(dev);
582 pm_runtime_set_active(dev); 596 pm_runtime_set_active(dev);
583 pm_runtime_enable(dev); 597 pm_runtime_enable(dev);
584 598
599The PM core always increments the runtime usage counter before calling the
600->suspend() callback and decrements it after calling the ->resume() callback.
601Hence disabling runtime PM temporarily like this will not cause any runtime
602suspend attempts to be permanently lost. If the usage count goes to zero
603following the return of the ->resume() callback, the ->runtime_idle() callback
604will be invoked as usual.
605
606On some systems, however, system sleep is not entered through a global firmware
607or hardware operation. Instead, all hardware components are put into low-power
608states directly by the kernel in a coordinated way. Then, the system sleep
609state effectively follows from the states the hardware components end up in
610and the system is woken up from that state by a hardware interrupt or a similar
611mechanism entirely under the kernel's control. As a result, the kernel never
612gives control away and the states of all devices during resume are precisely
613known to it. If that is the case and none of the situations listed above takes
614place (in particular, if the system is not waking up from hibernation), it may
615be more efficient to leave the devices that had been suspended before the system
616suspend began in the suspended state.
617
618The PM core does its best to reduce the probability of race conditions between
619the runtime PM and system suspend/resume (and hibernation) callbacks by carrying
620out the following operations:
621
622 * During system suspend it calls pm_runtime_get_noresume() and
623 pm_runtime_barrier() for every device right before executing the
624 subsystem-level .suspend() callback for it. In addition to that it calls
625 pm_runtime_disable() for every device right after executing the
626 subsystem-level .suspend() callback for it.
627
628 * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
629 for every device right before and right after executing the subsystem-level
630 .resume() callback for it, respectively.
631
5857. Generic subsystem callbacks 6327. Generic subsystem callbacks
586 633
587Subsystems may wish to conserve code space by using the set of generic power 634Subsystems may wish to conserve code space by using the set of generic power
@@ -606,40 +653,68 @@ driver/base/power/generic_ops.c:
606 callback provided by its driver and return its result, or return 0 if not 653 callback provided by its driver and return its result, or return 0 if not
607 defined 654 defined
608 655
656 int pm_generic_suspend_noirq(struct device *dev);
657 - if pm_runtime_suspended(dev) returns "false", invoke the ->suspend_noirq()
658 callback provided by the device's driver and return its result, or return
659 0 if not defined
660
609 int pm_generic_resume(struct device *dev); 661 int pm_generic_resume(struct device *dev);
610 - invoke the ->resume() callback provided by the driver of this device and, 662 - invoke the ->resume() callback provided by the driver of this device and,
611 if successful, change the device's runtime PM status to 'active' 663 if successful, change the device's runtime PM status to 'active'
612 664
665 int pm_generic_resume_noirq(struct device *dev);
666 - invoke the ->resume_noirq() callback provided by the driver of this device
667
613 int pm_generic_freeze(struct device *dev); 668 int pm_generic_freeze(struct device *dev);
614 - if the device has not been suspended at run time, invoke the ->freeze() 669 - if the device has not been suspended at run time, invoke the ->freeze()
615 callback provided by its driver and return its result, or return 0 if not 670 callback provided by its driver and return its result, or return 0 if not
616 defined 671 defined
617 672
673 int pm_generic_freeze_noirq(struct device *dev);
674 - if pm_runtime_suspended(dev) returns "false", invoke the ->freeze_noirq()
675 callback provided by the device's driver and return its result, or return
676 0 if not defined
677
618 int pm_generic_thaw(struct device *dev); 678 int pm_generic_thaw(struct device *dev);
619 - if the device has not been suspended at run time, invoke the ->thaw() 679 - if the device has not been suspended at run time, invoke the ->thaw()
620 callback provided by its driver and return its result, or return 0 if not 680 callback provided by its driver and return its result, or return 0 if not
621 defined 681 defined
622 682
683 int pm_generic_thaw_noirq(struct device *dev);
684 - if pm_runtime_suspended(dev) returns "false", invoke the ->thaw_noirq()
685 callback provided by the device's driver and return its result, or return
686 0 if not defined
687
623 int pm_generic_poweroff(struct device *dev); 688 int pm_generic_poweroff(struct device *dev);
624 - if the device has not been suspended at run time, invoke the ->poweroff() 689 - if the device has not been suspended at run time, invoke the ->poweroff()
625 callback provided by its driver and return its result, or return 0 if not 690 callback provided by its driver and return its result, or return 0 if not
626 defined 691 defined
627 692
693 int pm_generic_poweroff_noirq(struct device *dev);
694 - if pm_runtime_suspended(dev) returns "false", run the ->poweroff_noirq()
695 callback provided by the device's driver and return its result, or return
696 0 if not defined
697
628 int pm_generic_restore(struct device *dev); 698 int pm_generic_restore(struct device *dev);
629 - invoke the ->restore() callback provided by the driver of this device and, 699 - invoke the ->restore() callback provided by the driver of this device and,
630 if successful, change the device's runtime PM status to 'active' 700 if successful, change the device's runtime PM status to 'active'
631 701
702 int pm_generic_restore_noirq(struct device *dev);
703 - invoke the ->restore_noirq() callback provided by the device's driver
704
632These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(), 705These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(),
633->runtime_resume(), ->suspend(), ->resume(), ->freeze(), ->thaw(), ->poweroff(), 706->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(),
634or ->restore() callback pointers in the subsystem-level dev_pm_ops structures. 707->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(),
708->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() callback
709pointers in the subsystem-level dev_pm_ops structures.
635 710
636If a subsystem wishes to use all of them at the same time, it can simply assign 711If a subsystem wishes to use all of them at the same time, it can simply assign
637the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its 712the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
638dev_pm_ops structure pointer. 713dev_pm_ops structure pointer.
639 714
640Device drivers that wish to use the same function as a system suspend, freeze, 715Device drivers that wish to use the same function as a system suspend, freeze,
641poweroff and run-time suspend callback, and similarly for system resume, thaw, 716poweroff and runtime suspend callback, and similarly for system resume, thaw,
642restore, and run-time resume, can achieve this with the help of the 717restore, and runtime resume, can achieve this with the help of the
643UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its 718UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its
644last argument to NULL). 719last argument to NULL).
645 720
@@ -649,7 +724,7 @@ Some "devices" are only logical sub-devices of their parent and cannot be
649power-managed on their own. (The prototype example is a USB interface. Entire 724power-managed on their own. (The prototype example is a USB interface. Entire
650USB devices can go into low-power mode or send wake-up requests, but neither is 725USB devices can go into low-power mode or send wake-up requests, but neither is
651possible for individual interfaces.) The drivers for these devices have no 726possible for individual interfaces.) The drivers for these devices have no
652need of run-time PM callbacks; if the callbacks did exist, ->runtime_suspend() 727need of runtime PM callbacks; if the callbacks did exist, ->runtime_suspend()
653and ->runtime_resume() would always return 0 without doing anything else and 728and ->runtime_resume() would always return 0 without doing anything else and
654->runtime_idle() would always call pm_runtime_suspend(). 729->runtime_idle() would always call pm_runtime_suspend().
655 730
@@ -657,7 +732,7 @@ Subsystems can tell the PM core about these devices by calling
657pm_runtime_no_callbacks(). This should be done after the device structure is 732pm_runtime_no_callbacks(). This should be done after the device structure is
658initialized and before it is registered (although after device registration is 733initialized and before it is registered (although after device registration is
659also okay). The routine will set the device's power.no_callbacks flag and 734also okay). The routine will set the device's power.no_callbacks flag and
660prevent the non-debugging run-time PM sysfs attributes from being created. 735prevent the non-debugging runtime PM sysfs attributes from being created.
661 736
662When power.no_callbacks is set, the PM core will not invoke the 737When power.no_callbacks is set, the PM core will not invoke the
663->runtime_idle(), ->runtime_suspend(), or ->runtime_resume() callbacks. 738->runtime_idle(), ->runtime_suspend(), or ->runtime_resume() callbacks.
@@ -665,7 +740,7 @@ Instead it will assume that suspends and resumes always succeed and that idle
665devices should be suspended. 740devices should be suspended.
666 741
667As a consequence, the PM core will never directly inform the device's subsystem 742As a consequence, the PM core will never directly inform the device's subsystem
668or driver about run-time power changes. Instead, the driver for the device's 743or driver about runtime power changes. Instead, the driver for the device's
669parent must take responsibility for telling the device's driver when the 744parent must take responsibility for telling the device's driver when the
670parent's power state changes. 745parent's power state changes.
671 746
@@ -676,13 +751,13 @@ A device should be put in a low-power state only when there's some reason to
676think it will remain in that state for a substantial time. A common heuristic 751think it will remain in that state for a substantial time. A common heuristic
677says that a device which hasn't been used for a while is liable to remain 752says that a device which hasn't been used for a while is liable to remain
678unused; following this advice, drivers should not allow devices to be suspended 753unused; following this advice, drivers should not allow devices to be suspended
679at run-time until they have been inactive for some minimum period. Even when 754at runtime until they have been inactive for some minimum period. Even when
680the heuristic ends up being non-optimal, it will still prevent devices from 755the heuristic ends up being non-optimal, it will still prevent devices from
681"bouncing" too rapidly between low-power and full-power states. 756"bouncing" too rapidly between low-power and full-power states.
682 757
683The term "autosuspend" is an historical remnant. It doesn't mean that the 758The term "autosuspend" is an historical remnant. It doesn't mean that the
684device is automatically suspended (the subsystem or driver still has to call 759device is automatically suspended (the subsystem or driver still has to call
685the appropriate PM routines); rather it means that run-time suspends will 760the appropriate PM routines); rather it means that runtime suspends will
686automatically be delayed until the desired period of inactivity has elapsed. 761automatically be delayed until the desired period of inactivity has elapsed.
687 762
688Inactivity is determined based on the power.last_busy field. Drivers should 763Inactivity is determined based on the power.last_busy field. Drivers should
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22ab..e04fa9d7637c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -642,6 +642,7 @@ config ARCH_SHMOBILE
642 select NO_IOPORT 642 select NO_IOPORT
643 select SPARSE_IRQ 643 select SPARSE_IRQ
644 select MULTI_IRQ_HANDLER 644 select MULTI_IRQ_HANDLER
645 select PM_GENERIC_DOMAINS if PM
645 help 646 help
646 Support for Renesas's SH-Mobile and R-Mobile ARM platforms. 647 Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
647 648
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 334fb8871bc3..943072d5a1d5 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev)
32 if (ret) 32 if (ret)
33 return ret; 33 return ret;
34 34
35 ret = pm_runtime_clk_suspend(dev); 35 ret = pm_clk_suspend(dev);
36 if (ret) { 36 if (ret) {
37 pm_generic_runtime_resume(dev); 37 pm_generic_runtime_resume(dev);
38 return ret; 38 return ret;
@@ -45,24 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev)
45{ 45{
46 dev_dbg(dev, "%s\n", __func__); 46 dev_dbg(dev, "%s\n", __func__);
47 47
48 pm_runtime_clk_resume(dev); 48 pm_clk_resume(dev);
49 return pm_generic_runtime_resume(dev); 49 return pm_generic_runtime_resume(dev);
50} 50}
51 51
52static struct dev_power_domain default_power_domain = { 52static struct dev_pm_domain default_pm_domain = {
53 .ops = { 53 .ops = {
54 .runtime_suspend = omap1_pm_runtime_suspend, 54 .runtime_suspend = omap1_pm_runtime_suspend,
55 .runtime_resume = omap1_pm_runtime_resume, 55 .runtime_resume = omap1_pm_runtime_resume,
56 USE_PLATFORM_PM_SLEEP_OPS 56 USE_PLATFORM_PM_SLEEP_OPS
57 }, 57 },
58}; 58};
59#define OMAP1_PWR_DOMAIN (&default_power_domain) 59#define OMAP1_PM_DOMAIN (&default_pm_domain)
60#else 60#else
61#define OMAP1_PWR_DOMAIN NULL 61#define OMAP1_PM_DOMAIN NULL
62#endif /* CONFIG_PM_RUNTIME */ 62#endif /* CONFIG_PM_RUNTIME */
63 63
64static struct pm_clk_notifier_block platform_bus_notifier = { 64static struct pm_clk_notifier_block platform_bus_notifier = {
65 .pwr_domain = OMAP1_PWR_DOMAIN, 65 .pm_domain = OMAP1_PM_DOMAIN,
66 .con_ids = { "ick", "fck", NULL, }, 66 .con_ids = { "ick", "fck", NULL, },
67}; 67};
68 68
@@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void)
71 if (!cpu_class_is_omap1()) 71 if (!cpu_class_is_omap1())
72 return -ENODEV; 72 return -ENODEV;
73 73
74 pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 74 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9a46d7773a48..2765cdc3152d 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -119,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
119 return PTR_ERR(od); 119 return PTR_ERR(od);
120 } 120 }
121 121
122 omap_device_disable_idle_on_suspend(od);
123
122 gpio_bank_count++; 124 gpio_bank_count++;
123 return 0; 125 return 0;
124} 126}
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b7b8cb..466fc722fa0f 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
805 WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n", 805 WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
806 name, oh->name); 806 name, oh->name);
807 807
808 omap_device_disable_idle_on_suspend(od);
808 oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); 809 oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
809 810
810 uart->irq = oh->mpu_irqs[0].irq; 811 uart->irq = oh->mpu_irqs[0].irq;
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 803bc6edfca4..b473b8efac68 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void)
1408 1408
1409 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); 1409 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
1410 1410
1411 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
1412 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
1413 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
1414
1411 hdmi_init_pm_clock(); 1415 hdmi_init_pm_clock();
1412 fsi_init_pm_clock(); 1416 fsi_init_pm_clock();
1413 sh7372_pm_init(); 1417 sh7372_pm_init();
1418 pm_clk_add(&fsi_device.dev, "spu2");
1414} 1419}
1415 1420
1416static void __init ap4evb_timer_init(void) 1421static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 3802f2afabef..5b36b6c5b448 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1582,8 +1582,13 @@ static void __init mackerel_init(void)
1582 1582
1583 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); 1583 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
1584 1584
1585 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
1586 sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
1587 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
1588
1585 hdmi_init_pm_clock(); 1589 hdmi_init_pm_clock();
1586 sh7372_pm_init(); 1590 sh7372_pm_init();
1591 pm_clk_add(&fsi_device.dev, "spu2");
1587} 1592}
1588 1593
1589static void __init mackerel_timer_init(void) 1594static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index c0800d83971e..91f5779abdd3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = {
662 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 662 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
663 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), 663 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
664 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), 664 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
665 CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
665}; 666};
666 667
667void __init sh7372_clock_init(void) 668void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index df20d7670172..ce595cee86cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -12,6 +12,7 @@
12#define __ASM_SH7372_H__ 12#define __ASM_SH7372_H__
13 13
14#include <linux/sh_clk.h> 14#include <linux/sh_clk.h>
15#include <linux/pm_domain.h>
15 16
16/* 17/*
17 * Pin Function Controller: 18 * Pin Function Controller:
@@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk;
470extern struct clk sh7372_fsidiva_clk; 471extern struct clk sh7372_fsidiva_clk;
471extern struct clk sh7372_fsidivb_clk; 472extern struct clk sh7372_fsidivb_clk;
472 473
474struct platform_device;
475
476struct sh7372_pm_domain {
477 struct generic_pm_domain genpd;
478 unsigned int bit_shift;
479};
480
481static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
482{
483 return container_of(d, struct sh7372_pm_domain, genpd);
484}
485
486#ifdef CONFIG_PM
487extern struct sh7372_pm_domain sh7372_a4lc;
488extern struct sh7372_pm_domain sh7372_a4mp;
489extern struct sh7372_pm_domain sh7372_d4;
490extern struct sh7372_pm_domain sh7372_a3rv;
491extern struct sh7372_pm_domain sh7372_a3ri;
492extern struct sh7372_pm_domain sh7372_a3sg;
493
494extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
495extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
496 struct platform_device *pdev);
497#else
498#define sh7372_init_pm_domain(pd) do { } while(0)
499#define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
500#endif /* CONFIG_PM */
501
473#endif /* __ASM_SH7372_H__ */ 502#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 8e4aadf14c9f..933fb411be0f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,16 +15,176 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_runtime.h>
19#include <linux/platform_device.h>
20#include <linux/delay.h>
18#include <asm/system.h> 21#include <asm/system.h>
19#include <asm/io.h> 22#include <asm/io.h>
20#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
21#include <mach/common.h> 24#include <mach/common.h>
25#include <mach/sh7372.h>
22 26
23#define SMFRAM 0xe6a70000 27#define SMFRAM 0xe6a70000
24#define SYSTBCR 0xe6150024 28#define SYSTBCR 0xe6150024
25#define SBAR 0xe6180020 29#define SBAR 0xe6180020
26#define APARMBAREA 0xe6f10020 30#define APARMBAREA 0xe6f10020
27 31
32#define SPDCR 0xe6180008
33#define SWUCR 0xe6180014
34#define PSTR 0xe6180080
35
36#define PSTR_RETRIES 100
37#define PSTR_DELAY_US 10
38
39#ifdef CONFIG_PM
40
41static int pd_power_down(struct generic_pm_domain *genpd)
42{
43 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
44 unsigned int mask = 1 << sh7372_pd->bit_shift;
45
46 if (__raw_readl(PSTR) & mask) {
47 unsigned int retry_count;
48
49 __raw_writel(mask, SPDCR);
50
51 for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
52 if (!(__raw_readl(SPDCR) & mask))
53 break;
54 cpu_relax();
55 }
56 }
57
58 pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
59 mask, __raw_readl(PSTR));
60
61 return 0;
62}
63
64static int pd_power_up(struct generic_pm_domain *genpd)
65{
66 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
67 unsigned int mask = 1 << sh7372_pd->bit_shift;
68 unsigned int retry_count;
69 int ret = 0;
70
71 if (__raw_readl(PSTR) & mask)
72 goto out;
73
74 __raw_writel(mask, SWUCR);
75
76 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
77 if (!(__raw_readl(SWUCR) & mask))
78 goto out;
79 if (retry_count > PSTR_RETRIES)
80 udelay(PSTR_DELAY_US);
81 else
82 cpu_relax();
83 }
84 if (__raw_readl(SWUCR) & mask)
85 ret = -EIO;
86
87 out:
88 pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
89 mask, __raw_readl(PSTR));
90
91 return ret;
92}
93
94static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
95{
96 int ret = pd_power_up(genpd);
97
98 /* force A4LC on after A3RV has been requested on */
99 pm_genpd_poweron(&sh7372_a4lc.genpd);
100
101 return ret;
102}
103
104static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
105{
106 int ret = pd_power_down(genpd);
107
108 /* try to power down A4LC after A3RV is requested off */
109 genpd_queue_power_off_work(&sh7372_a4lc.genpd);
110
111 return ret;
112}
113
114static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
115{
116 /* only power down A4LC if A3RV is off */
117 if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
118 return pd_power_down(genpd);
119
120 return -EBUSY;
121}
122
123static bool pd_active_wakeup(struct device *dev)
124{
125 return true;
126}
127
128void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
129{
130 struct generic_pm_domain *genpd = &sh7372_pd->genpd;
131
132 pm_genpd_init(genpd, NULL, false);
133 genpd->stop_device = pm_clk_suspend;
134 genpd->start_device = pm_clk_resume;
135 genpd->active_wakeup = pd_active_wakeup;
136
137 if (sh7372_pd == &sh7372_a4lc) {
138 genpd->power_off = pd_power_down_a4lc;
139 genpd->power_on = pd_power_up;
140 } else if (sh7372_pd == &sh7372_a3rv) {
141 genpd->power_off = pd_power_down_a3rv;
142 genpd->power_on = pd_power_up_a3rv;
143 } else {
144 genpd->power_off = pd_power_down;
145 genpd->power_on = pd_power_up;
146 }
147 genpd->power_on(&sh7372_pd->genpd);
148}
149
150void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
151 struct platform_device *pdev)
152{
153 struct device *dev = &pdev->dev;
154
155 if (!dev->power.subsys_data) {
156 pm_clk_init(dev);
157 pm_clk_add(dev, NULL);
158 }
159 pm_genpd_add_device(&sh7372_pd->genpd, dev);
160}
161
162struct sh7372_pm_domain sh7372_a4lc = {
163 .bit_shift = 1,
164};
165
166struct sh7372_pm_domain sh7372_a4mp = {
167 .bit_shift = 2,
168};
169
170struct sh7372_pm_domain sh7372_d4 = {
171 .bit_shift = 3,
172};
173
174struct sh7372_pm_domain sh7372_a3rv = {
175 .bit_shift = 6,
176};
177
178struct sh7372_pm_domain sh7372_a3ri = {
179 .bit_shift = 8,
180};
181
182struct sh7372_pm_domain sh7372_a3sg = {
183 .bit_shift = 13,
184};
185
186#endif /* CONFIG_PM */
187
28static void sh7372_enter_core_standby(void) 188static void sh7372_enter_core_standby(void)
29{ 189{
30 void __iomem *smfram = (void __iomem *)SMFRAM; 190 void __iomem *smfram = (void __iomem *)SMFRAM;
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 2d1b67a59e4a..6ec454e1e063 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/pm_runtime.h> 16#include <linux/pm_runtime.h>
17#include <linux/pm_domain.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/clk.h> 19#include <linux/clk.h>
19#include <linux/sh_clk.h> 20#include <linux/sh_clk.h>
@@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev)
28 return pm_runtime_suspend(dev); 29 return pm_runtime_suspend(dev);
29} 30}
30 31
31static struct dev_power_domain default_power_domain = { 32static struct dev_pm_domain default_pm_domain = {
32 .ops = { 33 .ops = {
33 .runtime_suspend = pm_runtime_clk_suspend, 34 .runtime_suspend = pm_clk_suspend,
34 .runtime_resume = pm_runtime_clk_resume, 35 .runtime_resume = pm_clk_resume,
35 .runtime_idle = default_platform_runtime_idle, 36 .runtime_idle = default_platform_runtime_idle,
36 USE_PLATFORM_PM_SLEEP_OPS 37 USE_PLATFORM_PM_SLEEP_OPS
37 }, 38 },
38}; 39};
39 40
40#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) 41#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
41 42
42#else 43#else
43 44
44#define DEFAULT_PWR_DOMAIN_PTR NULL 45#define DEFAULT_PM_DOMAIN_PTR NULL
45 46
46#endif /* CONFIG_PM_RUNTIME */ 47#endif /* CONFIG_PM_RUNTIME */
47 48
48static struct pm_clk_notifier_block platform_bus_notifier = { 49static struct pm_clk_notifier_block platform_bus_notifier = {
49 .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, 50 .pm_domain = DEFAULT_PM_DOMAIN_PTR,
50 .con_ids = { NULL, }, 51 .con_ids = { NULL, },
51}; 52};
52 53
53static int __init sh_pm_runtime_init(void) 54static int __init sh_pm_runtime_init(void)
54{ 55{
55 pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 56 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
56 return 0; 57 return 0;
57} 58}
58core_initcall(sh_pm_runtime_init); 59core_initcall(sh_pm_runtime_init);
60
61static int __init sh_pm_runtime_late_init(void)
62{
63 pm_genpd_poweroff_unused();
64 return 0;
65}
66late_initcall(sh_pm_runtime_late_init);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index cd807eea69e2..79f0413d8725 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
841 841
842void __init sh7372_add_standard_devices(void) 842void __init sh7372_add_standard_devices(void)
843{ 843{
844 sh7372_init_pm_domain(&sh7372_a4lc);
845 sh7372_init_pm_domain(&sh7372_a4mp);
846 sh7372_init_pm_domain(&sh7372_d4);
847 sh7372_init_pm_domain(&sh7372_a3rv);
848 sh7372_init_pm_domain(&sh7372_a3ri);
849 sh7372_init_pm_domain(&sh7372_a3sg);
850
844 platform_add_devices(sh7372_early_devices, 851 platform_add_devices(sh7372_early_devices,
845 ARRAY_SIZE(sh7372_early_devices)); 852 ARRAY_SIZE(sh7372_early_devices));
846 853
847 platform_add_devices(sh7372_late_devices, 854 platform_add_devices(sh7372_late_devices,
848 ARRAY_SIZE(sh7372_late_devices)); 855 ARRAY_SIZE(sh7372_late_devices));
856
857 sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
858 sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
859 sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
849} 860}
850 861
851void __init sh7372_add_early_devices(void) 862void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349ff9fd8..ee405b36df4b 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -44,6 +44,10 @@ extern struct device omap_device_parent;
44#define OMAP_DEVICE_STATE_IDLE 2 44#define OMAP_DEVICE_STATE_IDLE 2
45#define OMAP_DEVICE_STATE_SHUTDOWN 3 45#define OMAP_DEVICE_STATE_SHUTDOWN 3
46 46
47/* omap_device.flags values */
48#define OMAP_DEVICE_SUSPENDED BIT(0)
49#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1)
50
47/** 51/**
48 * struct omap_device - omap_device wrapper for platform_devices 52 * struct omap_device - omap_device wrapper for platform_devices
49 * @pdev: platform_device 53 * @pdev: platform_device
@@ -73,6 +77,7 @@ struct omap_device {
73 s8 pm_lat_level; 77 s8 pm_lat_level;
74 u8 hwmods_cnt; 78 u8 hwmods_cnt;
75 u8 _state; 79 u8 _state;
80 u8 flags;
76}; 81};
77 82
78/* Device driver interface (call via platform_data fn ptrs) */ 83/* Device driver interface (call via platform_data fn ptrs) */
@@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od);
117int omap_device_disable_clocks(struct omap_device *od); 122int omap_device_disable_clocks(struct omap_device *od);
118int omap_device_enable_clocks(struct omap_device *od); 123int omap_device_enable_clocks(struct omap_device *od);
119 124
125static inline void omap_device_disable_idle_on_suspend(struct omap_device *od)
126{
127 od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND;
128}
120 129
121/* 130/*
122 * Entries should be kept in latency order ascending 131 * Entries should be kept in latency order ascending
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df0c21f..2526fa312b8a 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od)
537 return 0; 537 return 0;
538} 538}
539 539
540#ifdef CONFIG_PM_RUNTIME
540static int _od_runtime_suspend(struct device *dev) 541static int _od_runtime_suspend(struct device *dev)
541{ 542{
542 struct platform_device *pdev = to_platform_device(dev); 543 struct platform_device *pdev = to_platform_device(dev);
@@ -563,13 +564,55 @@ static int _od_runtime_resume(struct device *dev)
563 564
564 return pm_generic_runtime_resume(dev); 565 return pm_generic_runtime_resume(dev);
565} 566}
567#endif
566 568
567static struct dev_power_domain omap_device_power_domain = { 569#ifdef CONFIG_SUSPEND
570static int _od_suspend_noirq(struct device *dev)
571{
572 struct platform_device *pdev = to_platform_device(dev);
573 struct omap_device *od = to_omap_device(pdev);
574 int ret;
575
576 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
577 return pm_generic_suspend_noirq(dev);
578
579 ret = pm_generic_suspend_noirq(dev);
580
581 if (!ret && !pm_runtime_status_suspended(dev)) {
582 if (pm_generic_runtime_suspend(dev) == 0) {
583 omap_device_idle(pdev);
584 od->flags |= OMAP_DEVICE_SUSPENDED;
585 }
586 }
587
588 return ret;
589}
590
591static int _od_resume_noirq(struct device *dev)
592{
593 struct platform_device *pdev = to_platform_device(dev);
594 struct omap_device *od = to_omap_device(pdev);
595
596 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
597 return pm_generic_resume_noirq(dev);
598
599 if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
600 !pm_runtime_status_suspended(dev)) {
601 od->flags &= ~OMAP_DEVICE_SUSPENDED;
602 omap_device_enable(pdev);
603 pm_generic_runtime_resume(dev);
604 }
605
606 return pm_generic_resume_noirq(dev);
607}
608#endif
609
610static struct dev_pm_domain omap_device_pm_domain = {
568 .ops = { 611 .ops = {
569 .runtime_suspend = _od_runtime_suspend, 612 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
570 .runtime_idle = _od_runtime_idle, 613 _od_runtime_idle)
571 .runtime_resume = _od_runtime_resume,
572 USE_PLATFORM_PM_SLEEP_OPS 614 USE_PLATFORM_PM_SLEEP_OPS
615 SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq)
573 } 616 }
574}; 617};
575 618
@@ -586,7 +629,7 @@ int omap_device_register(struct omap_device *od)
586 pr_debug("omap_device: %s: registering\n", od->pdev.name); 629 pr_debug("omap_device: %s: registering\n", od->pdev.name);
587 630
588 od->pdev.dev.parent = &omap_device_parent; 631 od->pdev.dev.parent = &omap_device_parent;
589 od->pdev.dev.pwr_domain = &omap_device_power_domain; 632 od->pdev.dev.pm_domain = &omap_device_pm_domain;
590 return platform_device_register(&od->pdev); 633 return platform_device_register(&od->pdev);
591} 634}
592 635
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 64c807c39208..bf280c812d2f 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -256,7 +256,7 @@ out:
256 return ret; 256 return ret;
257} 257}
258 258
259static struct dev_power_domain default_power_domain = { 259static struct dev_pm_domain default_pm_domain = {
260 .ops = { 260 .ops = {
261 .runtime_suspend = default_platform_runtime_suspend, 261 .runtime_suspend = default_platform_runtime_suspend,
262 .runtime_resume = default_platform_runtime_resume, 262 .runtime_resume = default_platform_runtime_resume,
@@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
285 hwblk_disable(hwblk_info, hwblk); 285 hwblk_disable(hwblk_info, hwblk);
286 /* make sure driver re-inits itself once */ 286 /* make sure driver re-inits itself once */
287 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 287 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
288 dev->pwr_domain = &default_power_domain; 288 dev->pm_domain = &default_pm_domain;
289 break; 289 break;
290 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ 290 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
291 case BUS_NOTIFY_BOUND_DRIVER: 291 case BUS_NOTIFY_BOUND_DRIVER:
@@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
299 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 299 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
300 break; 300 break;
301 case BUS_NOTIFY_DEL_DEVICE: 301 case BUS_NOTIFY_DEL_DEVICE:
302 dev->pwr_domain = NULL; 302 dev->pm_domain = NULL;
303 break; 303 break;
304 } 304 }
305 return 0; 305 return 0;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e7..2639ae79a372 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
3obj-$(CONFIG_PM_RUNTIME) += runtime.o 3obj-$(CONFIG_PM_RUNTIME) += runtime.o
4obj-$(CONFIG_PM_TRACE_RTC) += trace.o 4obj-$(CONFIG_PM_TRACE_RTC) += trace.o
5obj-$(CONFIG_PM_OPP) += opp.o 5obj-$(CONFIG_PM_OPP) += opp.o
6obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
6obj-$(CONFIG_HAVE_CLK) += clock_ops.o 7obj-$(CONFIG_HAVE_CLK) += clock_ops.o
7 8
8ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file 9ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ad367c4139b1..a846b2f95cfb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/err.h> 16#include <linux/err.h>
17 17
18#ifdef CONFIG_PM_RUNTIME 18#ifdef CONFIG_PM
19 19
20struct pm_runtime_clk_data { 20struct pm_clk_data {
21 struct list_head clock_list; 21 struct list_head clock_list;
22 struct mutex lock; 22 struct mutex lock;
23}; 23};
@@ -36,25 +36,25 @@ struct pm_clock_entry {
36 enum pce_status status; 36 enum pce_status status;
37}; 37};
38 38
39static struct pm_runtime_clk_data *__to_prd(struct device *dev) 39static struct pm_clk_data *__to_pcd(struct device *dev)
40{ 40{
41 return dev ? dev->power.subsys_data : NULL; 41 return dev ? dev->power.subsys_data : NULL;
42} 42}
43 43
44/** 44/**
45 * pm_runtime_clk_add - Start using a device clock for runtime PM. 45 * pm_clk_add - Start using a device clock for power management.
46 * @dev: Device whose clock is going to be used for runtime PM. 46 * @dev: Device whose clock is going to be used for power management.
47 * @con_id: Connection ID of the clock. 47 * @con_id: Connection ID of the clock.
48 * 48 *
49 * Add the clock represented by @con_id to the list of clocks used for 49 * Add the clock represented by @con_id to the list of clocks used for
50 * the runtime PM of @dev. 50 * the power management of @dev.
51 */ 51 */
52int pm_runtime_clk_add(struct device *dev, const char *con_id) 52int pm_clk_add(struct device *dev, const char *con_id)
53{ 53{
54 struct pm_runtime_clk_data *prd = __to_prd(dev); 54 struct pm_clk_data *pcd = __to_pcd(dev);
55 struct pm_clock_entry *ce; 55 struct pm_clock_entry *ce;
56 56
57 if (!prd) 57 if (!pcd)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 ce = kzalloc(sizeof(*ce), GFP_KERNEL); 60 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
73 } 73 }
74 } 74 }
75 75
76 mutex_lock(&prd->lock); 76 mutex_lock(&pcd->lock);
77 list_add_tail(&ce->node, &prd->clock_list); 77 list_add_tail(&ce->node, &pcd->clock_list);
78 mutex_unlock(&prd->lock); 78 mutex_unlock(&pcd->lock);
79 return 0; 79 return 0;
80} 80}
81 81
82/** 82/**
83 * __pm_runtime_clk_remove - Destroy runtime PM clock entry. 83 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: Runtime PM clock entry to destroy. 84 * @ce: PM clock entry to destroy.
85 * 85 *
86 * This routine must be called under the mutex protecting the runtime PM list 86 * This routine must be called under the mutex protecting the PM list of clocks
87 * of clocks corresponding the the @ce's device. 87 * corresponding the the @ce's device.
88 */ 88 */
89static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) 89static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 90{
91 if (!ce) 91 if (!ce)
92 return; 92 return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
108} 108}
109 109
110/** 110/**
111 * pm_runtime_clk_remove - Stop using a device clock for runtime PM. 111 * pm_clk_remove - Stop using a device clock for power management.
112 * @dev: Device whose clock should not be used for runtime PM any more. 112 * @dev: Device whose clock should not be used for PM any more.
113 * @con_id: Connection ID of the clock. 113 * @con_id: Connection ID of the clock.
114 * 114 *
115 * Remove the clock represented by @con_id from the list of clocks used for 115 * Remove the clock represented by @con_id from the list of clocks used for
116 * the runtime PM of @dev. 116 * the power management of @dev.
117 */ 117 */
118void pm_runtime_clk_remove(struct device *dev, const char *con_id) 118void pm_clk_remove(struct device *dev, const char *con_id)
119{ 119{
120 struct pm_runtime_clk_data *prd = __to_prd(dev); 120 struct pm_clk_data *pcd = __to_pcd(dev);
121 struct pm_clock_entry *ce; 121 struct pm_clock_entry *ce;
122 122
123 if (!prd) 123 if (!pcd)
124 return; 124 return;
125 125
126 mutex_lock(&prd->lock); 126 mutex_lock(&pcd->lock);
127 127
128 list_for_each_entry(ce, &prd->clock_list, node) { 128 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 129 if (!con_id && !ce->con_id) {
130 __pm_runtime_clk_remove(ce); 130 __pm_clk_remove(ce);
131 break; 131 break;
132 } else if (!con_id || !ce->con_id) { 132 } else if (!con_id || !ce->con_id) {
133 continue; 133 continue;
134 } else if (!strcmp(con_id, ce->con_id)) { 134 } else if (!strcmp(con_id, ce->con_id)) {
135 __pm_runtime_clk_remove(ce); 135 __pm_clk_remove(ce);
136 break; 136 break;
137 } 137 }
138 } 138 }
139 139
140 mutex_unlock(&prd->lock); 140 mutex_unlock(&pcd->lock);
141} 141}
142 142
143/** 143/**
144 * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. 144 * pm_clk_init - Initialize a device's list of power management clocks.
145 * @dev: Device to initialize the list of runtime PM clocks for. 145 * @dev: Device to initialize the list of PM clocks for.
146 * 146 *
147 * Allocate a struct pm_runtime_clk_data object, initialize its lock member and 147 * Allocate a struct pm_clk_data object, initialize its lock member and
148 * make the @dev's power.subsys_data field point to it. 148 * make the @dev's power.subsys_data field point to it.
149 */ 149 */
150int pm_runtime_clk_init(struct device *dev) 150int pm_clk_init(struct device *dev)
151{ 151{
152 struct pm_runtime_clk_data *prd; 152 struct pm_clk_data *pcd;
153 153
154 prd = kzalloc(sizeof(*prd), GFP_KERNEL); 154 pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
155 if (!prd) { 155 if (!pcd) {
156 dev_err(dev, "Not enough memory fo runtime PM data.\n"); 156 dev_err(dev, "Not enough memory for PM clock data.\n");
157 return -ENOMEM; 157 return -ENOMEM;
158 } 158 }
159 159
160 INIT_LIST_HEAD(&prd->clock_list); 160 INIT_LIST_HEAD(&pcd->clock_list);
161 mutex_init(&prd->lock); 161 mutex_init(&pcd->lock);
162 dev->power.subsys_data = prd; 162 dev->power.subsys_data = pcd;
163 return 0; 163 return 0;
164} 164}
165 165
166/** 166/**
167 * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. 167 * pm_clk_destroy - Destroy a device's list of power management clocks.
168 * @dev: Device to destroy the list of runtime PM clocks for. 168 * @dev: Device to destroy the list of PM clocks for.
169 * 169 *
170 * Clear the @dev's power.subsys_data field, remove the list of clock entries 170 * Clear the @dev's power.subsys_data field, remove the list of clock entries
171 * from the struct pm_runtime_clk_data object pointed to by it before and free 171 * from the struct pm_clk_data object pointed to by it before and free
172 * that object. 172 * that object.
173 */ 173 */
174void pm_runtime_clk_destroy(struct device *dev) 174void pm_clk_destroy(struct device *dev)
175{ 175{
176 struct pm_runtime_clk_data *prd = __to_prd(dev); 176 struct pm_clk_data *pcd = __to_pcd(dev);
177 struct pm_clock_entry *ce, *c; 177 struct pm_clock_entry *ce, *c;
178 178
179 if (!prd) 179 if (!pcd)
180 return; 180 return;
181 181
182 dev->power.subsys_data = NULL; 182 dev->power.subsys_data = NULL;
183 183
184 mutex_lock(&prd->lock); 184 mutex_lock(&pcd->lock);
185 185
186 list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) 186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_runtime_clk_remove(ce); 187 __pm_clk_remove(ce);
188 188
189 mutex_unlock(&prd->lock); 189 mutex_unlock(&pcd->lock);
190 190
191 kfree(prd); 191 kfree(pcd);
192} 192}
193 193
194#endif /* CONFIG_PM */
195
196#ifdef CONFIG_PM_RUNTIME
197
194/** 198/**
195 * pm_runtime_clk_acquire - Acquire a device clock. 199 * pm_clk_acquire - Acquire a device clock.
196 * @dev: Device whose clock is to be acquired. 200 * @dev: Device whose clock is to be acquired.
197 * @con_id: Connection ID of the clock. 201 * @con_id: Connection ID of the clock.
198 */ 202 */
199static void pm_runtime_clk_acquire(struct device *dev, 203static void pm_clk_acquire(struct device *dev,
200 struct pm_clock_entry *ce) 204 struct pm_clock_entry *ce)
201{ 205{
202 ce->clk = clk_get(dev, ce->con_id); 206 ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
209} 213}
210 214
211/** 215/**
212 * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. 216 * pm_clk_suspend - Disable clocks in a device's PM clock list.
213 * @dev: Device to disable the clocks for. 217 * @dev: Device to disable the clocks for.
214 */ 218 */
215int pm_runtime_clk_suspend(struct device *dev) 219int pm_clk_suspend(struct device *dev)
216{ 220{
217 struct pm_runtime_clk_data *prd = __to_prd(dev); 221 struct pm_clk_data *pcd = __to_pcd(dev);
218 struct pm_clock_entry *ce; 222 struct pm_clock_entry *ce;
219 223
220 dev_dbg(dev, "%s()\n", __func__); 224 dev_dbg(dev, "%s()\n", __func__);
221 225
222 if (!prd) 226 if (!pcd)
223 return 0; 227 return 0;
224 228
225 mutex_lock(&prd->lock); 229 mutex_lock(&pcd->lock);
226 230
227 list_for_each_entry_reverse(ce, &prd->clock_list, node) { 231 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
228 if (ce->status == PCE_STATUS_NONE) 232 if (ce->status == PCE_STATUS_NONE)
229 pm_runtime_clk_acquire(dev, ce); 233 pm_clk_acquire(dev, ce);
230 234
231 if (ce->status < PCE_STATUS_ERROR) { 235 if (ce->status < PCE_STATUS_ERROR) {
232 clk_disable(ce->clk); 236 clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
234 } 238 }
235 } 239 }
236 240
237 mutex_unlock(&prd->lock); 241 mutex_unlock(&pcd->lock);
238 242
239 return 0; 243 return 0;
240} 244}
241 245
242/** 246/**
243 * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. 247 * pm_clk_resume - Enable clocks in a device's PM clock list.
244 * @dev: Device to enable the clocks for. 248 * @dev: Device to enable the clocks for.
245 */ 249 */
246int pm_runtime_clk_resume(struct device *dev) 250int pm_clk_resume(struct device *dev)
247{ 251{
248 struct pm_runtime_clk_data *prd = __to_prd(dev); 252 struct pm_clk_data *pcd = __to_pcd(dev);
249 struct pm_clock_entry *ce; 253 struct pm_clock_entry *ce;
250 254
251 dev_dbg(dev, "%s()\n", __func__); 255 dev_dbg(dev, "%s()\n", __func__);
252 256
253 if (!prd) 257 if (!pcd)
254 return 0; 258 return 0;
255 259
256 mutex_lock(&prd->lock); 260 mutex_lock(&pcd->lock);
257 261
258 list_for_each_entry(ce, &prd->clock_list, node) { 262 list_for_each_entry(ce, &pcd->clock_list, node) {
259 if (ce->status == PCE_STATUS_NONE) 263 if (ce->status == PCE_STATUS_NONE)
260 pm_runtime_clk_acquire(dev, ce); 264 pm_clk_acquire(dev, ce);
261 265
262 if (ce->status < PCE_STATUS_ERROR) { 266 if (ce->status < PCE_STATUS_ERROR) {
263 clk_enable(ce->clk); 267 clk_enable(ce->clk);
@@ -265,28 +269,28 @@ int pm_runtime_clk_resume(struct device *dev)
265 } 269 }
266 } 270 }
267 271
268 mutex_unlock(&prd->lock); 272 mutex_unlock(&pcd->lock);
269 273
270 return 0; 274 return 0;
271} 275}
272 276
273/** 277/**
274 * pm_runtime_clk_notify - Notify routine for device addition and removal. 278 * pm_clk_notify - Notify routine for device addition and removal.
275 * @nb: Notifier block object this function is a member of. 279 * @nb: Notifier block object this function is a member of.
276 * @action: Operation being carried out by the caller. 280 * @action: Operation being carried out by the caller.
277 * @data: Device the routine is being run for. 281 * @data: Device the routine is being run for.
278 * 282 *
279 * For this function to work, @nb must be a member of an object of type 283 * For this function to work, @nb must be a member of an object of type
280 * struct pm_clk_notifier_block containing all of the requisite data. 284 * struct pm_clk_notifier_block containing all of the requisite data.
281 * Specifically, the pwr_domain member of that object is copied to the device's 285 * Specifically, the pm_domain member of that object is copied to the device's
282 * pwr_domain field and its con_ids member is used to populate the device's list 286 * pm_domain field and its con_ids member is used to populate the device's list
283 * of runtime PM clocks, depending on @action. 287 * of PM clocks, depending on @action.
284 * 288 *
285 * If the device's pwr_domain field is already populated with a value different 289 * If the device's pm_domain field is already populated with a value different
286 * from the one stored in the struct pm_clk_notifier_block object, the function 290 * from the one stored in the struct pm_clk_notifier_block object, the function
287 * does nothing. 291 * does nothing.
288 */ 292 */
289static int pm_runtime_clk_notify(struct notifier_block *nb, 293static int pm_clk_notify(struct notifier_block *nb,
290 unsigned long action, void *data) 294 unsigned long action, void *data)
291{ 295{
292 struct pm_clk_notifier_block *clknb; 296 struct pm_clk_notifier_block *clknb;
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
300 304
301 switch (action) { 305 switch (action) {
302 case BUS_NOTIFY_ADD_DEVICE: 306 case BUS_NOTIFY_ADD_DEVICE:
303 if (dev->pwr_domain) 307 if (dev->pm_domain)
304 break; 308 break;
305 309
306 error = pm_runtime_clk_init(dev); 310 error = pm_clk_init(dev);
307 if (error) 311 if (error)
308 break; 312 break;
309 313
310 dev->pwr_domain = clknb->pwr_domain; 314 dev->pm_domain = clknb->pm_domain;
311 if (clknb->con_ids[0]) { 315 if (clknb->con_ids[0]) {
312 for (con_id = clknb->con_ids; *con_id; con_id++) 316 for (con_id = clknb->con_ids; *con_id; con_id++)
313 pm_runtime_clk_add(dev, *con_id); 317 pm_clk_add(dev, *con_id);
314 } else { 318 } else {
315 pm_runtime_clk_add(dev, NULL); 319 pm_clk_add(dev, NULL);
316 } 320 }
317 321
318 break; 322 break;
319 case BUS_NOTIFY_DEL_DEVICE: 323 case BUS_NOTIFY_DEL_DEVICE:
320 if (dev->pwr_domain != clknb->pwr_domain) 324 if (dev->pm_domain != clknb->pm_domain)
321 break; 325 break;
322 326
323 dev->pwr_domain = NULL; 327 dev->pm_domain = NULL;
324 pm_runtime_clk_destroy(dev); 328 pm_clk_destroy(dev);
325 break; 329 break;
326 } 330 }
327 331
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
330 334
331#else /* !CONFIG_PM_RUNTIME */ 335#else /* !CONFIG_PM_RUNTIME */
332 336
337#ifdef CONFIG_PM
338
339/**
340 * pm_clk_suspend - Disable clocks in a device's PM clock list.
341 * @dev: Device to disable the clocks for.
342 */
343int pm_clk_suspend(struct device *dev)
344{
345 struct pm_clk_data *pcd = __to_pcd(dev);
346 struct pm_clock_entry *ce;
347
348 dev_dbg(dev, "%s()\n", __func__);
349
350 /* If there is no driver, the clocks are already disabled. */
351 if (!pcd || !dev->driver)
352 return 0;
353
354 mutex_lock(&pcd->lock);
355
356 list_for_each_entry_reverse(ce, &pcd->clock_list, node)
357 clk_disable(ce->clk);
358
359 mutex_unlock(&pcd->lock);
360
361 return 0;
362}
363
364/**
365 * pm_clk_resume - Enable clocks in a device's PM clock list.
366 * @dev: Device to enable the clocks for.
367 */
368int pm_clk_resume(struct device *dev)
369{
370 struct pm_clk_data *pcd = __to_pcd(dev);
371 struct pm_clock_entry *ce;
372
373 dev_dbg(dev, "%s()\n", __func__);
374
375 /* If there is no driver, the clocks should remain disabled. */
376 if (!pcd || !dev->driver)
377 return 0;
378
379 mutex_lock(&pcd->lock);
380
381 list_for_each_entry(ce, &pcd->clock_list, node)
382 clk_enable(ce->clk);
383
384 mutex_unlock(&pcd->lock);
385
386 return 0;
387}
388
389#endif /* CONFIG_PM */
390
333/** 391/**
334 * enable_clock - Enable a device clock. 392 * enable_clock - Enable a device clock.
335 * @dev: Device whose clock is to be enabled. 393 * @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
365} 423}
366 424
367/** 425/**
368 * pm_runtime_clk_notify - Notify routine for device addition and removal. 426 * pm_clk_notify - Notify routine for device addition and removal.
369 * @nb: Notifier block object this function is a member of. 427 * @nb: Notifier block object this function is a member of.
370 * @action: Operation being carried out by the caller. 428 * @action: Operation being carried out by the caller.
371 * @data: Device the routine is being run for. 429 * @data: Device the routine is being run for.
@@ -375,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
375 * Specifically, the con_ids member of that object is used to enable or disable 433 * Specifically, the con_ids member of that object is used to enable or disable
376 * the device's clocks, depending on @action. 434 * the device's clocks, depending on @action.
377 */ 435 */
378static int pm_runtime_clk_notify(struct notifier_block *nb, 436static int pm_clk_notify(struct notifier_block *nb,
379 unsigned long action, void *data) 437 unsigned long action, void *data)
380{ 438{
381 struct pm_clk_notifier_block *clknb; 439 struct pm_clk_notifier_block *clknb;
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
411#endif /* !CONFIG_PM_RUNTIME */ 469#endif /* !CONFIG_PM_RUNTIME */
412 470
413/** 471/**
414 * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. 472 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
415 * @bus: Bus type to add the notifier to. 473 * @bus: Bus type to add the notifier to.
416 * @clknb: Notifier to be added to the given bus type. 474 * @clknb: Notifier to be added to the given bus type.
417 * 475 *
418 * The nb member of @clknb is not expected to be initialized and its 476 * The nb member of @clknb is not expected to be initialized and its
419 * notifier_call member will be replaced with pm_runtime_clk_notify(). However, 477 * notifier_call member will be replaced with pm_clk_notify(). However,
420 * the remaining members of @clknb should be populated prior to calling this 478 * the remaining members of @clknb should be populated prior to calling this
421 * routine. 479 * routine.
422 */ 480 */
423void pm_runtime_clk_add_notifier(struct bus_type *bus, 481void pm_clk_add_notifier(struct bus_type *bus,
424 struct pm_clk_notifier_block *clknb) 482 struct pm_clk_notifier_block *clknb)
425{ 483{
426 if (!bus || !clknb) 484 if (!bus || !clknb)
427 return; 485 return;
428 486
429 clknb->nb.notifier_call = pm_runtime_clk_notify; 487 clknb->nb.notifier_call = pm_clk_notify;
430 bus_register_notifier(bus, &clknb->nb); 488 bus_register_notifier(bus, &clknb->nb);
431} 489}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000000..be8714aa9dd6
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
18
19static LIST_HEAD(gpd_list);
20static DEFINE_MUTEX(gpd_list_lock);
21
22#ifdef CONFIG_PM
23
24static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25{
26 if (IS_ERR_OR_NULL(dev->pm_domain))
27 return ERR_PTR(-EINVAL);
28
29 return pd_to_genpd(dev->pm_domain);
30}
31
32static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33{
34 if (!WARN_ON(genpd->sd_count == 0))
35 genpd->sd_count--;
36}
37
38static void genpd_acquire_lock(struct generic_pm_domain *genpd)
39{
40 DEFINE_WAIT(wait);
41
42 mutex_lock(&genpd->lock);
43 /*
44 * Wait for the domain to transition into either the active,
45 * or the power off state.
46 */
47 for (;;) {
48 prepare_to_wait(&genpd->status_wait_queue, &wait,
49 TASK_UNINTERRUPTIBLE);
50 if (genpd->status == GPD_STATE_ACTIVE
51 || genpd->status == GPD_STATE_POWER_OFF)
52 break;
53 mutex_unlock(&genpd->lock);
54
55 schedule();
56
57 mutex_lock(&genpd->lock);
58 }
59 finish_wait(&genpd->status_wait_queue, &wait);
60}
61
62static void genpd_release_lock(struct generic_pm_domain *genpd)
63{
64 mutex_unlock(&genpd->lock);
65}
66
67static void genpd_set_active(struct generic_pm_domain *genpd)
68{
69 if (genpd->resume_count == 0)
70 genpd->status = GPD_STATE_ACTIVE;
71}
72
73/**
74 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
75 * @genpd: PM domain to power up.
76 *
77 * Restore power to @genpd and all of its parents so that it is possible to
78 * resume a device belonging to it.
79 */
80int pm_genpd_poweron(struct generic_pm_domain *genpd)
81{
82 struct generic_pm_domain *parent = genpd->parent;
83 DEFINE_WAIT(wait);
84 int ret = 0;
85
86 start:
87 if (parent) {
88 genpd_acquire_lock(parent);
89 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
90 } else {
91 mutex_lock(&genpd->lock);
92 }
93
94 if (genpd->status == GPD_STATE_ACTIVE
95 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
96 goto out;
97
98 if (genpd->status != GPD_STATE_POWER_OFF) {
99 genpd_set_active(genpd);
100 goto out;
101 }
102
103 if (parent && parent->status != GPD_STATE_ACTIVE) {
104 mutex_unlock(&genpd->lock);
105 genpd_release_lock(parent);
106
107 ret = pm_genpd_poweron(parent);
108 if (ret)
109 return ret;
110
111 goto start;
112 }
113
114 if (genpd->power_on) {
115 int ret = genpd->power_on(genpd);
116 if (ret)
117 goto out;
118 }
119
120 genpd_set_active(genpd);
121 if (parent)
122 parent->sd_count++;
123
124 out:
125 mutex_unlock(&genpd->lock);
126 if (parent)
127 genpd_release_lock(parent);
128
129 return ret;
130}
131
132#endif /* CONFIG_PM */
133
134#ifdef CONFIG_PM_RUNTIME
135
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @dle: Device list entry of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Invoke the driver's runtime suspend callback to save the device's state,
 * temporarily dropping @genpd->lock around the callback.  The device is
 * started before and stopped after the callback so that it is accessible
 * while its state is being saved.
 */
static int __pm_genpd_save_device(struct dev_list_entry *dle,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

	/* Nothing to do if the state has already been saved. */
	if (dle->need_restore)
		return 0;

	/* Drop the lock: the driver callback may sleep and take other locks. */
	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_suspend) {
		if (genpd->start_device)
			genpd->start_device(dev);

		ret = drv->pm->runtime_suspend(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	/* On success, remember to restore the state on the next power-up. */
	if (!ret)
		dle->need_restore = true;

	return ret;
}
171
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @dle: Device list entry of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Invoke the driver's runtime resume callback to restore the state saved by
 * __pm_genpd_save_device(), temporarily dropping @genpd->lock around the
 * callback.
 */
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct device *dev = dle->dev;
	struct device_driver *drv = dev->driver;

	/* Nothing to do unless the state was saved before. */
	if (!dle->need_restore)
		return;

	/* Drop the lock: the driver callback may sleep and take other locks. */
	mutex_unlock(&genpd->lock);

	if (drv && drv->pm && drv->pm->runtime_resume) {
		if (genpd->start_device)
			genpd->start_device(dev);

		drv->pm->runtime_resume(dev);

		if (genpd->stop_device)
			genpd->stop_device(dev);
	}

	mutex_lock(&genpd->lock);

	dle->need_restore = false;
}
203
204/**
205 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
206 * @genpd: PM domain to check.
207 *
208 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
209 * a "power off" operation, which means that a "power on" has occured in the
210 * meantime, or if its resume_count field is different from zero, which means
211 * that one of its devices has been resumed in the meantime.
212 */
213static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
214{
215 return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
216}
217
218/**
219 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
220 * @genpd: PM domait to power off.
221 *
222 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
223 * before.
224 */
225void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
226{
227 if (!work_pending(&genpd->power_off_work))
228 queue_work(pm_wq, &genpd->power_off_work);
229}
230
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Must be called with @genpd->lock held; the lock is dropped and reacquired
 * internally (around device callbacks and when locking the parent).  May be
 * told to restart by a concurrent power-on (GPD_STATE_REPEAT).
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain *parent;
	struct dev_list_entry *dle;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 * (3) One of the domain's devices is being resumed right now.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
	    || genpd->resume_count > 0)
		return 0;

	/* Powered-on subdomains keep this domain busy. */
	if (genpd->sd_count > 0)
		return -EBUSY;

	/*
	 * Count devices that are still active; devices currently suspending
	 * through this path are accounted for by genpd->in_progress.
	 */
	not_suspended = 0;
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	/* Let the governor veto the power off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	/* Save device states; the lock is dropped around each callback. */
	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
		ret = __pm_genpd_save_device(dle, genpd);
		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (genpd->status == GPD_STATE_REPEAT) {
			/* A concurrent power-on asked us to start over. */
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	parent = genpd->parent;
	if (parent) {
		/*
		 * Re-lock in parent-then-child order; re-check for abort
		 * because the state may have changed while unlocked.
		 */
		mutex_unlock(&genpd->lock);

		genpd_acquire_lock(parent);
		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

		if (genpd_abort_poweroff(genpd)) {
			genpd_release_lock(parent);
			goto out;
		}
	}

	if (genpd->power_off) {
		/* -EBUSY from the callback means "keep the domain on". */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			if (parent)
				genpd_release_lock(parent);

			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	if (parent) {
		/* The parent may now be unused; try to power it off too. */
		genpd_sd_counter_dec(parent);
		if (parent->sd_count == 0)
			genpd_queue_power_off_work(parent);

		genpd_release_lock(parent);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
341
342/**
343 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
344 * @work: Work structure used for scheduling the execution of this function.
345 */
346static void genpd_power_off_work_fn(struct work_struct *work)
347{
348 struct generic_pm_domain *genpd;
349
350 genpd = container_of(work, struct generic_pm_domain, power_off_work);
351
352 genpd_acquire_lock(genpd);
353 pm_genpd_poweroff(genpd);
354 genpd_release_lock(genpd);
355}
356
357/**
358 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
359 * @dev: Device to suspend.
360 *
361 * Carry out a runtime suspend of a device under the assumption that its
362 * pm_domain field points to the domain member of an object of type
363 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
364 */
365static int pm_genpd_runtime_suspend(struct device *dev)
366{
367 struct generic_pm_domain *genpd;
368
369 dev_dbg(dev, "%s()\n", __func__);
370
371 genpd = dev_to_genpd(dev);
372 if (IS_ERR(genpd))
373 return -EINVAL;
374
375 if (genpd->stop_device) {
376 int ret = genpd->stop_device(dev);
377 if (ret)
378 return ret;
379 }
380
381 mutex_lock(&genpd->lock);
382 genpd->in_progress++;
383 pm_genpd_poweroff(genpd);
384 genpd->in_progress--;
385 mutex_unlock(&genpd->lock);
386
387 return 0;
388}
389
390/**
391 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
392 * @dev: Device to resume.
393 * @genpd: PM domain the device belongs to.
394 */
395static void __pm_genpd_runtime_resume(struct device *dev,
396 struct generic_pm_domain *genpd)
397{
398 struct dev_list_entry *dle;
399
400 list_for_each_entry(dle, &genpd->dev_list, node) {
401 if (dle->dev == dev) {
402 __pm_genpd_restore_device(dle, genpd);
403 break;
404 }
405 }
406}
407
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Make sure the domain is powered before touching the device. */
	ret = pm_genpd_poweron(genpd);
	if (ret)
		return ret;

	mutex_lock(&genpd->lock);
	/* Mark the domain busy so concurrent power-off attempts back off. */
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	/* Wait for a running pm_genpd_poweroff() instance, if any. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	/* Restore the state saved when the device was suspended. */
	__pm_genpd_runtime_resume(dev, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	/* Let waiters (e.g. pm_genpd_poweroff()) re-examine the status. */
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	if (genpd->start_device)
		genpd->start_device(dev);

	return 0;
}
463
464#else
465
/* Empty stubs used when runtime PM support is not configured. */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
					     struct generic_pm_domain *genpd) {}
469
470#define pm_genpd_runtime_suspend NULL
471#define pm_genpd_runtime_resume NULL
472
473#endif /* CONFIG_PM_RUNTIME */
474
475#ifdef CONFIG_PM_SLEEP
476
477/**
478 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
479 * @genpd: PM domain to power off, if possible.
480 *
481 * Check if the given PM domain can be powered off (during system suspend or
482 * hibernation) and do that if so. Also, in that case propagate to its parent.
483 *
484 * This function is only called in "noirq" stages of system power transitions,
485 * so it need not acquire locks (all of the "noirq" callbacks are executed
486 * sequentially, so it is guaranteed that it will never run twice in parallel).
487 */
488static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
489{
490 struct generic_pm_domain *parent = genpd->parent;
491
492 if (genpd->status == GPD_STATE_POWER_OFF)
493 return;
494
495 if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
496 return;
497
498 if (genpd->power_off)
499 genpd->power_off(genpd);
500
501 genpd->status = GPD_STATE_POWER_OFF;
502 if (parent) {
503 genpd_sd_counter_dec(parent);
504 pm_genpd_sync_poweroff(parent);
505 }
506}
507
508/**
509 * resume_needed - Check whether to resume a device before system suspend.
510 * @dev: Device to check.
511 * @genpd: PM domain the device belongs to.
512 *
513 * There are two cases in which a device that can wake up the system from sleep
514 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
515 * to wake up the system and it has to remain active for this purpose while the
516 * system is in the sleep state and (2) if the device is not enabled to wake up
517 * the system from sleep states and it generally doesn't generate wakeup signals
518 * by itself (those signals are generated on its behalf by other parts of the
519 * system). In the latter case it may be necessary to reconfigure the device's
520 * wakeup settings during system suspend, because it may have been set up to
521 * signal remote wakeup from the system's working state as needed by runtime PM.
522 * Return 'true' in either of the above cases.
523 */
524static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
525{
526 bool active_wakeup;
527
528 if (!device_can_wakeup(dev))
529 return false;
530
531 active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
532 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
533}
534
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 *
 * Returns -EBUSY if a system wakeup is pending, or the error code from
 * pm_generic_prepare().
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/*
	 * The first device to be prepared latches whether the whole domain
	 * was powered off when the transition started.
	 */
	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by .stop_device()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Undo the prepared_count bump taken above. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	/* Balance the pm_runtime_get_noresume() at the top. */
	pm_runtime_put_sync(dev);
	return ret;
}
607
608/**
609 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
610 * @dev: Device to suspend.
611 *
612 * Suspend a device under the assumption that its pm_domain field points to the
613 * domain member of an object of type struct generic_pm_domain representing
614 * a PM domain consisting of I/O devices.
615 */
616static int pm_genpd_suspend(struct device *dev)
617{
618 struct generic_pm_domain *genpd;
619
620 dev_dbg(dev, "%s()\n", __func__);
621
622 genpd = dev_to_genpd(dev);
623 if (IS_ERR(genpd))
624 return -EINVAL;
625
626 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
627}
628
629/**
630 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
631 * @dev: Device to suspend.
632 *
633 * Carry out a late suspend of a device under the assumption that its
634 * pm_domain field points to the domain member of an object of type
635 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
636 */
637static int pm_genpd_suspend_noirq(struct device *dev)
638{
639 struct generic_pm_domain *genpd;
640 int ret;
641
642 dev_dbg(dev, "%s()\n", __func__);
643
644 genpd = dev_to_genpd(dev);
645 if (IS_ERR(genpd))
646 return -EINVAL;
647
648 if (genpd->suspend_power_off)
649 return 0;
650
651 ret = pm_generic_suspend_noirq(dev);
652 if (ret)
653 return ret;
654
655 if (device_may_wakeup(dev)
656 && genpd->active_wakeup && genpd->active_wakeup(dev))
657 return 0;
658
659 if (genpd->stop_device)
660 genpd->stop_device(dev);
661
662 /*
663 * Since all of the "noirq" callbacks are executed sequentially, it is
664 * guaranteed that this function will never run twice in parallel for
665 * the same PM domain, so it is not necessary to use locking here.
666 */
667 genpd->suspended_count++;
668 pm_genpd_sync_poweroff(genpd);
669
670 return 0;
671}
672
673/**
674 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
675 * @dev: Device to resume.
676 *
677 * Carry out an early resume of a device under the assumption that its
678 * pm_domain field points to the domain member of an object of type
679 * struct generic_pm_domain representing a power domain consisting of I/O
680 * devices.
681 */
682static int pm_genpd_resume_noirq(struct device *dev)
683{
684 struct generic_pm_domain *genpd;
685
686 dev_dbg(dev, "%s()\n", __func__);
687
688 genpd = dev_to_genpd(dev);
689 if (IS_ERR(genpd))
690 return -EINVAL;
691
692 if (genpd->suspend_power_off)
693 return 0;
694
695 /*
696 * Since all of the "noirq" callbacks are executed sequentially, it is
697 * guaranteed that this function will never run twice in parallel for
698 * the same PM domain, so it is not necessary to use locking here.
699 */
700 pm_genpd_poweron(genpd);
701 genpd->suspended_count--;
702 if (genpd->start_device)
703 genpd->start_device(dev);
704
705 return pm_generic_resume_noirq(dev);
706}
707
708/**
709 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
710 * @dev: Device to resume.
711 *
712 * Resume a device under the assumption that its pm_domain field points to the
713 * domain member of an object of type struct generic_pm_domain representing
714 * a power domain consisting of I/O devices.
715 */
716static int pm_genpd_resume(struct device *dev)
717{
718 struct generic_pm_domain *genpd;
719
720 dev_dbg(dev, "%s()\n", __func__);
721
722 genpd = dev_to_genpd(dev);
723 if (IS_ERR(genpd))
724 return -EINVAL;
725
726 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
727}
728
729/**
730 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
731 * @dev: Device to freeze.
732 *
733 * Freeze a device under the assumption that its pm_domain field points to the
734 * domain member of an object of type struct generic_pm_domain representing
735 * a power domain consisting of I/O devices.
736 */
737static int pm_genpd_freeze(struct device *dev)
738{
739 struct generic_pm_domain *genpd;
740
741 dev_dbg(dev, "%s()\n", __func__);
742
743 genpd = dev_to_genpd(dev);
744 if (IS_ERR(genpd))
745 return -EINVAL;
746
747 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
748}
749
750/**
751 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
752 * @dev: Device to freeze.
753 *
754 * Carry out a late freeze of a device under the assumption that its
755 * pm_domain field points to the domain member of an object of type
756 * struct generic_pm_domain representing a power domain consisting of I/O
757 * devices.
758 */
759static int pm_genpd_freeze_noirq(struct device *dev)
760{
761 struct generic_pm_domain *genpd;
762 int ret;
763
764 dev_dbg(dev, "%s()\n", __func__);
765
766 genpd = dev_to_genpd(dev);
767 if (IS_ERR(genpd))
768 return -EINVAL;
769
770 if (genpd->suspend_power_off)
771 return 0;
772
773 ret = pm_generic_freeze_noirq(dev);
774 if (ret)
775 return ret;
776
777 if (genpd->stop_device)
778 genpd->stop_device(dev);
779
780 return 0;
781}
782
783/**
784 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
785 * @dev: Device to thaw.
786 *
787 * Carry out an early thaw of a device under the assumption that its
788 * pm_domain field points to the domain member of an object of type
789 * struct generic_pm_domain representing a power domain consisting of I/O
790 * devices.
791 */
792static int pm_genpd_thaw_noirq(struct device *dev)
793{
794 struct generic_pm_domain *genpd;
795
796 dev_dbg(dev, "%s()\n", __func__);
797
798 genpd = dev_to_genpd(dev);
799 if (IS_ERR(genpd))
800 return -EINVAL;
801
802 if (genpd->suspend_power_off)
803 return 0;
804
805 if (genpd->start_device)
806 genpd->start_device(dev);
807
808 return pm_generic_thaw_noirq(dev);
809}
810
811/**
812 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
813 * @dev: Device to thaw.
814 *
815 * Thaw a device under the assumption that its pm_domain field points to the
816 * domain member of an object of type struct generic_pm_domain representing
817 * a power domain consisting of I/O devices.
818 */
819static int pm_genpd_thaw(struct device *dev)
820{
821 struct generic_pm_domain *genpd;
822
823 dev_dbg(dev, "%s()\n", __func__);
824
825 genpd = dev_to_genpd(dev);
826 if (IS_ERR(genpd))
827 return -EINVAL;
828
829 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
830}
831
832/**
833 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
834 * @dev: Device to suspend.
835 *
836 * Power off a device under the assumption that its pm_domain field points to
837 * the domain member of an object of type struct generic_pm_domain representing
838 * a PM domain consisting of I/O devices.
839 */
840static int pm_genpd_dev_poweroff(struct device *dev)
841{
842 struct generic_pm_domain *genpd;
843
844 dev_dbg(dev, "%s()\n", __func__);
845
846 genpd = dev_to_genpd(dev);
847 if (IS_ERR(genpd))
848 return -EINVAL;
849
850 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
851}
852
853/**
854 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
855 * @dev: Device to suspend.
856 *
857 * Carry out a late powering off of a device under the assumption that its
858 * pm_domain field points to the domain member of an object of type
859 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
860 */
861static int pm_genpd_dev_poweroff_noirq(struct device *dev)
862{
863 struct generic_pm_domain *genpd;
864 int ret;
865
866 dev_dbg(dev, "%s()\n", __func__);
867
868 genpd = dev_to_genpd(dev);
869 if (IS_ERR(genpd))
870 return -EINVAL;
871
872 if (genpd->suspend_power_off)
873 return 0;
874
875 ret = pm_generic_poweroff_noirq(dev);
876 if (ret)
877 return ret;
878
879 if (device_may_wakeup(dev)
880 && genpd->active_wakeup && genpd->active_wakeup(dev))
881 return 0;
882
883 if (genpd->stop_device)
884 genpd->stop_device(dev);
885
886 /*
887 * Since all of the "noirq" callbacks are executed sequentially, it is
888 * guaranteed that this function will never run twice in parallel for
889 * the same PM domain, so it is not necessary to use locking here.
890 */
891 genpd->suspended_count++;
892 pm_genpd_sync_poweroff(genpd);
893
894 return 0;
895}
896
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	/*
	 * Treat the domain as powered off after hibernation, so that
	 * pm_genpd_poweron() below actually powers it up again.
	 */
	genpd->status = GPD_STATE_POWER_OFF;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}
939
940/**
941 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
942 * @dev: Device to resume.
943 *
944 * Restore a device under the assumption that its pm_domain field points to the
945 * domain member of an object of type struct generic_pm_domain representing
946 * a power domain consisting of I/O devices.
947 */
948static int pm_genpd_restore(struct device *dev)
949{
950 struct generic_pm_domain *genpd;
951
952 dev_dbg(dev, "%s()\n", __func__);
953
954 genpd = dev_to_genpd(dev);
955 if (IS_ERR(genpd))
956 return -EINVAL;
957
958 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
959}
960
961/**
962 * pm_genpd_complete - Complete power transition of a device in a power domain.
963 * @dev: Device to complete the transition of.
964 *
965 * Complete a power transition of a device (during a system-wide power
966 * transition) under the assumption that its pm_domain field points to the
967 * domain member of an object of type struct generic_pm_domain representing
968 * a power domain consisting of I/O devices.
969 */
970static void pm_genpd_complete(struct device *dev)
971{
972 struct generic_pm_domain *genpd;
973 bool run_complete;
974
975 dev_dbg(dev, "%s()\n", __func__);
976
977 genpd = dev_to_genpd(dev);
978 if (IS_ERR(genpd))
979 return;
980
981 mutex_lock(&genpd->lock);
982
983 run_complete = !genpd->suspend_power_off;
984 if (--genpd->prepared_count == 0)
985 genpd->suspend_power_off = false;
986
987 mutex_unlock(&genpd->lock);
988
989 if (run_complete) {
990 pm_generic_complete(dev);
991 pm_runtime_set_active(dev);
992 pm_runtime_enable(dev);
993 pm_runtime_idle(dev);
994 }
995}
996
997#else
998
999#define pm_genpd_prepare NULL
1000#define pm_genpd_suspend NULL
1001#define pm_genpd_suspend_noirq NULL
1002#define pm_genpd_resume_noirq NULL
1003#define pm_genpd_resume NULL
1004#define pm_genpd_freeze NULL
1005#define pm_genpd_freeze_noirq NULL
1006#define pm_genpd_thaw_noirq NULL
1007#define pm_genpd_thaw NULL
1008#define pm_genpd_dev_poweroff_noirq NULL
1009#define pm_genpd_dev_poweroff NULL
1010#define pm_genpd_restore_noirq NULL
1011#define pm_genpd_restore NULL
1012#define pm_genpd_complete NULL
1013
1014#endif /* CONFIG_PM_SLEEP */
1015
1016/**
1017 * pm_genpd_add_device - Add a device to an I/O PM domain.
1018 * @genpd: PM domain to add the device to.
1019 * @dev: Device to be added.
1020 */
1021int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1022{
1023 struct dev_list_entry *dle;
1024 int ret = 0;
1025
1026 dev_dbg(dev, "%s()\n", __func__);
1027
1028 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1029 return -EINVAL;
1030
1031 genpd_acquire_lock(genpd);
1032
1033 if (genpd->status == GPD_STATE_POWER_OFF) {
1034 ret = -EINVAL;
1035 goto out;
1036 }
1037
1038 if (genpd->prepared_count > 0) {
1039 ret = -EAGAIN;
1040 goto out;
1041 }
1042
1043 list_for_each_entry(dle, &genpd->dev_list, node)
1044 if (dle->dev == dev) {
1045 ret = -EINVAL;
1046 goto out;
1047 }
1048
1049 dle = kzalloc(sizeof(*dle), GFP_KERNEL);
1050 if (!dle) {
1051 ret = -ENOMEM;
1052 goto out;
1053 }
1054
1055 dle->dev = dev;
1056 dle->need_restore = false;
1057 list_add_tail(&dle->node, &genpd->dev_list);
1058 genpd->device_count++;
1059
1060 spin_lock_irq(&dev->power.lock);
1061 dev->pm_domain = &genpd->domain;
1062 spin_unlock_irq(&dev->power.lock);
1063
1064 out:
1065 genpd_release_lock(genpd);
1066
1067 return ret;
1068}
1069
1070/**
1071 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1072 * @genpd: PM domain to remove the device from.
1073 * @dev: Device to be removed.
1074 */
1075int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1076 struct device *dev)
1077{
1078 struct dev_list_entry *dle;
1079 int ret = -EINVAL;
1080
1081 dev_dbg(dev, "%s()\n", __func__);
1082
1083 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1084 return -EINVAL;
1085
1086 genpd_acquire_lock(genpd);
1087
1088 if (genpd->prepared_count > 0) {
1089 ret = -EAGAIN;
1090 goto out;
1091 }
1092
1093 list_for_each_entry(dle, &genpd->dev_list, node) {
1094 if (dle->dev != dev)
1095 continue;
1096
1097 spin_lock_irq(&dev->power.lock);
1098 dev->pm_domain = NULL;
1099 spin_unlock_irq(&dev->power.lock);
1100
1101 genpd->device_count--;
1102 list_del(&dle->node);
1103 kfree(dle);
1104
1105 ret = 0;
1106 break;
1107 }
1108
1109 out:
1110 genpd_release_lock(genpd);
1111
1112 return ret;
1113}
1114
1115/**
1116 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1117 * @genpd: Master PM domain to add the subdomain to.
1118 * @new_subdomain: Subdomain to be added.
1119 */
1120int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1121 struct generic_pm_domain *new_subdomain)
1122{
1123 struct generic_pm_domain *subdomain;
1124 int ret = 0;
1125
1126 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
1127 return -EINVAL;
1128
1129 start:
1130 genpd_acquire_lock(genpd);
1131 mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
1132
1133 if (new_subdomain->status != GPD_STATE_POWER_OFF
1134 && new_subdomain->status != GPD_STATE_ACTIVE) {
1135 mutex_unlock(&new_subdomain->lock);
1136 genpd_release_lock(genpd);
1137 goto start;
1138 }
1139
1140 if (genpd->status == GPD_STATE_POWER_OFF
1141 && new_subdomain->status != GPD_STATE_POWER_OFF) {
1142 ret = -EINVAL;
1143 goto out;
1144 }
1145
1146 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1147 if (subdomain == new_subdomain) {
1148 ret = -EINVAL;
1149 goto out;
1150 }
1151 }
1152
1153 list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
1154 new_subdomain->parent = genpd;
1155 if (subdomain->status != GPD_STATE_POWER_OFF)
1156 genpd->sd_count++;
1157
1158 out:
1159 mutex_unlock(&new_subdomain->lock);
1160 genpd_release_lock(genpd);
1161
1162 return ret;
1163}
1164
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL for invalid arguments or when @target is
 * not a subdomain of @genpd.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *target)
{
	struct generic_pm_domain *subdomain;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain != target)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/* Retry until the subdomain reaches a stable state. */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&subdomain->sd_node);
		subdomain->parent = NULL;
		/* A powered-on subdomain was counted in sd_count; drop it. */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1210
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain starts in the powered-off state (sets the
 *	initial value of the status field).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	/* Internal bookkeeping. */
	INIT_LIST_HEAD(&genpd->sd_node);
	genpd->parent = NULL;
	INIT_LIST_HEAD(&genpd->dev_list);
	INIT_LIST_HEAD(&genpd->sd_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	genpd->sd_count = 0;
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->suspended_count = 0;
	/* Hook up the dev_pm_ops callbacks implemented in this file. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;
	/* Make the domain visible to pm_genpd_poweroff_unused(). */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
1259
1260/**
1261 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1262 */
1263void pm_genpd_poweroff_unused(void)
1264{
1265 struct generic_pm_domain *genpd;
1266
1267 mutex_lock(&gpd_list_lock);
1268
1269 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1270 genpd_queue_power_off_work(genpd);
1271
1272 mutex_unlock(&gpd_list_lock);
1273}
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681c..9508df71274b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
94 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. 94 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
95 * @dev: Device to handle. 95 * @dev: Device to handle.
96 * @event: PM transition of the system under way. 96 * @event: PM transition of the system under way.
 97 * @noirq: Whether or not this is the "noirq" stage.
97 * 98 *
98 * If the device has not been suspended at run time, execute the 99 * If the device has not been suspended at run time, execute the
99 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and 100 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
100 * return its error code. Otherwise, return zero. 101 * return its error code. Otherwise, return zero.
101 */ 102 */
102static int __pm_generic_call(struct device *dev, int event) 103static int __pm_generic_call(struct device *dev, int event, bool noirq)
103{ 104{
104 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 105 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
105 int (*callback)(struct device *); 106 int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
109 110
110 switch (event) { 111 switch (event) {
111 case PM_EVENT_SUSPEND: 112 case PM_EVENT_SUSPEND:
112 callback = pm->suspend; 113 callback = noirq ? pm->suspend_noirq : pm->suspend;
113 break; 114 break;
114 case PM_EVENT_FREEZE: 115 case PM_EVENT_FREEZE:
115 callback = pm->freeze; 116 callback = noirq ? pm->freeze_noirq : pm->freeze;
116 break; 117 break;
117 case PM_EVENT_HIBERNATE: 118 case PM_EVENT_HIBERNATE:
118 callback = pm->poweroff; 119 callback = noirq ? pm->poweroff_noirq : pm->poweroff;
119 break; 120 break;
120 case PM_EVENT_THAW: 121 case PM_EVENT_THAW:
121 callback = pm->thaw; 122 callback = noirq ? pm->thaw_noirq : pm->thaw;
122 break; 123 break;
123 default: 124 default:
124 callback = NULL; 125 callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
129} 130}
130 131
131/** 132/**
133 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
134 * @dev: Device to suspend.
135 */
136int pm_generic_suspend_noirq(struct device *dev)
137{
138 return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
139}
140EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
141
142/**
132 * pm_generic_suspend - Generic suspend callback for subsystems. 143 * pm_generic_suspend - Generic suspend callback for subsystems.
133 * @dev: Device to suspend. 144 * @dev: Device to suspend.
134 */ 145 */
135int pm_generic_suspend(struct device *dev) 146int pm_generic_suspend(struct device *dev)
136{ 147{
137 return __pm_generic_call(dev, PM_EVENT_SUSPEND); 148 return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
138} 149}
139EXPORT_SYMBOL_GPL(pm_generic_suspend); 150EXPORT_SYMBOL_GPL(pm_generic_suspend);
140 151
141/** 152/**
153 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
154 * @dev: Device to freeze.
155 */
156int pm_generic_freeze_noirq(struct device *dev)
157{
158 return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
159}
160EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
161
162/**
142 * pm_generic_freeze - Generic freeze callback for subsystems. 163 * pm_generic_freeze - Generic freeze callback for subsystems.
143 * @dev: Device to freeze. 164 * @dev: Device to freeze.
144 */ 165 */
145int pm_generic_freeze(struct device *dev) 166int pm_generic_freeze(struct device *dev)
146{ 167{
147 return __pm_generic_call(dev, PM_EVENT_FREEZE); 168 return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
148} 169}
149EXPORT_SYMBOL_GPL(pm_generic_freeze); 170EXPORT_SYMBOL_GPL(pm_generic_freeze);
150 171
151/** 172/**
173 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
174 * @dev: Device to handle.
175 */
176int pm_generic_poweroff_noirq(struct device *dev)
177{
178 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
179}
180EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
181
182/**
152 * pm_generic_poweroff - Generic poweroff callback for subsystems. 183 * pm_generic_poweroff - Generic poweroff callback for subsystems.
153 * @dev: Device to handle. 184 * @dev: Device to handle.
154 */ 185 */
155int pm_generic_poweroff(struct device *dev) 186int pm_generic_poweroff(struct device *dev)
156{ 187{
157 return __pm_generic_call(dev, PM_EVENT_HIBERNATE); 188 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
158} 189}
159EXPORT_SYMBOL_GPL(pm_generic_poweroff); 190EXPORT_SYMBOL_GPL(pm_generic_poweroff);
160 191
161/** 192/**
193 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
194 * @dev: Device to thaw.
195 */
196int pm_generic_thaw_noirq(struct device *dev)
197{
198 return __pm_generic_call(dev, PM_EVENT_THAW, true);
199}
200EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
201
202/**
162 * pm_generic_thaw - Generic thaw callback for subsystems. 203 * pm_generic_thaw - Generic thaw callback for subsystems.
163 * @dev: Device to thaw. 204 * @dev: Device to thaw.
164 */ 205 */
165int pm_generic_thaw(struct device *dev) 206int pm_generic_thaw(struct device *dev)
166{ 207{
167 return __pm_generic_call(dev, PM_EVENT_THAW); 208 return __pm_generic_call(dev, PM_EVENT_THAW, false);
168} 209}
169EXPORT_SYMBOL_GPL(pm_generic_thaw); 210EXPORT_SYMBOL_GPL(pm_generic_thaw);
170 211
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
172 * __pm_generic_resume - Generic resume/restore callback for subsystems. 213 * __pm_generic_resume - Generic resume/restore callback for subsystems.
173 * @dev: Device to handle. 214 * @dev: Device to handle.
174 * @event: PM transition of the system under way. 215 * @event: PM transition of the system under way.
 216 * @noirq: Whether or not this is the "noirq" stage.
175 * 217 *
 176 * Execute the resume/restore callback provided by the @dev's driver, if 218 * Execute the resume/restore callback provided by the @dev's driver, if
177 * defined. If it returns 0, change the device's runtime PM status to 'active'. 219 * defined. If it returns 0, change the device's runtime PM status to 'active'.
178 * Return the callback's error code. 220 * Return the callback's error code.
179 */ 221 */
180static int __pm_generic_resume(struct device *dev, int event) 222static int __pm_generic_resume(struct device *dev, int event, bool noirq)
181{ 223{
182 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 224 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
183 int (*callback)(struct device *); 225 int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
188 230
189 switch (event) { 231 switch (event) {
190 case PM_EVENT_RESUME: 232 case PM_EVENT_RESUME:
191 callback = pm->resume; 233 callback = noirq ? pm->resume_noirq : pm->resume;
192 break; 234 break;
193 case PM_EVENT_RESTORE: 235 case PM_EVENT_RESTORE:
194 callback = pm->restore; 236 callback = noirq ? pm->restore_noirq : pm->restore;
195 break; 237 break;
196 default: 238 default:
197 callback = NULL; 239 callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
202 return 0; 244 return 0;
203 245
204 ret = callback(dev); 246 ret = callback(dev);
205 if (!ret && pm_runtime_enabled(dev)) { 247 if (!ret && !noirq && pm_runtime_enabled(dev)) {
206 pm_runtime_disable(dev); 248 pm_runtime_disable(dev);
207 pm_runtime_set_active(dev); 249 pm_runtime_set_active(dev);
208 pm_runtime_enable(dev); 250 pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
212} 254}
213 255
214/** 256/**
257 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
258 * @dev: Device to resume.
259 */
260int pm_generic_resume_noirq(struct device *dev)
261{
262 return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
263}
264EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
265
266/**
215 * pm_generic_resume - Generic resume callback for subsystems. 267 * pm_generic_resume - Generic resume callback for subsystems.
216 * @dev: Device to resume. 268 * @dev: Device to resume.
217 */ 269 */
218int pm_generic_resume(struct device *dev) 270int pm_generic_resume(struct device *dev)
219{ 271{
220 return __pm_generic_resume(dev, PM_EVENT_RESUME); 272 return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
221} 273}
222EXPORT_SYMBOL_GPL(pm_generic_resume); 274EXPORT_SYMBOL_GPL(pm_generic_resume);
223 275
224/** 276/**
277 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
278 * @dev: Device to restore.
279 */
280int pm_generic_restore_noirq(struct device *dev)
281{
282 return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
283}
284EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
285
286/**
225 * pm_generic_restore - Generic restore callback for subsystems. 287 * pm_generic_restore - Generic restore callback for subsystems.
226 * @dev: Device to restore. 288 * @dev: Device to restore.
227 */ 289 */
228int pm_generic_restore(struct device *dev) 290int pm_generic_restore(struct device *dev)
229{ 291{
230 return __pm_generic_resume(dev, PM_EVENT_RESTORE); 292 return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
231} 293}
232EXPORT_SYMBOL_GPL(pm_generic_restore); 294EXPORT_SYMBOL_GPL(pm_generic_restore);
233 295
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
256#ifdef CONFIG_PM_SLEEP 318#ifdef CONFIG_PM_SLEEP
257 .prepare = pm_generic_prepare, 319 .prepare = pm_generic_prepare,
258 .suspend = pm_generic_suspend, 320 .suspend = pm_generic_suspend,
321 .suspend_noirq = pm_generic_suspend_noirq,
259 .resume = pm_generic_resume, 322 .resume = pm_generic_resume,
323 .resume_noirq = pm_generic_resume_noirq,
260 .freeze = pm_generic_freeze, 324 .freeze = pm_generic_freeze,
325 .freeze_noirq = pm_generic_freeze_noirq,
261 .thaw = pm_generic_thaw, 326 .thaw = pm_generic_thaw,
327 .thaw_noirq = pm_generic_thaw_noirq,
262 .poweroff = pm_generic_poweroff, 328 .poweroff = pm_generic_poweroff,
329 .poweroff_noirq = pm_generic_poweroff_noirq,
263 .restore = pm_generic_restore, 330 .restore = pm_generic_restore,
331 .restore_noirq = pm_generic_restore_noirq,
264 .complete = pm_generic_complete, 332 .complete = pm_generic_complete,
265#endif 333#endif
266#ifdef CONFIG_PM_RUNTIME 334#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 06f09bf89cb2..a85459126bc6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
425 TRACE_DEVICE(dev); 425 TRACE_DEVICE(dev);
426 TRACE_RESUME(0); 426 TRACE_RESUME(0);
427 427
428 if (dev->pwr_domain) { 428 if (dev->pm_domain) {
429 pm_dev_dbg(dev, state, "EARLY power domain "); 429 pm_dev_dbg(dev, state, "EARLY power domain ");
430 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 430 error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
431 } else if (dev->type && dev->type->pm) { 431 } else if (dev->type && dev->type->pm) {
432 pm_dev_dbg(dev, state, "EARLY type "); 432 pm_dev_dbg(dev, state, "EARLY type ");
433 error = pm_noirq_op(dev, dev->type->pm, state); 433 error = pm_noirq_op(dev, dev->type->pm, state);
@@ -505,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
505static int device_resume(struct device *dev, pm_message_t state, bool async) 505static int device_resume(struct device *dev, pm_message_t state, bool async)
506{ 506{
507 int error = 0; 507 int error = 0;
508 bool put = false;
508 509
509 TRACE_DEVICE(dev); 510 TRACE_DEVICE(dev);
510 TRACE_RESUME(0); 511 TRACE_RESUME(0);
@@ -521,9 +522,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
521 if (!dev->power.is_suspended) 522 if (!dev->power.is_suspended)
522 goto Unlock; 523 goto Unlock;
523 524
524 if (dev->pwr_domain) { 525 pm_runtime_enable(dev);
526 put = true;
527
528 if (dev->pm_domain) {
525 pm_dev_dbg(dev, state, "power domain "); 529 pm_dev_dbg(dev, state, "power domain ");
526 error = pm_op(dev, &dev->pwr_domain->ops, state); 530 error = pm_op(dev, &dev->pm_domain->ops, state);
527 goto End; 531 goto End;
528 } 532 }
529 533
@@ -563,6 +567,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
563 complete_all(&dev->power.completion); 567 complete_all(&dev->power.completion);
564 568
565 TRACE_RESUME(error); 569 TRACE_RESUME(error);
570
571 if (put)
572 pm_runtime_put_sync(dev);
573
566 return error; 574 return error;
567} 575}
568 576
@@ -641,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
641{ 649{
642 device_lock(dev); 650 device_lock(dev);
643 651
644 if (dev->pwr_domain) { 652 if (dev->pm_domain) {
645 pm_dev_dbg(dev, state, "completing power domain "); 653 pm_dev_dbg(dev, state, "completing power domain ");
646 if (dev->pwr_domain->ops.complete) 654 if (dev->pm_domain->ops.complete)
647 dev->pwr_domain->ops.complete(dev); 655 dev->pm_domain->ops.complete(dev);
648 } else if (dev->type && dev->type->pm) { 656 } else if (dev->type && dev->type->pm) {
649 pm_dev_dbg(dev, state, "completing type "); 657 pm_dev_dbg(dev, state, "completing type ");
650 if (dev->type->pm->complete) 658 if (dev->type->pm->complete)
@@ -744,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
744{ 752{
745 int error; 753 int error;
746 754
747 if (dev->pwr_domain) { 755 if (dev->pm_domain) {
748 pm_dev_dbg(dev, state, "LATE power domain "); 756 pm_dev_dbg(dev, state, "LATE power domain ");
749 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 757 error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
750 if (error) 758 if (error)
751 return error; 759 return error;
752 } else if (dev->type && dev->type->pm) { 760 } else if (dev->type && dev->type->pm) {
@@ -843,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
843 int error = 0; 851 int error = 0;
844 852
845 dpm_wait_for_children(dev, async); 853 dpm_wait_for_children(dev, async);
846 device_lock(dev);
847 854
848 if (async_error) 855 if (async_error)
849 goto Unlock; 856 return 0;
857
858 pm_runtime_get_noresume(dev);
859 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
860 pm_wakeup_event(dev, 0);
850 861
851 if (pm_wakeup_pending()) { 862 if (pm_wakeup_pending()) {
863 pm_runtime_put_sync(dev);
852 async_error = -EBUSY; 864 async_error = -EBUSY;
853 goto Unlock; 865 return 0;
854 } 866 }
855 867
856 if (dev->pwr_domain) { 868 device_lock(dev);
869
870 if (dev->pm_domain) {
857 pm_dev_dbg(dev, state, "power domain "); 871 pm_dev_dbg(dev, state, "power domain ");
858 error = pm_op(dev, &dev->pwr_domain->ops, state); 872 error = pm_op(dev, &dev->pm_domain->ops, state);
859 goto End; 873 goto End;
860 } 874 }
861 875
@@ -890,12 +904,15 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
890 End: 904 End:
891 dev->power.is_suspended = !error; 905 dev->power.is_suspended = !error;
892 906
893 Unlock:
894 device_unlock(dev); 907 device_unlock(dev);
895 complete_all(&dev->power.completion); 908 complete_all(&dev->power.completion);
896 909
897 if (error) 910 if (error) {
911 pm_runtime_put_sync(dev);
898 async_error = error; 912 async_error = error;
913 } else if (dev->power.is_suspended) {
914 __pm_runtime_disable(dev, false);
915 }
899 916
900 return error; 917 return error;
901} 918}
@@ -982,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
982 999
983 device_lock(dev); 1000 device_lock(dev);
984 1001
985 if (dev->pwr_domain) { 1002 if (dev->pm_domain) {
986 pm_dev_dbg(dev, state, "preparing power domain "); 1003 pm_dev_dbg(dev, state, "preparing power domain ");
987 if (dev->pwr_domain->ops.prepare) 1004 if (dev->pm_domain->ops.prepare)
988 error = dev->pwr_domain->ops.prepare(dev); 1005 error = dev->pm_domain->ops.prepare(dev);
989 suspend_report_result(dev->pwr_domain->ops.prepare, error); 1006 suspend_report_result(dev->pm_domain->ops.prepare, error);
990 if (error) 1007 if (error)
991 goto End; 1008 goto End;
992 } else if (dev->type && dev->type->pm) { 1009 } else if (dev->type && dev->type->pm) {
@@ -1035,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
1035 get_device(dev); 1052 get_device(dev);
1036 mutex_unlock(&dpm_list_mtx); 1053 mutex_unlock(&dpm_list_mtx);
1037 1054
1038 pm_runtime_get_noresume(dev); 1055 error = device_prepare(dev, state);
1039 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1040 pm_wakeup_event(dev, 0);
1041
1042 pm_runtime_put_sync(dev);
1043 error = pm_wakeup_pending() ?
1044 -EBUSY : device_prepare(dev, state);
1045 1056
1046 mutex_lock(&dpm_list_mtx); 1057 mutex_lock(&dpm_list_mtx);
1047 if (error) { 1058 if (error) {
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9e..5cc12322ef32 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
625 625
626 return 0; 626 return 0;
627} 627}
628
629/**
630 * opp_free_cpufreq_table() - free the cpufreq table
631 * @dev: device for which we do this operation
632 * @table: table to free
633 *
634 * Free up the table allocated by opp_init_cpufreq_table
635 */
636void opp_free_cpufreq_table(struct device *dev,
637 struct cpufreq_frequency_table **table)
638{
639 if (!table)
640 return;
641
642 kfree(*table);
643 *table = NULL;
644}
628#endif /* CONFIG_CPU_FREQ */ 645#endif /* CONFIG_CPU_FREQ */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c55..8dc247c974af 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/base/power/runtime.c - Helper functions for device run-time PM 2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
3 * 3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> 5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
135 135
136 if (dev->power.runtime_error) 136 if (dev->power.runtime_error)
137 retval = -EINVAL; 137 retval = -EINVAL;
138 else if (atomic_read(&dev->power.usage_count) > 0 138 else if (dev->power.disable_depth > 0)
139 || dev->power.disable_depth > 0) 139 retval = -EACCES;
140 else if (atomic_read(&dev->power.usage_count) > 0)
140 retval = -EAGAIN; 141 retval = -EAGAIN;
141 else if (!pm_children_suspended(dev)) 142 else if (!pm_children_suspended(dev))
142 retval = -EBUSY; 143 retval = -EBUSY;
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
158 * @dev: Device to notify the bus type about. 159 * @dev: Device to notify the bus type about.
159 * @rpmflags: Flag bits. 160 * @rpmflags: Flag bits.
160 * 161 *
161 * Check if the device's run-time PM status allows it to be suspended. If 162 * Check if the device's runtime PM status allows it to be suspended. If
162 * another idle notification has been started earlier, return immediately. If 163 * another idle notification has been started earlier, return immediately. If
163 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise 164 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
164 * run the ->runtime_idle() callback directly. 165 * run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
213 214
214 dev->power.idle_notification = true; 215 dev->power.idle_notification = true;
215 216
216 if (dev->pwr_domain) 217 if (dev->pm_domain)
217 callback = dev->pwr_domain->ops.runtime_idle; 218 callback = dev->pm_domain->ops.runtime_idle;
218 else if (dev->type && dev->type->pm) 219 else if (dev->type && dev->type->pm)
219 callback = dev->type->pm->runtime_idle; 220 callback = dev->type->pm->runtime_idle;
220 else if (dev->class && dev->class->pm) 221 else if (dev->class && dev->class->pm)
@@ -262,15 +263,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
262 spin_lock_irq(&dev->power.lock); 263 spin_lock_irq(&dev->power.lock);
263 } 264 }
264 dev->power.runtime_error = retval; 265 dev->power.runtime_error = retval;
265 return retval; 266 return retval != -EACCES ? retval : -EIO;
266} 267}
267 268
268/** 269/**
269 * rpm_suspend - Carry out run-time suspend of given device. 270 * rpm_suspend - Carry out runtime suspend of given device.
270 * @dev: Device to suspend. 271 * @dev: Device to suspend.
271 * @rpmflags: Flag bits. 272 * @rpmflags: Flag bits.
272 * 273 *
273 * Check if the device's run-time PM status allows it to be suspended. If 274 * Check if the device's runtime PM status allows it to be suspended. If
274 * another suspend has been started earlier, either return immediately or wait 275 * another suspend has been started earlier, either return immediately or wait
275 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a 276 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
276 * pending idle notification. If the RPM_ASYNC flag is set then queue a 277 * pending idle notification. If the RPM_ASYNC flag is set then queue a
@@ -374,8 +375,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
374 375
375 __update_runtime_status(dev, RPM_SUSPENDING); 376 __update_runtime_status(dev, RPM_SUSPENDING);
376 377
377 if (dev->pwr_domain) 378 if (dev->pm_domain)
378 callback = dev->pwr_domain->ops.runtime_suspend; 379 callback = dev->pm_domain->ops.runtime_suspend;
379 else if (dev->type && dev->type->pm) 380 else if (dev->type && dev->type->pm)
380 callback = dev->type->pm->runtime_suspend; 381 callback = dev->type->pm->runtime_suspend;
381 else if (dev->class && dev->class->pm) 382 else if (dev->class && dev->class->pm)
@@ -388,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
388 retval = rpm_callback(callback, dev); 389 retval = rpm_callback(callback, dev);
389 if (retval) { 390 if (retval) {
390 __update_runtime_status(dev, RPM_ACTIVE); 391 __update_runtime_status(dev, RPM_ACTIVE);
391 dev->power.deferred_resume = 0; 392 dev->power.deferred_resume = false;
392 if (retval == -EAGAIN || retval == -EBUSY) 393 if (retval == -EAGAIN || retval == -EBUSY)
393 dev->power.runtime_error = 0; 394 dev->power.runtime_error = 0;
394 else 395 else
@@ -429,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
429} 430}
430 431
431/** 432/**
432 * rpm_resume - Carry out run-time resume of given device. 433 * rpm_resume - Carry out runtime resume of given device.
433 * @dev: Device to resume. 434 * @dev: Device to resume.
434 * @rpmflags: Flag bits. 435 * @rpmflags: Flag bits.
435 * 436 *
436 * Check if the device's run-time PM status allows it to be resumed. Cancel 437 * Check if the device's runtime PM status allows it to be resumed. Cancel
437 * any scheduled or pending requests. If another resume has been started 438 * any scheduled or pending requests. If another resume has been started
438 * earlier, either return immediately or wait for it to finish, depending on the 439 * earlier, either return immediately or wait for it to finish, depending on the
439 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in 440 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
458 if (dev->power.runtime_error) 459 if (dev->power.runtime_error)
459 retval = -EINVAL; 460 retval = -EINVAL;
460 else if (dev->power.disable_depth > 0) 461 else if (dev->power.disable_depth > 0)
461 retval = -EAGAIN; 462 retval = -EACCES;
462 if (retval) 463 if (retval)
463 goto out; 464 goto out;
464 465
@@ -550,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
550 551
551 spin_lock(&parent->power.lock); 552 spin_lock(&parent->power.lock);
552 /* 553 /*
553 * We can resume if the parent's run-time PM is disabled or it 554 * We can resume if the parent's runtime PM is disabled or it
554 * is set to ignore children. 555 * is set to ignore children.
555 */ 556 */
556 if (!parent->power.disable_depth 557 if (!parent->power.disable_depth
@@ -573,8 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
573 574
574 __update_runtime_status(dev, RPM_RESUMING); 575 __update_runtime_status(dev, RPM_RESUMING);
575 576
576 if (dev->pwr_domain) 577 if (dev->pm_domain)
577 callback = dev->pwr_domain->ops.runtime_resume; 578 callback = dev->pm_domain->ops.runtime_resume;
578 else if (dev->type && dev->type->pm) 579 else if (dev->type && dev->type->pm)
579 callback = dev->type->pm->runtime_resume; 580 callback = dev->type->pm->runtime_resume;
580 else if (dev->class && dev->class->pm) 581 else if (dev->class && dev->class->pm)
@@ -614,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
614} 615}
615 616
616/** 617/**
617 * pm_runtime_work - Universal run-time PM work function. 618 * pm_runtime_work - Universal runtime PM work function.
618 * @work: Work structure used for scheduling the execution of this function. 619 * @work: Work structure used for scheduling the execution of this function.
619 * 620 *
620 * Use @work to get the device object the work is to be done for, determine what 621 * Use @work to get the device object the work is to be done for, determine what
621 * is to be done and execute the appropriate run-time PM function. 622 * is to be done and execute the appropriate runtime PM function.
622 */ 623 */
623static void pm_runtime_work(struct work_struct *work) 624static void pm_runtime_work(struct work_struct *work)
624{ 625{
@@ -717,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
717EXPORT_SYMBOL_GPL(pm_schedule_suspend); 718EXPORT_SYMBOL_GPL(pm_schedule_suspend);
718 719
719/** 720/**
720 * __pm_runtime_idle - Entry point for run-time idle operations. 721 * __pm_runtime_idle - Entry point for runtime idle operations.
721 * @dev: Device to send idle notification for. 722 * @dev: Device to send idle notification for.
722 * @rpmflags: Flag bits. 723 * @rpmflags: Flag bits.
723 * 724 *
@@ -746,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
746EXPORT_SYMBOL_GPL(__pm_runtime_idle); 747EXPORT_SYMBOL_GPL(__pm_runtime_idle);
747 748
748/** 749/**
749 * __pm_runtime_suspend - Entry point for run-time put/suspend operations. 750 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
750 * @dev: Device to suspend. 751 * @dev: Device to suspend.
751 * @rpmflags: Flag bits. 752 * @rpmflags: Flag bits.
752 * 753 *
@@ -775,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
775EXPORT_SYMBOL_GPL(__pm_runtime_suspend); 776EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
776 777
777/** 778/**
778 * __pm_runtime_resume - Entry point for run-time resume operations. 779 * __pm_runtime_resume - Entry point for runtime resume operations.
779 * @dev: Device to resume. 780 * @dev: Device to resume.
780 * @rpmflags: Flag bits. 781 * @rpmflags: Flag bits.
781 * 782 *
@@ -801,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
801EXPORT_SYMBOL_GPL(__pm_runtime_resume); 802EXPORT_SYMBOL_GPL(__pm_runtime_resume);
802 803
803/** 804/**
804 * __pm_runtime_set_status - Set run-time PM status of a device. 805 * __pm_runtime_set_status - Set runtime PM status of a device.
805 * @dev: Device to handle. 806 * @dev: Device to handle.
806 * @status: New run-time PM status of the device. 807 * @status: New runtime PM status of the device.
807 * 808 *
808 * If run-time PM of the device is disabled or its power.runtime_error field is 809 * If runtime PM of the device is disabled or its power.runtime_error field is
809 * different from zero, the status may be changed either to RPM_ACTIVE, or to 810 * different from zero, the status may be changed either to RPM_ACTIVE, or to
810 * RPM_SUSPENDED, as long as that reflects the actual state of the device. 811 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
811 * However, if the device has a parent and the parent is not active, and the 812 * However, if the device has a parent and the parent is not active, and the
@@ -851,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
851 852
852 /* 853 /*
853 * It is invalid to put an active child under a parent that is 854 * It is invalid to put an active child under a parent that is
854 * not active, has run-time PM enabled and the 855 * not active, has runtime PM enabled and the
855 * 'power.ignore_children' flag unset. 856 * 'power.ignore_children' flag unset.
856 */ 857 */
857 if (!parent->power.disable_depth 858 if (!parent->power.disable_depth
@@ -885,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
885 * @dev: Device to handle. 886 * @dev: Device to handle.
886 * 887 *
887 * Flush all pending requests for the device from pm_wq and wait for all 888 * Flush all pending requests for the device from pm_wq and wait for all
888 * run-time PM operations involving the device in progress to complete. 889 * runtime PM operations involving the device in progress to complete.
889 * 890 *
890 * Should be called under dev->power.lock with interrupts disabled. 891 * Should be called under dev->power.lock with interrupts disabled.
891 */ 892 */
@@ -933,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
933 * Prevent the device from being suspended by incrementing its usage counter and 934 * Prevent the device from being suspended by incrementing its usage counter and
934 * if there's a pending resume request for the device, wake the device up. 935 * if there's a pending resume request for the device, wake the device up.
935 * Next, make sure that all pending requests for the device have been flushed 936 * Next, make sure that all pending requests for the device have been flushed
936 * from pm_wq and wait for all run-time PM operations involving the device in 937 * from pm_wq and wait for all runtime PM operations involving the device in
937 * progress to complete. 938 * progress to complete.
938 * 939 *
939 * Return value: 940 * Return value:
@@ -963,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
963EXPORT_SYMBOL_GPL(pm_runtime_barrier); 964EXPORT_SYMBOL_GPL(pm_runtime_barrier);
964 965
965/** 966/**
966 * __pm_runtime_disable - Disable run-time PM of a device. 967 * __pm_runtime_disable - Disable runtime PM of a device.
967 * @dev: Device to handle. 968 * @dev: Device to handle.
968 * @check_resume: If set, check if there's a resume request for the device. 969 * @check_resume: If set, check if there's a resume request for the device.
969 * 970 *
 970 * Increment power.disable_depth for the device and if it was zero previously, 971 * Increment power.disable_depth for the device and if it was zero previously,
971 * cancel all pending run-time PM requests for the device and wait for all 972 * cancel all pending runtime PM requests for the device and wait for all
972 * operations in progress to complete. The device can be either active or 973 * operations in progress to complete. The device can be either active or
973 * suspended after its run-time PM has been disabled. 974 * suspended after its runtime PM has been disabled.
974 * 975 *
975 * If @check_resume is set and there's a resume request pending when 976 * If @check_resume is set and there's a resume request pending when
976 * __pm_runtime_disable() is called and power.disable_depth is zero, the 977 * __pm_runtime_disable() is called and power.disable_depth is zero, the
977 * function will wake up the device before disabling its run-time PM. 978 * function will wake up the device before disabling its runtime PM.
978 */ 979 */
979void __pm_runtime_disable(struct device *dev, bool check_resume) 980void __pm_runtime_disable(struct device *dev, bool check_resume)
980{ 981{
@@ -987,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
987 988
988 /* 989 /*
989 * Wake up the device if there's a resume request pending, because that 990 * Wake up the device if there's a resume request pending, because that
990 * means there probably is some I/O to process and disabling run-time PM 991 * means there probably is some I/O to process and disabling runtime PM
991 * shouldn't prevent the device from processing the I/O. 992 * shouldn't prevent the device from processing the I/O.
992 */ 993 */
993 if (check_resume && dev->power.request_pending 994 if (check_resume && dev->power.request_pending
@@ -1012,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
1012EXPORT_SYMBOL_GPL(__pm_runtime_disable); 1013EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1013 1014
1014/** 1015/**
1015 * pm_runtime_enable - Enable run-time PM of a device. 1016 * pm_runtime_enable - Enable runtime PM of a device.
1016 * @dev: Device to handle. 1017 * @dev: Device to handle.
1017 */ 1018 */
1018void pm_runtime_enable(struct device *dev) 1019void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
1031EXPORT_SYMBOL_GPL(pm_runtime_enable); 1032EXPORT_SYMBOL_GPL(pm_runtime_enable);
1032 1033
1033/** 1034/**
1034 * pm_runtime_forbid - Block run-time PM of a device. 1035 * pm_runtime_forbid - Block runtime PM of a device.
1035 * @dev: Device to handle. 1036 * @dev: Device to handle.
1036 * 1037 *
1037 * Increase the device's usage count and clear its power.runtime_auto flag, 1038 * Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
1054EXPORT_SYMBOL_GPL(pm_runtime_forbid); 1055EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1055 1056
1056/** 1057/**
1057 * pm_runtime_allow - Unblock run-time PM of a device. 1058 * pm_runtime_allow - Unblock runtime PM of a device.
1058 * @dev: Device to handle. 1059 * @dev: Device to handle.
1059 * 1060 *
1060 * Decrease the device's usage count and set its power.runtime_auto flag. 1061 * Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
1075EXPORT_SYMBOL_GPL(pm_runtime_allow); 1076EXPORT_SYMBOL_GPL(pm_runtime_allow);
1076 1077
1077/** 1078/**
1078 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. 1079 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1079 * @dev: Device to handle. 1080 * @dev: Device to handle.
1080 * 1081 *
1081 * Set the power.no_callbacks flag, which tells the PM core that this 1082 * Set the power.no_callbacks flag, which tells the PM core that this
1082 * device is power-managed through its parent and has no run-time PM 1083 * device is power-managed through its parent and has no runtime PM
1083 * callbacks of its own. The run-time sysfs attributes will be removed. 1084 * callbacks of its own. The runtime sysfs attributes will be removed.
1084 */ 1085 */
1085void pm_runtime_no_callbacks(struct device *dev) 1086void pm_runtime_no_callbacks(struct device *dev)
1086{ 1087{
@@ -1156,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1156 * @delay: Value of the new delay in milliseconds. 1157 * @delay: Value of the new delay in milliseconds.
1157 * 1158 *
1158 * Set the device's power.autosuspend_delay value. If it changes to negative 1159 * Set the device's power.autosuspend_delay value. If it changes to negative
1159 * and the power.use_autosuspend flag is set, prevent run-time suspends. If it 1160 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1160 * changes the other way, allow run-time suspends. 1161 * changes the other way, allow runtime suspends.
1161 */ 1162 */
1162void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) 1163void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1163{ 1164{
@@ -1177,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1177 * @dev: Device to handle. 1178 * @dev: Device to handle.
1178 * @use: New value for use_autosuspend. 1179 * @use: New value for use_autosuspend.
1179 * 1180 *
1180 * Set the device's power.use_autosuspend flag, and allow or prevent run-time 1181 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1181 * suspends as needed. 1182 * suspends as needed.
1182 */ 1183 */
1183void __pm_runtime_use_autosuspend(struct device *dev, bool use) 1184void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1194EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); 1195EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1195 1196
1196/** 1197/**
1197 * pm_runtime_init - Initialize run-time PM fields in given device object. 1198 * pm_runtime_init - Initialize runtime PM fields in given device object.
1198 * @dev: Device object to initialize. 1199 * @dev: Device object to initialize.
1199 */ 1200 */
1200void pm_runtime_init(struct device *dev) 1201void pm_runtime_init(struct device *dev)
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b8979611..942d6a7c9ae1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
116 cp = memchr(buf, '\n', n); 116 cp = memchr(buf, '\n', n);
117 if (cp) 117 if (cp)
118 len = cp - buf; 118 len = cp - buf;
119 device_lock(dev);
119 if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) 120 if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
120 pm_runtime_allow(dev); 121 pm_runtime_allow(dev);
121 else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) 122 else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
122 pm_runtime_forbid(dev); 123 pm_runtime_forbid(dev);
123 else 124 else
124 return -EINVAL; 125 n = -EINVAL;
126 device_unlock(dev);
125 return n; 127 return n;
126} 128}
127 129
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
205 if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) 207 if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
206 return -EINVAL; 208 return -EINVAL;
207 209
210 device_lock(dev);
208 pm_runtime_set_autosuspend_delay(dev, delay); 211 pm_runtime_set_autosuspend_delay(dev, delay);
212 device_unlock(dev);
209 return n; 213 return n;
210} 214}
211 215
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62fe..af10abecb99b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
112 unsigned int val; 112 unsigned int val;
113 113
114 get_rtc_time(&time); 114 get_rtc_time(&time);
115 pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", 115 pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
116 time.tm_hour, time.tm_min, time.tm_sec, 116 time.tm_hour, time.tm_min, time.tm_sec,
117 time.tm_mon + 1, time.tm_mday, time.tm_year % 100); 117 time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
118 val = time.tm_year; /* 100 years */ 118 val = time.tm_year; /* 100 years */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 548708c4b2b8..a7346ab97a3c 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
606 return NOTIFY_OK; 606 return NOTIFY_OK;
607 607
608 /* interrupted by signal */ 608 /* interrupted by signal */
609 return NOTIFY_BAD; 609 return notifier_from_errno(err);
610 610
611 case PM_POST_SUSPEND: 611 case PM_POST_SUSPEND:
612 /* 612 /*
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 46767c53917a..12d1e81a8abe 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -18,6 +18,7 @@
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/pm_runtime.h> 20#include <linux/pm_runtime.h>
21#include <linux/suspend.h>
21#include "pci.h" 22#include "pci.h"
22 23
23struct pci_dynid { 24struct pci_dynid {
@@ -616,6 +617,21 @@ static int pci_pm_prepare(struct device *dev)
616 int error = 0; 617 int error = 0;
617 618
618 /* 619 /*
620 * If a PCI device configured to wake up the system from sleep states
621 * has been suspended at run time and there's a resume request pending
622 * for it, this is equivalent to the device signaling wakeup, so the
623 * system suspend operation should be aborted.
624 */
625 pm_runtime_get_noresume(dev);
626 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
627 pm_wakeup_event(dev, 0);
628
629 if (pm_wakeup_pending()) {
630 pm_runtime_put_sync(dev);
631 return -EBUSY;
632 }
633
634 /*
619 * PCI devices suspended at run time need to be resumed at this 635 * PCI devices suspended at run time need to be resumed at this
620 * point, because in general it is necessary to reconfigure them for 636 * point, because in general it is necessary to reconfigure them for
621 * system suspend. Namely, if the device is supposed to wake up the 637 * system suspend. Namely, if the device is supposed to wake up the
@@ -624,7 +640,7 @@ static int pci_pm_prepare(struct device *dev)
624 * system from the sleep state, we'll have to prevent it from signaling 640 * system from the sleep state, we'll have to prevent it from signaling
625 * wake-up. 641 * wake-up.
626 */ 642 */
627 pm_runtime_get_sync(dev); 643 pm_runtime_resume(dev);
628 644
629 if (drv && drv->pm && drv->pm->prepare) 645 if (drv && drv->pm && drv->pm->prepare)
630 error = drv->pm->prepare(dev); 646 error = drv->pm->prepare(dev);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 12ef9121d4f0..11312f401c70 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
258 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { 258 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
259 pr_err("The system cannot be suspended while the watchdog" 259 pr_err("The system cannot be suspended while the watchdog"
260 " is in use\n"); 260 " is in use\n");
261 return NOTIFY_BAD; 261 return notifier_from_errno(-EBUSY);
262 } 262 }
263 if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { 263 if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
264 clear_bit(VMWDT_OPEN, &vmwdt_is_open); 264 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
265 pr_err("The system cannot be suspended while the watchdog" 265 pr_err("The system cannot be suspended while the watchdog"
266 " is running\n"); 266 " is running\n");
267 return NOTIFY_BAD; 267 return notifier_from_errno(-EBUSY);
268 } 268 }
269 return NOTIFY_DONE; 269 return NOTIFY_DONE;
270} 270}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c47b25fd3f43..92d7324acb1c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
814 mutex_unlock(&css->mutex); 814 mutex_unlock(&css->mutex);
815 continue; 815 continue;
816 } 816 }
817 if (__chsc_do_secm(css, 0)) 817 ret = __chsc_do_secm(css, 0);
818 ret = NOTIFY_BAD; 818 ret = notifier_from_errno(ret);
819 mutex_unlock(&css->mutex); 819 mutex_unlock(&css->mutex);
820 } 820 }
821 break; 821 break;
@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
831 mutex_unlock(&css->mutex); 831 mutex_unlock(&css->mutex);
832 continue; 832 continue;
833 } 833 }
834 if (__chsc_do_secm(css, 1)) 834 ret = __chsc_do_secm(css, 1);
835 ret = NOTIFY_BAD; 835 ret = notifier_from_errno(ret);
836 mutex_unlock(&css->mutex); 836 mutex_unlock(&css->mutex);
837 } 837 }
838 /* search for subchannels, which appeared during hibernation */ 838 /* search for subchannels, which appeared during hibernation */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d70e91ae60af..d82a023a9015 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev)
144 int err; 144 int err;
145 145
146 err = pm_runtime_get_sync(&sdev->sdev_gendev); 146 err = pm_runtime_get_sync(&sdev->sdev_gendev);
147 if (err < 0) 147 if (err < 0 && err !=-EACCES)
148 pm_runtime_put_sync(&sdev->sdev_gendev); 148 pm_runtime_put_sync(&sdev->sdev_gendev);
149 else if (err > 0) 149 else
150 err = 0; 150 err = 0;
151 return err; 151 return err;
152} 152}
@@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost)
173 int err; 173 int err;
174 174
175 err = pm_runtime_get_sync(&shost->shost_gendev); 175 err = pm_runtime_get_sync(&shost->shost_gendev);
176 if (err < 0) 176 if (err < 0 && err !=-EACCES)
177 pm_runtime_put_sync(&shost->shost_gendev); 177 pm_runtime_put_sync(&shost->shost_gendev);
178 else if (err > 0) 178 else
179 err = 0; 179 err = 0;
180 return err; 180 return err;
181} 181}
diff --git a/include/linux/device.h b/include/linux/device.h
index e4f62d8896b7..160d4ddb2499 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -516,7 +516,7 @@ struct device_dma_parameters {
516 * minimizes board-specific #ifdefs in drivers. 516 * minimizes board-specific #ifdefs in drivers.
517 * @power: For device power management. 517 * @power: For device power management.
518 * See Documentation/power/devices.txt for details. 518 * See Documentation/power/devices.txt for details.
519 * @pwr_domain: Provide callbacks that are executed during system suspend, 519 * @pm_domain: Provide callbacks that are executed during system suspend,
520 * hibernation, system resume and during runtime PM transitions 520 * hibernation, system resume and during runtime PM transitions
521 * along with subsystem-level and driver-level callbacks. 521 * along with subsystem-level and driver-level callbacks.
522 * @numa_node: NUMA node this device is close to. 522 * @numa_node: NUMA node this device is close to.
@@ -567,7 +567,7 @@ struct device {
567 void *platform_data; /* Platform specific data, device 567 void *platform_data; /* Platform specific data, device
568 core doesn't touch it */ 568 core doesn't touch it */
569 struct dev_pm_info power; 569 struct dev_pm_info power;
570 struct dev_power_domain *pwr_domain; 570 struct dev_pm_domain *pm_domain;
571 571
572#ifdef CONFIG_NUMA 572#ifdef CONFIG_NUMA
573 int numa_node; /* NUMA node this device is close to */ 573 int numa_node; /* NUMA node this device is close to */
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 5449945d589f..7020e9736fc5 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
94#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) 94#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
95int opp_init_cpufreq_table(struct device *dev, 95int opp_init_cpufreq_table(struct device *dev,
96 struct cpufreq_frequency_table **table); 96 struct cpufreq_frequency_table **table);
97void opp_free_cpufreq_table(struct device *dev,
98 struct cpufreq_frequency_table **table);
97#else 99#else
98static inline int opp_init_cpufreq_table(struct device *dev, 100static inline int opp_init_cpufreq_table(struct device *dev,
99 struct cpufreq_frequency_table **table) 101 struct cpufreq_frequency_table **table)
100{ 102{
101 return -EINVAL; 103 return -EINVAL;
102} 104}
105
106static inline
107void opp_free_cpufreq_table(struct device *dev,
108 struct cpufreq_frequency_table **table)
109{
110}
103#endif /* CONFIG_CPU_FREQ */ 111#endif /* CONFIG_CPU_FREQ */
104 112
105#endif /* __LINUX_OPP_H__ */ 113#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 411e4f4be52b..f7c84c9abd30 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -461,8 +461,8 @@ struct dev_pm_info {
461 unsigned long active_jiffies; 461 unsigned long active_jiffies;
462 unsigned long suspended_jiffies; 462 unsigned long suspended_jiffies;
463 unsigned long accounting_timestamp; 463 unsigned long accounting_timestamp;
464 void *subsys_data; /* Owned by the subsystem. */
465#endif 464#endif
465 void *subsys_data; /* Owned by the subsystem. */
466}; 466};
467 467
468extern void update_pm_runtime_accounting(struct device *dev); 468extern void update_pm_runtime_accounting(struct device *dev);
@@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting(struct device *dev);
472 * hibernation, system resume and during runtime PM transitions along with 472 * hibernation, system resume and during runtime PM transitions along with
473 * subsystem-level and driver-level callbacks. 473 * subsystem-level and driver-level callbacks.
474 */ 474 */
475struct dev_power_domain { 475struct dev_pm_domain {
476 struct dev_pm_ops ops; 476 struct dev_pm_ops ops;
477}; 477};
478 478
@@ -553,11 +553,17 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
553extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); 553extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
554 554
555extern int pm_generic_prepare(struct device *dev); 555extern int pm_generic_prepare(struct device *dev);
556extern int pm_generic_suspend_noirq(struct device *dev);
556extern int pm_generic_suspend(struct device *dev); 557extern int pm_generic_suspend(struct device *dev);
558extern int pm_generic_resume_noirq(struct device *dev);
557extern int pm_generic_resume(struct device *dev); 559extern int pm_generic_resume(struct device *dev);
560extern int pm_generic_freeze_noirq(struct device *dev);
558extern int pm_generic_freeze(struct device *dev); 561extern int pm_generic_freeze(struct device *dev);
562extern int pm_generic_thaw_noirq(struct device *dev);
559extern int pm_generic_thaw(struct device *dev); 563extern int pm_generic_thaw(struct device *dev);
564extern int pm_generic_restore_noirq(struct device *dev);
560extern int pm_generic_restore(struct device *dev); 565extern int pm_generic_restore(struct device *dev);
566extern int pm_generic_poweroff_noirq(struct device *dev);
561extern int pm_generic_poweroff(struct device *dev); 567extern int pm_generic_poweroff(struct device *dev);
562extern void pm_generic_complete(struct device *dev); 568extern void pm_generic_complete(struct device *dev);
563 569
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
new file mode 100644
index 000000000000..21097cb086fe
--- /dev/null
+++ b/include/linux/pm_domain.h
@@ -0,0 +1,108 @@
1/*
2 * pm_domain.h - Definitions and headers related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#ifndef _LINUX_PM_DOMAIN_H
10#define _LINUX_PM_DOMAIN_H
11
12#include <linux/device.h>
13
14enum gpd_status {
15 GPD_STATE_ACTIVE = 0, /* PM domain is active */
16 GPD_STATE_BUSY, /* Something is happening to the PM domain */
17 GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
18 GPD_STATE_POWER_OFF, /* PM domain is off */
19};
20
21struct dev_power_governor {
22 bool (*power_down_ok)(struct dev_pm_domain *domain);
23};
24
25struct generic_pm_domain {
26 struct dev_pm_domain domain; /* PM domain operations */
27 struct list_head gpd_list_node; /* Node in the global PM domains list */
28 struct list_head sd_node; /* Node in the parent's subdomain list */
29 struct generic_pm_domain *parent; /* Parent PM domain */
 30 struct list_head sd_list; /* List of subdomains */
31 struct list_head dev_list; /* List of devices */
32 struct mutex lock;
33 struct dev_power_governor *gov;
34 struct work_struct power_off_work;
35 unsigned int in_progress; /* Number of devices being suspended now */
36 unsigned int sd_count; /* Number of subdomains with power "on" */
37 enum gpd_status status; /* Current state of the domain */
38 wait_queue_head_t status_wait_queue;
39 struct task_struct *poweroff_task; /* Powering off task */
40 unsigned int resume_count; /* Number of devices being resumed */
41 unsigned int device_count; /* Number of devices */
42 unsigned int suspended_count; /* System suspend device counter */
43 unsigned int prepared_count; /* Suspend counter of prepared devices */
44 bool suspend_power_off; /* Power status before system suspend */
45 int (*power_off)(struct generic_pm_domain *domain);
46 int (*power_on)(struct generic_pm_domain *domain);
47 int (*start_device)(struct device *dev);
48 int (*stop_device)(struct device *dev);
49 bool (*active_wakeup)(struct device *dev);
50};
51
52static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
53{
54 return container_of(pd, struct generic_pm_domain, domain);
55}
56
57struct dev_list_entry {
58 struct list_head node;
59 struct device *dev;
60 bool need_restore;
61};
62
63#ifdef CONFIG_PM_GENERIC_DOMAINS
64extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
65 struct device *dev);
66extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
67 struct device *dev);
68extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
69 struct generic_pm_domain *new_subdomain);
70extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
71 struct generic_pm_domain *target);
72extern void pm_genpd_init(struct generic_pm_domain *genpd,
73 struct dev_power_governor *gov, bool is_off);
74extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
75extern void pm_genpd_poweroff_unused(void);
76extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
77#else
78static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
79 struct device *dev)
80{
81 return -ENOSYS;
82}
83static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
84 struct device *dev)
85{
86 return -ENOSYS;
87}
88static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
89 struct generic_pm_domain *new_sd)
90{
91 return -ENOSYS;
92}
93static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
94 struct generic_pm_domain *target)
95{
96 return -ENOSYS;
97}
98static inline void pm_genpd_init(struct generic_pm_domain *genpd,
99 struct dev_power_governor *gov, bool is_off) {}
100static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
101{
102 return -ENOSYS;
103}
104static inline void pm_genpd_poweroff_unused(void) {}
105static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
106#endif
107
108#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 878cf84baeb1..daac05d751b2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -82,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev)
82 && !dev->power.disable_depth; 82 && !dev->power.disable_depth;
83} 83}
84 84
85static inline bool pm_runtime_status_suspended(struct device *dev)
86{
87 return dev->power.runtime_status == RPM_SUSPENDED;
88}
89
85static inline bool pm_runtime_enabled(struct device *dev) 90static inline bool pm_runtime_enabled(struct device *dev)
86{ 91{
87 return !dev->power.disable_depth; 92 return !dev->power.disable_depth;
@@ -130,6 +135,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
130static inline bool device_run_wake(struct device *dev) { return false; } 135static inline bool device_run_wake(struct device *dev) { return false; }
131static inline void device_set_run_wake(struct device *dev, bool enable) {} 136static inline void device_set_run_wake(struct device *dev, bool enable) {}
132static inline bool pm_runtime_suspended(struct device *dev) { return false; } 137static inline bool pm_runtime_suspended(struct device *dev) { return false; }
138static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
133static inline bool pm_runtime_enabled(struct device *dev) { return false; } 139static inline bool pm_runtime_enabled(struct device *dev) { return false; }
134 140
135static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } 141static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
@@ -247,41 +253,41 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
247 253
248struct pm_clk_notifier_block { 254struct pm_clk_notifier_block {
249 struct notifier_block nb; 255 struct notifier_block nb;
250 struct dev_power_domain *pwr_domain; 256 struct dev_pm_domain *pm_domain;
251 char *con_ids[]; 257 char *con_ids[];
252}; 258};
253 259
254#ifdef CONFIG_PM_RUNTIME_CLK 260#ifdef CONFIG_PM_CLK
255extern int pm_runtime_clk_init(struct device *dev); 261extern int pm_clk_init(struct device *dev);
256extern void pm_runtime_clk_destroy(struct device *dev); 262extern void pm_clk_destroy(struct device *dev);
257extern int pm_runtime_clk_add(struct device *dev, const char *con_id); 263extern int pm_clk_add(struct device *dev, const char *con_id);
258extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); 264extern void pm_clk_remove(struct device *dev, const char *con_id);
259extern int pm_runtime_clk_suspend(struct device *dev); 265extern int pm_clk_suspend(struct device *dev);
260extern int pm_runtime_clk_resume(struct device *dev); 266extern int pm_clk_resume(struct device *dev);
261#else 267#else
262static inline int pm_runtime_clk_init(struct device *dev) 268static inline int pm_clk_init(struct device *dev)
263{ 269{
264 return -EINVAL; 270 return -EINVAL;
265} 271}
266static inline void pm_runtime_clk_destroy(struct device *dev) 272static inline void pm_clk_destroy(struct device *dev)
267{ 273{
268} 274}
269static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) 275static inline int pm_clk_add(struct device *dev, const char *con_id)
270{ 276{
271 return -EINVAL; 277 return -EINVAL;
272} 278}
273static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) 279static inline void pm_clk_remove(struct device *dev, const char *con_id)
274{ 280{
275} 281}
276#define pm_runtime_clock_suspend NULL 282#define pm_clk_suspend NULL
277#define pm_runtime_clock_resume NULL 283#define pm_clk_resume NULL
278#endif 284#endif
279 285
280#ifdef CONFIG_HAVE_CLK 286#ifdef CONFIG_HAVE_CLK
281extern void pm_runtime_clk_add_notifier(struct bus_type *bus, 287extern void pm_clk_add_notifier(struct bus_type *bus,
282 struct pm_clk_notifier_block *clknb); 288 struct pm_clk_notifier_block *clknb);
283#else 289#else
284static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, 290static inline void pm_clk_add_notifier(struct bus_type *bus,
285 struct pm_clk_notifier_block *clknb) 291 struct pm_clk_notifier_block *clknb)
286{ 292{
287} 293}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 083ffea7ba18..e1e3742733be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t;
92 * @enter() and @wake(), even if any of them fails. It is executed after 92 * @enter() and @wake(), even if any of them fails. It is executed after
93 * a failing @prepare. 93 * a failing @prepare.
94 * 94 *
95 * @suspend_again: Returns whether the system should suspend again (true) or
 96 * not (false). If the platform wants to poll sensors or execute some
 97 * code while suspended, without invoking userspace and most devices,
 98 * the suspend_again callback is the place to do it, assuming that a
 99 * periodic wakeup or alarm wakeup is already set up. This allows code to
 100 * run while the system stays suspended from the point of view of userland
 101 * and devices.
101 *
95 * @end: Called by the PM core right after resuming devices, to indicate to 102 * @end: Called by the PM core right after resuming devices, to indicate to
96 * the platform that the system has returned to the working state or 103 * the platform that the system has returned to the working state or
97 * the transition to the sleep state has been aborted. 104 * the transition to the sleep state has been aborted.
@@ -113,6 +120,7 @@ struct platform_suspend_ops {
113 int (*enter)(suspend_state_t state); 120 int (*enter)(suspend_state_t state);
114 void (*wake)(void); 121 void (*wake)(void);
115 void (*finish)(void); 122 void (*finish)(void);
123 bool (*suspend_again)(void);
116 void (*end)(void); 124 void (*end)(void);
117 void (*recover)(void); 125 void (*recover)(void);
118}; 126};
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24b55b0..7b856b3458d2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -224,6 +224,10 @@ config PM_OPP
224 implementations a ready to use framework to manage OPPs. 224 implementations a ready to use framework to manage OPPs.
225 For more information, read <file:Documentation/power/opp.txt> 225 For more information, read <file:Documentation/power/opp.txt>
226 226
227config PM_RUNTIME_CLK 227config PM_CLK
228 def_bool y 228 def_bool y
229 depends on PM_RUNTIME && HAVE_CLK 229 depends on PM && HAVE_CLK
230
231config PM_GENERIC_DOMAINS
232 bool
233 depends on PM
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7cb..6c601f871964 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
37 37
38int pm_notifier_call_chain(unsigned long val) 38int pm_notifier_call_chain(unsigned long val)
39{ 39{
40 return (blocking_notifier_call_chain(&pm_chain_head, val, NULL) 40 int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
41 == NOTIFY_BAD) ? -EINVAL : 0; 41
42 return notifier_to_errno(ret);
42} 43}
43 44
44/* If set, devices may be suspended and resumed asynchronously. */ 45/* If set, devices may be suspended and resumed asynchronously. */
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1c41ba215419..b6b71ad2208f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
44 suspend_ops = ops; 44 suspend_ops = ops;
45 mutex_unlock(&pm_mutex); 45 mutex_unlock(&pm_mutex);
46} 46}
47EXPORT_SYMBOL_GPL(suspend_set_ops);
47 48
48bool valid_state(suspend_state_t state) 49bool valid_state(suspend_state_t state)
49{ 50{
@@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state)
65{ 66{
66 return state == PM_SUSPEND_MEM; 67 return state == PM_SUSPEND_MEM;
67} 68}
69EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
68 70
69static int suspend_test(int level) 71static int suspend_test(int level)
70{ 72{
@@ -126,12 +128,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
126} 128}
127 129
128/** 130/**
129 * suspend_enter - enter the desired system sleep state. 131 * suspend_enter - enter the desired system sleep state.
130 * @state: state to enter 132 * @state: State to enter
133 * @wakeup: Returns information that suspend should not be entered again.
131 * 134 *
132 * This function should be called after devices have been suspended. 135 * This function should be called after devices have been suspended.
133 */ 136 */
134static int suspend_enter(suspend_state_t state) 137static int suspend_enter(suspend_state_t state, bool *wakeup)
135{ 138{
136 int error; 139 int error;
137 140
@@ -165,7 +168,8 @@ static int suspend_enter(suspend_state_t state)
165 168
166 error = syscore_suspend(); 169 error = syscore_suspend();
167 if (!error) { 170 if (!error) {
168 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { 171 *wakeup = pm_wakeup_pending();
172 if (!(suspend_test(TEST_CORE) || *wakeup)) {
169 error = suspend_ops->enter(state); 173 error = suspend_ops->enter(state);
170 events_check_enabled = false; 174 events_check_enabled = false;
171 } 175 }
@@ -199,6 +203,7 @@ static int suspend_enter(suspend_state_t state)
199int suspend_devices_and_enter(suspend_state_t state) 203int suspend_devices_and_enter(suspend_state_t state)
200{ 204{
201 int error; 205 int error;
206 bool wakeup = false;
202 207
203 if (!suspend_ops) 208 if (!suspend_ops)
204 return -ENOSYS; 209 return -ENOSYS;
@@ -220,7 +225,10 @@ int suspend_devices_and_enter(suspend_state_t state)
220 if (suspend_test(TEST_DEVICES)) 225 if (suspend_test(TEST_DEVICES))
221 goto Recover_platform; 226 goto Recover_platform;
222 227
223 error = suspend_enter(state); 228 do {
229 error = suspend_enter(state, &wakeup);
230 } while (!error && !wakeup
231 && suspend_ops->suspend_again && suspend_ops->suspend_again());
224 232
225 Resume_devices: 233 Resume_devices:
226 suspend_test_start(); 234 suspend_test_start();