 -rw-r--r--   Documentation/power/runtime_pm.txt | 223
 -rw-r--r--   drivers/base/power/main.c          | 128
 -rw-r--r--   drivers/base/power/runtime.c       |  45
 3 files changed, 288 insertions, 108 deletions
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 4a3109b28847..356fd86f4ea8 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
| @@ -42,80 +42,81 @@ struct dev_pm_ops { | |||
| 42 | ... | 42 | ... |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | The ->runtime_suspend() callback is executed by the PM core for the bus type of | 45 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are |
| 46 | the device being suspended. The bus type's callback is then _entirely_ | 46 | executed by the PM core for either the bus type, or device type (if the bus |
| 47 | _responsible_ for handling the device as appropriate, which may, but need not | 47 | type's callback is not defined), or device class (if the bus type's and device |
| 48 | include executing the device driver's own ->runtime_suspend() callback (from the | 48 | type's callbacks are not defined) of given device. The bus type, device type |
| 49 | and device class callbacks are referred to as subsystem-level callbacks in what | ||
| 50 | follows. | ||
| 51 | |||
| 52 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling | ||
| 53 | the suspend of the device as appropriate, which may, but need not include | ||
| 54 | executing the device driver's own ->runtime_suspend() callback (from the | ||
| 49 | PM core's point of view it is not necessary to implement a ->runtime_suspend() | 55 | PM core's point of view it is not necessary to implement a ->runtime_suspend() |
| 50 | callback in a device driver as long as the bus type's ->runtime_suspend() knows | 56 | callback in a device driver as long as the subsystem-level suspend callback |
| 51 | what to do to handle the device). | 57 | knows what to do to handle the device). |
| 52 | 58 | ||
| 53 | * Once the bus type's ->runtime_suspend() callback has completed successfully | 59 | * Once the subsystem-level suspend callback has completed successfully |
| 54 | for given device, the PM core regards the device as suspended, which need | 60 | for given device, the PM core regards the device as suspended, which need |
| 55 | not mean that the device has been put into a low power state. It is | 61 | not mean that the device has been put into a low power state. It is |
| 56 | supposed to mean, however, that the device will not process data and will | 62 | supposed to mean, however, that the device will not process data and will |
| 57 | not communicate with the CPU(s) and RAM until its bus type's | 63 | not communicate with the CPU(s) and RAM until the subsystem-level resume |
| 58 | ->runtime_resume() callback is executed for it. The run-time PM status of | 64 | callback is executed for it. The run-time PM status of a device after |
| 59 | a device after successful execution of its bus type's ->runtime_suspend() | 65 | successful execution of the subsystem-level suspend callback is 'suspended'. |
| 60 | callback is 'suspended'. | 66 | |
| 61 | 67 | * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN, | |
| 62 | * If the bus type's ->runtime_suspend() callback returns -EBUSY or -EAGAIN, | 68 | the device's run-time PM status is 'active', which means that the device |
| 63 | the device's run-time PM status is supposed to be 'active', which means that | 69 | _must_ be fully operational afterwards. |
| 64 | the device _must_ be fully operational afterwards. | 70 | |
| 65 | 71 | * If the subsystem-level suspend callback returns an error code different | |
| 66 | * If the bus type's ->runtime_suspend() callback returns an error code | 72 | from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will |
| 67 | different from -EBUSY or -EAGAIN, the PM core regards this as a fatal | 73 | refuse to run the helper functions described in Section 4 for the device, |
| 68 | error and will refuse to run the helper functions described in Section 4 | 74 | until the status of it is directly set either to 'active', or to 'suspended' |
| 69 | for the device, until the status of it is directly set either to 'active' | 75 | (the PM core provides special helper functions for this purpose). |
| 70 | or to 'suspended' (the PM core provides special helper functions for this | 76 | |
| 71 | purpose). | 77 | In particular, if the driver requires remote wake-up capability (i.e. hardware |
| 72 | 78 | mechanism allowing the device to request a change of its power state, such as | |
| 73 | In particular, if the driver requires remote wakeup capability for proper | 79 | PCI PME) for proper functioning and device_run_wake() returns 'false' for the |
| 74 | functioning and device_run_wake() returns 'false' for the device, then | 80 | device, then ->runtime_suspend() should return -EBUSY. On the other hand, if |
| 75 | ->runtime_suspend() should return -EBUSY. On the other hand, if | 81 | device_run_wake() returns 'true' for the device and the device is put into a low |
| 76 | device_run_wake() returns 'true' for the device and the device is put | 82 | power state during the execution of the subsystem-level suspend callback, it is |
| 77 | into a low power state during the execution of its bus type's | 83 | expected that remote wake-up will be enabled for the device. Generally, remote |
| 78 | ->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism | 84 | wake-up should be enabled for all input devices put into a low power state at |
| 79 | allowing the device to request a change of its power state, such as PCI PME) | 85 | run time. |
| 80 | will be enabled for the device. Generally, remote wake-up should be enabled | 86 | |
| 81 | for all input devices put into a low power state at run time. | 87 | The subsystem-level resume callback is _entirely_ _responsible_ for handling the |
| 82 | 88 | resume of the device as appropriate, which may, but need not include executing | |
| 83 | The ->runtime_resume() callback is executed by the PM core for the bus type of | 89 | the device driver's own ->runtime_resume() callback (from the PM core's point of |
| 84 | the device being woken up. The bus type's callback is then _entirely_ | 90 | view it is not necessary to implement a ->runtime_resume() callback in a device |
| 85 | _responsible_ for handling the device as appropriate, which may, but need not | 91 | driver as long as the subsystem-level resume callback knows what to do to handle |
| 86 | include executing the device driver's own ->runtime_resume() callback (from the | 92 | the device). |
| 87 | PM core's point of view it is not necessary to implement a ->runtime_resume() | 93 | |
| 88 | callback in a device driver as long as the bus type's ->runtime_resume() knows | 94 | * Once the subsystem-level resume callback has completed successfully, the PM |
| 89 | what to do to handle the device). | 95 | core regards the device as fully operational, which means that the device |
| 90 | 96 | _must_ be able to complete I/O operations as needed. The run-time PM status | |
| 91 | * Once the bus type's ->runtime_resume() callback has completed successfully, | 97 | of the device is then 'active'. |
| 92 | the PM core regards the device as fully operational, which means that the | 98 | |
| 93 | device _must_ be able to complete I/O operations as needed. The run-time | 99 | * If the subsystem-level resume callback returns an error code, the PM core |
| 94 | PM status of the device is then 'active'. | 100 | regards this as a fatal error and will refuse to run the helper functions |
| 95 | 101 | described in Section 4 for the device, until its status is directly set | |
| 96 | * If the bus type's ->runtime_resume() callback returns an error code, the PM | 102 | either to 'active' or to 'suspended' (the PM core provides special helper |
| 97 | core regards this as a fatal error and will refuse to run the helper | 103 | functions for this purpose). |
| 98 | functions described in Section 4 for the device, until its status is | 104 | |
| 99 | directly set either to 'active' or to 'suspended' (the PM core provides | 105 | The subsystem-level idle callback is executed by the PM core whenever the device |
| 100 | special helper functions for this purpose). | 106 | appears to be idle, which is indicated to the PM core by two counters, the |
| 101 | 107 | device's usage counter and the counter of 'active' children of the device. | |
| 102 | The ->runtime_idle() callback is executed by the PM core for the bus type of | ||
| 103 | given device whenever the device appears to be idle, which is indicated to the | ||
| 104 | PM core by two counters, the device's usage counter and the counter of 'active' | ||
| 105 | children of the device. | ||
| 106 | 108 | ||
| 107 | * If any of these counters is decreased using a helper function provided by | 109 | * If any of these counters is decreased using a helper function provided by |
| 108 | the PM core and it turns out to be equal to zero, the other counter is | 110 | the PM core and it turns out to be equal to zero, the other counter is |
| 109 | checked. If that counter also is equal to zero, the PM core executes the | 111 | checked. If that counter also is equal to zero, the PM core executes the |
| 110 | device bus type's ->runtime_idle() callback (with the device as an | 112 | subsystem-level idle callback with the device as an argument. |
| 111 | argument). | ||
| 112 | 113 | ||
| 113 | The action performed by a bus type's ->runtime_idle() callback is totally | 114 | The action performed by a subsystem-level idle callback is totally dependent on |
| 114 | dependent on the bus type in question, but the expected and recommended action | 115 | the subsystem in question, but the expected and recommended action is to check |
| 115 | is to check if the device can be suspended (i.e. if all of the conditions | 116 | if the device can be suspended (i.e. if all of the conditions necessary for |
| 116 | necessary for suspending the device are satisfied) and to queue up a suspend | 117 | suspending the device are satisfied) and to queue up a suspend request for the |
| 117 | request for the device in that case. The value returned by this callback is | 118 | device in that case. The value returned by this callback is ignored by the PM |
| 118 | ignored by the PM core. | 119 | core. |
| 119 | 120 | ||
| 120 | The helper functions provided by the PM core, described in Section 4, guarantee | 121 | The helper functions provided by the PM core, described in Section 4, guarantee |
| 121 | that the following constraints are met with respect to the bus type's run-time | 122 | that the following constraints are met with respect to the bus type's run-time |
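
To make the new callback-selection rule concrete, here is a minimal sketch of how a subsystem might wire up the three run-time PM callbacks in its struct dev_pm_ops and simply forward to the driver; the mybus_* names and the 100 ms delay are illustrative and are not part of this patch:

  #include <linux/device.h>
  #include <linux/pm_runtime.h>

  static int mybus_runtime_suspend(struct device *dev)
  {
          const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

          /*
           * Returning -EBUSY or -EAGAIN leaves the device 'active';
           * any other error is treated as fatal by the PM core.
           */
          return pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
  }

  static int mybus_runtime_resume(struct device *dev)
  {
          const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

          return pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
  }

  static int mybus_runtime_idle(struct device *dev)
  {
          /* Recommended action: queue up a suspend request for the device. */
          return pm_schedule_suspend(dev, 100);
  }

  static const struct dev_pm_ops mybus_pm_ops = {
          .runtime_suspend = mybus_runtime_suspend,
          .runtime_resume  = mybus_runtime_resume,
          .runtime_idle    = mybus_runtime_idle,
  };
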
| @@ -238,41 +239,41 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
| 238 | removing the device from device hierarchy | 239 | removing the device from device hierarchy |
| 239 | 240 | ||
| 240 | int pm_runtime_idle(struct device *dev); | 241 | int pm_runtime_idle(struct device *dev); |
| 241 | - execute ->runtime_idle() for the device's bus type; returns 0 on success | 242 | - execute the subsystem-level idle callback for the device; returns 0 on |
| 242 | or error code on failure, where -EINPROGRESS means that ->runtime_idle() | 243 | success or error code on failure, where -EINPROGRESS means that |
| 243 | is already being executed | 244 | ->runtime_idle() is already being executed |
| 244 | 245 | ||
| 245 | int pm_runtime_suspend(struct device *dev); | 246 | int pm_runtime_suspend(struct device *dev); |
| 246 | - execute ->runtime_suspend() for the device's bus type; returns 0 on | 247 | - execute the subsystem-level suspend callback for the device; returns 0 on |
| 247 | success, 1 if the device's run-time PM status was already 'suspended', or | 248 | success, 1 if the device's run-time PM status was already 'suspended', or |
| 248 | error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt | 249 | error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt |
| 249 | to suspend the device again in future | 250 | to suspend the device again in future |
| 250 | 251 | ||
| 251 | int pm_runtime_resume(struct device *dev); | 252 | int pm_runtime_resume(struct device *dev); |
| 252 | - execute ->runtime_resume() for the device's bus type; returns 0 on | 253 | - execute the subsystem-level resume callback for the device; returns 0 on
| 253 | success, 1 if the device's run-time PM status was already 'active' or | 254 | success, 1 if the device's run-time PM status was already 'active' or |
| 254 | error code on failure, where -EAGAIN means it may be safe to attempt to | 255 | error code on failure, where -EAGAIN means it may be safe to attempt to |
| 255 | resume the device again in future, but 'power.runtime_error' should be | 256 | resume the device again in future, but 'power.runtime_error' should be |
| 256 | checked additionally | 257 | checked additionally |
| 257 | 258 | ||
| 258 | int pm_request_idle(struct device *dev); | 259 | int pm_request_idle(struct device *dev); |
| 259 | - submit a request to execute ->runtime_idle() for the device's bus type | 260 | - submit a request to execute the subsystem-level idle callback for the |
| 260 | (the request is represented by a work item in pm_wq); returns 0 on success | 261 | device (the request is represented by a work item in pm_wq); returns 0 on |
| 261 | or error code if the request has not been queued up | 262 | success or error code if the request has not been queued up |
| 262 | 263 | ||
| 263 | int pm_schedule_suspend(struct device *dev, unsigned int delay); | 264 | int pm_schedule_suspend(struct device *dev, unsigned int delay); |
| 264 | - schedule the execution of ->runtime_suspend() for the device's bus type | 265 | - schedule the execution of the subsystem-level suspend callback for the |
| 265 | in future, where 'delay' is the time to wait before queuing up a suspend | 266 | device in future, where 'delay' is the time to wait before queuing up a |
| 266 | work item in pm_wq, in milliseconds (if 'delay' is zero, the work item is | 267 | suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work |
| 267 | queued up immediately); returns 0 on success, 1 if the device's PM | 268 | item is queued up immediately); returns 0 on success, 1 if the device's PM |
| 268 | run-time status was already 'suspended', or error code if the request | 269 | run-time status was already 'suspended', or error code if the request |
| 269 | hasn't been scheduled (or queued up if 'delay' is 0); if the execution of | 270 | hasn't been scheduled (or queued up if 'delay' is 0); if the execution of |
| 270 | ->runtime_suspend() is already scheduled and not yet expired, the new | 271 | ->runtime_suspend() is already scheduled and not yet expired, the new |
| 271 | value of 'delay' will be used as the time to wait | 272 | value of 'delay' will be used as the time to wait |
| 272 | 273 | ||
| 273 | int pm_request_resume(struct device *dev); | 274 | int pm_request_resume(struct device *dev); |
| 274 | - submit a request to execute ->runtime_resume() for the device's bus type | 275 | - submit a request to execute the subsystem-level resume callback for the |
| 275 | (the request is represented by a work item in pm_wq); returns 0 on | 276 | device (the request is represented by a work item in pm_wq); returns 0 on |
| 276 | success, 1 if the device's run-time PM status was already 'active', or | 277 | success, 1 if the device's run-time PM status was already 'active', or |
| 277 | error code if the request hasn't been queued up | 278 | error code if the request hasn't been queued up |
| 278 | 279 | ||
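
A driver-side view of the helpers listed above, as a rough sketch (the mydrv_* names and the 500 ms delay are illustrative, not from the patch):

  #include <linux/device.h>
  #include <linux/pm_runtime.h>

  /* I/O has finished: ask the PM core to queue up a suspend in 500 ms. */
  static void mydrv_io_done(struct device *dev)
  {
          pm_schedule_suspend(dev, 500);
  }

  /* New work has arrived: request an asynchronous resume via pm_wq. */
  static void mydrv_new_request(struct device *dev)
  {
          int error = pm_request_resume(dev);

          if (error < 0)
                  dev_warn(dev, "resume request not queued: %d\n", error);
  }
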
| @@ -303,12 +304,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
| 303 | run-time PM callbacks described in Section 2 | 304 | run-time PM callbacks described in Section 2 |
| 304 | 305 | ||
| 305 | int pm_runtime_disable(struct device *dev); | 306 | int pm_runtime_disable(struct device *dev); |
| 306 | - prevent the run-time PM helper functions from running the device bus | 307 | - prevent the run-time PM helper functions from running subsystem-level |
| 307 | type's run-time PM callbacks, make sure that all of the pending run-time | 308 | run-time PM callbacks for the device, make sure that all of the pending |
| 308 | PM operations on the device are either completed or canceled; returns | 309 | run-time PM operations on the device are either completed or canceled; |
| 309 | 1 if there was a resume request pending and it was necessary to execute | 310 | returns 1 if there was a resume request pending and it was necessary to |
| 310 | ->runtime_resume() for the device's bus type to satisfy that request, | 311 | execute the subsystem-level resume callback for the device to satisfy that |
| 311 | otherwise 0 is returned | 312 | request, otherwise 0 is returned |
| 312 | 313 | ||
| 313 | void pm_suspend_ignore_children(struct device *dev, bool enable); | 314 | void pm_suspend_ignore_children(struct device *dev, bool enable); |
| 314 | - set/unset the power.ignore_children flag of the device | 315 | - set/unset the power.ignore_children flag of the device |
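
As a usage sketch of the last helper (illustrative only; mybus_controller_probe() is made up): a controller device that should be allowed to run-time suspend independently of its children could set the flag at probe time.

  #include <linux/device.h>
  #include <linux/pm_runtime.h>

  static int mybus_controller_probe(struct device *dev)
  {
          /* Ignore the count of 'active' children when deciding idleness. */
          pm_suspend_ignore_children(dev, true);
          pm_runtime_enable(dev);
          return 0;
  }
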
| @@ -378,5 +379,55 @@ pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts, | |||
| 378 | they will fail returning -EAGAIN, because the device's usage counter is | 379 | they will fail returning -EAGAIN, because the device's usage counter is |
| 379 | incremented by the core before executing ->probe() and ->remove(). Still, it | 380 | incremented by the core before executing ->probe() and ->remove(). Still, it |
| 380 | may be desirable to suspend the device as soon as ->probe() or ->remove() has | 381 | may be desirable to suspend the device as soon as ->probe() or ->remove() has |
| 381 | finished, so the PM core uses pm_runtime_idle_sync() to invoke the device bus | 382 | finished, so the PM core uses pm_runtime_idle_sync() to invoke the |
| 382 | type's ->runtime_idle() callback at that time. | 383 | subsystem-level idle callback for the device at that time. |
| 384 | |||
| 385 | 6. Run-time PM and System Sleep | ||
| 386 | |||
| 387 | Run-time PM and system sleep (i.e., system suspend and hibernation, also known | ||
| 388 | as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of | ||
| 389 | ways. If a device is active when a system sleep starts, everything is | ||
| 390 | straightforward. But what should happen if the device is already suspended? | ||
| 391 | |||
| 392 | The device may have different wake-up settings for run-time PM and system sleep. | ||
| 393 | For example, remote wake-up may be enabled for run-time suspend but disallowed | ||
| 394 | for system sleep (device_may_wakeup(dev) returns 'false'). When this happens, | ||
| 395 | the subsystem-level system suspend callback is responsible for changing the | ||
| 396 | device's wake-up setting (it may leave that to the device driver's system | ||
| 397 | suspend routine). It may be necessary to resume the device and suspend it again | ||
| 398 | in order to do so. The same is true if the driver uses different power levels | ||
| 399 | or other settings for run-time suspend and system sleep. | ||
| 400 | |||
| 401 | During system resume, devices generally should be brought back to full power, | ||
| 402 | even if they were suspended before the system sleep began. There are several | ||
| 403 | reasons for this, including: | ||
| 404 | |||
| 405 | * The device might need to switch power levels, wake-up settings, etc. | ||
| 406 | |||
| 407 | * Remote wake-up events might have been lost by the firmware. | ||
| 408 | |||
| 409 | * The device's children may need the device to be at full power in order | ||
| 410 | to resume themselves. | ||
| 411 | |||
| 412 | * The driver's idea of the device state may not agree with the device's | ||
| 413 | physical state. This can happen during resume from hibernation. | ||
| 414 | |||
| 415 | * The device might need to be reset. | ||
| 416 | |||
| 417 | * Even though the device was suspended, if its usage counter was > 0 then most | ||
| 418 | likely it would need a run-time resume in the near future anyway. | ||
| 419 | |||
| 420 | * Always going back to full power is simplest. | ||
| 421 | |||
| 422 | If the device was suspended before the sleep began, then its run-time PM status | ||
| 423 | will have to be updated to reflect the actual post-system sleep status. The way | ||
| 424 | to do this is: | ||
| 425 | |||
| 426 | pm_runtime_disable(dev); | ||
| 427 | pm_runtime_set_active(dev); | ||
| 428 | pm_runtime_enable(dev); | ||
| 429 | |||
| 430 | The PM core always increments the run-time usage counter before calling the | ||
| 431 | ->prepare() callback and decrements it after calling the ->complete() callback. | ||
| 432 | Hence disabling run-time PM temporarily like this will not cause any run-time | ||
| 433 | suspend callbacks to be lost. | ||
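
A sketch of how a driver might apply the above in its system-resume path (mydrv_resume() is hypothetical, not part of this patch):

  #include <linux/device.h>
  #include <linux/pm_runtime.h>

  static int mydrv_resume(struct device *dev)
  {
          /* ... bring the hardware back to full power here ... */

          /*
           * The device is now fully operational, whatever its run-time
           * PM status was before the system sleep started.
           */
          pm_runtime_disable(dev);
          pm_runtime_set_active(dev);
          pm_runtime_enable(dev);

          return 0;
  }
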
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1a216c114a0f..48adf80926a0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
| @@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev) | |||
| 161 | list_move_tail(&dev->power.entry, &dpm_list); | 161 | list_move_tail(&dev->power.entry, &dpm_list); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static ktime_t initcall_debug_start(struct device *dev) | ||
| 165 | { | ||
| 166 | ktime_t calltime = ktime_set(0, 0); | ||
| 167 | |||
| 168 | if (initcall_debug) { | ||
| 169 | pr_info("calling %s+ @ %i\n", | ||
| 170 | dev_name(dev), task_pid_nr(current)); | ||
| 171 | calltime = ktime_get(); | ||
| 172 | } | ||
| 173 | |||
| 174 | return calltime; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void initcall_debug_report(struct device *dev, ktime_t calltime, | ||
| 178 | int error) | ||
| 179 | { | ||
| 180 | ktime_t delta, rettime; | ||
| 181 | |||
| 182 | if (initcall_debug) { | ||
| 183 | rettime = ktime_get(); | ||
| 184 | delta = ktime_sub(rettime, calltime); | ||
| 185 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
| 186 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 164 | /** | 190 | /** |
| 165 | * pm_op - Execute the PM operation appropriate for given PM event. | 191 | * pm_op - Execute the PM operation appropriate for given PM event. |
| 166 | * @dev: Device to handle. | 192 | * @dev: Device to handle. |
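
The two helpers factored out above are meant to bracket any PM callback invocation made from this file; roughly, each call site reduces to the following pattern (timed_call() is only an illustration, the real users are pm_op() here and legacy_suspend()/legacy_resume() further down):

  static int timed_call(struct device *dev, int (*cb)(struct device *))
  {
          ktime_t calltime = initcall_debug_start(dev);
          int error = cb(dev);

          initcall_debug_report(dev, calltime, error);
          return error;
  }
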
| @@ -172,13 +198,9 @@ static int pm_op(struct device *dev, | |||
| 172 | pm_message_t state) | 198 | pm_message_t state) |
| 173 | { | 199 | { |
| 174 | int error = 0; | 200 | int error = 0; |
| 175 | ktime_t calltime, delta, rettime; | 201 | ktime_t calltime; |
| 176 | 202 | ||
| 177 | if (initcall_debug) { | 203 | calltime = initcall_debug_start(dev); |
| 178 | pr_info("calling %s+ @ %i\n", | ||
| 179 | dev_name(dev), task_pid_nr(current)); | ||
| 180 | calltime = ktime_get(); | ||
| 181 | } | ||
| 182 | 204 | ||
| 183 | switch (state.event) { | 205 | switch (state.event) { |
| 184 | #ifdef CONFIG_SUSPEND | 206 | #ifdef CONFIG_SUSPEND |
| @@ -227,12 +249,7 @@ static int pm_op(struct device *dev, | |||
| 227 | error = -EINVAL; | 249 | error = -EINVAL; |
| 228 | } | 250 | } |
| 229 | 251 | ||
| 230 | if (initcall_debug) { | 252 | initcall_debug_report(dev, calltime, error); |
| 231 | rettime = ktime_get(); | ||
| 232 | delta = ktime_sub(rettime, calltime); | ||
| 233 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
| 234 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 235 | } | ||
| 236 | 253 | ||
| 237 | return error; | 254 | return error; |
| 238 | } | 255 | } |
| @@ -309,8 +326,9 @@ static int pm_noirq_op(struct device *dev, | |||
| 309 | if (initcall_debug) { | 326 | if (initcall_debug) { |
| 310 | rettime = ktime_get(); | 327 | rettime = ktime_get(); |
| 311 | delta = ktime_sub(rettime, calltime); | 328 | delta = ktime_sub(rettime, calltime); |
| 312 | printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev), | 329 | printk("initcall %s_i+ returned %d after %Ld usecs\n", |
| 313 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | 330 | dev_name(dev), error, |
| 331 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 314 | } | 332 | } |
| 315 | 333 | ||
| 316 | return error; | 334 | return error; |
| @@ -354,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
| 354 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 372 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); |
| 355 | } | 373 | } |
| 356 | 374 | ||
| 375 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | ||
| 376 | { | ||
| 377 | ktime_t calltime; | ||
| 378 | s64 usecs64; | ||
| 379 | int usecs; | ||
| 380 | |||
| 381 | calltime = ktime_get(); | ||
| 382 | usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); | ||
| 383 | do_div(usecs64, NSEC_PER_USEC); | ||
| 384 | usecs = usecs64; | ||
| 385 | if (usecs == 0) | ||
| 386 | usecs = 1; | ||
| 387 | pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", | ||
| 388 | info ?: "", info ? " " : "", pm_verb(state.event), | ||
| 389 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | ||
| 390 | } | ||
| 391 | |||
| 357 | /*------------------------- Resume routines -------------------------*/ | 392 | /*------------------------- Resume routines -------------------------*/ |
| 358 | 393 | ||
| 359 | /** | 394 | /** |
| @@ -390,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
| 390 | void dpm_resume_noirq(pm_message_t state) | 425 | void dpm_resume_noirq(pm_message_t state) |
| 391 | { | 426 | { |
| 392 | struct device *dev; | 427 | struct device *dev; |
| 428 | ktime_t starttime = ktime_get(); | ||
| 393 | 429 | ||
| 394 | mutex_lock(&dpm_list_mtx); | 430 | mutex_lock(&dpm_list_mtx); |
| 395 | transition_started = false; | 431 | transition_started = false; |
| @@ -403,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state) | |||
| 403 | pm_dev_err(dev, state, " early", error); | 439 | pm_dev_err(dev, state, " early", error); |
| 404 | } | 440 | } |
| 405 | mutex_unlock(&dpm_list_mtx); | 441 | mutex_unlock(&dpm_list_mtx); |
| 442 | dpm_show_time(starttime, state, "early"); | ||
| 406 | resume_device_irqs(); | 443 | resume_device_irqs(); |
| 407 | } | 444 | } |
| 408 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 445 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
| 409 | 446 | ||
| 410 | /** | 447 | /** |
| 448 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
| 449 | * @dev: Device to resume. | ||
| 450 | * @cb: Resume callback to execute. | ||
| 451 | */ | ||
| 452 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
| 453 | { | ||
| 454 | int error; | ||
| 455 | ktime_t calltime; | ||
| 456 | |||
| 457 | calltime = initcall_debug_start(dev); | ||
| 458 | |||
| 459 | error = cb(dev); | ||
| 460 | suspend_report_result(cb, error); | ||
| 461 | |||
| 462 | initcall_debug_report(dev, calltime, error); | ||
| 463 | |||
| 464 | return error; | ||
| 465 | } | ||
| 466 | |||
| 467 | /** | ||
| 411 | * device_resume - Execute "resume" callbacks for given device. | 468 | * device_resume - Execute "resume" callbacks for given device. |
| 412 | * @dev: Device to handle. | 469 | * @dev: Device to handle. |
| 413 | * @state: PM transition of the system being carried out. | 470 | * @state: PM transition of the system being carried out. |
| @@ -427,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 427 | error = pm_op(dev, dev->bus->pm, state); | 484 | error = pm_op(dev, dev->bus->pm, state); |
| 428 | } else if (dev->bus->resume) { | 485 | } else if (dev->bus->resume) { |
| 429 | pm_dev_dbg(dev, state, "legacy "); | 486 | pm_dev_dbg(dev, state, "legacy "); |
| 430 | error = dev->bus->resume(dev); | 487 | error = legacy_resume(dev, dev->bus->resume); |
| 431 | } | 488 | } |
| 432 | if (error) | 489 | if (error) |
| 433 | goto End; | 490 | goto End; |
| @@ -448,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 448 | error = pm_op(dev, dev->class->pm, state); | 505 | error = pm_op(dev, dev->class->pm, state); |
| 449 | } else if (dev->class->resume) { | 506 | } else if (dev->class->resume) { |
| 450 | pm_dev_dbg(dev, state, "legacy class "); | 507 | pm_dev_dbg(dev, state, "legacy class "); |
| 451 | error = dev->class->resume(dev); | 508 | error = legacy_resume(dev, dev->class->resume); |
| 452 | } | 509 | } |
| 453 | } | 510 | } |
| 454 | End: | 511 | End: |
| @@ -468,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 468 | static void dpm_resume(pm_message_t state) | 525 | static void dpm_resume(pm_message_t state) |
| 469 | { | 526 | { |
| 470 | struct list_head list; | 527 | struct list_head list; |
| 528 | ktime_t starttime = ktime_get(); | ||
| 471 | 529 | ||
| 472 | INIT_LIST_HEAD(&list); | 530 | INIT_LIST_HEAD(&list); |
| 473 | mutex_lock(&dpm_list_mtx); | 531 | mutex_lock(&dpm_list_mtx); |
| @@ -496,6 +554,7 @@ static void dpm_resume(pm_message_t state) | |||
| 496 | } | 554 | } |
| 497 | list_splice(&list, &dpm_list); | 555 | list_splice(&list, &dpm_list); |
| 498 | mutex_unlock(&dpm_list_mtx); | 556 | mutex_unlock(&dpm_list_mtx); |
| 557 | dpm_show_time(starttime, state, NULL); | ||
| 499 | } | 558 | } |
| 500 | 559 | ||
| 501 | /** | 560 | /** |
| @@ -548,7 +607,7 @@ static void dpm_complete(pm_message_t state) | |||
| 548 | mutex_unlock(&dpm_list_mtx); | 607 | mutex_unlock(&dpm_list_mtx); |
| 549 | 608 | ||
| 550 | device_complete(dev, state); | 609 | device_complete(dev, state); |
| 551 | pm_runtime_put_noidle(dev); | 610 | pm_runtime_put_sync(dev); |
| 552 | 611 | ||
| 553 | mutex_lock(&dpm_list_mtx); | 612 | mutex_lock(&dpm_list_mtx); |
| 554 | } | 613 | } |
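
The switch from pm_runtime_put_noidle() to pm_runtime_put_sync() in dpm_complete() (and in the dpm_prepare() error path below) is a behavioural change, roughly as sketched here; drop_system_sleep_ref() is only an illustration:

  static void drop_system_sleep_ref(struct device *dev)
  {
          /*
           * pm_runtime_put_noidle() would only decrement the usage
           * counter; the _sync variant additionally runs the
           * subsystem-level idle callback once the counter reaches zero,
           * so an idle device can be suspended again right after the
           * system transition finishes.
           */
          pm_runtime_put_sync(dev);
  }
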
| @@ -628,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
| 628 | int dpm_suspend_noirq(pm_message_t state) | 687 | int dpm_suspend_noirq(pm_message_t state) |
| 629 | { | 688 | { |
| 630 | struct device *dev; | 689 | struct device *dev; |
| 690 | ktime_t starttime = ktime_get(); | ||
| 631 | int error = 0; | 691 | int error = 0; |
| 632 | 692 | ||
| 633 | suspend_device_irqs(); | 693 | suspend_device_irqs(); |
| @@ -643,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state) | |||
| 643 | mutex_unlock(&dpm_list_mtx); | 703 | mutex_unlock(&dpm_list_mtx); |
| 644 | if (error) | 704 | if (error) |
| 645 | dpm_resume_noirq(resume_event(state)); | 705 | dpm_resume_noirq(resume_event(state)); |
| 706 | else | ||
| 707 | dpm_show_time(starttime, state, "late"); | ||
| 646 | return error; | 708 | return error; |
| 647 | } | 709 | } |
| 648 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 710 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); |
| 649 | 711 | ||
| 650 | /** | 712 | /** |
| 713 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | ||
| 714 | * @dev: Device to suspend. | ||
| 715 | * @cb: Suspend callback to execute. | ||
| 716 | */ | ||
| 717 | static int legacy_suspend(struct device *dev, pm_message_t state, | ||
| 718 | int (*cb)(struct device *dev, pm_message_t state)) | ||
| 719 | { | ||
| 720 | int error; | ||
| 721 | ktime_t calltime; | ||
| 722 | |||
| 723 | calltime = initcall_debug_start(dev); | ||
| 724 | |||
| 725 | error = cb(dev, state); | ||
| 726 | suspend_report_result(cb, error); | ||
| 727 | |||
| 728 | initcall_debug_report(dev, calltime, error); | ||
| 729 | |||
| 730 | return error; | ||
| 731 | } | ||
| 732 | |||
| 733 | /** | ||
| 651 | * device_suspend - Execute "suspend" callbacks for given device. | 734 | * device_suspend - Execute "suspend" callbacks for given device. |
| 652 | * @dev: Device to handle. | 735 | * @dev: Device to handle. |
| 653 | * @state: PM transition of the system being carried out. | 736 | * @state: PM transition of the system being carried out. |
| @@ -664,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 664 | error = pm_op(dev, dev->class->pm, state); | 747 | error = pm_op(dev, dev->class->pm, state); |
| 665 | } else if (dev->class->suspend) { | 748 | } else if (dev->class->suspend) { |
| 666 | pm_dev_dbg(dev, state, "legacy class "); | 749 | pm_dev_dbg(dev, state, "legacy class "); |
| 667 | error = dev->class->suspend(dev, state); | 750 | error = legacy_suspend(dev, state, dev->class->suspend); |
| 668 | suspend_report_result(dev->class->suspend, error); | ||
| 669 | } | 751 | } |
| 670 | if (error) | 752 | if (error) |
| 671 | goto End; | 753 | goto End; |
| @@ -686,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 686 | error = pm_op(dev, dev->bus->pm, state); | 768 | error = pm_op(dev, dev->bus->pm, state); |
| 687 | } else if (dev->bus->suspend) { | 769 | } else if (dev->bus->suspend) { |
| 688 | pm_dev_dbg(dev, state, "legacy "); | 770 | pm_dev_dbg(dev, state, "legacy "); |
| 689 | error = dev->bus->suspend(dev, state); | 771 | error = legacy_suspend(dev, state, dev->bus->suspend); |
| 690 | suspend_report_result(dev->bus->suspend, error); | ||
| 691 | } | 772 | } |
| 692 | } | 773 | } |
| 693 | End: | 774 | End: |
| @@ -703,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 703 | static int dpm_suspend(pm_message_t state) | 784 | static int dpm_suspend(pm_message_t state) |
| 704 | { | 785 | { |
| 705 | struct list_head list; | 786 | struct list_head list; |
| 787 | ktime_t starttime = ktime_get(); | ||
| 706 | int error = 0; | 788 | int error = 0; |
| 707 | 789 | ||
| 708 | INIT_LIST_HEAD(&list); | 790 | INIT_LIST_HEAD(&list); |
| @@ -728,6 +810,8 @@ static int dpm_suspend(pm_message_t state) | |||
| 728 | } | 810 | } |
| 729 | list_splice(&list, dpm_list.prev); | 811 | list_splice(&list, dpm_list.prev); |
| 730 | mutex_unlock(&dpm_list_mtx); | 812 | mutex_unlock(&dpm_list_mtx); |
| 813 | if (!error) | ||
| 814 | dpm_show_time(starttime, state, NULL); | ||
| 731 | return error; | 815 | return error; |
| 732 | } | 816 | } |
| 733 | 817 | ||
| @@ -796,7 +880,7 @@ static int dpm_prepare(pm_message_t state) | |||
| 796 | pm_runtime_get_noresume(dev); | 880 | pm_runtime_get_noresume(dev); |
| 797 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 881 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { |
| 798 | /* Wake-up requested during system sleep transition. */ | 882 | /* Wake-up requested during system sleep transition. */ |
| 799 | pm_runtime_put_noidle(dev); | 883 | pm_runtime_put_sync(dev); |
| 800 | error = -EBUSY; | 884 | error = -EBUSY; |
| 801 | } else { | 885 | } else { |
| 802 | error = device_prepare(dev, state); | 886 | error = device_prepare(dev, state); |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 40d7720a4b21..f8b044e8aef7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
| @@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev) | |||
| 85 | dev->bus->pm->runtime_idle(dev); | 85 | dev->bus->pm->runtime_idle(dev); |
| 86 | 86 | ||
| 87 | spin_lock_irq(&dev->power.lock); | 87 | spin_lock_irq(&dev->power.lock); |
| 88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | ||
| 89 | spin_unlock_irq(&dev->power.lock); | ||
| 90 | |||
| 91 | dev->type->pm->runtime_idle(dev); | ||
| 92 | |||
| 93 | spin_lock_irq(&dev->power.lock); | ||
| 94 | } else if (dev->class && dev->class->pm | ||
| 95 | && dev->class->pm->runtime_idle) { | ||
| 96 | spin_unlock_irq(&dev->power.lock); | ||
| 97 | |||
| 98 | dev->class->pm->runtime_idle(dev); | ||
| 99 | |||
| 100 | spin_lock_irq(&dev->power.lock); | ||
| 88 | } | 101 | } |
| 89 | 102 | ||
| 90 | dev->power.idle_notification = false; | 103 | dev->power.idle_notification = false; |
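
The same bus-type / device-type / class precedence is open-coded three times in this file (idle, suspend, resume). Purely as an illustration of the lookup order — not how the patch implements it — the selection could be expressed as:

  #include <linux/device.h>

  /*
   * Illustrative only: pick the runtime-idle callback with the same
   * precedence the patch uses: bus type, then device type, then class.
   */
  static int (*pick_runtime_idle(struct device *dev))(struct device *)
  {
          if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
                  return dev->bus->pm->runtime_idle;
          if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
                  return dev->type->pm->runtime_idle;
          if (dev->class && dev->class->pm && dev->class->pm->runtime_idle)
                  return dev->class->pm->runtime_idle;
          return NULL;
  }
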
| @@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
| 194 | 207 | ||
| 195 | spin_lock_irq(&dev->power.lock); | 208 | spin_lock_irq(&dev->power.lock); |
| 196 | dev->power.runtime_error = retval; | 209 | dev->power.runtime_error = retval; |
| 210 | } else if (dev->type && dev->type->pm | ||
| 211 | && dev->type->pm->runtime_suspend) { | ||
| 212 | spin_unlock_irq(&dev->power.lock); | ||
| 213 | |||
| 214 | retval = dev->type->pm->runtime_suspend(dev); | ||
| 215 | |||
| 216 | spin_lock_irq(&dev->power.lock); | ||
| 217 | dev->power.runtime_error = retval; | ||
| 218 | } else if (dev->class && dev->class->pm | ||
| 219 | && dev->class->pm->runtime_suspend) { | ||
| 220 | spin_unlock_irq(&dev->power.lock); | ||
| 221 | |||
| 222 | retval = dev->class->pm->runtime_suspend(dev); | ||
| 223 | |||
| 224 | spin_lock_irq(&dev->power.lock); | ||
| 225 | dev->power.runtime_error = retval; | ||
| 197 | } else { | 226 | } else { |
| 198 | retval = -ENOSYS; | 227 | retval = -ENOSYS; |
| 199 | } | 228 | } |
| @@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
| 359 | 388 | ||
| 360 | spin_lock_irq(&dev->power.lock); | 389 | spin_lock_irq(&dev->power.lock); |
| 361 | dev->power.runtime_error = retval; | 390 | dev->power.runtime_error = retval; |
| 391 | } else if (dev->type && dev->type->pm | ||
| 392 | && dev->type->pm->runtime_resume) { | ||
| 393 | spin_unlock_irq(&dev->power.lock); | ||
| 394 | |||
| 395 | retval = dev->type->pm->runtime_resume(dev); | ||
| 396 | |||
| 397 | spin_lock_irq(&dev->power.lock); | ||
| 398 | dev->power.runtime_error = retval; | ||
| 399 | } else if (dev->class && dev->class->pm | ||
| 400 | && dev->class->pm->runtime_resume) { | ||
| 401 | spin_unlock_irq(&dev->power.lock); | ||
| 402 | |||
| 403 | retval = dev->class->pm->runtime_resume(dev); | ||
| 404 | |||
| 405 | spin_lock_irq(&dev->power.lock); | ||
| 406 | dev->power.runtime_error = retval; | ||
| 362 | } else { | 407 | } else { |
| 363 | retval = -ENOSYS; | 408 | retval = -ENOSYS; |
| 364 | } | 409 | } |
