Diffstat (limited to 'drivers/base/power/runtime.c')
 drivers/base/power/runtime.c | 944
 1 file changed, 503 insertions(+), 441 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b78c401ffa73..1dd8676d7f55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -2,17 +2,55 @@
  * drivers/base/power/runtime.c - Helper functions for device run-time PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
  *
  * This file is released under the GPLv2.
  */
 
 #include <linux/sched.h>
 #include <linux/pm_runtime.h>
-#include <linux/jiffies.h>
+#include "power.h"
 
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
-static int __pm_request_idle(struct device *dev);
-static int __pm_request_resume(struct device *dev);
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
 
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
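The rpm_* helpers declared above replace the old from_wq boolean with an rpmflags bit mask. The flag bits themselves come from the companion include/linux/pm_runtime.h change, which is not part of this file's diff; they are reproduced here from that header as a reader's aid:

	#define RPM_ASYNC	0x01	/* Request is asynchronous */
	#define RPM_NOWAIT	0x02	/* Don't wait for concurrent state change */
	#define RPM_GET_PUT	0x04	/* Increment/decrement the usage_count */
	#define RPM_AUTO	0x08	/* Use autosuspend_delay */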
@@ -40,62 +78,154 @@ static void pm_runtime_cancel_pending(struct device *dev)
 	dev->power.request = RPM_REQ_NONE;
 }
 
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
  *
- * This function must be called under dev->power.lock with interrupts disabled.
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time. If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
  */
-static int __pm_runtime_idle(struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+	int autosuspend_delay;
+	long elapsed;
+	unsigned long last_busy;
+	unsigned long expires = 0;
+
+	if (!dev->power.use_autosuspend)
+		goto out;
+
+	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	if (autosuspend_delay < 0)
+		goto out;
+
+	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	elapsed = jiffies - last_busy;
+	if (elapsed < 0)
+		goto out;	/* jiffies has wrapped around. */
+
+	/*
+	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
+	 * up to the nearest second.
+	 */
+	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+	if (autosuspend_delay >= 1000)
+		expires = round_jiffies(expires);
+	expires += !expires;
+	if (elapsed >= expires - last_busy)
+		expires = 0;	/* Already expired. */
+
+ out:
+	return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
 {
 	int retval = 0;
 
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
-	else if (dev->power.idle_notification)
-		retval = -EINPROGRESS;
 	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status != RPM_ACTIVE)
+	    || dev->power.disable_depth > 0)
 		retval = -EAGAIN;
 	else if (!pm_children_suspended(dev))
 		retval = -EBUSY;
+
+	/* Pending resume requests take precedence over suspends. */
+	else if ((dev->power.deferred_resume
+			&& dev->power.runtime_status == RPM_SUSPENDING)
+	    || (dev->power.request_pending
+			&& dev->power.request == RPM_REQ_RESUME))
+		retval = -EAGAIN;
+	else if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+
+	return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another idle notification has been started earlier, return immediately. If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+	int (*callback)(struct device *);
+	int retval;
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
+	else if (dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+
+	/*
+	 * Any pending request other than an idle notification takes
+	 * precedence over us, except that the timer may be running.
+	 */
+	else if (dev->power.request_pending &&
+	    dev->power.request > RPM_REQ_IDLE)
+		retval = -EAGAIN;
+
+	/* Act as though RPM_NOWAIT is always set. */
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
 	if (retval)
 		goto out;
 
-	if (dev->power.request_pending) {
-		/*
-		 * If an idle notification request is pending, cancel it. Any
-		 * other pending request takes precedence over us.
-		 */
-		if (dev->power.request == RPM_REQ_IDLE) {
-			dev->power.request = RPM_REQ_NONE;
-		} else if (dev->power.request != RPM_REQ_NONE) {
-			retval = -EAGAIN;
-			goto out;
+	/* Pending requests need to be canceled. */
+	dev->power.request = RPM_REQ_NONE;
+
+	if (dev->power.no_callbacks) {
+		/* Assume ->runtime_idle() callback would have suspended. */
+		retval = rpm_suspend(dev, rpmflags);
+		goto out;
+	}
+
+	/* Carry out an asynchronous or a synchronous idle notification. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_IDLE;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
 		}
+		goto out;
 	}
 
 	dev->power.idle_notification = true;
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->bus->pm->runtime_idle(dev);
-
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->type->pm->runtime_idle(dev);
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
+		callback = dev->bus->pm->runtime_idle;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
+		callback = dev->type->pm->runtime_idle;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_idle;
+	else
+		callback = NULL;
 
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_idle) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		dev->class->pm->runtime_idle(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
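pm_runtime_autosuspend_expiration() reads power.last_busy, which drivers are expected to refresh after each burst of activity. The producer side is a one-line inline added by the companion header change (shown here for context, not part of this file's diff):

	static inline void pm_runtime_mark_last_busy(struct device *dev)
	{
		ACCESS_ONCE(dev->power.last_busy) = jiffies;
	}

Note the `expires += !expires;` idiom above: a computed expiration of 0 is bumped to 1, because a return value of 0 is reserved to mean the delay has already expired.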
@@ -108,113 +238,99 @@ static int __pm_runtime_idle(struct device *dev)
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
  */
-int pm_runtime_idle(struct device *dev)
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
 
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-
-/**
- * update_pm_runtime_accounting - Update the time accounting of power states
- * @dev: Device to update the accounting for
- *
- * In order to be able to have time accounting of the various power states
- * (as used by programs such as PowerTOP to show the effectiveness of runtime
- * PM), we need to track the time spent in each state.
- * update_pm_runtime_accounting must be called each time before the
- * runtime_status field is updated, to account the time in the old state
- * correctly.
- */
-void update_pm_runtime_accounting(struct device *dev)
-{
-	unsigned long now = jiffies;
-	int delta;
-
-	delta = now - dev->power.accounting_timestamp;
-
-	if (delta < 0)
-		delta = 0;
+	if (!cb)
+		return -ENOSYS;
 
-	dev->power.accounting_timestamp = now;
+	spin_unlock_irq(&dev->power.lock);
 
-	if (dev->power.disable_depth > 0)
-		return;
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
-	else
-		dev->power.active_jiffies += delta;
-}
+	retval = cb(dev);
 
-static void __update_runtime_status(struct device *dev, enum rpm_status status)
-{
-	update_pm_runtime_accounting(dev);
-	dev->power.runtime_status = status;
+	spin_lock_irq(&dev->power.lock);
+	dev->power.runtime_error = retval;
+
+	return retval;
 }
 
 /**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
- * scheduled, cancel it.
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another suspend has been started earlier, either return immediately or wait
+ * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
+ * pending idle notification. If the RPM_ASYNC flag is set then queue a
+ * suspend request; otherwise run the ->runtime_suspend() callback directly.
+ * If a deferred resume was requested while the callback was running then carry
+ * it out; otherwise send an idle notification for the device (if the suspend
+ * failed) or for its parent (if the suspend succeeded).
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
-	bool notify = false;
-	int retval = 0;
+	int retval;
 
-	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
 repeat:
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
+	retval = rpm_check_suspend_allowed(dev);
 
-	/* Pending resume requests take precedence over us. */
-	if (dev->power.request_pending
-	    && dev->power.request == RPM_REQ_RESUME) {
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+	else if (dev->power.runtime_status == RPM_RESUMING &&
+	    !(rpmflags & RPM_ASYNC))
 		retval = -EAGAIN;
+	if (retval)
 		goto out;
+
+	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
+	if ((rpmflags & RPM_AUTO)
+	    && dev->power.runtime_status != RPM_SUSPENDING) {
+		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+		if (expires != 0) {
+			/* Pending requests need to be canceled. */
+			dev->power.request = RPM_REQ_NONE;
+
+			/*
+			 * Optimization: If the timer is already running and is
+			 * set to expire at or before the autosuspend delay,
+			 * avoid the overhead of resetting it.  Just let it
+			 * expire; pm_suspend_timer_fn() will take care of the
+			 * rest.
+			 */
+			if (!(dev->power.timer_expires && time_before_eq(
+			    dev->power.timer_expires, expires))) {
+				dev->power.timer_expires = expires;
+				mod_timer(&dev->power.suspend_timer, expires);
+			}
+			dev->power.timer_autosuspends = 1;
+			goto out;
+		}
 	}
 
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING
-	    || dev->power.disable_depth > 0
-	    || atomic_read(&dev->power.usage_count) > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		goto out;
-
 	if (dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			retval = -EINPROGRESS;
 			goto out;
 		}
@@ -236,46 +352,42 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
+
+	/* Carry out an asynchronous or a synchronous suspend. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = (rpmflags & RPM_AUTO) ?
+		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		goto out;
+	}
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->class->pm->runtime_suspend(dev);
+	__update_runtime_status(dev, RPM_SUSPENDING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+		callback = dev->bus->pm->runtime_suspend;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
+		callback = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_suspend;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			if (dev->power.timer_expires == 0)
-				notify = true;
+		dev->power.deferred_resume = 0;
+		if (retval == -EAGAIN || retval == -EBUSY)
 			dev->power.runtime_error = 0;
-		} else {
+		else
 			pm_runtime_cancel_pending(dev);
-		}
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
 
@@ -287,14 +399,11 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
 
-	if (notify)
-		__pm_runtime_idle(dev);
-
 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
 
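With the RPM_AUTO rescheduling and deferred-resume handling in place, the driver-side idiom this series enables is simply (a usage sketch; both helpers are thin wrappers from the companion header change):

	/* Sketch: mark the device busy, then drop the reference; the
	 * suspend happens autosuspend_delay ms later if it stays idle. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);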
@@ -304,72 +413,69 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}
 
 out:
-	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
-
-	return retval;
-}
-
-/**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, false);
-	spin_unlock_irq(&dev->power.lock);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
 
 /**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * any scheduled or pending requests. If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on
+ * the RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running
+ * in parallel with this function, either tell the other process to resume
+ * after suspending (deferred_resume) or wait for it to finish. If the
+ * RPM_ASYNC flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly. Queue an idle notification for the
+ * device if the resume succeeded.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
 	int retval = 0;
 
-	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
 repeat:
-	if (dev->power.runtime_error) {
+	if (dev->power.runtime_error)
 		retval = -EINVAL;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval)
 		goto out;
-	}
 
-	pm_runtime_cancel_pending(dev);
+	/*
+	 * Other scheduled or pending requests need to be canceled.  Small
+	 * optimization: If an autosuspend timer is running, leave it running
+	 * rather than cancelling it now only to restart it again in the near
+	 * future.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
 
-	if (dev->power.runtime_status == RPM_ACTIVE)
+	if (dev->power.runtime_status == RPM_ACTIVE) {
 		retval = 1;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval)
 		goto out;
+	}
 
 	if (dev->power.runtime_status == RPM_RESUMING
 	    || dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			if (dev->power.runtime_status == RPM_SUSPENDING)
 				dev->power.deferred_resume = true;
-			retval = -EINPROGRESS;
+			else
+				retval = -EINPROGRESS;
 			goto out;
 		}
 
@@ -391,6 +497,34 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
+	/*
+	 * See if we can skip waking up the parent.  This is safe only if
+	 * power.no_callbacks is set, because otherwise we don't know whether
+	 * the resume will actually succeed.
+	 */
+	if (dev->power.no_callbacks && !parent && dev->parent) {
+		spin_lock(&dev->parent->power.lock);
+		if (dev->parent->power.disable_depth > 0
+		    || dev->parent->power.ignore_children
+		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
+			atomic_inc(&dev->parent->power.child_count);
+			spin_unlock(&dev->parent->power.lock);
+			goto no_callback;	/* Assume success. */
+		}
+		spin_unlock(&dev->parent->power.lock);
+	}
+
+	/* Carry out an asynchronous or a synchronous resume. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_RESUME;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		retval = 0;
+		goto out;
+	}
+
 	if (!parent && dev->parent) {
 		/*
 		 * Increment the parent's resume counter and resume it if
@@ -408,7 +542,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		 */
 		if (!parent->power.disable_depth
 		    && !parent->power.ignore_children) {
-			__pm_runtime_resume(parent, false);
+			rpm_resume(parent, 0);
 			if (parent->power.runtime_status != RPM_ACTIVE)
 				retval = -EBUSY;
 		}
@@ -420,39 +554,26 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	__update_runtime_status(dev, RPM_RESUMING);
-
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_resume(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_resume(dev);
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->class->pm->runtime_resume(dev);
+	__update_runtime_status(dev, RPM_RESUMING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+		callback = dev->bus->pm->runtime_resume;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
+		callback = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_resume;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
@@ -460,7 +581,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (!retval)
-		__pm_request_idle(dev);
+		rpm_idle(dev, RPM_ASYNC);
 
 out:
 	if (parent) {
@@ -471,28 +592,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		spin_lock_irq(&dev->power.lock);
 	}
 
-	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
 
 /**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, false);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
-
-/**
  * pm_runtime_work - Universal run-time PM work function.
  * @work: Work structure used for scheduling the execution of this function.
  *
@@ -517,13 +622,16 @@ static void pm_runtime_work(struct work_struct *work)
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev);
+		rpm_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, true);
+		rpm_suspend(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_AUTOSUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, true);
+		rpm_resume(dev, RPM_NOWAIT);
 		break;
 	}
 
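pm_runtime_work() now dispatches a fourth request type. For orientation, the request enum after this series, as added to include/linux/pm.h by the companion change (the comments here are paraphrased, not copied):

	enum rpm_request {
		RPM_REQ_NONE = 0,
		RPM_REQ_IDLE,		/* run ->runtime_idle() */
		RPM_REQ_SUSPEND,	/* run ->runtime_suspend() */
		RPM_REQ_AUTOSUSPEND,	/* ditto, once the delay expires */
		RPM_REQ_RESUME,		/* run ->runtime_resume() */
	};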
@@ -532,117 +640,10 @@ static void pm_runtime_work(struct work_struct *work)
 }
 
 /**
- * __pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- *
- * Check if the device's run-time PM status is correct for suspending the device
- * and queue up a request to run __pm_runtime_idle() for it.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_idle(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		retval = -EINVAL;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status == RPM_SUSPENDED
-	    || dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		return retval;
-
-	if (dev->power.request_pending) {
-		/* Any requests other then RPM_REQ_IDLE take precedence. */
-		if (dev->power.request == RPM_REQ_NONE)
-			dev->power.request = RPM_REQ_IDLE;
-		else if (dev->power.request != RPM_REQ_IDLE)
-			retval = -EAGAIN;
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_IDLE;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return retval;
-}
-
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_idle(dev);
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
-
-/**
- * __pm_request_suspend - Submit a suspend request for given device.
- * @dev: Device to suspend.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_suspend(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EINPROGRESS;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but we can
-		 * overtake any other pending request.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME)
-			retval = -EAGAIN;
-		else if (dev->power.request != RPM_REQ_SUSPEND)
-			dev->power.request = retval ?
-				RPM_REQ_NONE : RPM_REQ_SUSPEND;
-		return retval;
-	} else if (retval) {
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_SUSPEND;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return 0;
-}
-
-/**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  * @data: Device pointer passed by pm_schedule_suspend().
  *
- * Check if the time is right and execute __pm_request_suspend() in that case.
+ * Check if the time is right and queue a suspend request.
  */
 static void pm_suspend_timer_fn(unsigned long data)
 {
@@ -656,7 +657,8 @@ static void pm_suspend_timer_fn(unsigned long data)
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_request_suspend(dev);
+		rpm_suspend(dev, dev->power.timer_autosuspends ?
+		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -670,47 +672,25 @@ static void pm_suspend_timer_fn(unsigned long data)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
-	int retval = 0;
+	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
-
 	if (!delay) {
-		retval = __pm_request_suspend(dev);
+		retval = rpm_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
 
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but any
-		 * other pending requests have to be canceled.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME) {
-			retval = -EAGAIN;
-			goto out;
-		}
-		dev->power.request = RPM_REQ_NONE;
-	}
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
+	retval = rpm_check_suspend_allowed(dev);
 	if (retval)
 		goto out;
 
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-	if (!dev->power.timer_expires)
-		dev->power.timer_expires = 1;
+	dev->power.timer_expires += !dev->power.timer_expires;
+	dev->power.timer_autosuspends = 0;
 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 
 out:
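pm_schedule_suspend() keeps its old contract: a plain delayed suspend request with no autosuspend bookkeeping (timer_autosuspends is cleared), and `timer_expires += !timer_expires;` is the same 0-as-sentinel trick noted earlier. A minimal usage sketch:

	/* Sketch: request a suspend attempt roughly 100 ms from now. */
	int ret = pm_schedule_suspend(dev, 100);
	if (ret < 0)
		dev_warn(dev, "suspend not scheduled: %d\n", ret);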
@@ -721,103 +701,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
  *
- * This function must be called under dev->power.lock with interrupts disabled.
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-static int __pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_ACTIVE)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING)
-		retval = -EINPROGRESS;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
+	unsigned long flags;
+	int retval;
 
-	if (dev->power.runtime_status == RPM_SUSPENDING) {
-		dev->power.deferred_resume = true;
-		return retval;
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
 	}
-	if (dev->power.request_pending) {
-		/* If non-resume request is pending, we can overtake it. */
-		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
-		return retval;
-	}
-	if (retval)
-		return retval;
 
-	dev->power.request = RPM_REQ_RESUME;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_resume(dev);
+	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
- * @sync: If set and the device is suspended, resume it synchronously.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
  *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the value of @sync.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, bool sync)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
+	unsigned long flags;
 	int retval;
 
-	atomic_inc(&dev->power.usage_count);
-	retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
-
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @sync: If the device's bus type is to be notified, do that synchronously.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the value of @sync.
- */
-int __pm_runtime_put(struct device *dev, bool sync)
-{
-	int retval = 0;
-
-	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
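These three entry points absorb the half-dozen exported functions deleted above. The familiar helper names survive as static inlines that just pick flag bits; a condensed excerpt from the companion pm_runtime.h change, reproduced from that header for orientation and abbreviated to the most common helpers:

	static inline int pm_runtime_suspend(struct device *dev)
	{
		return __pm_runtime_suspend(dev, 0);
	}
	static inline int pm_runtime_autosuspend(struct device *dev)
	{
		return __pm_runtime_suspend(dev, RPM_AUTO);
	}
	static inline int pm_runtime_get_sync(struct device *dev)
	{
		return __pm_runtime_resume(dev, RPM_GET_PUT);
	}
	static inline int pm_runtime_put(struct device *dev)
	{
		return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
	}
	static inline int pm_runtime_put_autosuspend(struct device *dev)
	{
		return __pm_runtime_suspend(dev,
		    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
	}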
@@ -968,7 +933,7 @@ int pm_runtime_barrier(struct device *dev)
 
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 		retval = 1;
 	}
 
@@ -1017,7 +982,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 		 */
 		pm_runtime_get_noresume(dev);
 
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 
 		pm_runtime_put_noidle(dev);
 	}
@@ -1065,7 +1030,7 @@ void pm_runtime_forbid(struct device *dev)
 
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, false);
+	rpm_resume(dev, 0);
 
 out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1086,7 +1051,7 @@ void pm_runtime_allow(struct device *dev)
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev);
+		rpm_idle(dev, RPM_AUTO);
 
 out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1094,13 +1059,110 @@ void pm_runtime_allow(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_allow);
 
 /**
+ * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no run-time PM
+ * callbacks of its own. The run-time sysfs attributes will be removed.
+ *
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.no_callbacks = 1;
+	spin_unlock_irq(&dev->power.lock);
+	if (device_is_registered(dev))
+		rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it. Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+	int delay = dev->power.autosuspend_delay;
+
+	/* Should runtime suspend be prevented now? */
+	if (dev->power.use_autosuspend && delay < 0) {
+
+		/* If it used to be allowed then prevent it. */
+		if (!old_use || old_delay >= 0) {
+			atomic_inc(&dev->power.usage_count);
+			rpm_resume(dev, 0);
+		}
+	}
+
+	/* Runtime suspend should be allowed now. */
+	else {
+
+		/* If it used to be prevented then allow it. */
+		if (old_use && old_delay < 0)
+			atomic_dec(&dev->power.usage_count);
+
+		/* Maybe we can autosuspend now. */
+		rpm_idle(dev, RPM_AUTO);
+	}
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value. If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
+ * changes the other way, allow run-time suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.autosuspend_delay = delay;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.use_autosuspend = use;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
+
+/**
  * pm_runtime_init - Initialize run-time PM fields in given device object.
  * @dev: Device object to initialize.
  */
 void pm_runtime_init(struct device *dev)
 {
-	spin_lock_init(&dev->power.lock);
-
 	dev->power.runtime_status = RPM_SUSPENDED;
 	dev->power.idle_notification = false;
 
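End to end, a driver opts in to autosuspend during probe and then uses the mark-busy/put idiom in its I/O paths. A hedged sketch with generic names and no error handling (pm_runtime_use_autosuspend() is the bool-true wrapper for __pm_runtime_use_autosuspend() above; set_active/enable are pre-existing helpers):

	static int foo_probe(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s idle window */
		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		return 0;
	}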
