diff options
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
| commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
| tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/base/power | |
| parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) | |
Patched in Tegra support.
Diffstat (limited to 'drivers/base/power')
| -rw-r--r-- | drivers/base/power/Makefile | 6 | ||||
| -rw-r--r-- | drivers/base/power/clock_ops.c | 136 | ||||
| -rw-r--r-- | drivers/base/power/common.c | 85 | ||||
| -rw-r--r-- | drivers/base/power/domain.c | 1579 | ||||
| -rw-r--r-- | drivers/base/power/domain_governor.c | 254 | ||||
| -rw-r--r-- | drivers/base/power/generic_ops.c | 213 | ||||
| -rw-r--r-- | drivers/base/power/main.c | 798 | ||||
| -rw-r--r-- | drivers/base/power/opp.c | 122 | ||||
| -rw-r--r-- | drivers/base/power/power.h | 46 | ||||
| -rw-r--r-- | drivers/base/power/qos.c | 704 | ||||
| -rw-r--r-- | drivers/base/power/runtime.c | 182 | ||||
| -rw-r--r-- | drivers/base/power/sysfs.c | 182 | ||||
| -rw-r--r-- | drivers/base/power/trace.c | 1 | ||||
| -rw-r--r-- | drivers/base/power/wakeup.c | 286 |
14 files changed, 970 insertions, 3624 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c..2639ae79a37 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o | 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o |
| 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o |
| 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
| 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
| 5 | obj-$(CONFIG_PM_OPP) += opp.o | 5 | obj-$(CONFIG_PM_OPP) += opp.o |
| 6 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o | 6 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o |
| 7 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o | 7 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o |
| 8 | 8 | ||
| 9 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 9 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 9d8fde70939..b97294e2d95 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
| @@ -8,16 +8,20 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
| 10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 11 | #include <linux/device.h> | ||
| 12 | #include <linux/io.h> | 11 | #include <linux/io.h> |
| 13 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
| 14 | #include <linux/pm_clock.h> | 13 | #include <linux/pm_runtime.h> |
| 15 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
| 16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 17 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 18 | 17 | ||
| 19 | #ifdef CONFIG_PM | 18 | #ifdef CONFIG_PM |
| 20 | 19 | ||
| 20 | struct pm_clk_data { | ||
| 21 | struct list_head clock_list; | ||
| 22 | spinlock_t lock; | ||
| 23 | }; | ||
| 24 | |||
| 21 | enum pce_status { | 25 | enum pce_status { |
| 22 | PCE_STATUS_NONE = 0, | 26 | PCE_STATUS_NONE = 0, |
| 23 | PCE_STATUS_ACQUIRED, | 27 | PCE_STATUS_ACQUIRED, |
| @@ -32,6 +36,11 @@ struct pm_clock_entry { | |||
| 32 | enum pce_status status; | 36 | enum pce_status status; |
| 33 | }; | 37 | }; |
| 34 | 38 | ||
| 39 | static struct pm_clk_data *__to_pcd(struct device *dev) | ||
| 40 | { | ||
| 41 | return dev ? dev->power.subsys_data : NULL; | ||
| 42 | } | ||
| 43 | |||
| 35 | /** | 44 | /** |
| 36 | * pm_clk_acquire - Acquire a device clock. | 45 | * pm_clk_acquire - Acquire a device clock. |
| 37 | * @dev: Device whose clock is to be acquired. | 46 | * @dev: Device whose clock is to be acquired. |
| @@ -58,10 +67,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | |||
| 58 | */ | 67 | */ |
| 59 | int pm_clk_add(struct device *dev, const char *con_id) | 68 | int pm_clk_add(struct device *dev, const char *con_id) |
| 60 | { | 69 | { |
| 61 | struct pm_subsys_data *psd = dev_to_psd(dev); | 70 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 62 | struct pm_clock_entry *ce; | 71 | struct pm_clock_entry *ce; |
| 63 | 72 | ||
| 64 | if (!psd) | 73 | if (!pcd) |
| 65 | return -EINVAL; | 74 | return -EINVAL; |
| 66 | 75 | ||
| 67 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | 76 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); |
| @@ -82,9 +91,9 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
| 82 | 91 | ||
| 83 | pm_clk_acquire(dev, ce); | 92 | pm_clk_acquire(dev, ce); |
| 84 | 93 | ||
| 85 | spin_lock_irq(&psd->lock); | 94 | spin_lock_irq(&pcd->lock); |
| 86 | list_add_tail(&ce->node, &psd->clock_list); | 95 | list_add_tail(&ce->node, &pcd->clock_list); |
| 87 | spin_unlock_irq(&psd->lock); | 96 | spin_unlock_irq(&pcd->lock); |
| 88 | return 0; | 97 | return 0; |
| 89 | } | 98 | } |
| 90 | 99 | ||
| @@ -99,13 +108,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
| 99 | 108 | ||
| 100 | if (ce->status < PCE_STATUS_ERROR) { | 109 | if (ce->status < PCE_STATUS_ERROR) { |
| 101 | if (ce->status == PCE_STATUS_ENABLED) | 110 | if (ce->status == PCE_STATUS_ENABLED) |
| 102 | clk_disable_unprepare(ce->clk); | 111 | clk_disable(ce->clk); |
| 103 | 112 | ||
| 104 | if (ce->status >= PCE_STATUS_ACQUIRED) | 113 | if (ce->status >= PCE_STATUS_ACQUIRED) |
| 105 | clk_put(ce->clk); | 114 | clk_put(ce->clk); |
| 106 | } | 115 | } |
| 107 | 116 | ||
| 108 | kfree(ce->con_id); | 117 | if (ce->con_id) |
| 118 | kfree(ce->con_id); | ||
| 119 | |||
| 109 | kfree(ce); | 120 | kfree(ce); |
| 110 | } | 121 | } |
| 111 | 122 | ||
| @@ -119,15 +130,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) | |||
| 119 | */ | 130 | */ |
| 120 | void pm_clk_remove(struct device *dev, const char *con_id) | 131 | void pm_clk_remove(struct device *dev, const char *con_id) |
| 121 | { | 132 | { |
| 122 | struct pm_subsys_data *psd = dev_to_psd(dev); | 133 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 123 | struct pm_clock_entry *ce; | 134 | struct pm_clock_entry *ce; |
| 124 | 135 | ||
| 125 | if (!psd) | 136 | if (!pcd) |
| 126 | return; | 137 | return; |
| 127 | 138 | ||
| 128 | spin_lock_irq(&psd->lock); | 139 | spin_lock_irq(&pcd->lock); |
| 129 | 140 | ||
| 130 | list_for_each_entry(ce, &psd->clock_list, node) { | 141 | list_for_each_entry(ce, &pcd->clock_list, node) { |
| 131 | if (!con_id && !ce->con_id) | 142 | if (!con_id && !ce->con_id) |
| 132 | goto remove; | 143 | goto remove; |
| 133 | else if (!con_id || !ce->con_id) | 144 | else if (!con_id || !ce->con_id) |
| @@ -136,12 +147,12 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
| 136 | goto remove; | 147 | goto remove; |
| 137 | } | 148 | } |
| 138 | 149 | ||
| 139 | spin_unlock_irq(&psd->lock); | 150 | spin_unlock_irq(&pcd->lock); |
| 140 | return; | 151 | return; |
| 141 | 152 | ||
| 142 | remove: | 153 | remove: |
| 143 | list_del(&ce->node); | 154 | list_del(&ce->node); |
| 144 | spin_unlock_irq(&psd->lock); | 155 | spin_unlock_irq(&pcd->lock); |
| 145 | 156 | ||
| 146 | __pm_clk_remove(ce); | 157 | __pm_clk_remove(ce); |
| 147 | } | 158 | } |
| @@ -150,26 +161,23 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
| 150 | * pm_clk_init - Initialize a device's list of power management clocks. | 161 | * pm_clk_init - Initialize a device's list of power management clocks. |
| 151 | * @dev: Device to initialize the list of PM clocks for. | 162 | * @dev: Device to initialize the list of PM clocks for. |
| 152 | * | 163 | * |
| 153 | * Initialize the lock and clock_list members of the device's pm_subsys_data | 164 | * Allocate a struct pm_clk_data object, initialize its lock member and |
| 154 | * object. | 165 | * make the @dev's power.subsys_data field point to it. |
| 155 | */ | 166 | */ |
| 156 | void pm_clk_init(struct device *dev) | 167 | int pm_clk_init(struct device *dev) |
| 157 | { | 168 | { |
| 158 | struct pm_subsys_data *psd = dev_to_psd(dev); | 169 | struct pm_clk_data *pcd; |
| 159 | if (psd) | ||
| 160 | INIT_LIST_HEAD(&psd->clock_list); | ||
| 161 | } | ||
| 162 | 170 | ||
| 163 | /** | 171 | pcd = kzalloc(sizeof(*pcd), GFP_KERNEL); |
| 164 | * pm_clk_create - Create and initialize a device's list of PM clocks. | 172 | if (!pcd) { |
| 165 | * @dev: Device to create and initialize the list of PM clocks for. | 173 | dev_err(dev, "Not enough memory for PM clock data.\n"); |
| 166 | * | 174 | return -ENOMEM; |
| 167 | * Allocate a struct pm_subsys_data object, initialize its lock and clock_list | 175 | } |
| 168 | * members and make the @dev's power.subsys_data field point to it. | 176 | |
| 169 | */ | 177 | INIT_LIST_HEAD(&pcd->clock_list); |
| 170 | int pm_clk_create(struct device *dev) | 178 | spin_lock_init(&pcd->lock); |
| 171 | { | 179 | dev->power.subsys_data = pcd; |
| 172 | return dev_pm_get_subsys_data(dev); | 180 | return 0; |
| 173 | } | 181 | } |
| 174 | 182 | ||
| 175 | /** | 183 | /** |
| @@ -177,28 +185,29 @@ int pm_clk_create(struct device *dev) | |||
| 177 | * @dev: Device to destroy the list of PM clocks for. | 185 | * @dev: Device to destroy the list of PM clocks for. |
| 178 | * | 186 | * |
| 179 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | 187 | * Clear the @dev's power.subsys_data field, remove the list of clock entries |
| 180 | * from the struct pm_subsys_data object pointed to by it before and free | 188 | * from the struct pm_clk_data object pointed to by it before and free |
| 181 | * that object. | 189 | * that object. |
| 182 | */ | 190 | */ |
| 183 | void pm_clk_destroy(struct device *dev) | 191 | void pm_clk_destroy(struct device *dev) |
| 184 | { | 192 | { |
| 185 | struct pm_subsys_data *psd = dev_to_psd(dev); | 193 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 186 | struct pm_clock_entry *ce, *c; | 194 | struct pm_clock_entry *ce, *c; |
| 187 | struct list_head list; | 195 | struct list_head list; |
| 188 | 196 | ||
| 189 | if (!psd) | 197 | if (!pcd) |
| 190 | return; | 198 | return; |
| 191 | 199 | ||
| 200 | dev->power.subsys_data = NULL; | ||
| 192 | INIT_LIST_HEAD(&list); | 201 | INIT_LIST_HEAD(&list); |
| 193 | 202 | ||
| 194 | spin_lock_irq(&psd->lock); | 203 | spin_lock_irq(&pcd->lock); |
| 195 | 204 | ||
| 196 | list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) | 205 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) |
| 197 | list_move(&ce->node, &list); | 206 | list_move(&ce->node, &list); |
| 198 | 207 | ||
| 199 | spin_unlock_irq(&psd->lock); | 208 | spin_unlock_irq(&pcd->lock); |
| 200 | 209 | ||
| 201 | dev_pm_put_subsys_data(dev); | 210 | kfree(pcd); |
| 202 | 211 | ||
| 203 | list_for_each_entry_safe_reverse(ce, c, &list, node) { | 212 | list_for_each_entry_safe_reverse(ce, c, &list, node) { |
| 204 | list_del(&ce->node); | 213 | list_del(&ce->node); |
| @@ -216,26 +225,25 @@ void pm_clk_destroy(struct device *dev) | |||
| 216 | */ | 225 | */ |
| 217 | int pm_clk_suspend(struct device *dev) | 226 | int pm_clk_suspend(struct device *dev) |
| 218 | { | 227 | { |
| 219 | struct pm_subsys_data *psd = dev_to_psd(dev); | 228 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 220 | struct pm_clock_entry *ce; | 229 | struct pm_clock_entry *ce; |
| 221 | unsigned long flags; | 230 | unsigned long flags; |
| 222 | 231 | ||
| 223 | dev_dbg(dev, "%s()\n", __func__); | 232 | dev_dbg(dev, "%s()\n", __func__); |
| 224 | 233 | ||
| 225 | if (!psd) | 234 | if (!pcd) |
| 226 | return 0; | 235 | return 0; |
| 227 | 236 | ||
| 228 | spin_lock_irqsave(&psd->lock, flags); | 237 | spin_lock_irqsave(&pcd->lock, flags); |
| 229 | 238 | ||
| 230 | list_for_each_entry_reverse(ce, &psd->clock_list, node) { | 239 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { |
| 231 | if (ce->status < PCE_STATUS_ERROR) { | 240 | if (ce->status < PCE_STATUS_ERROR) { |
| 232 | if (ce->status == PCE_STATUS_ENABLED) | 241 | clk_disable(ce->clk); |
| 233 | clk_disable(ce->clk); | ||
| 234 | ce->status = PCE_STATUS_ACQUIRED; | 242 | ce->status = PCE_STATUS_ACQUIRED; |
| 235 | } | 243 | } |
| 236 | } | 244 | } |
| 237 | 245 | ||
| 238 | spin_unlock_irqrestore(&psd->lock, flags); | 246 | spin_unlock_irqrestore(&pcd->lock, flags); |
| 239 | 247 | ||
| 240 | return 0; | 248 | return 0; |
| 241 | } | 249 | } |
| @@ -246,25 +254,25 @@ int pm_clk_suspend(struct device *dev) | |||
| 246 | */ | 254 | */ |
| 247 | int pm_clk_resume(struct device *dev) | 255 | int pm_clk_resume(struct device *dev) |
| 248 | { | 256 | { |
| 249 | struct pm_subsys_data *psd = dev_to_psd(dev); | 257 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 250 | struct pm_clock_entry *ce; | 258 | struct pm_clock_entry *ce; |
| 251 | unsigned long flags; | 259 | unsigned long flags; |
| 252 | 260 | ||
| 253 | dev_dbg(dev, "%s()\n", __func__); | 261 | dev_dbg(dev, "%s()\n", __func__); |
| 254 | 262 | ||
| 255 | if (!psd) | 263 | if (!pcd) |
| 256 | return 0; | 264 | return 0; |
| 257 | 265 | ||
| 258 | spin_lock_irqsave(&psd->lock, flags); | 266 | spin_lock_irqsave(&pcd->lock, flags); |
| 259 | 267 | ||
| 260 | list_for_each_entry(ce, &psd->clock_list, node) { | 268 | list_for_each_entry(ce, &pcd->clock_list, node) { |
| 261 | if (ce->status < PCE_STATUS_ERROR) { | 269 | if (ce->status < PCE_STATUS_ERROR) { |
| 262 | clk_enable(ce->clk); | 270 | clk_enable(ce->clk); |
| 263 | ce->status = PCE_STATUS_ENABLED; | 271 | ce->status = PCE_STATUS_ENABLED; |
| 264 | } | 272 | } |
| 265 | } | 273 | } |
| 266 | 274 | ||
| 267 | spin_unlock_irqrestore(&psd->lock, flags); | 275 | spin_unlock_irqrestore(&pcd->lock, flags); |
| 268 | 276 | ||
| 269 | return 0; | 277 | return 0; |
| 270 | } | 278 | } |
| @@ -302,7 +310,7 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
| 302 | if (dev->pm_domain) | 310 | if (dev->pm_domain) |
| 303 | break; | 311 | break; |
| 304 | 312 | ||
| 305 | error = pm_clk_create(dev); | 313 | error = pm_clk_init(dev); |
| 306 | if (error) | 314 | if (error) |
| 307 | break; | 315 | break; |
| 308 | 316 | ||
| @@ -337,22 +345,22 @@ static int pm_clk_notify(struct notifier_block *nb, | |||
| 337 | */ | 345 | */ |
| 338 | int pm_clk_suspend(struct device *dev) | 346 | int pm_clk_suspend(struct device *dev) |
| 339 | { | 347 | { |
| 340 | struct pm_subsys_data *psd = dev_to_psd(dev); | 348 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 341 | struct pm_clock_entry *ce; | 349 | struct pm_clock_entry *ce; |
| 342 | unsigned long flags; | 350 | unsigned long flags; |
| 343 | 351 | ||
| 344 | dev_dbg(dev, "%s()\n", __func__); | 352 | dev_dbg(dev, "%s()\n", __func__); |
| 345 | 353 | ||
| 346 | /* If there is no driver, the clocks are already disabled. */ | 354 | /* If there is no driver, the clocks are already disabled. */ |
| 347 | if (!psd || !dev->driver) | 355 | if (!pcd || !dev->driver) |
| 348 | return 0; | 356 | return 0; |
| 349 | 357 | ||
| 350 | spin_lock_irqsave(&psd->lock, flags); | 358 | spin_lock_irqsave(&pcd->lock, flags); |
| 351 | 359 | ||
| 352 | list_for_each_entry_reverse(ce, &psd->clock_list, node) | 360 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) |
| 353 | clk_disable(ce->clk); | 361 | clk_disable(ce->clk); |
| 354 | 362 | ||
| 355 | spin_unlock_irqrestore(&psd->lock, flags); | 363 | spin_unlock_irqrestore(&pcd->lock, flags); |
| 356 | 364 | ||
| 357 | return 0; | 365 | return 0; |
| 358 | } | 366 | } |
| @@ -363,22 +371,22 @@ int pm_clk_suspend(struct device *dev) | |||
| 363 | */ | 371 | */ |
| 364 | int pm_clk_resume(struct device *dev) | 372 | int pm_clk_resume(struct device *dev) |
| 365 | { | 373 | { |
| 366 | struct pm_subsys_data *psd = dev_to_psd(dev); | 374 | struct pm_clk_data *pcd = __to_pcd(dev); |
| 367 | struct pm_clock_entry *ce; | 375 | struct pm_clock_entry *ce; |
| 368 | unsigned long flags; | 376 | unsigned long flags; |
| 369 | 377 | ||
| 370 | dev_dbg(dev, "%s()\n", __func__); | 378 | dev_dbg(dev, "%s()\n", __func__); |
| 371 | 379 | ||
| 372 | /* If there is no driver, the clocks should remain disabled. */ | 380 | /* If there is no driver, the clocks should remain disabled. */ |
| 373 | if (!psd || !dev->driver) | 381 | if (!pcd || !dev->driver) |
| 374 | return 0; | 382 | return 0; |
| 375 | 383 | ||
| 376 | spin_lock_irqsave(&psd->lock, flags); | 384 | spin_lock_irqsave(&pcd->lock, flags); |
| 377 | 385 | ||
| 378 | list_for_each_entry(ce, &psd->clock_list, node) | 386 | list_for_each_entry(ce, &pcd->clock_list, node) |
| 379 | clk_enable(ce->clk); | 387 | clk_enable(ce->clk); |
| 380 | 388 | ||
| 381 | spin_unlock_irqrestore(&psd->lock, flags); | 389 | spin_unlock_irqrestore(&pcd->lock, flags); |
| 382 | 390 | ||
| 383 | return 0; | 391 | return 0; |
| 384 | } | 392 | } |
| @@ -396,7 +404,7 @@ static void enable_clock(struct device *dev, const char *con_id) | |||
| 396 | 404 | ||
| 397 | clk = clk_get(dev, con_id); | 405 | clk = clk_get(dev, con_id); |
| 398 | if (!IS_ERR(clk)) { | 406 | if (!IS_ERR(clk)) { |
| 399 | clk_prepare_enable(clk); | 407 | clk_enable(clk); |
| 400 | clk_put(clk); | 408 | clk_put(clk); |
| 401 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | 409 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); |
| 402 | } | 410 | } |
| @@ -413,7 +421,7 @@ static void disable_clock(struct device *dev, const char *con_id) | |||
| 413 | 421 | ||
| 414 | clk = clk_get(dev, con_id); | 422 | clk = clk_get(dev, con_id); |
| 415 | if (!IS_ERR(clk)) { | 423 | if (!IS_ERR(clk)) { |
| 416 | clk_disable_unprepare(clk); | 424 | clk_disable(clk); |
| 417 | clk_put(clk); | 425 | clk_put(clk); |
| 418 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | 426 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); |
| 419 | } | 427 | } |
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c deleted file mode 100644 index 39c32529b83..00000000000 --- a/drivers/base/power/common.c +++ /dev/null | |||
| @@ -1,85 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * drivers/base/power/common.c - Common device power management code. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/device.h> | ||
| 12 | #include <linux/export.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/pm_clock.h> | ||
| 15 | |||
| 16 | /** | ||
| 17 | * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. | ||
| 18 | * @dev: Device to handle. | ||
| 19 | * | ||
| 20 | * If power.subsys_data is NULL, point it to a new object, otherwise increment | ||
| 21 | * its reference counter. Return 1 if a new object has been created, otherwise | ||
| 22 | * return 0 or error code. | ||
| 23 | */ | ||
| 24 | int dev_pm_get_subsys_data(struct device *dev) | ||
| 25 | { | ||
| 26 | struct pm_subsys_data *psd; | ||
| 27 | |||
| 28 | psd = kzalloc(sizeof(*psd), GFP_KERNEL); | ||
| 29 | if (!psd) | ||
| 30 | return -ENOMEM; | ||
| 31 | |||
| 32 | spin_lock_irq(&dev->power.lock); | ||
| 33 | |||
| 34 | if (dev->power.subsys_data) { | ||
| 35 | dev->power.subsys_data->refcount++; | ||
| 36 | } else { | ||
| 37 | spin_lock_init(&psd->lock); | ||
| 38 | psd->refcount = 1; | ||
| 39 | dev->power.subsys_data = psd; | ||
| 40 | pm_clk_init(dev); | ||
| 41 | psd = NULL; | ||
| 42 | } | ||
| 43 | |||
| 44 | spin_unlock_irq(&dev->power.lock); | ||
| 45 | |||
| 46 | /* kfree() verifies that its argument is nonzero. */ | ||
| 47 | kfree(psd); | ||
| 48 | |||
| 49 | return 0; | ||
| 50 | } | ||
| 51 | EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | ||
| 52 | |||
| 53 | /** | ||
| 54 | * dev_pm_put_subsys_data - Drop reference to power.subsys_data. | ||
| 55 | * @dev: Device to handle. | ||
| 56 | * | ||
| 57 | * If the reference counter of power.subsys_data is zero after dropping the | ||
| 58 | * reference, power.subsys_data is removed. Return 1 if that happens or 0 | ||
| 59 | * otherwise. | ||
| 60 | */ | ||
| 61 | int dev_pm_put_subsys_data(struct device *dev) | ||
| 62 | { | ||
| 63 | struct pm_subsys_data *psd; | ||
| 64 | int ret = 0; | ||
| 65 | |||
| 66 | spin_lock_irq(&dev->power.lock); | ||
| 67 | |||
| 68 | psd = dev_to_psd(dev); | ||
| 69 | if (!psd) { | ||
| 70 | ret = -EINVAL; | ||
| 71 | goto out; | ||
| 72 | } | ||
| 73 | |||
| 74 | if (--psd->refcount == 0) { | ||
| 75 | dev->power.subsys_data = NULL; | ||
| 76 | kfree(psd); | ||
| 77 | ret = 1; | ||
| 78 | } | ||
| 79 | |||
| 80 | out: | ||
| 81 | spin_unlock_irq(&dev->power.lock); | ||
| 82 | |||
| 83 | return ret; | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); | ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index acc3a8ded29..1c374579407 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
| @@ -11,69 +11,17 @@ | |||
| 11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
| 12 | #include <linux/pm_runtime.h> | 12 | #include <linux/pm_runtime.h> |
| 13 | #include <linux/pm_domain.h> | 13 | #include <linux/pm_domain.h> |
| 14 | #include <linux/pm_qos.h> | ||
| 15 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 17 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
| 18 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
| 19 | #include <linux/export.h> | ||
| 20 | |||
| 21 | #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ | ||
| 22 | ({ \ | ||
| 23 | type (*__routine)(struct device *__d); \ | ||
| 24 | type __ret = (type)0; \ | ||
| 25 | \ | ||
| 26 | __routine = genpd->dev_ops.callback; \ | ||
| 27 | if (__routine) { \ | ||
| 28 | __ret = __routine(dev); \ | ||
| 29 | } else { \ | ||
| 30 | __routine = dev_gpd_data(dev)->ops.callback; \ | ||
| 31 | if (__routine) \ | ||
| 32 | __ret = __routine(dev); \ | ||
| 33 | } \ | ||
| 34 | __ret; \ | ||
| 35 | }) | ||
| 36 | |||
| 37 | #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ | ||
| 38 | ({ \ | ||
| 39 | ktime_t __start = ktime_get(); \ | ||
| 40 | type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ | ||
| 41 | s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ | ||
| 42 | struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ | ||
| 43 | if (!__retval && __elapsed > __td->field) { \ | ||
| 44 | __td->field = __elapsed; \ | ||
| 45 | dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ | ||
| 46 | __elapsed); \ | ||
| 47 | genpd->max_off_time_changed = true; \ | ||
| 48 | __td->constraint_changed = true; \ | ||
| 49 | } \ | ||
| 50 | __retval; \ | ||
| 51 | }) | ||
| 52 | 18 | ||
| 53 | static LIST_HEAD(gpd_list); | 19 | static LIST_HEAD(gpd_list); |
| 54 | static DEFINE_MUTEX(gpd_list_lock); | 20 | static DEFINE_MUTEX(gpd_list_lock); |
| 55 | 21 | ||
| 56 | static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) | ||
| 57 | { | ||
| 58 | struct generic_pm_domain *genpd = NULL, *gpd; | ||
| 59 | |||
| 60 | if (IS_ERR_OR_NULL(domain_name)) | ||
| 61 | return NULL; | ||
| 62 | |||
| 63 | mutex_lock(&gpd_list_lock); | ||
| 64 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
| 65 | if (!strcmp(gpd->name, domain_name)) { | ||
| 66 | genpd = gpd; | ||
| 67 | break; | ||
| 68 | } | ||
| 69 | } | ||
| 70 | mutex_unlock(&gpd_list_lock); | ||
| 71 | return genpd; | ||
| 72 | } | ||
| 73 | |||
| 74 | #ifdef CONFIG_PM | 22 | #ifdef CONFIG_PM |
| 75 | 23 | ||
| 76 | struct generic_pm_domain *dev_to_genpd(struct device *dev) | 24 | static struct generic_pm_domain *dev_to_genpd(struct device *dev) |
| 77 | { | 25 | { |
| 78 | if (IS_ERR_OR_NULL(dev->pm_domain)) | 26 | if (IS_ERR_OR_NULL(dev->pm_domain)) |
| 79 | return ERR_PTR(-EINVAL); | 27 | return ERR_PTR(-EINVAL); |
| @@ -81,32 +29,10 @@ struct generic_pm_domain *dev_to_genpd(struct device *dev) | |||
| 81 | return pd_to_genpd(dev->pm_domain); | 29 | return pd_to_genpd(dev->pm_domain); |
| 82 | } | 30 | } |
| 83 | 31 | ||
| 84 | static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) | 32 | static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) |
| 85 | { | ||
| 86 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, | ||
| 87 | stop_latency_ns, "stop"); | ||
| 88 | } | ||
| 89 | |||
| 90 | static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 91 | { | ||
| 92 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, | ||
| 93 | start_latency_ns, "start"); | ||
| 94 | } | ||
| 95 | |||
| 96 | static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) | ||
| 97 | { | ||
| 98 | bool ret = false; | ||
| 99 | |||
| 100 | if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) | ||
| 101 | ret = !!atomic_dec_and_test(&genpd->sd_count); | ||
| 102 | |||
| 103 | return ret; | ||
| 104 | } | ||
| 105 | |||
| 106 | static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) | ||
| 107 | { | 33 | { |
| 108 | atomic_inc(&genpd->sd_count); | 34 | if (!WARN_ON(genpd->sd_count == 0)) |
| 109 | smp_mb__after_atomic_inc(); | 35 | genpd->sd_count--; |
| 110 | } | 36 | } |
| 111 | 37 | ||
| 112 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) | 38 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) |
| @@ -144,269 +70,134 @@ static void genpd_set_active(struct generic_pm_domain *genpd) | |||
| 144 | genpd->status = GPD_STATE_ACTIVE; | 70 | genpd->status = GPD_STATE_ACTIVE; |
| 145 | } | 71 | } |
| 146 | 72 | ||
| 147 | static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) | ||
| 148 | { | ||
| 149 | s64 usecs64; | ||
| 150 | |||
| 151 | if (!genpd->cpu_data) | ||
| 152 | return; | ||
| 153 | |||
| 154 | usecs64 = genpd->power_on_latency_ns; | ||
| 155 | do_div(usecs64, NSEC_PER_USEC); | ||
| 156 | usecs64 += genpd->cpu_data->saved_exit_latency; | ||
| 157 | genpd->cpu_data->idle_state->exit_latency = usecs64; | ||
| 158 | } | ||
| 159 | |||
| 160 | /** | 73 | /** |
| 161 | * __pm_genpd_poweron - Restore power to a given PM domain and its masters. | 74 | * pm_genpd_poweron - Restore power to a given PM domain and its parents. |
| 162 | * @genpd: PM domain to power up. | 75 | * @genpd: PM domain to power up. |
| 163 | * | 76 | * |
| 164 | * Restore power to @genpd and all of its masters so that it is possible to | 77 | * Restore power to @genpd and all of its parents so that it is possible to |
| 165 | * resume a device belonging to it. | 78 | * resume a device belonging to it. |
| 166 | */ | 79 | */ |
| 167 | static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | 80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) |
| 168 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
| 169 | { | 81 | { |
| 170 | struct gpd_link *link; | 82 | struct generic_pm_domain *parent = genpd->parent; |
| 171 | DEFINE_WAIT(wait); | ||
| 172 | int ret = 0; | 83 | int ret = 0; |
| 173 | 84 | ||
| 174 | /* If the domain's master is being waited for, we have to wait too. */ | 85 | start: |
| 175 | for (;;) { | 86 | if (parent) { |
| 176 | prepare_to_wait(&genpd->status_wait_queue, &wait, | 87 | genpd_acquire_lock(parent); |
| 177 | TASK_UNINTERRUPTIBLE); | 88 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); |
| 178 | if (genpd->status != GPD_STATE_WAIT_MASTER) | 89 | } else { |
| 179 | break; | ||
| 180 | mutex_unlock(&genpd->lock); | ||
| 181 | |||
| 182 | schedule(); | ||
| 183 | |||
| 184 | mutex_lock(&genpd->lock); | 90 | mutex_lock(&genpd->lock); |
| 185 | } | 91 | } |
| 186 | finish_wait(&genpd->status_wait_queue, &wait); | ||
| 187 | 92 | ||
| 188 | if (genpd->status == GPD_STATE_ACTIVE | 93 | if (genpd->status == GPD_STATE_ACTIVE |
| 189 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) | 94 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) |
| 190 | return 0; | 95 | goto out; |
| 191 | 96 | ||
| 192 | if (genpd->status != GPD_STATE_POWER_OFF) { | 97 | if (genpd->status != GPD_STATE_POWER_OFF) { |
| 193 | genpd_set_active(genpd); | 98 | genpd_set_active(genpd); |
| 194 | return 0; | ||
| 195 | } | ||
| 196 | |||
| 197 | if (genpd->cpu_data) { | ||
| 198 | cpuidle_pause_and_lock(); | ||
| 199 | genpd->cpu_data->idle_state->disabled = true; | ||
| 200 | cpuidle_resume_and_unlock(); | ||
| 201 | goto out; | 99 | goto out; |
| 202 | } | 100 | } |
| 203 | 101 | ||
| 204 | /* | 102 | if (parent && parent->status != GPD_STATE_ACTIVE) { |
| 205 | * The list is guaranteed not to change while the loop below is being | ||
| 206 | * executed, unless one of the masters' .power_on() callbacks fiddles | ||
| 207 | * with it. | ||
| 208 | */ | ||
| 209 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | ||
| 210 | genpd_sd_counter_inc(link->master); | ||
| 211 | genpd->status = GPD_STATE_WAIT_MASTER; | ||
| 212 | |||
| 213 | mutex_unlock(&genpd->lock); | 103 | mutex_unlock(&genpd->lock); |
| 104 | genpd_release_lock(parent); | ||
| 214 | 105 | ||
| 215 | ret = pm_genpd_poweron(link->master); | 106 | ret = pm_genpd_poweron(parent); |
| 216 | 107 | if (ret) | |
| 217 | mutex_lock(&genpd->lock); | 108 | return ret; |
| 218 | 109 | ||
| 219 | /* | 110 | goto start; |
| 220 | * The "wait for parent" status is guaranteed not to change | ||
| 221 | * while the master is powering on. | ||
| 222 | */ | ||
| 223 | genpd->status = GPD_STATE_POWER_OFF; | ||
| 224 | wake_up_all(&genpd->status_wait_queue); | ||
| 225 | if (ret) { | ||
| 226 | genpd_sd_counter_dec(link->master); | ||
| 227 | goto err; | ||
| 228 | } | ||
| 229 | } | 111 | } |
| 230 | 112 | ||
| 231 | if (genpd->power_on) { | 113 | if (genpd->power_on) { |
| 232 | ktime_t time_start = ktime_get(); | ||
| 233 | s64 elapsed_ns; | ||
| 234 | |||
| 235 | ret = genpd->power_on(genpd); | 114 | ret = genpd->power_on(genpd); |
| 236 | if (ret) | 115 | if (ret) |
| 237 | goto err; | 116 | goto out; |
| 238 | |||
| 239 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | ||
| 240 | if (elapsed_ns > genpd->power_on_latency_ns) { | ||
| 241 | genpd->power_on_latency_ns = elapsed_ns; | ||
| 242 | genpd->max_off_time_changed = true; | ||
| 243 | genpd_recalc_cpu_exit_latency(genpd); | ||
| 244 | if (genpd->name) | ||
| 245 | pr_warning("%s: Power-on latency exceeded, " | ||
| 246 | "new value %lld ns\n", genpd->name, | ||
| 247 | elapsed_ns); | ||
| 248 | } | ||
| 249 | } | 117 | } |
| 250 | 118 | ||
| 251 | out: | ||
| 252 | genpd_set_active(genpd); | 119 | genpd_set_active(genpd); |
| 120 | if (parent) | ||
| 121 | parent->sd_count++; | ||
| 253 | 122 | ||
| 254 | return 0; | 123 | out: |
| 255 | |||
| 256 | err: | ||
| 257 | list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) | ||
| 258 | genpd_sd_counter_dec(link->master); | ||
| 259 | |||
| 260 | return ret; | ||
| 261 | } | ||
| 262 | |||
| 263 | /** | ||
| 264 | * pm_genpd_poweron - Restore power to a given PM domain and its masters. | ||
| 265 | * @genpd: PM domain to power up. | ||
| 266 | */ | ||
| 267 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | ||
| 268 | { | ||
| 269 | int ret; | ||
| 270 | |||
| 271 | mutex_lock(&genpd->lock); | ||
| 272 | ret = __pm_genpd_poweron(genpd); | ||
| 273 | mutex_unlock(&genpd->lock); | 124 | mutex_unlock(&genpd->lock); |
| 274 | return ret; | 125 | if (parent) |
| 275 | } | 126 | genpd_release_lock(parent); |
| 276 | 127 | ||
| 277 | /** | 128 | return ret; |
| 278 | * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. | ||
| 279 | * @domain_name: Name of the PM domain to power up. | ||
| 280 | */ | ||
| 281 | int pm_genpd_name_poweron(const char *domain_name) | ||
| 282 | { | ||
| 283 | struct generic_pm_domain *genpd; | ||
| 284 | |||
| 285 | genpd = pm_genpd_lookup_name(domain_name); | ||
| 286 | return genpd ? pm_genpd_poweron(genpd) : -EINVAL; | ||
| 287 | } | 129 | } |
| 288 | 130 | ||
| 289 | #endif /* CONFIG_PM */ | 131 | #endif /* CONFIG_PM */ |
| 290 | 132 | ||
| 291 | #ifdef CONFIG_PM_RUNTIME | 133 | #ifdef CONFIG_PM_RUNTIME |
| 292 | 134 | ||
| 293 | static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, | ||
| 294 | struct device *dev) | ||
| 295 | { | ||
| 296 | return GENPD_DEV_CALLBACK(genpd, int, start, dev); | ||
| 297 | } | ||
| 298 | |||
| 299 | static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 300 | { | ||
| 301 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, | ||
| 302 | save_state_latency_ns, "state save"); | ||
| 303 | } | ||
| 304 | |||
| 305 | static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 306 | { | ||
| 307 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, | ||
| 308 | restore_state_latency_ns, | ||
| 309 | "state restore"); | ||
| 310 | } | ||
| 311 | |||
| 312 | static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | ||
| 313 | unsigned long val, void *ptr) | ||
| 314 | { | ||
| 315 | struct generic_pm_domain_data *gpd_data; | ||
| 316 | struct device *dev; | ||
| 317 | |||
| 318 | gpd_data = container_of(nb, struct generic_pm_domain_data, nb); | ||
| 319 | |||
| 320 | mutex_lock(&gpd_data->lock); | ||
| 321 | dev = gpd_data->base.dev; | ||
| 322 | if (!dev) { | ||
| 323 | mutex_unlock(&gpd_data->lock); | ||
| 324 | return NOTIFY_DONE; | ||
| 325 | } | ||
| 326 | mutex_unlock(&gpd_data->lock); | ||
| 327 | |||
| 328 | for (;;) { | ||
| 329 | struct generic_pm_domain *genpd; | ||
| 330 | struct pm_domain_data *pdd; | ||
| 331 | |||
| 332 | spin_lock_irq(&dev->power.lock); | ||
| 333 | |||
| 334 | pdd = dev->power.subsys_data ? | ||
| 335 | dev->power.subsys_data->domain_data : NULL; | ||
| 336 | if (pdd && pdd->dev) { | ||
| 337 | to_gpd_data(pdd)->td.constraint_changed = true; | ||
| 338 | genpd = dev_to_genpd(dev); | ||
| 339 | } else { | ||
| 340 | genpd = ERR_PTR(-ENODATA); | ||
| 341 | } | ||
| 342 | |||
| 343 | spin_unlock_irq(&dev->power.lock); | ||
| 344 | |||
| 345 | if (!IS_ERR(genpd)) { | ||
| 346 | mutex_lock(&genpd->lock); | ||
| 347 | genpd->max_off_time_changed = true; | ||
| 348 | mutex_unlock(&genpd->lock); | ||
| 349 | } | ||
| 350 | |||
| 351 | dev = dev->parent; | ||
| 352 | if (!dev || dev->power.ignore_children) | ||
| 353 | break; | ||
| 354 | } | ||
| 355 | |||
| 356 | return NOTIFY_DONE; | ||
| 357 | } | ||
| 358 | |||
| 359 | /** | 135 | /** |
| 360 | * __pm_genpd_save_device - Save the pre-suspend state of a device. | 136 | * __pm_genpd_save_device - Save the pre-suspend state of a device. |
| 361 | * @pdd: Domain data of the device to save the state of. | 137 | * @dle: Device list entry of the device to save the state of. |
| 362 | * @genpd: PM domain the device belongs to. | 138 | * @genpd: PM domain the device belongs to. |
| 363 | */ | 139 | */ |
| 364 | static int __pm_genpd_save_device(struct pm_domain_data *pdd, | 140 | static int __pm_genpd_save_device(struct dev_list_entry *dle, |
| 365 | struct generic_pm_domain *genpd) | 141 | struct generic_pm_domain *genpd) |
| 366 | __releases(&genpd->lock) __acquires(&genpd->lock) | 142 | __releases(&genpd->lock) __acquires(&genpd->lock) |
| 367 | { | 143 | { |
| 368 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); | 144 | struct device *dev = dle->dev; |
| 369 | struct device *dev = pdd->dev; | 145 | struct device_driver *drv = dev->driver; |
| 370 | int ret = 0; | 146 | int ret = 0; |
| 371 | 147 | ||
| 372 | if (gpd_data->need_restore) | 148 | if (dle->need_restore) |
| 373 | return 0; | 149 | return 0; |
| 374 | 150 | ||
| 375 | mutex_unlock(&genpd->lock); | 151 | mutex_unlock(&genpd->lock); |
| 376 | 152 | ||
| 377 | genpd_start_dev(genpd, dev); | 153 | if (drv && drv->pm && drv->pm->runtime_suspend) { |
| 378 | ret = genpd_save_dev(genpd, dev); | 154 | if (genpd->start_device) |
| 379 | genpd_stop_dev(genpd, dev); | 155 | genpd->start_device(dev); |
| 156 | |||
| 157 | ret = drv->pm->runtime_suspend(dev); | ||
| 158 | |||
| 159 | if (genpd->stop_device) | ||
| 160 | genpd->stop_device(dev); | ||
| 161 | } | ||
| 380 | 162 | ||
| 381 | mutex_lock(&genpd->lock); | 163 | mutex_lock(&genpd->lock); |
| 382 | 164 | ||
| 383 | if (!ret) | 165 | if (!ret) |
| 384 | gpd_data->need_restore = true; | 166 | dle->need_restore = true; |
| 385 | 167 | ||
| 386 | return ret; | 168 | return ret; |
| 387 | } | 169 | } |
| 388 | 170 | ||
| 389 | /** | 171 | /** |
| 390 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. | 172 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. |
| 391 | * @pdd: Domain data of the device to restore the state of. | 173 | * @dle: Device list entry of the device to restore the state of. |
| 392 | * @genpd: PM domain the device belongs to. | 174 | * @genpd: PM domain the device belongs to. |
| 393 | */ | 175 | */ |
| 394 | static void __pm_genpd_restore_device(struct pm_domain_data *pdd, | 176 | static void __pm_genpd_restore_device(struct dev_list_entry *dle, |
| 395 | struct generic_pm_domain *genpd) | 177 | struct generic_pm_domain *genpd) |
| 396 | __releases(&genpd->lock) __acquires(&genpd->lock) | 178 | __releases(&genpd->lock) __acquires(&genpd->lock) |
| 397 | { | 179 | { |
| 398 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); | 180 | struct device *dev = dle->dev; |
| 399 | struct device *dev = pdd->dev; | 181 | struct device_driver *drv = dev->driver; |
| 400 | bool need_restore = gpd_data->need_restore; | 182 | |
| 183 | if (!dle->need_restore) | ||
| 184 | return; | ||
| 401 | 185 | ||
| 402 | gpd_data->need_restore = false; | ||
| 403 | mutex_unlock(&genpd->lock); | 186 | mutex_unlock(&genpd->lock); |
| 404 | 187 | ||
| 405 | genpd_start_dev(genpd, dev); | 188 | if (drv && drv->pm && drv->pm->runtime_resume) { |
| 406 | if (need_restore) | 189 | if (genpd->start_device) |
| 407 | genpd_restore_dev(genpd, dev); | 190 | genpd->start_device(dev); |
| 191 | |||
| 192 | drv->pm->runtime_resume(dev); | ||
| 193 | |||
| 194 | if (genpd->stop_device) | ||
| 195 | genpd->stop_device(dev); | ||
| 196 | } | ||
| 408 | 197 | ||
| 409 | mutex_lock(&genpd->lock); | 198 | mutex_lock(&genpd->lock); |
| 199 | |||
| 200 | dle->need_restore = false; | ||
| 410 | } | 201 | } |
| 411 | 202 | ||
| 412 | /** | 203 | /** |
| @@ -420,8 +211,7 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, | |||
| 420 | */ | 211 | */ |
| 421 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) | 212 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) |
| 422 | { | 213 | { |
| 423 | return genpd->status == GPD_STATE_WAIT_MASTER | 214 | return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; |
| 424 | || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | ||
| 425 | } | 215 | } |
| 426 | 216 | ||
| 427 | /** | 217 | /** |
| @@ -448,8 +238,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | |||
| 448 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | 238 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) |
| 449 | __releases(&genpd->lock) __acquires(&genpd->lock) | 239 | __releases(&genpd->lock) __acquires(&genpd->lock) |
| 450 | { | 240 | { |
| 451 | struct pm_domain_data *pdd; | 241 | struct generic_pm_domain *parent; |
| 452 | struct gpd_link *link; | 242 | struct dev_list_entry *dle; |
| 453 | unsigned int not_suspended; | 243 | unsigned int not_suspended; |
| 454 | int ret = 0; | 244 | int ret = 0; |
| 455 | 245 | ||
| @@ -457,32 +247,20 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
| 457 | /* | 247 | /* |
| 458 | * Do not try to power off the domain in the following situations: | 248 | * Do not try to power off the domain in the following situations: |
| 459 | * (1) The domain is already in the "power off" state. | 249 | * (1) The domain is already in the "power off" state. |
| 460 | * (2) The domain is waiting for its master to power up. | 250 | * (2) System suspend is in progress. |
| 461 | * (3) One of the domain's devices is being resumed right now. | 251 | * (3) One of the domain's devices is being resumed right now. |
| 462 | * (4) System suspend is in progress. | ||
| 463 | */ | 252 | */ |
| 464 | if (genpd->status == GPD_STATE_POWER_OFF | 253 | if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 |
| 465 | || genpd->status == GPD_STATE_WAIT_MASTER | 254 | || genpd->resume_count > 0) |
| 466 | || genpd->resume_count > 0 || genpd->prepared_count > 0) | ||
| 467 | return 0; | 255 | return 0; |
| 468 | 256 | ||
| 469 | if (atomic_read(&genpd->sd_count) > 0) | 257 | if (genpd->sd_count > 0) |
| 470 | return -EBUSY; | 258 | return -EBUSY; |
| 471 | 259 | ||
| 472 | not_suspended = 0; | 260 | not_suspended = 0; |
| 473 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | 261 | list_for_each_entry(dle, &genpd->dev_list, node) |
| 474 | enum pm_qos_flags_status stat; | 262 | if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) |
| 475 | |||
| 476 | stat = dev_pm_qos_flags(pdd->dev, | ||
| 477 | PM_QOS_FLAG_NO_POWER_OFF | ||
| 478 | | PM_QOS_FLAG_REMOTE_WAKEUP); | ||
| 479 | if (stat > PM_QOS_FLAGS_NONE) | ||
| 480 | return -EBUSY; | ||
| 481 | |||
| 482 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) | ||
| 483 | || pdd->dev->power.irq_safe)) | ||
| 484 | not_suspended++; | 263 | not_suspended++; |
| 485 | } | ||
| 486 | 264 | ||
| 487 | if (not_suspended > genpd->in_progress) | 265 | if (not_suspended > genpd->in_progress) |
| 488 | return -EBUSY; | 266 | return -EBUSY; |
| @@ -504,80 +282,54 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
| 504 | genpd->status = GPD_STATE_BUSY; | 282 | genpd->status = GPD_STATE_BUSY; |
| 505 | genpd->poweroff_task = current; | 283 | genpd->poweroff_task = current; |
| 506 | 284 | ||
| 507 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { | 285 | list_for_each_entry_reverse(dle, &genpd->dev_list, node) { |
| 508 | ret = atomic_read(&genpd->sd_count) == 0 ? | 286 | ret = __pm_genpd_save_device(dle, genpd); |
| 509 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; | ||
| 510 | |||
| 511 | if (genpd_abort_poweroff(genpd)) | ||
| 512 | goto out; | ||
| 513 | |||
| 514 | if (ret) { | 287 | if (ret) { |
| 515 | genpd_set_active(genpd); | 288 | genpd_set_active(genpd); |
| 516 | goto out; | 289 | goto out; |
| 517 | } | 290 | } |
| 518 | 291 | ||
| 292 | if (genpd_abort_poweroff(genpd)) | ||
| 293 | goto out; | ||
| 294 | |||
| 519 | if (genpd->status == GPD_STATE_REPEAT) { | 295 | if (genpd->status == GPD_STATE_REPEAT) { |
| 520 | genpd->poweroff_task = NULL; | 296 | genpd->poweroff_task = NULL; |
| 521 | goto start; | 297 | goto start; |
| 522 | } | 298 | } |
| 523 | } | 299 | } |
| 524 | 300 | ||
| 525 | if (genpd->cpu_data) { | 301 | parent = genpd->parent; |
| 526 | /* | 302 | if (parent) { |
| 527 | * If cpu_data is set, cpuidle should turn the domain off when | 303 | mutex_unlock(&genpd->lock); |
| 528 | * the CPU in it is idle. In that case we don't decrement the | ||
| 529 | * subdomain counts of the master domains, so that power is not | ||
| 530 | * removed from the current domain prematurely as a result of | ||
| 531 | * cutting off the masters' power. | ||
| 532 | */ | ||
| 533 | genpd->status = GPD_STATE_POWER_OFF; | ||
| 534 | cpuidle_pause_and_lock(); | ||
| 535 | genpd->cpu_data->idle_state->disabled = false; | ||
| 536 | cpuidle_resume_and_unlock(); | ||
| 537 | goto out; | ||
| 538 | } | ||
| 539 | 304 | ||
| 540 | if (genpd->power_off) { | 305 | genpd_acquire_lock(parent); |
| 541 | ktime_t time_start; | 306 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); |
| 542 | s64 elapsed_ns; | ||
| 543 | 307 | ||
| 544 | if (atomic_read(&genpd->sd_count) > 0) { | 308 | if (genpd_abort_poweroff(genpd)) { |
| 545 | ret = -EBUSY; | 309 | genpd_release_lock(parent); |
| 546 | goto out; | 310 | goto out; |
| 547 | } | 311 | } |
| 312 | } | ||
| 548 | 313 | ||
| 549 | time_start = ktime_get(); | 314 | if (genpd->power_off) { |
| 550 | |||
| 551 | /* | ||
| 552 | * If sd_count > 0 at this point, one of the subdomains hasn't | ||
| 553 | * managed to call pm_genpd_poweron() for the master yet after | ||
| 554 | * incrementing it. In that case pm_genpd_poweron() will wait | ||
| 555 | * for us to drop the lock, so we can call .power_off() and let | ||
| 556 | * the pm_genpd_poweron() restore power for us (this shouldn't | ||
| 557 | * happen very often). | ||
| 558 | */ | ||
| 559 | ret = genpd->power_off(genpd); | 315 | ret = genpd->power_off(genpd); |
| 560 | if (ret == -EBUSY) { | 316 | if (ret == -EBUSY) { |
| 561 | genpd_set_active(genpd); | 317 | genpd_set_active(genpd); |
| 562 | goto out; | 318 | if (parent) |
| 563 | } | 319 | genpd_release_lock(parent); |
| 564 | 320 | ||
| 565 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | 321 | goto out; |
| 566 | if (elapsed_ns > genpd->power_off_latency_ns) { | ||
| 567 | genpd->power_off_latency_ns = elapsed_ns; | ||
| 568 | genpd->max_off_time_changed = true; | ||
| 569 | if (genpd->name) | ||
| 570 | pr_warning("%s: Power-off latency exceeded, " | ||
| 571 | "new value %lld ns\n", genpd->name, | ||
| 572 | elapsed_ns); | ||
| 573 | } | 322 | } |
| 574 | } | 323 | } |
| 575 | 324 | ||
| 576 | genpd->status = GPD_STATE_POWER_OFF; | 325 | genpd->status = GPD_STATE_POWER_OFF; |
| 577 | 326 | ||
| 578 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 327 | if (parent) { |
| 579 | genpd_sd_counter_dec(link->master); | 328 | genpd_sd_counter_dec(parent); |
| 580 | genpd_queue_power_off_work(link->master); | 329 | if (parent->sd_count == 0) |
| 330 | genpd_queue_power_off_work(parent); | ||
| 331 | |||
| 332 | genpd_release_lock(parent); | ||
| 581 | } | 333 | } |
| 582 | 334 | ||
| 583 | out: | 335 | out: |
| @@ -612,8 +364,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) | |||
| 612 | static int pm_genpd_runtime_suspend(struct device *dev) | 364 | static int pm_genpd_runtime_suspend(struct device *dev) |
| 613 | { | 365 | { |
| 614 | struct generic_pm_domain *genpd; | 366 | struct generic_pm_domain *genpd; |
| 615 | bool (*stop_ok)(struct device *__dev); | ||
| 616 | int ret; | ||
| 617 | 367 | ||
| 618 | dev_dbg(dev, "%s()\n", __func__); | 368 | dev_dbg(dev, "%s()\n", __func__); |
| 619 | 369 | ||
| @@ -621,22 +371,11 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 621 | if (IS_ERR(genpd)) | 371 | if (IS_ERR(genpd)) |
| 622 | return -EINVAL; | 372 | return -EINVAL; |
| 623 | 373 | ||
| 624 | might_sleep_if(!genpd->dev_irq_safe); | 374 | if (genpd->stop_device) { |
| 625 | 375 | int ret = genpd->stop_device(dev); | |
| 626 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; | 376 | if (ret) |
| 627 | if (stop_ok && !stop_ok(dev)) | 377 | return ret; |
| 628 | return -EBUSY; | 378 | } |
| 629 | |||
| 630 | ret = genpd_stop_dev(genpd, dev); | ||
| 631 | if (ret) | ||
| 632 | return ret; | ||
| 633 | |||
| 634 | /* | ||
| 635 | * If power.irq_safe is set, this routine will be run with interrupts | ||
| 636 | * off, so it can't use mutexes. | ||
| 637 | */ | ||
| 638 | if (dev->power.irq_safe) | ||
| 639 | return 0; | ||
| 640 | 379 | ||
| 641 | mutex_lock(&genpd->lock); | 380 | mutex_lock(&genpd->lock); |
| 642 | genpd->in_progress++; | 381 | genpd->in_progress++; |
| @@ -648,6 +387,24 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 648 | } | 387 | } |
| 649 | 388 | ||
| 650 | /** | 389 | /** |
| 390 | * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
| 391 | * @dev: Device to resume. | ||
| 392 | * @genpd: PM domain the device belongs to. | ||
| 393 | */ | ||
| 394 | static void __pm_genpd_runtime_resume(struct device *dev, | ||
| 395 | struct generic_pm_domain *genpd) | ||
| 396 | { | ||
| 397 | struct dev_list_entry *dle; | ||
| 398 | |||
| 399 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
| 400 | if (dle->dev == dev) { | ||
| 401 | __pm_genpd_restore_device(dle, genpd); | ||
| 402 | break; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | } | ||
| 406 | |||
| 407 | /** | ||
| 651 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | 408 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. |
| 652 | * @dev: Device to resume. | 409 | * @dev: Device to resume. |
| 653 | * | 410 | * |
| @@ -667,18 +424,11 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 667 | if (IS_ERR(genpd)) | 424 | if (IS_ERR(genpd)) |
| 668 | return -EINVAL; | 425 | return -EINVAL; |
| 669 | 426 | ||
| 670 | might_sleep_if(!genpd->dev_irq_safe); | 427 | ret = pm_genpd_poweron(genpd); |
| 671 | 428 | if (ret) | |
| 672 | /* If power.irq_safe, the PM domain is never powered off. */ | 429 | return ret; |
| 673 | if (dev->power.irq_safe) | ||
| 674 | return genpd_start_dev_no_timing(genpd, dev); | ||
| 675 | 430 | ||
| 676 | mutex_lock(&genpd->lock); | 431 | mutex_lock(&genpd->lock); |
| 677 | ret = __pm_genpd_poweron(genpd); | ||
| 678 | if (ret) { | ||
| 679 | mutex_unlock(&genpd->lock); | ||
| 680 | return ret; | ||
| 681 | } | ||
| 682 | genpd->status = GPD_STATE_BUSY; | 432 | genpd->status = GPD_STATE_BUSY; |
| 683 | genpd->resume_count++; | 433 | genpd->resume_count++; |
| 684 | for (;;) { | 434 | for (;;) { |
| @@ -698,12 +448,15 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 698 | mutex_lock(&genpd->lock); | 448 | mutex_lock(&genpd->lock); |
| 699 | } | 449 | } |
| 700 | finish_wait(&genpd->status_wait_queue, &wait); | 450 | finish_wait(&genpd->status_wait_queue, &wait); |
| 701 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); | 451 | __pm_genpd_runtime_resume(dev, genpd); |
| 702 | genpd->resume_count--; | 452 | genpd->resume_count--; |
| 703 | genpd_set_active(genpd); | 453 | genpd_set_active(genpd); |
| 704 | wake_up_all(&genpd->status_wait_queue); | 454 | wake_up_all(&genpd->status_wait_queue); |
| 705 | mutex_unlock(&genpd->lock); | 455 | mutex_unlock(&genpd->lock); |
| 706 | 456 | ||
| 457 | if (genpd->start_device) | ||
| 458 | genpd->start_device(dev); | ||
| 459 | |||
| 707 | return 0; | 460 | return 0; |
| 708 | } | 461 | } |
| 709 | 462 | ||
| @@ -724,13 +477,9 @@ void pm_genpd_poweroff_unused(void) | |||
| 724 | 477 | ||
| 725 | #else | 478 | #else |
| 726 | 479 | ||
| 727 | static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | ||
| 728 | unsigned long val, void *ptr) | ||
| 729 | { | ||
| 730 | return NOTIFY_DONE; | ||
| 731 | } | ||
| 732 | |||
| 733 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 480 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
| 481 | static inline void __pm_genpd_runtime_resume(struct device *dev, | ||
| 482 | struct generic_pm_domain *genpd) {} | ||
| 734 | 483 | ||
| 735 | #define pm_genpd_runtime_suspend NULL | 484 | #define pm_genpd_runtime_suspend NULL |
| 736 | #define pm_genpd_runtime_resume NULL | 485 | #define pm_genpd_runtime_resume NULL |
| @@ -740,131 +489,37 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} | |||
| 740 | #ifdef CONFIG_PM_SLEEP | 489 | #ifdef CONFIG_PM_SLEEP |
| 741 | 490 | ||
| 742 | /** | 491 | /** |
| 743 | * pm_genpd_present - Check if the given PM domain has been initialized. | 492 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. |
| 744 | * @genpd: PM domain to check. | ||
| 745 | */ | ||
| 746 | static bool pm_genpd_present(struct generic_pm_domain *genpd) | ||
| 747 | { | ||
| 748 | struct generic_pm_domain *gpd; | ||
| 749 | |||
| 750 | if (IS_ERR_OR_NULL(genpd)) | ||
| 751 | return false; | ||
| 752 | |||
| 753 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) | ||
| 754 | if (gpd == genpd) | ||
| 755 | return true; | ||
| 756 | |||
| 757 | return false; | ||
| 758 | } | ||
| 759 | |||
| 760 | static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, | ||
| 761 | struct device *dev) | ||
| 762 | { | ||
| 763 | return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); | ||
| 764 | } | ||
| 765 | |||
| 766 | static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 767 | { | ||
| 768 | return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); | ||
| 769 | } | ||
| 770 | |||
| 771 | static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) | ||
| 772 | { | ||
| 773 | return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); | ||
| 774 | } | ||
| 775 | |||
| 776 | static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) | ||
| 777 | { | ||
| 778 | return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); | ||
| 779 | } | ||
| 780 | |||
| 781 | static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 782 | { | ||
| 783 | return GENPD_DEV_CALLBACK(genpd, int, resume, dev); | ||
| 784 | } | ||
| 785 | |||
| 786 | static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 787 | { | ||
| 788 | return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); | ||
| 789 | } | ||
| 790 | |||
| 791 | static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) | ||
| 792 | { | ||
| 793 | return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); | ||
| 794 | } | ||
| 795 | |||
| 796 | static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) | ||
| 797 | { | ||
| 798 | return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); | ||
| 799 | } | ||
| 800 | |||
| 801 | static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) | ||
| 802 | { | ||
| 803 | return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); | ||
| 804 | } | ||
| 805 | |||
| 806 | /** | ||
| 807 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. | ||
| 808 | * @genpd: PM domain to power off, if possible. | 493 | * @genpd: PM domain to power off, if possible. |
| 809 | * | 494 | * |
| 810 | * Check if the given PM domain can be powered off (during system suspend or | 495 | * Check if the given PM domain can be powered off (during system suspend or |
| 811 | * hibernation) and do that if so. Also, in that case propagate to its masters. | 496 | * hibernation) and do that if so. Also, in that case propagate to its parent. |
| 812 | * | 497 | * |
| 813 | * This function is only called in "noirq" and "syscore" stages of system power | 498 | * This function is only called in "noirq" stages of system power transitions, |
| 814 | * transitions, so it need not acquire locks (all of the "noirq" callbacks are | 499 | * so it need not acquire locks (all of the "noirq" callbacks are executed |
| 815 | * executed sequentially, so it is guaranteed that it will never run twice in | 500 | * sequentially, so it is guaranteed that it will never run twice in parallel). |
| 816 | * parallel). | ||
| 817 | */ | 501 | */ |
| 818 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | 502 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) |
| 819 | { | 503 | { |
| 820 | struct gpd_link *link; | 504 | struct generic_pm_domain *parent = genpd->parent; |
| 821 | 505 | ||
| 822 | if (genpd->status == GPD_STATE_POWER_OFF) | 506 | if (genpd->status == GPD_STATE_POWER_OFF) |
| 823 | return; | 507 | return; |
| 824 | 508 | ||
| 825 | if (genpd->suspended_count != genpd->device_count | 509 | if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) |
| 826 | || atomic_read(&genpd->sd_count) > 0) | ||
| 827 | return; | 510 | return; |
| 828 | 511 | ||
| 829 | if (genpd->power_off) | 512 | if (genpd->power_off) |
| 830 | genpd->power_off(genpd); | 513 | genpd->power_off(genpd); |
| 831 | 514 | ||
| 832 | genpd->status = GPD_STATE_POWER_OFF; | 515 | genpd->status = GPD_STATE_POWER_OFF; |
| 833 | 516 | if (parent) { | |
| 834 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 517 | genpd_sd_counter_dec(parent); |
| 835 | genpd_sd_counter_dec(link->master); | 518 | pm_genpd_sync_poweroff(parent); |
| 836 | pm_genpd_sync_poweroff(link->master); | ||
| 837 | } | 519 | } |
| 838 | } | 520 | } |
| 839 | 521 | ||
| 840 | /** | 522 | /** |
| 841 | * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. | ||
| 842 | * @genpd: PM domain to power on. | ||
| 843 | * | ||
| 844 | * This function is only called in "noirq" and "syscore" stages of system power | ||
| 845 | * transitions, so it need not acquire locks (all of the "noirq" callbacks are | ||
| 846 | * executed sequentially, so it is guaranteed that it will never run twice in | ||
| 847 | * parallel). | ||
| 848 | */ | ||
| 849 | static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) | ||
| 850 | { | ||
| 851 | struct gpd_link *link; | ||
| 852 | |||
| 853 | if (genpd->status != GPD_STATE_POWER_OFF) | ||
| 854 | return; | ||
| 855 | |||
| 856 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | ||
| 857 | pm_genpd_sync_poweron(link->master); | ||
| 858 | genpd_sd_counter_inc(link->master); | ||
| 859 | } | ||
| 860 | |||
| 861 | if (genpd->power_on) | ||
| 862 | genpd->power_on(genpd); | ||
| 863 | |||
| 864 | genpd->status = GPD_STATE_ACTIVE; | ||
| 865 | } | ||
| 866 | |||
| 867 | /** | ||
| 868 | * resume_needed - Check whether to resume a device before system suspend. | 523 | * resume_needed - Check whether to resume a device before system suspend. |
| 869 | * @dev: Device to check. | 524 | * @dev: Device to check. |
| 870 | * @genpd: PM domain the device belongs to. | 525 | * @genpd: PM domain the device belongs to. |
| @@ -887,7 +542,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) | |||
| 887 | if (!device_can_wakeup(dev)) | 542 | if (!device_can_wakeup(dev)) |
| 888 | return false; | 543 | return false; |
| 889 | 544 | ||
| 890 | active_wakeup = genpd_dev_active_wakeup(genpd, dev); | 545 | active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); |
| 891 | return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; | 546 | return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; |
| 892 | } | 547 | } |
| 893 | 548 | ||
| @@ -930,10 +585,8 @@ static int pm_genpd_prepare(struct device *dev) | |||
| 930 | 585 | ||
| 931 | genpd_acquire_lock(genpd); | 586 | genpd_acquire_lock(genpd); |
| 932 | 587 | ||
| 933 | if (genpd->prepared_count++ == 0) { | 588 | if (genpd->prepared_count++ == 0) |
| 934 | genpd->suspended_count = 0; | ||
| 935 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; | 589 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; |
| 936 | } | ||
| 937 | 590 | ||
| 938 | genpd_release_lock(genpd); | 591 | genpd_release_lock(genpd); |
| 939 | 592 | ||
| @@ -945,7 +598,7 @@ static int pm_genpd_prepare(struct device *dev) | |||
| 945 | /* | 598 | /* |
| 946 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, | 599 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, |
| 947 | * so pm_genpd_poweron() will return immediately, but if the device | 600 | * so pm_genpd_poweron() will return immediately, but if the device |
| 948 | * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need | 601 | * is suspended (e.g. it's been stopped by .stop_device()), we need |
| 949 | * to make it operational. | 602 | * to make it operational. |
| 950 | */ | 603 | */ |
| 951 | pm_runtime_resume(dev); | 604 | pm_runtime_resume(dev); |
| @@ -984,20 +637,21 @@ static int pm_genpd_suspend(struct device *dev) | |||
| 984 | if (IS_ERR(genpd)) | 637 | if (IS_ERR(genpd)) |
| 985 | return -EINVAL; | 638 | return -EINVAL; |
| 986 | 639 | ||
| 987 | return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); | 640 | return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); |
| 988 | } | 641 | } |
| 989 | 642 | ||
| 990 | /** | 643 | /** |
| 991 | * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. | 644 | * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. |
| 992 | * @dev: Device to suspend. | 645 | * @dev: Device to suspend. |
| 993 | * | 646 | * |
| 994 | * Carry out a late suspend of a device under the assumption that its | 647 | * Carry out a late suspend of a device under the assumption that its |
| 995 | * pm_domain field points to the domain member of an object of type | 648 | * pm_domain field points to the domain member of an object of type |
| 996 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | 649 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
| 997 | */ | 650 | */ |
| 998 | static int pm_genpd_suspend_late(struct device *dev) | 651 | static int pm_genpd_suspend_noirq(struct device *dev) |
| 999 | { | 652 | { |
| 1000 | struct generic_pm_domain *genpd; | 653 | struct generic_pm_domain *genpd; |
| 654 | int ret; | ||
| 1001 | 655 | ||
| 1002 | dev_dbg(dev, "%s()\n", __func__); | 656 | dev_dbg(dev, "%s()\n", __func__); |
| 1003 | 657 | ||
| @@ -1005,31 +659,19 @@ static int pm_genpd_suspend_late(struct device *dev) | |||
| 1005 | if (IS_ERR(genpd)) | 659 | if (IS_ERR(genpd)) |
| 1006 | return -EINVAL; | 660 | return -EINVAL; |
| 1007 | 661 | ||
| 1008 | return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); | 662 | if (genpd->suspend_power_off) |
| 1009 | } | 663 | return 0; |
| 1010 | |||
| 1011 | /** | ||
| 1012 | * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. | ||
| 1013 | * @dev: Device to suspend. | ||
| 1014 | * | ||
| 1015 | * Stop the device and remove power from the domain if all devices in it have | ||
| 1016 | * been stopped. | ||
| 1017 | */ | ||
| 1018 | static int pm_genpd_suspend_noirq(struct device *dev) | ||
| 1019 | { | ||
| 1020 | struct generic_pm_domain *genpd; | ||
| 1021 | |||
| 1022 | dev_dbg(dev, "%s()\n", __func__); | ||
| 1023 | 664 | ||
| 1024 | genpd = dev_to_genpd(dev); | 665 | ret = pm_generic_suspend_noirq(dev); |
| 1025 | if (IS_ERR(genpd)) | 666 | if (ret) |
| 1026 | return -EINVAL; | 667 | return ret; |
| 1027 | 668 | ||
| 1028 | if (genpd->suspend_power_off | 669 | if (device_may_wakeup(dev) |
| 1029 | || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) | 670 | && genpd->active_wakeup && genpd->active_wakeup(dev)) |
| 1030 | return 0; | 671 | return 0; |
| 1031 | 672 | ||
| 1032 | genpd_stop_dev(genpd, dev); | 673 | if (genpd->stop_device) |
| 674 | genpd->stop_device(dev); | ||
| 1033 | 675 | ||
| 1034 | /* | 676 | /* |
| 1035 | * Since all of the "noirq" callbacks are executed sequentially, it is | 677 | * Since all of the "noirq" callbacks are executed sequentially, it is |
| @@ -1043,10 +685,13 @@ static int pm_genpd_suspend_noirq(struct device *dev) | |||
| 1043 | } | 685 | } |
| 1044 | 686 | ||
| 1045 | /** | 687 | /** |
| 1046 | * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. | 688 | * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. |
| 1047 | * @dev: Device to resume. | 689 | * @dev: Device to resume. |
| 1048 | * | 690 | * |
| 1049 | * Restore power to the device's PM domain, if necessary, and start the device. | 691 | * Carry out an early resume of a device under the assumption that its |
| 692 | * pm_domain field points to the domain member of an object of type | ||
| 693 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
| 694 | * devices. | ||
| 1050 | */ | 695 | */ |
| 1051 | static int pm_genpd_resume_noirq(struct device *dev) | 696 | static int pm_genpd_resume_noirq(struct device *dev) |
| 1052 | { | 697 | { |
| @@ -1058,8 +703,7 @@ static int pm_genpd_resume_noirq(struct device *dev) | |||
| 1058 | if (IS_ERR(genpd)) | 703 | if (IS_ERR(genpd)) |
| 1059 | return -EINVAL; | 704 | return -EINVAL; |
| 1060 | 705 | ||
| 1061 | if (genpd->suspend_power_off | 706 | if (genpd->suspend_power_off) |
| 1062 | || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) | ||
| 1063 | return 0; | 707 | return 0; |
| 1064 | 708 | ||
| 1065 | /* | 709 | /* |
| @@ -1067,36 +711,16 @@ static int pm_genpd_resume_noirq(struct device *dev) | |||
| 1067 | * guaranteed that this function will never run twice in parallel for | 711 | * guaranteed that this function will never run twice in parallel for |
| 1068 | * the same PM domain, so it is not necessary to use locking here. | 712 | * the same PM domain, so it is not necessary to use locking here. |
| 1069 | */ | 713 | */ |
| 1070 | pm_genpd_sync_poweron(genpd); | 714 | pm_genpd_poweron(genpd); |
| 1071 | genpd->suspended_count--; | 715 | genpd->suspended_count--; |
| 716 | if (genpd->start_device) | ||
| 717 | genpd->start_device(dev); | ||
| 1072 | 718 | ||
| 1073 | return genpd_start_dev(genpd, dev); | 719 | return pm_generic_resume_noirq(dev); |
| 1074 | } | 720 | } |
| 1075 | 721 | ||
| 1076 | /** | 722 | /** |
| 1077 | * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. | 723 | * pm_genpd_resume - Resume a device belonging to an I/O power domain. |
| 1078 | * @dev: Device to resume. | ||
| 1079 | * | ||
| 1080 | * Carry out an early resume of a device under the assumption that its | ||
| 1081 | * pm_domain field points to the domain member of an object of type | ||
| 1082 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
| 1083 | * devices. | ||
| 1084 | */ | ||
| 1085 | static int pm_genpd_resume_early(struct device *dev) | ||
| 1086 | { | ||
| 1087 | struct generic_pm_domain *genpd; | ||
| 1088 | |||
| 1089 | dev_dbg(dev, "%s()\n", __func__); | ||
| 1090 | |||
| 1091 | genpd = dev_to_genpd(dev); | ||
| 1092 | if (IS_ERR(genpd)) | ||
| 1093 | return -EINVAL; | ||
| 1094 | |||
| 1095 | return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | /** | ||
| 1099 | * pm_genpd_resume - Resume of device in an I/O PM domain. | ||
| 1100 | * @dev: Device to resume. | 724 | * @dev: Device to resume. |
| 1101 | * | 725 | * |
| 1102 | * Resume a device under the assumption that its pm_domain field points to the | 726 | * Resume a device under the assumption that its pm_domain field points to the |
| @@ -1113,11 +737,11 @@ static int pm_genpd_resume(struct device *dev) | |||
| 1113 | if (IS_ERR(genpd)) | 737 | if (IS_ERR(genpd)) |
| 1114 | return -EINVAL; | 738 | return -EINVAL; |
| 1115 | 739 | ||
| 1116 | return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); | 740 | return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); |
| 1117 | } | 741 | } |
| 1118 | 742 | ||
| 1119 | /** | 743 | /** |
| 1120 | * pm_genpd_freeze - Freezing a device in an I/O PM domain. | 744 | * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. |
| 1121 | * @dev: Device to freeze. | 745 | * @dev: Device to freeze. |
| 1122 | * | 746 | * |
| 1123 | * Freeze a device under the assumption that its pm_domain field points to the | 747 | * Freeze a device under the assumption that its pm_domain field points to the |
| @@ -1134,11 +758,11 @@ static int pm_genpd_freeze(struct device *dev) | |||
| 1134 | if (IS_ERR(genpd)) | 758 | if (IS_ERR(genpd)) |
| 1135 | return -EINVAL; | 759 | return -EINVAL; |
| 1136 | 760 | ||
| 1137 | return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); | 761 | return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); |
| 1138 | } | 762 | } |
| 1139 | 763 | ||
| 1140 | /** | 764 | /** |
| 1141 | * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. | 765 | * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. |
| 1142 | * @dev: Device to freeze. | 766 | * @dev: Device to freeze. |
| 1143 | * | 767 | * |
| 1144 | * Carry out a late freeze of a device under the assumption that its | 768 | * Carry out a late freeze of a device under the assumption that its |
| @@ -1146,9 +770,10 @@ static int pm_genpd_freeze(struct device *dev) | |||
| 1146 | * struct generic_pm_domain representing a power domain consisting of I/O | 770 | * struct generic_pm_domain representing a power domain consisting of I/O |
| 1147 | * devices. | 771 | * devices. |
| 1148 | */ | 772 | */ |
| 1149 | static int pm_genpd_freeze_late(struct device *dev) | 773 | static int pm_genpd_freeze_noirq(struct device *dev) |
| 1150 | { | 774 | { |
| 1151 | struct generic_pm_domain *genpd; | 775 | struct generic_pm_domain *genpd; |
| 776 | int ret; | ||
| 1152 | 777 | ||
| 1153 | dev_dbg(dev, "%s()\n", __func__); | 778 | dev_dbg(dev, "%s()\n", __func__); |
| 1154 | 779 | ||
| @@ -1156,19 +781,29 @@ static int pm_genpd_freeze_late(struct device *dev) | |||
| 1156 | if (IS_ERR(genpd)) | 781 | if (IS_ERR(genpd)) |
| 1157 | return -EINVAL; | 782 | return -EINVAL; |
| 1158 | 783 | ||
| 1159 | return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); | 784 | if (genpd->suspend_power_off) |
| 785 | return 0; | ||
| 786 | |||
| 787 | ret = pm_generic_freeze_noirq(dev); | ||
| 788 | if (ret) | ||
| 789 | return ret; | ||
| 790 | |||
| 791 | if (genpd->stop_device) | ||
| 792 | genpd->stop_device(dev); | ||
| 793 | |||
| 794 | return 0; | ||
| 1160 | } | 795 | } |
| 1161 | 796 | ||
| 1162 | /** | 797 | /** |
| 1163 | * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. | 798 | * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. |
| 1164 | * @dev: Device to freeze. | 799 | * @dev: Device to thaw. |
| 1165 | * | 800 | * |
| 1166 | * Carry out a late freeze of a device under the assumption that its | 801 | * Carry out an early thaw of a device under the assumption that its |
| 1167 | * pm_domain field points to the domain member of an object of type | 802 | * pm_domain field points to the domain member of an object of type |
| 1168 | * struct generic_pm_domain representing a power domain consisting of I/O | 803 | * struct generic_pm_domain representing a power domain consisting of I/O |
| 1169 | * devices. | 804 | * devices. |
| 1170 | */ | 805 | */ |
| 1171 | static int pm_genpd_freeze_noirq(struct device *dev) | 806 | static int pm_genpd_thaw_noirq(struct device *dev) |
| 1172 | { | 807 | { |
| 1173 | struct generic_pm_domain *genpd; | 808 | struct generic_pm_domain *genpd; |
| 1174 | 809 | ||
| @@ -1178,17 +813,24 @@ static int pm_genpd_freeze_noirq(struct device *dev) | |||
| 1178 | if (IS_ERR(genpd)) | 813 | if (IS_ERR(genpd)) |
| 1179 | return -EINVAL; | 814 | return -EINVAL; |
| 1180 | 815 | ||
| 1181 | return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); | 816 | if (genpd->suspend_power_off) |
| 817 | return 0; | ||
| 818 | |||
| 819 | if (genpd->start_device) | ||
| 820 | genpd->start_device(dev); | ||
| 821 | |||
| 822 | return pm_generic_thaw_noirq(dev); | ||
| 1182 | } | 823 | } |
| 1183 | 824 | ||
| 1184 | /** | 825 | /** |
| 1185 | * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain. | 826 | * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. |
| 1186 | * @dev: Device to thaw. | 827 | * @dev: Device to thaw. |
| 1187 | * | 828 | * |
| 1188 | * Start the device, unless power has been removed from the domain already | 829 | * Thaw a device under the assumption that its pm_domain field points to the |
| 1189 | * before the system transition. | 830 | * domain member of an object of type struct generic_pm_domain representing |
| 831 | * a power domain consisting of I/O devices. | ||
| 1190 | */ | 832 | */ |
| 1191 | static int pm_genpd_thaw_noirq(struct device *dev) | 833 | static int pm_genpd_thaw(struct device *dev) |
| 1192 | { | 834 | { |
| 1193 | struct generic_pm_domain *genpd; | 835 | struct generic_pm_domain *genpd; |
| 1194 | 836 | ||
| @@ -1198,19 +840,18 @@ static int pm_genpd_thaw_noirq(struct device *dev) | |||
| 1198 | if (IS_ERR(genpd)) | 840 | if (IS_ERR(genpd)) |
| 1199 | return -EINVAL; | 841 | return -EINVAL; |
| 1200 | 842 | ||
| 1201 | return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); | 843 | return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); |
| 1202 | } | 844 | } |
| 1203 | 845 | ||
| 1204 | /** | 846 | /** |
| 1205 | * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. | 847 | * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. |
| 1206 | * @dev: Device to thaw. | 848 | * @dev: Device to suspend. |
| 1207 | * | 849 | * |
| 1208 | * Carry out an early thaw of a device under the assumption that its | 850 | * Power off a device under the assumption that its pm_domain field points to |
| 1209 | * pm_domain field points to the domain member of an object of type | 851 | * the domain member of an object of type struct generic_pm_domain representing |
| 1210 | * struct generic_pm_domain representing a power domain consisting of I/O | 852 | * a PM domain consisting of I/O devices. |
| 1211 | * devices. | ||
| 1212 | */ | 853 | */ |
| 1213 | static int pm_genpd_thaw_early(struct device *dev) | 854 | static int pm_genpd_dev_poweroff(struct device *dev) |
| 1214 | { | 855 | { |
| 1215 | struct generic_pm_domain *genpd; | 856 | struct generic_pm_domain *genpd; |
| 1216 | 857 | ||
| @@ -1220,20 +861,21 @@ static int pm_genpd_thaw_early(struct device *dev) | |||
| 1220 | if (IS_ERR(genpd)) | 861 | if (IS_ERR(genpd)) |
| 1221 | return -EINVAL; | 862 | return -EINVAL; |
| 1222 | 863 | ||
| 1223 | return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); | 864 | return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); |
| 1224 | } | 865 | } |
| 1225 | 866 | ||
| 1226 | /** | 867 | /** |
| 1227 | * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. | 868 | * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. |
| 1228 | * @dev: Device to thaw. | 869 | * @dev: Device to suspend. |
| 1229 | * | 870 | * |
| 1230 | * Thaw a device under the assumption that its pm_domain field points to the | 871 | * Carry out a late powering off of a device under the assumption that its |
| 1231 | * domain member of an object of type struct generic_pm_domain representing | 872 | * pm_domain field points to the domain member of an object of type |
| 1232 | * a power domain consisting of I/O devices. | 873 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
| 1233 | */ | 874 | */ |
| 1234 | static int pm_genpd_thaw(struct device *dev) | 875 | static int pm_genpd_dev_poweroff_noirq(struct device *dev) |
| 1235 | { | 876 | { |
| 1236 | struct generic_pm_domain *genpd; | 877 | struct generic_pm_domain *genpd; |
| 878 | int ret; | ||
| 1237 | 879 | ||
| 1238 | dev_dbg(dev, "%s()\n", __func__); | 880 | dev_dbg(dev, "%s()\n", __func__); |
| 1239 | 881 | ||
| @@ -1241,15 +883,39 @@ static int pm_genpd_thaw(struct device *dev) | |||
| 1241 | if (IS_ERR(genpd)) | 883 | if (IS_ERR(genpd)) |
| 1242 | return -EINVAL; | 884 | return -EINVAL; |
| 1243 | 885 | ||
| 1244 | return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); | 886 | if (genpd->suspend_power_off) |
| 887 | return 0; | ||
| 888 | |||
| 889 | ret = pm_generic_poweroff_noirq(dev); | ||
| 890 | if (ret) | ||
| 891 | return ret; | ||
| 892 | |||
| 893 | if (device_may_wakeup(dev) | ||
| 894 | && genpd->active_wakeup && genpd->active_wakeup(dev)) | ||
| 895 | return 0; | ||
| 896 | |||
| 897 | if (genpd->stop_device) | ||
| 898 | genpd->stop_device(dev); | ||
| 899 | |||
| 900 | /* | ||
| 901 | * Since all of the "noirq" callbacks are executed sequentially, it is | ||
| 902 | * guaranteed that this function will never run twice in parallel for | ||
| 903 | * the same PM domain, so it is not necessary to use locking here. | ||
| 904 | */ | ||
| 905 | genpd->suspended_count++; | ||
| 906 | pm_genpd_sync_poweroff(genpd); | ||
| 907 | |||
| 908 | return 0; | ||
| 1245 | } | 909 | } |
| 1246 | 910 | ||
| 1247 | /** | 911 | /** |
| 1248 | * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain. | 912 | * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. |
| 1249 | * @dev: Device to resume. | 913 | * @dev: Device to resume. |
| 1250 | * | 914 | * |
| 1251 | * Make sure the domain will be in the same power state as before the | 915 | * Carry out an early restore of a device under the assumption that its |
| 1252 | * hibernation the system is resuming from and start the device if necessary. | 916 | * pm_domain field points to the domain member of an object of type |
| 917 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
| 918 | * devices. | ||
| 1253 | */ | 919 | */ |
| 1254 | static int pm_genpd_restore_noirq(struct device *dev) | 920 | static int pm_genpd_restore_noirq(struct device *dev) |
| 1255 | { | 921 | { |
| @@ -1265,35 +931,45 @@ static int pm_genpd_restore_noirq(struct device *dev) | |||
| 1265 | * Since all of the "noirq" callbacks are executed sequentially, it is | 931 | * Since all of the "noirq" callbacks are executed sequentially, it is |
| 1266 | * guaranteed that this function will never run twice in parallel for | 932 | * guaranteed that this function will never run twice in parallel for |
| 1267 | * the same PM domain, so it is not necessary to use locking here. | 933 | * the same PM domain, so it is not necessary to use locking here. |
| 1268 | * | ||
| 1269 | * At this point suspended_count == 0 means we are being run for the | ||
| 1270 | * first time for the given domain in the present cycle. | ||
| 1271 | */ | 934 | */ |
| 1272 | if (genpd->suspended_count++ == 0) { | 935 | genpd->status = GPD_STATE_POWER_OFF; |
| 936 | if (genpd->suspend_power_off) { | ||
| 1273 | /* | 937 | /* |
| 1274 | * The boot kernel might put the domain into arbitrary state, | 938 | * The boot kernel might put the domain into the power on state, |
| 1275 | * so make it appear as powered off to pm_genpd_sync_poweron(), | 939 | * so make sure it really is powered off. |
| 1276 | * so that it tries to power it on in case it was really off. | ||
| 1277 | */ | 940 | */ |
| 1278 | genpd->status = GPD_STATE_POWER_OFF; | 941 | if (genpd->power_off) |
| 1279 | if (genpd->suspend_power_off) { | 942 | genpd->power_off(genpd); |
| 1280 | /* | 943 | return 0; |
| 1281 | * If the domain was off before the hibernation, make | ||
| 1282 | * sure it will be off going forward. | ||
| 1283 | */ | ||
| 1284 | if (genpd->power_off) | ||
| 1285 | genpd->power_off(genpd); | ||
| 1286 | |||
| 1287 | return 0; | ||
| 1288 | } | ||
| 1289 | } | 944 | } |
| 1290 | 945 | ||
| 1291 | if (genpd->suspend_power_off) | 946 | pm_genpd_poweron(genpd); |
| 1292 | return 0; | 947 | genpd->suspended_count--; |
| 948 | if (genpd->start_device) | ||
| 949 | genpd->start_device(dev); | ||
| 950 | |||
| 951 | return pm_generic_restore_noirq(dev); | ||
| 952 | } | ||
| 1293 | 953 | ||
| 1294 | pm_genpd_sync_poweron(genpd); | 954 | /** |
| 955 | * pm_genpd_restore - Restore a device belonging to an I/O power domain. | ||
| 956 | * @dev: Device to resume. | ||
| 957 | * | ||
| 958 | * Restore a device under the assumption that its pm_domain field points to the | ||
| 959 | * domain member of an object of type struct generic_pm_domain representing | ||
| 960 | * a power domain consisting of I/O devices. | ||
| 961 | */ | ||
| 962 | static int pm_genpd_restore(struct device *dev) | ||
| 963 | { | ||
| 964 | struct generic_pm_domain *genpd; | ||
| 1295 | 965 | ||
| 1296 | return genpd_start_dev(genpd, dev); | 966 | dev_dbg(dev, "%s()\n", __func__); |
| 967 | |||
| 968 | genpd = dev_to_genpd(dev); | ||
| 969 | if (IS_ERR(genpd)) | ||
| 970 | return -EINVAL; | ||
| 971 | |||
| 972 | return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); | ||
| 1297 | } | 973 | } |
| 1298 | 974 | ||
| 1299 | /** | 975 | /** |
| @@ -1332,83 +1008,33 @@ static void pm_genpd_complete(struct device *dev) | |||
| 1332 | } | 1008 | } |
| 1333 | } | 1009 | } |
| 1334 | 1010 | ||
| 1335 | /** | ||
| 1336 | * pm_genpd_syscore_switch - Switch power during system core suspend or resume. | ||
| 1337 | * @dev: Device that normally is marked as "always on" to switch power for. | ||
| 1338 | * | ||
| 1339 | * This routine may only be called during the system core (syscore) suspend or | ||
| 1340 | * resume phase for devices whose "always on" flags are set. | ||
| 1341 | */ | ||
| 1342 | void pm_genpd_syscore_switch(struct device *dev, bool suspend) | ||
| 1343 | { | ||
| 1344 | struct generic_pm_domain *genpd; | ||
| 1345 | |||
| 1346 | genpd = dev_to_genpd(dev); | ||
| 1347 | if (!pm_genpd_present(genpd)) | ||
| 1348 | return; | ||
| 1349 | |||
| 1350 | if (suspend) { | ||
| 1351 | genpd->suspended_count++; | ||
| 1352 | pm_genpd_sync_poweroff(genpd); | ||
| 1353 | } else { | ||
| 1354 | pm_genpd_sync_poweron(genpd); | ||
| 1355 | genpd->suspended_count--; | ||
| 1356 | } | ||
| 1357 | } | ||
| 1358 | EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); | ||
| 1359 | |||
| 1360 | #else | 1011 | #else |
| 1361 | 1012 | ||
| 1362 | #define pm_genpd_prepare NULL | 1013 | #define pm_genpd_prepare NULL |
| 1363 | #define pm_genpd_suspend NULL | 1014 | #define pm_genpd_suspend NULL |
| 1364 | #define pm_genpd_suspend_late NULL | ||
| 1365 | #define pm_genpd_suspend_noirq NULL | 1015 | #define pm_genpd_suspend_noirq NULL |
| 1366 | #define pm_genpd_resume_early NULL | ||
| 1367 | #define pm_genpd_resume_noirq NULL | 1016 | #define pm_genpd_resume_noirq NULL |
| 1368 | #define pm_genpd_resume NULL | 1017 | #define pm_genpd_resume NULL |
| 1369 | #define pm_genpd_freeze NULL | 1018 | #define pm_genpd_freeze NULL |
| 1370 | #define pm_genpd_freeze_late NULL | ||
| 1371 | #define pm_genpd_freeze_noirq NULL | 1019 | #define pm_genpd_freeze_noirq NULL |
| 1372 | #define pm_genpd_thaw_early NULL | ||
| 1373 | #define pm_genpd_thaw_noirq NULL | 1020 | #define pm_genpd_thaw_noirq NULL |
| 1374 | #define pm_genpd_thaw NULL | 1021 | #define pm_genpd_thaw NULL |
| 1022 | #define pm_genpd_dev_poweroff_noirq NULL | ||
| 1023 | #define pm_genpd_dev_poweroff NULL | ||
| 1375 | #define pm_genpd_restore_noirq NULL | 1024 | #define pm_genpd_restore_noirq NULL |
| 1025 | #define pm_genpd_restore NULL | ||
| 1376 | #define pm_genpd_complete NULL | 1026 | #define pm_genpd_complete NULL |
| 1377 | 1027 | ||
| 1378 | #endif /* CONFIG_PM_SLEEP */ | 1028 | #endif /* CONFIG_PM_SLEEP */ |
| 1379 | 1029 | ||
| 1380 | static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) | ||
| 1381 | { | ||
| 1382 | struct generic_pm_domain_data *gpd_data; | ||
| 1383 | |||
| 1384 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); | ||
| 1385 | if (!gpd_data) | ||
| 1386 | return NULL; | ||
| 1387 | |||
| 1388 | mutex_init(&gpd_data->lock); | ||
| 1389 | gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; | ||
| 1390 | dev_pm_qos_add_notifier(dev, &gpd_data->nb); | ||
| 1391 | return gpd_data; | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | static void __pm_genpd_free_dev_data(struct device *dev, | ||
| 1395 | struct generic_pm_domain_data *gpd_data) | ||
| 1396 | { | ||
| 1397 | dev_pm_qos_remove_notifier(dev, &gpd_data->nb); | ||
| 1398 | kfree(gpd_data); | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | /** | 1030 | /** |
| 1402 | * __pm_genpd_add_device - Add a device to an I/O PM domain. | 1031 | * pm_genpd_add_device - Add a device to an I/O PM domain. |
| 1403 | * @genpd: PM domain to add the device to. | 1032 | * @genpd: PM domain to add the device to. |
| 1404 | * @dev: Device to be added. | 1033 | * @dev: Device to be added. |
| 1405 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
| 1406 | */ | 1034 | */ |
| 1407 | int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | 1035 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) |
| 1408 | struct gpd_timing_data *td) | ||
| 1409 | { | 1036 | { |
| 1410 | struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; | 1037 | struct dev_list_entry *dle; |
| 1411 | struct pm_domain_data *pdd; | ||
| 1412 | int ret = 0; | 1038 | int ret = 0; |
| 1413 | 1039 | ||
| 1414 | dev_dbg(dev, "%s()\n", __func__); | 1040 | dev_dbg(dev, "%s()\n", __func__); |
| @@ -1416,108 +1042,46 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
| 1416 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | 1042 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) |
| 1417 | return -EINVAL; | 1043 | return -EINVAL; |
| 1418 | 1044 | ||
| 1419 | gpd_data_new = __pm_genpd_alloc_dev_data(dev); | ||
| 1420 | if (!gpd_data_new) | ||
| 1421 | return -ENOMEM; | ||
| 1422 | |||
| 1423 | genpd_acquire_lock(genpd); | 1045 | genpd_acquire_lock(genpd); |
| 1424 | 1046 | ||
| 1047 | if (genpd->status == GPD_STATE_POWER_OFF) { | ||
| 1048 | ret = -EINVAL; | ||
| 1049 | goto out; | ||
| 1050 | } | ||
| 1051 | |||
| 1425 | if (genpd->prepared_count > 0) { | 1052 | if (genpd->prepared_count > 0) { |
| 1426 | ret = -EAGAIN; | 1053 | ret = -EAGAIN; |
| 1427 | goto out; | 1054 | goto out; |
| 1428 | } | 1055 | } |
| 1429 | 1056 | ||
| 1430 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 1057 | list_for_each_entry(dle, &genpd->dev_list, node) |
| 1431 | if (pdd->dev == dev) { | 1058 | if (dle->dev == dev) { |
| 1432 | ret = -EINVAL; | 1059 | ret = -EINVAL; |
| 1433 | goto out; | 1060 | goto out; |
| 1434 | } | 1061 | } |
| 1435 | 1062 | ||
| 1436 | ret = dev_pm_get_subsys_data(dev); | 1063 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); |
| 1437 | if (ret) | 1064 | if (!dle) { |
| 1065 | ret = -ENOMEM; | ||
| 1438 | goto out; | 1066 | goto out; |
| 1067 | } | ||
| 1439 | 1068 | ||
| 1069 | dle->dev = dev; | ||
| 1070 | dle->need_restore = false; | ||
| 1071 | list_add_tail(&dle->node, &genpd->dev_list); | ||
| 1440 | genpd->device_count++; | 1072 | genpd->device_count++; |
| 1441 | genpd->max_off_time_changed = true; | ||
| 1442 | 1073 | ||
| 1443 | spin_lock_irq(&dev->power.lock); | 1074 | spin_lock_irq(&dev->power.lock); |
| 1444 | |||
| 1445 | dev->pm_domain = &genpd->domain; | 1075 | dev->pm_domain = &genpd->domain; |
| 1446 | if (dev->power.subsys_data->domain_data) { | ||
| 1447 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
| 1448 | } else { | ||
| 1449 | gpd_data = gpd_data_new; | ||
| 1450 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
| 1451 | } | ||
| 1452 | gpd_data->refcount++; | ||
| 1453 | if (td) | ||
| 1454 | gpd_data->td = *td; | ||
| 1455 | |||
| 1456 | spin_unlock_irq(&dev->power.lock); | 1076 | spin_unlock_irq(&dev->power.lock); |
| 1457 | 1077 | ||
| 1458 | mutex_lock(&gpd_data->lock); | ||
| 1459 | gpd_data->base.dev = dev; | ||
| 1460 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | ||
| 1461 | gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; | ||
| 1462 | gpd_data->td.constraint_changed = true; | ||
| 1463 | gpd_data->td.effective_constraint_ns = -1; | ||
| 1464 | mutex_unlock(&gpd_data->lock); | ||
| 1465 | |||
| 1466 | out: | 1078 | out: |
| 1467 | genpd_release_lock(genpd); | 1079 | genpd_release_lock(genpd); |
| 1468 | 1080 | ||
| 1469 | if (gpd_data != gpd_data_new) | ||
| 1470 | __pm_genpd_free_dev_data(dev, gpd_data_new); | ||
| 1471 | |||
| 1472 | return ret; | 1081 | return ret; |
| 1473 | } | 1082 | } |
| 1474 | 1083 | ||
| 1475 | /** | 1084 | /** |
| 1476 | * __pm_genpd_of_add_device - Add a device to an I/O PM domain. | ||
| 1477 | * @genpd_node: Device tree node pointer representing a PM domain to which the | ||
| 1478 | * the device is added to. | ||
| 1479 | * @dev: Device to be added. | ||
| 1480 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
| 1481 | */ | ||
| 1482 | int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, | ||
| 1483 | struct gpd_timing_data *td) | ||
| 1484 | { | ||
| 1485 | struct generic_pm_domain *genpd = NULL, *gpd; | ||
| 1486 | |||
| 1487 | dev_dbg(dev, "%s()\n", __func__); | ||
| 1488 | |||
| 1489 | if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) | ||
| 1490 | return -EINVAL; | ||
| 1491 | |||
| 1492 | mutex_lock(&gpd_list_lock); | ||
| 1493 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
| 1494 | if (gpd->of_node == genpd_node) { | ||
| 1495 | genpd = gpd; | ||
| 1496 | break; | ||
| 1497 | } | ||
| 1498 | } | ||
| 1499 | mutex_unlock(&gpd_list_lock); | ||
| 1500 | |||
| 1501 | if (!genpd) | ||
| 1502 | return -EINVAL; | ||
| 1503 | |||
| 1504 | return __pm_genpd_add_device(genpd, dev, td); | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | |||
| 1508 | /** | ||
| 1509 | * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. | ||
| 1510 | * @domain_name: Name of the PM domain to add the device to. | ||
| 1511 | * @dev: Device to be added. | ||
| 1512 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
| 1513 | */ | ||
| 1514 | int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, | ||
| 1515 | struct gpd_timing_data *td) | ||
| 1516 | { | ||
| 1517 | return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | /** | ||
| 1521 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. | 1085 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. |
| 1522 | * @genpd: PM domain to remove the device from. | 1086 | * @genpd: PM domain to remove the device from. |
| 1523 | * @dev: Device to be removed. | 1087 | * @dev: Device to be removed. |
| @@ -1525,16 +1089,12 @@ int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, | |||
| 1525 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1089 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
| 1526 | struct device *dev) | 1090 | struct device *dev) |
| 1527 | { | 1091 | { |
| 1528 | struct generic_pm_domain_data *gpd_data; | 1092 | struct dev_list_entry *dle; |
| 1529 | struct pm_domain_data *pdd; | 1093 | int ret = -EINVAL; |
| 1530 | bool remove = false; | ||
| 1531 | int ret = 0; | ||
| 1532 | 1094 | ||
| 1533 | dev_dbg(dev, "%s()\n", __func__); | 1095 | dev_dbg(dev, "%s()\n", __func__); |
| 1534 | 1096 | ||
| 1535 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev) | 1097 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) |
| 1536 | || IS_ERR_OR_NULL(dev->pm_domain) | ||
| 1537 | || pd_to_genpd(dev->pm_domain) != genpd) | ||
| 1538 | return -EINVAL; | 1098 | return -EINVAL; |
| 1539 | 1099 | ||
| 1540 | genpd_acquire_lock(genpd); | 1100 | genpd_acquire_lock(genpd); |
| @@ -1544,33 +1104,21 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
| 1544 | goto out; | 1104 | goto out; |
| 1545 | } | 1105 | } |
| 1546 | 1106 | ||
| 1547 | genpd->device_count--; | 1107 | list_for_each_entry(dle, &genpd->dev_list, node) { |
| 1548 | genpd->max_off_time_changed = true; | 1108 | if (dle->dev != dev) |
| 1549 | 1109 | continue; | |
| 1550 | spin_lock_irq(&dev->power.lock); | ||
| 1551 | |||
| 1552 | dev->pm_domain = NULL; | ||
| 1553 | pdd = dev->power.subsys_data->domain_data; | ||
| 1554 | list_del_init(&pdd->list_node); | ||
| 1555 | gpd_data = to_gpd_data(pdd); | ||
| 1556 | if (--gpd_data->refcount == 0) { | ||
| 1557 | dev->power.subsys_data->domain_data = NULL; | ||
| 1558 | remove = true; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | spin_unlock_irq(&dev->power.lock); | ||
| 1562 | |||
| 1563 | mutex_lock(&gpd_data->lock); | ||
| 1564 | pdd->dev = NULL; | ||
| 1565 | mutex_unlock(&gpd_data->lock); | ||
| 1566 | 1110 | ||
| 1567 | genpd_release_lock(genpd); | 1111 | spin_lock_irq(&dev->power.lock); |
| 1112 | dev->pm_domain = NULL; | ||
| 1113 | spin_unlock_irq(&dev->power.lock); | ||
| 1568 | 1114 | ||
| 1569 | dev_pm_put_subsys_data(dev); | 1115 | genpd->device_count--; |
| 1570 | if (remove) | 1116 | list_del(&dle->node); |
| 1571 | __pm_genpd_free_dev_data(dev, gpd_data); | 1117 | kfree(dle); |
| 1572 | 1118 | ||
| 1573 | return 0; | 1119 | ret = 0; |
| 1120 | break; | ||
| 1121 | } | ||
| 1574 | 1122 | ||
| 1575 | out: | 1123 | out: |
| 1576 | genpd_release_lock(genpd); | 1124 | genpd_release_lock(genpd); |
| @@ -1579,131 +1127,74 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
| 1579 | } | 1127 | } |
| 1580 | 1128 | ||
| 1581 | /** | 1129 | /** |
| 1582 | * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. | ||
| 1583 | * @dev: Device to set/unset the flag for. | ||
| 1584 | * @val: The new value of the device's "need restore" flag. | ||
| 1585 | */ | ||
| 1586 | void pm_genpd_dev_need_restore(struct device *dev, bool val) | ||
| 1587 | { | ||
| 1588 | struct pm_subsys_data *psd; | ||
| 1589 | unsigned long flags; | ||
| 1590 | |||
| 1591 | spin_lock_irqsave(&dev->power.lock, flags); | ||
| 1592 | |||
| 1593 | psd = dev_to_psd(dev); | ||
| 1594 | if (psd && psd->domain_data) | ||
| 1595 | to_gpd_data(psd->domain_data)->need_restore = val; | ||
| 1596 | |||
| 1597 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
| 1598 | } | ||
| 1599 | EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore); | ||
| 1600 | |||
| 1601 | /** | ||
| 1602 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1130 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
| 1603 | * @genpd: Master PM domain to add the subdomain to. | 1131 | * @genpd: Master PM domain to add the subdomain to. |
| 1604 | * @subdomain: Subdomain to be added. | 1132 | * @new_subdomain: Subdomain to be added. |
| 1605 | */ | 1133 | */ |
| 1606 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1134 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
| 1607 | struct generic_pm_domain *subdomain) | 1135 | struct generic_pm_domain *new_subdomain) |
| 1608 | { | 1136 | { |
| 1609 | struct gpd_link *link; | 1137 | struct generic_pm_domain *subdomain; |
| 1610 | int ret = 0; | 1138 | int ret = 0; |
| 1611 | 1139 | ||
| 1612 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) | 1140 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) |
| 1613 | || genpd == subdomain) | ||
| 1614 | return -EINVAL; | 1141 | return -EINVAL; |
| 1615 | 1142 | ||
| 1616 | start: | 1143 | start: |
| 1617 | genpd_acquire_lock(genpd); | 1144 | genpd_acquire_lock(genpd); |
| 1618 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1145 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); |
| 1619 | 1146 | ||
| 1620 | if (subdomain->status != GPD_STATE_POWER_OFF | 1147 | if (new_subdomain->status != GPD_STATE_POWER_OFF |
| 1621 | && subdomain->status != GPD_STATE_ACTIVE) { | 1148 | && new_subdomain->status != GPD_STATE_ACTIVE) { |
| 1622 | mutex_unlock(&subdomain->lock); | 1149 | mutex_unlock(&new_subdomain->lock); |
| 1623 | genpd_release_lock(genpd); | 1150 | genpd_release_lock(genpd); |
| 1624 | goto start; | 1151 | goto start; |
| 1625 | } | 1152 | } |
| 1626 | 1153 | ||
| 1627 | if (genpd->status == GPD_STATE_POWER_OFF | 1154 | if (genpd->status == GPD_STATE_POWER_OFF |
| 1628 | && subdomain->status != GPD_STATE_POWER_OFF) { | 1155 | && new_subdomain->status != GPD_STATE_POWER_OFF) { |
| 1629 | ret = -EINVAL; | 1156 | ret = -EINVAL; |
| 1630 | goto out; | 1157 | goto out; |
| 1631 | } | 1158 | } |
| 1632 | 1159 | ||
| 1633 | list_for_each_entry(link, &genpd->master_links, master_node) { | 1160 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { |
| 1634 | if (link->slave == subdomain && link->master == genpd) { | 1161 | if (subdomain == new_subdomain) { |
| 1635 | ret = -EINVAL; | 1162 | ret = -EINVAL; |
| 1636 | goto out; | 1163 | goto out; |
| 1637 | } | 1164 | } |
| 1638 | } | 1165 | } |
| 1639 | 1166 | ||
| 1640 | link = kzalloc(sizeof(*link), GFP_KERNEL); | 1167 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); |
| 1641 | if (!link) { | 1168 | new_subdomain->parent = genpd; |
| 1642 | ret = -ENOMEM; | ||
| 1643 | goto out; | ||
| 1644 | } | ||
| 1645 | link->master = genpd; | ||
| 1646 | list_add_tail(&link->master_node, &genpd->master_links); | ||
| 1647 | link->slave = subdomain; | ||
| 1648 | list_add_tail(&link->slave_node, &subdomain->slave_links); | ||
| 1649 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1169 | if (subdomain->status != GPD_STATE_POWER_OFF) |
| 1650 | genpd_sd_counter_inc(genpd); | 1170 | genpd->sd_count++; |
| 1651 | 1171 | ||
| 1652 | out: | 1172 | out: |
| 1653 | mutex_unlock(&subdomain->lock); | 1173 | mutex_unlock(&new_subdomain->lock); |
| 1654 | genpd_release_lock(genpd); | 1174 | genpd_release_lock(genpd); |
| 1655 | 1175 | ||
| 1656 | return ret; | 1176 | return ret; |
| 1657 | } | 1177 | } |
| 1658 | 1178 | ||
| 1659 | /** | 1179 | /** |
| 1660 | * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. | ||
| 1661 | * @master_name: Name of the master PM domain to add the subdomain to. | ||
| 1662 | * @subdomain_name: Name of the subdomain to be added. | ||
| 1663 | */ | ||
| 1664 | int pm_genpd_add_subdomain_names(const char *master_name, | ||
| 1665 | const char *subdomain_name) | ||
| 1666 | { | ||
| 1667 | struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; | ||
| 1668 | |||
| 1669 | if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) | ||
| 1670 | return -EINVAL; | ||
| 1671 | |||
| 1672 | mutex_lock(&gpd_list_lock); | ||
| 1673 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
| 1674 | if (!master && !strcmp(gpd->name, master_name)) | ||
| 1675 | master = gpd; | ||
| 1676 | |||
| 1677 | if (!subdomain && !strcmp(gpd->name, subdomain_name)) | ||
| 1678 | subdomain = gpd; | ||
| 1679 | |||
| 1680 | if (master && subdomain) | ||
| 1681 | break; | ||
| 1682 | } | ||
| 1683 | mutex_unlock(&gpd_list_lock); | ||
| 1684 | |||
| 1685 | return pm_genpd_add_subdomain(master, subdomain); | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | /** | ||
| 1689 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1180 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
| 1690 | * @genpd: Master PM domain to remove the subdomain from. | 1181 | * @genpd: Master PM domain to remove the subdomain from. |
| 1691 | * @subdomain: Subdomain to be removed. | 1182 | * @target: Subdomain to be removed. |
| 1692 | */ | 1183 | */ |
| 1693 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1184 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
| 1694 | struct generic_pm_domain *subdomain) | 1185 | struct generic_pm_domain *target) |
| 1695 | { | 1186 | { |
| 1696 | struct gpd_link *link; | 1187 | struct generic_pm_domain *subdomain; |
| 1697 | int ret = -EINVAL; | 1188 | int ret = -EINVAL; |
| 1698 | 1189 | ||
| 1699 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) | 1190 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) |
| 1700 | return -EINVAL; | 1191 | return -EINVAL; |
| 1701 | 1192 | ||
| 1702 | start: | 1193 | start: |
| 1703 | genpd_acquire_lock(genpd); | 1194 | genpd_acquire_lock(genpd); |
| 1704 | 1195 | ||
| 1705 | list_for_each_entry(link, &genpd->master_links, master_node) { | 1196 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { |
| 1706 | if (link->slave != subdomain) | 1197 | if (subdomain != target) |
| 1707 | continue; | 1198 | continue; |
| 1708 | 1199 | ||
| 1709 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1200 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
| @@ -1715,9 +1206,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
| 1715 | goto start; | 1206 | goto start; |
| 1716 | } | 1207 | } |
| 1717 | 1208 | ||
| 1718 | list_del(&link->master_node); | 1209 | list_del(&subdomain->sd_node); |
| 1719 | list_del(&link->slave_node); | 1210 | subdomain->parent = NULL; |
| 1720 | kfree(link); | ||
| 1721 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1211 | if (subdomain->status != GPD_STATE_POWER_OFF) |
| 1722 | genpd_sd_counter_dec(genpd); | 1212 | genpd_sd_counter_dec(genpd); |
| 1723 | 1213 | ||
| @@ -1733,389 +1223,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
| 1733 | } | 1223 | } |
| 1734 | 1224 | ||
| 1735 | /** | 1225 | /** |
| 1736 | * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. | ||
| 1737 | * @dev: Device to add the callbacks to. | ||
| 1738 | * @ops: Set of callbacks to add. | ||
| 1739 | * @td: Timing data to add to the device along with the callbacks (optional). | ||
| 1740 | * | ||
| 1741 | * Every call to this routine should be balanced with a call to | ||
| 1742 | * __pm_genpd_remove_callbacks() and they must not be nested. | ||
| 1743 | */ | ||
| 1744 | int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, | ||
| 1745 | struct gpd_timing_data *td) | ||
| 1746 | { | ||
| 1747 | struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; | ||
| 1748 | int ret = 0; | ||
| 1749 | |||
| 1750 | if (!(dev && ops)) | ||
| 1751 | return -EINVAL; | ||
| 1752 | |||
| 1753 | gpd_data_new = __pm_genpd_alloc_dev_data(dev); | ||
| 1754 | if (!gpd_data_new) | ||
| 1755 | return -ENOMEM; | ||
| 1756 | |||
| 1757 | pm_runtime_disable(dev); | ||
| 1758 | device_pm_lock(); | ||
| 1759 | |||
| 1760 | ret = dev_pm_get_subsys_data(dev); | ||
| 1761 | if (ret) | ||
| 1762 | goto out; | ||
| 1763 | |||
| 1764 | spin_lock_irq(&dev->power.lock); | ||
| 1765 | |||
| 1766 | if (dev->power.subsys_data->domain_data) { | ||
| 1767 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
| 1768 | } else { | ||
| 1769 | gpd_data = gpd_data_new; | ||
| 1770 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
| 1771 | } | ||
| 1772 | gpd_data->refcount++; | ||
| 1773 | gpd_data->ops = *ops; | ||
| 1774 | if (td) | ||
| 1775 | gpd_data->td = *td; | ||
| 1776 | |||
| 1777 | spin_unlock_irq(&dev->power.lock); | ||
| 1778 | |||
| 1779 | out: | ||
| 1780 | device_pm_unlock(); | ||
| 1781 | pm_runtime_enable(dev); | ||
| 1782 | |||
| 1783 | if (gpd_data != gpd_data_new) | ||
| 1784 | __pm_genpd_free_dev_data(dev, gpd_data_new); | ||
| 1785 | |||
| 1786 | return ret; | ||
| 1787 | } | ||
| 1788 | EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); | ||
| 1789 | |||
| 1790 | /** | ||
| 1791 | * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. | ||
| 1792 | * @dev: Device to remove the callbacks from. | ||
| 1793 | * @clear_td: If set, clear the device's timing data too. | ||
| 1794 | * | ||
| 1795 | * This routine can only be called after pm_genpd_add_callbacks(). | ||
| 1796 | */ | ||
| 1797 | int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) | ||
| 1798 | { | ||
| 1799 | struct generic_pm_domain_data *gpd_data = NULL; | ||
| 1800 | bool remove = false; | ||
| 1801 | int ret = 0; | ||
| 1802 | |||
| 1803 | if (!(dev && dev->power.subsys_data)) | ||
| 1804 | return -EINVAL; | ||
| 1805 | |||
| 1806 | pm_runtime_disable(dev); | ||
| 1807 | device_pm_lock(); | ||
| 1808 | |||
| 1809 | spin_lock_irq(&dev->power.lock); | ||
| 1810 | |||
| 1811 | if (dev->power.subsys_data->domain_data) { | ||
| 1812 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
| 1813 | gpd_data->ops = (struct gpd_dev_ops){ NULL }; | ||
| 1814 | if (clear_td) | ||
| 1815 | gpd_data->td = (struct gpd_timing_data){ 0 }; | ||
| 1816 | |||
| 1817 | if (--gpd_data->refcount == 0) { | ||
| 1818 | dev->power.subsys_data->domain_data = NULL; | ||
| 1819 | remove = true; | ||
| 1820 | } | ||
| 1821 | } else { | ||
| 1822 | ret = -EINVAL; | ||
| 1823 | } | ||
| 1824 | |||
| 1825 | spin_unlock_irq(&dev->power.lock); | ||
| 1826 | |||
| 1827 | device_pm_unlock(); | ||
| 1828 | pm_runtime_enable(dev); | ||
| 1829 | |||
| 1830 | if (ret) | ||
| 1831 | return ret; | ||
| 1832 | |||
| 1833 | dev_pm_put_subsys_data(dev); | ||
| 1834 | if (remove) | ||
| 1835 | __pm_genpd_free_dev_data(dev, gpd_data); | ||
| 1836 | |||
| 1837 | return 0; | ||
| 1838 | } | ||
| 1839 | EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); | ||
| 1840 | |||
| 1841 | /** | ||
| 1842 | * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. | ||
| 1843 | * @genpd: PM domain to be connected with cpuidle. | ||
| 1844 | * @state: cpuidle state this domain can disable/enable. | ||
| 1845 | * | ||
| 1846 | * Make a PM domain behave as though it contained a CPU core, that is, instead | ||
| 1847 | * of calling its power down routine it will enable the given cpuidle state so | ||
| 1848 | * that the cpuidle subsystem can power it down (if possible and desirable). | ||
| 1849 | */ | ||
| 1850 | int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | ||
| 1851 | { | ||
| 1852 | struct cpuidle_driver *cpuidle_drv; | ||
| 1853 | struct gpd_cpu_data *cpu_data; | ||
| 1854 | struct cpuidle_state *idle_state; | ||
| 1855 | int ret = 0; | ||
| 1856 | |||
| 1857 | if (IS_ERR_OR_NULL(genpd) || state < 0) | ||
| 1858 | return -EINVAL; | ||
| 1859 | |||
| 1860 | genpd_acquire_lock(genpd); | ||
| 1861 | |||
| 1862 | if (genpd->cpu_data) { | ||
| 1863 | ret = -EEXIST; | ||
| 1864 | goto out; | ||
| 1865 | } | ||
| 1866 | cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); | ||
| 1867 | if (!cpu_data) { | ||
| 1868 | ret = -ENOMEM; | ||
| 1869 | goto out; | ||
| 1870 | } | ||
| 1871 | cpuidle_drv = cpuidle_driver_ref(); | ||
| 1872 | if (!cpuidle_drv) { | ||
| 1873 | ret = -ENODEV; | ||
| 1874 | goto err_drv; | ||
| 1875 | } | ||
| 1876 | if (cpuidle_drv->state_count <= state) { | ||
| 1877 | ret = -EINVAL; | ||
| 1878 | goto err; | ||
| 1879 | } | ||
| 1880 | idle_state = &cpuidle_drv->states[state]; | ||
| 1881 | if (!idle_state->disabled) { | ||
| 1882 | ret = -EAGAIN; | ||
| 1883 | goto err; | ||
| 1884 | } | ||
| 1885 | cpu_data->idle_state = idle_state; | ||
| 1886 | cpu_data->saved_exit_latency = idle_state->exit_latency; | ||
| 1887 | genpd->cpu_data = cpu_data; | ||
| 1888 | genpd_recalc_cpu_exit_latency(genpd); | ||
| 1889 | |||
| 1890 | out: | ||
| 1891 | genpd_release_lock(genpd); | ||
| 1892 | return ret; | ||
| 1893 | |||
| 1894 | err: | ||
| 1895 | cpuidle_driver_unref(); | ||
| 1896 | |||
| 1897 | err_drv: | ||
| 1898 | kfree(cpu_data); | ||
| 1899 | goto out; | ||
| 1900 | } | ||
| 1901 | |||
| 1902 | /** | ||
| 1903 | * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. | ||
| 1904 | * @name: Name of the domain to connect to cpuidle. | ||
| 1905 | * @state: cpuidle state this domain can manipulate. | ||
| 1906 | */ | ||
| 1907 | int pm_genpd_name_attach_cpuidle(const char *name, int state) | ||
| 1908 | { | ||
| 1909 | return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); | ||
| 1910 | } | ||
| 1911 | |||
| 1912 | /** | ||
| 1913 | * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. | ||
| 1914 | * @genpd: PM domain to remove the cpuidle connection from. | ||
| 1915 | * | ||
| 1916 | * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the | ||
| 1917 | * given PM domain. | ||
| 1918 | */ | ||
| 1919 | int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) | ||
| 1920 | { | ||
| 1921 | struct gpd_cpu_data *cpu_data; | ||
| 1922 | struct cpuidle_state *idle_state; | ||
| 1923 | int ret = 0; | ||
| 1924 | |||
| 1925 | if (IS_ERR_OR_NULL(genpd)) | ||
| 1926 | return -EINVAL; | ||
| 1927 | |||
| 1928 | genpd_acquire_lock(genpd); | ||
| 1929 | |||
| 1930 | cpu_data = genpd->cpu_data; | ||
| 1931 | if (!cpu_data) { | ||
| 1932 | ret = -ENODEV; | ||
| 1933 | goto out; | ||
| 1934 | } | ||
| 1935 | idle_state = cpu_data->idle_state; | ||
| 1936 | if (!idle_state->disabled) { | ||
| 1937 | ret = -EAGAIN; | ||
| 1938 | goto out; | ||
| 1939 | } | ||
| 1940 | idle_state->exit_latency = cpu_data->saved_exit_latency; | ||
| 1941 | cpuidle_driver_unref(); | ||
| 1942 | genpd->cpu_data = NULL; | ||
| 1943 | kfree(cpu_data); | ||
| 1944 | |||
| 1945 | out: | ||
| 1946 | genpd_release_lock(genpd); | ||
| 1947 | return ret; | ||
| 1948 | } | ||
| 1949 | |||
| 1950 | /** | ||
| 1951 | * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. | ||
| 1952 | * @name: Name of the domain to disconnect cpuidle from. | ||
| 1953 | */ | ||
| 1954 | int pm_genpd_name_detach_cpuidle(const char *name) | ||
| 1955 | { | ||
| 1956 | return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); | ||
| 1957 | } | ||
| 1958 | |||
| 1959 | /* Default device callbacks for generic PM domains. */ | ||
| 1960 | |||
| 1961 | /** | ||
| 1962 | * pm_genpd_default_save_state - Default "save device state" for PM domians. | ||
| 1963 | * @dev: Device to handle. | ||
| 1964 | */ | ||
| 1965 | static int pm_genpd_default_save_state(struct device *dev) | ||
| 1966 | { | ||
| 1967 | int (*cb)(struct device *__dev); | ||
| 1968 | |||
| 1969 | cb = dev_gpd_data(dev)->ops.save_state; | ||
| 1970 | if (cb) | ||
| 1971 | return cb(dev); | ||
| 1972 | |||
| 1973 | if (dev->type && dev->type->pm) | ||
| 1974 | cb = dev->type->pm->runtime_suspend; | ||
| 1975 | else if (dev->class && dev->class->pm) | ||
| 1976 | cb = dev->class->pm->runtime_suspend; | ||
| 1977 | else if (dev->bus && dev->bus->pm) | ||
| 1978 | cb = dev->bus->pm->runtime_suspend; | ||
| 1979 | else | ||
| 1980 | cb = NULL; | ||
| 1981 | |||
| 1982 | if (!cb && dev->driver && dev->driver->pm) | ||
| 1983 | cb = dev->driver->pm->runtime_suspend; | ||
| 1984 | |||
| 1985 | return cb ? cb(dev) : 0; | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | /** | ||
| 1989 | * pm_genpd_default_restore_state - Default PM domians "restore device state". | ||
| 1990 | * @dev: Device to handle. | ||
| 1991 | */ | ||
| 1992 | static int pm_genpd_default_restore_state(struct device *dev) | ||
| 1993 | { | ||
| 1994 | int (*cb)(struct device *__dev); | ||
| 1995 | |||
| 1996 | cb = dev_gpd_data(dev)->ops.restore_state; | ||
| 1997 | if (cb) | ||
| 1998 | return cb(dev); | ||
| 1999 | |||
| 2000 | if (dev->type && dev->type->pm) | ||
| 2001 | cb = dev->type->pm->runtime_resume; | ||
| 2002 | else if (dev->class && dev->class->pm) | ||
| 2003 | cb = dev->class->pm->runtime_resume; | ||
| 2004 | else if (dev->bus && dev->bus->pm) | ||
| 2005 | cb = dev->bus->pm->runtime_resume; | ||
| 2006 | else | ||
| 2007 | cb = NULL; | ||
| 2008 | |||
| 2009 | if (!cb && dev->driver && dev->driver->pm) | ||
| 2010 | cb = dev->driver->pm->runtime_resume; | ||
| 2011 | |||
| 2012 | return cb ? cb(dev) : 0; | ||
| 2013 | } | ||
| 2014 | |||
| 2015 | #ifdef CONFIG_PM_SLEEP | ||
| 2016 | |||
| 2017 | /** | ||
| 2018 | * pm_genpd_default_suspend - Default "device suspend" for PM domians. | ||
| 2019 | * @dev: Device to handle. | ||
| 2020 | */ | ||
| 2021 | static int pm_genpd_default_suspend(struct device *dev) | ||
| 2022 | { | ||
| 2023 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; | ||
| 2024 | |||
| 2025 | return cb ? cb(dev) : pm_generic_suspend(dev); | ||
| 2026 | } | ||
| 2027 | |||
| 2028 | /** | ||
| 2029 | * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. | ||
| 2030 | * @dev: Device to handle. | ||
| 2031 | */ | ||
| 2032 | static int pm_genpd_default_suspend_late(struct device *dev) | ||
| 2033 | { | ||
| 2034 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; | ||
| 2035 | |||
| 2036 | return cb ? cb(dev) : pm_generic_suspend_late(dev); | ||
| 2037 | } | ||
| 2038 | |||
| 2039 | /** | ||
| 2040 | * pm_genpd_default_resume_early - Default "early device resume" for PM domians. | ||
| 2041 | * @dev: Device to handle. | ||
| 2042 | */ | ||
| 2043 | static int pm_genpd_default_resume_early(struct device *dev) | ||
| 2044 | { | ||
| 2045 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; | ||
| 2046 | |||
| 2047 | return cb ? cb(dev) : pm_generic_resume_early(dev); | ||
| 2048 | } | ||
| 2049 | |||
| 2050 | /** | ||
| 2051 | * pm_genpd_default_resume - Default "device resume" for PM domians. | ||
| 2052 | * @dev: Device to handle. | ||
| 2053 | */ | ||
| 2054 | static int pm_genpd_default_resume(struct device *dev) | ||
| 2055 | { | ||
| 2056 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; | ||
| 2057 | |||
| 2058 | return cb ? cb(dev) : pm_generic_resume(dev); | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | /** | ||
| 2062 | * pm_genpd_default_freeze - Default "device freeze" for PM domians. | ||
| 2063 | * @dev: Device to handle. | ||
| 2064 | */ | ||
| 2065 | static int pm_genpd_default_freeze(struct device *dev) | ||
| 2066 | { | ||
| 2067 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; | ||
| 2068 | |||
| 2069 | return cb ? cb(dev) : pm_generic_freeze(dev); | ||
| 2070 | } | ||
| 2071 | |||
| 2072 | /** | ||
| 2073 | * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. | ||
| 2074 | * @dev: Device to handle. | ||
| 2075 | */ | ||
| 2076 | static int pm_genpd_default_freeze_late(struct device *dev) | ||
| 2077 | { | ||
| 2078 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; | ||
| 2079 | |||
| 2080 | return cb ? cb(dev) : pm_generic_freeze_late(dev); | ||
| 2081 | } | ||
| 2082 | |||
| 2083 | /** | ||
| 2084 | * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. | ||
| 2085 | * @dev: Device to handle. | ||
| 2086 | */ | ||
| 2087 | static int pm_genpd_default_thaw_early(struct device *dev) | ||
| 2088 | { | ||
| 2089 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; | ||
| 2090 | |||
| 2091 | return cb ? cb(dev) : pm_generic_thaw_early(dev); | ||
| 2092 | } | ||
| 2093 | |||
| 2094 | /** | ||
| 2095 | * pm_genpd_default_thaw - Default "device thaw" for PM domians. | ||
| 2096 | * @dev: Device to handle. | ||
| 2097 | */ | ||
| 2098 | static int pm_genpd_default_thaw(struct device *dev) | ||
| 2099 | { | ||
| 2100 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; | ||
| 2101 | |||
| 2102 | return cb ? cb(dev) : pm_generic_thaw(dev); | ||
| 2103 | } | ||
| 2104 | |||
| 2105 | #else /* !CONFIG_PM_SLEEP */ | ||
| 2106 | |||
| 2107 | #define pm_genpd_default_suspend NULL | ||
| 2108 | #define pm_genpd_default_suspend_late NULL | ||
| 2109 | #define pm_genpd_default_resume_early NULL | ||
| 2110 | #define pm_genpd_default_resume NULL | ||
| 2111 | #define pm_genpd_default_freeze NULL | ||
| 2112 | #define pm_genpd_default_freeze_late NULL | ||
| 2113 | #define pm_genpd_default_thaw_early NULL | ||
| 2114 | #define pm_genpd_default_thaw NULL | ||
| 2115 | |||
| 2116 | #endif /* !CONFIG_PM_SLEEP */ | ||
| 2117 | |||
| 2118 | /** | ||
| 2119 | * pm_genpd_init - Initialize a generic I/O PM domain object. | 1226 | * pm_genpd_init - Initialize a generic I/O PM domain object. |
| 2120 | * @genpd: PM domain object to initialize. | 1227 | * @genpd: PM domain object to initialize. |
| 2121 | * @gov: PM domain governor to associate with the domain (may be NULL). | 1228 | * @gov: PM domain governor to associate with the domain (may be NULL). |
| @@ -2127,54 +1234,38 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
| 2127 | if (IS_ERR_OR_NULL(genpd)) | 1234 | if (IS_ERR_OR_NULL(genpd)) |
| 2128 | return; | 1235 | return; |
| 2129 | 1236 | ||
| 2130 | INIT_LIST_HEAD(&genpd->master_links); | 1237 | INIT_LIST_HEAD(&genpd->sd_node); |
| 2131 | INIT_LIST_HEAD(&genpd->slave_links); | 1238 | genpd->parent = NULL; |
| 2132 | INIT_LIST_HEAD(&genpd->dev_list); | 1239 | INIT_LIST_HEAD(&genpd->dev_list); |
| 1240 | INIT_LIST_HEAD(&genpd->sd_list); | ||
| 2133 | mutex_init(&genpd->lock); | 1241 | mutex_init(&genpd->lock); |
| 2134 | genpd->gov = gov; | 1242 | genpd->gov = gov; |
| 2135 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1243 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
| 2136 | genpd->in_progress = 0; | 1244 | genpd->in_progress = 0; |
| 2137 | atomic_set(&genpd->sd_count, 0); | 1245 | genpd->sd_count = 0; |
| 2138 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1246 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
| 2139 | init_waitqueue_head(&genpd->status_wait_queue); | 1247 | init_waitqueue_head(&genpd->status_wait_queue); |
| 2140 | genpd->poweroff_task = NULL; | 1248 | genpd->poweroff_task = NULL; |
| 2141 | genpd->resume_count = 0; | 1249 | genpd->resume_count = 0; |
| 2142 | genpd->device_count = 0; | 1250 | genpd->device_count = 0; |
| 2143 | genpd->max_off_time_ns = -1; | 1251 | genpd->suspended_count = 0; |
| 2144 | genpd->max_off_time_changed = true; | ||
| 2145 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; | 1252 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; |
| 2146 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; | 1253 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; |
| 2147 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; | 1254 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; |
| 2148 | genpd->domain.ops.prepare = pm_genpd_prepare; | 1255 | genpd->domain.ops.prepare = pm_genpd_prepare; |
| 2149 | genpd->domain.ops.suspend = pm_genpd_suspend; | 1256 | genpd->domain.ops.suspend = pm_genpd_suspend; |
| 2150 | genpd->domain.ops.suspend_late = pm_genpd_suspend_late; | ||
| 2151 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; | 1257 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; |
| 2152 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; | 1258 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; |
| 2153 | genpd->domain.ops.resume_early = pm_genpd_resume_early; | ||
| 2154 | genpd->domain.ops.resume = pm_genpd_resume; | 1259 | genpd->domain.ops.resume = pm_genpd_resume; |
| 2155 | genpd->domain.ops.freeze = pm_genpd_freeze; | 1260 | genpd->domain.ops.freeze = pm_genpd_freeze; |
| 2156 | genpd->domain.ops.freeze_late = pm_genpd_freeze_late; | ||
| 2157 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; | 1261 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; |
| 2158 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; | 1262 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; |
| 2159 | genpd->domain.ops.thaw_early = pm_genpd_thaw_early; | ||
| 2160 | genpd->domain.ops.thaw = pm_genpd_thaw; | 1263 | genpd->domain.ops.thaw = pm_genpd_thaw; |
| 2161 | genpd->domain.ops.poweroff = pm_genpd_suspend; | 1264 | genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; |
| 2162 | genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; | 1265 | genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; |
| 2163 | genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; | ||
| 2164 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; | 1266 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; |
| 2165 | genpd->domain.ops.restore_early = pm_genpd_resume_early; | 1267 | genpd->domain.ops.restore = pm_genpd_restore; |
| 2166 | genpd->domain.ops.restore = pm_genpd_resume; | ||
| 2167 | genpd->domain.ops.complete = pm_genpd_complete; | 1268 | genpd->domain.ops.complete = pm_genpd_complete; |
| 2168 | genpd->dev_ops.save_state = pm_genpd_default_save_state; | ||
| 2169 | genpd->dev_ops.restore_state = pm_genpd_default_restore_state; | ||
| 2170 | genpd->dev_ops.suspend = pm_genpd_default_suspend; | ||
| 2171 | genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; | ||
| 2172 | genpd->dev_ops.resume_early = pm_genpd_default_resume_early; | ||
| 2173 | genpd->dev_ops.resume = pm_genpd_default_resume; | ||
| 2174 | genpd->dev_ops.freeze = pm_genpd_default_freeze; | ||
| 2175 | genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; | ||
| 2176 | genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; | ||
| 2177 | genpd->dev_ops.thaw = pm_genpd_default_thaw; | ||
| 2178 | mutex_lock(&gpd_list_lock); | 1269 | mutex_lock(&gpd_list_lock); |
| 2179 | list_add(&genpd->gpd_list_node, &gpd_list); | 1270 | list_add(&genpd->gpd_list_node, &gpd_list); |
| 2180 | mutex_unlock(&gpd_list_lock); | 1271 | mutex_unlock(&gpd_list_lock); |
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c deleted file mode 100644 index 28dee3053f1..00000000000 --- a/drivers/base/power/domain_governor.c +++ /dev/null | |||
| @@ -1,254 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * drivers/base/power/domain_governor.c - Governors for device PM domains. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/pm_domain.h> | ||
| 12 | #include <linux/pm_qos.h> | ||
| 13 | #include <linux/hrtimer.h> | ||
| 14 | |||
| 15 | #ifdef CONFIG_PM_RUNTIME | ||
| 16 | |||
| 17 | static int dev_update_qos_constraint(struct device *dev, void *data) | ||
| 18 | { | ||
| 19 | s64 *constraint_ns_p = data; | ||
| 20 | s32 constraint_ns = -1; | ||
| 21 | |||
| 22 | if (dev->power.subsys_data && dev->power.subsys_data->domain_data) | ||
| 23 | constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; | ||
| 24 | |||
| 25 | if (constraint_ns < 0) { | ||
| 26 | constraint_ns = dev_pm_qos_read_value(dev); | ||
| 27 | constraint_ns *= NSEC_PER_USEC; | ||
| 28 | } | ||
| 29 | if (constraint_ns == 0) | ||
| 30 | return 0; | ||
| 31 | |||
| 32 | /* | ||
| 33 | * constraint_ns cannot be negative here, because the device has been | ||
| 34 | * suspended. | ||
| 35 | */ | ||
| 36 | if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) | ||
| 37 | *constraint_ns_p = constraint_ns; | ||
| 38 | |||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | /** | ||
| 43 | * default_stop_ok - Default PM domain governor routine for stopping devices. | ||
| 44 | * @dev: Device to check. | ||
| 45 | */ | ||
| 46 | bool default_stop_ok(struct device *dev) | ||
| 47 | { | ||
| 48 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | ||
| 49 | unsigned long flags; | ||
| 50 | s64 constraint_ns; | ||
| 51 | |||
| 52 | dev_dbg(dev, "%s()\n", __func__); | ||
| 53 | |||
| 54 | spin_lock_irqsave(&dev->power.lock, flags); | ||
| 55 | |||
| 56 | if (!td->constraint_changed) { | ||
| 57 | bool ret = td->cached_stop_ok; | ||
| 58 | |||
| 59 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
| 60 | return ret; | ||
| 61 | } | ||
| 62 | td->constraint_changed = false; | ||
| 63 | td->cached_stop_ok = false; | ||
| 64 | td->effective_constraint_ns = -1; | ||
| 65 | constraint_ns = __dev_pm_qos_read_value(dev); | ||
| 66 | |||
| 67 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
| 68 | |||
| 69 | if (constraint_ns < 0) | ||
| 70 | return false; | ||
| 71 | |||
| 72 | constraint_ns *= NSEC_PER_USEC; | ||
| 73 | /* | ||
| 74 | * We can walk the children without any additional locking, because | ||
| 75 | * they all have been suspended at this point and their | ||
| 76 | * effective_constraint_ns fields won't be modified in parallel with us. | ||
| 77 | */ | ||
| 78 | if (!dev->power.ignore_children) | ||
| 79 | device_for_each_child(dev, &constraint_ns, | ||
| 80 | dev_update_qos_constraint); | ||
| 81 | |||
| 82 | if (constraint_ns > 0) { | ||
| 83 | constraint_ns -= td->start_latency_ns; | ||
| 84 | if (constraint_ns == 0) | ||
| 85 | return false; | ||
| 86 | } | ||
| 87 | td->effective_constraint_ns = constraint_ns; | ||
| 88 | td->cached_stop_ok = constraint_ns > td->stop_latency_ns || | ||
| 89 | constraint_ns == 0; | ||
| 90 | /* | ||
| 91 | * The children have been suspended already, so we don't need to take | ||
| 92 | * their stop latencies into account here. | ||
| 93 | */ | ||
| 94 | return td->cached_stop_ok; | ||
| 95 | } | ||
| 96 | |||
| 97 | /** | ||
| 98 | * default_power_down_ok - Default generic PM domain power off governor routine. | ||
| 99 | * @pd: PM domain to check. | ||
| 100 | * | ||
| 101 | * This routine must be executed under the PM domain's lock. | ||
| 102 | */ | ||
| 103 | static bool default_power_down_ok(struct dev_pm_domain *pd) | ||
| 104 | { | ||
| 105 | struct generic_pm_domain *genpd = pd_to_genpd(pd); | ||
| 106 | struct gpd_link *link; | ||
| 107 | struct pm_domain_data *pdd; | ||
| 108 | s64 min_off_time_ns; | ||
| 109 | s64 off_on_time_ns; | ||
| 110 | |||
| 111 | if (genpd->max_off_time_changed) { | ||
| 112 | struct gpd_link *link; | ||
| 113 | |||
| 114 | /* | ||
| 115 | * We have to invalidate the cached results for the masters, so | ||
| 116 | * use the observation that default_power_down_ok() is not | ||
| 117 | * going to be called for any master until this instance | ||
| 118 | * returns. | ||
| 119 | */ | ||
| 120 | list_for_each_entry(link, &genpd->slave_links, slave_node) | ||
| 121 | link->master->max_off_time_changed = true; | ||
| 122 | |||
| 123 | genpd->max_off_time_changed = false; | ||
| 124 | genpd->cached_power_down_ok = false; | ||
| 125 | genpd->max_off_time_ns = -1; | ||
| 126 | } else { | ||
| 127 | return genpd->cached_power_down_ok; | ||
| 128 | } | ||
| 129 | |||
| 130 | off_on_time_ns = genpd->power_off_latency_ns + | ||
| 131 | genpd->power_on_latency_ns; | ||
| 132 | /* | ||
| 133 | * It doesn't make sense to remove power from the domain if saving | ||
| 134 | * the state of all devices in it and the power off/power on operations | ||
| 135 | * take too much time. | ||
| 136 | * | ||
| 137 | * All devices in this domain have been stopped already at this point. | ||
| 138 | */ | ||
| 139 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | ||
| 140 | if (pdd->dev->driver) | ||
| 141 | off_on_time_ns += | ||
| 142 | to_gpd_data(pdd)->td.save_state_latency_ns; | ||
| 143 | } | ||
| 144 | |||
| 145 | min_off_time_ns = -1; | ||
| 146 | /* | ||
| 147 | * Check if subdomains can be off for enough time. | ||
| 148 | * | ||
| 149 | * All subdomains have been powered off already at this point. | ||
| 150 | */ | ||
| 151 | list_for_each_entry(link, &genpd->master_links, master_node) { | ||
| 152 | struct generic_pm_domain *sd = link->slave; | ||
| 153 | s64 sd_max_off_ns = sd->max_off_time_ns; | ||
| 154 | |||
| 155 | if (sd_max_off_ns < 0) | ||
| 156 | continue; | ||
| 157 | |||
| 158 | /* | ||
| 159 | * Check if the subdomain is allowed to be off long enough for | ||
| 160 | * the current domain to turn off and on (that's how much time | ||
| 161 | * it will have to wait worst case). | ||
| 162 | */ | ||
| 163 | if (sd_max_off_ns <= off_on_time_ns) | ||
| 164 | return false; | ||
| 165 | |||
| 166 | if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0) | ||
| 167 | min_off_time_ns = sd_max_off_ns; | ||
| 168 | } | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Check if the devices in the domain can be off enough time. | ||
| 172 | */ | ||
| 173 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | ||
| 174 | struct gpd_timing_data *td; | ||
| 175 | s64 constraint_ns; | ||
| 176 | |||
| 177 | if (!pdd->dev->driver) | ||
| 178 | continue; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Check if the device is allowed to be off long enough for the | ||
| 182 | * domain to turn off and on (that's how much time it will | ||
| 183 | * have to wait worst case). | ||
| 184 | */ | ||
| 185 | td = &to_gpd_data(pdd)->td; | ||
| 186 | constraint_ns = td->effective_constraint_ns; | ||
| 187 | /* default_stop_ok() need not be called before us. */ | ||
| 188 | if (constraint_ns < 0) { | ||
| 189 | constraint_ns = dev_pm_qos_read_value(pdd->dev); | ||
| 190 | constraint_ns *= NSEC_PER_USEC; | ||
| 191 | } | ||
| 192 | if (constraint_ns == 0) | ||
| 193 | continue; | ||
| 194 | |||
| 195 | /* | ||
| 196 | * constraint_ns cannot be negative here, because the device has | ||
| 197 | * been suspended. | ||
| 198 | */ | ||
| 199 | constraint_ns -= td->restore_state_latency_ns; | ||
| 200 | if (constraint_ns <= off_on_time_ns) | ||
| 201 | return false; | ||
| 202 | |||
| 203 | if (min_off_time_ns > constraint_ns || min_off_time_ns < 0) | ||
| 204 | min_off_time_ns = constraint_ns; | ||
| 205 | } | ||
| 206 | |||
| 207 | genpd->cached_power_down_ok = true; | ||
| 208 | |||
| 209 | /* | ||
| 210 | * If the computed minimum device off time is negative, there are no | ||
| 211 | * latency constraints, so the domain can spend arbitrary time in the | ||
| 212 | * "off" state. | ||
| 213 | */ | ||
| 214 | if (min_off_time_ns < 0) | ||
| 215 | return true; | ||
| 216 | |||
| 217 | /* | ||
| 218 | * The difference between the computed minimum subdomain or device off | ||
| 219 | * time and the time needed to turn the domain on is the maximum | ||
| 220 | * theoretical time this domain can spend in the "off" state. | ||
| 221 | */ | ||
| 222 | genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns; | ||
| 223 | return true; | ||
| 224 | } | ||
| 225 | |||
| 226 | static bool always_on_power_down_ok(struct dev_pm_domain *domain) | ||
| 227 | { | ||
| 228 | return false; | ||
| 229 | } | ||
| 230 | |||
| 231 | #else /* !CONFIG_PM_RUNTIME */ | ||
| 232 | |||
| 233 | bool default_stop_ok(struct device *dev) | ||
| 234 | { | ||
| 235 | return false; | ||
| 236 | } | ||
| 237 | |||
| 238 | #define default_power_down_ok NULL | ||
| 239 | #define always_on_power_down_ok NULL | ||
| 240 | |||
| 241 | #endif /* !CONFIG_PM_RUNTIME */ | ||
| 242 | |||
| 243 | struct dev_power_governor simple_qos_governor = { | ||
| 244 | .stop_ok = default_stop_ok, | ||
| 245 | .power_down_ok = default_power_down_ok, | ||
| 246 | }; | ||
| 247 | |||
| 248 | /** | ||
| 249 | * pm_genpd_gov_always_on - A governor implementing an always-on policy | ||
| 250 | */ | ||
| 251 | struct dev_power_governor pm_domain_always_on_gov = { | ||
| 252 | .power_down_ok = always_on_power_down_ok, | ||
| 253 | .stop_ok = default_stop_ok, | ||
| 254 | }; | ||
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index d03d290f31c..9508df71274 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
| 10 | #include <linux/pm_runtime.h> | 10 | #include <linux/pm_runtime.h> |
| 11 | #include <linux/export.h> | ||
| 12 | 11 | ||
| 13 | #ifdef CONFIG_PM_RUNTIME | 12 | #ifdef CONFIG_PM_RUNTIME |
| 14 | /** | 13 | /** |
| @@ -92,28 +91,53 @@ int pm_generic_prepare(struct device *dev) | |||
| 92 | } | 91 | } |
| 93 | 92 | ||
| 94 | /** | 93 | /** |
| 95 | * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. | 94 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. |
| 96 | * @dev: Device to suspend. | 95 | * @dev: Device to handle. |
| 96 | * @event: PM transition of the system under way. | ||
| 97 | * @bool: Whether or not this is the "noirq" stage. | ||
| 98 | * | ||
| 99 | * If the device has not been suspended at run time, execute the | ||
| 100 | * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and | ||
| 101 | * return its error code. Otherwise, return zero. | ||
| 97 | */ | 102 | */ |
| 98 | int pm_generic_suspend_noirq(struct device *dev) | 103 | static int __pm_generic_call(struct device *dev, int event, bool noirq) |
| 99 | { | 104 | { |
| 100 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 105 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 106 | int (*callback)(struct device *); | ||
| 107 | |||
| 108 | if (!pm || pm_runtime_suspended(dev)) | ||
| 109 | return 0; | ||
| 110 | |||
| 111 | switch (event) { | ||
| 112 | case PM_EVENT_SUSPEND: | ||
| 113 | callback = noirq ? pm->suspend_noirq : pm->suspend; | ||
| 114 | break; | ||
| 115 | case PM_EVENT_FREEZE: | ||
| 116 | callback = noirq ? pm->freeze_noirq : pm->freeze; | ||
| 117 | break; | ||
| 118 | case PM_EVENT_HIBERNATE: | ||
| 119 | callback = noirq ? pm->poweroff_noirq : pm->poweroff; | ||
| 120 | break; | ||
| 121 | case PM_EVENT_THAW: | ||
| 122 | callback = noirq ? pm->thaw_noirq : pm->thaw; | ||
| 123 | break; | ||
| 124 | default: | ||
| 125 | callback = NULL; | ||
| 126 | break; | ||
| 127 | } | ||
| 101 | 128 | ||
| 102 | return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0; | 129 | return callback ? callback(dev) : 0; |
| 103 | } | 130 | } |
| 104 | EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); | ||
| 105 | 131 | ||
| 106 | /** | 132 | /** |
| 107 | * pm_generic_suspend_late - Generic suspend_late callback for subsystems. | 133 | * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. |
| 108 | * @dev: Device to suspend. | 134 | * @dev: Device to suspend. |
| 109 | */ | 135 | */ |
| 110 | int pm_generic_suspend_late(struct device *dev) | 136 | int pm_generic_suspend_noirq(struct device *dev) |
| 111 | { | 137 | { |
| 112 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 138 | return __pm_generic_call(dev, PM_EVENT_SUSPEND, true); |
| 113 | |||
| 114 | return pm && pm->suspend_late ? pm->suspend_late(dev) : 0; | ||
| 115 | } | 139 | } |
| 116 | EXPORT_SYMBOL_GPL(pm_generic_suspend_late); | 140 | EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); |
| 117 | 141 | ||
| 118 | /** | 142 | /** |
| 119 | * pm_generic_suspend - Generic suspend callback for subsystems. | 143 | * pm_generic_suspend - Generic suspend callback for subsystems. |
| @@ -121,9 +145,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late); | |||
| 121 | */ | 145 | */ |
| 122 | int pm_generic_suspend(struct device *dev) | 146 | int pm_generic_suspend(struct device *dev) |
| 123 | { | 147 | { |
| 124 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 148 | return __pm_generic_call(dev, PM_EVENT_SUSPEND, false); |
| 125 | |||
| 126 | return pm && pm->suspend ? pm->suspend(dev) : 0; | ||
| 127 | } | 149 | } |
| 128 | EXPORT_SYMBOL_GPL(pm_generic_suspend); | 150 | EXPORT_SYMBOL_GPL(pm_generic_suspend); |
| 129 | 151 | ||
| @@ -133,33 +155,17 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend); | |||
| 133 | */ | 155 | */ |
| 134 | int pm_generic_freeze_noirq(struct device *dev) | 156 | int pm_generic_freeze_noirq(struct device *dev) |
| 135 | { | 157 | { |
| 136 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 158 | return __pm_generic_call(dev, PM_EVENT_FREEZE, true); |
| 137 | |||
| 138 | return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0; | ||
| 139 | } | 159 | } |
| 140 | EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); | 160 | EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); |
| 141 | 161 | ||
| 142 | /** | 162 | /** |
| 143 | * pm_generic_freeze_late - Generic freeze_late callback for subsystems. | ||
| 144 | * @dev: Device to freeze. | ||
| 145 | */ | ||
| 146 | int pm_generic_freeze_late(struct device *dev) | ||
| 147 | { | ||
| 148 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 149 | |||
| 150 | return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; | ||
| 151 | } | ||
| 152 | EXPORT_SYMBOL_GPL(pm_generic_freeze_late); | ||
| 153 | |||
| 154 | /** | ||
| 155 | * pm_generic_freeze - Generic freeze callback for subsystems. | 163 | * pm_generic_freeze - Generic freeze callback for subsystems. |
| 156 | * @dev: Device to freeze. | 164 | * @dev: Device to freeze. |
| 157 | */ | 165 | */ |
| 158 | int pm_generic_freeze(struct device *dev) | 166 | int pm_generic_freeze(struct device *dev) |
| 159 | { | 167 | { |
| 160 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 168 | return __pm_generic_call(dev, PM_EVENT_FREEZE, false); |
| 161 | |||
| 162 | return pm && pm->freeze ? pm->freeze(dev) : 0; | ||
| 163 | } | 169 | } |
| 164 | EXPORT_SYMBOL_GPL(pm_generic_freeze); | 170 | EXPORT_SYMBOL_GPL(pm_generic_freeze); |
| 165 | 171 | ||
| @@ -169,33 +175,17 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze); | |||
| 169 | */ | 175 | */ |
| 170 | int pm_generic_poweroff_noirq(struct device *dev) | 176 | int pm_generic_poweroff_noirq(struct device *dev) |
| 171 | { | 177 | { |
| 172 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 178 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true); |
| 173 | |||
| 174 | return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0; | ||
| 175 | } | 179 | } |
| 176 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); | 180 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); |
| 177 | 181 | ||
| 178 | /** | 182 | /** |
| 179 | * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems. | ||
| 180 | * @dev: Device to handle. | ||
| 181 | */ | ||
| 182 | int pm_generic_poweroff_late(struct device *dev) | ||
| 183 | { | ||
| 184 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 185 | |||
| 186 | return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0; | ||
| 187 | } | ||
| 188 | EXPORT_SYMBOL_GPL(pm_generic_poweroff_late); | ||
| 189 | |||
| 190 | /** | ||
| 191 | * pm_generic_poweroff - Generic poweroff callback for subsystems. | 183 | * pm_generic_poweroff - Generic poweroff callback for subsystems. |
| 192 | * @dev: Device to handle. | 184 | * @dev: Device to handle. |
| 193 | */ | 185 | */ |
| 194 | int pm_generic_poweroff(struct device *dev) | 186 | int pm_generic_poweroff(struct device *dev) |
| 195 | { | 187 | { |
| 196 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 188 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false); |
| 197 | |||
| 198 | return pm && pm->poweroff ? pm->poweroff(dev) : 0; | ||
| 199 | } | 189 | } |
| 200 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); | 190 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); |
| 201 | 191 | ||
| @@ -205,59 +195,73 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff); | |||
| 205 | */ | 195 | */ |
| 206 | int pm_generic_thaw_noirq(struct device *dev) | 196 | int pm_generic_thaw_noirq(struct device *dev) |
| 207 | { | 197 | { |
| 208 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 198 | return __pm_generic_call(dev, PM_EVENT_THAW, true); |
| 209 | |||
| 210 | return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0; | ||
| 211 | } | 199 | } |
| 212 | EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); | 200 | EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); |
| 213 | 201 | ||
| 214 | /** | 202 | /** |
| 215 | * pm_generic_thaw_early - Generic thaw_early callback for subsystems. | ||
| 216 | * @dev: Device to thaw. | ||
| 217 | */ | ||
| 218 | int pm_generic_thaw_early(struct device *dev) | ||
| 219 | { | ||
| 220 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 221 | |||
| 222 | return pm && pm->thaw_early ? pm->thaw_early(dev) : 0; | ||
| 223 | } | ||
| 224 | EXPORT_SYMBOL_GPL(pm_generic_thaw_early); | ||
| 225 | |||
| 226 | /** | ||
| 227 | * pm_generic_thaw - Generic thaw callback for subsystems. | 203 | * pm_generic_thaw - Generic thaw callback for subsystems. |
| 228 | * @dev: Device to thaw. | 204 | * @dev: Device to thaw. |
| 229 | */ | 205 | */ |
| 230 | int pm_generic_thaw(struct device *dev) | 206 | int pm_generic_thaw(struct device *dev) |
| 231 | { | 207 | { |
| 232 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 208 | return __pm_generic_call(dev, PM_EVENT_THAW, false); |
| 233 | |||
| 234 | return pm && pm->thaw ? pm->thaw(dev) : 0; | ||
| 235 | } | 209 | } |
| 236 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | 210 | EXPORT_SYMBOL_GPL(pm_generic_thaw); |
| 237 | 211 | ||
| 238 | /** | 212 | /** |
| 239 | * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. | 213 | * __pm_generic_resume - Generic resume/restore callback for subsystems. |
| 240 | * @dev: Device to resume. | 214 | * @dev: Device to handle. |
| 215 | * @event: PM transition of the system under way. | ||
| 216 | * @bool: Whether or not this is the "noirq" stage. | ||
| 217 | * | ||
| 218 | * Execute the resume/resotre callback provided by the @dev's driver, if | ||
| 219 | * defined. If it returns 0, change the device's runtime PM status to 'active'. | ||
| 220 | * Return the callback's error code. | ||
| 241 | */ | 221 | */ |
| 242 | int pm_generic_resume_noirq(struct device *dev) | 222 | static int __pm_generic_resume(struct device *dev, int event, bool noirq) |
| 243 | { | 223 | { |
| 244 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 224 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 225 | int (*callback)(struct device *); | ||
| 226 | int ret; | ||
| 227 | |||
| 228 | if (!pm) | ||
| 229 | return 0; | ||
| 230 | |||
| 231 | switch (event) { | ||
| 232 | case PM_EVENT_RESUME: | ||
| 233 | callback = noirq ? pm->resume_noirq : pm->resume; | ||
| 234 | break; | ||
| 235 | case PM_EVENT_RESTORE: | ||
| 236 | callback = noirq ? pm->restore_noirq : pm->restore; | ||
| 237 | break; | ||
| 238 | default: | ||
| 239 | callback = NULL; | ||
| 240 | break; | ||
| 241 | } | ||
| 242 | |||
| 243 | if (!callback) | ||
| 244 | return 0; | ||
| 245 | 245 | ||
| 246 | return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0; | 246 | ret = callback(dev); |
| 247 | if (!ret && !noirq && pm_runtime_enabled(dev)) { | ||
| 248 | pm_runtime_disable(dev); | ||
| 249 | pm_runtime_set_active(dev); | ||
| 250 | pm_runtime_enable(dev); | ||
| 251 | } | ||
| 252 | |||
| 253 | return ret; | ||
| 247 | } | 254 | } |
| 248 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); | ||
| 249 | 255 | ||
| 250 | /** | 256 | /** |
| 251 | * pm_generic_resume_early - Generic resume_early callback for subsystems. | 257 | * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. |
| 252 | * @dev: Device to resume. | 258 | * @dev: Device to resume. |
| 253 | */ | 259 | */ |
| 254 | int pm_generic_resume_early(struct device *dev) | 260 | int pm_generic_resume_noirq(struct device *dev) |
| 255 | { | 261 | { |
| 256 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 262 | return __pm_generic_resume(dev, PM_EVENT_RESUME, true); |
| 257 | |||
| 258 | return pm && pm->resume_early ? pm->resume_early(dev) : 0; | ||
| 259 | } | 263 | } |
| 260 | EXPORT_SYMBOL_GPL(pm_generic_resume_early); | 264 | EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); |
| 261 | 265 | ||
| 262 | /** | 266 | /** |
| 263 | * pm_generic_resume - Generic resume callback for subsystems. | 267 | * pm_generic_resume - Generic resume callback for subsystems. |
| @@ -265,9 +269,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early); | |||
| 265 | */ | 269 | */ |
| 266 | int pm_generic_resume(struct device *dev) | 270 | int pm_generic_resume(struct device *dev) |
| 267 | { | 271 | { |
| 268 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 272 | return __pm_generic_resume(dev, PM_EVENT_RESUME, false); |
| 269 | |||
| 270 | return pm && pm->resume ? pm->resume(dev) : 0; | ||
| 271 | } | 273 | } |
| 272 | EXPORT_SYMBOL_GPL(pm_generic_resume); | 274 | EXPORT_SYMBOL_GPL(pm_generic_resume); |
| 273 | 275 | ||
| @@ -277,33 +279,17 @@ EXPORT_SYMBOL_GPL(pm_generic_resume); | |||
| 277 | */ | 279 | */ |
| 278 | int pm_generic_restore_noirq(struct device *dev) | 280 | int pm_generic_restore_noirq(struct device *dev) |
| 279 | { | 281 | { |
| 280 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 282 | return __pm_generic_resume(dev, PM_EVENT_RESTORE, true); |
| 281 | |||
| 282 | return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0; | ||
| 283 | } | 283 | } |
| 284 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); | 284 | EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); |
| 285 | 285 | ||
| 286 | /** | 286 | /** |
| 287 | * pm_generic_restore_early - Generic restore_early callback for subsystems. | ||
| 288 | * @dev: Device to resume. | ||
| 289 | */ | ||
| 290 | int pm_generic_restore_early(struct device *dev) | ||
| 291 | { | ||
| 292 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 293 | |||
| 294 | return pm && pm->restore_early ? pm->restore_early(dev) : 0; | ||
| 295 | } | ||
| 296 | EXPORT_SYMBOL_GPL(pm_generic_restore_early); | ||
| 297 | |||
| 298 | /** | ||
| 299 | * pm_generic_restore - Generic restore callback for subsystems. | 287 | * pm_generic_restore - Generic restore callback for subsystems. |
| 300 | * @dev: Device to restore. | 288 | * @dev: Device to restore. |
| 301 | */ | 289 | */ |
| 302 | int pm_generic_restore(struct device *dev) | 290 | int pm_generic_restore(struct device *dev) |
| 303 | { | 291 | { |
| 304 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 292 | return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); |
| 305 | |||
| 306 | return pm && pm->restore ? pm->restore(dev) : 0; | ||
| 307 | } | 293 | } |
| 308 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 294 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
| 309 | 295 | ||
| @@ -327,3 +313,28 @@ void pm_generic_complete(struct device *dev) | |||
| 327 | pm_runtime_idle(dev); | 313 | pm_runtime_idle(dev); |
| 328 | } | 314 | } |
| 329 | #endif /* CONFIG_PM_SLEEP */ | 315 | #endif /* CONFIG_PM_SLEEP */ |
| 316 | |||
| 317 | struct dev_pm_ops generic_subsys_pm_ops = { | ||
| 318 | #ifdef CONFIG_PM_SLEEP | ||
| 319 | .prepare = pm_generic_prepare, | ||
| 320 | .suspend = pm_generic_suspend, | ||
| 321 | .suspend_noirq = pm_generic_suspend_noirq, | ||
| 322 | .resume = pm_generic_resume, | ||
| 323 | .resume_noirq = pm_generic_resume_noirq, | ||
| 324 | .freeze = pm_generic_freeze, | ||
| 325 | .freeze_noirq = pm_generic_freeze_noirq, | ||
| 326 | .thaw = pm_generic_thaw, | ||
| 327 | .thaw_noirq = pm_generic_thaw_noirq, | ||
| 328 | .poweroff = pm_generic_poweroff, | ||
| 329 | .poweroff_noirq = pm_generic_poweroff_noirq, | ||
| 330 | .restore = pm_generic_restore, | ||
| 331 | .restore_noirq = pm_generic_restore_noirq, | ||
| 332 | .complete = pm_generic_complete, | ||
| 333 | #endif | ||
| 334 | #ifdef CONFIG_PM_RUNTIME | ||
| 335 | .runtime_suspend = pm_generic_runtime_suspend, | ||
| 336 | .runtime_resume = pm_generic_runtime_resume, | ||
| 337 | .runtime_idle = pm_generic_runtime_idle, | ||
| 338 | #endif | ||
| 339 | }; | ||
| 340 | EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 2b7f77d3fcb..7b4b78a6e82 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
| 21 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
| 22 | #include <linux/export.h> | ||
| 23 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
| 24 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
| 25 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
| @@ -28,12 +27,11 @@ | |||
| 28 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
| 29 | #include <linux/async.h> | 28 | #include <linux/async.h> |
| 30 | #include <linux/suspend.h> | 29 | #include <linux/suspend.h> |
| 31 | #include <linux/cpuidle.h> | 30 | #include <linux/timer.h> |
| 31 | |||
| 32 | #include "../base.h" | 32 | #include "../base.h" |
| 33 | #include "power.h" | 33 | #include "power.h" |
| 34 | 34 | ||
| 35 | typedef int (*pm_callback_t)(struct device *); | ||
| 36 | |||
| 37 | /* | 35 | /* |
| 38 | * The entries in the dpm_list list are in a depth first order, simply | 36 | * The entries in the dpm_list list are in a depth first order, simply |
| 39 | * because children are guaranteed to be discovered after parents, and | 37 | * because children are guaranteed to be discovered after parents, and |
| @@ -45,28 +43,34 @@ typedef int (*pm_callback_t)(struct device *); | |||
| 45 | */ | 43 | */ |
| 46 | 44 | ||
| 47 | LIST_HEAD(dpm_list); | 45 | LIST_HEAD(dpm_list); |
| 48 | static LIST_HEAD(dpm_prepared_list); | 46 | LIST_HEAD(dpm_prepared_list); |
| 49 | static LIST_HEAD(dpm_suspended_list); | 47 | LIST_HEAD(dpm_suspended_list); |
| 50 | static LIST_HEAD(dpm_late_early_list); | 48 | LIST_HEAD(dpm_noirq_list); |
| 51 | static LIST_HEAD(dpm_noirq_list); | ||
| 52 | 49 | ||
| 53 | struct suspend_stats suspend_stats; | ||
| 54 | static DEFINE_MUTEX(dpm_list_mtx); | 50 | static DEFINE_MUTEX(dpm_list_mtx); |
| 55 | static pm_message_t pm_transition; | 51 | static pm_message_t pm_transition; |
| 56 | 52 | ||
| 53 | static void dpm_drv_timeout(unsigned long data); | ||
| 54 | struct dpm_drv_wd_data { | ||
| 55 | struct device *dev; | ||
| 56 | struct task_struct *tsk; | ||
| 57 | }; | ||
| 58 | |||
| 57 | static int async_error; | 59 | static int async_error; |
| 58 | 60 | ||
| 59 | /** | 61 | /** |
| 60 | * device_pm_sleep_init - Initialize system suspend-related device fields. | 62 | * device_pm_init - Initialize the PM-related part of a device object. |
| 61 | * @dev: Device object being initialized. | 63 | * @dev: Device object being initialized. |
| 62 | */ | 64 | */ |
| 63 | void device_pm_sleep_init(struct device *dev) | 65 | void device_pm_init(struct device *dev) |
| 64 | { | 66 | { |
| 65 | dev->power.is_prepared = false; | 67 | dev->power.is_prepared = false; |
| 66 | dev->power.is_suspended = false; | 68 | dev->power.is_suspended = false; |
| 67 | init_completion(&dev->power.completion); | 69 | init_completion(&dev->power.completion); |
| 68 | complete_all(&dev->power.completion); | 70 | complete_all(&dev->power.completion); |
| 69 | dev->power.wakeup = NULL; | 71 | dev->power.wakeup = NULL; |
| 72 | spin_lock_init(&dev->power.lock); | ||
| 73 | pm_runtime_init(dev); | ||
| 70 | INIT_LIST_HEAD(&dev->power.entry); | 74 | INIT_LIST_HEAD(&dev->power.entry); |
| 71 | } | 75 | } |
| 72 | 76 | ||
| @@ -99,7 +103,6 @@ void device_pm_add(struct device *dev) | |||
| 99 | dev_warn(dev, "parent %s should not be sleeping\n", | 103 | dev_warn(dev, "parent %s should not be sleeping\n", |
| 100 | dev_name(dev->parent)); | 104 | dev_name(dev->parent)); |
| 101 | list_add_tail(&dev->power.entry, &dpm_list); | 105 | list_add_tail(&dev->power.entry, &dpm_list); |
| 102 | dev_pm_qos_constraints_init(dev); | ||
| 103 | mutex_unlock(&dpm_list_mtx); | 106 | mutex_unlock(&dpm_list_mtx); |
| 104 | } | 107 | } |
| 105 | 108 | ||
| @@ -113,7 +116,6 @@ void device_pm_remove(struct device *dev) | |||
| 113 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 116 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
| 114 | complete_all(&dev->power.completion); | 117 | complete_all(&dev->power.completion); |
| 115 | mutex_lock(&dpm_list_mtx); | 118 | mutex_lock(&dpm_list_mtx); |
| 116 | dev_pm_qos_constraints_destroy(dev); | ||
| 117 | list_del_init(&dev->power.entry); | 119 | list_del_init(&dev->power.entry); |
| 118 | mutex_unlock(&dpm_list_mtx); | 120 | mutex_unlock(&dpm_list_mtx); |
| 119 | device_wakeup_disable(dev); | 121 | device_wakeup_disable(dev); |
| @@ -163,10 +165,9 @@ static ktime_t initcall_debug_start(struct device *dev) | |||
| 163 | { | 165 | { |
| 164 | ktime_t calltime = ktime_set(0, 0); | 166 | ktime_t calltime = ktime_set(0, 0); |
| 165 | 167 | ||
| 166 | if (pm_print_times_enabled) { | 168 | if (initcall_debug) { |
| 167 | pr_info("calling %s+ @ %i, parent: %s\n", | 169 | pr_info("calling %s+ @ %i\n", |
| 168 | dev_name(dev), task_pid_nr(current), | 170 | dev_name(dev), task_pid_nr(current)); |
| 169 | dev->parent ? dev_name(dev->parent) : "none"); | ||
| 170 | calltime = ktime_get(); | 171 | calltime = ktime_get(); |
| 171 | } | 172 | } |
| 172 | 173 | ||
| @@ -178,7 +179,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime, | |||
| 178 | { | 179 | { |
| 179 | ktime_t delta, rettime; | 180 | ktime_t delta, rettime; |
| 180 | 181 | ||
| 181 | if (pm_print_times_enabled) { | 182 | if (initcall_debug) { |
| 182 | rettime = ktime_get(); | 183 | rettime = ktime_get(); |
| 183 | delta = ktime_sub(rettime, calltime); | 184 | delta = ktime_sub(rettime, calltime); |
| 184 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | 185 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), |
| @@ -212,103 +213,151 @@ static void dpm_wait_for_children(struct device *dev, bool async) | |||
| 212 | } | 213 | } |
| 213 | 214 | ||
| 214 | /** | 215 | /** |
| 215 | * pm_op - Return the PM operation appropriate for given PM event. | 216 | * pm_op - Execute the PM operation appropriate for given PM event. |
| 217 | * @dev: Device to handle. | ||
| 216 | * @ops: PM operations to choose from. | 218 | * @ops: PM operations to choose from. |
| 217 | * @state: PM transition of the system being carried out. | 219 | * @state: PM transition of the system being carried out. |
| 218 | */ | 220 | */ |
| 219 | static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) | 221 | static int pm_op(struct device *dev, |
| 222 | const struct dev_pm_ops *ops, | ||
| 223 | pm_message_t state) | ||
| 220 | { | 224 | { |
| 221 | switch (state.event) { | 225 | int error = 0; |
| 222 | #ifdef CONFIG_SUSPEND | 226 | ktime_t calltime; |
| 223 | case PM_EVENT_SUSPEND: | ||
| 224 | return ops->suspend; | ||
| 225 | case PM_EVENT_RESUME: | ||
| 226 | return ops->resume; | ||
| 227 | #endif /* CONFIG_SUSPEND */ | ||
| 228 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
| 229 | case PM_EVENT_FREEZE: | ||
| 230 | case PM_EVENT_QUIESCE: | ||
| 231 | return ops->freeze; | ||
| 232 | case PM_EVENT_HIBERNATE: | ||
| 233 | return ops->poweroff; | ||
| 234 | case PM_EVENT_THAW: | ||
| 235 | case PM_EVENT_RECOVER: | ||
| 236 | return ops->thaw; | ||
| 237 | break; | ||
| 238 | case PM_EVENT_RESTORE: | ||
| 239 | return ops->restore; | ||
| 240 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | ||
| 241 | } | ||
| 242 | 227 | ||
| 243 | return NULL; | 228 | calltime = initcall_debug_start(dev); |
| 244 | } | ||
| 245 | 229 | ||
| 246 | /** | ||
| 247 | * pm_late_early_op - Return the PM operation appropriate for given PM event. | ||
| 248 | * @ops: PM operations to choose from. | ||
| 249 | * @state: PM transition of the system being carried out. | ||
| 250 | * | ||
| 251 | * Runtime PM is disabled for @dev while this function is being executed. | ||
| 252 | */ | ||
| 253 | static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, | ||
| 254 | pm_message_t state) | ||
| 255 | { | ||
| 256 | switch (state.event) { | 230 | switch (state.event) { |
| 257 | #ifdef CONFIG_SUSPEND | 231 | #ifdef CONFIG_SUSPEND |
| 258 | case PM_EVENT_SUSPEND: | 232 | case PM_EVENT_SUSPEND: |
| 259 | return ops->suspend_late; | 233 | if (ops->suspend) { |
| 234 | error = ops->suspend(dev); | ||
| 235 | suspend_report_result(ops->suspend, error); | ||
| 236 | } | ||
| 237 | break; | ||
| 260 | case PM_EVENT_RESUME: | 238 | case PM_EVENT_RESUME: |
| 261 | return ops->resume_early; | 239 | if (ops->resume) { |
| 240 | error = ops->resume(dev); | ||
| 241 | suspend_report_result(ops->resume, error); | ||
| 242 | } | ||
| 243 | break; | ||
| 262 | #endif /* CONFIG_SUSPEND */ | 244 | #endif /* CONFIG_SUSPEND */ |
| 263 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 245 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 264 | case PM_EVENT_FREEZE: | 246 | case PM_EVENT_FREEZE: |
| 265 | case PM_EVENT_QUIESCE: | 247 | case PM_EVENT_QUIESCE: |
| 266 | return ops->freeze_late; | 248 | if (ops->freeze) { |
| 249 | error = ops->freeze(dev); | ||
| 250 | suspend_report_result(ops->freeze, error); | ||
| 251 | } | ||
| 252 | break; | ||
| 267 | case PM_EVENT_HIBERNATE: | 253 | case PM_EVENT_HIBERNATE: |
| 268 | return ops->poweroff_late; | 254 | if (ops->poweroff) { |
| 255 | error = ops->poweroff(dev); | ||
| 256 | suspend_report_result(ops->poweroff, error); | ||
| 257 | } | ||
| 258 | break; | ||
| 269 | case PM_EVENT_THAW: | 259 | case PM_EVENT_THAW: |
| 270 | case PM_EVENT_RECOVER: | 260 | case PM_EVENT_RECOVER: |
| 271 | return ops->thaw_early; | 261 | if (ops->thaw) { |
| 262 | error = ops->thaw(dev); | ||
| 263 | suspend_report_result(ops->thaw, error); | ||
| 264 | } | ||
| 265 | break; | ||
| 272 | case PM_EVENT_RESTORE: | 266 | case PM_EVENT_RESTORE: |
| 273 | return ops->restore_early; | 267 | if (ops->restore) { |
| 268 | error = ops->restore(dev); | ||
| 269 | suspend_report_result(ops->restore, error); | ||
| 270 | } | ||
| 271 | break; | ||
| 274 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 272 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 273 | default: | ||
| 274 | error = -EINVAL; | ||
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | return NULL; | 277 | initcall_debug_report(dev, calltime, error); |
| 278 | |||
| 279 | return error; | ||
| 278 | } | 280 | } |
| 279 | 281 | ||
| 280 | /** | 282 | /** |
| 281 | * pm_noirq_op - Return the PM operation appropriate for given PM event. | 283 | * pm_noirq_op - Execute the PM operation appropriate for given PM event. |
| 284 | * @dev: Device to handle. | ||
| 282 | * @ops: PM operations to choose from. | 285 | * @ops: PM operations to choose from. |
| 283 | * @state: PM transition of the system being carried out. | 286 | * @state: PM transition of the system being carried out. |
| 284 | * | 287 | * |
| 285 | * The driver of @dev will not receive interrupts while this function is being | 288 | * The driver of @dev will not receive interrupts while this function is being |
| 286 | * executed. | 289 | * executed. |
| 287 | */ | 290 | */ |
| 288 | static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) | 291 | static int pm_noirq_op(struct device *dev, |
| 292 | const struct dev_pm_ops *ops, | ||
| 293 | pm_message_t state) | ||
| 289 | { | 294 | { |
| 295 | int error = 0; | ||
| 296 | ktime_t calltime = ktime_set(0, 0), delta, rettime; | ||
| 297 | |||
| 298 | if (initcall_debug) { | ||
| 299 | pr_info("calling %s+ @ %i, parent: %s\n", | ||
| 300 | dev_name(dev), task_pid_nr(current), | ||
| 301 | dev->parent ? dev_name(dev->parent) : "none"); | ||
| 302 | calltime = ktime_get(); | ||
| 303 | } | ||
| 304 | |||
| 290 | switch (state.event) { | 305 | switch (state.event) { |
| 291 | #ifdef CONFIG_SUSPEND | 306 | #ifdef CONFIG_SUSPEND |
| 292 | case PM_EVENT_SUSPEND: | 307 | case PM_EVENT_SUSPEND: |
| 293 | return ops->suspend_noirq; | 308 | if (ops->suspend_noirq) { |
| 309 | error = ops->suspend_noirq(dev); | ||
| 310 | suspend_report_result(ops->suspend_noirq, error); | ||
| 311 | } | ||
| 312 | break; | ||
| 294 | case PM_EVENT_RESUME: | 313 | case PM_EVENT_RESUME: |
| 295 | return ops->resume_noirq; | 314 | if (ops->resume_noirq) { |
| 315 | error = ops->resume_noirq(dev); | ||
| 316 | suspend_report_result(ops->resume_noirq, error); | ||
| 317 | } | ||
| 318 | break; | ||
| 296 | #endif /* CONFIG_SUSPEND */ | 319 | #endif /* CONFIG_SUSPEND */ |
| 297 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 320 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 298 | case PM_EVENT_FREEZE: | 321 | case PM_EVENT_FREEZE: |
| 299 | case PM_EVENT_QUIESCE: | 322 | case PM_EVENT_QUIESCE: |
| 300 | return ops->freeze_noirq; | 323 | if (ops->freeze_noirq) { |
| 324 | error = ops->freeze_noirq(dev); | ||
| 325 | suspend_report_result(ops->freeze_noirq, error); | ||
| 326 | } | ||
| 327 | break; | ||
| 301 | case PM_EVENT_HIBERNATE: | 328 | case PM_EVENT_HIBERNATE: |
| 302 | return ops->poweroff_noirq; | 329 | if (ops->poweroff_noirq) { |
| 330 | error = ops->poweroff_noirq(dev); | ||
| 331 | suspend_report_result(ops->poweroff_noirq, error); | ||
| 332 | } | ||
| 333 | break; | ||
| 303 | case PM_EVENT_THAW: | 334 | case PM_EVENT_THAW: |
| 304 | case PM_EVENT_RECOVER: | 335 | case PM_EVENT_RECOVER: |
| 305 | return ops->thaw_noirq; | 336 | if (ops->thaw_noirq) { |
| 337 | error = ops->thaw_noirq(dev); | ||
| 338 | suspend_report_result(ops->thaw_noirq, error); | ||
| 339 | } | ||
| 340 | break; | ||
| 306 | case PM_EVENT_RESTORE: | 341 | case PM_EVENT_RESTORE: |
| 307 | return ops->restore_noirq; | 342 | if (ops->restore_noirq) { |
| 343 | error = ops->restore_noirq(dev); | ||
| 344 | suspend_report_result(ops->restore_noirq, error); | ||
| 345 | } | ||
| 346 | break; | ||
| 308 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 347 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 348 | default: | ||
| 349 | error = -EINVAL; | ||
| 309 | } | 350 | } |
| 310 | 351 | ||
| 311 | return NULL; | 352 | if (initcall_debug) { |
| 353 | rettime = ktime_get(); | ||
| 354 | delta = ktime_sub(rettime, calltime); | ||
| 355 | printk("initcall %s_i+ returned %d after %Ld usecs\n", | ||
| 356 | dev_name(dev), error, | ||
| 357 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 358 | } | ||
| 359 | |||
| 360 | return error; | ||
| 312 | } | 361 | } |
| 313 | 362 | ||
| 314 | static char *pm_verb(int event) | 363 | static char *pm_verb(int event) |
| @@ -366,26 +415,6 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | |||
| 366 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | 415 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); |
| 367 | } | 416 | } |
| 368 | 417 | ||
| 369 | static int dpm_run_callback(pm_callback_t cb, struct device *dev, | ||
| 370 | pm_message_t state, char *info) | ||
| 371 | { | ||
| 372 | ktime_t calltime; | ||
| 373 | int error; | ||
| 374 | |||
| 375 | if (!cb) | ||
| 376 | return 0; | ||
| 377 | |||
| 378 | calltime = initcall_debug_start(dev); | ||
| 379 | |||
| 380 | pm_dev_dbg(dev, state, info); | ||
| 381 | error = cb(dev); | ||
| 382 | suspend_report_result(cb, error); | ||
| 383 | |||
| 384 | initcall_debug_report(dev, calltime, error); | ||
| 385 | |||
| 386 | return error; | ||
| 387 | } | ||
| 388 | |||
| 389 | /*------------------------- Resume routines -------------------------*/ | 418 | /*------------------------- Resume routines -------------------------*/ |
| 390 | 419 | ||
| 391 | /** | 420 | /** |
| @@ -398,50 +427,37 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, | |||
| 398 | */ | 427 | */ |
| 399 | static int device_resume_noirq(struct device *dev, pm_message_t state) | 428 | static int device_resume_noirq(struct device *dev, pm_message_t state) |
| 400 | { | 429 | { |
| 401 | pm_callback_t callback = NULL; | ||
| 402 | char *info = NULL; | ||
| 403 | int error = 0; | 430 | int error = 0; |
| 404 | 431 | ||
| 405 | TRACE_DEVICE(dev); | 432 | TRACE_DEVICE(dev); |
| 406 | TRACE_RESUME(0); | 433 | TRACE_RESUME(0); |
| 407 | 434 | ||
| 408 | if (dev->power.syscore) | ||
| 409 | goto Out; | ||
| 410 | |||
| 411 | if (dev->pm_domain) { | 435 | if (dev->pm_domain) { |
| 412 | info = "noirq power domain "; | 436 | pm_dev_dbg(dev, state, "EARLY power domain "); |
| 413 | callback = pm_noirq_op(&dev->pm_domain->ops, state); | 437 | error = pm_noirq_op(dev, &dev->pm_domain->ops, state); |
| 414 | } else if (dev->type && dev->type->pm) { | 438 | } else if (dev->type && dev->type->pm) { |
| 415 | info = "noirq type "; | 439 | pm_dev_dbg(dev, state, "EARLY type "); |
| 416 | callback = pm_noirq_op(dev->type->pm, state); | 440 | error = pm_noirq_op(dev, dev->type->pm, state); |
| 417 | } else if (dev->class && dev->class->pm) { | 441 | } else if (dev->class && dev->class->pm) { |
| 418 | info = "noirq class "; | 442 | pm_dev_dbg(dev, state, "EARLY class "); |
| 419 | callback = pm_noirq_op(dev->class->pm, state); | 443 | error = pm_noirq_op(dev, dev->class->pm, state); |
| 420 | } else if (dev->bus && dev->bus->pm) { | 444 | } else if (dev->bus && dev->bus->pm) { |
| 421 | info = "noirq bus "; | 445 | pm_dev_dbg(dev, state, "EARLY "); |
| 422 | callback = pm_noirq_op(dev->bus->pm, state); | 446 | error = pm_noirq_op(dev, dev->bus->pm, state); |
| 423 | } | 447 | } |
| 424 | 448 | ||
| 425 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 426 | info = "noirq driver "; | ||
| 427 | callback = pm_noirq_op(dev->driver->pm, state); | ||
| 428 | } | ||
| 429 | |||
| 430 | error = dpm_run_callback(callback, dev, state, info); | ||
| 431 | |||
| 432 | Out: | ||
| 433 | TRACE_RESUME(error); | 449 | TRACE_RESUME(error); |
| 434 | return error; | 450 | return error; |
| 435 | } | 451 | } |
| 436 | 452 | ||
| 437 | /** | 453 | /** |
| 438 | * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. | 454 | * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. |
| 439 | * @state: PM transition of the system being carried out. | 455 | * @state: PM transition of the system being carried out. |
| 440 | * | 456 | * |
| 441 | * Call the "noirq" resume handlers for all devices in dpm_noirq_list and | 457 | * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and |
| 442 | * enable device drivers to receive interrupts. | 458 | * enable device drivers to receive interrupts. |
| 443 | */ | 459 | */ |
| 444 | static void dpm_resume_noirq(pm_message_t state) | 460 | void dpm_resume_noirq(pm_message_t state) |
| 445 | { | 461 | { |
| 446 | ktime_t starttime = ktime_get(); | 462 | ktime_t starttime = ktime_get(); |
| 447 | 463 | ||
| @@ -451,117 +467,43 @@ static void dpm_resume_noirq(pm_message_t state) | |||
| 451 | int error; | 467 | int error; |
| 452 | 468 | ||
| 453 | get_device(dev); | 469 | get_device(dev); |
| 454 | list_move_tail(&dev->power.entry, &dpm_late_early_list); | 470 | list_move_tail(&dev->power.entry, &dpm_suspended_list); |
| 455 | mutex_unlock(&dpm_list_mtx); | 471 | mutex_unlock(&dpm_list_mtx); |
| 456 | 472 | ||
| 457 | error = device_resume_noirq(dev, state); | 473 | error = device_resume_noirq(dev, state); |
| 458 | if (error) { | 474 | if (error) |
| 459 | suspend_stats.failed_resume_noirq++; | 475 | pm_dev_err(dev, state, " early", error); |
| 460 | dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); | ||
| 461 | dpm_save_failed_dev(dev_name(dev)); | ||
| 462 | pm_dev_err(dev, state, " noirq", error); | ||
| 463 | } | ||
| 464 | 476 | ||
| 465 | mutex_lock(&dpm_list_mtx); | 477 | mutex_lock(&dpm_list_mtx); |
| 466 | put_device(dev); | 478 | put_device(dev); |
| 467 | } | 479 | } |
| 468 | mutex_unlock(&dpm_list_mtx); | 480 | mutex_unlock(&dpm_list_mtx); |
| 469 | dpm_show_time(starttime, state, "noirq"); | 481 | dpm_show_time(starttime, state, "early"); |
| 470 | resume_device_irqs(); | 482 | resume_device_irqs(); |
| 471 | cpuidle_resume(); | ||
| 472 | } | 483 | } |
| 484 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | ||
| 473 | 485 | ||
| 474 | /** | 486 | /** |
| 475 | * device_resume_early - Execute an "early resume" callback for given device. | 487 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. |
| 476 | * @dev: Device to handle. | 488 | * @dev: Device to resume. |
| 477 | * @state: PM transition of the system being carried out. | 489 | * @cb: Resume callback to execute. |
| 478 | * | ||
| 479 | * Runtime PM is disabled for @dev while this function is being executed. | ||
| 480 | */ | 490 | */ |
| 481 | static int device_resume_early(struct device *dev, pm_message_t state) | 491 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) |
| 482 | { | 492 | { |
| 483 | pm_callback_t callback = NULL; | 493 | int error; |
| 484 | char *info = NULL; | 494 | ktime_t calltime; |
| 485 | int error = 0; | ||
| 486 | |||
| 487 | TRACE_DEVICE(dev); | ||
| 488 | TRACE_RESUME(0); | ||
| 489 | |||
| 490 | if (dev->power.syscore) | ||
| 491 | goto Out; | ||
| 492 | |||
| 493 | if (dev->pm_domain) { | ||
| 494 | info = "early power domain "; | ||
| 495 | callback = pm_late_early_op(&dev->pm_domain->ops, state); | ||
| 496 | } else if (dev->type && dev->type->pm) { | ||
| 497 | info = "early type "; | ||
| 498 | callback = pm_late_early_op(dev->type->pm, state); | ||
| 499 | } else if (dev->class && dev->class->pm) { | ||
| 500 | info = "early class "; | ||
| 501 | callback = pm_late_early_op(dev->class->pm, state); | ||
| 502 | } else if (dev->bus && dev->bus->pm) { | ||
| 503 | info = "early bus "; | ||
| 504 | callback = pm_late_early_op(dev->bus->pm, state); | ||
| 505 | } | ||
| 506 | 495 | ||
| 507 | if (!callback && dev->driver && dev->driver->pm) { | 496 | calltime = initcall_debug_start(dev); |
| 508 | info = "early driver "; | ||
| 509 | callback = pm_late_early_op(dev->driver->pm, state); | ||
| 510 | } | ||
| 511 | 497 | ||
| 512 | error = dpm_run_callback(callback, dev, state, info); | 498 | error = cb(dev); |
| 499 | suspend_report_result(cb, error); | ||
| 513 | 500 | ||
| 514 | Out: | 501 | initcall_debug_report(dev, calltime, error); |
| 515 | TRACE_RESUME(error); | ||
| 516 | 502 | ||
| 517 | pm_runtime_enable(dev); | ||
| 518 | return error; | 503 | return error; |
| 519 | } | 504 | } |
| 520 | 505 | ||
| 521 | /** | 506 | /** |
| 522 | * dpm_resume_early - Execute "early resume" callbacks for all devices. | ||
| 523 | * @state: PM transition of the system being carried out. | ||
| 524 | */ | ||
| 525 | static void dpm_resume_early(pm_message_t state) | ||
| 526 | { | ||
| 527 | ktime_t starttime = ktime_get(); | ||
| 528 | |||
| 529 | mutex_lock(&dpm_list_mtx); | ||
| 530 | while (!list_empty(&dpm_late_early_list)) { | ||
| 531 | struct device *dev = to_device(dpm_late_early_list.next); | ||
| 532 | int error; | ||
| 533 | |||
| 534 | get_device(dev); | ||
| 535 | list_move_tail(&dev->power.entry, &dpm_suspended_list); | ||
| 536 | mutex_unlock(&dpm_list_mtx); | ||
| 537 | |||
| 538 | error = device_resume_early(dev, state); | ||
| 539 | if (error) { | ||
| 540 | suspend_stats.failed_resume_early++; | ||
| 541 | dpm_save_failed_step(SUSPEND_RESUME_EARLY); | ||
| 542 | dpm_save_failed_dev(dev_name(dev)); | ||
| 543 | pm_dev_err(dev, state, " early", error); | ||
| 544 | } | ||
| 545 | |||
| 546 | mutex_lock(&dpm_list_mtx); | ||
| 547 | put_device(dev); | ||
| 548 | } | ||
| 549 | mutex_unlock(&dpm_list_mtx); | ||
| 550 | dpm_show_time(starttime, state, "early"); | ||
| 551 | } | ||
| 552 | |||
| 553 | /** | ||
| 554 | * dpm_resume_start - Execute "noirq" and "early" device callbacks. | ||
| 555 | * @state: PM transition of the system being carried out. | ||
| 556 | */ | ||
| 557 | void dpm_resume_start(pm_message_t state) | ||
| 558 | { | ||
| 559 | dpm_resume_noirq(state); | ||
| 560 | dpm_resume_early(state); | ||
| 561 | } | ||
| 562 | EXPORT_SYMBOL_GPL(dpm_resume_start); | ||
| 563 | |||
| 564 | /** | ||
| 565 | * device_resume - Execute "resume" callbacks for given device. | 507 | * device_resume - Execute "resume" callbacks for given device. |
| 566 | * @dev: Device to handle. | 508 | * @dev: Device to handle. |
| 567 | * @state: PM transition of the system being carried out. | 509 | * @state: PM transition of the system being carried out. |
| @@ -569,16 +511,12 @@ EXPORT_SYMBOL_GPL(dpm_resume_start); | |||
| 569 | */ | 511 | */ |
| 570 | static int device_resume(struct device *dev, pm_message_t state, bool async) | 512 | static int device_resume(struct device *dev, pm_message_t state, bool async) |
| 571 | { | 513 | { |
| 572 | pm_callback_t callback = NULL; | ||
| 573 | char *info = NULL; | ||
| 574 | int error = 0; | 514 | int error = 0; |
| 515 | bool put = false; | ||
| 575 | 516 | ||
| 576 | TRACE_DEVICE(dev); | 517 | TRACE_DEVICE(dev); |
| 577 | TRACE_RESUME(0); | 518 | TRACE_RESUME(0); |
| 578 | 519 | ||
| 579 | if (dev->power.syscore) | ||
| 580 | goto Complete; | ||
| 581 | |||
| 582 | dpm_wait(dev->parent, async); | 520 | dpm_wait(dev->parent, async); |
| 583 | device_lock(dev); | 521 | device_lock(dev); |
| 584 | 522 | ||
| @@ -591,59 +529,55 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
| 591 | if (!dev->power.is_suspended) | 529 | if (!dev->power.is_suspended) |
| 592 | goto Unlock; | 530 | goto Unlock; |
| 593 | 531 | ||
| 532 | pm_runtime_enable(dev); | ||
| 533 | put = true; | ||
| 534 | |||
| 594 | if (dev->pm_domain) { | 535 | if (dev->pm_domain) { |
| 595 | info = "power domain "; | 536 | pm_dev_dbg(dev, state, "power domain "); |
| 596 | callback = pm_op(&dev->pm_domain->ops, state); | 537 | error = pm_op(dev, &dev->pm_domain->ops, state); |
| 597 | goto Driver; | 538 | goto End; |
| 598 | } | 539 | } |
| 599 | 540 | ||
| 600 | if (dev->type && dev->type->pm) { | 541 | if (dev->type && dev->type->pm) { |
| 601 | info = "type "; | 542 | pm_dev_dbg(dev, state, "type "); |
| 602 | callback = pm_op(dev->type->pm, state); | 543 | error = pm_op(dev, dev->type->pm, state); |
| 603 | goto Driver; | 544 | goto End; |
| 604 | } | 545 | } |
| 605 | 546 | ||
| 606 | if (dev->class) { | 547 | if (dev->class) { |
| 607 | if (dev->class->pm) { | 548 | if (dev->class->pm) { |
| 608 | info = "class "; | 549 | pm_dev_dbg(dev, state, "class "); |
| 609 | callback = pm_op(dev->class->pm, state); | 550 | error = pm_op(dev, dev->class->pm, state); |
| 610 | goto Driver; | 551 | goto End; |
| 611 | } else if (dev->class->resume) { | 552 | } else if (dev->class->resume) { |
| 612 | info = "legacy class "; | 553 | pm_dev_dbg(dev, state, "legacy class "); |
| 613 | callback = dev->class->resume; | 554 | error = legacy_resume(dev, dev->class->resume); |
| 614 | goto End; | 555 | goto End; |
| 615 | } | 556 | } |
| 616 | } | 557 | } |
| 617 | 558 | ||
| 618 | if (dev->bus) { | 559 | if (dev->bus) { |
| 619 | if (dev->bus->pm) { | 560 | if (dev->bus->pm) { |
| 620 | info = "bus "; | 561 | pm_dev_dbg(dev, state, ""); |
| 621 | callback = pm_op(dev->bus->pm, state); | 562 | error = pm_op(dev, dev->bus->pm, state); |
| 622 | } else if (dev->bus->resume) { | 563 | } else if (dev->bus->resume) { |
| 623 | info = "legacy bus "; | 564 | pm_dev_dbg(dev, state, "legacy "); |
| 624 | callback = dev->bus->resume; | 565 | error = legacy_resume(dev, dev->bus->resume); |
| 625 | goto End; | ||
| 626 | } | 566 | } |
| 627 | } | 567 | } |
| 628 | 568 | ||
| 629 | Driver: | ||
| 630 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 631 | info = "driver "; | ||
| 632 | callback = pm_op(dev->driver->pm, state); | ||
| 633 | } | ||
| 634 | |||
| 635 | End: | 569 | End: |
| 636 | error = dpm_run_callback(callback, dev, state, info); | ||
| 637 | dev->power.is_suspended = false; | 570 | dev->power.is_suspended = false; |
| 638 | 571 | ||
| 639 | Unlock: | 572 | Unlock: |
| 640 | device_unlock(dev); | 573 | device_unlock(dev); |
| 641 | |||
| 642 | Complete: | ||
| 643 | complete_all(&dev->power.completion); | 574 | complete_all(&dev->power.completion); |
| 644 | 575 | ||
| 645 | TRACE_RESUME(error); | 576 | TRACE_RESUME(error); |
| 646 | 577 | ||
| 578 | if (put) | ||
| 579 | pm_runtime_put_sync(dev); | ||
| 580 | |||
| 647 | return error; | 581 | return error; |
| 648 | } | 582 | } |
| 649 | 583 | ||
| @@ -665,6 +599,30 @@ static bool is_async(struct device *dev) | |||
| 665 | } | 599 | } |
| 666 | 600 | ||
| 667 | /** | 601 | /** |
| 602 | * dpm_drv_timeout - Driver suspend / resume watchdog handler | ||
| 603 | * @data: struct device which timed out | ||
| 604 | * | ||
| 605 | * Called when a driver has timed out suspending or resuming. | ||
| 606 | * There's not much we can do here to recover so | ||
| 607 | * BUG() out for a crash-dump | ||
| 608 | * | ||
| 609 | */ | ||
| 610 | static void dpm_drv_timeout(unsigned long data) | ||
| 611 | { | ||
| 612 | struct dpm_drv_wd_data *wd_data = (void *)data; | ||
| 613 | struct device *dev = wd_data->dev; | ||
| 614 | struct task_struct *tsk = wd_data->tsk; | ||
| 615 | |||
| 616 | printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev), | ||
| 617 | (dev->driver ? dev->driver->name : "no driver")); | ||
| 618 | |||
| 619 | printk(KERN_EMERG "dpm suspend stack:\n"); | ||
| 620 | show_stack(tsk, NULL); | ||
| 621 | |||
| 622 | BUG(); | ||
| 623 | } | ||
| 624 | |||
| 625 | /** | ||
| 668 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. | 626 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. |
| 669 | * @state: PM transition of the system being carried out. | 627 | * @state: PM transition of the system being carried out. |
| 670 | * | 628 | * |
| @@ -699,12 +657,8 @@ void dpm_resume(pm_message_t state) | |||
| 699 | mutex_unlock(&dpm_list_mtx); | 657 | mutex_unlock(&dpm_list_mtx); |
| 700 | 658 | ||
| 701 | error = device_resume(dev, state, false); | 659 | error = device_resume(dev, state, false); |
| 702 | if (error) { | 660 | if (error) |
| 703 | suspend_stats.failed_resume++; | ||
| 704 | dpm_save_failed_step(SUSPEND_RESUME); | ||
| 705 | dpm_save_failed_dev(dev_name(dev)); | ||
| 706 | pm_dev_err(dev, state, "", error); | 661 | pm_dev_err(dev, state, "", error); |
| 707 | } | ||
| 708 | 662 | ||
| 709 | mutex_lock(&dpm_list_mtx); | 663 | mutex_lock(&dpm_list_mtx); |
| 710 | } | 664 | } |
| @@ -724,41 +678,27 @@ void dpm_resume(pm_message_t state) | |||
| 724 | */ | 678 | */ |
| 725 | static void device_complete(struct device *dev, pm_message_t state) | 679 | static void device_complete(struct device *dev, pm_message_t state) |
| 726 | { | 680 | { |
| 727 | void (*callback)(struct device *) = NULL; | ||
| 728 | char *info = NULL; | ||
| 729 | |||
| 730 | if (dev->power.syscore) | ||
| 731 | return; | ||
| 732 | |||
| 733 | device_lock(dev); | 681 | device_lock(dev); |
| 734 | 682 | ||
| 735 | if (dev->pm_domain) { | 683 | if (dev->pm_domain) { |
| 736 | info = "completing power domain "; | 684 | pm_dev_dbg(dev, state, "completing power domain "); |
| 737 | callback = dev->pm_domain->ops.complete; | 685 | if (dev->pm_domain->ops.complete) |
| 686 | dev->pm_domain->ops.complete(dev); | ||
| 738 | } else if (dev->type && dev->type->pm) { | 687 | } else if (dev->type && dev->type->pm) { |
| 739 | info = "completing type "; | 688 | pm_dev_dbg(dev, state, "completing type "); |
| 740 | callback = dev->type->pm->complete; | 689 | if (dev->type->pm->complete) |
| 690 | dev->type->pm->complete(dev); | ||
| 741 | } else if (dev->class && dev->class->pm) { | 691 | } else if (dev->class && dev->class->pm) { |
| 742 | info = "completing class "; | 692 | pm_dev_dbg(dev, state, "completing class "); |
| 743 | callback = dev->class->pm->complete; | 693 | if (dev->class->pm->complete) |
| 694 | dev->class->pm->complete(dev); | ||
| 744 | } else if (dev->bus && dev->bus->pm) { | 695 | } else if (dev->bus && dev->bus->pm) { |
| 745 | info = "completing bus "; | 696 | pm_dev_dbg(dev, state, "completing "); |
| 746 | callback = dev->bus->pm->complete; | 697 | if (dev->bus->pm->complete) |
| 747 | } | 698 | dev->bus->pm->complete(dev); |
| 748 | |||
| 749 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 750 | info = "completing driver "; | ||
| 751 | callback = dev->driver->pm->complete; | ||
| 752 | } | ||
| 753 | |||
| 754 | if (callback) { | ||
| 755 | pm_dev_dbg(dev, state, info); | ||
| 756 | callback(dev); | ||
| 757 | } | 699 | } |
| 758 | 700 | ||
| 759 | device_unlock(dev); | 701 | device_unlock(dev); |
| 760 | |||
| 761 | pm_runtime_put_sync(dev); | ||
| 762 | } | 702 | } |
| 763 | 703 | ||
| 764 | /** | 704 | /** |
| @@ -841,186 +781,73 @@ static pm_message_t resume_event(pm_message_t sleep_state) | |||
| 841 | */ | 781 | */ |
| 842 | static int device_suspend_noirq(struct device *dev, pm_message_t state) | 782 | static int device_suspend_noirq(struct device *dev, pm_message_t state) |
| 843 | { | 783 | { |
| 844 | pm_callback_t callback = NULL; | 784 | int error; |
| 845 | char *info = NULL; | ||
| 846 | |||
| 847 | if (dev->power.syscore) | ||
| 848 | return 0; | ||
| 849 | 785 | ||
| 850 | if (dev->pm_domain) { | 786 | if (dev->pm_domain) { |
| 851 | info = "noirq power domain "; | 787 | pm_dev_dbg(dev, state, "LATE power domain "); |
| 852 | callback = pm_noirq_op(&dev->pm_domain->ops, state); | 788 | error = pm_noirq_op(dev, &dev->pm_domain->ops, state); |
| 789 | if (error) | ||
| 790 | return error; | ||
| 853 | } else if (dev->type && dev->type->pm) { | 791 | } else if (dev->type && dev->type->pm) { |
| 854 | info = "noirq type "; | 792 | pm_dev_dbg(dev, state, "LATE type "); |
| 855 | callback = pm_noirq_op(dev->type->pm, state); | 793 | error = pm_noirq_op(dev, dev->type->pm, state); |
| 794 | if (error) | ||
| 795 | return error; | ||
| 856 | } else if (dev->class && dev->class->pm) { | 796 | } else if (dev->class && dev->class->pm) { |
| 857 | info = "noirq class "; | 797 | pm_dev_dbg(dev, state, "LATE class "); |
| 858 | callback = pm_noirq_op(dev->class->pm, state); | 798 | error = pm_noirq_op(dev, dev->class->pm, state); |
| 799 | if (error) | ||
| 800 | return error; | ||
| 859 | } else if (dev->bus && dev->bus->pm) { | 801 | } else if (dev->bus && dev->bus->pm) { |
| 860 | info = "noirq bus "; | 802 | pm_dev_dbg(dev, state, "LATE "); |
| 861 | callback = pm_noirq_op(dev->bus->pm, state); | 803 | error = pm_noirq_op(dev, dev->bus->pm, state); |
| 862 | } | 804 | if (error) |
| 863 | 805 | return error; | |
| 864 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 865 | info = "noirq driver "; | ||
| 866 | callback = pm_noirq_op(dev->driver->pm, state); | ||
| 867 | } | 806 | } |
| 868 | 807 | ||
| 869 | return dpm_run_callback(callback, dev, state, info); | 808 | return 0; |
| 870 | } | 809 | } |
| 871 | 810 | ||
| 872 | /** | 811 | /** |
| 873 | * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. | 812 | * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. |
| 874 | * @state: PM transition of the system being carried out. | 813 | * @state: PM transition of the system being carried out. |
| 875 | * | 814 | * |
| 876 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend | 815 | * Prevent device drivers from receiving interrupts and call the "noirq" suspend |
| 877 | * handlers for all non-sysdev devices. | 816 | * handlers for all non-sysdev devices. |
| 878 | */ | 817 | */ |
| 879 | static int dpm_suspend_noirq(pm_message_t state) | 818 | int dpm_suspend_noirq(pm_message_t state) |
| 880 | { | 819 | { |
| 881 | ktime_t starttime = ktime_get(); | 820 | ktime_t starttime = ktime_get(); |
| 882 | int error = 0; | 821 | int error = 0; |
| 883 | 822 | ||
| 884 | cpuidle_pause(); | ||
| 885 | suspend_device_irqs(); | 823 | suspend_device_irqs(); |
| 886 | mutex_lock(&dpm_list_mtx); | 824 | mutex_lock(&dpm_list_mtx); |
| 887 | while (!list_empty(&dpm_late_early_list)) { | ||
| 888 | struct device *dev = to_device(dpm_late_early_list.prev); | ||
| 889 | |||
| 890 | get_device(dev); | ||
| 891 | mutex_unlock(&dpm_list_mtx); | ||
| 892 | |||
| 893 | error = device_suspend_noirq(dev, state); | ||
| 894 | |||
| 895 | mutex_lock(&dpm_list_mtx); | ||
| 896 | if (error) { | ||
| 897 | pm_dev_err(dev, state, " noirq", error); | ||
| 898 | suspend_stats.failed_suspend_noirq++; | ||
| 899 | dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); | ||
| 900 | dpm_save_failed_dev(dev_name(dev)); | ||
| 901 | put_device(dev); | ||
| 902 | break; | ||
| 903 | } | ||
| 904 | if (!list_empty(&dev->power.entry)) | ||
| 905 | list_move(&dev->power.entry, &dpm_noirq_list); | ||
| 906 | put_device(dev); | ||
| 907 | |||
| 908 | if (pm_wakeup_pending()) { | ||
| 909 | error = -EBUSY; | ||
| 910 | break; | ||
| 911 | } | ||
| 912 | } | ||
| 913 | mutex_unlock(&dpm_list_mtx); | ||
| 914 | if (error) | ||
| 915 | dpm_resume_noirq(resume_event(state)); | ||
| 916 | else | ||
| 917 | dpm_show_time(starttime, state, "noirq"); | ||
| 918 | return error; | ||
| 919 | } | ||
| 920 | |||
| 921 | /** | ||
| 922 | * device_suspend_late - Execute a "late suspend" callback for given device. | ||
| 923 | * @dev: Device to handle. | ||
| 924 | * @state: PM transition of the system being carried out. | ||
| 925 | * | ||
| 926 | * Runtime PM is disabled for @dev while this function is being executed. | ||
| 927 | */ | ||
| 928 | static int device_suspend_late(struct device *dev, pm_message_t state) | ||
| 929 | { | ||
| 930 | pm_callback_t callback = NULL; | ||
| 931 | char *info = NULL; | ||
| 932 | |||
| 933 | __pm_runtime_disable(dev, false); | ||
| 934 | |||
| 935 | if (dev->power.syscore) | ||
| 936 | return 0; | ||
| 937 | |||
| 938 | if (dev->pm_domain) { | ||
| 939 | info = "late power domain "; | ||
| 940 | callback = pm_late_early_op(&dev->pm_domain->ops, state); | ||
| 941 | } else if (dev->type && dev->type->pm) { | ||
| 942 | info = "late type "; | ||
| 943 | callback = pm_late_early_op(dev->type->pm, state); | ||
| 944 | } else if (dev->class && dev->class->pm) { | ||
| 945 | info = "late class "; | ||
| 946 | callback = pm_late_early_op(dev->class->pm, state); | ||
| 947 | } else if (dev->bus && dev->bus->pm) { | ||
| 948 | info = "late bus "; | ||
| 949 | callback = pm_late_early_op(dev->bus->pm, state); | ||
| 950 | } | ||
| 951 | |||
| 952 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 953 | info = "late driver "; | ||
| 954 | callback = pm_late_early_op(dev->driver->pm, state); | ||
| 955 | } | ||
| 956 | |||
| 957 | return dpm_run_callback(callback, dev, state, info); | ||
| 958 | } | ||
| 959 | |||
| 960 | /** | ||
| 961 | * dpm_suspend_late - Execute "late suspend" callbacks for all devices. | ||
| 962 | * @state: PM transition of the system being carried out. | ||
| 963 | */ | ||
| 964 | static int dpm_suspend_late(pm_message_t state) | ||
| 965 | { | ||
| 966 | ktime_t starttime = ktime_get(); | ||
| 967 | int error = 0; | ||
| 968 | |||
| 969 | mutex_lock(&dpm_list_mtx); | ||
| 970 | while (!list_empty(&dpm_suspended_list)) { | 825 | while (!list_empty(&dpm_suspended_list)) { |
| 971 | struct device *dev = to_device(dpm_suspended_list.prev); | 826 | struct device *dev = to_device(dpm_suspended_list.prev); |
| 972 | 827 | ||
| 973 | get_device(dev); | 828 | get_device(dev); |
| 974 | mutex_unlock(&dpm_list_mtx); | 829 | mutex_unlock(&dpm_list_mtx); |
| 975 | 830 | ||
| 976 | error = device_suspend_late(dev, state); | 831 | error = device_suspend_noirq(dev, state); |
| 977 | 832 | ||
| 978 | mutex_lock(&dpm_list_mtx); | 833 | mutex_lock(&dpm_list_mtx); |
| 979 | if (error) { | 834 | if (error) { |
| 980 | pm_dev_err(dev, state, " late", error); | 835 | pm_dev_err(dev, state, " late", error); |
| 981 | suspend_stats.failed_suspend_late++; | ||
| 982 | dpm_save_failed_step(SUSPEND_SUSPEND_LATE); | ||
| 983 | dpm_save_failed_dev(dev_name(dev)); | ||
| 984 | put_device(dev); | 836 | put_device(dev); |
| 985 | break; | 837 | break; |
| 986 | } | 838 | } |
| 987 | if (!list_empty(&dev->power.entry)) | 839 | if (!list_empty(&dev->power.entry)) |
| 988 | list_move(&dev->power.entry, &dpm_late_early_list); | 840 | list_move(&dev->power.entry, &dpm_noirq_list); |
| 989 | put_device(dev); | 841 | put_device(dev); |
| 990 | |||
| 991 | if (pm_wakeup_pending()) { | ||
| 992 | error = -EBUSY; | ||
| 993 | break; | ||
| 994 | } | ||
| 995 | } | 842 | } |
| 996 | mutex_unlock(&dpm_list_mtx); | 843 | mutex_unlock(&dpm_list_mtx); |
| 997 | if (error) | 844 | if (error) |
| 998 | dpm_resume_early(resume_event(state)); | 845 | dpm_resume_noirq(resume_event(state)); |
| 999 | else | 846 | else |
| 1000 | dpm_show_time(starttime, state, "late"); | 847 | dpm_show_time(starttime, state, "late"); |
| 1001 | |||
| 1002 | return error; | 848 | return error; |
| 1003 | } | 849 | } |
| 1004 | 850 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | |
| 1005 | /** | ||
| 1006 | * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. | ||
| 1007 | * @state: PM transition of the system being carried out. | ||
| 1008 | */ | ||
| 1009 | int dpm_suspend_end(pm_message_t state) | ||
| 1010 | { | ||
| 1011 | int error = dpm_suspend_late(state); | ||
| 1012 | if (error) | ||
| 1013 | return error; | ||
| 1014 | |||
| 1015 | error = dpm_suspend_noirq(state); | ||
| 1016 | if (error) { | ||
| 1017 | dpm_resume_early(resume_event(state)); | ||
| 1018 | return error; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | return 0; | ||
| 1022 | } | ||
| 1023 | EXPORT_SYMBOL_GPL(dpm_suspend_end); | ||
| 1024 | 851 | ||
| 1025 | /** | 852 | /** |
| 1026 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | 853 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. |
| @@ -1052,51 +879,52 @@ static int legacy_suspend(struct device *dev, pm_message_t state, | |||
| 1052 | */ | 879 | */ |
| 1053 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) | 880 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) |
| 1054 | { | 881 | { |
| 1055 | pm_callback_t callback = NULL; | ||
| 1056 | char *info = NULL; | ||
| 1057 | int error = 0; | 882 | int error = 0; |
| 883 | struct timer_list timer; | ||
| 884 | struct dpm_drv_wd_data data; | ||
| 1058 | 885 | ||
| 1059 | dpm_wait_for_children(dev, async); | 886 | dpm_wait_for_children(dev, async); |
| 1060 | 887 | ||
| 888 | data.dev = dev; | ||
| 889 | data.tsk = get_current(); | ||
| 890 | init_timer_on_stack(&timer); | ||
| 891 | timer.expires = jiffies + HZ * 12; | ||
| 892 | timer.function = dpm_drv_timeout; | ||
| 893 | timer.data = (unsigned long)&data; | ||
| 894 | add_timer(&timer); | ||
| 895 | |||
| 1061 | if (async_error) | 896 | if (async_error) |
| 1062 | goto Complete; | 897 | return 0; |
| 1063 | 898 | ||
| 1064 | /* | 899 | pm_runtime_get_noresume(dev); |
| 1065 | * If a device configured to wake up the system from sleep states | ||
| 1066 | * has been suspended at run time and there's a resume request pending | ||
| 1067 | * for it, this is equivalent to the device signaling wakeup, so the | ||
| 1068 | * system suspend operation should be aborted. | ||
| 1069 | */ | ||
| 1070 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) | 900 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
| 1071 | pm_wakeup_event(dev, 0); | 901 | pm_wakeup_event(dev, 0); |
| 1072 | 902 | ||
| 1073 | if (pm_wakeup_pending()) { | 903 | if (pm_wakeup_pending()) { |
| 904 | pm_runtime_put_sync(dev); | ||
| 1074 | async_error = -EBUSY; | 905 | async_error = -EBUSY; |
| 1075 | goto Complete; | 906 | return 0; |
| 1076 | } | 907 | } |
| 1077 | 908 | ||
| 1078 | if (dev->power.syscore) | ||
| 1079 | goto Complete; | ||
| 1080 | |||
| 1081 | device_lock(dev); | 909 | device_lock(dev); |
| 1082 | 910 | ||
| 1083 | if (dev->pm_domain) { | 911 | if (dev->pm_domain) { |
| 1084 | info = "power domain "; | 912 | pm_dev_dbg(dev, state, "power domain "); |
| 1085 | callback = pm_op(&dev->pm_domain->ops, state); | 913 | error = pm_op(dev, &dev->pm_domain->ops, state); |
| 1086 | goto Run; | 914 | goto End; |
| 1087 | } | 915 | } |
| 1088 | 916 | ||
| 1089 | if (dev->type && dev->type->pm) { | 917 | if (dev->type && dev->type->pm) { |
| 1090 | info = "type "; | 918 | pm_dev_dbg(dev, state, "type "); |
| 1091 | callback = pm_op(dev->type->pm, state); | 919 | error = pm_op(dev, dev->type->pm, state); |
| 1092 | goto Run; | 920 | goto End; |
| 1093 | } | 921 | } |
| 1094 | 922 | ||
| 1095 | if (dev->class) { | 923 | if (dev->class) { |
| 1096 | if (dev->class->pm) { | 924 | if (dev->class->pm) { |
| 1097 | info = "class "; | 925 | pm_dev_dbg(dev, state, "class "); |
| 1098 | callback = pm_op(dev->class->pm, state); | 926 | error = pm_op(dev, dev->class->pm, state); |
| 1099 | goto Run; | 927 | goto End; |
| 1100 | } else if (dev->class->suspend) { | 928 | } else if (dev->class->suspend) { |
| 1101 | pm_dev_dbg(dev, state, "legacy class "); | 929 | pm_dev_dbg(dev, state, "legacy class "); |
| 1102 | error = legacy_suspend(dev, state, dev->class->suspend); | 930 | error = legacy_suspend(dev, state, dev->class->suspend); |
| @@ -1106,37 +934,30 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 1106 | 934 | ||
| 1107 | if (dev->bus) { | 935 | if (dev->bus) { |
| 1108 | if (dev->bus->pm) { | 936 | if (dev->bus->pm) { |
| 1109 | info = "bus "; | 937 | pm_dev_dbg(dev, state, ""); |
| 1110 | callback = pm_op(dev->bus->pm, state); | 938 | error = pm_op(dev, dev->bus->pm, state); |
| 1111 | } else if (dev->bus->suspend) { | 939 | } else if (dev->bus->suspend) { |
| 1112 | pm_dev_dbg(dev, state, "legacy bus "); | 940 | pm_dev_dbg(dev, state, "legacy "); |
| 1113 | error = legacy_suspend(dev, state, dev->bus->suspend); | 941 | error = legacy_suspend(dev, state, dev->bus->suspend); |
| 1114 | goto End; | ||
| 1115 | } | 942 | } |
| 1116 | } | 943 | } |
| 1117 | 944 | ||
| 1118 | Run: | ||
| 1119 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 1120 | info = "driver "; | ||
| 1121 | callback = pm_op(dev->driver->pm, state); | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | error = dpm_run_callback(callback, dev, state, info); | ||
| 1125 | |||
| 1126 | End: | 945 | End: |
| 1127 | if (!error) { | 946 | dev->power.is_suspended = !error; |
| 1128 | dev->power.is_suspended = true; | ||
| 1129 | if (dev->power.wakeup_path | ||
| 1130 | && dev->parent && !dev->parent->power.ignore_children) | ||
| 1131 | dev->parent->power.wakeup_path = true; | ||
| 1132 | } | ||
| 1133 | 947 | ||
| 1134 | device_unlock(dev); | 948 | device_unlock(dev); |
| 1135 | 949 | ||
| 1136 | Complete: | 950 | del_timer_sync(&timer); |
| 951 | destroy_timer_on_stack(&timer); | ||
| 952 | |||
| 1137 | complete_all(&dev->power.completion); | 953 | complete_all(&dev->power.completion); |
| 1138 | if (error) | 954 | |
| 955 | if (error) { | ||
| 956 | pm_runtime_put_sync(dev); | ||
| 1139 | async_error = error; | 957 | async_error = error; |
| 958 | } else if (dev->power.is_suspended) { | ||
| 959 | __pm_runtime_disable(dev, false); | ||
| 960 | } | ||
| 1140 | 961 | ||
| 1141 | return error; | 962 | return error; |
| 1142 | } | 963 | } |
| @@ -1147,10 +968,8 @@ static void async_suspend(void *data, async_cookie_t cookie) | |||
| 1147 | int error; | 968 | int error; |
| 1148 | 969 | ||
| 1149 | error = __device_suspend(dev, pm_transition, true); | 970 | error = __device_suspend(dev, pm_transition, true); |
| 1150 | if (error) { | 971 | if (error) |
| 1151 | dpm_save_failed_dev(dev_name(dev)); | ||
| 1152 | pm_dev_err(dev, pm_transition, " async", error); | 972 | pm_dev_err(dev, pm_transition, " async", error); |
| 1153 | } | ||
| 1154 | 973 | ||
| 1155 | put_device(dev); | 974 | put_device(dev); |
| 1156 | } | 975 | } |
| @@ -1193,7 +1012,6 @@ int dpm_suspend(pm_message_t state) | |||
| 1193 | mutex_lock(&dpm_list_mtx); | 1012 | mutex_lock(&dpm_list_mtx); |
| 1194 | if (error) { | 1013 | if (error) { |
| 1195 | pm_dev_err(dev, state, "", error); | 1014 | pm_dev_err(dev, state, "", error); |
| 1196 | dpm_save_failed_dev(dev_name(dev)); | ||
| 1197 | put_device(dev); | 1015 | put_device(dev); |
| 1198 | break; | 1016 | break; |
| 1199 | } | 1017 | } |
| @@ -1207,10 +1025,7 @@ int dpm_suspend(pm_message_t state) | |||
| 1207 | async_synchronize_full(); | 1025 | async_synchronize_full(); |
| 1208 | if (!error) | 1026 | if (!error) |
| 1209 | error = async_error; | 1027 | error = async_error; |
| 1210 | if (error) { | 1028 | if (!error) |
| 1211 | suspend_stats.failed_suspend++; | ||
| 1212 | dpm_save_failed_step(SUSPEND_SUSPEND); | ||
| 1213 | } else | ||
| 1214 | dpm_show_time(starttime, state, NULL); | 1029 | dpm_show_time(starttime, state, NULL); |
| 1215 | return error; | 1030 | return error; |
| 1216 | } | 1031 | } |
| @@ -1225,49 +1040,39 @@ int dpm_suspend(pm_message_t state) | |||
| 1225 | */ | 1040 | */ |
| 1226 | static int device_prepare(struct device *dev, pm_message_t state) | 1041 | static int device_prepare(struct device *dev, pm_message_t state) |
| 1227 | { | 1042 | { |
| 1228 | int (*callback)(struct device *) = NULL; | ||
| 1229 | char *info = NULL; | ||
| 1230 | int error = 0; | 1043 | int error = 0; |
| 1231 | 1044 | ||
| 1232 | if (dev->power.syscore) | ||
| 1233 | return 0; | ||
| 1234 | |||
| 1235 | /* | ||
| 1236 | * If a device's parent goes into runtime suspend at the wrong time, | ||
| 1237 | * it won't be possible to resume the device. To prevent this we | ||
| 1238 | * block runtime suspend here, during the prepare phase, and allow | ||
| 1239 | * it again during the complete phase. | ||
| 1240 | */ | ||
| 1241 | pm_runtime_get_noresume(dev); | ||
| 1242 | |||
| 1243 | device_lock(dev); | 1045 | device_lock(dev); |
| 1244 | 1046 | ||
| 1245 | dev->power.wakeup_path = device_may_wakeup(dev); | ||
| 1246 | |||
| 1247 | if (dev->pm_domain) { | 1047 | if (dev->pm_domain) { |
| 1248 | info = "preparing power domain "; | 1048 | pm_dev_dbg(dev, state, "preparing power domain "); |
| 1249 | callback = dev->pm_domain->ops.prepare; | 1049 | if (dev->pm_domain->ops.prepare) |
| 1050 | error = dev->pm_domain->ops.prepare(dev); | ||
| 1051 | suspend_report_result(dev->pm_domain->ops.prepare, error); | ||
| 1052 | if (error) | ||
| 1053 | goto End; | ||
| 1250 | } else if (dev->type && dev->type->pm) { | 1054 | } else if (dev->type && dev->type->pm) { |
| 1251 | info = "preparing type "; | 1055 | pm_dev_dbg(dev, state, "preparing type "); |
| 1252 | callback = dev->type->pm->prepare; | 1056 | if (dev->type->pm->prepare) |
| 1057 | error = dev->type->pm->prepare(dev); | ||
| 1058 | suspend_report_result(dev->type->pm->prepare, error); | ||
| 1059 | if (error) | ||
| 1060 | goto End; | ||
| 1253 | } else if (dev->class && dev->class->pm) { | 1061 | } else if (dev->class && dev->class->pm) { |
| 1254 | info = "preparing class "; | 1062 | pm_dev_dbg(dev, state, "preparing class "); |
| 1255 | callback = dev->class->pm->prepare; | 1063 | if (dev->class->pm->prepare) |
| 1064 | error = dev->class->pm->prepare(dev); | ||
| 1065 | suspend_report_result(dev->class->pm->prepare, error); | ||
| 1066 | if (error) | ||
| 1067 | goto End; | ||
| 1256 | } else if (dev->bus && dev->bus->pm) { | 1068 | } else if (dev->bus && dev->bus->pm) { |
| 1257 | info = "preparing bus "; | 1069 | pm_dev_dbg(dev, state, "preparing "); |
| 1258 | callback = dev->bus->pm->prepare; | 1070 | if (dev->bus->pm->prepare) |
| 1259 | } | 1071 | error = dev->bus->pm->prepare(dev); |
| 1260 | 1072 | suspend_report_result(dev->bus->pm->prepare, error); | |
| 1261 | if (!callback && dev->driver && dev->driver->pm) { | ||
| 1262 | info = "preparing driver "; | ||
| 1263 | callback = dev->driver->pm->prepare; | ||
| 1264 | } | ||
| 1265 | |||
| 1266 | if (callback) { | ||
| 1267 | error = callback(dev); | ||
| 1268 | suspend_report_result(callback, error); | ||
| 1269 | } | 1073 | } |
| 1270 | 1074 | ||
| 1075 | End: | ||
| 1271 | device_unlock(dev); | 1076 | device_unlock(dev); |
| 1272 | 1077 | ||
| 1273 | return error; | 1078 | return error; |
| @@ -1328,10 +1133,7 @@ int dpm_suspend_start(pm_message_t state) | |||
| 1328 | int error; | 1133 | int error; |
| 1329 | 1134 | ||
| 1330 | error = dpm_prepare(state); | 1135 | error = dpm_prepare(state); |
| 1331 | if (error) { | 1136 | if (!error) |
| 1332 | suspend_stats.failed_prepare++; | ||
| 1333 | dpm_save_failed_step(SUSPEND_PREPARE); | ||
| 1334 | } else | ||
| 1335 | error = dpm_suspend(state); | 1137 | error = dpm_suspend(state); |
| 1336 | return error; | 1138 | return error; |
| 1337 | } | 1139 | } |
| @@ -1355,25 +1157,3 @@ int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | |||
| 1355 | return async_error; | 1157 | return async_error; |
| 1356 | } | 1158 | } |
| 1357 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | 1159 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); |
| 1358 | |||
| 1359 | /** | ||
| 1360 | * dpm_for_each_dev - device iterator. | ||
| 1361 | * @data: data for the callback. | ||
| 1362 | * @fn: function to be called for each device. | ||
| 1363 | * | ||
| 1364 | * Iterate over devices in dpm_list, and call @fn for each device, | ||
| 1365 | * passing it @data. | ||
| 1366 | */ | ||
| 1367 | void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) | ||
| 1368 | { | ||
| 1369 | struct device *dev; | ||
| 1370 | |||
| 1371 | if (!fn) | ||
| 1372 | return; | ||
| 1373 | |||
| 1374 | device_pm_lock(); | ||
| 1375 | list_for_each_entry(dev, &dpm_list, power.entry) | ||
| 1376 | fn(dev, data); | ||
| 1377 | device_pm_unlock(); | ||
| 1378 | } | ||
| 1379 | EXPORT_SYMBOL_GPL(dpm_for_each_dev); | ||
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 50b2831e027..b23de185cb0 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
| @@ -17,13 +17,10 @@ | |||
| 17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 19 | #include <linux/cpufreq.h> | 19 | #include <linux/cpufreq.h> |
| 20 | #include <linux/device.h> | ||
| 21 | #include <linux/list.h> | 20 | #include <linux/list.h> |
| 22 | #include <linux/rculist.h> | 21 | #include <linux/rculist.h> |
| 23 | #include <linux/rcupdate.h> | 22 | #include <linux/rcupdate.h> |
| 24 | #include <linux/opp.h> | 23 | #include <linux/opp.h> |
| 25 | #include <linux/of.h> | ||
| 26 | #include <linux/export.h> | ||
| 27 | 24 | ||
| 28 | /* | 25 | /* |
| 29 | * Internal data structure organization with the OPP layer library is as | 26 | * Internal data structure organization with the OPP layer library is as |
| @@ -66,7 +63,6 @@ struct opp { | |||
| 66 | unsigned long u_volt; | 63 | unsigned long u_volt; |
| 67 | 64 | ||
| 68 | struct device_opp *dev_opp; | 65 | struct device_opp *dev_opp; |
| 69 | struct rcu_head head; | ||
| 70 | }; | 66 | }; |
| 71 | 67 | ||
| 72 | /** | 68 | /** |
| @@ -77,7 +73,6 @@ struct opp { | |||
| 77 | * RCU usage: nodes are not modified in the list of device_opp, | 73 | * RCU usage: nodes are not modified in the list of device_opp, |
| 78 | * however addition is possible and is secured by dev_opp_list_lock | 74 | * however addition is possible and is secured by dev_opp_list_lock |
| 79 | * @dev: device pointer | 75 | * @dev: device pointer |
| 80 | * @head: notifier head to notify the OPP availability changes. | ||
| 81 | * @opp_list: list of opps | 76 | * @opp_list: list of opps |
| 82 | * | 77 | * |
| 83 | * This is an internal data structure maintaining the link to opps attached to | 78 | * This is an internal data structure maintaining the link to opps attached to |
| @@ -88,7 +83,6 @@ struct device_opp { | |||
| 88 | struct list_head node; | 83 | struct list_head node; |
| 89 | 84 | ||
| 90 | struct device *dev; | 85 | struct device *dev; |
| 91 | struct srcu_notifier_head head; | ||
| 92 | struct list_head opp_list; | 86 | struct list_head opp_list; |
| 93 | }; | 87 | }; |
| 94 | 88 | ||
| @@ -162,7 +156,6 @@ unsigned long opp_get_voltage(struct opp *opp) | |||
| 162 | 156 | ||
| 163 | return v; | 157 | return v; |
| 164 | } | 158 | } |
| 165 | EXPORT_SYMBOL(opp_get_voltage); | ||
| 166 | 159 | ||
| 167 | /** | 160 | /** |
| 168 | * opp_get_freq() - Gets the frequency corresponding to an available opp | 161 | * opp_get_freq() - Gets the frequency corresponding to an available opp |
| @@ -192,7 +185,6 @@ unsigned long opp_get_freq(struct opp *opp) | |||
| 192 | 185 | ||
| 193 | return f; | 186 | return f; |
| 194 | } | 187 | } |
| 195 | EXPORT_SYMBOL(opp_get_freq); | ||
| 196 | 188 | ||
| 197 | /** | 189 | /** |
| 198 | * opp_get_opp_count() - Get number of opps available in the opp list | 190 | * opp_get_opp_count() - Get number of opps available in the opp list |
| @@ -225,7 +217,6 @@ int opp_get_opp_count(struct device *dev) | |||
| 225 | 217 | ||
| 226 | return count; | 218 | return count; |
| 227 | } | 219 | } |
| 228 | EXPORT_SYMBOL(opp_get_opp_count); | ||
| 229 | 220 | ||
| 230 | /** | 221 | /** |
| 231 | * opp_find_freq_exact() - search for an exact frequency | 222 | * opp_find_freq_exact() - search for an exact frequency |
| @@ -235,10 +226,7 @@ EXPORT_SYMBOL(opp_get_opp_count); | |||
| 235 | * | 226 | * |
| 236 | * Searches for exact match in the opp list and returns pointer to the matching | 227 | * Searches for exact match in the opp list and returns pointer to the matching |
| 237 | * opp if found, else returns ERR_PTR in case of error and should be handled | 228 | * opp if found, else returns ERR_PTR in case of error and should be handled |
| 238 | * using IS_ERR. Error return values can be: | 229 | * using IS_ERR. |
| 239 | * EINVAL: for bad pointer | ||
| 240 | * ERANGE: no match found for search | ||
| 241 | * ENODEV: if device not found in list of registered devices | ||
| 242 | * | 230 | * |
| 243 | * Note: available is a modifier for the search. if available=true, then the | 231 | * Note: available is a modifier for the search. if available=true, then the |
| 244 | * match is for exact matching frequency and is available in the stored OPP | 232 | * match is for exact matching frequency and is available in the stored OPP |
| @@ -257,7 +245,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
| 257 | bool available) | 245 | bool available) |
| 258 | { | 246 | { |
| 259 | struct device_opp *dev_opp; | 247 | struct device_opp *dev_opp; |
| 260 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 248 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); |
| 261 | 249 | ||
| 262 | dev_opp = find_device_opp(dev); | 250 | dev_opp = find_device_opp(dev); |
| 263 | if (IS_ERR(dev_opp)) { | 251 | if (IS_ERR(dev_opp)) { |
| @@ -276,7 +264,6 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
| 276 | 264 | ||
| 277 | return opp; | 265 | return opp; |
| 278 | } | 266 | } |
| 279 | EXPORT_SYMBOL(opp_find_freq_exact); | ||
| 280 | 267 | ||
| 281 | /** | 268 | /** |
| 282 | * opp_find_freq_ceil() - Search for an rounded ceil freq | 269 | * opp_find_freq_ceil() - Search for an rounded ceil freq |
| @@ -287,11 +274,7 @@ EXPORT_SYMBOL(opp_find_freq_exact); | |||
| 287 | * for a device. | 274 | * for a device. |
| 288 | * | 275 | * |
| 289 | * Returns matching *opp and refreshes *freq accordingly, else returns | 276 | * Returns matching *opp and refreshes *freq accordingly, else returns |
| 290 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | 277 | * ERR_PTR in case of error and should be handled using IS_ERR. |
| 291 | * values can be: | ||
| 292 | * EINVAL: for bad pointer | ||
| 293 | * ERANGE: no match found for search | ||
| 294 | * ENODEV: if device not found in list of registered devices | ||
| 295 | * | 278 | * |
| 296 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 279 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
| 297 | * protected pointer. The reason for the same is that the opp pointer which is | 280 | * protected pointer. The reason for the same is that the opp pointer which is |
| @@ -302,7 +285,7 @@ EXPORT_SYMBOL(opp_find_freq_exact); | |||
| 302 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | 285 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) |
| 303 | { | 286 | { |
| 304 | struct device_opp *dev_opp; | 287 | struct device_opp *dev_opp; |
| 305 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 288 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); |
| 306 | 289 | ||
| 307 | if (!dev || !freq) { | 290 | if (!dev || !freq) { |
| 308 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 291 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
| @@ -311,7 +294,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
| 311 | 294 | ||
| 312 | dev_opp = find_device_opp(dev); | 295 | dev_opp = find_device_opp(dev); |
| 313 | if (IS_ERR(dev_opp)) | 296 | if (IS_ERR(dev_opp)) |
| 314 | return ERR_CAST(dev_opp); | 297 | return opp; |
| 315 | 298 | ||
| 316 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 299 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
| 317 | if (temp_opp->available && temp_opp->rate >= *freq) { | 300 | if (temp_opp->available && temp_opp->rate >= *freq) { |
| @@ -323,7 +306,6 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
| 323 | 306 | ||
| 324 | return opp; | 307 | return opp; |
| 325 | } | 308 | } |
| 326 | EXPORT_SYMBOL(opp_find_freq_ceil); | ||
| 327 | 309 | ||
| 328 | /** | 310 | /** |
| 329 | * opp_find_freq_floor() - Search for a rounded floor freq | 311 | * opp_find_freq_floor() - Search for a rounded floor freq |
| @@ -334,11 +316,7 @@ EXPORT_SYMBOL(opp_find_freq_ceil); | |||
| 334 | * for a device. | 316 | * for a device. |
| 335 | * | 317 | * |
| 336 | * Returns matching *opp and refreshes *freq accordingly, else returns | 318 | * Returns matching *opp and refreshes *freq accordingly, else returns |
| 337 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | 319 | * ERR_PTR in case of error and should be handled using IS_ERR. |
| 338 | * values can be: | ||
| 339 | * EINVAL: for bad pointer | ||
| 340 | * ERANGE: no match found for search | ||
| 341 | * ENODEV: if device not found in list of registered devices | ||
| 342 | * | 320 | * |
| 343 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 321 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
| 344 | * protected pointer. The reason for the same is that the opp pointer which is | 322 | * protected pointer. The reason for the same is that the opp pointer which is |
| @@ -349,7 +327,7 @@ EXPORT_SYMBOL(opp_find_freq_ceil); | |||
| 349 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | 327 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) |
| 350 | { | 328 | { |
| 351 | struct device_opp *dev_opp; | 329 | struct device_opp *dev_opp; |
| 352 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 330 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); |
| 353 | 331 | ||
| 354 | if (!dev || !freq) { | 332 | if (!dev || !freq) { |
| 355 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 333 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
| @@ -358,7 +336,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
| 358 | 336 | ||
| 359 | dev_opp = find_device_opp(dev); | 337 | dev_opp = find_device_opp(dev); |
| 360 | if (IS_ERR(dev_opp)) | 338 | if (IS_ERR(dev_opp)) |
| 361 | return ERR_CAST(dev_opp); | 339 | return opp; |
| 362 | 340 | ||
| 363 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | 341 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { |
| 364 | if (temp_opp->available) { | 342 | if (temp_opp->available) { |
| @@ -374,7 +352,6 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
| 374 | 352 | ||
| 375 | return opp; | 353 | return opp; |
| 376 | } | 354 | } |
| 377 | EXPORT_SYMBOL(opp_find_freq_floor); | ||
| 378 | 355 | ||
| 379 | /** | 356 | /** |
| 380 | * opp_add() - Add an OPP table from a table definitions | 357 | * opp_add() - Add an OPP table from a table definitions |
| @@ -427,7 +404,6 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
| 427 | } | 404 | } |
| 428 | 405 | ||
| 429 | dev_opp->dev = dev; | 406 | dev_opp->dev = dev; |
| 430 | srcu_init_notifier_head(&dev_opp->head); | ||
| 431 | INIT_LIST_HEAD(&dev_opp->opp_list); | 407 | INIT_LIST_HEAD(&dev_opp->opp_list); |
| 432 | 408 | ||
| 433 | /* Secure the device list modification */ | 409 | /* Secure the device list modification */ |
| @@ -452,11 +428,6 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
| 452 | list_add_rcu(&new_opp->node, head); | 428 | list_add_rcu(&new_opp->node, head); |
| 453 | mutex_unlock(&dev_opp_list_lock); | 429 | mutex_unlock(&dev_opp_list_lock); |
| 454 | 430 | ||
| 455 | /* | ||
| 456 | * Notify the changes in the availability of the operable | ||
| 457 | * frequency/voltage list. | ||
| 458 | */ | ||
| 459 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); | ||
| 460 | return 0; | 431 | return 0; |
| 461 | } | 432 | } |
| 462 | 433 | ||
| @@ -531,20 +502,15 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
| 531 | 502 | ||
| 532 | list_replace_rcu(&opp->node, &new_opp->node); | 503 | list_replace_rcu(&opp->node, &new_opp->node); |
| 533 | mutex_unlock(&dev_opp_list_lock); | 504 | mutex_unlock(&dev_opp_list_lock); |
| 534 | kfree_rcu(opp, head); | 505 | synchronize_rcu(); |
| 535 | 506 | ||
| 536 | /* Notify the change of the OPP availability */ | 507 | /* clean up old opp */ |
| 537 | if (availability_req) | 508 | new_opp = opp; |
| 538 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, | 509 | goto out; |
| 539 | new_opp); | ||
| 540 | else | ||
| 541 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, | ||
| 542 | new_opp); | ||
| 543 | |||
| 544 | return 0; | ||
| 545 | 510 | ||
| 546 | unlock: | 511 | unlock: |
| 547 | mutex_unlock(&dev_opp_list_lock); | 512 | mutex_unlock(&dev_opp_list_lock); |
| 513 | out: | ||
| 548 | kfree(new_opp); | 514 | kfree(new_opp); |
| 549 | return r; | 515 | return r; |
| 550 | } | 516 | } |
| @@ -568,7 +534,6 @@ int opp_enable(struct device *dev, unsigned long freq) | |||
| 568 | { | 534 | { |
| 569 | return opp_set_availability(dev, freq, true); | 535 | return opp_set_availability(dev, freq, true); |
| 570 | } | 536 | } |
| 571 | EXPORT_SYMBOL(opp_enable); | ||
| 572 | 537 | ||
| 573 | /** | 538 | /** |
| 574 | * opp_disable() - Disable a specific OPP | 539 | * opp_disable() - Disable a specific OPP |
| @@ -590,7 +555,6 @@ int opp_disable(struct device *dev, unsigned long freq) | |||
| 590 | { | 555 | { |
| 591 | return opp_set_availability(dev, freq, false); | 556 | return opp_set_availability(dev, freq, false); |
| 592 | } | 557 | } |
| 593 | EXPORT_SYMBOL(opp_disable); | ||
| 594 | 558 | ||
| 595 | #ifdef CONFIG_CPU_FREQ | 559 | #ifdef CONFIG_CPU_FREQ |
| 596 | /** | 560 | /** |
| @@ -679,63 +643,3 @@ void opp_free_cpufreq_table(struct device *dev, | |||
| 679 | *table = NULL; | 643 | *table = NULL; |
| 680 | } | 644 | } |
| 681 | #endif /* CONFIG_CPU_FREQ */ | 645 | #endif /* CONFIG_CPU_FREQ */ |
| 682 | |||
| 683 | /** | ||
| 684 | * opp_get_notifier() - find notifier_head of the device with opp | ||
| 685 | * @dev: device pointer used to lookup device OPPs. | ||
| 686 | */ | ||
| 687 | struct srcu_notifier_head *opp_get_notifier(struct device *dev) | ||
| 688 | { | ||
| 689 | struct device_opp *dev_opp = find_device_opp(dev); | ||
| 690 | |||
| 691 | if (IS_ERR(dev_opp)) | ||
| 692 | return ERR_CAST(dev_opp); /* matching type */ | ||
| 693 | |||
| 694 | return &dev_opp->head; | ||
| 695 | } | ||
| 696 | |||
| 697 | #ifdef CONFIG_OF | ||
| 698 | /** | ||
| 699 | * of_init_opp_table() - Initialize opp table from device tree | ||
| 700 | * @dev: device pointer used to lookup device OPPs. | ||
| 701 | * | ||
| 702 | * Register the initial OPP table with the OPP library for given device. | ||
| 703 | */ | ||
| 704 | int of_init_opp_table(struct device *dev) | ||
| 705 | { | ||
| 706 | const struct property *prop; | ||
| 707 | const __be32 *val; | ||
| 708 | int nr; | ||
| 709 | |||
| 710 | prop = of_find_property(dev->of_node, "operating-points", NULL); | ||
| 711 | if (!prop) | ||
| 712 | return -ENODEV; | ||
| 713 | if (!prop->value) | ||
| 714 | return -ENODATA; | ||
| 715 | |||
| 716 | /* | ||
| 717 | * Each OPP is a set of tuples consisting of frequency and | ||
| 718 | * voltage like <freq-kHz vol-uV>. | ||
| 719 | */ | ||
| 720 | nr = prop->length / sizeof(u32); | ||
| 721 | if (nr % 2) { | ||
| 722 | dev_err(dev, "%s: Invalid OPP list\n", __func__); | ||
| 723 | return -EINVAL; | ||
| 724 | } | ||
| 725 | |||
| 726 | val = prop->value; | ||
| 727 | while (nr) { | ||
| 728 | unsigned long freq = be32_to_cpup(val++) * 1000; | ||
| 729 | unsigned long volt = be32_to_cpup(val++); | ||
| 730 | |||
| 731 | if (opp_add(dev, freq, volt)) { | ||
| 732 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | ||
| 733 | __func__, freq); | ||
| 734 | continue; | ||
| 735 | } | ||
| 736 | nr -= 2; | ||
| 737 | } | ||
| 738 | |||
| 739 | return 0; | ||
| 740 | } | ||
| 741 | #endif | ||
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b16686a0a5a..f2a25f18fde 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
| @@ -1,32 +1,10 @@ | |||
| 1 | #include <linux/pm_qos.h> | ||
| 2 | |||
| 3 | static inline void device_pm_init_common(struct device *dev) | ||
| 4 | { | ||
| 5 | if (!dev->power.early_init) { | ||
| 6 | spin_lock_init(&dev->power.lock); | ||
| 7 | dev->power.power_state = PMSG_INVALID; | ||
| 8 | dev->power.early_init = true; | ||
| 9 | } | ||
| 10 | } | ||
| 11 | |||
| 12 | #ifdef CONFIG_PM_RUNTIME | 1 | #ifdef CONFIG_PM_RUNTIME |
| 13 | 2 | ||
| 14 | static inline void pm_runtime_early_init(struct device *dev) | ||
| 15 | { | ||
| 16 | dev->power.disable_depth = 1; | ||
| 17 | device_pm_init_common(dev); | ||
| 18 | } | ||
| 19 | |||
| 20 | extern void pm_runtime_init(struct device *dev); | 3 | extern void pm_runtime_init(struct device *dev); |
| 21 | extern void pm_runtime_remove(struct device *dev); | 4 | extern void pm_runtime_remove(struct device *dev); |
| 22 | 5 | ||
| 23 | #else /* !CONFIG_PM_RUNTIME */ | 6 | #else /* !CONFIG_PM_RUNTIME */ |
| 24 | 7 | ||
| 25 | static inline void pm_runtime_early_init(struct device *dev) | ||
| 26 | { | ||
| 27 | device_pm_init_common(dev); | ||
| 28 | } | ||
| 29 | |||
| 30 | static inline void pm_runtime_init(struct device *dev) {} | 8 | static inline void pm_runtime_init(struct device *dev) {} |
| 31 | static inline void pm_runtime_remove(struct device *dev) {} | 9 | static inline void pm_runtime_remove(struct device *dev) {} |
| 32 | 10 | ||
| @@ -45,7 +23,7 @@ static inline struct device *to_device(struct list_head *entry) | |||
| 45 | return container_of(entry, struct device, power.entry); | 23 | return container_of(entry, struct device, power.entry); |
| 46 | } | 24 | } |
| 47 | 25 | ||
| 48 | extern void device_pm_sleep_init(struct device *dev); | 26 | extern void device_pm_init(struct device *dev); |
| 49 | extern void device_pm_add(struct device *); | 27 | extern void device_pm_add(struct device *); |
| 50 | extern void device_pm_remove(struct device *); | 28 | extern void device_pm_remove(struct device *); |
| 51 | extern void device_pm_move_before(struct device *, struct device *); | 29 | extern void device_pm_move_before(struct device *, struct device *); |
| @@ -54,19 +32,18 @@ extern void device_pm_move_last(struct device *); | |||
| 54 | 32 | ||
| 55 | #else /* !CONFIG_PM_SLEEP */ | 33 | #else /* !CONFIG_PM_SLEEP */ |
| 56 | 34 | ||
| 57 | static inline void device_pm_sleep_init(struct device *dev) {} | 35 | static inline void device_pm_init(struct device *dev) |
| 58 | |||
| 59 | static inline void device_pm_add(struct device *dev) | ||
| 60 | { | 36 | { |
| 61 | dev_pm_qos_constraints_init(dev); | 37 | spin_lock_init(&dev->power.lock); |
| 38 | pm_runtime_init(dev); | ||
| 62 | } | 39 | } |
| 63 | 40 | ||
| 64 | static inline void device_pm_remove(struct device *dev) | 41 | static inline void device_pm_remove(struct device *dev) |
| 65 | { | 42 | { |
| 66 | dev_pm_qos_constraints_destroy(dev); | ||
| 67 | pm_runtime_remove(dev); | 43 | pm_runtime_remove(dev); |
| 68 | } | 44 | } |
| 69 | 45 | ||
| 46 | static inline void device_pm_add(struct device *dev) {} | ||
| 70 | static inline void device_pm_move_before(struct device *deva, | 47 | static inline void device_pm_move_before(struct device *deva, |
| 71 | struct device *devb) {} | 48 | struct device *devb) {} |
| 72 | static inline void device_pm_move_after(struct device *deva, | 49 | static inline void device_pm_move_after(struct device *deva, |
| @@ -75,13 +52,6 @@ static inline void device_pm_move_last(struct device *dev) {} | |||
| 75 | 52 | ||
| 76 | #endif /* !CONFIG_PM_SLEEP */ | 53 | #endif /* !CONFIG_PM_SLEEP */ |
| 77 | 54 | ||
| 78 | static inline void device_pm_init(struct device *dev) | ||
| 79 | { | ||
| 80 | device_pm_init_common(dev); | ||
| 81 | device_pm_sleep_init(dev); | ||
| 82 | pm_runtime_init(dev); | ||
| 83 | } | ||
| 84 | |||
| 85 | #ifdef CONFIG_PM | 55 | #ifdef CONFIG_PM |
| 86 | 56 | ||
| 87 | /* | 57 | /* |
| @@ -93,10 +63,6 @@ extern void dpm_sysfs_remove(struct device *dev); | |||
| 93 | extern void rpm_sysfs_remove(struct device *dev); | 63 | extern void rpm_sysfs_remove(struct device *dev); |
| 94 | extern int wakeup_sysfs_add(struct device *dev); | 64 | extern int wakeup_sysfs_add(struct device *dev); |
| 95 | extern void wakeup_sysfs_remove(struct device *dev); | 65 | extern void wakeup_sysfs_remove(struct device *dev); |
| 96 | extern int pm_qos_sysfs_add_latency(struct device *dev); | ||
| 97 | extern void pm_qos_sysfs_remove_latency(struct device *dev); | ||
| 98 | extern int pm_qos_sysfs_add_flags(struct device *dev); | ||
| 99 | extern void pm_qos_sysfs_remove_flags(struct device *dev); | ||
| 100 | 66 | ||
| 101 | #else /* CONFIG_PM */ | 67 | #else /* CONFIG_PM */ |
| 102 | 68 | ||
| @@ -105,7 +71,5 @@ static inline void dpm_sysfs_remove(struct device *dev) {} | |||
| 105 | static inline void rpm_sysfs_remove(struct device *dev) {} | 71 | static inline void rpm_sysfs_remove(struct device *dev) {} |
| 106 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } | 72 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } |
| 107 | static inline void wakeup_sysfs_remove(struct device *dev) {} | 73 | static inline void wakeup_sysfs_remove(struct device *dev) {} |
| 108 | static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } | ||
| 109 | static inline void pm_qos_sysfs_remove(struct device *dev) {} | ||
| 110 | 74 | ||
| 111 | #endif | 75 | #endif |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c deleted file mode 100644 index d21349544ce..00000000000 --- a/drivers/base/power/qos.c +++ /dev/null | |||
| @@ -1,704 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Devices PM QoS constraints management | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Texas Instruments, Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * | ||
| 11 | * This module exposes the interface to kernel space for specifying | ||
| 12 | * per-device PM QoS dependencies. It provides infrastructure for registration | ||
| 13 | * of: | ||
| 14 | * | ||
| 15 | * Dependents on a QoS value : register requests | ||
| 16 | * Watchers of QoS value : get notified when target QoS value changes | ||
| 17 | * | ||
| 18 | * This QoS design is best effort based. Dependents register their QoS needs. | ||
| 19 | * Watchers register to keep track of the current QoS needs of the system. | ||
| 20 | * Watchers can register different types of notification callbacks: | ||
| 21 | * . a per-device notification callback using the dev_pm_qos_*_notifier API. | ||
| 22 | * The notification chain data is stored in the per-device constraint | ||
| 23 | * data struct. | ||
| 24 | * . a system-wide notification callback using the dev_pm_qos_*_global_notifier | ||
| 25 | * API. The notification chain data is stored in a static variable. | ||
| 26 | * | ||
| 27 | * Note about the per-device constraint data struct allocation: | ||
| 28 | * . The per-device constraints data struct ptr is tored into the device | ||
| 29 | * dev_pm_info. | ||
| 30 | * . To minimize the data usage by the per-device constraints, the data struct | ||
| 31 | * is only allocated at the first call to dev_pm_qos_add_request. | ||
| 32 | * . The data is later free'd when the device is removed from the system. | ||
| 33 | * . A global mutex protects the constraints users from the data being | ||
| 34 | * allocated and free'd. | ||
| 35 | */ | ||
| 36 | |||
| 37 | #include <linux/pm_qos.h> | ||
| 38 | #include <linux/spinlock.h> | ||
| 39 | #include <linux/slab.h> | ||
| 40 | #include <linux/device.h> | ||
| 41 | #include <linux/mutex.h> | ||
| 42 | #include <linux/export.h> | ||
| 43 | #include <linux/pm_runtime.h> | ||
| 44 | |||
| 45 | #include "power.h" | ||
| 46 | |||
| 47 | static DEFINE_MUTEX(dev_pm_qos_mtx); | ||
| 48 | |||
| 49 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | ||
| 50 | |||
| 51 | /** | ||
| 52 | * __dev_pm_qos_flags - Check PM QoS flags for a given device. | ||
| 53 | * @dev: Device to check the PM QoS flags for. | ||
| 54 | * @mask: Flags to check against. | ||
| 55 | * | ||
| 56 | * This routine must be called with dev->power.lock held. | ||
| 57 | */ | ||
| 58 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) | ||
| 59 | { | ||
| 60 | struct dev_pm_qos *qos = dev->power.qos; | ||
| 61 | struct pm_qos_flags *pqf; | ||
| 62 | s32 val; | ||
| 63 | |||
| 64 | if (!qos) | ||
| 65 | return PM_QOS_FLAGS_UNDEFINED; | ||
| 66 | |||
| 67 | pqf = &qos->flags; | ||
| 68 | if (list_empty(&pqf->list)) | ||
| 69 | return PM_QOS_FLAGS_UNDEFINED; | ||
| 70 | |||
| 71 | val = pqf->effective_flags & mask; | ||
| 72 | if (val) | ||
| 73 | return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME; | ||
| 74 | |||
| 75 | return PM_QOS_FLAGS_NONE; | ||
| 76 | } | ||
| 77 | |||
| 78 | /** | ||
| 79 | * dev_pm_qos_flags - Check PM QoS flags for a given device (locked). | ||
| 80 | * @dev: Device to check the PM QoS flags for. | ||
| 81 | * @mask: Flags to check against. | ||
| 82 | */ | ||
| 83 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) | ||
| 84 | { | ||
| 85 | unsigned long irqflags; | ||
| 86 | enum pm_qos_flags_status ret; | ||
| 87 | |||
| 88 | spin_lock_irqsave(&dev->power.lock, irqflags); | ||
| 89 | ret = __dev_pm_qos_flags(dev, mask); | ||
| 90 | spin_unlock_irqrestore(&dev->power.lock, irqflags); | ||
| 91 | |||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | /** | ||
| 96 | * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. | ||
| 97 | * @dev: Device to get the PM QoS constraint value for. | ||
| 98 | * | ||
| 99 | * This routine must be called with dev->power.lock held. | ||
| 100 | */ | ||
| 101 | s32 __dev_pm_qos_read_value(struct device *dev) | ||
| 102 | { | ||
| 103 | return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | /** | ||
| 107 | * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). | ||
| 108 | * @dev: Device to get the PM QoS constraint value for. | ||
| 109 | */ | ||
| 110 | s32 dev_pm_qos_read_value(struct device *dev) | ||
| 111 | { | ||
| 112 | unsigned long flags; | ||
| 113 | s32 ret; | ||
| 114 | |||
| 115 | spin_lock_irqsave(&dev->power.lock, flags); | ||
| 116 | ret = __dev_pm_qos_read_value(dev); | ||
| 117 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
| 118 | |||
| 119 | return ret; | ||
| 120 | } | ||
| 121 | |||
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks
 *
 * Returns the pm_qos core result: nonzero if the aggregate target value
 * changed, 0 if it did not, or -EINVAL for an unknown request type.
 * Must be called with dev_pm_qos_mtx held (all callers hold it).
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_LATENCY:
		ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
					   action, value);
		if (ret) {
			/*
			 * The aggregate latency changed: re-read the new
			 * effective value and pass it to the global
			 * (any-device) notifier chain.  The per-device
			 * notifiers are called by pm_qos_update_target()
			 * itself via qos->latency.notifiers.
			 */
			value = pm_qos_read_value(&qos->latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		/* Flag updates do not go through the notifier chains. */
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
| 159 | |||
| 160 | /* | ||
| 161 | * dev_pm_qos_constraints_allocate | ||
| 162 | * @dev: device to allocate data for | ||
| 163 | * | ||
| 164 | * Called at the first call to add_request, for constraint data allocation | ||
| 165 | * Must be called with the dev_pm_qos_mtx mutex held | ||
| 166 | */ | ||
| 167 | static int dev_pm_qos_constraints_allocate(struct device *dev) | ||
| 168 | { | ||
| 169 | struct dev_pm_qos *qos; | ||
| 170 | struct pm_qos_constraints *c; | ||
| 171 | struct blocking_notifier_head *n; | ||
| 172 | |||
| 173 | qos = kzalloc(sizeof(*qos), GFP_KERNEL); | ||
| 174 | if (!qos) | ||
| 175 | return -ENOMEM; | ||
| 176 | |||
| 177 | n = kzalloc(sizeof(*n), GFP_KERNEL); | ||
| 178 | if (!n) { | ||
| 179 | kfree(qos); | ||
| 180 | return -ENOMEM; | ||
| 181 | } | ||
| 182 | BLOCKING_INIT_NOTIFIER_HEAD(n); | ||
| 183 | |||
| 184 | c = &qos->latency; | ||
| 185 | plist_head_init(&c->list); | ||
| 186 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
| 187 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
| 188 | c->type = PM_QOS_MIN; | ||
| 189 | c->notifiers = n; | ||
| 190 | |||
| 191 | INIT_LIST_HEAD(&qos->flags.list); | ||
| 192 | |||
| 193 | spin_lock_irq(&dev->power.lock); | ||
| 194 | dev->power.qos = qos; | ||
| 195 | spin_unlock_irq(&dev->power.lock); | ||
| 196 | |||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
/**
 * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
 * @dev: target device
 *
 * Called from the device PM subsystem during device insertion under
 * device_pm_lock().
 */
void dev_pm_qos_constraints_init(struct device *dev)
{
	mutex_lock(&dev_pm_qos_mtx);
	/* No constraints yet; PMSG_ON marks the device as present so that
	 * later add_request/add_notifier calls may allocate the data. */
	dev->power.qos = NULL;
	dev->power.power_state = PMSG_ON;
	mutex_unlock(&dev_pm_qos_mtx);
}
| 214 | |||
/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	dev_pm_qos_hide_latency_limit(dev);
	dev_pm_qos_hide_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	/* PMSG_INVALID tells add_request/add_notifier the device is gone. */
	dev->power.power_state = PMSG_INVALID;
	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		/* Zeroing makes dev_pm_qos_request_active() report inactive. */
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	/* Clear the pointer under power.lock before freeing the data, so
	 * lock-holding readers never see a dangling qos pointer. */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = NULL;
	spin_unlock_irq(&dev->power.lock);

	kfree(c->notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
}
| 268 | |||
/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req) /*guard against callers passing in null */
		return -EINVAL;

	/* A request handle may only be added once until it is removed. */
	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	req->dev = dev;

	mutex_lock(&dev_pm_qos_mtx);

	if (!dev->power.qos) {
		/* power_state is PMSG_INVALID only after
		 * dev_pm_qos_constraints_destroy() has run. */
		if (dev->power.power_state.event == PM_EVENT_INVALID) {
			/* The device has been removed from the system. */
			req->dev = NULL;
			ret = -ENODEV;
			goto out;
		} else {
			/*
			 * Allocate the constraints data on the first call to
			 * add_request, i.e. only if the data is not already
			 * allocated and if the device has not been removed.
			 */
			ret = dev_pm_qos_constraints_allocate(dev);
		}
	}

	if (!ret) {
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
| 334 | |||
| 335 | /** | ||
| 336 | * __dev_pm_qos_update_request - Modify an existing device PM QoS request. | ||
| 337 | * @req : PM QoS request to modify. | ||
| 338 | * @new_value: New value to request. | ||
| 339 | */ | ||
| 340 | static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
| 341 | s32 new_value) | ||
| 342 | { | ||
| 343 | s32 curr_value; | ||
| 344 | int ret = 0; | ||
| 345 | |||
| 346 | if (!req->dev->power.qos) | ||
| 347 | return -ENODEV; | ||
| 348 | |||
| 349 | switch(req->type) { | ||
| 350 | case DEV_PM_QOS_LATENCY: | ||
| 351 | curr_value = req->data.pnode.prio; | ||
| 352 | break; | ||
| 353 | case DEV_PM_QOS_FLAGS: | ||
| 354 | curr_value = req->data.flr.flags; | ||
| 355 | break; | ||
| 356 | default: | ||
| 357 | return -EINVAL; | ||
| 358 | } | ||
| 359 | |||
| 360 | if (curr_value != new_value) | ||
| 361 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); | ||
| 362 | |||
| 363 | return ret; | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 367 | * dev_pm_qos_update_request - modifies an existing qos request | ||
| 368 | * @req : handle to list element holding a dev_pm_qos request to use | ||
| 369 | * @new_value: defines the qos request | ||
| 370 | * | ||
| 371 | * Updates an existing dev PM qos request along with updating the | ||
| 372 | * target value. | ||
| 373 | * | ||
| 374 | * Attempts are made to make this code callable on hot code paths. | ||
| 375 | * | ||
| 376 | * Returns 1 if the aggregated constraint value has changed, | ||
| 377 | * 0 if the aggregated constraint value has not changed, | ||
| 378 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
| 379 | * removed from the system | ||
| 380 | * | ||
| 381 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
| 382 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
| 383 | */ | ||
| 384 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) | ||
| 385 | { | ||
| 386 | int ret; | ||
| 387 | |||
| 388 | if (!req) /*guard against callers passing in null */ | ||
| 389 | return -EINVAL; | ||
| 390 | |||
| 391 | if (WARN(!dev_pm_qos_request_active(req), | ||
| 392 | "%s() called for unknown object\n", __func__)) | ||
| 393 | return -EINVAL; | ||
| 394 | |||
| 395 | mutex_lock(&dev_pm_qos_mtx); | ||
| 396 | ret = __dev_pm_qos_update_request(req, new_value); | ||
| 397 | mutex_unlock(&dev_pm_qos_mtx); | ||
| 398 | |||
| 399 | return ret; | ||
| 400 | } | ||
| 401 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | ||
| 402 | |||
| 403 | /** | ||
| 404 | * dev_pm_qos_remove_request - modifies an existing qos request | ||
| 405 | * @req: handle to request list element | ||
| 406 | * | ||
| 407 | * Will remove pm qos request from the list of constraints and | ||
| 408 | * recompute the current target value. Call this on slow code paths. | ||
| 409 | * | ||
| 410 | * Returns 1 if the aggregated constraint value has changed, | ||
| 411 | * 0 if the aggregated constraint value has not changed, | ||
| 412 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
| 413 | * removed from the system | ||
| 414 | * | ||
| 415 | * Callers should ensure that the target device is not RPM_SUSPENDED before | ||
| 416 | * using this function for requests of type DEV_PM_QOS_FLAGS. | ||
| 417 | */ | ||
| 418 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | ||
| 419 | { | ||
| 420 | int ret = 0; | ||
| 421 | |||
| 422 | if (!req) /*guard against callers passing in null */ | ||
| 423 | return -EINVAL; | ||
| 424 | |||
| 425 | if (WARN(!dev_pm_qos_request_active(req), | ||
| 426 | "%s() called for unknown object\n", __func__)) | ||
| 427 | return -EINVAL; | ||
| 428 | |||
| 429 | mutex_lock(&dev_pm_qos_mtx); | ||
| 430 | |||
| 431 | if (req->dev->power.qos) { | ||
| 432 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | ||
| 433 | PM_QOS_DEFAULT_VALUE); | ||
| 434 | memset(req, 0, sizeof(*req)); | ||
| 435 | } else { | ||
| 436 | /* Return if the device has been removed */ | ||
| 437 | ret = -ENODEV; | ||
| 438 | } | ||
| 439 | |||
| 440 | mutex_unlock(&dev_pm_qos_mtx); | ||
| 441 | return ret; | ||
| 442 | } | ||
| 443 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); | ||
| 444 | |||
| 445 | /** | ||
| 446 | * dev_pm_qos_add_notifier - sets notification entry for changes to target value | ||
| 447 | * of per-device PM QoS constraints | ||
| 448 | * | ||
| 449 | * @dev: target device for the constraint | ||
| 450 | * @notifier: notifier block managed by caller. | ||
| 451 | * | ||
| 452 | * Will register the notifier into a notification chain that gets called | ||
| 453 | * upon changes to the target value for the device. | ||
| 454 | * | ||
| 455 | * If the device's constraints object doesn't exist when this routine is called, | ||
| 456 | * it will be created (or error code will be returned if that fails). | ||
| 457 | */ | ||
| 458 | int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | ||
| 459 | { | ||
| 460 | int ret = 0; | ||
| 461 | |||
| 462 | mutex_lock(&dev_pm_qos_mtx); | ||
| 463 | |||
| 464 | if (!dev->power.qos) | ||
| 465 | ret = dev->power.power_state.event != PM_EVENT_INVALID ? | ||
| 466 | dev_pm_qos_constraints_allocate(dev) : -ENODEV; | ||
| 467 | |||
| 468 | if (!ret) | ||
| 469 | ret = blocking_notifier_chain_register( | ||
| 470 | dev->power.qos->latency.notifiers, notifier); | ||
| 471 | |||
| 472 | mutex_unlock(&dev_pm_qos_mtx); | ||
| 473 | return ret; | ||
| 474 | } | ||
| 475 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); | ||
| 476 | |||
| 477 | /** | ||
| 478 | * dev_pm_qos_remove_notifier - deletes notification for changes to target value | ||
| 479 | * of per-device PM QoS constraints | ||
| 480 | * | ||
| 481 | * @dev: target device for the constraint | ||
| 482 | * @notifier: notifier block to be removed. | ||
| 483 | * | ||
| 484 | * Will remove the notifier from the notification chain that gets called | ||
| 485 | * upon changes to the target value. | ||
| 486 | */ | ||
| 487 | int dev_pm_qos_remove_notifier(struct device *dev, | ||
| 488 | struct notifier_block *notifier) | ||
| 489 | { | ||
| 490 | int retval = 0; | ||
| 491 | |||
| 492 | mutex_lock(&dev_pm_qos_mtx); | ||
| 493 | |||
| 494 | /* Silently return if the constraints object is not present. */ | ||
| 495 | if (dev->power.qos) | ||
| 496 | retval = blocking_notifier_chain_unregister( | ||
| 497 | dev->power.qos->latency.notifiers, | ||
| 498 | notifier); | ||
| 499 | |||
| 500 | mutex_unlock(&dev_pm_qos_mtx); | ||
| 501 | return retval; | ||
| 502 | } | ||
| 503 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); | ||
| 504 | |||
| 505 | /** | ||
| 506 | * dev_pm_qos_add_global_notifier - sets notification entry for changes to | ||
| 507 | * target value of the PM QoS constraints for any device | ||
| 508 | * | ||
| 509 | * @notifier: notifier block managed by caller. | ||
| 510 | * | ||
| 511 | * Will register the notifier into a notification chain that gets called | ||
| 512 | * upon changes to the target value for any device. | ||
| 513 | */ | ||
| 514 | int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) | ||
| 515 | { | ||
| 516 | return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); | ||
| 517 | } | ||
| 518 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); | ||
| 519 | |||
| 520 | /** | ||
| 521 | * dev_pm_qos_remove_global_notifier - deletes notification for changes to | ||
| 522 | * target value of PM QoS constraints for any device | ||
| 523 | * | ||
| 524 | * @notifier: notifier block to be removed. | ||
| 525 | * | ||
| 526 | * Will remove the notifier from the notification chain that gets called | ||
| 527 | * upon changes to the target value for any device. | ||
| 528 | */ | ||
| 529 | int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) | ||
| 530 | { | ||
| 531 | return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); | ||
| 532 | } | ||
| 533 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); | ||
| 534 | |||
| 535 | /** | ||
| 536 | * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. | ||
| 537 | * @dev: Device whose ancestor to add the request for. | ||
| 538 | * @req: Pointer to the preallocated handle. | ||
| 539 | * @value: Constraint latency value. | ||
| 540 | */ | ||
| 541 | int dev_pm_qos_add_ancestor_request(struct device *dev, | ||
| 542 | struct dev_pm_qos_request *req, s32 value) | ||
| 543 | { | ||
| 544 | struct device *ancestor = dev->parent; | ||
| 545 | int ret = -ENODEV; | ||
| 546 | |||
| 547 | while (ancestor && !ancestor->power.ignore_children) | ||
| 548 | ancestor = ancestor->parent; | ||
| 549 | |||
| 550 | if (ancestor) | ||
| 551 | ret = dev_pm_qos_add_request(ancestor, req, | ||
| 552 | DEV_PM_QOS_LATENCY, value); | ||
| 553 | |||
| 554 | if (ret < 0) | ||
| 555 | req->dev = NULL; | ||
| 556 | |||
| 557 | return ret; | ||
| 558 | } | ||
| 559 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); | ||
| 560 | |||
| 561 | #ifdef CONFIG_PM_RUNTIME | ||
| 562 | static void __dev_pm_qos_drop_user_request(struct device *dev, | ||
| 563 | enum dev_pm_qos_req_type type) | ||
| 564 | { | ||
| 565 | switch(type) { | ||
| 566 | case DEV_PM_QOS_LATENCY: | ||
| 567 | dev_pm_qos_remove_request(dev->power.qos->latency_req); | ||
| 568 | dev->power.qos->latency_req = NULL; | ||
| 569 | break; | ||
| 570 | case DEV_PM_QOS_FLAGS: | ||
| 571 | dev_pm_qos_remove_request(dev->power.qos->flags_req); | ||
| 572 | dev->power.qos->flags_req = NULL; | ||
| 573 | break; | ||
| 574 | } | ||
| 575 | } | ||
| 576 | |||
| 577 | /** | ||
| 578 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | ||
| 579 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | ||
| 580 | * @value: Initial value of the latency limit. | ||
| 581 | */ | ||
| 582 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | ||
| 583 | { | ||
| 584 | struct dev_pm_qos_request *req; | ||
| 585 | int ret; | ||
| 586 | |||
| 587 | if (!device_is_registered(dev) || value < 0) | ||
| 588 | return -EINVAL; | ||
| 589 | |||
| 590 | if (dev->power.qos && dev->power.qos->latency_req) | ||
| 591 | return -EEXIST; | ||
| 592 | |||
| 593 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
| 594 | if (!req) | ||
| 595 | return -ENOMEM; | ||
| 596 | |||
| 597 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); | ||
| 598 | if (ret < 0) | ||
| 599 | return ret; | ||
| 600 | |||
| 601 | dev->power.qos->latency_req = req; | ||
| 602 | ret = pm_qos_sysfs_add_latency(dev); | ||
| 603 | if (ret) | ||
| 604 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | ||
| 605 | |||
| 606 | return ret; | ||
| 607 | } | ||
| 608 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | ||
| 609 | |||
| 610 | /** | ||
| 611 | * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. | ||
| 612 | * @dev: Device whose PM QoS latency limit is to be hidden from user space. | ||
| 613 | */ | ||
| 614 | void dev_pm_qos_hide_latency_limit(struct device *dev) | ||
| 615 | { | ||
| 616 | if (dev->power.qos && dev->power.qos->latency_req) { | ||
| 617 | pm_qos_sysfs_remove_latency(dev); | ||
| 618 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | ||
| 619 | } | ||
| 620 | } | ||
| 621 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | ||
| 622 | |||
| 623 | /** | ||
| 624 | * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space. | ||
| 625 | * @dev: Device whose PM QoS flags are to be exposed to user space. | ||
| 626 | * @val: Initial values of the flags. | ||
| 627 | */ | ||
| 628 | int dev_pm_qos_expose_flags(struct device *dev, s32 val) | ||
| 629 | { | ||
| 630 | struct dev_pm_qos_request *req; | ||
| 631 | int ret; | ||
| 632 | |||
| 633 | if (!device_is_registered(dev)) | ||
| 634 | return -EINVAL; | ||
| 635 | |||
| 636 | if (dev->power.qos && dev->power.qos->flags_req) | ||
| 637 | return -EEXIST; | ||
| 638 | |||
| 639 | req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
| 640 | if (!req) | ||
| 641 | return -ENOMEM; | ||
| 642 | |||
| 643 | pm_runtime_get_sync(dev); | ||
| 644 | ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); | ||
| 645 | if (ret < 0) | ||
| 646 | goto fail; | ||
| 647 | |||
| 648 | dev->power.qos->flags_req = req; | ||
| 649 | ret = pm_qos_sysfs_add_flags(dev); | ||
| 650 | if (ret) | ||
| 651 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
| 652 | |||
| 653 | fail: | ||
| 654 | pm_runtime_put(dev); | ||
| 655 | return ret; | ||
| 656 | } | ||
| 657 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | ||
| 658 | |||
| 659 | /** | ||
| 660 | * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. | ||
| 661 | * @dev: Device whose PM QoS flags are to be hidden from user space. | ||
| 662 | */ | ||
| 663 | void dev_pm_qos_hide_flags(struct device *dev) | ||
| 664 | { | ||
| 665 | if (dev->power.qos && dev->power.qos->flags_req) { | ||
| 666 | pm_qos_sysfs_remove_flags(dev); | ||
| 667 | pm_runtime_get_sync(dev); | ||
| 668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | ||
| 669 | pm_runtime_put(dev); | ||
| 670 | } | ||
| 671 | } | ||
| 672 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | ||
| 673 | |||
| 674 | /** | ||
| 675 | * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space. | ||
| 676 | * @dev: Device to update the PM QoS flags request for. | ||
| 677 | * @mask: Flags to set/clear. | ||
| 678 | * @set: Whether to set or clear the flags (true means set). | ||
| 679 | */ | ||
| 680 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) | ||
| 681 | { | ||
| 682 | s32 value; | ||
| 683 | int ret; | ||
| 684 | |||
| 685 | if (!dev->power.qos || !dev->power.qos->flags_req) | ||
| 686 | return -EINVAL; | ||
| 687 | |||
| 688 | pm_runtime_get_sync(dev); | ||
| 689 | mutex_lock(&dev_pm_qos_mtx); | ||
| 690 | |||
| 691 | value = dev_pm_qos_requested_flags(dev); | ||
| 692 | if (set) | ||
| 693 | value |= mask; | ||
| 694 | else | ||
| 695 | value &= ~mask; | ||
| 696 | |||
| 697 | ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); | ||
| 698 | |||
| 699 | mutex_unlock(&dev_pm_qos_mtx); | ||
| 700 | pm_runtime_put(dev); | ||
| 701 | |||
| 702 | return ret; | ||
| 703 | } | ||
| 704 | #endif /* CONFIG_PM_RUNTIME */ | ||
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3148b10dc2e..3f141ea9283 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -8,9 +8,7 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
| 11 | #include <linux/export.h> | ||
| 12 | #include <linux/pm_runtime.h> | 11 | #include <linux/pm_runtime.h> |
| 13 | #include <trace/events/rpm.h> | ||
| 14 | #include "power.h" | 12 | #include "power.h" |
| 15 | 13 | ||
| 16 | static int rpm_resume(struct device *dev, int rpmflags); | 14 | static int rpm_resume(struct device *dev, int rpmflags); |
| @@ -147,8 +145,6 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
| 147 | || (dev->power.request_pending | 145 | || (dev->power.request_pending |
| 148 | && dev->power.request == RPM_REQ_RESUME)) | 146 | && dev->power.request == RPM_REQ_RESUME)) |
| 149 | retval = -EAGAIN; | 147 | retval = -EAGAIN; |
| 150 | else if (__dev_pm_qos_read_value(dev) < 0) | ||
| 151 | retval = -EPERM; | ||
| 152 | else if (dev->power.runtime_status == RPM_SUSPENDED) | 148 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
| 153 | retval = 1; | 149 | retval = 1; |
| 154 | 150 | ||
| @@ -156,31 +152,6 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
| 156 | } | 152 | } |
| 157 | 153 | ||
| 158 | /** | 154 | /** |
| 159 | * __rpm_callback - Run a given runtime PM callback for a given device. | ||
| 160 | * @cb: Runtime PM callback to run. | ||
| 161 | * @dev: Device to run the callback for. | ||
| 162 | */ | ||
| 163 | static int __rpm_callback(int (*cb)(struct device *), struct device *dev) | ||
| 164 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
| 165 | { | ||
| 166 | int retval; | ||
| 167 | |||
| 168 | if (dev->power.irq_safe) | ||
| 169 | spin_unlock(&dev->power.lock); | ||
| 170 | else | ||
| 171 | spin_unlock_irq(&dev->power.lock); | ||
| 172 | |||
| 173 | retval = cb(dev); | ||
| 174 | |||
| 175 | if (dev->power.irq_safe) | ||
| 176 | spin_lock(&dev->power.lock); | ||
| 177 | else | ||
| 178 | spin_lock_irq(&dev->power.lock); | ||
| 179 | |||
| 180 | return retval; | ||
| 181 | } | ||
| 182 | |||
| 183 | /** | ||
| 184 | * rpm_idle - Notify device bus type if the device can be suspended. | 155 | * rpm_idle - Notify device bus type if the device can be suspended. |
| 185 | * @dev: Device to notify the bus type about. | 156 | * @dev: Device to notify the bus type about. |
| 186 | * @rpmflags: Flag bits. | 157 | * @rpmflags: Flag bits. |
| @@ -197,7 +168,6 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
| 197 | int (*callback)(struct device *); | 168 | int (*callback)(struct device *); |
| 198 | int retval; | 169 | int retval; |
| 199 | 170 | ||
| 200 | trace_rpm_idle(dev, rpmflags); | ||
| 201 | retval = rpm_check_suspend_allowed(dev); | 171 | retval = rpm_check_suspend_allowed(dev); |
| 202 | if (retval < 0) | 172 | if (retval < 0) |
| 203 | ; /* Conditions are wrong. */ | 173 | ; /* Conditions are wrong. */ |
| @@ -252,17 +222,24 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
| 252 | else | 222 | else |
| 253 | callback = NULL; | 223 | callback = NULL; |
| 254 | 224 | ||
| 255 | if (!callback && dev->driver && dev->driver->pm) | 225 | if (callback) { |
| 256 | callback = dev->driver->pm->runtime_idle; | 226 | if (dev->power.irq_safe) |
| 227 | spin_unlock(&dev->power.lock); | ||
| 228 | else | ||
| 229 | spin_unlock_irq(&dev->power.lock); | ||
| 257 | 230 | ||
| 258 | if (callback) | 231 | callback(dev); |
| 259 | __rpm_callback(callback, dev); | 232 | |
| 233 | if (dev->power.irq_safe) | ||
| 234 | spin_lock(&dev->power.lock); | ||
| 235 | else | ||
| 236 | spin_lock_irq(&dev->power.lock); | ||
| 237 | } | ||
| 260 | 238 | ||
| 261 | dev->power.idle_notification = false; | 239 | dev->power.idle_notification = false; |
| 262 | wake_up_all(&dev->power.wait_queue); | 240 | wake_up_all(&dev->power.wait_queue); |
| 263 | 241 | ||
| 264 | out: | 242 | out: |
| 265 | trace_rpm_return_int(dev, _THIS_IP_, retval); | ||
| 266 | return retval; | 243 | return retval; |
| 267 | } | 244 | } |
| 268 | 245 | ||
| @@ -272,14 +249,22 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
| 272 | * @dev: Device to run the callback for. | 249 | * @dev: Device to run the callback for. |
| 273 | */ | 250 | */ |
| 274 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) | 251 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) |
| 252 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
| 275 | { | 253 | { |
| 276 | int retval; | 254 | int retval; |
| 277 | 255 | ||
| 278 | if (!cb) | 256 | if (!cb) |
| 279 | return -ENOSYS; | 257 | return -ENOSYS; |
| 280 | 258 | ||
| 281 | retval = __rpm_callback(cb, dev); | 259 | if (dev->power.irq_safe) { |
| 260 | retval = cb(dev); | ||
| 261 | } else { | ||
| 262 | spin_unlock_irq(&dev->power.lock); | ||
| 263 | |||
| 264 | retval = cb(dev); | ||
| 282 | 265 | ||
| 266 | spin_lock_irq(&dev->power.lock); | ||
| 267 | } | ||
| 283 | dev->power.runtime_error = retval; | 268 | dev->power.runtime_error = retval; |
| 284 | return retval != -EACCES ? retval : -EIO; | 269 | return retval != -EACCES ? retval : -EIO; |
| 285 | } | 270 | } |
| @@ -289,16 +274,14 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) | |||
| 289 | * @dev: Device to suspend. | 274 | * @dev: Device to suspend. |
| 290 | * @rpmflags: Flag bits. | 275 | * @rpmflags: Flag bits. |
| 291 | * | 276 | * |
| 292 | * Check if the device's runtime PM status allows it to be suspended. | 277 | * Check if the device's runtime PM status allows it to be suspended. If |
| 293 | * Cancel a pending idle notification, autosuspend or suspend. If | 278 | * another suspend has been started earlier, either return immediately or wait |
| 294 | * another suspend has been started earlier, either return immediately | 279 | * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a |
| 295 | * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC | 280 | * pending idle notification. If the RPM_ASYNC flag is set then queue a |
| 296 | * flags. If the RPM_ASYNC flag is set then queue a suspend request; | 281 | * suspend request; otherwise run the ->runtime_suspend() callback directly. |
| 297 | * otherwise run the ->runtime_suspend() callback directly. When | 282 | * If a deferred resume was requested while the callback was running then carry |
| 298 | * ->runtime_suspend succeeded, if a deferred resume was requested while | 283 | * it out; otherwise send an idle notification for the device (if the suspend |
| 299 | * the callback was running then carry it out, otherwise send an idle | 284 | * failed) or for its parent (if the suspend succeeded). |
| 300 | * notification for its parent (if the suspend succeeded and both | ||
| 301 | * ignore_children of parent->power and irq_safe of dev->power are not set). | ||
| 302 | * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO | 285 | * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO |
| 303 | * flag is set and the next autosuspend-delay expiration time is in the | 286 | * flag is set and the next autosuspend-delay expiration time is in the |
| 304 | * future, schedule another autosuspend attempt. | 287 | * future, schedule another autosuspend attempt. |
| @@ -312,7 +295,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 312 | struct device *parent = NULL; | 295 | struct device *parent = NULL; |
| 313 | int retval; | 296 | int retval; |
| 314 | 297 | ||
| 315 | trace_rpm_suspend(dev, rpmflags); | 298 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
| 316 | 299 | ||
| 317 | repeat: | 300 | repeat: |
| 318 | retval = rpm_check_suspend_allowed(dev); | 301 | retval = rpm_check_suspend_allowed(dev); |
| @@ -364,15 +347,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 364 | goto out; | 347 | goto out; |
| 365 | } | 348 | } |
| 366 | 349 | ||
| 367 | if (dev->power.irq_safe) { | ||
| 368 | spin_unlock(&dev->power.lock); | ||
| 369 | |||
| 370 | cpu_relax(); | ||
| 371 | |||
| 372 | spin_lock(&dev->power.lock); | ||
| 373 | goto repeat; | ||
| 374 | } | ||
| 375 | |||
| 376 | /* Wait for the other suspend running in parallel with us. */ | 350 | /* Wait for the other suspend running in parallel with us. */ |
| 377 | for (;;) { | 351 | for (;;) { |
| 378 | prepare_to_wait(&dev->power.wait_queue, &wait, | 352 | prepare_to_wait(&dev->power.wait_queue, &wait, |
| @@ -390,6 +364,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 390 | goto repeat; | 364 | goto repeat; |
| 391 | } | 365 | } |
| 392 | 366 | ||
| 367 | dev->power.deferred_resume = false; | ||
| 393 | if (dev->power.no_callbacks) | 368 | if (dev->power.no_callbacks) |
| 394 | goto no_callback; /* Assume success. */ | 369 | goto no_callback; /* Assume success. */ |
| 395 | 370 | ||
| @@ -417,25 +392,38 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 417 | else | 392 | else |
| 418 | callback = NULL; | 393 | callback = NULL; |
| 419 | 394 | ||
| 420 | if (!callback && dev->driver && dev->driver->pm) | ||
| 421 | callback = dev->driver->pm->runtime_suspend; | ||
| 422 | |||
| 423 | retval = rpm_callback(callback, dev); | 395 | retval = rpm_callback(callback, dev); |
| 424 | if (retval) | 396 | if (retval) { |
| 425 | goto fail; | 397 | __update_runtime_status(dev, RPM_ACTIVE); |
| 398 | dev->power.deferred_resume = false; | ||
| 399 | if (retval == -EAGAIN || retval == -EBUSY) { | ||
| 400 | dev->power.runtime_error = 0; | ||
| 426 | 401 | ||
| 402 | /* | ||
| 403 | * If the callback routine failed an autosuspend, and | ||
| 404 | * if the last_busy time has been updated so that there | ||
| 405 | * is a new autosuspend expiration time, automatically | ||
| 406 | * reschedule another autosuspend. | ||
| 407 | */ | ||
| 408 | if ((rpmflags & RPM_AUTO) && | ||
| 409 | pm_runtime_autosuspend_expiration(dev) != 0) | ||
| 410 | goto repeat; | ||
| 411 | } else { | ||
| 412 | pm_runtime_cancel_pending(dev); | ||
| 413 | } | ||
| 414 | } else { | ||
| 427 | no_callback: | 415 | no_callback: |
| 428 | __update_runtime_status(dev, RPM_SUSPENDED); | 416 | __update_runtime_status(dev, RPM_SUSPENDED); |
| 429 | pm_runtime_deactivate_timer(dev); | 417 | pm_runtime_deactivate_timer(dev); |
| 430 | 418 | ||
| 431 | if (dev->parent) { | 419 | if (dev->parent) { |
| 432 | parent = dev->parent; | 420 | parent = dev->parent; |
| 433 | atomic_add_unless(&parent->power.child_count, -1, 0); | 421 | atomic_add_unless(&parent->power.child_count, -1, 0); |
| 422 | } | ||
| 434 | } | 423 | } |
| 435 | wake_up_all(&dev->power.wait_queue); | 424 | wake_up_all(&dev->power.wait_queue); |
| 436 | 425 | ||
| 437 | if (dev->power.deferred_resume) { | 426 | if (dev->power.deferred_resume) { |
| 438 | dev->power.deferred_resume = false; | ||
| 439 | rpm_resume(dev, 0); | 427 | rpm_resume(dev, 0); |
| 440 | retval = -EAGAIN; | 428 | retval = -EAGAIN; |
| 441 | goto out; | 429 | goto out; |
| @@ -453,31 +441,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 453 | } | 441 | } |
| 454 | 442 | ||
| 455 | out: | 443 | out: |
| 456 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 444 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
| 457 | 445 | ||
| 458 | return retval; | 446 | return retval; |
| 459 | |||
| 460 | fail: | ||
| 461 | __update_runtime_status(dev, RPM_ACTIVE); | ||
| 462 | dev->power.deferred_resume = false; | ||
| 463 | wake_up_all(&dev->power.wait_queue); | ||
| 464 | |||
| 465 | if (retval == -EAGAIN || retval == -EBUSY) { | ||
| 466 | dev->power.runtime_error = 0; | ||
| 467 | |||
| 468 | /* | ||
| 469 | * If the callback routine failed an autosuspend, and | ||
| 470 | * if the last_busy time has been updated so that there | ||
| 471 | * is a new autosuspend expiration time, automatically | ||
| 472 | * reschedule another autosuspend. | ||
| 473 | */ | ||
| 474 | if ((rpmflags & RPM_AUTO) && | ||
| 475 | pm_runtime_autosuspend_expiration(dev) != 0) | ||
| 476 | goto repeat; | ||
| 477 | } else { | ||
| 478 | pm_runtime_cancel_pending(dev); | ||
| 479 | } | ||
| 480 | goto out; | ||
| 481 | } | 447 | } |
| 482 | 448 | ||
| 483 | /** | 449 | /** |
| @@ -504,14 +470,11 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 504 | struct device *parent = NULL; | 470 | struct device *parent = NULL; |
| 505 | int retval = 0; | 471 | int retval = 0; |
| 506 | 472 | ||
| 507 | trace_rpm_resume(dev, rpmflags); | 473 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
| 508 | 474 | ||
| 509 | repeat: | 475 | repeat: |
| 510 | if (dev->power.runtime_error) | 476 | if (dev->power.runtime_error) |
| 511 | retval = -EINVAL; | 477 | retval = -EINVAL; |
| 512 | else if (dev->power.disable_depth == 1 && dev->power.is_suspended | ||
| 513 | && dev->power.runtime_status == RPM_ACTIVE) | ||
| 514 | retval = 1; | ||
| 515 | else if (dev->power.disable_depth > 0) | 478 | else if (dev->power.disable_depth > 0) |
| 516 | retval = -EACCES; | 479 | retval = -EACCES; |
| 517 | if (retval) | 480 | if (retval) |
| @@ -544,15 +507,6 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 544 | goto out; | 507 | goto out; |
| 545 | } | 508 | } |
| 546 | 509 | ||
| 547 | if (dev->power.irq_safe) { | ||
| 548 | spin_unlock(&dev->power.lock); | ||
| 549 | |||
| 550 | cpu_relax(); | ||
| 551 | |||
| 552 | spin_lock(&dev->power.lock); | ||
| 553 | goto repeat; | ||
| 554 | } | ||
| 555 | |||
| 556 | /* Wait for the operation carried out in parallel with us. */ | 510 | /* Wait for the operation carried out in parallel with us. */ |
| 557 | for (;;) { | 511 | for (;;) { |
| 558 | prepare_to_wait(&dev->power.wait_queue, &wait, | 512 | prepare_to_wait(&dev->power.wait_queue, &wait, |
| @@ -583,7 +537,6 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 583 | || dev->parent->power.runtime_status == RPM_ACTIVE) { | 537 | || dev->parent->power.runtime_status == RPM_ACTIVE) { |
| 584 | atomic_inc(&dev->parent->power.child_count); | 538 | atomic_inc(&dev->parent->power.child_count); |
| 585 | spin_unlock(&dev->parent->power.lock); | 539 | spin_unlock(&dev->parent->power.lock); |
| 586 | retval = 1; | ||
| 587 | goto no_callback; /* Assume success. */ | 540 | goto no_callback; /* Assume success. */ |
| 588 | } | 541 | } |
| 589 | spin_unlock(&dev->parent->power.lock); | 542 | spin_unlock(&dev->parent->power.lock); |
| @@ -649,9 +602,6 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 649 | else | 602 | else |
| 650 | callback = NULL; | 603 | callback = NULL; |
| 651 | 604 | ||
| 652 | if (!callback && dev->driver && dev->driver->pm) | ||
| 653 | callback = dev->driver->pm->runtime_resume; | ||
| 654 | |||
| 655 | retval = rpm_callback(callback, dev); | 605 | retval = rpm_callback(callback, dev); |
| 656 | if (retval) { | 606 | if (retval) { |
| 657 | __update_runtime_status(dev, RPM_SUSPENDED); | 607 | __update_runtime_status(dev, RPM_SUSPENDED); |
| @@ -664,7 +614,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 664 | } | 614 | } |
| 665 | wake_up_all(&dev->power.wait_queue); | 615 | wake_up_all(&dev->power.wait_queue); |
| 666 | 616 | ||
| 667 | if (retval >= 0) | 617 | if (!retval) |
| 668 | rpm_idle(dev, RPM_ASYNC); | 618 | rpm_idle(dev, RPM_ASYNC); |
| 669 | 619 | ||
| 670 | out: | 620 | out: |
| @@ -676,7 +626,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 676 | spin_lock_irq(&dev->power.lock); | 626 | spin_lock_irq(&dev->power.lock); |
| 677 | } | 627 | } |
| 678 | 628 | ||
| 679 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 629 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
| 680 | 630 | ||
| 681 | return retval; | 631 | return retval; |
| 682 | } | 632 | } |
| @@ -793,15 +743,14 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend); | |||
| 793 | * return immediately if it is larger than zero. Then carry out an idle | 743 | * return immediately if it is larger than zero. Then carry out an idle |
| 794 | * notification, either synchronous or asynchronous. | 744 | * notification, either synchronous or asynchronous. |
| 795 | * | 745 | * |
| 796 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, | 746 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. |
| 797 | * or if pm_runtime_irq_safe() has been called. | ||
| 798 | */ | 747 | */ |
| 799 | int __pm_runtime_idle(struct device *dev, int rpmflags) | 748 | int __pm_runtime_idle(struct device *dev, int rpmflags) |
| 800 | { | 749 | { |
| 801 | unsigned long flags; | 750 | unsigned long flags; |
| 802 | int retval; | 751 | int retval; |
| 803 | 752 | ||
| 804 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | 753 | might_sleep_if(!(rpmflags & RPM_ASYNC)); |
| 805 | 754 | ||
| 806 | if (rpmflags & RPM_GET_PUT) { | 755 | if (rpmflags & RPM_GET_PUT) { |
| 807 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 756 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
| @@ -825,8 +774,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle); | |||
| 825 | * return immediately if it is larger than zero. Then carry out a suspend, | 774 | * return immediately if it is larger than zero. Then carry out a suspend, |
| 826 | * either synchronous or asynchronous. | 775 | * either synchronous or asynchronous. |
| 827 | * | 776 | * |
| 828 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, | 777 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. |
| 829 | * or if pm_runtime_irq_safe() has been called. | ||
| 830 | */ | 778 | */ |
| 831 | int __pm_runtime_suspend(struct device *dev, int rpmflags) | 779 | int __pm_runtime_suspend(struct device *dev, int rpmflags) |
| 832 | { | 780 | { |
| @@ -856,8 +804,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend); | |||
| 856 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then | 804 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then |
| 857 | * carry out a resume, either synchronous or asynchronous. | 805 | * carry out a resume, either synchronous or asynchronous. |
| 858 | * | 806 | * |
| 859 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, | 807 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. |
| 860 | * or if pm_runtime_irq_safe() has been called. | ||
| 861 | */ | 808 | */ |
| 862 | int __pm_runtime_resume(struct device *dev, int rpmflags) | 809 | int __pm_runtime_resume(struct device *dev, int rpmflags) |
| 863 | { | 810 | { |
| @@ -1055,6 +1002,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier); | |||
| 1055 | */ | 1002 | */ |
| 1056 | void __pm_runtime_disable(struct device *dev, bool check_resume) | 1003 | void __pm_runtime_disable(struct device *dev, bool check_resume) |
| 1057 | { | 1004 | { |
| 1005 | might_sleep(); | ||
| 1058 | spin_lock_irq(&dev->power.lock); | 1006 | spin_lock_irq(&dev->power.lock); |
| 1059 | 1007 | ||
| 1060 | if (dev->power.disable_depth > 0) { | 1008 | if (dev->power.disable_depth > 0) { |
| @@ -1240,6 +1188,8 @@ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) | |||
| 1240 | { | 1188 | { |
| 1241 | int old_delay, old_use; | 1189 | int old_delay, old_use; |
| 1242 | 1190 | ||
| 1191 | might_sleep(); | ||
| 1192 | |||
| 1243 | spin_lock_irq(&dev->power.lock); | 1193 | spin_lock_irq(&dev->power.lock); |
| 1244 | old_delay = dev->power.autosuspend_delay; | 1194 | old_delay = dev->power.autosuspend_delay; |
| 1245 | old_use = dev->power.use_autosuspend; | 1195 | old_use = dev->power.use_autosuspend; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 50d16e3cb0a..17b7934f31c 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
| @@ -4,8 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
| 6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
| 7 | #include <linux/export.h> | ||
| 8 | #include <linux/pm_qos.h> | ||
| 9 | #include <linux/pm_runtime.h> | 7 | #include <linux/pm_runtime.h> |
| 10 | #include <linux/atomic.h> | 8 | #include <linux/atomic.h> |
| 11 | #include <linux/jiffies.h> | 9 | #include <linux/jiffies.h> |
| @@ -218,85 +216,6 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, | |||
| 218 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | 216 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, |
| 219 | autosuspend_delay_ms_store); | 217 | autosuspend_delay_ms_store); |
| 220 | 218 | ||
| 221 | static ssize_t pm_qos_latency_show(struct device *dev, | ||
| 222 | struct device_attribute *attr, char *buf) | ||
| 223 | { | ||
| 224 | return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); | ||
| 225 | } | ||
| 226 | |||
| 227 | static ssize_t pm_qos_latency_store(struct device *dev, | ||
| 228 | struct device_attribute *attr, | ||
| 229 | const char *buf, size_t n) | ||
| 230 | { | ||
| 231 | s32 value; | ||
| 232 | int ret; | ||
| 233 | |||
| 234 | if (kstrtos32(buf, 0, &value)) | ||
| 235 | return -EINVAL; | ||
| 236 | |||
| 237 | if (value < 0) | ||
| 238 | return -EINVAL; | ||
| 239 | |||
| 240 | ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); | ||
| 241 | return ret < 0 ? ret : n; | ||
| 242 | } | ||
| 243 | |||
| 244 | static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, | ||
| 245 | pm_qos_latency_show, pm_qos_latency_store); | ||
| 246 | |||
| 247 | static ssize_t pm_qos_no_power_off_show(struct device *dev, | ||
| 248 | struct device_attribute *attr, | ||
| 249 | char *buf) | ||
| 250 | { | ||
| 251 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
| 252 | & PM_QOS_FLAG_NO_POWER_OFF)); | ||
| 253 | } | ||
| 254 | |||
| 255 | static ssize_t pm_qos_no_power_off_store(struct device *dev, | ||
| 256 | struct device_attribute *attr, | ||
| 257 | const char *buf, size_t n) | ||
| 258 | { | ||
| 259 | int ret; | ||
| 260 | |||
| 261 | if (kstrtoint(buf, 0, &ret)) | ||
| 262 | return -EINVAL; | ||
| 263 | |||
| 264 | if (ret != 0 && ret != 1) | ||
| 265 | return -EINVAL; | ||
| 266 | |||
| 267 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); | ||
| 268 | return ret < 0 ? ret : n; | ||
| 269 | } | ||
| 270 | |||
| 271 | static DEVICE_ATTR(pm_qos_no_power_off, 0644, | ||
| 272 | pm_qos_no_power_off_show, pm_qos_no_power_off_store); | ||
| 273 | |||
| 274 | static ssize_t pm_qos_remote_wakeup_show(struct device *dev, | ||
| 275 | struct device_attribute *attr, | ||
| 276 | char *buf) | ||
| 277 | { | ||
| 278 | return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) | ||
| 279 | & PM_QOS_FLAG_REMOTE_WAKEUP)); | ||
| 280 | } | ||
| 281 | |||
| 282 | static ssize_t pm_qos_remote_wakeup_store(struct device *dev, | ||
| 283 | struct device_attribute *attr, | ||
| 284 | const char *buf, size_t n) | ||
| 285 | { | ||
| 286 | int ret; | ||
| 287 | |||
| 288 | if (kstrtoint(buf, 0, &ret)) | ||
| 289 | return -EINVAL; | ||
| 290 | |||
| 291 | if (ret != 0 && ret != 1) | ||
| 292 | return -EINVAL; | ||
| 293 | |||
| 294 | ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret); | ||
| 295 | return ret < 0 ? ret : n; | ||
| 296 | } | ||
| 297 | |||
| 298 | static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, | ||
| 299 | pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store); | ||
| 300 | #endif /* CONFIG_PM_RUNTIME */ | 219 | #endif /* CONFIG_PM_RUNTIME */ |
| 301 | 220 | ||
| 302 | #ifdef CONFIG_PM_SLEEP | 221 | #ifdef CONFIG_PM_SLEEP |
| @@ -368,41 +287,22 @@ static ssize_t wakeup_active_count_show(struct device *dev, | |||
| 368 | 287 | ||
| 369 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); | 288 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); |
| 370 | 289 | ||
| 371 | static ssize_t wakeup_abort_count_show(struct device *dev, | 290 | static ssize_t wakeup_hit_count_show(struct device *dev, |
| 372 | struct device_attribute *attr, | 291 | struct device_attribute *attr, char *buf) |
| 373 | char *buf) | ||
| 374 | { | ||
| 375 | unsigned long count = 0; | ||
| 376 | bool enabled = false; | ||
| 377 | |||
| 378 | spin_lock_irq(&dev->power.lock); | ||
| 379 | if (dev->power.wakeup) { | ||
| 380 | count = dev->power.wakeup->wakeup_count; | ||
| 381 | enabled = true; | ||
| 382 | } | ||
| 383 | spin_unlock_irq(&dev->power.lock); | ||
| 384 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
| 385 | } | ||
| 386 | |||
| 387 | static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL); | ||
| 388 | |||
| 389 | static ssize_t wakeup_expire_count_show(struct device *dev, | ||
| 390 | struct device_attribute *attr, | ||
| 391 | char *buf) | ||
| 392 | { | 292 | { |
| 393 | unsigned long count = 0; | 293 | unsigned long count = 0; |
| 394 | bool enabled = false; | 294 | bool enabled = false; |
| 395 | 295 | ||
| 396 | spin_lock_irq(&dev->power.lock); | 296 | spin_lock_irq(&dev->power.lock); |
| 397 | if (dev->power.wakeup) { | 297 | if (dev->power.wakeup) { |
| 398 | count = dev->power.wakeup->expire_count; | 298 | count = dev->power.wakeup->hit_count; |
| 399 | enabled = true; | 299 | enabled = true; |
| 400 | } | 300 | } |
| 401 | spin_unlock_irq(&dev->power.lock); | 301 | spin_unlock_irq(&dev->power.lock); |
| 402 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | 302 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); |
| 403 | } | 303 | } |
| 404 | 304 | ||
| 405 | static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL); | 305 | static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); |
| 406 | 306 | ||
| 407 | static ssize_t wakeup_active_show(struct device *dev, | 307 | static ssize_t wakeup_active_show(struct device *dev, |
| 408 | struct device_attribute *attr, char *buf) | 308 | struct device_attribute *attr, char *buf) |
| @@ -471,27 +371,6 @@ static ssize_t wakeup_last_time_show(struct device *dev, | |||
| 471 | } | 371 | } |
| 472 | 372 | ||
| 473 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); | 373 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); |
| 474 | |||
| 475 | #ifdef CONFIG_PM_AUTOSLEEP | ||
| 476 | static ssize_t wakeup_prevent_sleep_time_show(struct device *dev, | ||
| 477 | struct device_attribute *attr, | ||
| 478 | char *buf) | ||
| 479 | { | ||
| 480 | s64 msec = 0; | ||
| 481 | bool enabled = false; | ||
| 482 | |||
| 483 | spin_lock_irq(&dev->power.lock); | ||
| 484 | if (dev->power.wakeup) { | ||
| 485 | msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); | ||
| 486 | enabled = true; | ||
| 487 | } | ||
| 488 | spin_unlock_irq(&dev->power.lock); | ||
| 489 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
| 490 | } | ||
| 491 | |||
| 492 | static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444, | ||
| 493 | wakeup_prevent_sleep_time_show, NULL); | ||
| 494 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
| 495 | #endif /* CONFIG_PM_SLEEP */ | 374 | #endif /* CONFIG_PM_SLEEP */ |
| 496 | 375 | ||
| 497 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 376 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
| @@ -528,8 +407,6 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); | |||
| 528 | 407 | ||
| 529 | #endif | 408 | #endif |
| 530 | 409 | ||
| 531 | #ifdef CONFIG_PM_SLEEP | ||
| 532 | |||
| 533 | static ssize_t async_show(struct device *dev, struct device_attribute *attr, | 410 | static ssize_t async_show(struct device *dev, struct device_attribute *attr, |
| 534 | char *buf) | 411 | char *buf) |
| 535 | { | 412 | { |
| @@ -556,8 +433,6 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, | |||
| 556 | } | 433 | } |
| 557 | 434 | ||
| 558 | static DEVICE_ATTR(async, 0644, async_show, async_store); | 435 | static DEVICE_ATTR(async, 0644, async_show, async_store); |
| 559 | |||
| 560 | #endif | ||
| 561 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ | 436 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
| 562 | 437 | ||
| 563 | static struct attribute *power_attrs[] = { | 438 | static struct attribute *power_attrs[] = { |
| @@ -584,15 +459,11 @@ static struct attribute *wakeup_attrs[] = { | |||
| 584 | &dev_attr_wakeup.attr, | 459 | &dev_attr_wakeup.attr, |
| 585 | &dev_attr_wakeup_count.attr, | 460 | &dev_attr_wakeup_count.attr, |
| 586 | &dev_attr_wakeup_active_count.attr, | 461 | &dev_attr_wakeup_active_count.attr, |
| 587 | &dev_attr_wakeup_abort_count.attr, | 462 | &dev_attr_wakeup_hit_count.attr, |
| 588 | &dev_attr_wakeup_expire_count.attr, | ||
| 589 | &dev_attr_wakeup_active.attr, | 463 | &dev_attr_wakeup_active.attr, |
| 590 | &dev_attr_wakeup_total_time_ms.attr, | 464 | &dev_attr_wakeup_total_time_ms.attr, |
| 591 | &dev_attr_wakeup_max_time_ms.attr, | 465 | &dev_attr_wakeup_max_time_ms.attr, |
| 592 | &dev_attr_wakeup_last_time_ms.attr, | 466 | &dev_attr_wakeup_last_time_ms.attr, |
| 593 | #ifdef CONFIG_PM_AUTOSLEEP | ||
| 594 | &dev_attr_wakeup_prevent_sleep_time_ms.attr, | ||
| 595 | #endif | ||
| 596 | #endif | 467 | #endif |
| 597 | NULL, | 468 | NULL, |
| 598 | }; | 469 | }; |
| @@ -618,29 +489,6 @@ static struct attribute_group pm_runtime_attr_group = { | |||
| 618 | .attrs = runtime_attrs, | 489 | .attrs = runtime_attrs, |
| 619 | }; | 490 | }; |
| 620 | 491 | ||
| 621 | static struct attribute *pm_qos_latency_attrs[] = { | ||
| 622 | #ifdef CONFIG_PM_RUNTIME | ||
| 623 | &dev_attr_pm_qos_resume_latency_us.attr, | ||
| 624 | #endif /* CONFIG_PM_RUNTIME */ | ||
| 625 | NULL, | ||
| 626 | }; | ||
| 627 | static struct attribute_group pm_qos_latency_attr_group = { | ||
| 628 | .name = power_group_name, | ||
| 629 | .attrs = pm_qos_latency_attrs, | ||
| 630 | }; | ||
| 631 | |||
| 632 | static struct attribute *pm_qos_flags_attrs[] = { | ||
| 633 | #ifdef CONFIG_PM_RUNTIME | ||
| 634 | &dev_attr_pm_qos_no_power_off.attr, | ||
| 635 | &dev_attr_pm_qos_remote_wakeup.attr, | ||
| 636 | #endif /* CONFIG_PM_RUNTIME */ | ||
| 637 | NULL, | ||
| 638 | }; | ||
| 639 | static struct attribute_group pm_qos_flags_attr_group = { | ||
| 640 | .name = power_group_name, | ||
| 641 | .attrs = pm_qos_flags_attrs, | ||
| 642 | }; | ||
| 643 | |||
| 644 | int dpm_sysfs_add(struct device *dev) | 492 | int dpm_sysfs_add(struct device *dev) |
| 645 | { | 493 | { |
| 646 | int rc; | 494 | int rc; |
| @@ -681,26 +529,6 @@ void wakeup_sysfs_remove(struct device *dev) | |||
| 681 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | 529 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
| 682 | } | 530 | } |
| 683 | 531 | ||
| 684 | int pm_qos_sysfs_add_latency(struct device *dev) | ||
| 685 | { | ||
| 686 | return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
| 687 | } | ||
| 688 | |||
| 689 | void pm_qos_sysfs_remove_latency(struct device *dev) | ||
| 690 | { | ||
| 691 | sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); | ||
| 692 | } | ||
| 693 | |||
| 694 | int pm_qos_sysfs_add_flags(struct device *dev) | ||
| 695 | { | ||
| 696 | return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); | ||
| 697 | } | ||
| 698 | |||
| 699 | void pm_qos_sysfs_remove_flags(struct device *dev) | ||
| 700 | { | ||
| 701 | sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); | ||
| 702 | } | ||
| 703 | |||
| 704 | void rpm_sysfs_remove(struct device *dev) | 532 | void rpm_sysfs_remove(struct device *dev) |
| 705 | { | 533 | { |
| 706 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); | 534 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); |
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index d94a1f5121c..af10abecb99 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/resume-trace.h> | 10 | #include <linux/resume-trace.h> |
| 11 | #include <linux/export.h> | ||
| 12 | #include <linux/rtc.h> | 11 | #include <linux/rtc.h> |
| 13 | 12 | ||
| 14 | #include <asm/rtc.h> | 13 | #include <asm/rtc.h> |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index e6ee5e80e54..84f7c7d5a09 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -10,19 +10,19 @@ | |||
| 10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
| 12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
| 13 | #include <linux/export.h> | ||
| 14 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
| 15 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
| 16 | #include <linux/debugfs.h> | 15 | #include <linux/debugfs.h> |
| 17 | #include <trace/events/power.h> | ||
| 18 | 16 | ||
| 19 | #include "power.h" | 17 | #include "power.h" |
| 20 | 18 | ||
| 19 | #define TIMEOUT 100 | ||
| 20 | |||
| 21 | /* | 21 | /* |
| 22 | * If set, the suspend/hibernate code will abort transitions to a sleep state | 22 | * If set, the suspend/hibernate code will abort transitions to a sleep state |
| 23 | * if wakeup events are registered during or immediately before the transition. | 23 | * if wakeup events are registered during or immediately before the transition. |
| 24 | */ | 24 | */ |
| 25 | bool events_check_enabled __read_mostly; | 25 | bool events_check_enabled; |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * Combined counters of registered wakeup events and wakeup events in progress. | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
| @@ -51,25 +51,6 @@ static void pm_wakeup_timer_fn(unsigned long data); | |||
| 51 | 51 | ||
| 52 | static LIST_HEAD(wakeup_sources); | 52 | static LIST_HEAD(wakeup_sources); |
| 53 | 53 | ||
| 54 | static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); | ||
| 55 | |||
| 56 | /** | ||
| 57 | * wakeup_source_prepare - Prepare a new wakeup source for initialization. | ||
| 58 | * @ws: Wakeup source to prepare. | ||
| 59 | * @name: Pointer to the name of the new wakeup source. | ||
| 60 | * | ||
| 61 | * Callers must ensure that the @name string won't be freed when @ws is still in | ||
| 62 | * use. | ||
| 63 | */ | ||
| 64 | void wakeup_source_prepare(struct wakeup_source *ws, const char *name) | ||
| 65 | { | ||
| 66 | if (ws) { | ||
| 67 | memset(ws, 0, sizeof(*ws)); | ||
| 68 | ws->name = name; | ||
| 69 | } | ||
| 70 | } | ||
| 71 | EXPORT_SYMBOL_GPL(wakeup_source_prepare); | ||
| 72 | |||
| 73 | /** | 54 | /** |
| 74 | * wakeup_source_create - Create a struct wakeup_source object. | 55 | * wakeup_source_create - Create a struct wakeup_source object. |
| 75 | * @name: Name of the new wakeup source. | 56 | * @name: Name of the new wakeup source. |
| @@ -78,44 +59,37 @@ struct wakeup_source *wakeup_source_create(const char *name) | |||
| 78 | { | 59 | { |
| 79 | struct wakeup_source *ws; | 60 | struct wakeup_source *ws; |
| 80 | 61 | ||
| 81 | ws = kmalloc(sizeof(*ws), GFP_KERNEL); | 62 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); |
| 82 | if (!ws) | 63 | if (!ws) |
| 83 | return NULL; | 64 | return NULL; |
| 84 | 65 | ||
| 85 | wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); | 66 | spin_lock_init(&ws->lock); |
| 67 | if (name) | ||
| 68 | ws->name = kstrdup(name, GFP_KERNEL); | ||
| 69 | |||
| 86 | return ws; | 70 | return ws; |
| 87 | } | 71 | } |
| 88 | EXPORT_SYMBOL_GPL(wakeup_source_create); | 72 | EXPORT_SYMBOL_GPL(wakeup_source_create); |
| 89 | 73 | ||
| 90 | /** | 74 | /** |
| 91 | * wakeup_source_drop - Prepare a struct wakeup_source object for destruction. | ||
| 92 | * @ws: Wakeup source to prepare for destruction. | ||
| 93 | * | ||
| 94 | * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never | ||
| 95 | * be run in parallel with this function for the same wakeup source object. | ||
| 96 | */ | ||
| 97 | void wakeup_source_drop(struct wakeup_source *ws) | ||
| 98 | { | ||
| 99 | if (!ws) | ||
| 100 | return; | ||
| 101 | |||
| 102 | del_timer_sync(&ws->timer); | ||
| 103 | __pm_relax(ws); | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL_GPL(wakeup_source_drop); | ||
| 106 | |||
| 107 | /** | ||
| 108 | * wakeup_source_destroy - Destroy a struct wakeup_source object. | 75 | * wakeup_source_destroy - Destroy a struct wakeup_source object. |
| 109 | * @ws: Wakeup source to destroy. | 76 | * @ws: Wakeup source to destroy. |
| 110 | * | ||
| 111 | * Use only for wakeup source objects created with wakeup_source_create(). | ||
| 112 | */ | 77 | */ |
| 113 | void wakeup_source_destroy(struct wakeup_source *ws) | 78 | void wakeup_source_destroy(struct wakeup_source *ws) |
| 114 | { | 79 | { |
| 115 | if (!ws) | 80 | if (!ws) |
| 116 | return; | 81 | return; |
| 117 | 82 | ||
| 118 | wakeup_source_drop(ws); | 83 | spin_lock_irq(&ws->lock); |
| 84 | while (ws->active) { | ||
| 85 | spin_unlock_irq(&ws->lock); | ||
| 86 | |||
| 87 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
| 88 | |||
| 89 | spin_lock_irq(&ws->lock); | ||
| 90 | } | ||
| 91 | spin_unlock_irq(&ws->lock); | ||
| 92 | |||
| 119 | kfree(ws->name); | 93 | kfree(ws->name); |
| 120 | kfree(ws); | 94 | kfree(ws); |
| 121 | } | 95 | } |
| @@ -127,19 +101,15 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy); | |||
| 127 | */ | 101 | */ |
| 128 | void wakeup_source_add(struct wakeup_source *ws) | 102 | void wakeup_source_add(struct wakeup_source *ws) |
| 129 | { | 103 | { |
| 130 | unsigned long flags; | ||
| 131 | |||
| 132 | if (WARN_ON(!ws)) | 104 | if (WARN_ON(!ws)) |
| 133 | return; | 105 | return; |
| 134 | 106 | ||
| 135 | spin_lock_init(&ws->lock); | ||
| 136 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); | 107 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); |
| 137 | ws->active = false; | 108 | ws->active = false; |
| 138 | ws->last_time = ktime_get(); | ||
| 139 | 109 | ||
| 140 | spin_lock_irqsave(&events_lock, flags); | 110 | spin_lock_irq(&events_lock); |
| 141 | list_add_rcu(&ws->entry, &wakeup_sources); | 111 | list_add_rcu(&ws->entry, &wakeup_sources); |
| 142 | spin_unlock_irqrestore(&events_lock, flags); | 112 | spin_unlock_irq(&events_lock); |
| 143 | } | 113 | } |
| 144 | EXPORT_SYMBOL_GPL(wakeup_source_add); | 114 | EXPORT_SYMBOL_GPL(wakeup_source_add); |
| 145 | 115 | ||
| @@ -149,14 +119,12 @@ EXPORT_SYMBOL_GPL(wakeup_source_add); | |||
| 149 | */ | 119 | */ |
| 150 | void wakeup_source_remove(struct wakeup_source *ws) | 120 | void wakeup_source_remove(struct wakeup_source *ws) |
| 151 | { | 121 | { |
| 152 | unsigned long flags; | ||
| 153 | |||
| 154 | if (WARN_ON(!ws)) | 122 | if (WARN_ON(!ws)) |
| 155 | return; | 123 | return; |
| 156 | 124 | ||
| 157 | spin_lock_irqsave(&events_lock, flags); | 125 | spin_lock_irq(&events_lock); |
| 158 | list_del_rcu(&ws->entry); | 126 | list_del_rcu(&ws->entry); |
| 159 | spin_unlock_irqrestore(&events_lock, flags); | 127 | spin_unlock_irq(&events_lock); |
| 160 | synchronize_rcu(); | 128 | synchronize_rcu(); |
| 161 | } | 129 | } |
| 162 | EXPORT_SYMBOL_GPL(wakeup_source_remove); | 130 | EXPORT_SYMBOL_GPL(wakeup_source_remove); |
| @@ -183,10 +151,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_register); | |||
| 183 | */ | 151 | */ |
| 184 | void wakeup_source_unregister(struct wakeup_source *ws) | 152 | void wakeup_source_unregister(struct wakeup_source *ws) |
| 185 | { | 153 | { |
| 186 | if (ws) { | 154 | wakeup_source_remove(ws); |
| 187 | wakeup_source_remove(ws); | 155 | wakeup_source_destroy(ws); |
| 188 | wakeup_source_destroy(ws); | ||
| 189 | } | ||
| 190 | } | 156 | } |
| 191 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); | 157 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); |
| 192 | 158 | ||
| @@ -310,9 +276,7 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | |||
| 310 | * | 276 | * |
| 311 | * By default, most devices should leave wakeup disabled. The exceptions are | 277 | * By default, most devices should leave wakeup disabled. The exceptions are |
| 312 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, | 278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, |
| 313 | * possibly network interfaces, etc. Also, devices that don't generate their | 279 | * possibly network interfaces, etc. |
| 314 | * own wakeup requests but merely forward requests from one bus to another | ||
| 315 | * (like PCI bridges) should have wakeup enabled by default. | ||
| 316 | */ | 280 | */ |
| 317 | int device_init_wakeup(struct device *dev, bool enable) | 281 | int device_init_wakeup(struct device *dev, bool enable) |
| 318 | { | 282 | { |
| @@ -380,33 +344,13 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_enable); | |||
| 380 | */ | 344 | */ |
| 381 | static void wakeup_source_activate(struct wakeup_source *ws) | 345 | static void wakeup_source_activate(struct wakeup_source *ws) |
| 382 | { | 346 | { |
| 383 | unsigned int cec; | ||
| 384 | |||
| 385 | ws->active = true; | 347 | ws->active = true; |
| 386 | ws->active_count++; | 348 | ws->active_count++; |
| 349 | ws->timer_expires = jiffies; | ||
| 387 | ws->last_time = ktime_get(); | 350 | ws->last_time = ktime_get(); |
| 388 | if (ws->autosleep_enabled) | ||
| 389 | ws->start_prevent_time = ws->last_time; | ||
| 390 | 351 | ||
| 391 | /* Increment the counter of events in progress. */ | 352 | /* Increment the counter of events in progress. */ |
| 392 | cec = atomic_inc_return(&combined_event_count); | 353 | atomic_inc(&combined_event_count); |
| 393 | |||
| 394 | trace_wakeup_source_activate(ws->name, cec); | ||
| 395 | } | ||
| 396 | |||
| 397 | /** | ||
| 398 | * wakeup_source_report_event - Report wakeup event using the given source. | ||
| 399 | * @ws: Wakeup source to report the event for. | ||
| 400 | */ | ||
| 401 | static void wakeup_source_report_event(struct wakeup_source *ws) | ||
| 402 | { | ||
| 403 | ws->event_count++; | ||
| 404 | /* This is racy, but the counter is approximate anyway. */ | ||
| 405 | if (events_check_enabled) | ||
| 406 | ws->wakeup_count++; | ||
| 407 | |||
| 408 | if (!ws->active) | ||
| 409 | wakeup_source_activate(ws); | ||
| 410 | } | 354 | } |
| 411 | 355 | ||
| 412 | /** | 356 | /** |
| @@ -423,11 +367,9 @@ void __pm_stay_awake(struct wakeup_source *ws) | |||
| 423 | return; | 367 | return; |
| 424 | 368 | ||
| 425 | spin_lock_irqsave(&ws->lock, flags); | 369 | spin_lock_irqsave(&ws->lock, flags); |
| 426 | 370 | ws->event_count++; | |
| 427 | wakeup_source_report_event(ws); | 371 | if (!ws->active) |
| 428 | del_timer(&ws->timer); | 372 | wakeup_source_activate(ws); |
| 429 | ws->timer_expires = 0; | ||
| 430 | |||
| 431 | spin_unlock_irqrestore(&ws->lock, flags); | 373 | spin_unlock_irqrestore(&ws->lock, flags); |
| 432 | } | 374 | } |
| 433 | EXPORT_SYMBOL_GPL(__pm_stay_awake); | 375 | EXPORT_SYMBOL_GPL(__pm_stay_awake); |
| @@ -456,17 +398,6 @@ void pm_stay_awake(struct device *dev) | |||
| 456 | } | 398 | } |
| 457 | EXPORT_SYMBOL_GPL(pm_stay_awake); | 399 | EXPORT_SYMBOL_GPL(pm_stay_awake); |
| 458 | 400 | ||
| 459 | #ifdef CONFIG_PM_AUTOSLEEP | ||
| 460 | static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) | ||
| 461 | { | ||
| 462 | ktime_t delta = ktime_sub(now, ws->start_prevent_time); | ||
| 463 | ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); | ||
| 464 | } | ||
| 465 | #else | ||
| 466 | static inline void update_prevent_sleep_time(struct wakeup_source *ws, | ||
| 467 | ktime_t now) {} | ||
| 468 | #endif | ||
| 469 | |||
| 470 | /** | 401 | /** |
| 471 | * wakup_source_deactivate - Mark given wakeup source as inactive. | 402 | * wakup_source_deactivate - Mark given wakeup source as inactive. |
| 472 | * @ws: Wakeup source to handle. | 403 | * @ws: Wakeup source to handle. |
| @@ -477,7 +408,6 @@ static inline void update_prevent_sleep_time(struct wakeup_source *ws, | |||
| 477 | */ | 408 | */ |
| 478 | static void wakeup_source_deactivate(struct wakeup_source *ws) | 409 | static void wakeup_source_deactivate(struct wakeup_source *ws) |
| 479 | { | 410 | { |
| 480 | unsigned int cnt, inpr, cec; | ||
| 481 | ktime_t duration; | 411 | ktime_t duration; |
| 482 | ktime_t now; | 412 | ktime_t now; |
| 483 | 413 | ||
| @@ -504,23 +434,13 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) | |||
| 504 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) | 434 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) |
| 505 | ws->max_time = duration; | 435 | ws->max_time = duration; |
| 506 | 436 | ||
| 507 | ws->last_time = now; | ||
| 508 | del_timer(&ws->timer); | 437 | del_timer(&ws->timer); |
| 509 | ws->timer_expires = 0; | ||
| 510 | |||
| 511 | if (ws->autosleep_enabled) | ||
| 512 | update_prevent_sleep_time(ws, now); | ||
| 513 | 438 | ||
| 514 | /* | 439 | /* |
| 515 | * Increment the counter of registered wakeup events and decrement the | 440 | * Increment the counter of registered wakeup events and decrement the |
| 516 | * couter of wakeup events in progress simultaneously. | 441 | * couter of wakeup events in progress simultaneously. |
| 517 | */ | 442 | */ |
| 518 | cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); | 443 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); |
| 519 | trace_wakeup_source_deactivate(ws->name, cec); | ||
| 520 | |||
| 521 | split_counters(&cnt, &inpr); | ||
| 522 | if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) | ||
| 523 | wake_up(&wakeup_count_wait_queue); | ||
| 524 | } | 444 | } |
| 525 | 445 | ||
| 526 | /** | 446 | /** |
| @@ -569,24 +489,11 @@ EXPORT_SYMBOL_GPL(pm_relax); | |||
| 569 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. | 489 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. |
| 570 | * @data: Address of the wakeup source object associated with the event source. | 490 | * @data: Address of the wakeup source object associated with the event source. |
| 571 | * | 491 | * |
| 572 | * Call wakeup_source_deactivate() for the wakeup source whose address is stored | 492 | * Call __pm_relax() for the wakeup source whose address is stored in @data. |
| 573 | * in @data if it is currently active and its timer has not been canceled and | ||
| 574 | * the expiration time of the timer is not in future. | ||
| 575 | */ | 493 | */ |
| 576 | static void pm_wakeup_timer_fn(unsigned long data) | 494 | static void pm_wakeup_timer_fn(unsigned long data) |
| 577 | { | 495 | { |
| 578 | struct wakeup_source *ws = (struct wakeup_source *)data; | 496 | __pm_relax((struct wakeup_source *)data); |
| 579 | unsigned long flags; | ||
| 580 | |||
| 581 | spin_lock_irqsave(&ws->lock, flags); | ||
| 582 | |||
| 583 | if (ws->active && ws->timer_expires | ||
| 584 | && time_after_eq(jiffies, ws->timer_expires)) { | ||
| 585 | wakeup_source_deactivate(ws); | ||
| 586 | ws->expire_count++; | ||
| 587 | } | ||
| 588 | |||
| 589 | spin_unlock_irqrestore(&ws->lock, flags); | ||
| 590 | } | 497 | } |
| 591 | 498 | ||
| 592 | /** | 499 | /** |
| @@ -611,7 +518,9 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | |||
| 611 | 518 | ||
| 612 | spin_lock_irqsave(&ws->lock, flags); | 519 | spin_lock_irqsave(&ws->lock, flags); |
| 613 | 520 | ||
| 614 | wakeup_source_report_event(ws); | 521 | ws->event_count++; |
| 522 | if (!ws->active) | ||
| 523 | wakeup_source_activate(ws); | ||
| 615 | 524 | ||
| 616 | if (!msec) { | 525 | if (!msec) { |
| 617 | wakeup_source_deactivate(ws); | 526 | wakeup_source_deactivate(ws); |
| @@ -622,7 +531,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | |||
| 622 | if (!expires) | 531 | if (!expires) |
| 623 | expires = 1; | 532 | expires = 1; |
| 624 | 533 | ||
| 625 | if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { | 534 | if (time_after(expires, ws->timer_expires)) { |
| 626 | mod_timer(&ws->timer, expires); | 535 | mod_timer(&ws->timer, expires); |
| 627 | ws->timer_expires = expires; | 536 | ws->timer_expires = expires; |
| 628 | } | 537 | } |
| @@ -653,28 +562,21 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) | |||
| 653 | } | 562 | } |
| 654 | EXPORT_SYMBOL_GPL(pm_wakeup_event); | 563 | EXPORT_SYMBOL_GPL(pm_wakeup_event); |
| 655 | 564 | ||
| 656 | static void print_active_wakeup_sources(void) | 565 | /** |
| 566 | * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. | ||
| 567 | */ | ||
| 568 | static void pm_wakeup_update_hit_counts(void) | ||
| 657 | { | 569 | { |
| 570 | unsigned long flags; | ||
| 658 | struct wakeup_source *ws; | 571 | struct wakeup_source *ws; |
| 659 | int active = 0; | ||
| 660 | struct wakeup_source *last_activity_ws = NULL; | ||
| 661 | 572 | ||
| 662 | rcu_read_lock(); | 573 | rcu_read_lock(); |
| 663 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { | 574 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { |
| 664 | if (ws->active) { | 575 | spin_lock_irqsave(&ws->lock, flags); |
| 665 | pr_info("active wakeup source: %s\n", ws->name); | 576 | if (ws->active) |
| 666 | active = 1; | 577 | ws->hit_count++; |
| 667 | } else if (!active && | 578 | spin_unlock_irqrestore(&ws->lock, flags); |
| 668 | (!last_activity_ws || | ||
| 669 | ktime_to_ns(ws->last_time) > | ||
| 670 | ktime_to_ns(last_activity_ws->last_time))) { | ||
| 671 | last_activity_ws = ws; | ||
| 672 | } | ||
| 673 | } | 579 | } |
| 674 | |||
| 675 | if (!active && last_activity_ws) | ||
| 676 | pr_info("last active wakeup source: %s\n", | ||
| 677 | last_activity_ws->name); | ||
| 678 | rcu_read_unlock(); | 580 | rcu_read_unlock(); |
| 679 | } | 581 | } |
| 680 | 582 | ||
| @@ -700,42 +602,32 @@ bool pm_wakeup_pending(void) | |||
| 700 | events_check_enabled = !ret; | 602 | events_check_enabled = !ret; |
| 701 | } | 603 | } |
| 702 | spin_unlock_irqrestore(&events_lock, flags); | 604 | spin_unlock_irqrestore(&events_lock, flags); |
| 703 | |||
| 704 | if (ret) | 605 | if (ret) |
| 705 | print_active_wakeup_sources(); | 606 | pm_wakeup_update_hit_counts(); |
| 706 | |||
| 707 | return ret; | 607 | return ret; |
| 708 | } | 608 | } |
| 709 | 609 | ||
| 710 | /** | 610 | /** |
| 711 | * pm_get_wakeup_count - Read the number of registered wakeup events. | 611 | * pm_get_wakeup_count - Read the number of registered wakeup events. |
| 712 | * @count: Address to store the value at. | 612 | * @count: Address to store the value at. |
| 713 | * @block: Whether or not to block. | ||
| 714 | * | 613 | * |
| 715 | * Store the number of registered wakeup events at the address in @count. If | 614 | * Store the number of registered wakeup events at the address in @count. Block |
| 716 | * @block is set, block until the current number of wakeup events being | 615 | * if the current number of wakeup events being processed is nonzero. |
| 717 | * processed is zero. | ||
| 718 | * | 616 | * |
| 719 | * Return 'false' if the current number of wakeup events being processed is | 617 | * Return 'false' if the wait for the number of wakeup events being processed to |
| 720 | * nonzero. Otherwise return 'true'. | 618 | * drop down to zero has been interrupted by a signal (and the current number |
| 619 | * of wakeup events being processed is still nonzero). Otherwise return 'true'. | ||
| 721 | */ | 620 | */ |
| 722 | bool pm_get_wakeup_count(unsigned int *count, bool block) | 621 | bool pm_get_wakeup_count(unsigned int *count) |
| 723 | { | 622 | { |
| 724 | unsigned int cnt, inpr; | 623 | unsigned int cnt, inpr; |
| 725 | 624 | ||
| 726 | if (block) { | 625 | for (;;) { |
| 727 | DEFINE_WAIT(wait); | 626 | split_counters(&cnt, &inpr); |
| 728 | 627 | if (inpr == 0 || signal_pending(current)) | |
| 729 | for (;;) { | 628 | break; |
| 730 | prepare_to_wait(&wakeup_count_wait_queue, &wait, | 629 | pm_wakeup_update_hit_counts(); |
| 731 | TASK_INTERRUPTIBLE); | 630 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); |
| 732 | split_counters(&cnt, &inpr); | ||
| 733 | if (inpr == 0 || signal_pending(current)) | ||
| 734 | break; | ||
| 735 | |||
| 736 | schedule(); | ||
| 737 | } | ||
| 738 | finish_wait(&wakeup_count_wait_queue, &wait); | ||
| 739 | } | 631 | } |
| 740 | 632 | ||
| 741 | split_counters(&cnt, &inpr); | 633 | split_counters(&cnt, &inpr); |
| @@ -756,47 +648,20 @@ bool pm_get_wakeup_count(unsigned int *count, bool block) | |||
| 756 | bool pm_save_wakeup_count(unsigned int count) | 648 | bool pm_save_wakeup_count(unsigned int count) |
| 757 | { | 649 | { |
| 758 | unsigned int cnt, inpr; | 650 | unsigned int cnt, inpr; |
| 759 | unsigned long flags; | ||
| 760 | 651 | ||
| 761 | events_check_enabled = false; | 652 | events_check_enabled = false; |
| 762 | spin_lock_irqsave(&events_lock, flags); | 653 | spin_lock_irq(&events_lock); |
| 763 | split_counters(&cnt, &inpr); | 654 | split_counters(&cnt, &inpr); |
| 764 | if (cnt == count && inpr == 0) { | 655 | if (cnt == count && inpr == 0) { |
| 765 | saved_count = count; | 656 | saved_count = count; |
| 766 | events_check_enabled = true; | 657 | events_check_enabled = true; |
| 767 | } | 658 | } |
| 768 | spin_unlock_irqrestore(&events_lock, flags); | 659 | spin_unlock_irq(&events_lock); |
| 660 | if (!events_check_enabled) | ||
| 661 | pm_wakeup_update_hit_counts(); | ||
| 769 | return events_check_enabled; | 662 | return events_check_enabled; |
| 770 | } | 663 | } |
| 771 | 664 | ||
| 772 | #ifdef CONFIG_PM_AUTOSLEEP | ||
| 773 | /** | ||
| 774 | * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. | ||
| 775 | * @enabled: Whether to set or to clear the autosleep_enabled flags. | ||
| 776 | */ | ||
| 777 | void pm_wakep_autosleep_enabled(bool set) | ||
| 778 | { | ||
| 779 | struct wakeup_source *ws; | ||
| 780 | ktime_t now = ktime_get(); | ||
| 781 | |||
| 782 | rcu_read_lock(); | ||
| 783 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { | ||
| 784 | spin_lock_irq(&ws->lock); | ||
| 785 | if (ws->autosleep_enabled != set) { | ||
| 786 | ws->autosleep_enabled = set; | ||
| 787 | if (ws->active) { | ||
| 788 | if (set) | ||
| 789 | ws->start_prevent_time = now; | ||
| 790 | else | ||
| 791 | update_prevent_sleep_time(ws, now); | ||
| 792 | } | ||
| 793 | } | ||
| 794 | spin_unlock_irq(&ws->lock); | ||
| 795 | } | ||
| 796 | rcu_read_unlock(); | ||
| 797 | } | ||
| 798 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
| 799 | |||
| 800 | static struct dentry *wakeup_sources_stats_dentry; | 665 | static struct dentry *wakeup_sources_stats_dentry; |
| 801 | 666 | ||
| 802 | /** | 667 | /** |
| @@ -812,37 +677,27 @@ static int print_wakeup_source_stats(struct seq_file *m, | |||
| 812 | ktime_t max_time; | 677 | ktime_t max_time; |
| 813 | unsigned long active_count; | 678 | unsigned long active_count; |
| 814 | ktime_t active_time; | 679 | ktime_t active_time; |
| 815 | ktime_t prevent_sleep_time; | ||
| 816 | int ret; | 680 | int ret; |
| 817 | 681 | ||
| 818 | spin_lock_irqsave(&ws->lock, flags); | 682 | spin_lock_irqsave(&ws->lock, flags); |
| 819 | 683 | ||
| 820 | total_time = ws->total_time; | 684 | total_time = ws->total_time; |
| 821 | max_time = ws->max_time; | 685 | max_time = ws->max_time; |
| 822 | prevent_sleep_time = ws->prevent_sleep_time; | ||
| 823 | active_count = ws->active_count; | 686 | active_count = ws->active_count; |
| 824 | if (ws->active) { | 687 | if (ws->active) { |
| 825 | ktime_t now = ktime_get(); | 688 | active_time = ktime_sub(ktime_get(), ws->last_time); |
| 826 | |||
| 827 | active_time = ktime_sub(now, ws->last_time); | ||
| 828 | total_time = ktime_add(total_time, active_time); | 689 | total_time = ktime_add(total_time, active_time); |
| 829 | if (active_time.tv64 > max_time.tv64) | 690 | if (active_time.tv64 > max_time.tv64) |
| 830 | max_time = active_time; | 691 | max_time = active_time; |
| 831 | |||
| 832 | if (ws->autosleep_enabled) | ||
| 833 | prevent_sleep_time = ktime_add(prevent_sleep_time, | ||
| 834 | ktime_sub(now, ws->start_prevent_time)); | ||
| 835 | } else { | 692 | } else { |
| 836 | active_time = ktime_set(0, 0); | 693 | active_time = ktime_set(0, 0); |
| 837 | } | 694 | } |
| 838 | 695 | ||
| 839 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t" | 696 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t" |
| 840 | "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", | 697 | "%lld\t\t%lld\t\t%lld\t\t%lld\n", |
| 841 | ws->name, active_count, ws->event_count, | 698 | ws->name, active_count, ws->event_count, ws->hit_count, |
| 842 | ws->wakeup_count, ws->expire_count, | ||
| 843 | ktime_to_ms(active_time), ktime_to_ms(total_time), | 699 | ktime_to_ms(active_time), ktime_to_ms(total_time), |
| 844 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time), | 700 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time)); |
| 845 | ktime_to_ms(prevent_sleep_time)); | ||
| 846 | 701 | ||
| 847 | spin_unlock_irqrestore(&ws->lock, flags); | 702 | spin_unlock_irqrestore(&ws->lock, flags); |
| 848 | 703 | ||
| @@ -857,9 +712,8 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused) | |||
| 857 | { | 712 | { |
| 858 | struct wakeup_source *ws; | 713 | struct wakeup_source *ws; |
| 859 | 714 | ||
| 860 | seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" | 715 | seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t" |
| 861 | "expire_count\tactive_since\ttotal_time\tmax_time\t" | 716 | "active_since\ttotal_time\tmax_time\tlast_change\n"); |
| 862 | "last_change\tprevent_suspend_time\n"); | ||
| 863 | 717 | ||
| 864 | rcu_read_lock(); | 718 | rcu_read_lock(); |
| 865 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) | 719 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) |
