diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-11-01 18:54:30 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-11-01 18:54:30 -0500 |
commit | dcf3d0183f100a14f60eb5993e124a4a1322ee9a (patch) | |
tree | f2263904aec923f249fbe18aaa7d20068b6ec90b /drivers/base | |
parent | 69f8947b8c4777f58d90cc79da2e124570d9e6f0 (diff) | |
parent | 2b1d88cda32f81685bae45c00bf517f77bcda3cd (diff) |
Merge branch 'pm-domains'
* pm-domains:
PM / Domains: Merge measurements for PM QoS device latencies
PM / Domains: Don't measure ->start|stop() latency in system PM callbacks
PM / Domains: Rename *pm_genpd_poweron|poweroff()
PM / Domains: Remove pm_genpd_poweron() API
PM / Domains: Remove pm_genpd_poweroff_unused() API
soc: dove: Let genpd deal with disabling of unused PM domains
PM / Domains: Remove in_progress counter from struct generic_pm_domain
PM / domains: Drop unused label
PM / Domains: Remove cpuidle attach
PM / Domains: Remove name based API for genpd
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/power/domain.c | 368 | ||||
-rw-r--r-- | drivers/base/power/domain_governor.c | 6 |
2 files changed, 75 insertions, 299 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 16550c63d611..a7dfdf9f15ba 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -34,43 +34,9 @@ | |||
34 | __ret; \ | 34 | __ret; \ |
35 | }) | 35 | }) |
36 | 36 | ||
37 | #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ | ||
38 | ({ \ | ||
39 | ktime_t __start = ktime_get(); \ | ||
40 | type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ | ||
41 | s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ | ||
42 | struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ | ||
43 | if (!__retval && __elapsed > __td->field) { \ | ||
44 | __td->field = __elapsed; \ | ||
45 | dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ | ||
46 | __elapsed); \ | ||
47 | genpd->max_off_time_changed = true; \ | ||
48 | __td->constraint_changed = true; \ | ||
49 | } \ | ||
50 | __retval; \ | ||
51 | }) | ||
52 | |||
53 | static LIST_HEAD(gpd_list); | 37 | static LIST_HEAD(gpd_list); |
54 | static DEFINE_MUTEX(gpd_list_lock); | 38 | static DEFINE_MUTEX(gpd_list_lock); |
55 | 39 | ||
56 | static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) | ||
57 | { | ||
58 | struct generic_pm_domain *genpd = NULL, *gpd; | ||
59 | |||
60 | if (IS_ERR_OR_NULL(domain_name)) | ||
61 | return NULL; | ||
62 | |||
63 | mutex_lock(&gpd_list_lock); | ||
64 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
65 | if (!strcmp(gpd->name, domain_name)) { | ||
66 | genpd = gpd; | ||
67 | break; | ||
68 | } | ||
69 | } | ||
70 | mutex_unlock(&gpd_list_lock); | ||
71 | return genpd; | ||
72 | } | ||
73 | |||
74 | /* | 40 | /* |
75 | * Get the generic PM domain for a particular struct device. | 41 | * Get the generic PM domain for a particular struct device. |
76 | * This validates the struct device pointer, the PM domain pointer, | 42 | * This validates the struct device pointer, the PM domain pointer, |
@@ -110,18 +76,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) | |||
110 | 76 | ||
111 | static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) | 77 | static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) |
112 | { | 78 | { |
113 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, | 79 | return GENPD_DEV_CALLBACK(genpd, int, stop, dev); |
114 | stop_latency_ns, "stop"); | ||
115 | } | 80 | } |
116 | 81 | ||
117 | static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, | 82 | static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) |
118 | bool timed) | ||
119 | { | 83 | { |
120 | if (!timed) | 84 | return GENPD_DEV_CALLBACK(genpd, int, start, dev); |
121 | return GENPD_DEV_CALLBACK(genpd, int, start, dev); | ||
122 | |||
123 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, | ||
124 | start_latency_ns, "start"); | ||
125 | } | 85 | } |
126 | 86 | ||
127 | static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) | 87 | static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) |
@@ -140,19 +100,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) | |||
140 | smp_mb__after_atomic(); | 100 | smp_mb__after_atomic(); |
141 | } | 101 | } |
142 | 102 | ||
143 | static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) | ||
144 | { | ||
145 | s64 usecs64; | ||
146 | |||
147 | if (!genpd->cpuidle_data) | ||
148 | return; | ||
149 | |||
150 | usecs64 = genpd->power_on_latency_ns; | ||
151 | do_div(usecs64, NSEC_PER_USEC); | ||
152 | usecs64 += genpd->cpuidle_data->saved_exit_latency; | ||
153 | genpd->cpuidle_data->idle_state->exit_latency = usecs64; | ||
154 | } | ||
155 | |||
156 | static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) | 103 | static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) |
157 | { | 104 | { |
158 | ktime_t time_start; | 105 | ktime_t time_start; |
@@ -176,7 +123,6 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) | |||
176 | 123 | ||
177 | genpd->power_on_latency_ns = elapsed_ns; | 124 | genpd->power_on_latency_ns = elapsed_ns; |
178 | genpd->max_off_time_changed = true; | 125 | genpd->max_off_time_changed = true; |
179 | genpd_recalc_cpu_exit_latency(genpd); | ||
180 | pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", | 126 | pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", |
181 | genpd->name, "on", elapsed_ns); | 127 | genpd->name, "on", elapsed_ns); |
182 | 128 | ||
@@ -213,10 +159,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) | |||
213 | } | 159 | } |
214 | 160 | ||
215 | /** | 161 | /** |
216 | * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). | 162 | * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). |
217 | * @genpd: PM domain to power off. | 163 | * @genpd: PM domain to power off. |
218 | * | 164 | * |
219 | * Queue up the execution of pm_genpd_poweroff() unless it's already been done | 165 | * Queue up the execution of genpd_poweroff() unless it's already been done |
220 | * before. | 166 | * before. |
221 | */ | 167 | */ |
222 | static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | 168 | static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) |
@@ -224,14 +170,16 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | |||
224 | queue_work(pm_wq, &genpd->power_off_work); | 170 | queue_work(pm_wq, &genpd->power_off_work); |
225 | } | 171 | } |
226 | 172 | ||
173 | static int genpd_poweron(struct generic_pm_domain *genpd); | ||
174 | |||
227 | /** | 175 | /** |
228 | * __pm_genpd_poweron - Restore power to a given PM domain and its masters. | 176 | * __genpd_poweron - Restore power to a given PM domain and its masters. |
229 | * @genpd: PM domain to power up. | 177 | * @genpd: PM domain to power up. |
230 | * | 178 | * |
231 | * Restore power to @genpd and all of its masters so that it is possible to | 179 | * Restore power to @genpd and all of its masters so that it is possible to |
232 | * resume a device belonging to it. | 180 | * resume a device belonging to it. |
233 | */ | 181 | */ |
234 | static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | 182 | static int __genpd_poweron(struct generic_pm_domain *genpd) |
235 | { | 183 | { |
236 | struct gpd_link *link; | 184 | struct gpd_link *link; |
237 | int ret = 0; | 185 | int ret = 0; |
@@ -240,13 +188,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
240 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) | 188 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) |
241 | return 0; | 189 | return 0; |
242 | 190 | ||
243 | if (genpd->cpuidle_data) { | ||
244 | cpuidle_pause_and_lock(); | ||
245 | genpd->cpuidle_data->idle_state->disabled = true; | ||
246 | cpuidle_resume_and_unlock(); | ||
247 | goto out; | ||
248 | } | ||
249 | |||
250 | /* | 191 | /* |
251 | * The list is guaranteed not to change while the loop below is being | 192 | * The list is guaranteed not to change while the loop below is being |
252 | * executed, unless one of the masters' .power_on() callbacks fiddles | 193 | * executed, unless one of the masters' .power_on() callbacks fiddles |
@@ -255,7 +196,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
255 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 196 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
256 | genpd_sd_counter_inc(link->master); | 197 | genpd_sd_counter_inc(link->master); |
257 | 198 | ||
258 | ret = pm_genpd_poweron(link->master); | 199 | ret = genpd_poweron(link->master); |
259 | if (ret) { | 200 | if (ret) { |
260 | genpd_sd_counter_dec(link->master); | 201 | genpd_sd_counter_dec(link->master); |
261 | goto err; | 202 | goto err; |
@@ -266,7 +207,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
266 | if (ret) | 207 | if (ret) |
267 | goto err; | 208 | goto err; |
268 | 209 | ||
269 | out: | ||
270 | genpd->status = GPD_STATE_ACTIVE; | 210 | genpd->status = GPD_STATE_ACTIVE; |
271 | return 0; | 211 | return 0; |
272 | 212 | ||
@@ -282,46 +222,28 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
282 | } | 222 | } |
283 | 223 | ||
284 | /** | 224 | /** |
285 | * pm_genpd_poweron - Restore power to a given PM domain and its masters. | 225 | * genpd_poweron - Restore power to a given PM domain and its masters. |
286 | * @genpd: PM domain to power up. | 226 | * @genpd: PM domain to power up. |
287 | */ | 227 | */ |
288 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | 228 | static int genpd_poweron(struct generic_pm_domain *genpd) |
289 | { | 229 | { |
290 | int ret; | 230 | int ret; |
291 | 231 | ||
292 | mutex_lock(&genpd->lock); | 232 | mutex_lock(&genpd->lock); |
293 | ret = __pm_genpd_poweron(genpd); | 233 | ret = __genpd_poweron(genpd); |
294 | mutex_unlock(&genpd->lock); | 234 | mutex_unlock(&genpd->lock); |
295 | return ret; | 235 | return ret; |
296 | } | 236 | } |
297 | 237 | ||
298 | /** | ||
299 | * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. | ||
300 | * @domain_name: Name of the PM domain to power up. | ||
301 | */ | ||
302 | int pm_genpd_name_poweron(const char *domain_name) | ||
303 | { | ||
304 | struct generic_pm_domain *genpd; | ||
305 | |||
306 | genpd = pm_genpd_lookup_name(domain_name); | ||
307 | return genpd ? pm_genpd_poweron(genpd) : -EINVAL; | ||
308 | } | ||
309 | |||
310 | static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) | 238 | static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) |
311 | { | 239 | { |
312 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, | 240 | return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); |
313 | save_state_latency_ns, "state save"); | ||
314 | } | 241 | } |
315 | 242 | ||
316 | static int genpd_restore_dev(struct generic_pm_domain *genpd, | 243 | static int genpd_restore_dev(struct generic_pm_domain *genpd, |
317 | struct device *dev, bool timed) | 244 | struct device *dev) |
318 | { | 245 | { |
319 | if (!timed) | 246 | return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); |
320 | return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); | ||
321 | |||
322 | return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, | ||
323 | restore_state_latency_ns, | ||
324 | "state restore"); | ||
325 | } | 247 | } |
326 | 248 | ||
327 | static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | 249 | static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, |
@@ -365,13 +287,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | |||
365 | } | 287 | } |
366 | 288 | ||
367 | /** | 289 | /** |
368 | * pm_genpd_poweroff - Remove power from a given PM domain. | 290 | * genpd_poweroff - Remove power from a given PM domain. |
369 | * @genpd: PM domain to power down. | 291 | * @genpd: PM domain to power down. |
292 | * @is_async: PM domain is powered down from a scheduled work | ||
370 | * | 293 | * |
371 | * If all of the @genpd's devices have been suspended and all of its subdomains | 294 | * If all of the @genpd's devices have been suspended and all of its subdomains |
372 | * have been powered down, remove power from @genpd. | 295 | * have been powered down, remove power from @genpd. |
373 | */ | 296 | */ |
374 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | 297 | static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async) |
375 | { | 298 | { |
376 | struct pm_domain_data *pdd; | 299 | struct pm_domain_data *pdd; |
377 | struct gpd_link *link; | 300 | struct gpd_link *link; |
@@ -403,7 +326,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
403 | not_suspended++; | 326 | not_suspended++; |
404 | } | 327 | } |
405 | 328 | ||
406 | if (not_suspended > genpd->in_progress) | 329 | if (not_suspended > 1 || (not_suspended == 1 && is_async)) |
407 | return -EBUSY; | 330 | return -EBUSY; |
408 | 331 | ||
409 | if (genpd->gov && genpd->gov->power_down_ok) { | 332 | if (genpd->gov && genpd->gov->power_down_ok) { |
@@ -411,21 +334,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
411 | return -EAGAIN; | 334 | return -EAGAIN; |
412 | } | 335 | } |
413 | 336 | ||
414 | if (genpd->cpuidle_data) { | ||
415 | /* | ||
416 | * If cpuidle_data is set, cpuidle should turn the domain off | ||
417 | * when the CPU in it is idle. In that case we don't decrement | ||
418 | * the subdomain counts of the master domains, so that power is | ||
419 | * not removed from the current domain prematurely as a result | ||
420 | * of cutting off the masters' power. | ||
421 | */ | ||
422 | genpd->status = GPD_STATE_POWER_OFF; | ||
423 | cpuidle_pause_and_lock(); | ||
424 | genpd->cpuidle_data->idle_state->disabled = false; | ||
425 | cpuidle_resume_and_unlock(); | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | if (genpd->power_off) { | 337 | if (genpd->power_off) { |
430 | int ret; | 338 | int ret; |
431 | 339 | ||
@@ -434,10 +342,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
434 | 342 | ||
435 | /* | 343 | /* |
436 | * If sd_count > 0 at this point, one of the subdomains hasn't | 344 | * If sd_count > 0 at this point, one of the subdomains hasn't |
437 | * managed to call pm_genpd_poweron() for the master yet after | 345 | * managed to call genpd_poweron() for the master yet after |
438 | * incrementing it. In that case pm_genpd_poweron() will wait | 346 | * incrementing it. In that case genpd_poweron() will wait |
439 | * for us to drop the lock, so we can call .power_off() and let | 347 | * for us to drop the lock, so we can call .power_off() and let |
440 | * the pm_genpd_poweron() restore power for us (this shouldn't | 348 | * the genpd_poweron() restore power for us (this shouldn't |
441 | * happen very often). | 349 | * happen very often). |
442 | */ | 350 | */ |
443 | ret = genpd_power_off(genpd, true); | 351 | ret = genpd_power_off(genpd, true); |
@@ -466,7 +374,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) | |||
466 | genpd = container_of(work, struct generic_pm_domain, power_off_work); | 374 | genpd = container_of(work, struct generic_pm_domain, power_off_work); |
467 | 375 | ||
468 | mutex_lock(&genpd->lock); | 376 | mutex_lock(&genpd->lock); |
469 | pm_genpd_poweroff(genpd); | 377 | genpd_poweroff(genpd, true); |
470 | mutex_unlock(&genpd->lock); | 378 | mutex_unlock(&genpd->lock); |
471 | } | 379 | } |
472 | 380 | ||
@@ -482,6 +390,9 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
482 | { | 390 | { |
483 | struct generic_pm_domain *genpd; | 391 | struct generic_pm_domain *genpd; |
484 | bool (*stop_ok)(struct device *__dev); | 392 | bool (*stop_ok)(struct device *__dev); |
393 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | ||
394 | ktime_t time_start; | ||
395 | s64 elapsed_ns; | ||
485 | int ret; | 396 | int ret; |
486 | 397 | ||
487 | dev_dbg(dev, "%s()\n", __func__); | 398 | dev_dbg(dev, "%s()\n", __func__); |
@@ -494,16 +405,29 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
494 | if (stop_ok && !stop_ok(dev)) | 405 | if (stop_ok && !stop_ok(dev)) |
495 | return -EBUSY; | 406 | return -EBUSY; |
496 | 407 | ||
408 | /* Measure suspend latency. */ | ||
409 | time_start = ktime_get(); | ||
410 | |||
497 | ret = genpd_save_dev(genpd, dev); | 411 | ret = genpd_save_dev(genpd, dev); |
498 | if (ret) | 412 | if (ret) |
499 | return ret; | 413 | return ret; |
500 | 414 | ||
501 | ret = genpd_stop_dev(genpd, dev); | 415 | ret = genpd_stop_dev(genpd, dev); |
502 | if (ret) { | 416 | if (ret) { |
503 | genpd_restore_dev(genpd, dev, true); | 417 | genpd_restore_dev(genpd, dev); |
504 | return ret; | 418 | return ret; |
505 | } | 419 | } |
506 | 420 | ||
421 | /* Update suspend latency value if the measured time exceeds it. */ | ||
422 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | ||
423 | if (elapsed_ns > td->suspend_latency_ns) { | ||
424 | td->suspend_latency_ns = elapsed_ns; | ||
425 | dev_dbg(dev, "suspend latency exceeded, %lld ns\n", | ||
426 | elapsed_ns); | ||
427 | genpd->max_off_time_changed = true; | ||
428 | td->constraint_changed = true; | ||
429 | } | ||
430 | |||
507 | /* | 431 | /* |
508 | * If power.irq_safe is set, this routine will be run with interrupts | 432 | * If power.irq_safe is set, this routine will be run with interrupts |
509 | * off, so it can't use mutexes. | 433 | * off, so it can't use mutexes. |
@@ -512,9 +436,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
512 | return 0; | 436 | return 0; |
513 | 437 | ||
514 | mutex_lock(&genpd->lock); | 438 | mutex_lock(&genpd->lock); |
515 | genpd->in_progress++; | 439 | genpd_poweroff(genpd, false); |
516 | pm_genpd_poweroff(genpd); | ||
517 | genpd->in_progress--; | ||
518 | mutex_unlock(&genpd->lock); | 440 | mutex_unlock(&genpd->lock); |
519 | 441 | ||
520 | return 0; | 442 | return 0; |
@@ -531,6 +453,9 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
531 | static int pm_genpd_runtime_resume(struct device *dev) | 453 | static int pm_genpd_runtime_resume(struct device *dev) |
532 | { | 454 | { |
533 | struct generic_pm_domain *genpd; | 455 | struct generic_pm_domain *genpd; |
456 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | ||
457 | ktime_t time_start; | ||
458 | s64 elapsed_ns; | ||
534 | int ret; | 459 | int ret; |
535 | bool timed = true; | 460 | bool timed = true; |
536 | 461 | ||
@@ -547,15 +472,31 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
547 | } | 472 | } |
548 | 473 | ||
549 | mutex_lock(&genpd->lock); | 474 | mutex_lock(&genpd->lock); |
550 | ret = __pm_genpd_poweron(genpd); | 475 | ret = __genpd_poweron(genpd); |
551 | mutex_unlock(&genpd->lock); | 476 | mutex_unlock(&genpd->lock); |
552 | 477 | ||
553 | if (ret) | 478 | if (ret) |
554 | return ret; | 479 | return ret; |
555 | 480 | ||
556 | out: | 481 | out: |
557 | genpd_start_dev(genpd, dev, timed); | 482 | /* Measure resume latency. */ |
558 | genpd_restore_dev(genpd, dev, timed); | 483 | if (timed) |
484 | time_start = ktime_get(); | ||
485 | |||
486 | genpd_start_dev(genpd, dev); | ||
487 | genpd_restore_dev(genpd, dev); | ||
488 | |||
489 | /* Update resume latency value if the measured time exceeds it. */ | ||
490 | if (timed) { | ||
491 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | ||
492 | if (elapsed_ns > td->resume_latency_ns) { | ||
493 | td->resume_latency_ns = elapsed_ns; | ||
494 | dev_dbg(dev, "resume latency exceeded, %lld ns\n", | ||
495 | elapsed_ns); | ||
496 | genpd->max_off_time_changed = true; | ||
497 | td->constraint_changed = true; | ||
498 | } | ||
499 | } | ||
559 | 500 | ||
560 | return 0; | 501 | return 0; |
561 | } | 502 | } |
@@ -569,15 +510,15 @@ static int __init pd_ignore_unused_setup(char *__unused) | |||
569 | __setup("pd_ignore_unused", pd_ignore_unused_setup); | 510 | __setup("pd_ignore_unused", pd_ignore_unused_setup); |
570 | 511 | ||
571 | /** | 512 | /** |
572 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. | 513 | * genpd_poweroff_unused - Power off all PM domains with no devices in use. |
573 | */ | 514 | */ |
574 | void pm_genpd_poweroff_unused(void) | 515 | static int __init genpd_poweroff_unused(void) |
575 | { | 516 | { |
576 | struct generic_pm_domain *genpd; | 517 | struct generic_pm_domain *genpd; |
577 | 518 | ||
578 | if (pd_ignore_unused) { | 519 | if (pd_ignore_unused) { |
579 | pr_warn("genpd: Not disabling unused power domains\n"); | 520 | pr_warn("genpd: Not disabling unused power domains\n"); |
580 | return; | 521 | return 0; |
581 | } | 522 | } |
582 | 523 | ||
583 | mutex_lock(&gpd_list_lock); | 524 | mutex_lock(&gpd_list_lock); |
@@ -586,11 +527,7 @@ void pm_genpd_poweroff_unused(void) | |||
586 | genpd_queue_power_off_work(genpd); | 527 | genpd_queue_power_off_work(genpd); |
587 | 528 | ||
588 | mutex_unlock(&gpd_list_lock); | 529 | mutex_unlock(&gpd_list_lock); |
589 | } | ||
590 | 530 | ||
591 | static int __init genpd_poweroff_unused(void) | ||
592 | { | ||
593 | pm_genpd_poweroff_unused(); | ||
594 | return 0; | 531 | return 0; |
595 | } | 532 | } |
596 | late_initcall(genpd_poweroff_unused); | 533 | late_initcall(genpd_poweroff_unused); |
@@ -764,7 +701,7 @@ static int pm_genpd_prepare(struct device *dev) | |||
764 | 701 | ||
765 | /* | 702 | /* |
766 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, | 703 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, |
767 | * so pm_genpd_poweron() will return immediately, but if the device | 704 | * so genpd_poweron() will return immediately, but if the device |
768 | * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need | 705 | * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need |
769 | * to make it operational. | 706 | * to make it operational. |
770 | */ | 707 | */ |
@@ -890,7 +827,7 @@ static int pm_genpd_resume_noirq(struct device *dev) | |||
890 | pm_genpd_sync_poweron(genpd, true); | 827 | pm_genpd_sync_poweron(genpd, true); |
891 | genpd->suspended_count--; | 828 | genpd->suspended_count--; |
892 | 829 | ||
893 | return genpd_start_dev(genpd, dev, true); | 830 | return genpd_start_dev(genpd, dev); |
894 | } | 831 | } |
895 | 832 | ||
896 | /** | 833 | /** |
@@ -1018,7 +955,8 @@ static int pm_genpd_thaw_noirq(struct device *dev) | |||
1018 | if (IS_ERR(genpd)) | 955 | if (IS_ERR(genpd)) |
1019 | return -EINVAL; | 956 | return -EINVAL; |
1020 | 957 | ||
1021 | return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true); | 958 | return genpd->suspend_power_off ? |
959 | 0 : genpd_start_dev(genpd, dev); | ||
1022 | } | 960 | } |
1023 | 961 | ||
1024 | /** | 962 | /** |
@@ -1112,7 +1050,7 @@ static int pm_genpd_restore_noirq(struct device *dev) | |||
1112 | 1050 | ||
1113 | pm_genpd_sync_poweron(genpd, true); | 1051 | pm_genpd_sync_poweron(genpd, true); |
1114 | 1052 | ||
1115 | return genpd_start_dev(genpd, dev, true); | 1053 | return genpd_start_dev(genpd, dev); |
1116 | } | 1054 | } |
1117 | 1055 | ||
1118 | /** | 1056 | /** |
@@ -1317,18 +1255,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1317 | } | 1255 | } |
1318 | 1256 | ||
1319 | /** | 1257 | /** |
1320 | * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. | ||
1321 | * @domain_name: Name of the PM domain to add the device to. | ||
1322 | * @dev: Device to be added. | ||
1323 | * @td: Set of PM QoS timing parameters to attach to the device. | ||
1324 | */ | ||
1325 | int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, | ||
1326 | struct gpd_timing_data *td) | ||
1327 | { | ||
1328 | return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); | ||
1329 | } | ||
1330 | |||
1331 | /** | ||
1332 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. | 1258 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. |
1333 | * @genpd: PM domain to remove the device from. | 1259 | * @genpd: PM domain to remove the device from. |
1334 | * @dev: Device to be removed. | 1260 | * @dev: Device to be removed. |
@@ -1429,35 +1355,6 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
1429 | } | 1355 | } |
1430 | 1356 | ||
1431 | /** | 1357 | /** |
1432 | * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. | ||
1433 | * @master_name: Name of the master PM domain to add the subdomain to. | ||
1434 | * @subdomain_name: Name of the subdomain to be added. | ||
1435 | */ | ||
1436 | int pm_genpd_add_subdomain_names(const char *master_name, | ||
1437 | const char *subdomain_name) | ||
1438 | { | ||
1439 | struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; | ||
1440 | |||
1441 | if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) | ||
1442 | return -EINVAL; | ||
1443 | |||
1444 | mutex_lock(&gpd_list_lock); | ||
1445 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | ||
1446 | if (!master && !strcmp(gpd->name, master_name)) | ||
1447 | master = gpd; | ||
1448 | |||
1449 | if (!subdomain && !strcmp(gpd->name, subdomain_name)) | ||
1450 | subdomain = gpd; | ||
1451 | |||
1452 | if (master && subdomain) | ||
1453 | break; | ||
1454 | } | ||
1455 | mutex_unlock(&gpd_list_lock); | ||
1456 | |||
1457 | return pm_genpd_add_subdomain(master, subdomain); | ||
1458 | } | ||
1459 | |||
1460 | /** | ||
1461 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1358 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
1462 | * @genpd: Master PM domain to remove the subdomain from. | 1359 | * @genpd: Master PM domain to remove the subdomain from. |
1463 | * @subdomain: Subdomain to be removed. | 1360 | * @subdomain: Subdomain to be removed. |
@@ -1504,124 +1401,6 @@ out: | |||
1504 | return ret; | 1401 | return ret; |
1505 | } | 1402 | } |
1506 | 1403 | ||
1507 | /** | ||
1508 | * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. | ||
1509 | * @genpd: PM domain to be connected with cpuidle. | ||
1510 | * @state: cpuidle state this domain can disable/enable. | ||
1511 | * | ||
1512 | * Make a PM domain behave as though it contained a CPU core, that is, instead | ||
1513 | * of calling its power down routine it will enable the given cpuidle state so | ||
1514 | * that the cpuidle subsystem can power it down (if possible and desirable). | ||
1515 | */ | ||
1516 | int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) | ||
1517 | { | ||
1518 | struct cpuidle_driver *cpuidle_drv; | ||
1519 | struct gpd_cpuidle_data *cpuidle_data; | ||
1520 | struct cpuidle_state *idle_state; | ||
1521 | int ret = 0; | ||
1522 | |||
1523 | if (IS_ERR_OR_NULL(genpd) || state < 0) | ||
1524 | return -EINVAL; | ||
1525 | |||
1526 | mutex_lock(&genpd->lock); | ||
1527 | |||
1528 | if (genpd->cpuidle_data) { | ||
1529 | ret = -EEXIST; | ||
1530 | goto out; | ||
1531 | } | ||
1532 | cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); | ||
1533 | if (!cpuidle_data) { | ||
1534 | ret = -ENOMEM; | ||
1535 | goto out; | ||
1536 | } | ||
1537 | cpuidle_drv = cpuidle_driver_ref(); | ||
1538 | if (!cpuidle_drv) { | ||
1539 | ret = -ENODEV; | ||
1540 | goto err_drv; | ||
1541 | } | ||
1542 | if (cpuidle_drv->state_count <= state) { | ||
1543 | ret = -EINVAL; | ||
1544 | goto err; | ||
1545 | } | ||
1546 | idle_state = &cpuidle_drv->states[state]; | ||
1547 | if (!idle_state->disabled) { | ||
1548 | ret = -EAGAIN; | ||
1549 | goto err; | ||
1550 | } | ||
1551 | cpuidle_data->idle_state = idle_state; | ||
1552 | cpuidle_data->saved_exit_latency = idle_state->exit_latency; | ||
1553 | genpd->cpuidle_data = cpuidle_data; | ||
1554 | genpd_recalc_cpu_exit_latency(genpd); | ||
1555 | |||
1556 | out: | ||
1557 | mutex_unlock(&genpd->lock); | ||
1558 | return ret; | ||
1559 | |||
1560 | err: | ||
1561 | cpuidle_driver_unref(); | ||
1562 | |||
1563 | err_drv: | ||
1564 | kfree(cpuidle_data); | ||
1565 | goto out; | ||
1566 | } | ||
1567 | |||
1568 | /** | ||
1569 | * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. | ||
1570 | * @name: Name of the domain to connect to cpuidle. | ||
1571 | * @state: cpuidle state this domain can manipulate. | ||
1572 | */ | ||
1573 | int pm_genpd_name_attach_cpuidle(const char *name, int state) | ||
1574 | { | ||
1575 | return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); | ||
1576 | } | ||
1577 | |||
1578 | /** | ||
1579 | * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. | ||
1580 | * @genpd: PM domain to remove the cpuidle connection from. | ||
1581 | * | ||
1582 | * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the | ||
1583 | * given PM domain. | ||
1584 | */ | ||
1585 | int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) | ||
1586 | { | ||
1587 | struct gpd_cpuidle_data *cpuidle_data; | ||
1588 | struct cpuidle_state *idle_state; | ||
1589 | int ret = 0; | ||
1590 | |||
1591 | if (IS_ERR_OR_NULL(genpd)) | ||
1592 | return -EINVAL; | ||
1593 | |||
1594 | mutex_lock(&genpd->lock); | ||
1595 | |||
1596 | cpuidle_data = genpd->cpuidle_data; | ||
1597 | if (!cpuidle_data) { | ||
1598 | ret = -ENODEV; | ||
1599 | goto out; | ||
1600 | } | ||
1601 | idle_state = cpuidle_data->idle_state; | ||
1602 | if (!idle_state->disabled) { | ||
1603 | ret = -EAGAIN; | ||
1604 | goto out; | ||
1605 | } | ||
1606 | idle_state->exit_latency = cpuidle_data->saved_exit_latency; | ||
1607 | cpuidle_driver_unref(); | ||
1608 | genpd->cpuidle_data = NULL; | ||
1609 | kfree(cpuidle_data); | ||
1610 | |||
1611 | out: | ||
1612 | mutex_unlock(&genpd->lock); | ||
1613 | return ret; | ||
1614 | } | ||
1615 | |||
1616 | /** | ||
1617 | * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. | ||
1618 | * @name: Name of the domain to disconnect cpuidle from. | ||
1619 | */ | ||
1620 | int pm_genpd_name_detach_cpuidle(const char *name) | ||
1621 | { | ||
1622 | return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); | ||
1623 | } | ||
1624 | |||
1625 | /* Default device callbacks for generic PM domains. */ | 1404 | /* Default device callbacks for generic PM domains. */ |
1626 | 1405 | ||
1627 | /** | 1406 | /** |
@@ -1688,7 +1467,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1688 | mutex_init(&genpd->lock); | 1467 | mutex_init(&genpd->lock); |
1689 | genpd->gov = gov; | 1468 | genpd->gov = gov; |
1690 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1469 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
1691 | genpd->in_progress = 0; | ||
1692 | atomic_set(&genpd->sd_count, 0); | 1470 | atomic_set(&genpd->sd_count, 0); |
1693 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1471 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
1694 | genpd->device_count = 0; | 1472 | genpd->device_count = 0; |
@@ -2023,7 +1801,7 @@ int genpd_dev_pm_attach(struct device *dev) | |||
2023 | 1801 | ||
2024 | dev->pm_domain->detach = genpd_dev_pm_detach; | 1802 | dev->pm_domain->detach = genpd_dev_pm_detach; |
2025 | dev->pm_domain->sync = genpd_dev_pm_sync; | 1803 | dev->pm_domain->sync = genpd_dev_pm_sync; |
2026 | ret = pm_genpd_poweron(pd); | 1804 | ret = genpd_poweron(pd); |
2027 | 1805 | ||
2028 | out: | 1806 | out: |
2029 | return ret ? -EPROBE_DEFER : 0; | 1807 | return ret ? -EPROBE_DEFER : 0; |
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 85e17bacc834..e60dd12e23aa 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c | |||
@@ -77,10 +77,8 @@ static bool default_stop_ok(struct device *dev) | |||
77 | dev_update_qos_constraint); | 77 | dev_update_qos_constraint); |
78 | 78 | ||
79 | if (constraint_ns > 0) { | 79 | if (constraint_ns > 0) { |
80 | constraint_ns -= td->save_state_latency_ns + | 80 | constraint_ns -= td->suspend_latency_ns + |
81 | td->stop_latency_ns + | 81 | td->resume_latency_ns; |
82 | td->start_latency_ns + | ||
83 | td->restore_state_latency_ns; | ||
84 | if (constraint_ns == 0) | 82 | if (constraint_ns == 0) |
85 | return false; | 83 | return false; |
86 | } | 84 | } |