author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 19:01:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 19:01:57 -0400
commit		431bf99d26157d56689e5de65bd27ce9f077fc3f (patch)
tree		b15e357039956fcdd0e0e6177d2fc99bb3cfa822 /drivers/base/power/domain.c
parent		72f96e0e38d7e29ba16dcfd824ecaebe38b8293e (diff)
parent		7ae033cc0dfce68d8e0c83aca60837cf2bf0d2e6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6: (51 commits)
PM: Improve error code of pm_notifier_call_chain()
PM: Add "RTC" to PM trace time stamps to avoid confusion
PM / Suspend: Export suspend_set_ops, suspend_valid_only_mem
PM / Suspend: Add .suspend_again() callback to suspend_ops
PM / OPP: Introduce function to free cpufreq table
ARM / shmobile: Return -EBUSY from A4LC power off if A3RV is active
PM / Domains: Take .power_off() error code into account
ARM / shmobile: Use genpd_queue_power_off_work()
ARM / shmobile: Use pm_genpd_poweroff_unused()
PM / Domains: Introduce function to power off all unused PM domains
OMAP: PM: disable idle on suspend for GPIO and UART
OMAP: PM: omap_device: add API to disable idle on suspend
OMAP: PM: omap_device: add system PM methods for PM domain handling
OMAP: PM: omap_device: conditionally use PM domain runtime helpers
PM / Runtime: Add new helper function: pm_runtime_status_suspended()
PM / Domains: Queue up power off work only if it is not pending
PM / Domains: Improve handling of wakeup devices during system suspend
PM / Domains: Do not restore all devices on power off error
PM / Domains: Allow callbacks to execute all runtime PM helpers
PM / Domains: Do not execute device callbacks under locks
...
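The new file below exposes a small platform-facing API: pm_genpd_init(), pm_genpd_add_device(), pm_genpd_add_subdomain() and pm_genpd_poweroff_unused(). As a rough sketch of how a platform might hook a power domain into it (all my_pd_* names and the board glue are hypothetical, not part of this merge):

	#include <linux/init.h>
	#include <linux/pm_domain.h>

	/* Hypothetical SoC callbacks that toggle the domain's power switch. */
	static int my_pd_power_on(struct generic_pm_domain *genpd)
	{
		/* e.g. poke an (assumed) PMU register and wait for the domain */
		return 0;
	}

	static int my_pd_power_off(struct generic_pm_domain *genpd)
	{
		return 0;
	}

	static struct generic_pm_domain my_pd = {
		.power_on	= my_pd_power_on,
		.power_off	= my_pd_power_off,
	};

	/* Called from (assumed) board code for each device inside the domain. */
	int my_pd_attach_device(struct device *dev)
	{
		return pm_genpd_add_device(&my_pd, dev);
	}

	static int __init my_pd_init(void)
	{
		pm_genpd_init(&my_pd, NULL, false);	/* domain is powered on at boot */
		return 0;
	}
	core_initcall(my_pd_init);

	/* Once everything has probed, power off domains nobody ended up using. */
	static int __init my_pd_poweroff_unused(void)
	{
		pm_genpd_poweroff_unused();
		return 0;
	}
	late_initcall(my_pd_poweroff_unused);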
Diffstat (limited to 'drivers/base/power/domain.c')
-rw-r--r--   drivers/base/power/domain.c   1273
1 file changed, 1273 insertions, 0 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000000..be8714aa9dd6
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
1 | /* | ||
2 | * drivers/base/power/domain.c - Common code related to device power domains. | ||
3 | * | ||
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/pm_runtime.h> | ||
13 | #include <linux/pm_domain.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/suspend.h> | ||
18 | |||
19 | static LIST_HEAD(gpd_list); | ||
20 | static DEFINE_MUTEX(gpd_list_lock); | ||
21 | |||
22 | #ifdef CONFIG_PM | ||
23 | |||
24 | static struct generic_pm_domain *dev_to_genpd(struct device *dev) | ||
25 | { | ||
26 | if (IS_ERR_OR_NULL(dev->pm_domain)) | ||
27 | return ERR_PTR(-EINVAL); | ||
28 | |||
29 | return pd_to_genpd(dev->pm_domain); | ||
30 | } | ||
31 | |||
32 | static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) | ||
33 | { | ||
34 | if (!WARN_ON(genpd->sd_count == 0)) | ||
35 | genpd->sd_count--; | ||
36 | } | ||
37 | |||
38 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) | ||
39 | { | ||
40 | DEFINE_WAIT(wait); | ||
41 | |||
42 | mutex_lock(&genpd->lock); | ||
43 | /* | ||
44 | * Wait for the domain to transition into either the active, | ||
45 | * or the power off state. | ||
46 | */ | ||
47 | for (;;) { | ||
48 | prepare_to_wait(&genpd->status_wait_queue, &wait, | ||
49 | TASK_UNINTERRUPTIBLE); | ||
50 | if (genpd->status == GPD_STATE_ACTIVE | ||
51 | || genpd->status == GPD_STATE_POWER_OFF) | ||
52 | break; | ||
53 | mutex_unlock(&genpd->lock); | ||
54 | |||
55 | schedule(); | ||
56 | |||
57 | mutex_lock(&genpd->lock); | ||
58 | } | ||
59 | finish_wait(&genpd->status_wait_queue, &wait); | ||
60 | } | ||
61 | |||
62 | static void genpd_release_lock(struct generic_pm_domain *genpd) | ||
63 | { | ||
64 | mutex_unlock(&genpd->lock); | ||
65 | } | ||
66 | |||
67 | static void genpd_set_active(struct generic_pm_domain *genpd) | ||
68 | { | ||
69 | if (genpd->resume_count == 0) | ||
70 | genpd->status = GPD_STATE_ACTIVE; | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * pm_genpd_poweron - Restore power to a given PM domain and its parents. | ||
75 | * @genpd: PM domain to power up. | ||
76 | * | ||
77 | * Restore power to @genpd and all of its parents so that it is possible to | ||
78 | * resume a device belonging to it. | ||
79 | */ | ||
80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | ||
81 | { | ||
82 | struct generic_pm_domain *parent = genpd->parent; | ||
83 | DEFINE_WAIT(wait); | ||
84 | int ret = 0; | ||
85 | |||
86 | start: | ||
87 | if (parent) { | ||
88 | genpd_acquire_lock(parent); | ||
89 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
90 | } else { | ||
91 | mutex_lock(&genpd->lock); | ||
92 | } | ||
93 | |||
94 | if (genpd->status == GPD_STATE_ACTIVE | ||
95 | || (genpd->prepared_count > 0 && genpd->suspend_power_off)) | ||
96 | goto out; | ||
97 | |||
98 | if (genpd->status != GPD_STATE_POWER_OFF) { | ||
99 | genpd_set_active(genpd); | ||
100 | goto out; | ||
101 | } | ||
102 | |||
103 | if (parent && parent->status != GPD_STATE_ACTIVE) { | ||
104 | mutex_unlock(&genpd->lock); | ||
105 | genpd_release_lock(parent); | ||
106 | |||
107 | ret = pm_genpd_poweron(parent); | ||
108 | if (ret) | ||
109 | return ret; | ||
110 | |||
111 | goto start; | ||
112 | } | ||
113 | |||
114 | if (genpd->power_on) { | ||
115 | ret = genpd->power_on(genpd); | ||
116 | if (ret) | ||
117 | goto out; | ||
118 | } | ||
119 | |||
120 | genpd_set_active(genpd); | ||
121 | if (parent) | ||
122 | parent->sd_count++; | ||
123 | |||
124 | out: | ||
125 | mutex_unlock(&genpd->lock); | ||
126 | if (parent) | ||
127 | genpd_release_lock(parent); | ||
128 | |||
129 | return ret; | ||
130 | } | ||
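/*
 * Illustrative caller (hypothetical, not part of this file): platform code
 * that needs to touch hardware inside a domain can bring the domain and its
 * parents up first.  "domain" stands for any generic_pm_domain that has
 * been set up with pm_genpd_init().
 */
static int my_touch_domain_hw(struct generic_pm_domain *domain)
{
	int ret = pm_genpd_poweron(domain);

	if (ret)
		return ret;

	/* ... the whole parent chain is now powered ... */
	return 0;
}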
131 | |||
132 | #endif /* CONFIG_PM */ | ||
133 | |||
134 | #ifdef CONFIG_PM_RUNTIME | ||
135 | |||
136 | /** | ||
137 | * __pm_genpd_save_device - Save the pre-suspend state of a device. | ||
138 | * @dle: Device list entry of the device to save the state of. | ||
139 | * @genpd: PM domain the device belongs to. | ||
140 | */ | ||
141 | static int __pm_genpd_save_device(struct dev_list_entry *dle, | ||
142 | struct generic_pm_domain *genpd) | ||
143 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
144 | { | ||
145 | struct device *dev = dle->dev; | ||
146 | struct device_driver *drv = dev->driver; | ||
147 | int ret = 0; | ||
148 | |||
149 | if (dle->need_restore) | ||
150 | return 0; | ||
151 | |||
152 | mutex_unlock(&genpd->lock); | ||
153 | |||
154 | if (drv && drv->pm && drv->pm->runtime_suspend) { | ||
155 | if (genpd->start_device) | ||
156 | genpd->start_device(dev); | ||
157 | |||
158 | ret = drv->pm->runtime_suspend(dev); | ||
159 | |||
160 | if (genpd->stop_device) | ||
161 | genpd->stop_device(dev); | ||
162 | } | ||
163 | |||
164 | mutex_lock(&genpd->lock); | ||
165 | |||
166 | if (!ret) | ||
167 | dle->need_restore = true; | ||
168 | |||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | /** | ||
173 | * __pm_genpd_restore_device - Restore the pre-suspend state of a device. | ||
174 | * @dle: Device list entry of the device to restore the state of. | ||
175 | * @genpd: PM domain the device belongs to. | ||
176 | */ | ||
177 | static void __pm_genpd_restore_device(struct dev_list_entry *dle, | ||
178 | struct generic_pm_domain *genpd) | ||
179 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
180 | { | ||
181 | struct device *dev = dle->dev; | ||
182 | struct device_driver *drv = dev->driver; | ||
183 | |||
184 | if (!dle->need_restore) | ||
185 | return; | ||
186 | |||
187 | mutex_unlock(&genpd->lock); | ||
188 | |||
189 | if (drv && drv->pm && drv->pm->runtime_resume) { | ||
190 | if (genpd->start_device) | ||
191 | genpd->start_device(dev); | ||
192 | |||
193 | drv->pm->runtime_resume(dev); | ||
194 | |||
195 | if (genpd->stop_device) | ||
196 | genpd->stop_device(dev); | ||
197 | } | ||
198 | |||
199 | mutex_lock(&genpd->lock); | ||
200 | |||
201 | dle->need_restore = false; | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * genpd_abort_poweroff - Check if a PM domain power off should be aborted. | ||
206 | * @genpd: PM domain to check. | ||
207 | * | ||
208 | * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during | ||
209 | * a "power off" operation, which means that a "power on" has occured in the | ||
210 | * meantime, or if its resume_count field is different from zero, which means | ||
211 | * that one of its devices has been resumed in the meantime. | ||
212 | */ | ||
213 | static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) | ||
214 | { | ||
215 | return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). | ||
220 | * @genpd: PM domain to power off. | ||
221 | * | ||
222 | * Queue up the execution of pm_genpd_poweroff() unless it's already been done | ||
223 | * before. | ||
224 | */ | ||
225 | void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | ||
226 | { | ||
227 | if (!work_pending(&genpd->power_off_work)) | ||
228 | queue_work(pm_wq, &genpd->power_off_work); | ||
229 | } | ||
230 | |||
231 | /** | ||
232 | * pm_genpd_poweroff - Remove power from a given PM domain. | ||
233 | * @genpd: PM domain to power down. | ||
234 | * | ||
235 | * If all of the @genpd's devices have been suspended and all of its subdomains | ||
236 | * have been powered down, run the runtime suspend callbacks provided by all of | ||
237 | * the @genpd's devices' drivers and remove power from @genpd. | ||
238 | */ | ||
239 | static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | ||
240 | __releases(&genpd->lock) __acquires(&genpd->lock) | ||
241 | { | ||
242 | struct generic_pm_domain *parent; | ||
243 | struct dev_list_entry *dle; | ||
244 | unsigned int not_suspended; | ||
245 | int ret = 0; | ||
246 | |||
247 | start: | ||
248 | /* | ||
249 | * Do not try to power off the domain in the following situations: | ||
250 | * (1) The domain is already in the "power off" state. | ||
251 | * (2) System suspend is in progress. | ||
252 | * (3) One of the domain's devices is being resumed right now. | ||
253 | */ | ||
254 | if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 | ||
255 | || genpd->resume_count > 0) | ||
256 | return 0; | ||
257 | |||
258 | if (genpd->sd_count > 0) | ||
259 | return -EBUSY; | ||
260 | |||
261 | not_suspended = 0; | ||
262 | list_for_each_entry(dle, &genpd->dev_list, node) | ||
263 | if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) | ||
264 | not_suspended++; | ||
265 | |||
266 | if (not_suspended > genpd->in_progress) | ||
267 | return -EBUSY; | ||
268 | |||
269 | if (genpd->poweroff_task) { | ||
270 | /* | ||
271 | * Another instance of pm_genpd_poweroff() is executing | ||
272 | * callbacks, so tell it to start over and return. | ||
273 | */ | ||
274 | genpd->status = GPD_STATE_REPEAT; | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | if (genpd->gov && genpd->gov->power_down_ok) { | ||
279 | if (!genpd->gov->power_down_ok(&genpd->domain)) | ||
280 | return -EAGAIN; | ||
281 | } | ||
282 | |||
283 | genpd->status = GPD_STATE_BUSY; | ||
284 | genpd->poweroff_task = current; | ||
285 | |||
286 | list_for_each_entry_reverse(dle, &genpd->dev_list, node) { | ||
287 | ret = __pm_genpd_save_device(dle, genpd); | ||
288 | if (ret) { | ||
289 | genpd_set_active(genpd); | ||
290 | goto out; | ||
291 | } | ||
292 | |||
293 | if (genpd_abort_poweroff(genpd)) | ||
294 | goto out; | ||
295 | |||
296 | if (genpd->status == GPD_STATE_REPEAT) { | ||
297 | genpd->poweroff_task = NULL; | ||
298 | goto start; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | parent = genpd->parent; | ||
303 | if (parent) { | ||
304 | mutex_unlock(&genpd->lock); | ||
305 | |||
306 | genpd_acquire_lock(parent); | ||
307 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
308 | |||
309 | if (genpd_abort_poweroff(genpd)) { | ||
310 | genpd_release_lock(parent); | ||
311 | goto out; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | if (genpd->power_off) { | ||
316 | ret = genpd->power_off(genpd); | ||
317 | if (ret == -EBUSY) { | ||
318 | genpd_set_active(genpd); | ||
319 | if (parent) | ||
320 | genpd_release_lock(parent); | ||
321 | |||
322 | goto out; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | genpd->status = GPD_STATE_POWER_OFF; | ||
327 | |||
328 | if (parent) { | ||
329 | genpd_sd_counter_dec(parent); | ||
330 | if (parent->sd_count == 0) | ||
331 | genpd_queue_power_off_work(parent); | ||
332 | |||
333 | genpd_release_lock(parent); | ||
334 | } | ||
335 | |||
336 | out: | ||
337 | genpd->poweroff_task = NULL; | ||
338 | wake_up_all(&genpd->status_wait_queue); | ||
339 | return ret; | ||
340 | } | ||
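/*
 * Illustrative governor (hypothetical, not part of this file): right before
 * removing power, pm_genpd_poweroff() consults gov->power_down_ok(), so a
 * platform can veto the transition.  The governor is passed as the second
 * argument of pm_genpd_init().
 */
static bool my_power_down_ok(struct dev_pm_domain *domain)
{
	/* A real governor would weigh wakeup latency or QoS constraints. */
	return true;
}

static struct dev_power_governor my_pd_gov = {
	.power_down_ok = my_power_down_ok,
};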
341 | |||
342 | /** | ||
343 | * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. | ||
344 | * @work: Work structure used for scheduling the execution of this function. | ||
345 | */ | ||
346 | static void genpd_power_off_work_fn(struct work_struct *work) | ||
347 | { | ||
348 | struct generic_pm_domain *genpd; | ||
349 | |||
350 | genpd = container_of(work, struct generic_pm_domain, power_off_work); | ||
351 | |||
352 | genpd_acquire_lock(genpd); | ||
353 | pm_genpd_poweroff(genpd); | ||
354 | genpd_release_lock(genpd); | ||
355 | } | ||
356 | |||
357 | /** | ||
358 | * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. | ||
359 | * @dev: Device to suspend. | ||
360 | * | ||
361 | * Carry out a runtime suspend of a device under the assumption that its | ||
362 | * pm_domain field points to the domain member of an object of type | ||
363 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | ||
364 | */ | ||
365 | static int pm_genpd_runtime_suspend(struct device *dev) | ||
366 | { | ||
367 | struct generic_pm_domain *genpd; | ||
368 | |||
369 | dev_dbg(dev, "%s()\n", __func__); | ||
370 | |||
371 | genpd = dev_to_genpd(dev); | ||
372 | if (IS_ERR(genpd)) | ||
373 | return -EINVAL; | ||
374 | |||
375 | if (genpd->stop_device) { | ||
376 | int ret = genpd->stop_device(dev); | ||
377 | if (ret) | ||
378 | return ret; | ||
379 | } | ||
380 | |||
381 | mutex_lock(&genpd->lock); | ||
382 | genpd->in_progress++; | ||
383 | pm_genpd_poweroff(genpd); | ||
384 | genpd->in_progress--; | ||
385 | mutex_unlock(&genpd->lock); | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
392 | * @dev: Device to resume. | ||
393 | * @genpd: PM domain the device belongs to. | ||
394 | */ | ||
395 | static void __pm_genpd_runtime_resume(struct device *dev, | ||
396 | struct generic_pm_domain *genpd) | ||
397 | { | ||
398 | struct dev_list_entry *dle; | ||
399 | |||
400 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
401 | if (dle->dev == dev) { | ||
402 | __pm_genpd_restore_device(dle, genpd); | ||
403 | break; | ||
404 | } | ||
405 | } | ||
406 | } | ||
407 | |||
408 | /** | ||
409 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
410 | * @dev: Device to resume. | ||
411 | * | ||
412 | * Carry out a runtime resume of a device under the assumption that its | ||
413 | * pm_domain field points to the domain member of an object of type | ||
414 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | ||
415 | */ | ||
416 | static int pm_genpd_runtime_resume(struct device *dev) | ||
417 | { | ||
418 | struct generic_pm_domain *genpd; | ||
419 | DEFINE_WAIT(wait); | ||
420 | int ret; | ||
421 | |||
422 | dev_dbg(dev, "%s()\n", __func__); | ||
423 | |||
424 | genpd = dev_to_genpd(dev); | ||
425 | if (IS_ERR(genpd)) | ||
426 | return -EINVAL; | ||
427 | |||
428 | ret = pm_genpd_poweron(genpd); | ||
429 | if (ret) | ||
430 | return ret; | ||
431 | |||
432 | mutex_lock(&genpd->lock); | ||
433 | genpd->status = GPD_STATE_BUSY; | ||
434 | genpd->resume_count++; | ||
435 | for (;;) { | ||
436 | prepare_to_wait(&genpd->status_wait_queue, &wait, | ||
437 | TASK_UNINTERRUPTIBLE); | ||
438 | /* | ||
439 | * If current is the powering off task, we have been called | ||
440 | * reentrantly from one of the device callbacks, so we should | ||
441 | * not wait. | ||
442 | */ | ||
443 | if (!genpd->poweroff_task || genpd->poweroff_task == current) | ||
444 | break; | ||
445 | mutex_unlock(&genpd->lock); | ||
446 | |||
447 | schedule(); | ||
448 | |||
449 | mutex_lock(&genpd->lock); | ||
450 | } | ||
451 | finish_wait(&genpd->status_wait_queue, &wait); | ||
452 | __pm_genpd_runtime_resume(dev, genpd); | ||
453 | genpd->resume_count--; | ||
454 | genpd_set_active(genpd); | ||
455 | wake_up_all(&genpd->status_wait_queue); | ||
456 | mutex_unlock(&genpd->lock); | ||
457 | |||
458 | if (genpd->start_device) | ||
459 | genpd->start_device(dev); | ||
460 | |||
461 | return 0; | ||
462 | } | ||
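/*
 * Driver's view (hypothetical driver, not part of this file): once a device
 * has been added with pm_genpd_add_device(), plain runtime PM calls reach
 * pm_genpd_runtime_suspend()/pm_genpd_runtime_resume() above through
 * dev->pm_domain, so the driver needs nothing genpd-specific.
 */
static int my_driver_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* may power the domain on */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware ... */

	pm_runtime_put(dev);			/* may queue a domain power off */
	return 0;
}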
463 | |||
464 | #else | ||
465 | |||
466 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | ||
467 | static inline void __pm_genpd_runtime_resume(struct device *dev, | ||
468 | struct generic_pm_domain *genpd) {} | ||
469 | |||
470 | #define pm_genpd_runtime_suspend NULL | ||
471 | #define pm_genpd_runtime_resume NULL | ||
472 | |||
473 | #endif /* CONFIG_PM_RUNTIME */ | ||
474 | |||
475 | #ifdef CONFIG_PM_SLEEP | ||
476 | |||
477 | /** | ||
478 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. | ||
479 | * @genpd: PM domain to power off, if possible. | ||
480 | * | ||
481 | * Check if the given PM domain can be powered off (during system suspend or | ||
482 | * hibernation) and do that if so. Also, in that case propagate to its parent. | ||
483 | * | ||
484 | * This function is only called in "noirq" stages of system power transitions, | ||
485 | * so it need not acquire locks (all of the "noirq" callbacks are executed | ||
486 | * sequentially, so it is guaranteed that it will never run twice in parallel). | ||
487 | */ | ||
488 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | ||
489 | { | ||
490 | struct generic_pm_domain *parent = genpd->parent; | ||
491 | |||
492 | if (genpd->status == GPD_STATE_POWER_OFF) | ||
493 | return; | ||
494 | |||
495 | if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) | ||
496 | return; | ||
497 | |||
498 | if (genpd->power_off) | ||
499 | genpd->power_off(genpd); | ||
500 | |||
501 | genpd->status = GPD_STATE_POWER_OFF; | ||
502 | if (parent) { | ||
503 | genpd_sd_counter_dec(parent); | ||
504 | pm_genpd_sync_poweroff(parent); | ||
505 | } | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * resume_needed - Check whether to resume a device before system suspend. | ||
510 | * @dev: Device to check. | ||
511 | * @genpd: PM domain the device belongs to. | ||
512 | * | ||
513 | * There are two cases in which a device that can wake up the system from sleep | ||
514 | * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled | ||
515 | * to wake up the system and it has to remain active for this purpose while the | ||
516 | * system is in the sleep state and (2) if the device is not enabled to wake up | ||
517 | * the system from sleep states and it generally doesn't generate wakeup signals | ||
518 | * by itself (those signals are generated on its behalf by other parts of the | ||
519 | * system). In the latter case it may be necessary to reconfigure the device's | ||
520 | * wakeup settings during system suspend, because it may have been set up to | ||
521 | * signal remote wakeup from the system's working state as needed by runtime PM. | ||
522 | * Return 'true' in either of the above cases. | ||
523 | */ | ||
524 | static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) | ||
525 | { | ||
526 | bool active_wakeup; | ||
527 | |||
528 | if (!device_can_wakeup(dev)) | ||
529 | return false; | ||
530 | |||
531 | active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); | ||
532 | return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * pm_genpd_prepare - Start power transition of a device in a PM domain. | ||
537 | * @dev: Device to start the transition of. | ||
538 | * | ||
539 | * Start a power transition of a device (during a system-wide power transition) | ||
540 | * under the assumption that its pm_domain field points to the domain member of | ||
541 | * an object of type struct generic_pm_domain representing a PM domain | ||
542 | * consisting of I/O devices. | ||
543 | */ | ||
544 | static int pm_genpd_prepare(struct device *dev) | ||
545 | { | ||
546 | struct generic_pm_domain *genpd; | ||
547 | int ret; | ||
548 | |||
549 | dev_dbg(dev, "%s()\n", __func__); | ||
550 | |||
551 | genpd = dev_to_genpd(dev); | ||
552 | if (IS_ERR(genpd)) | ||
553 | return -EINVAL; | ||
554 | |||
555 | /* | ||
556 | * If a wakeup request is pending for the device, it should be woken up | ||
557 | * at this point and a system wakeup event should be reported if it's | ||
558 | * set up to wake up the system from sleep states. | ||
559 | */ | ||
560 | pm_runtime_get_noresume(dev); | ||
561 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) | ||
562 | pm_wakeup_event(dev, 0); | ||
563 | |||
564 | if (pm_wakeup_pending()) { | ||
565 | pm_runtime_put_sync(dev); | ||
566 | return -EBUSY; | ||
567 | } | ||
568 | |||
569 | if (resume_needed(dev, genpd)) | ||
570 | pm_runtime_resume(dev); | ||
571 | |||
572 | genpd_acquire_lock(genpd); | ||
573 | |||
574 | if (genpd->prepared_count++ == 0) | ||
575 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; | ||
576 | |||
577 | genpd_release_lock(genpd); | ||
578 | |||
579 | if (genpd->suspend_power_off) { | ||
580 | pm_runtime_put_noidle(dev); | ||
581 | return 0; | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, | ||
586 | * so pm_genpd_poweron() will return immediately, but if the device | ||
587 | * is suspended (e.g. it's been stopped by .stop_device()), we need | ||
588 | * to make it operational. | ||
589 | */ | ||
590 | pm_runtime_resume(dev); | ||
591 | __pm_runtime_disable(dev, false); | ||
592 | |||
593 | ret = pm_generic_prepare(dev); | ||
594 | if (ret) { | ||
595 | mutex_lock(&genpd->lock); | ||
596 | |||
597 | if (--genpd->prepared_count == 0) | ||
598 | genpd->suspend_power_off = false; | ||
599 | |||
600 | mutex_unlock(&genpd->lock); | ||
601 | pm_runtime_enable(dev); | ||
602 | } | ||
603 | |||
604 | pm_runtime_put_sync(dev); | ||
605 | return ret; | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. | ||
610 | * @dev: Device to suspend. | ||
611 | * | ||
612 | * Suspend a device under the assumption that its pm_domain field points to the | ||
613 | * domain member of an object of type struct generic_pm_domain representing | ||
614 | * a PM domain consisting of I/O devices. | ||
615 | */ | ||
616 | static int pm_genpd_suspend(struct device *dev) | ||
617 | { | ||
618 | struct generic_pm_domain *genpd; | ||
619 | |||
620 | dev_dbg(dev, "%s()\n", __func__); | ||
621 | |||
622 | genpd = dev_to_genpd(dev); | ||
623 | if (IS_ERR(genpd)) | ||
624 | return -EINVAL; | ||
625 | |||
626 | return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); | ||
627 | } | ||
628 | |||
629 | /** | ||
630 | * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. | ||
631 | * @dev: Device to suspend. | ||
632 | * | ||
633 | * Carry out a late suspend of a device under the assumption that its | ||
634 | * pm_domain field points to the domain member of an object of type | ||
635 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | ||
636 | */ | ||
637 | static int pm_genpd_suspend_noirq(struct device *dev) | ||
638 | { | ||
639 | struct generic_pm_domain *genpd; | ||
640 | int ret; | ||
641 | |||
642 | dev_dbg(dev, "%s()\n", __func__); | ||
643 | |||
644 | genpd = dev_to_genpd(dev); | ||
645 | if (IS_ERR(genpd)) | ||
646 | return -EINVAL; | ||
647 | |||
648 | if (genpd->suspend_power_off) | ||
649 | return 0; | ||
650 | |||
651 | ret = pm_generic_suspend_noirq(dev); | ||
652 | if (ret) | ||
653 | return ret; | ||
654 | |||
655 | if (device_may_wakeup(dev) | ||
656 | && genpd->active_wakeup && genpd->active_wakeup(dev)) | ||
657 | return 0; | ||
658 | |||
659 | if (genpd->stop_device) | ||
660 | genpd->stop_device(dev); | ||
661 | |||
662 | /* | ||
663 | * Since all of the "noirq" callbacks are executed sequentially, it is | ||
664 | * guaranteed that this function will never run twice in parallel for | ||
665 | * the same PM domain, so it is not necessary to use locking here. | ||
666 | */ | ||
667 | genpd->suspended_count++; | ||
668 | pm_genpd_sync_poweroff(genpd); | ||
669 | |||
670 | return 0; | ||
671 | } | ||
672 | |||
673 | /** | ||
674 | * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. | ||
675 | * @dev: Device to resume. | ||
676 | * | ||
677 | * Carry out an early resume of a device under the assumption that its | ||
678 | * pm_domain field points to the domain member of an object of type | ||
679 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
680 | * devices. | ||
681 | */ | ||
682 | static int pm_genpd_resume_noirq(struct device *dev) | ||
683 | { | ||
684 | struct generic_pm_domain *genpd; | ||
685 | |||
686 | dev_dbg(dev, "%s()\n", __func__); | ||
687 | |||
688 | genpd = dev_to_genpd(dev); | ||
689 | if (IS_ERR(genpd)) | ||
690 | return -EINVAL; | ||
691 | |||
692 | if (genpd->suspend_power_off) | ||
693 | return 0; | ||
694 | |||
695 | /* | ||
696 | * Since all of the "noirq" callbacks are executed sequentially, it is | ||
697 | * guaranteed that this function will never run twice in parallel for | ||
698 | * the same PM domain, so it is not necessary to use locking here. | ||
699 | */ | ||
700 | pm_genpd_poweron(genpd); | ||
701 | genpd->suspended_count--; | ||
702 | if (genpd->start_device) | ||
703 | genpd->start_device(dev); | ||
704 | |||
705 | return pm_generic_resume_noirq(dev); | ||
706 | } | ||
707 | |||
708 | /** | ||
709 | * pm_genpd_resume - Resume a device belonging to an I/O power domain. | ||
710 | * @dev: Device to resume. | ||
711 | * | ||
712 | * Resume a device under the assumption that its pm_domain field points to the | ||
713 | * domain member of an object of type struct generic_pm_domain representing | ||
714 | * a power domain consisting of I/O devices. | ||
715 | */ | ||
716 | static int pm_genpd_resume(struct device *dev) | ||
717 | { | ||
718 | struct generic_pm_domain *genpd; | ||
719 | |||
720 | dev_dbg(dev, "%s()\n", __func__); | ||
721 | |||
722 | genpd = dev_to_genpd(dev); | ||
723 | if (IS_ERR(genpd)) | ||
724 | return -EINVAL; | ||
725 | |||
726 | return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); | ||
727 | } | ||
728 | |||
729 | /** | ||
730 | * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. | ||
731 | * @dev: Device to freeze. | ||
732 | * | ||
733 | * Freeze a device under the assumption that its pm_domain field points to the | ||
734 | * domain member of an object of type struct generic_pm_domain representing | ||
735 | * a power domain consisting of I/O devices. | ||
736 | */ | ||
737 | static int pm_genpd_freeze(struct device *dev) | ||
738 | { | ||
739 | struct generic_pm_domain *genpd; | ||
740 | |||
741 | dev_dbg(dev, "%s()\n", __func__); | ||
742 | |||
743 | genpd = dev_to_genpd(dev); | ||
744 | if (IS_ERR(genpd)) | ||
745 | return -EINVAL; | ||
746 | |||
747 | return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); | ||
748 | } | ||
749 | |||
750 | /** | ||
751 | * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. | ||
752 | * @dev: Device to freeze. | ||
753 | * | ||
754 | * Carry out a late freeze of a device under the assumption that its | ||
755 | * pm_domain field points to the domain member of an object of type | ||
756 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
757 | * devices. | ||
758 | */ | ||
759 | static int pm_genpd_freeze_noirq(struct device *dev) | ||
760 | { | ||
761 | struct generic_pm_domain *genpd; | ||
762 | int ret; | ||
763 | |||
764 | dev_dbg(dev, "%s()\n", __func__); | ||
765 | |||
766 | genpd = dev_to_genpd(dev); | ||
767 | if (IS_ERR(genpd)) | ||
768 | return -EINVAL; | ||
769 | |||
770 | if (genpd->suspend_power_off) | ||
771 | return 0; | ||
772 | |||
773 | ret = pm_generic_freeze_noirq(dev); | ||
774 | if (ret) | ||
775 | return ret; | ||
776 | |||
777 | if (genpd->stop_device) | ||
778 | genpd->stop_device(dev); | ||
779 | |||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | /** | ||
784 | * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. | ||
785 | * @dev: Device to thaw. | ||
786 | * | ||
787 | * Carry out an early thaw of a device under the assumption that its | ||
788 | * pm_domain field points to the domain member of an object of type | ||
789 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
790 | * devices. | ||
791 | */ | ||
792 | static int pm_genpd_thaw_noirq(struct device *dev) | ||
793 | { | ||
794 | struct generic_pm_domain *genpd; | ||
795 | |||
796 | dev_dbg(dev, "%s()\n", __func__); | ||
797 | |||
798 | genpd = dev_to_genpd(dev); | ||
799 | if (IS_ERR(genpd)) | ||
800 | return -EINVAL; | ||
801 | |||
802 | if (genpd->suspend_power_off) | ||
803 | return 0; | ||
804 | |||
805 | if (genpd->start_device) | ||
806 | genpd->start_device(dev); | ||
807 | |||
808 | return pm_generic_thaw_noirq(dev); | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. | ||
813 | * @dev: Device to thaw. | ||
814 | * | ||
815 | * Thaw a device under the assumption that its pm_domain field points to the | ||
816 | * domain member of an object of type struct generic_pm_domain representing | ||
817 | * a power domain consisting of I/O devices. | ||
818 | */ | ||
819 | static int pm_genpd_thaw(struct device *dev) | ||
820 | { | ||
821 | struct generic_pm_domain *genpd; | ||
822 | |||
823 | dev_dbg(dev, "%s()\n", __func__); | ||
824 | |||
825 | genpd = dev_to_genpd(dev); | ||
826 | if (IS_ERR(genpd)) | ||
827 | return -EINVAL; | ||
828 | |||
829 | return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. | ||
834 | * @dev: Device to suspend. | ||
835 | * | ||
836 | * Power off a device under the assumption that its pm_domain field points to | ||
837 | * the domain member of an object of type struct generic_pm_domain representing | ||
838 | * a PM domain consisting of I/O devices. | ||
839 | */ | ||
840 | static int pm_genpd_dev_poweroff(struct device *dev) | ||
841 | { | ||
842 | struct generic_pm_domain *genpd; | ||
843 | |||
844 | dev_dbg(dev, "%s()\n", __func__); | ||
845 | |||
846 | genpd = dev_to_genpd(dev); | ||
847 | if (IS_ERR(genpd)) | ||
848 | return -EINVAL; | ||
849 | |||
850 | return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. | ||
855 | * @dev: Device to suspend. | ||
856 | * | ||
857 | * Carry out a late powering off of a device under the assumption that its | ||
858 | * pm_domain field points to the domain member of an object of type | ||
859 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | ||
860 | */ | ||
861 | static int pm_genpd_dev_poweroff_noirq(struct device *dev) | ||
862 | { | ||
863 | struct generic_pm_domain *genpd; | ||
864 | int ret; | ||
865 | |||
866 | dev_dbg(dev, "%s()\n", __func__); | ||
867 | |||
868 | genpd = dev_to_genpd(dev); | ||
869 | if (IS_ERR(genpd)) | ||
870 | return -EINVAL; | ||
871 | |||
872 | if (genpd->suspend_power_off) | ||
873 | return 0; | ||
874 | |||
875 | ret = pm_generic_poweroff_noirq(dev); | ||
876 | if (ret) | ||
877 | return ret; | ||
878 | |||
879 | if (device_may_wakeup(dev) | ||
880 | && genpd->active_wakeup && genpd->active_wakeup(dev)) | ||
881 | return 0; | ||
882 | |||
883 | if (genpd->stop_device) | ||
884 | genpd->stop_device(dev); | ||
885 | |||
886 | /* | ||
887 | * Since all of the "noirq" callbacks are executed sequentially, it is | ||
888 | * guaranteed that this function will never run twice in parallel for | ||
889 | * the same PM domain, so it is not necessary to use locking here. | ||
890 | */ | ||
891 | genpd->suspended_count++; | ||
892 | pm_genpd_sync_poweroff(genpd); | ||
893 | |||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | /** | ||
898 | * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. | ||
899 | * @dev: Device to resume. | ||
900 | * | ||
901 | * Carry out an early restore of a device under the assumption that its | ||
902 | * pm_domain field points to the domain member of an object of type | ||
903 | * struct generic_pm_domain representing a power domain consisting of I/O | ||
904 | * devices. | ||
905 | */ | ||
906 | static int pm_genpd_restore_noirq(struct device *dev) | ||
907 | { | ||
908 | struct generic_pm_domain *genpd; | ||
909 | |||
910 | dev_dbg(dev, "%s()\n", __func__); | ||
911 | |||
912 | genpd = dev_to_genpd(dev); | ||
913 | if (IS_ERR(genpd)) | ||
914 | return -EINVAL; | ||
915 | |||
916 | /* | ||
917 | * Since all of the "noirq" callbacks are executed sequentially, it is | ||
918 | * guaranteed that this function will never run twice in parallel for | ||
919 | * the same PM domain, so it is not necessary to use locking here. | ||
920 | */ | ||
921 | genpd->status = GPD_STATE_POWER_OFF; | ||
922 | if (genpd->suspend_power_off) { | ||
923 | /* | ||
924 | * The boot kernel might put the domain into the power on state, | ||
925 | * so make sure it really is powered off. | ||
926 | */ | ||
927 | if (genpd->power_off) | ||
928 | genpd->power_off(genpd); | ||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | pm_genpd_poweron(genpd); | ||
933 | genpd->suspended_count--; | ||
934 | if (genpd->start_device) | ||
935 | genpd->start_device(dev); | ||
936 | |||
937 | return pm_generic_restore_noirq(dev); | ||
938 | } | ||
939 | |||
940 | /** | ||
941 | * pm_genpd_restore - Restore a device belonging to an I/O power domain. | ||
942 | * @dev: Device to resume. | ||
943 | * | ||
944 | * Restore a device under the assumption that its pm_domain field points to the | ||
945 | * domain member of an object of type struct generic_pm_domain representing | ||
946 | * a power domain consisting of I/O devices. | ||
947 | */ | ||
948 | static int pm_genpd_restore(struct device *dev) | ||
949 | { | ||
950 | struct generic_pm_domain *genpd; | ||
951 | |||
952 | dev_dbg(dev, "%s()\n", __func__); | ||
953 | |||
954 | genpd = dev_to_genpd(dev); | ||
955 | if (IS_ERR(genpd)) | ||
956 | return -EINVAL; | ||
957 | |||
958 | return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); | ||
959 | } | ||
960 | |||
961 | /** | ||
962 | * pm_genpd_complete - Complete power transition of a device in a power domain. | ||
963 | * @dev: Device to complete the transition of. | ||
964 | * | ||
965 | * Complete a power transition of a device (during a system-wide power | ||
966 | * transition) under the assumption that its pm_domain field points to the | ||
967 | * domain member of an object of type struct generic_pm_domain representing | ||
968 | * a power domain consisting of I/O devices. | ||
969 | */ | ||
970 | static void pm_genpd_complete(struct device *dev) | ||
971 | { | ||
972 | struct generic_pm_domain *genpd; | ||
973 | bool run_complete; | ||
974 | |||
975 | dev_dbg(dev, "%s()\n", __func__); | ||
976 | |||
977 | genpd = dev_to_genpd(dev); | ||
978 | if (IS_ERR(genpd)) | ||
979 | return; | ||
980 | |||
981 | mutex_lock(&genpd->lock); | ||
982 | |||
983 | run_complete = !genpd->suspend_power_off; | ||
984 | if (--genpd->prepared_count == 0) | ||
985 | genpd->suspend_power_off = false; | ||
986 | |||
987 | mutex_unlock(&genpd->lock); | ||
988 | |||
989 | if (run_complete) { | ||
990 | pm_generic_complete(dev); | ||
991 | pm_runtime_set_active(dev); | ||
992 | pm_runtime_enable(dev); | ||
993 | pm_runtime_idle(dev); | ||
994 | } | ||
995 | } | ||
996 | |||
997 | #else | ||
998 | |||
999 | #define pm_genpd_prepare NULL | ||
1000 | #define pm_genpd_suspend NULL | ||
1001 | #define pm_genpd_suspend_noirq NULL | ||
1002 | #define pm_genpd_resume_noirq NULL | ||
1003 | #define pm_genpd_resume NULL | ||
1004 | #define pm_genpd_freeze NULL | ||
1005 | #define pm_genpd_freeze_noirq NULL | ||
1006 | #define pm_genpd_thaw_noirq NULL | ||
1007 | #define pm_genpd_thaw NULL | ||
1008 | #define pm_genpd_dev_poweroff_noirq NULL | ||
1009 | #define pm_genpd_dev_poweroff NULL | ||
1010 | #define pm_genpd_restore_noirq NULL | ||
1011 | #define pm_genpd_restore NULL | ||
1012 | #define pm_genpd_complete NULL | ||
1013 | |||
1014 | #endif /* CONFIG_PM_SLEEP */ | ||
1015 | |||
1016 | /** | ||
1017 | * pm_genpd_add_device - Add a device to an I/O PM domain. | ||
1018 | * @genpd: PM domain to add the device to. | ||
1019 | * @dev: Device to be added. | ||
1020 | */ | ||
1021 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | ||
1022 | { | ||
1023 | struct dev_list_entry *dle; | ||
1024 | int ret = 0; | ||
1025 | |||
1026 | dev_dbg(dev, "%s()\n", __func__); | ||
1027 | |||
1028 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | ||
1029 | return -EINVAL; | ||
1030 | |||
1031 | genpd_acquire_lock(genpd); | ||
1032 | |||
1033 | if (genpd->status == GPD_STATE_POWER_OFF) { | ||
1034 | ret = -EINVAL; | ||
1035 | goto out; | ||
1036 | } | ||
1037 | |||
1038 | if (genpd->prepared_count > 0) { | ||
1039 | ret = -EAGAIN; | ||
1040 | goto out; | ||
1041 | } | ||
1042 | |||
1043 | list_for_each_entry(dle, &genpd->dev_list, node) | ||
1044 | if (dle->dev == dev) { | ||
1045 | ret = -EINVAL; | ||
1046 | goto out; | ||
1047 | } | ||
1048 | |||
1049 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); | ||
1050 | if (!dle) { | ||
1051 | ret = -ENOMEM; | ||
1052 | goto out; | ||
1053 | } | ||
1054 | |||
1055 | dle->dev = dev; | ||
1056 | dle->need_restore = false; | ||
1057 | list_add_tail(&dle->node, &genpd->dev_list); | ||
1058 | genpd->device_count++; | ||
1059 | |||
1060 | spin_lock_irq(&dev->power.lock); | ||
1061 | dev->pm_domain = &genpd->domain; | ||
1062 | spin_unlock_irq(&dev->power.lock); | ||
1063 | |||
1064 | out: | ||
1065 | genpd_release_lock(genpd); | ||
1066 | |||
1067 | return ret; | ||
1068 | } | ||
1069 | |||
1070 | /** | ||
1071 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. | ||
1072 | * @genpd: PM domain to remove the device from. | ||
1073 | * @dev: Device to be removed. | ||
1074 | */ | ||
1075 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | ||
1076 | struct device *dev) | ||
1077 | { | ||
1078 | struct dev_list_entry *dle; | ||
1079 | int ret = -EINVAL; | ||
1080 | |||
1081 | dev_dbg(dev, "%s()\n", __func__); | ||
1082 | |||
1083 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | ||
1084 | return -EINVAL; | ||
1085 | |||
1086 | genpd_acquire_lock(genpd); | ||
1087 | |||
1088 | if (genpd->prepared_count > 0) { | ||
1089 | ret = -EAGAIN; | ||
1090 | goto out; | ||
1091 | } | ||
1092 | |||
1093 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
1094 | if (dle->dev != dev) | ||
1095 | continue; | ||
1096 | |||
1097 | spin_lock_irq(&dev->power.lock); | ||
1098 | dev->pm_domain = NULL; | ||
1099 | spin_unlock_irq(&dev->power.lock); | ||
1100 | |||
1101 | genpd->device_count--; | ||
1102 | list_del(&dle->node); | ||
1103 | kfree(dle); | ||
1104 | |||
1105 | ret = 0; | ||
1106 | break; | ||
1107 | } | ||
1108 | |||
1109 | out: | ||
1110 | genpd_release_lock(genpd); | ||
1111 | |||
1112 | return ret; | ||
1113 | } | ||
1114 | |||
1115 | /** | ||
1116 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | ||
1117 | * @genpd: Master PM domain to add the subdomain to. | ||
1118 | * @new_subdomain: Subdomain to be added. | ||
1119 | */ | ||
1120 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | ||
1121 | struct generic_pm_domain *new_subdomain) | ||
1122 | { | ||
1123 | struct generic_pm_domain *subdomain; | ||
1124 | int ret = 0; | ||
1125 | |||
1126 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) | ||
1127 | return -EINVAL; | ||
1128 | |||
1129 | start: | ||
1130 | genpd_acquire_lock(genpd); | ||
1131 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); | ||
1132 | |||
1133 | if (new_subdomain->status != GPD_STATE_POWER_OFF | ||
1134 | && new_subdomain->status != GPD_STATE_ACTIVE) { | ||
1135 | mutex_unlock(&new_subdomain->lock); | ||
1136 | genpd_release_lock(genpd); | ||
1137 | goto start; | ||
1138 | } | ||
1139 | |||
1140 | if (genpd->status == GPD_STATE_POWER_OFF | ||
1141 | && new_subdomain->status != GPD_STATE_POWER_OFF) { | ||
1142 | ret = -EINVAL; | ||
1143 | goto out; | ||
1144 | } | ||
1145 | |||
1146 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | ||
1147 | if (subdomain == new_subdomain) { | ||
1148 | ret = -EINVAL; | ||
1149 | goto out; | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); | ||
1154 | new_subdomain->parent = genpd; | ||
1155 | if (new_subdomain->status != GPD_STATE_POWER_OFF) | ||
1156 | genpd->sd_count++; | ||
1157 | |||
1158 | out: | ||
1159 | mutex_unlock(&new_subdomain->lock); | ||
1160 | genpd_release_lock(genpd); | ||
1161 | |||
1162 | return ret; | ||
1163 | } | ||
1164 | |||
1165 | /** | ||
1166 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | ||
1167 | * @genpd: Master PM domain to remove the subdomain from. | ||
1168 | * @target: Subdomain to be removed. | ||
1169 | */ | ||
1170 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | ||
1171 | struct generic_pm_domain *target) | ||
1172 | { | ||
1173 | struct generic_pm_domain *subdomain; | ||
1174 | int ret = -EINVAL; | ||
1175 | |||
1176 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) | ||
1177 | return -EINVAL; | ||
1178 | |||
1179 | start: | ||
1180 | genpd_acquire_lock(genpd); | ||
1181 | |||
1182 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | ||
1183 | if (subdomain != target) | ||
1184 | continue; | ||
1185 | |||
1186 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | ||
1187 | |||
1188 | if (subdomain->status != GPD_STATE_POWER_OFF | ||
1189 | && subdomain->status != GPD_STATE_ACTIVE) { | ||
1190 | mutex_unlock(&subdomain->lock); | ||
1191 | genpd_release_lock(genpd); | ||
1192 | goto start; | ||
1193 | } | ||
1194 | |||
1195 | list_del(&subdomain->sd_node); | ||
1196 | subdomain->parent = NULL; | ||
1197 | if (subdomain->status != GPD_STATE_POWER_OFF) | ||
1198 | genpd_sd_counter_dec(genpd); | ||
1199 | |||
1200 | mutex_unlock(&subdomain->lock); | ||
1201 | |||
1202 | ret = 0; | ||
1203 | break; | ||
1204 | } | ||
1205 | |||
1206 | genpd_release_lock(genpd); | ||
1207 | |||
1208 | return ret; | ||
1209 | } | ||
1210 | |||
1211 | /** | ||
1212 | * pm_genpd_init - Initialize a generic I/O PM domain object. | ||
1213 | * @genpd: PM domain object to initialize. | ||
1214 | * @gov: PM domain governor to associate with the domain (may be NULL). | ||
1215 | * @is_off: Initial power state of the domain (true if it starts powered off). | ||
1216 | */ | ||
1217 | void pm_genpd_init(struct generic_pm_domain *genpd, | ||
1218 | struct dev_power_governor *gov, bool is_off) | ||
1219 | { | ||
1220 | if (IS_ERR_OR_NULL(genpd)) | ||
1221 | return; | ||
1222 | |||
1223 | INIT_LIST_HEAD(&genpd->sd_node); | ||
1224 | genpd->parent = NULL; | ||
1225 | INIT_LIST_HEAD(&genpd->dev_list); | ||
1226 | INIT_LIST_HEAD(&genpd->sd_list); | ||
1227 | mutex_init(&genpd->lock); | ||
1228 | genpd->gov = gov; | ||
1229 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | ||
1230 | genpd->in_progress = 0; | ||
1231 | genpd->sd_count = 0; | ||
1232 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | ||
1233 | init_waitqueue_head(&genpd->status_wait_queue); | ||
1234 | genpd->poweroff_task = NULL; | ||
1235 | genpd->resume_count = 0; | ||
1236 | genpd->device_count = 0; | ||
1237 | genpd->suspended_count = 0; | ||
1238 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; | ||
1239 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; | ||
1240 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; | ||
1241 | genpd->domain.ops.prepare = pm_genpd_prepare; | ||
1242 | genpd->domain.ops.suspend = pm_genpd_suspend; | ||
1243 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; | ||
1244 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; | ||
1245 | genpd->domain.ops.resume = pm_genpd_resume; | ||
1246 | genpd->domain.ops.freeze = pm_genpd_freeze; | ||
1247 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; | ||
1248 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; | ||
1249 | genpd->domain.ops.thaw = pm_genpd_thaw; | ||
1250 | genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; | ||
1251 | genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; | ||
1252 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; | ||
1253 | genpd->domain.ops.restore = pm_genpd_restore; | ||
1254 | genpd->domain.ops.complete = pm_genpd_complete; | ||
1255 | mutex_lock(&gpd_list_lock); | ||
1256 | list_add(&genpd->gpd_list_node, &gpd_list); | ||
1257 | mutex_unlock(&gpd_list_lock); | ||
1258 | } | ||
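/*
 * Illustrative hierarchy setup (hypothetical domains, not part of this file):
 * initialize two domains and link one as a subdomain of the other, so the
 * parent is kept powered as long as the child is on (via sd_count).
 */
static struct generic_pm_domain my_parent_pd;
static struct generic_pm_domain my_child_pd;

static int __init my_domains_init(void)
{
	int ret;

	pm_genpd_init(&my_parent_pd, NULL, false);	/* both start powered on */
	pm_genpd_init(&my_child_pd, NULL, false);

	ret = pm_genpd_add_subdomain(&my_parent_pd, &my_child_pd);
	if (ret)
		pr_err("PM domains: failed to link subdomain: %d\n", ret);

	return ret;
}
core_initcall(my_domains_init);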
1259 | |||
1260 | /** | ||
1261 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. | ||
1262 | */ | ||
1263 | void pm_genpd_poweroff_unused(void) | ||
1264 | { | ||
1265 | struct generic_pm_domain *genpd; | ||
1266 | |||
1267 | mutex_lock(&gpd_list_lock); | ||
1268 | |||
1269 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) | ||
1270 | genpd_queue_power_off_work(genpd); | ||
1271 | |||
1272 | mutex_unlock(&gpd_list_lock); | ||
1273 | } | ||