author:    Rafael J. Wysocki <rjw@sisk.pl>  2011-07-11 18:39:29 -0400
committer: Rafael J. Wysocki <rjw@sisk.pl>  2011-07-11 18:39:29 -0400
commit:    17b75eca7683d4942f4d8d00563fd15f37c39589
tree:      539ee88f2c4008eec206ca98cec54263deeae708
parent:    b6c10c84665912985d0bf9b6ae8ce19fc4298d9f
PM / Domains: Do not execute device callbacks under locks
Currently, the .start_device() and .stop_device() callbacks from struct generic_pm_domain() as well as the device drivers' runtime PM callbacks used by the generic PM domains code are executed under the generic PM domain lock. This, unfortunately, is prone to deadlocks, for example if a device and its parent are both members of the same PM domain. For this reason, it would be better if the PM domains code didn't execute device callbacks under the lock.

Rework the locking in the generic PM domains code so that the lock is dropped for the execution of device callbacks. To this end, introduce PM domain states reflecting the current status of a PM domain, such that the PM domain lock cannot be acquired while the status is GPD_STATE_BUSY. Make threads attempting to acquire a PM domain's lock wait until the status changes to either GPD_STATE_ACTIVE or GPD_STATE_POWER_OFF.

This change by itself doesn't fix the deadlock problem mentioned above, but the mechanism it introduces will be used for that purpose by a subsequent patch.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
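The locking scheme described above is implemented by the genpd_acquire_lock()/genpd_release_lock() helpers visible in the diff below; a condensed sketch of the pattern follows. It assumes the status and status_wait_queue fields that the companion include/linux/pm_domain.h change adds to struct generic_pm_domain (that header change is outside this view, which is limited to drivers/base/power), and example_busy_section() is a hypothetical caller shown only to illustrate how the BUSY state is entered and left.

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pm_domain.h>

/* Block until the domain leaves GPD_STATE_BUSY, then return with its lock held. */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active or the
	 * power off state; the lock is dropped while sleeping so that the
	 * thread that made the domain busy can make progress and wake us up.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_BUSY)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

/*
 * Hypothetical caller, for illustration only: mark the domain busy, drop
 * the lock around the device callbacks, then restore a stable state and
 * wake up anyone blocked in genpd_acquire_lock().
 */
static void example_busy_section(struct generic_pm_domain *genpd)
{
	genpd_acquire_lock(genpd);
	genpd->status = GPD_STATE_BUSY;

	mutex_unlock(&genpd->lock);
	/* ... run .start_device()/.stop_device() or driver PM callbacks ... */
	mutex_lock(&genpd->lock);

	genpd->status = GPD_STATE_ACTIVE;
	wake_up_all(&genpd->status_wait_queue);
	genpd_release_lock(genpd);
}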
Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/domain.c | 249
1 file changed, 178 insertions(+), 71 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a20d9302fcd..d06f3bb80b2e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -13,6 +13,8 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
 
 #ifdef CONFIG_PM
 
@@ -30,6 +32,34 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 	genpd->sd_count--;
 }
 
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+	DEFINE_WAIT(wait);
+
+	mutex_lock(&genpd->lock);
+	/*
+	 * Wait for the domain to transition into either the active,
+	 * or the power off state.
+	 */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_BUSY)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
+		mutex_lock(&genpd->lock);
+	}
+	finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+	mutex_unlock(&genpd->lock);
+}
+
 /**
  * pm_genpd_poweron - Restore power to a given PM domain and its parents.
  * @genpd: PM domain to power up.
@@ -39,22 +69,50 @@ static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
  */
 int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
+	struct generic_pm_domain *parent = genpd->parent;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
  start:
-	if (genpd->parent)
-		mutex_lock(&genpd->parent->lock);
-	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	if (parent) {
+		mutex_lock(&parent->lock);
+		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		mutex_lock(&genpd->lock);
+	}
+	/*
+	 * Wait for the domain to transition into either the active,
+	 * or the power off state.
+	 */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_BUSY)
+			break;
+		mutex_unlock(&genpd->lock);
+		if (parent)
+			mutex_unlock(&parent->lock);
+
+		schedule();
 
-	if (!genpd->power_is_off
+		if (parent) {
+			mutex_lock(&parent->lock);
+			mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+		} else {
+			mutex_lock(&genpd->lock);
+		}
+	}
+	finish_wait(&genpd->status_wait_queue, &wait);
+
+	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
 		goto out;
 
-	if (genpd->parent && genpd->parent->power_is_off) {
+	if (parent && parent->status != GPD_STATE_ACTIVE) {
 		mutex_unlock(&genpd->lock);
-		mutex_unlock(&genpd->parent->lock);
+		mutex_unlock(&parent->lock);
 
-		ret = pm_genpd_poweron(genpd->parent);
+		ret = pm_genpd_poweron(parent);
 		if (ret)
 			return ret;
 
@@ -67,14 +125,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 		goto out;
 	}
 
-	genpd->power_is_off = false;
-	if (genpd->parent)
-		genpd->parent->sd_count++;
+	genpd->status = GPD_STATE_ACTIVE;
+	if (parent)
+		parent->sd_count++;
 
  out:
 	mutex_unlock(&genpd->lock);
-	if (genpd->parent)
-		mutex_unlock(&genpd->parent->lock);
+	if (parent)
+		mutex_unlock(&parent->lock);
 
 	return ret;
 }
@@ -90,6 +148,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
  */
 static int __pm_genpd_save_device(struct dev_list_entry *dle,
 				  struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct device *dev = dle->dev;
 	struct device_driver *drv = dev->driver;
@@ -98,6 +157,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	if (dle->need_restore)
 		return 0;
 
+	mutex_unlock(&genpd->lock);
+
 	if (drv && drv->pm && drv->pm->runtime_suspend) {
 		if (genpd->start_device)
 			genpd->start_device(dev);
@@ -108,6 +169,8 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 			genpd->stop_device(dev);
 	}
 
+	mutex_lock(&genpd->lock);
+
 	if (!ret)
 		dle->need_restore = true;
 
@@ -121,6 +184,7 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
  */
 static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 				      struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct device *dev = dle->dev;
 	struct device_driver *drv = dev->driver;
@@ -128,6 +192,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 	if (!dle->need_restore)
 		return;
 
+	mutex_unlock(&genpd->lock);
+
 	if (drv && drv->pm && drv->pm->runtime_resume) {
 		if (genpd->start_device)
 			genpd->start_device(dev);
@@ -138,6 +204,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 			genpd->stop_device(dev);
 	}
 
+	mutex_lock(&genpd->lock);
+
 	dle->need_restore = false;
 }
 
@@ -150,13 +218,14 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  * the @genpd's devices' drivers and remove power from @genpd.
  */
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct generic_pm_domain *parent;
 	struct dev_list_entry *dle;
 	unsigned int not_suspended;
 	int ret;
 
-	if (genpd->power_is_off || genpd->prepared_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0)
 		return 0;
 
 	if (genpd->sd_count > 0)
@@ -175,22 +244,36 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		return -EAGAIN;
 	}
 
+	genpd->status = GPD_STATE_BUSY;
+
 	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
 		ret = __pm_genpd_save_device(dle, genpd);
 		if (ret)
 			goto err_dev;
 	}
 
+	mutex_unlock(&genpd->lock);
+
+	parent = genpd->parent;
+	if (parent) {
+		genpd_acquire_lock(parent);
+		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		mutex_lock(&genpd->lock);
+	}
+
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
-	genpd->power_is_off = true;
+	genpd->status = GPD_STATE_POWER_OFF;
+	wake_up_all(&genpd->status_wait_queue);
 
-	parent = genpd->parent;
 	if (parent) {
 		genpd_sd_counter_dec(parent);
 		if (parent->sd_count == 0)
 			queue_work(pm_wq, &parent->power_off_work);
+
+		genpd_release_lock(parent);
 	}
 
 	return 0;
@@ -199,6 +282,9 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	list_for_each_entry_continue(dle, &genpd->dev_list, node)
 		__pm_genpd_restore_device(dle, genpd);
 
+	genpd->status = GPD_STATE_ACTIVE;
+	wake_up_all(&genpd->status_wait_queue);
+
 	return ret;
 }
 
@@ -212,13 +298,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
-	if (genpd->parent)
-		mutex_lock(&genpd->parent->lock);
-	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	genpd_acquire_lock(genpd);
 	pm_genpd_poweroff(genpd);
-	mutex_unlock(&genpd->lock);
-	if (genpd->parent)
-		mutex_unlock(&genpd->parent->lock);
+	genpd_release_lock(genpd);
 }
 
 /**
@@ -239,23 +321,17 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->parent)
-		mutex_lock(&genpd->parent->lock);
-	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
-			goto out;
+			return ret;
 	}
+
+	genpd_acquire_lock(genpd);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
 	genpd->in_progress--;
-
- out:
-	mutex_unlock(&genpd->lock);
-	if (genpd->parent)
-		mutex_unlock(&genpd->parent->lock);
+	genpd_release_lock(genpd);
 
 	return 0;
 }
@@ -276,9 +352,6 @@ static void __pm_genpd_runtime_resume(struct device *dev,
 			break;
 		}
 	}
-
-	if (genpd->start_device)
-		genpd->start_device(dev);
 }
 
 /**
@@ -304,9 +377,15 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (ret)
 		return ret;
 
-	mutex_lock(&genpd->lock);
+	genpd_acquire_lock(genpd);
+	genpd->status = GPD_STATE_BUSY;
 	__pm_genpd_runtime_resume(dev, genpd);
-	mutex_unlock(&genpd->lock);
+	genpd->status = GPD_STATE_ACTIVE;
+	wake_up_all(&genpd->status_wait_queue);
+	genpd_release_lock(genpd);
+
+	if (genpd->start_device)
+		genpd->start_device(dev);
 
 	return 0;
 }
@@ -339,7 +418,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
 	struct generic_pm_domain *parent = genpd->parent;
 
-	if (genpd->power_is_off)
+	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
 	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
@@ -348,7 +427,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
-	genpd->power_is_off = true;
+	genpd->status = GPD_STATE_POWER_OFF;
 	if (parent) {
 		genpd_sd_counter_dec(parent);
 		pm_genpd_sync_poweroff(parent);
@@ -375,32 +454,41 @@ static int pm_genpd_prepare(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	/*
+	 * If a wakeup request is pending for the device, it should be woken up
+	 * at this point and a system wakeup event should be reported if it's
+	 * set up to wake up the system from sleep states.
+	 */
+	pm_runtime_get_noresume(dev);
+	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+		pm_wakeup_event(dev, 0);
+
+	if (pm_wakeup_pending()) {
+		pm_runtime_put_sync(dev);
+		return -EBUSY;
+	}
+
+	genpd_acquire_lock(genpd);
 
 	if (genpd->prepared_count++ == 0)
-		genpd->suspend_power_off = genpd->power_is_off;
+		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+	genpd_release_lock(genpd);
 
 	if (genpd->suspend_power_off) {
-		mutex_unlock(&genpd->lock);
+		pm_runtime_put_noidle(dev);
 		return 0;
 	}
 
 	/*
-	 * If the device is in the (runtime) "suspended" state, call
-	 * .start_device() for it, if defined.
-	 */
-	if (pm_runtime_suspended(dev))
-		__pm_genpd_runtime_resume(dev, genpd);
-
-	/*
-	 * Do not check if runtime resume is pending at this point, because it
-	 * has been taken care of already and if pm_genpd_poweron() ran at this
-	 * point as a result of the check, it would deadlock.
+	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+	 * so pm_genpd_poweron() will return immediately, but if the device
+	 * is suspended (e.g. it's been stopped by .stop_device()), we need
+	 * to make it operational.
 	 */
+	pm_runtime_resume(dev);
 	__pm_runtime_disable(dev, false);
 
-	mutex_unlock(&genpd->lock);
-
 	ret = pm_generic_prepare(dev);
 	if (ret) {
 		mutex_lock(&genpd->lock);
@@ -409,7 +497,10 @@ static int pm_genpd_prepare(struct device *dev)
 			genpd->suspend_power_off = false;
 
 		mutex_unlock(&genpd->lock);
+		pm_runtime_enable(dev);
 	}
+
+	pm_runtime_put_sync(dev);
 	return ret;
 }
 
@@ -726,7 +817,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
 	 */
-	genpd->power_is_off = true;
+	genpd->status = GPD_STATE_POWER_OFF;
 	if (genpd->suspend_power_off) {
 		/*
 		 * The boot kernel might put the domain into the power on state,
@@ -836,9 +927,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	genpd_acquire_lock(genpd);
 
-	if (genpd->power_is_off) {
+	if (genpd->status == GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -870,7 +961,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 	spin_unlock_irq(&dev->power.lock);
 
  out:
-	mutex_unlock(&genpd->lock);
+	genpd_release_lock(genpd);
 
 	return ret;
 }
@@ -891,7 +982,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	genpd_acquire_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -915,7 +1006,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	}
 
  out:
-	mutex_unlock(&genpd->lock);
+	genpd_release_lock(genpd);
 
 	return ret;
 }
@@ -934,9 +1025,19 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+ start:
+	genpd_acquire_lock(genpd);
+	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (genpd->power_is_off && !new_subdomain->power_is_off) {
+	if (new_subdomain->status != GPD_STATE_POWER_OFF
+	    && new_subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&new_subdomain->lock);
+		genpd_release_lock(genpd);
+		goto start;
+	}
+
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -948,17 +1049,14 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 		}
 	}
 
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
-
 	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
 	new_subdomain->parent = genpd;
-	if (!subdomain->power_is_off)
+	if (subdomain->status != GPD_STATE_POWER_OFF)
 		genpd->sd_count++;
 
-	mutex_unlock(&new_subdomain->lock);
-
  out:
-	mutex_unlock(&genpd->lock);
+	mutex_unlock(&new_subdomain->lock);
+	genpd_release_lock(genpd);
 
 	return ret;
 }
@@ -977,7 +1075,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+ start:
+	genpd_acquire_lock(genpd);
 
 	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
 		if (subdomain != target)
@@ -985,9 +1084,16 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
+		if (subdomain->status != GPD_STATE_POWER_OFF
+		    && subdomain->status != GPD_STATE_ACTIVE) {
+			mutex_unlock(&subdomain->lock);
+			genpd_release_lock(genpd);
+			goto start;
+		}
+
 		list_del(&subdomain->sd_node);
 		subdomain->parent = NULL;
-		if (!subdomain->power_is_off)
+		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
 		mutex_unlock(&subdomain->lock);
@@ -996,7 +1102,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			break;
 	}
 
-	mutex_unlock(&genpd->lock);
+	genpd_release_lock(genpd);
 
 	return ret;
 }
@@ -1022,7 +1128,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
 	genpd->sd_count = 0;
-	genpd->power_is_off = is_off;
+	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->device_count = 0;
 	genpd->suspended_count = 0;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;