aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorRafael J. Wysocki <rjw@sisk.pl>2011-07-11 18:39:36 -0400
committerRafael J. Wysocki <rjw@sisk.pl>2011-07-11 18:39:36 -0400
commitc6d22b37263607ba5aeeb2e11169fa65caa29bee (patch)
tree890d56f9559a93be0d09b77b54cf1b6a80fd34f4 /drivers
parent17b75eca7683d4942f4d8d00563fd15f37c39589 (diff)
PM / Domains: Allow callbacks to execute all runtime PM helpers
A deadlock may occur if one of the PM domains' .start_device() or .stop_device() callbacks or a device driver's .runtime_suspend() or .runtime_resume() callback executed by the core generic PM domain code uses a "wrong" runtime PM helper function. This happens, for example, if .runtime_resume() from one device's driver calls pm_runtime_resume() for another device in the same PM domain. A similar situation may take place if a device's parent is in the same PM domain, in which case the runtime PM framework may execute pm_genpd_runtime_resume() automatically for the parent (if it is suspended at the moment). This, of course, is undesirable, so the generic PM domains code should be modified to prevent it from happening. The runtime PM framework guarantees that pm_genpd_runtime_suspend() and pm_genpd_runtime_resume() won't be executed in parallel for the same device, so the generic PM domains code need not worry about those cases. Still, it needs to prevent the other possible race conditions between pm_genpd_runtime_suspend(), pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff() from happening and it needs to avoid deadlocks at the same time. To this end, modify the generic PM domains code to relax synchronization rules so that: * pm_genpd_poweron() doesn't wait for the PM domain status to change from GPD_STATE_BUSY. If it finds that the status is not GPD_STATE_POWER_OFF, it returns without powering the domain on (it may modify the status depending on the circumstances). * pm_genpd_poweroff() returns as soon as it finds that the PM domain's status changed from GPD_STATE_BUSY after it's released the PM domain's lock. * pm_genpd_runtime_suspend() doesn't wait for the PM domain status to change from GPD_STATE_BUSY after executing the domain's .stop_device() callback and executes pm_genpd_poweroff() only if pm_genpd_runtime_resume() is not executed in parallel. 
* pm_genpd_runtime_resume() doesn't wait for the PM domain status to change from GPD_STATE_BUSY after executing pm_genpd_poweron() and sets the domain's status to GPD_STATE_BUSY and increments its counter of resuming devices (introduced by this change) immediately after acquiring the lock. The counter of resuming devices is then decremented after executing __pm_genpd_runtime_resume() for the device and the domain's status is reset to GPD_STATE_ACTIVE (unless there are more resuming devices in the domain, in which case the status remains GPD_STATE_BUSY). This way, for example, if a device driver's .runtime_resume() callback executes pm_runtime_resume() for another device in the same PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume() invoked by the runtime PM framework will not block and it will see that there's nothing to do for it. Next, the PM domain's lock will be acquired without waiting for its status to change from GPD_STATE_BUSY and the device driver's .runtime_resume() callback will be executed. In turn, if pm_runtime_suspend() is executed by one device driver's .runtime_resume() callback for another device in the same PM domain, pm_genpd_poweroff() executed by pm_genpd_runtime_suspend() invoked by the runtime PM framework as a result will notice that one of the devices in the domain is being resumed, so it will return immediately. Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/base/power/domain.c144
1 files changed, 99 insertions, 45 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index d06f3bb80b2e..7e6cc8a5ce5b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -44,7 +44,8 @@ static void genpd_acquire_lock(struct generic_pm_domain *genpd)
44 for (;;) { 44 for (;;) {
45 prepare_to_wait(&genpd->status_wait_queue, &wait, 45 prepare_to_wait(&genpd->status_wait_queue, &wait,
46 TASK_UNINTERRUPTIBLE); 46 TASK_UNINTERRUPTIBLE);
47 if (genpd->status != GPD_STATE_BUSY) 47 if (genpd->status == GPD_STATE_ACTIVE
48 || genpd->status == GPD_STATE_POWER_OFF)
48 break; 49 break;
49 mutex_unlock(&genpd->lock); 50 mutex_unlock(&genpd->lock);
50 51
@@ -60,6 +61,12 @@ static void genpd_release_lock(struct generic_pm_domain *genpd)
60 mutex_unlock(&genpd->lock); 61 mutex_unlock(&genpd->lock);
61} 62}
62 63
64static void genpd_set_active(struct generic_pm_domain *genpd)
65{
66 if (genpd->resume_count == 0)
67 genpd->status = GPD_STATE_ACTIVE;
68}
69
63/** 70/**
64 * pm_genpd_poweron - Restore power to a given PM domain and its parents. 71 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
65 * @genpd: PM domain to power up. 72 * @genpd: PM domain to power up.
@@ -75,42 +82,24 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
75 82
76 start: 83 start:
77 if (parent) { 84 if (parent) {
78 mutex_lock(&parent->lock); 85 genpd_acquire_lock(parent);
79 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); 86 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
80 } else { 87 } else {
81 mutex_lock(&genpd->lock); 88 mutex_lock(&genpd->lock);
82 } 89 }
83 /*
84 * Wait for the domain to transition into either the active,
85 * or the power off state.
86 */
87 for (;;) {
88 prepare_to_wait(&genpd->status_wait_queue, &wait,
89 TASK_UNINTERRUPTIBLE);
90 if (genpd->status != GPD_STATE_BUSY)
91 break;
92 mutex_unlock(&genpd->lock);
93 if (parent)
94 mutex_unlock(&parent->lock);
95
96 schedule();
97
98 if (parent) {
99 mutex_lock(&parent->lock);
100 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
101 } else {
102 mutex_lock(&genpd->lock);
103 }
104 }
105 finish_wait(&genpd->status_wait_queue, &wait);
106 90
107 if (genpd->status == GPD_STATE_ACTIVE 91 if (genpd->status == GPD_STATE_ACTIVE
108 || (genpd->prepared_count > 0 && genpd->suspend_power_off)) 92 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
109 goto out; 93 goto out;
110 94
95 if (genpd->status != GPD_STATE_POWER_OFF) {
96 genpd_set_active(genpd);
97 goto out;
98 }
99
111 if (parent && parent->status != GPD_STATE_ACTIVE) { 100 if (parent && parent->status != GPD_STATE_ACTIVE) {
112 mutex_unlock(&genpd->lock); 101 mutex_unlock(&genpd->lock);
113 mutex_unlock(&parent->lock); 102 genpd_release_lock(parent);
114 103
115 ret = pm_genpd_poweron(parent); 104 ret = pm_genpd_poweron(parent);
116 if (ret) 105 if (ret)
@@ -125,14 +114,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
125 goto out; 114 goto out;
126 } 115 }
127 116
128 genpd->status = GPD_STATE_ACTIVE; 117 genpd_set_active(genpd);
129 if (parent) 118 if (parent)
130 parent->sd_count++; 119 parent->sd_count++;
131 120
132 out: 121 out:
133 mutex_unlock(&genpd->lock); 122 mutex_unlock(&genpd->lock);
134 if (parent) 123 if (parent)
135 mutex_unlock(&parent->lock); 124 genpd_release_lock(parent);
136 125
137 return ret; 126 return ret;
138} 127}
@@ -210,6 +199,20 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
210} 199}
211 200
212/** 201/**
202 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
203 * @genpd: PM domain to check.
204 *
205 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
206 * a "power off" operation, which means that a "power on" has occurred in the
207 * meantime, or if its resume_count field is different from zero, which means
208 * that one of its devices has been resumed in the meantime.
209 */
210static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
211{
212 return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
213}
214
215/**
213 * pm_genpd_poweroff - Remove power from a given PM domain. 216 * pm_genpd_poweroff - Remove power from a given PM domain.
214 * @genpd: PM domain to power down. 217 * @genpd: PM domain to power down.
215 * 218 *
@@ -223,9 +226,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
223 struct generic_pm_domain *parent; 226 struct generic_pm_domain *parent;
224 struct dev_list_entry *dle; 227 struct dev_list_entry *dle;
225 unsigned int not_suspended; 228 unsigned int not_suspended;
226 int ret; 229 int ret = 0;
227 230
228 if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0) 231 start:
232 /*
233 * Do not try to power off the domain in the following situations:
234 * (1) The domain is already in the "power off" state.
235 * (2) System suspend is in progress.
236 * (3) One of the domain's devices is being resumed right now.
237 */
238 if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
239 || genpd->resume_count > 0)
229 return 0; 240 return 0;
230 241
231 if (genpd->sd_count > 0) 242 if (genpd->sd_count > 0)
@@ -239,34 +250,54 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
239 if (not_suspended > genpd->in_progress) 250 if (not_suspended > genpd->in_progress)
240 return -EBUSY; 251 return -EBUSY;
241 252
253 if (genpd->poweroff_task) {
254 /*
255 * Another instance of pm_genpd_poweroff() is executing
256 * callbacks, so tell it to start over and return.
257 */
258 genpd->status = GPD_STATE_REPEAT;
259 return 0;
260 }
261
242 if (genpd->gov && genpd->gov->power_down_ok) { 262 if (genpd->gov && genpd->gov->power_down_ok) {
243 if (!genpd->gov->power_down_ok(&genpd->domain)) 263 if (!genpd->gov->power_down_ok(&genpd->domain))
244 return -EAGAIN; 264 return -EAGAIN;
245 } 265 }
246 266
247 genpd->status = GPD_STATE_BUSY; 267 genpd->status = GPD_STATE_BUSY;
268 genpd->poweroff_task = current;
248 269
249 list_for_each_entry_reverse(dle, &genpd->dev_list, node) { 270 list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
250 ret = __pm_genpd_save_device(dle, genpd); 271 ret = __pm_genpd_save_device(dle, genpd);
251 if (ret) 272 if (ret)
252 goto err_dev; 273 goto err_dev;
253 }
254 274
255 mutex_unlock(&genpd->lock); 275 if (genpd_abort_poweroff(genpd))
276 goto out;
277
278 if (genpd->status == GPD_STATE_REPEAT) {
279 genpd->poweroff_task = NULL;
280 goto start;
281 }
282 }
256 283
257 parent = genpd->parent; 284 parent = genpd->parent;
258 if (parent) { 285 if (parent) {
286 mutex_unlock(&genpd->lock);
287
259 genpd_acquire_lock(parent); 288 genpd_acquire_lock(parent);
260 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); 289 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
261 } else { 290
262 mutex_lock(&genpd->lock); 291 if (genpd_abort_poweroff(genpd)) {
292 genpd_release_lock(parent);
293 goto out;
294 }
263 } 295 }
264 296
265 if (genpd->power_off) 297 if (genpd->power_off)
266 genpd->power_off(genpd); 298 genpd->power_off(genpd);
267 299
268 genpd->status = GPD_STATE_POWER_OFF; 300 genpd->status = GPD_STATE_POWER_OFF;
269 wake_up_all(&genpd->status_wait_queue);
270 301
271 if (parent) { 302 if (parent) {
272 genpd_sd_counter_dec(parent); 303 genpd_sd_counter_dec(parent);
@@ -276,16 +307,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
276 genpd_release_lock(parent); 307 genpd_release_lock(parent);
277 } 308 }
278 309
279 return 0; 310 out:
311 genpd->poweroff_task = NULL;
312 wake_up_all(&genpd->status_wait_queue);
313 return ret;
280 314
281 err_dev: 315 err_dev:
282 list_for_each_entry_continue(dle, &genpd->dev_list, node) 316 list_for_each_entry_continue(dle, &genpd->dev_list, node)
283 __pm_genpd_restore_device(dle, genpd); 317 __pm_genpd_restore_device(dle, genpd);
284 318
285 genpd->status = GPD_STATE_ACTIVE; 319 genpd_set_active(genpd);
286 wake_up_all(&genpd->status_wait_queue); 320 goto out;
287
288 return ret;
289} 321}
290 322
291/** 323/**
@@ -327,11 +359,11 @@ static int pm_genpd_runtime_suspend(struct device *dev)
327 return ret; 359 return ret;
328 } 360 }
329 361
330 genpd_acquire_lock(genpd); 362 mutex_lock(&genpd->lock);
331 genpd->in_progress++; 363 genpd->in_progress++;
332 pm_genpd_poweroff(genpd); 364 pm_genpd_poweroff(genpd);
333 genpd->in_progress--; 365 genpd->in_progress--;
334 genpd_release_lock(genpd); 366 mutex_unlock(&genpd->lock);
335 367
336 return 0; 368 return 0;
337} 369}
@@ -365,6 +397,7 @@ static void __pm_genpd_runtime_resume(struct device *dev,
365static int pm_genpd_runtime_resume(struct device *dev) 397static int pm_genpd_runtime_resume(struct device *dev)
366{ 398{
367 struct generic_pm_domain *genpd; 399 struct generic_pm_domain *genpd;
400 DEFINE_WAIT(wait);
368 int ret; 401 int ret;
369 402
370 dev_dbg(dev, "%s()\n", __func__); 403 dev_dbg(dev, "%s()\n", __func__);
@@ -377,12 +410,31 @@ static int pm_genpd_runtime_resume(struct device *dev)
377 if (ret) 410 if (ret)
378 return ret; 411 return ret;
379 412
380 genpd_acquire_lock(genpd); 413 mutex_lock(&genpd->lock);
381 genpd->status = GPD_STATE_BUSY; 414 genpd->status = GPD_STATE_BUSY;
415 genpd->resume_count++;
416 for (;;) {
417 prepare_to_wait(&genpd->status_wait_queue, &wait,
418 TASK_UNINTERRUPTIBLE);
419 /*
420 * If current is the powering off task, we have been called
421 * reentrantly from one of the device callbacks, so we should
422 * not wait.
423 */
424 if (!genpd->poweroff_task || genpd->poweroff_task == current)
425 break;
426 mutex_unlock(&genpd->lock);
427
428 schedule();
429
430 mutex_lock(&genpd->lock);
431 }
432 finish_wait(&genpd->status_wait_queue, &wait);
382 __pm_genpd_runtime_resume(dev, genpd); 433 __pm_genpd_runtime_resume(dev, genpd);
383 genpd->status = GPD_STATE_ACTIVE; 434 genpd->resume_count--;
435 genpd_set_active(genpd);
384 wake_up_all(&genpd->status_wait_queue); 436 wake_up_all(&genpd->status_wait_queue);
385 genpd_release_lock(genpd); 437 mutex_unlock(&genpd->lock);
386 438
387 if (genpd->start_device) 439 if (genpd->start_device)
388 genpd->start_device(dev); 440 genpd->start_device(dev);
@@ -1130,6 +1182,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1130 genpd->sd_count = 0; 1182 genpd->sd_count = 0;
1131 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; 1183 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1132 init_waitqueue_head(&genpd->status_wait_queue); 1184 init_waitqueue_head(&genpd->status_wait_queue);
1185 genpd->poweroff_task = NULL;
1186 genpd->resume_count = 0;
1133 genpd->device_count = 0; 1187 genpd->device_count = 0;
1134 genpd->suspended_count = 0; 1188 genpd->suspended_count = 0;
1135 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; 1189 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;