about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-10-30 12:28:49 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2016-10-31 13:42:51 -0400
commit8c73b4288496407d91bc616df3f7c62a88356cb2 (patch)
treebcbe494732ed13c518504738fe5f79b8de41f1e2
parent9ed9895370aedd6032af2a9181c62c394d08223b (diff)
PM / sleep: Make async suspend/resume of devices use device links
Make the device suspend/resume part of the core system suspend/resume code use device links to ensure that supplier and consumer devices will be suspended and resumed in the right order in case of async suspend/resume. The idea, roughly, is to use dpm_wait() to wait for all consumers before a supplier device suspend and to wait for all suppliers before a consumer device resume. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- drivers/base/power/main.c | 85
1 file changed, 79 insertions(+), 6 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 420914061405..04bcb11ed8de 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -246,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
246 device_for_each_child(dev, &async, dpm_wait_fn); 246 device_for_each_child(dev, &async, dpm_wait_fn);
247} 247}
248 248
/**
 * dpm_wait_for_suppliers - Wait for the suppliers of a device to complete
 *	their PM transitions.
 * @dev: Consumer device whose supplier links are walked.
 * @async: Forwarded to dpm_wait() for each supplier.
 *
 * Walk @dev's supplier links under the device-links SRCU read lock and
 * wait for every supplier whose link is not dormant.  Dormant links are
 * skipped because no probe can activate them during system sleep (see
 * the matching note in dpm_wait_for_consumers()).
 */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;	/* SRCU read-side cookie for device_links_read_unlock() */

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
269
/**
 * dpm_wait_for_superior - Wait for everything @dev depends on.
 * @dev: Device to handle.
 * @async: Forwarded to dpm_wait().
 *
 * Used on the resume path: a device must not start resuming before its
 * parent and all of its suppliers have completed their transitions.
 */
static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}
275
/**
 * dpm_wait_for_consumers - Wait for the consumers of a device to complete
 *	their PM transitions.
 * @dev: Supplier device whose consumer links are walked.
 * @async: Forwarded to dpm_wait() for each consumer.
 *
 * Walk @dev's consumer links under the device-links SRCU read lock and
 * wait for every consumer whose link is not dormant.
 */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;	/* SRCU read-side cookie for device_links_read_unlock() */

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
298
/**
 * dpm_wait_for_subordinate - Wait for everything that depends on @dev.
 * @dev: Device to handle.
 * @async: Forwarded to dpm_wait().
 *
 * Used on the suspend path: a device must not start suspending before
 * all of its children and all of its consumers have completed their
 * transitions.  Mirror image of dpm_wait_for_superior().
 */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
304
249/** 305/**
250 * pm_op - Return the PM operation appropriate for given PM event. 306 * pm_op - Return the PM operation appropriate for given PM event.
251 * @ops: PM operations to choose from. 307 * @ops: PM operations to choose from.
@@ -490,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
490 if (!dev->power.is_noirq_suspended) 546 if (!dev->power.is_noirq_suspended)
491 goto Out; 547 goto Out;
492 548
493 dpm_wait(dev->parent, async); 549 dpm_wait_for_superior(dev, async);
494 550
495 if (dev->pm_domain) { 551 if (dev->pm_domain) {
496 info = "noirq power domain "; 552 info = "noirq power domain ";
@@ -620,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
620 if (!dev->power.is_late_suspended) 676 if (!dev->power.is_late_suspended)
621 goto Out; 677 goto Out;
622 678
623 dpm_wait(dev->parent, async); 679 dpm_wait_for_superior(dev, async);
624 680
625 if (dev->pm_domain) { 681 if (dev->pm_domain) {
626 info = "early power domain "; 682 info = "early power domain ";
@@ -752,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
752 goto Complete; 808 goto Complete;
753 } 809 }
754 810
755 dpm_wait(dev->parent, async); 811 dpm_wait_for_superior(dev, async);
756 dpm_watchdog_set(&wd, dev); 812 dpm_watchdog_set(&wd, dev);
757 device_lock(dev); 813 device_lock(dev);
758 814
@@ -1040,7 +1096,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1040 if (dev->power.syscore || dev->power.direct_complete) 1096 if (dev->power.syscore || dev->power.direct_complete)
1041 goto Complete; 1097 goto Complete;
1042 1098
1043 dpm_wait_for_children(dev, async); 1099 dpm_wait_for_subordinate(dev, async);
1044 1100
1045 if (dev->pm_domain) { 1101 if (dev->pm_domain) {
1046 info = "noirq power domain "; 1102 info = "noirq power domain ";
@@ -1187,7 +1243,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
1187 if (dev->power.syscore || dev->power.direct_complete) 1243 if (dev->power.syscore || dev->power.direct_complete)
1188 goto Complete; 1244 goto Complete;
1189 1245
1190 dpm_wait_for_children(dev, async); 1246 dpm_wait_for_subordinate(dev, async);
1191 1247
1192 if (dev->pm_domain) { 1248 if (dev->pm_domain) {
1193 info = "late power domain "; 1249 info = "late power domain ";
@@ -1344,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
1344 return error; 1400 return error;
1345} 1401}
1346 1402
/**
 * dpm_clear_suppliers_direct_complete - Clear the "direct complete" flag
 *	of @dev's suppliers.
 * @dev: Consumer device.
 *
 * Walk @dev's supplier links under the device-links SRCU read lock and
 * clear each supplier's power.direct_complete under that supplier's
 * power.lock (irq-disabling variant, matching the parent handling in the
 * caller).
 *
 * NOTE(review): presumably this forces suppliers of a fully suspended
 * consumer to run their suspend callbacks instead of taking the
 * direct_complete shortcut, consistent with how the parent's
 * direct_complete is handled just above the call site in
 * __device_suspend() — confirm against the caller.
 */
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;	/* SRCU read-side cookie for device_links_read_unlock() */

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
1418
1347/** 1419/**
1348 * device_suspend - Execute "suspend" callbacks for given device. 1420 * device_suspend - Execute "suspend" callbacks for given device.
1349 * @dev: Device to handle. 1421 * @dev: Device to handle.
@@ -1360,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1360 TRACE_DEVICE(dev); 1432 TRACE_DEVICE(dev);
1361 TRACE_SUSPEND(0); 1433 TRACE_SUSPEND(0);
1362 1434
1363 dpm_wait_for_children(dev, async); 1435 dpm_wait_for_subordinate(dev, async);
1364 1436
1365 if (async_error) 1437 if (async_error)
1366 goto Complete; 1438 goto Complete;
@@ -1456,6 +1528,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1456 1528
1457 spin_unlock_irq(&parent->power.lock); 1529 spin_unlock_irq(&parent->power.lock);
1458 } 1530 }
1531 dpm_clear_suppliers_direct_complete(dev);
1459 } 1532 }
1460 1533
1461 device_unlock(dev); 1534 device_unlock(dev);