author    Rafael J. Wysocki <rjw@sisk.pl>  2010-01-23 16:25:31 -0500
committer Rafael J. Wysocki <rjw@sisk.pl>  2010-02-26 14:39:11 -0500
commit    97df8c12995c5bac73e3bfeea4c5be155c1f4401 (patch)
tree      74de2a645bbdfc28048892f4964172a3e2cde781 /drivers/base/power
parent    5a2eb8585f3b38e01e30aacaa8b985a1520a993d (diff)
PM: Start asynchronous resume threads upfront
It has been shown by testing that total device resume time can be reduced significantly (by as much as 50% or more) if the async threads executing some devices' resume routines are all started before the main resume thread starts to handle the "synchronous" devices.

This is a consequence of the fact that the slowest devices tend to be located at the end of dpm_list, so their resume routines are started very late. Consequently, they have to wait for all of the preceding "synchronous" devices before their resume routines can be started by the main resume thread, even if they are "asynchronous".

By starting their async threads upfront we effectively move those devices towards the beginning of dpm_list, without breaking their ordering with respect to their parents and children. As a result, their resume routines are started much earlier and we are able to save much more device resume time this way.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
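To see why starting the async threads first helps, here is a minimal user-space sketch of the two-pass pattern the patch introduces. It is illustrative only: fake_device, resume_one and the pthreads stand-in for async_schedule() are hypothetical, not kernel APIs. With the slow "async" device at the tail of the list, its simulated resume overlaps the synchronous devices instead of running after them.

/*
 * Sketch of the two-pass resume pattern (assumption: pthreads as a
 * stand-in for the kernel's async_schedule() machinery).
 * Pass 1 walks the whole list and spawns a thread for every device
 * marked async; pass 2 walks the list again and handles the remaining
 * "synchronous" devices in order.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_device {
	const char *name;
	bool async;		/* analogous to dev->power.async_suspend */
	unsigned int cost_ms;	/* pretend resume latency */
	pthread_t thread;
};

static void resume_one(struct fake_device *dev)
{
	usleep(dev->cost_ms * 1000);	/* stand-in for the resume callback */
	printf("resumed %s\n", dev->name);
}

static void *async_resume(void *data)
{
	resume_one(data);
	return NULL;
}

int main(void)
{
	struct fake_device list[] = {
		{ "sync-a",   false,  10 },
		{ "sync-b",   false,  10 },
		{ "slow-gpu", true,  300 },	/* slow device at the tail */
	};
	size_t i, n = sizeof(list) / sizeof(list[0]);

	/* Pass 1: start every async handler up front. */
	for (i = 0; i < n; i++)
		if (list[i].async)
			pthread_create(&list[i].thread, NULL,
				       async_resume, &list[i]);

	/* Pass 2: handle synchronous devices in list order. */
	for (i = 0; i < n; i++)
		if (!list[i].async)
			resume_one(&list[i]);

	/* Wait for the async handlers, as dpm_resume() ultimately must. */
	for (i = 0; i < n; i++)
		if (list[i].async)
			pthread_join(list[i].thread, NULL);

	return 0;
}

Built with cc -pthread, the total runtime is roughly the maximum of the async and synchronous work rather than their sum, mirroring the savings described above; the kernel additionally preserves parent/child ordering because each async handler still does dpm_wait() on its parent.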
Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/main.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7e79201b09bb..6efef9fb23a1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -495,12 +495,12 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
 }
 
 /**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static int __device_resume(struct device *dev, pm_message_t state, bool async)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
 
@@ -510,6 +510,8 @@ static int __device_resume(struct device *dev, pm_message_t state, bool async)
 	dpm_wait(dev->parent, async);
 	down(&dev->sem);
 
+	dev->power.status = DPM_RESUMING;
+
 	if (dev->bus) {
 		if (dev->bus->pm) {
 			pm_dev_dbg(dev, state, "");
@@ -553,24 +555,16 @@ static void async_resume(void *data, async_cookie_t cookie)
 	struct device *dev = (struct device *)data;
 	int error;
 
-	error = __device_resume(dev, pm_transition, true);
+	error = device_resume(dev, pm_transition, true);
 	if (error)
 		pm_dev_err(dev, pm_transition, " async", error);
 	put_device(dev);
 }
 
-static int device_resume(struct device *dev)
+static bool is_async(struct device *dev)
 {
-	INIT_COMPLETION(dev->power.completion);
-
-	if (pm_async_enabled && dev->power.async_suspend
-	    && !pm_trace_is_enabled()) {
-		get_device(dev);
-		async_schedule(async_resume, dev);
-		return 0;
-	}
-
-	return __device_resume(dev, pm_transition, false);
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
 }
 
 /**
@@ -583,22 +577,33 @@ static int device_resume(struct device *dev)
 static void dpm_resume(pm_message_t state)
 {
 	struct list_head list;
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.next);
 
+	list_for_each_entry(dev, &dpm_list, power.entry) {
+		if (dev->power.status < DPM_OFF)
+			continue;
+
+		INIT_COMPLETION(dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_list)) {
+		dev = to_device(dpm_list.next);
 		get_device(dev);
-		if (dev->power.status >= DPM_OFF) {
+		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
 			int error;
 
-			dev->power.status = DPM_RESUMING;
 			mutex_unlock(&dpm_list_mtx);
 
-			error = device_resume(dev);
+			error = device_resume(dev, state, false);
 
 			mutex_lock(&dpm_list_mtx);
 			if (error)