author		Liu, Chuansheng <chuansheng.liu@intel.com>	2014-02-17 21:28:46 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-02-19 19:30:09 -0500
commit		9e5e7910df824ba02aedd2b5d2ca556426ea6d0b (patch)
tree		b9db6a7e5e1921a8a9da0c8b1b588fda45f3f80f /drivers/base
parent		76569faa62c46382e080c3e190c66e19515aae1c (diff)
PM / sleep: Asynchronous threads for resume_early
By analogy with commits 5af84b82701a and 97df8c12995, using asynchronous
threads can improve the overall resume_early time significantly.

This patch is for the resume_early phase.

Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/main.c	55	++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 44 insertions(+), 11 deletions(-)
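The idea, in brief: devices flagged for asynchronous PM are scheduled on async threads up front, each thread first blocking until the device's parent has completed so that ordering constraints still hold, while the main loop resumes the remaining devices inline and finally waits for all outstanding async work. Below is a minimal userspace sketch of that pattern, with pthreads standing in for the kernel's async_schedule()/dpm_wait()/complete_all() machinery; every name in it is illustrative, not kernel code.

/*
 * Userspace sketch (not kernel code) of the ordering pattern this
 * patch uses; pthreads stand in for the kernel async machinery.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	const char *name;
	bool async;
	struct dev *parent;
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	bool done;			/* stands in for dev->power.completion */
	pthread_t thread;
};

/* Analogous to dpm_wait(): block until @d (if any) has resumed. */
static void dev_wait(struct dev *d)
{
	if (!d)
		return;
	pthread_mutex_lock(&d->lock);
	while (!d->done)
		pthread_cond_wait(&d->done_cv, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

/* Analogous to complete_all(): wake everyone waiting on @d. */
static void dev_complete(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	d->done = true;
	pthread_cond_broadcast(&d->done_cv);
	pthread_mutex_unlock(&d->lock);
}

static void resume_early(struct dev *d)
{
	dev_wait(d->parent);		/* parent must finish first */
	printf("resumed %s\n", d->name);
	dev_complete(d);
}

/* Analogous to async_resume_early(): thread entry for async devices. */
static void *async_resume(void *data)
{
	resume_early(data);
	return NULL;
}

int main(void)
{
	struct dev parent = {
		.name = "parent", .async = true,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	struct dev child = {
		.name = "child", .parent = &parent,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cv = PTHREAD_COND_INITIALIZER,
	};
	struct dev *list[] = { &parent, &child };
	size_t i, n = sizeof(list) / sizeof(list[0]);

	/* Pass 1: kick off async devices up front (the new upfront loop). */
	for (i = 0; i < n; i++)
		if (list[i]->async)
			pthread_create(&list[i]->thread, NULL,
				       async_resume, list[i]);

	/* Pass 2: resume non-async devices inline (the while loop). */
	for (i = 0; i < n; i++)
		if (!list[i]->async)
			resume_early(list[i]);

	/* Final barrier, like async_synchronize_full(). */
	for (i = 0; i < n; i++)
		if (list[i]->async)
			pthread_join(list[i]->thread, NULL);
	return 0;
}

Scheduling the async threads before touching any synchronous device is the point of the upfront list_for_each_entry() loop in the patch: a slow non-async device at the head of the list no longer delays the start of every async resume behind it.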
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ea3f1d2c28cf..6d41165701c4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -595,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
 -static int device_resume_early(struct device *dev, pm_message_t state)
 +static int device_resume_early(struct device *dev, pm_message_t state, bool async)
  {
  	pm_callback_t callback = NULL;
  	char *info = NULL;
@@ -610,6 +610,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
  	if (!dev->power.is_late_suspended)
  		goto Out;
  
 +	dpm_wait(dev->parent, async);
 +
  	if (dev->pm_domain) {
  		info = "early power domain ";
  		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -636,38 +638,69 @@ static int device_resume_early(struct device *dev, pm_message_t state)
  	TRACE_RESUME(error);
  
  	pm_runtime_enable(dev);
 +	complete_all(&dev->power.completion);
  	return error;
  }
  
 +static void async_resume_early(void *data, async_cookie_t cookie)
 +{
 +	struct device *dev = (struct device *)data;
 +	int error;
 +
 +	error = device_resume_early(dev, pm_transition, true);
 +	if (error)
 +		pm_dev_err(dev, pm_transition, " async", error);
 +
 +	put_device(dev);
 +}
 +
  /**
   * dpm_resume_early - Execute "early resume" callbacks for all devices.
   * @state: PM transition of the system being carried out.
   */
  static void dpm_resume_early(pm_message_t state)
  {
 +	struct device *dev;
  	ktime_t starttime = ktime_get();
  
  	mutex_lock(&dpm_list_mtx);
 -	while (!list_empty(&dpm_late_early_list)) {
 -		struct device *dev = to_device(dpm_late_early_list.next);
 -		int error;
 +	pm_transition = state;
 +
 +	/*
 +	 * Advance the async threads upfront,
 +	 * in case the starting of async threads is
 +	 * delayed by non-async resuming devices.
 +	 */
 +	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 +		reinit_completion(&dev->power.completion);
 +		if (is_async(dev)) {
 +			get_device(dev);
 +			async_schedule(async_resume_early, dev);
 +		}
 +	}
  
 +	while (!list_empty(&dpm_late_early_list)) {
 +		dev = to_device(dpm_late_early_list.next);
  		get_device(dev);
  		list_move_tail(&dev->power.entry, &dpm_suspended_list);
  		mutex_unlock(&dpm_list_mtx);
  
 -		error = device_resume_early(dev, state);
 -		if (error) {
 -			suspend_stats.failed_resume_early++;
 -			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 -			dpm_save_failed_dev(dev_name(dev));
 -			pm_dev_err(dev, state, " early", error);
 -		}
 +		if (!is_async(dev)) {
 +			int error;
  
 +			error = device_resume_early(dev, state, false);
 +			if (error) {
 +				suspend_stats.failed_resume_early++;
 +				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 +				dpm_save_failed_dev(dev_name(dev));
 +				pm_dev_err(dev, state, " early", error);
 +			}
 +		}
  		mutex_lock(&dpm_list_mtx);
  		put_device(dev);
  	}
  	mutex_unlock(&dpm_list_mtx);
 +	async_synchronize_full();
  	dpm_show_time(starttime, state, "early");
  }
  
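Worth noting: a device only takes the async path above when is_async(dev) is true, i.e. the driver has opted in and asynchronous suspend/resume is globally enabled (the /sys/power/pm_async knob). A hypothetical driver probe opting in via the real device_enable_async_suspend() helper could look like the sketch below; the driver and function names are made up for illustration.

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical probe: mark the device for asynchronous suspend/resume
 * so loops like dpm_resume_early() above schedule it on an async
 * thread instead of resuming it inline. */
static int foo_probe(struct platform_device *pdev)
{
	device_enable_async_suspend(&pdev->dev);
	/* ... remainder of the (hypothetical) probe ... */
	return 0;
}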