about summary refs log tree commit diff stats
path: root/drivers/base/power/main.c
diff options
context:
space:
mode:
authorLiu, Chuansheng <chuansheng.liu@intel.com>2014-02-17 21:28:45 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-02-19 19:30:09 -0500
commit76569faa62c46382e080c3e190c66e19515aae1c (patch)
tree6ed8ae6ed50e2fdc10886f43841fe75cea245682 /drivers/base/power/main.c
parent3d2699bc179a10eee7d2aa1db50f822be01636f7 (diff)
PM / sleep: Asynchronous threads for resume_noirq
In analogy with commits 5af84b82701a and 97df8c12995, using asynchronous threads can improve the overall resume_noirq time significantly. One typical case is: In resume_noirq phase and for the PCI devices, the function pci_pm_resume_noirq() will be called, and there is one d3_delay (10ms) at least. With the way of asynchronous threads, we just need wait d3_delay time once in parallel for each calling, which saves much time to resume quickly. Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--drivers/base/power/main.c66
1 file changed, 50 insertions, 16 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 00c53eb8ebca..ea3f1d2c28cf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -469,7 +469,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -484,6 +484,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	if (!dev->power.is_noirq_suspended)
 		goto Out;
 
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -507,10 +509,29 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	dev->power.is_noirq_suspended = false;
 
  Out:
+	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
 	return error;
 }
 
+static bool is_async(struct device *dev)
+{
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_noirq(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -520,29 +541,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_noirq_list)) {
-		struct device *dev = to_device(dpm_noirq_list.next);
-		int error;
+	pm_transition = state;
+
+	/*
+	 * Advanced the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_noirq, dev);
+		}
+	}
 
+	while (!list_empty(&dpm_noirq_list)) {
+		dev = to_device(dpm_noirq_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_noirq(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_noirq++;
-			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " noirq", error);
+		if (!is_async(dev)) {
+			int error;
+
+			error = device_resume_noirq(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_noirq++;
+				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " noirq", error);
+			}
 		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
 	cpuidle_resume();
@@ -742,12 +782,6 @@ static void async_resume(void *data, async_cookie_t cookie)
 	put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-	return dev->power.async_suspend && pm_async_enabled
-		&& !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.