author	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 15:48:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 15:48:54 -0400
commit	4dedde7c7a18f55180574f934dbc1be84ca0400b (patch)
tree	d7cc511e8ba8ffceadf3f45b9a63395c4e4183c5 /drivers/base/power/main.c
parent	683b6c6f82a60fabf47012581c2cfbf1b037ab95 (diff)
parent	0ecfe310f4517d7505599be738158087c165be7c (diff)
Merge tag 'pm+acpi-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
 "The majority of this material spent some time in linux-next, some of it even several weeks. There are a few relatively fresh commits in it, but they are mostly fixes and simple cleanups. ACPI took the lead this time, both in terms of the number of commits and the number of modified lines of code; cpufreq follows, and there are a few changes in the PM core and in cpuidle too.

  A new feature that already got some LWN.net attention is the device PM QoS extension allowing latency tolerance requirements to be propagated from leaf devices to their ancestors with hardware interfaces for specifying latency tolerance. That should help systems with hardware-driven power management to avoid going too far with it in cases when there are latency tolerance constraints.

  There also are some significant changes in the ACPI core related to the way in which hotplug notifications are handled. They affect PCI hotplug (ACPIPHP) and the ACPI dock station code too. The bottom line is that all those notifications now go through the root notify handler and are propagated to the interested subsystems by means of callbacks, instead of having to install a notify handler for each device object that we can potentially get hotplug notifications for.

  In addition to that, ACPICA will now advertise "Windows 2013" compatibility for _OSI, because some systems out there don't work correctly if that is not done (some of them don't even boot).

  On the system suspend side of things, all of the device suspend and resume callbacks, except for ->prepare() and ->complete(), are now executed asynchronously, as that turns out to speed up system suspend and resume on some platforms quite significantly, and we have a few more optimizations in that area.

  Apart from that, there are some new device IDs and fixes and cleanups all over. In particular, the system suspend and resume handling by cpufreq should be improved and the cpuidle menu governor should be a bit more robust now.

  Specifics:

   - Device PM QoS support for latency tolerance constraints on systems with hardware interfaces allowing such constraints to be specified. That is necessary to prevent hardware-driven power management from becoming overly aggressive on some systems and to prevent power management features leading to excessive latencies from being used in some cases (a driver-side sketch follows below the shortlog).

   - Consolidation of the handling of ACPI hotplug notifications for device objects. This causes all device hotplug notifications to go through the root notify handler (that was executed for all of them anyway before) that propagates them to individual subsystems, if necessary, by executing callbacks provided by those subsystems (those callbacks are associated with struct acpi_device objects during device enumeration). As a result, the code in question becomes both smaller in size and more straightforward, and those changes should not affect users.

   - ACPICA update, including fixes related to the handling of _PRT in cases when it is broken and the addition of "Windows 2013" to the list of supported "features" for _OSI (which is necessary to support systems that work incorrectly or don't even boot without it). Changes from Bob Moore and Lv Zheng.

   - Consolidation of ACPI _OST handling from Jiang Liu.

   - ACPI battery and AC fixes allowing unusual system configurations to be handled by that code from Alexander Mezin.

   - New device IDs for the ACPI LPSS driver from Chiau Ee Chew.

   - ACPI fan and thermal optimizations related to system suspend and resume from Aaron Lu.

   - Cleanups related to ACPI video from Jean Delvare.

   - Assorted ACPI fixes and cleanups from Al Stone, Hanjun Guo, Lan Tianyu, Paul Bolle, Tomasz Nowicki.

   - Intel RAPL (Running Average Power Limits) driver cleanups from Jacob Pan.

   - intel_pstate fixes and cleanups from Dirk Brandewie.

   - cpufreq fixes related to system suspend/resume handling from Viresh Kumar.

   - cpufreq core fixes and cleanups from Viresh Kumar, Stratos Karafotis, Saravana Kannan, Rashika Kheria, Joe Perches.

   - cpufreq drivers updates from Viresh Kumar, Zhuoyu Zhang, Rob Herring.

   - cpuidle fixes related to the menu governor from Tuukka Tikkanen.

   - cpuidle fix related to coupled CPUs handling from Paul Burton.

   - Asynchronous execution of all device suspend and resume callbacks, except for ->prepare and ->complete, during system suspend and resume from Chuansheng Liu (a driver opt-in sketch also follows below the shortlog).

   - Delayed resuming of runtime-suspended devices during system suspend for the PCI bus type and ACPI PM domain.

   - New set of PM helper routines to allow device runtime PM callbacks to be used during system suspend and resume more easily from Ulf Hansson.

   - Assorted fixes and cleanups in the PM core from Geert Uytterhoeven, Prabhakar Lad, Philipp Zabel, Rashika Kheria, Sebastian Capella.

   - devfreq fix from Saravana Kannan"

* tag 'pm+acpi-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (162 commits)
  PM / devfreq: Rewrite devfreq_update_status() to fix multiple bugs
  PM / sleep: Correct whitespace errors in <linux/pm.h>
  intel_pstate: Set core to min P state during core offline
  cpufreq: Add stop CPU callback to cpufreq_driver interface
  cpufreq: Remove unnecessary braces
  cpufreq: Fix checkpatch errors and warnings
  cpufreq: powerpc: add cpufreq transition latency for FSL e500mc SoCs
  MAINTAINERS: Reorder maintainer addresses for PM and ACPI
  PM / Runtime: Update runtime_idle() documentation for return value meaning
  video / output: Drop display output class support
  fujitsu-laptop: Drop unneeded include
  acer-wmi: Stop selecting VIDEO_OUTPUT_CONTROL
  ACPI / gpu / drm: Stop selecting VIDEO_OUTPUT_CONTROL
  ACPI / video: fix ACPI_VIDEO dependencies
  cpufreq: remove unused notifier: CPUFREQ_{SUSPENDCHANGE|RESUMECHANGE}
  cpufreq: Do not allow ->setpolicy drivers to provide ->target
  cpufreq: arm_big_little: set 'physical_cluster' for each CPU
  cpufreq: arm_big_little: make vexpress driver depend on bL core driver
  ACPI / button: Add ACPI Button event via netlink routine
  ACPI: Remove duplicate definitions of PREFIX
  ...
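The latency-tolerance QoS item above is consumed by leaf drivers through a per-device callback. Below is a minimal sketch of that wiring, assuming a hypothetical driver (`example_set_latency_tolerance` and `example_setup_latency_tolerance` are illustrative names, not part of the series); `dev_pm_qos_expose_latency_tolerance()` and the `power.set_latency_tolerance` hook are the interfaces this pull introduces:

```c
#include <linux/device.h>
#include <linux/pm_qos.h>

/*
 * Hypothetical leaf driver hooking into the new latency-tolerance
 * device PM QoS type. The callback receives the aggregated constraint
 * (in microseconds, negative meaning "no constraint") whenever a
 * kernel or user-space consumer updates it.
 */
static void example_set_latency_tolerance(struct device *dev, s32 val)
{
	/* Program the hardware's latency-tolerance register from 'val'. */
}

static int example_setup_latency_tolerance(struct device *dev)
{
	dev->power.set_latency_tolerance = example_set_latency_tolerance;
	/* Expose power/pm_qos_latency_tolerance_us in sysfs. */
	return dev_pm_qos_expose_latency_tolerance(dev);
}
```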
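The asynchronous suspend/resume paths shown in the diff below apply only to devices that opted in. A minimal sketch of the driver side, assuming a hypothetical platform driver (the probe function name is illustrative); `device_enable_async_suspend()` simply sets the `power.async_suspend` flag tested by `is_async()` in the diff:

```c
#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical probe: opt this device into the async PM paths. */
static int example_probe(struct platform_device *pdev)
{
	/*
	 * Sets dev->power.async_suspend. With pm_async_enabled set
	 * (/sys/power/pm_async) and pm_trace off, the noirq, late/early
	 * and regular suspend/resume callbacks of this device may run
	 * in async threads, ordered through the per-device completions
	 * visible in the diff below.
	 */
	device_enable_async_suspend(&pdev->dev);
	return 0;
}
```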
Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--	drivers/base/power/main.c	280
1 file changed, 230 insertions(+), 50 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65a..86d5e4fb5b98 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
 
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
 {
 	dev->power.is_prepared = false;
 	dev->power.is_suspended = false;
+	dev->power.is_noirq_suspended = false;
+	dev->power.is_late_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		goto Out;
 
+	if (!dev->power.is_noirq_suspended)
+		goto Out;
+
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	}
 
 	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_noirq_suspended = false;
 
  Out:
+	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
 	return error;
 }
 
+static bool is_async(struct device *dev)
+{
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_noirq(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_noirq_list)) {
-		struct device *dev = to_device(dpm_noirq_list.next);
-		int error;
+	pm_transition = state;
 
+	/*
+	 * Advance the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_noirq, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_noirq_list)) {
+		dev = to_device(dpm_noirq_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_noirq(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_noirq++;
-			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " noirq", error);
+		if (!is_async(dev)) {
+			int error;
+
+			error = device_resume_noirq(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_noirq++;
+				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " noirq", error);
+			}
 		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
 	cpuidle_resume();
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		goto Out;
 
+	if (!dev->power.is_late_suspended)
+		goto Out;
+
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	}
 
 	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_late_suspended = false;
 
  Out:
 	TRACE_RESUME(error);
 
 	pm_runtime_enable(dev);
+	complete_all(&dev->power.completion);
 	return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_early(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.next);
-		int error;
+	pm_transition = state;
 
+	/*
+	 * Advance the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_early, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_late_early_list)) {
+		dev = to_device(dpm_late_early_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_early(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_early++;
-			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " early", error);
-		}
+		if (!is_async(dev)) {
+			int error;
 
+			error = device_resume_early(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_early++;
+				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " early", error);
+			}
+		}
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "early");
 }
 
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
 	put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-	return dev->power.async_suspend && pm_async_enabled
-		&& !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, NULL);
+
+	cpufreq_resume();
 }
 
 /**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
+	int error = 0;
+
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
 
 	if (dev->power.syscore)
-		return 0;
+		goto Complete;
+
+	dpm_wait_for_children(dev, async);
 
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
-	return dpm_run_callback(callback, dev, state, info);
+	error = dpm_run_callback(callback, dev, state, info);
+	if (!error)
+		dev->power.is_noirq_suspended = true;
+	else
+		async_error = error;
+
+Complete:
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_noirq(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+
+	put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (pm_async_enabled && dev->power.async_suspend) {
+		get_device(dev);
+		async_schedule(async_suspend_noirq, dev);
+		return 0;
+	}
+	return __device_suspend_noirq(dev, pm_transition, false);
+}
 
 /**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
 	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
 	while (!list_empty(&dpm_late_early_list)) {
 		struct device *dev = to_device(dpm_late_early_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_noirq(dev, state);
+		error = device_suspend_noirq(dev);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " noirq", error);
-			suspend_stats.failed_suspend_noirq++;
-			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
 		list_move(&dev->power.entry, &dpm_noirq_list);
 		put_device(dev);
 
-		if (pm_wakeup_pending()) {
-			error = -EBUSY;
+		if (async_error)
 			break;
-		}
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
+
+	if (error) {
+		suspend_stats.failed_suspend_noirq++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 		dpm_resume_noirq(resume_event(state));
-	else
+	} else {
 		dpm_show_time(starttime, state, "noirq");
+	}
 	return error;
 }
 
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
+	int error = 0;
 
 	__pm_runtime_disable(dev, false);
 
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
 	if (dev->power.syscore)
-		return 0;
+		goto Complete;
+
+	dpm_wait_for_children(dev, async);
 
 	if (dev->pm_domain) {
 		info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
 		callback = pm_late_early_op(dev->driver->pm, state);
 	}
 
-	return dpm_run_callback(callback, dev, state, info);
+	error = dpm_run_callback(callback, dev, state, info);
+	if (!error)
+		dev->power.is_late_suspended = true;
+	else
+		async_error = error;
+
+Complete:
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_late(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+	put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (pm_async_enabled && dev->power.async_suspend) {
+		get_device(dev);
+		async_schedule(async_suspend_late, dev);
+		return 0;
+	}
+
+	return __device_suspend_late(dev, pm_transition, false);
+}
 
 /**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
 	int error = 0;
 
 	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
 	while (!list_empty(&dpm_suspended_list)) {
 		struct device *dev = to_device(dpm_suspended_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_late(dev, state);
+		error = device_suspend_late(dev);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
-			suspend_stats.failed_suspend_late++;
-			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
 		list_move(&dev->power.entry, &dpm_late_early_list);
 		put_device(dev);
 
-		if (pm_wakeup_pending()) {
-			error = -EBUSY;
+		if (async_error)
 			break;
-		}
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
+	async_synchronize_full();
+	if (error) {
+		suspend_stats.failed_suspend_late++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
 		dpm_resume_early(resume_event(state));
-	else
+	} else {
 		dpm_show_time(starttime, state, "late");
-
+	}
 	return error;
 }
 
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
 
 	might_sleep();
 
+	cpufreq_suspend();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
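Taken together, the hunks above repeat one synchronization pattern: re-arm every device's completion, schedule async workers that wait on the parent's completion (resume) or the children's completions (suspend), and drain everything with async_synchronize_full() before leaving the phase. Below is a condensed, illustrative reduction of the resume-side ordering; the `toy_`-prefixed names are hypothetical stand-ins for the real dpm_* machinery, while the completion and async calls are the actual kernel APIs used in the diff:

```c
#include <linux/async.h>
#include <linux/completion.h>
#include <linux/device.h>

/*
 * Sketch of the ordering guarantee:
 *  1. every device's completion is re-armed before any worker starts;
 *  2. a worker waits on its parent's completion, so parents finish first;
 *  3. complete_all() releases every child waiting on this device;
 *  4. the main thread drains all workers before the phase is declared done.
 */
static void toy_async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	if (dev->parent)
		wait_for_completion(&dev->parent->power.completion); /* step 2 */
	/* ... run the device's resume callback here ... */
	complete_all(&dev->power.completion);                        /* step 3 */
	put_device(dev);
}

static void toy_resume_phase(struct device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		reinit_completion(&devs[i]->power.completion);       /* step 1 */
	for (i = 0; i < n; i++) {
		get_device(devs[i]);
		async_schedule(toy_async_resume, devs[i]);
	}
	async_synchronize_full();                                    /* step 4 */
}
```

In the real code, non-async devices are handled inline by the main loop while async ones run in parallel, which is why device_resume_noirq() and device_resume_early() both call dpm_wait(dev->parent, async) before touching the device.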