-rw-r--r--Documentation/ABI/testing/sysfs-power12
-rw-r--r--Documentation/power/states.txt4
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra114.c4
-rw-r--r--drivers/acpi/processor_idle.c6
-rw-r--r--drivers/acpi/sleep.c202
-rw-r--r--drivers/base/power/main.c103
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/cpuidle/cpuidle.c18
-rw-r--r--drivers/cpuidle/dt_idle_states.c4
-rw-r--r--drivers/idle/intel_idle.c180
-rw-r--r--drivers/platform/x86/intel-hid.c17
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/pm.h4
-rw-r--r--include/linux/suspend.h48
-rw-r--r--kernel/power/hibernate.c29
-rw-r--r--kernel/power/main.c64
-rw-r--r--kernel/power/power.h5
-rw-r--r--kernel/power/suspend.c184
-rw-r--r--kernel/power/suspend_test.c4
-rw-r--r--kernel/sched/idle.c8
-rw-r--r--kernel/time/timekeeping_debug.c5
22 files changed, 622 insertions, 291 deletions
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index f523e5a3ac33..713cab1d5f12 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -273,3 +273,15 @@ Description:
273 273
274 This output is useful for system wakeup diagnostics of spurious 274 This output is useful for system wakeup diagnostics of spurious
275 wakeup interrupts. 275 wakeup interrupts.
276
277What: /sys/power/pm_debug_messages
278Date: July 2017
279Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
280Description:
281 The /sys/power/pm_debug_messages file controls the printing
282 of debug messages from the system suspend/hibernation
283 infrastructure to the kernel log.
284
285 Writing a "1" to this file enables the debug messages and
286 writing a "0" (default) to it disables them. Reads from
287 this file return the current value.
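
For illustration only (not part of the patch), a user-space program toggling the new knob could look like the sketch below; the sysfs path is taken from the ABI entry above, everything else is assumed.

/* Illustrative only: enable PM debug messages from user space, assuming
 * the kernel exposes /sys/power/pm_debug_messages as documented above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/power/pm_debug_messages", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {	/* write "0" to disable again */
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
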
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index bc4548245a24..205e45ad7c65 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,7 +35,9 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
35The default suspend mode (ie. the one to be used without writing anything into 35The default suspend mode (ie. the one to be used without writing anything into
36/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or 36/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
37"s2idle", but it can be overridden by the value of the "mem_sleep_default" 37"s2idle", but it can be overridden by the value of the "mem_sleep_default"
38parameter in the kernel command line. 38parameter in the kernel command line. On some ACPI-based systems, depending on
39the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
40is supported.
39 41
40The properties of all of the sleep states are described below. 42The properties of all of the sleep states are described below.
41 43
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index d3aa9be16621..e3fbcfedf845 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -60,7 +60,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
60 return index; 60 return index;
61} 61}
62 62
63static void tegra114_idle_enter_freeze(struct cpuidle_device *dev, 63static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev,
64 struct cpuidle_driver *drv, 64 struct cpuidle_driver *drv,
65 int index) 65 int index)
66{ 66{
@@ -77,7 +77,7 @@ static struct cpuidle_driver tegra_idle_driver = {
77#ifdef CONFIG_PM_SLEEP 77#ifdef CONFIG_PM_SLEEP
78 [1] = { 78 [1] = {
79 .enter = tegra114_idle_power_down, 79 .enter = tegra114_idle_power_down,
80 .enter_freeze = tegra114_idle_enter_freeze, 80 .enter_s2idle = tegra114_idle_enter_s2idle,
81 .exit_latency = 500, 81 .exit_latency = 500,
82 .target_residency = 1000, 82 .target_residency = 1000,
83 .flags = CPUIDLE_FLAG_TIMER_STOP, 83 .flags = CPUIDLE_FLAG_TIMER_STOP,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index df38e81cc672..d50a7b6ccddd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -791,7 +791,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
791 return index; 791 return index;
792} 792}
793 793
794static void acpi_idle_enter_freeze(struct cpuidle_device *dev, 794static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
795 struct cpuidle_driver *drv, int index) 795 struct cpuidle_driver *drv, int index)
796{ 796{
797 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 797 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -876,14 +876,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
876 drv->safe_state_index = count; 876 drv->safe_state_index = count;
877 } 877 }
878 /* 878 /*
879 * Halt-induced C1 is not good for ->enter_freeze, because it 879 * Halt-induced C1 is not good for ->enter_s2idle, because it
880 * re-enables interrupts on exit. Moreover, C1 is generally not 880 * re-enables interrupts on exit. Moreover, C1 is generally not
881 * particularly interesting from the suspend-to-idle angle, so 881 * particularly interesting from the suspend-to-idle angle, so
882 * avoid C1 and the situations in which we may need to fall back 882 * avoid C1 and the situations in which we may need to fall back
883 * to it altogether. 883 * to it altogether.
884 */ 884 */
885 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 885 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
886 state->enter_freeze = acpi_idle_enter_freeze; 886 state->enter_s2idle = acpi_idle_enter_s2idle;
887 887
888 count++; 888 count++;
889 if (count == CPUIDLE_STATE_MAX) 889 if (count == CPUIDLE_STATE_MAX)
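
The cpuidle-facing side of the rename is visible above: each idle state may provide an ->enter_s2idle() callback next to ->enter(). A minimal sketch of a driver state table wiring both callbacks, with hypothetical callback bodies, might look like this (only the field names and signatures follow the hunks above):

#include <linux/cpuidle.h>

/* Hypothetical callbacks for illustration. */
static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* enter the low-power state here */
	return index;
}

static void my_idle_enter_s2idle(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* suspend-to-idle variant; must not re-enable interrupts on exit */
}

static struct cpuidle_state my_cstates[] = {
	{
		.name			= "C1",
		.desc			= "example state",
		.exit_latency		= 2,
		.target_residency	= 2,
		.enter			= my_idle_enter,
		.enter_s2idle		= my_idle_enter_s2idle, },
	{
		.enter = NULL },
};
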
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fa8243c5c062..09460d9f9208 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
669 669
670#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" 670#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
671 671
672#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
672#define ACPI_LPS0_SCREEN_OFF 3 673#define ACPI_LPS0_SCREEN_OFF 3
673#define ACPI_LPS0_SCREEN_ON 4 674#define ACPI_LPS0_SCREEN_ON 4
674#define ACPI_LPS0_ENTRY 5 675#define ACPI_LPS0_ENTRY 5
@@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
680static guid_t lps0_dsm_guid; 681static guid_t lps0_dsm_guid;
681static char lps0_dsm_func_mask; 682static char lps0_dsm_func_mask;
682 683
684/* Device constraint entry structure */
685struct lpi_device_info {
686 char *name;
687 int enabled;
688 union acpi_object *package;
689};
690
691/* Constraint package structure */
692struct lpi_device_constraint {
693 int uid;
694 int min_dstate;
695 int function_states;
696};
697
698struct lpi_constraints {
699 acpi_handle handle;
700 int min_dstate;
701};
702
703static struct lpi_constraints *lpi_constraints_table;
704static int lpi_constraints_table_size;
705
706static void lpi_device_get_constraints(void)
707{
708 union acpi_object *out_obj;
709 int i;
710
711 out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
712 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
713 NULL, ACPI_TYPE_PACKAGE);
714
715 acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
716 out_obj ? "successful" : "failed");
717
718 if (!out_obj)
719 return;
720
721 lpi_constraints_table = kcalloc(out_obj->package.count,
722 sizeof(*lpi_constraints_table),
723 GFP_KERNEL);
724 if (!lpi_constraints_table)
725 goto free_acpi_buffer;
726
727 acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
728
729 for (i = 0; i < out_obj->package.count; i++) {
730 struct lpi_constraints *constraint;
731 acpi_status status;
732 union acpi_object *package = &out_obj->package.elements[i];
733 struct lpi_device_info info = { };
734 int package_count = 0, j;
735
736 if (!package)
737 continue;
738
739 for (j = 0; j < package->package.count; ++j) {
740 union acpi_object *element =
741 &(package->package.elements[j]);
742
743 switch (element->type) {
744 case ACPI_TYPE_INTEGER:
745 info.enabled = element->integer.value;
746 break;
747 case ACPI_TYPE_STRING:
748 info.name = element->string.pointer;
749 break;
750 case ACPI_TYPE_PACKAGE:
751 package_count = element->package.count;
752 info.package = element->package.elements;
753 break;
754 }
755 }
756
757 if (!info.enabled || !info.package || !info.name)
758 continue;
759
760 constraint = &lpi_constraints_table[lpi_constraints_table_size];
761
762 status = acpi_get_handle(NULL, info.name, &constraint->handle);
763 if (ACPI_FAILURE(status))
764 continue;
765
766 acpi_handle_debug(lps0_device_handle,
767 "index:%d Name:%s\n", i, info.name);
768
769 constraint->min_dstate = -1;
770
771 for (j = 0; j < package_count; ++j) {
772 union acpi_object *info_obj = &info.package[j];
773 union acpi_object *cnstr_pkg;
774 union acpi_object *obj;
775 struct lpi_device_constraint dev_info;
776
777 switch (info_obj->type) {
778 case ACPI_TYPE_INTEGER:
779 /* version */
780 break;
781 case ACPI_TYPE_PACKAGE:
782 if (info_obj->package.count < 2)
783 break;
784
785 cnstr_pkg = info_obj->package.elements;
786 obj = &cnstr_pkg[0];
787 dev_info.uid = obj->integer.value;
788 obj = &cnstr_pkg[1];
789 dev_info.min_dstate = obj->integer.value;
790
791 acpi_handle_debug(lps0_device_handle,
792 "uid:%d min_dstate:%s\n",
793 dev_info.uid,
794 acpi_power_state_string(dev_info.min_dstate));
795
796 constraint->min_dstate = dev_info.min_dstate;
797 break;
798 }
799 }
800
801 if (constraint->min_dstate < 0) {
802 acpi_handle_debug(lps0_device_handle,
803 "Incomplete constraint defined\n");
804 continue;
805 }
806
807 lpi_constraints_table_size++;
808 }
809
810 acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
811
812free_acpi_buffer:
813 ACPI_FREE(out_obj);
814}
815
816static void lpi_check_constraints(void)
817{
818 int i;
819
820 for (i = 0; i < lpi_constraints_table_size; ++i) {
821 struct acpi_device *adev;
822
823 if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
824 continue;
825
826 acpi_handle_debug(adev->handle,
827 "LPI: required min power state:%s current power state:%s\n",
828 acpi_power_state_string(lpi_constraints_table[i].min_dstate),
829 acpi_power_state_string(adev->power.state));
830
831 if (!adev->flags.power_manageable) {
832 acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
833 continue;
834 }
835
836 if (adev->power.state < lpi_constraints_table[i].min_dstate)
837 acpi_handle_info(adev->handle,
838 "LPI: Constraint not met; min power state:%s current power state:%s\n",
839 acpi_power_state_string(lpi_constraints_table[i].min_dstate),
840 acpi_power_state_string(adev->power.state));
841 }
842}
843
683static void acpi_sleep_run_lps0_dsm(unsigned int func) 844static void acpi_sleep_run_lps0_dsm(unsigned int func)
684{ 845{
685 union acpi_object *out_obj; 846 union acpi_object *out_obj;
@@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
714 if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) { 875 if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
715 lps0_dsm_func_mask = bitmask; 876 lps0_dsm_func_mask = bitmask;
716 lps0_device_handle = adev->handle; 877 lps0_device_handle = adev->handle;
878 /*
879 * Use suspend-to-idle by default if the default
880 * suspend mode was not set from the command line.
881 */
882 if (mem_sleep_default > PM_SUSPEND_MEM)
883 mem_sleep_current = PM_SUSPEND_TO_IDLE;
717 } 884 }
718 885
719 acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", 886 acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
@@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
723 "_DSM function 0 evaluation failed\n"); 890 "_DSM function 0 evaluation failed\n");
724 } 891 }
725 ACPI_FREE(out_obj); 892 ACPI_FREE(out_obj);
893
894 lpi_device_get_constraints();
895
726 return 0; 896 return 0;
727} 897}
728 898
@@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
731 .attach = lps0_device_attach, 901 .attach = lps0_device_attach,
732}; 902};
733 903
734static int acpi_freeze_begin(void) 904static int acpi_s2idle_begin(void)
735{ 905{
736 acpi_scan_lock_acquire(); 906 acpi_scan_lock_acquire();
737 s2idle_in_progress = true; 907 s2idle_in_progress = true;
738 return 0; 908 return 0;
739} 909}
740 910
741static int acpi_freeze_prepare(void) 911static int acpi_s2idle_prepare(void)
742{ 912{
743 if (lps0_device_handle) { 913 if (lps0_device_handle) {
744 acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); 914 acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
@@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
758 return 0; 928 return 0;
759} 929}
760 930
761static void acpi_freeze_wake(void) 931static void acpi_s2idle_wake(void)
762{ 932{
933
934 if (pm_debug_messages_on)
935 lpi_check_constraints();
936
763 /* 937 /*
764 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means 938 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
765 * that the SCI has triggered while suspended, so cancel the wakeup in 939 * that the SCI has triggered while suspended, so cancel the wakeup in
@@ -772,7 +946,7 @@ static void acpi_freeze_wake(void)
772 } 946 }
773} 947}
774 948
775static void acpi_freeze_sync(void) 949static void acpi_s2idle_sync(void)
776{ 950{
777 /* 951 /*
778 * Process all pending events in case there are any wakeup ones. 952 * Process all pending events in case there are any wakeup ones.
@@ -785,7 +959,7 @@ static void acpi_freeze_sync(void)
785 s2idle_wakeup = false; 959 s2idle_wakeup = false;
786} 960}
787 961
788static void acpi_freeze_restore(void) 962static void acpi_s2idle_restore(void)
789{ 963{
790 if (acpi_sci_irq_valid()) 964 if (acpi_sci_irq_valid())
791 disable_irq_wake(acpi_sci_irq); 965 disable_irq_wake(acpi_sci_irq);
@@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
798 } 972 }
799} 973}
800 974
801static void acpi_freeze_end(void) 975static void acpi_s2idle_end(void)
802{ 976{
803 s2idle_in_progress = false; 977 s2idle_in_progress = false;
804 acpi_scan_lock_release(); 978 acpi_scan_lock_release();
805} 979}
806 980
807static const struct platform_freeze_ops acpi_freeze_ops = { 981static const struct platform_s2idle_ops acpi_s2idle_ops = {
808 .begin = acpi_freeze_begin, 982 .begin = acpi_s2idle_begin,
809 .prepare = acpi_freeze_prepare, 983 .prepare = acpi_s2idle_prepare,
810 .wake = acpi_freeze_wake, 984 .wake = acpi_s2idle_wake,
811 .sync = acpi_freeze_sync, 985 .sync = acpi_s2idle_sync,
812 .restore = acpi_freeze_restore, 986 .restore = acpi_s2idle_restore,
813 .end = acpi_freeze_end, 987 .end = acpi_s2idle_end,
814}; 988};
815 989
816static void acpi_sleep_suspend_setup(void) 990static void acpi_sleep_suspend_setup(void)
@@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
825 &acpi_suspend_ops_old : &acpi_suspend_ops); 999 &acpi_suspend_ops_old : &acpi_suspend_ops);
826 1000
827 acpi_scan_add_handler(&lps0_handler); 1001 acpi_scan_add_handler(&lps0_handler);
828 freeze_set_ops(&acpi_freeze_ops); 1002 s2idle_set_ops(&acpi_s2idle_ops);
829} 1003}
830 1004
831#else /* !CONFIG_SUSPEND */ 1005#else /* !CONFIG_SUSPEND */
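
The platform hooks are likewise renamed from platform_freeze_ops to platform_s2idle_ops and registered with s2idle_set_ops(). As a hedged sketch, a non-ACPI platform could plug into the same interface roughly as follows; only the struct and the registration function names come from the patch, the rest is illustrative.

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical platform hooks; struct platform_s2idle_ops and
 * s2idle_set_ops() are from the patch, everything else is a sketch. */
static int my_plat_s2idle_begin(void)
{
	/* prepare firmware/locks for suspend-to-idle entry */
	return 0;
}

static void my_plat_s2idle_end(void)
{
	/* undo whatever ->begin() did */
}

static const struct platform_s2idle_ops my_plat_s2idle_ops = {
	.begin	= my_plat_s2idle_begin,
	.end	= my_plat_s2idle_end,
};

static int __init my_plat_pm_init(void)
{
	s2idle_set_ops(&my_plat_s2idle_ops);
	return 0;
}
core_initcall(my_plat_pm_init);
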
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c99f8730de82..ea1732ed7a9d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
418 dev_name(dev), pm_verb(state.event), info, error); 418 dev_name(dev), pm_verb(state.event), info, error);
419} 419}
420 420
421#ifdef CONFIG_PM_DEBUG 421static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
422static void dpm_show_time(ktime_t starttime, pm_message_t state,
423 const char *info) 422 const char *info)
424{ 423{
425 ktime_t calltime; 424 ktime_t calltime;
@@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
432 usecs = usecs64; 431 usecs = usecs64;
433 if (usecs == 0) 432 if (usecs == 0)
434 usecs = 1; 433 usecs = 1;
435 pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", 434
436 info ?: "", info ? " " : "", pm_verb(state.event), 435 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
437 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 436 info ?: "", info ? " " : "", pm_verb(state.event),
437 error ? "aborted" : "complete",
438 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
438} 439}
439#else
440static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
441 const char *info) {}
442#endif /* CONFIG_PM_DEBUG */
443 440
444static int dpm_run_callback(pm_callback_t cb, struct device *dev, 441static int dpm_run_callback(pm_callback_t cb, struct device *dev,
445 pm_message_t state, const char *info) 442 pm_message_t state, const char *info)
@@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
602 put_device(dev); 599 put_device(dev);
603} 600}
604 601
605/** 602void dpm_noirq_resume_devices(pm_message_t state)
606 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
607 * @state: PM transition of the system being carried out.
608 *
609 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
610 * enable device drivers to receive interrupts.
611 */
612void dpm_resume_noirq(pm_message_t state)
613{ 603{
614 struct device *dev; 604 struct device *dev;
615 ktime_t starttime = ktime_get(); 605 ktime_t starttime = ktime_get();
@@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
654 } 644 }
655 mutex_unlock(&dpm_list_mtx); 645 mutex_unlock(&dpm_list_mtx);
656 async_synchronize_full(); 646 async_synchronize_full();
657 dpm_show_time(starttime, state, "noirq"); 647 dpm_show_time(starttime, state, 0, "noirq");
648 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
649}
650
651void dpm_noirq_end(void)
652{
658 resume_device_irqs(); 653 resume_device_irqs();
659 device_wakeup_disarm_wake_irqs(); 654 device_wakeup_disarm_wake_irqs();
660 cpuidle_resume(); 655 cpuidle_resume();
661 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 656}
657
658/**
659 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
660 * @state: PM transition of the system being carried out.
661 *
662 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
663 * allow device drivers' interrupt handlers to be called.
664 */
665void dpm_resume_noirq(pm_message_t state)
666{
667 dpm_noirq_resume_devices(state);
668 dpm_noirq_end();
662} 669}
663 670
664/** 671/**
@@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
776 } 783 }
777 mutex_unlock(&dpm_list_mtx); 784 mutex_unlock(&dpm_list_mtx);
778 async_synchronize_full(); 785 async_synchronize_full();
779 dpm_show_time(starttime, state, "early"); 786 dpm_show_time(starttime, state, 0, "early");
780 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); 787 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
781} 788}
782 789
@@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
948 } 955 }
949 mutex_unlock(&dpm_list_mtx); 956 mutex_unlock(&dpm_list_mtx);
950 async_synchronize_full(); 957 async_synchronize_full();
951 dpm_show_time(starttime, state, NULL); 958 dpm_show_time(starttime, state, 0, NULL);
952 959
953 cpufreq_resume(); 960 cpufreq_resume();
954 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 961 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
@@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1098 if (async_error) 1105 if (async_error)
1099 goto Complete; 1106 goto Complete;
1100 1107
1108 if (pm_wakeup_pending()) {
1109 async_error = -EBUSY;
1110 goto Complete;
1111 }
1112
1101 if (dev->power.syscore || dev->power.direct_complete) 1113 if (dev->power.syscore || dev->power.direct_complete)
1102 goto Complete; 1114 goto Complete;
1103 1115
@@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
1158 return __device_suspend_noirq(dev, pm_transition, false); 1170 return __device_suspend_noirq(dev, pm_transition, false);
1159} 1171}
1160 1172
1161/** 1173void dpm_noirq_begin(void)
1162 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1174{
1163 * @state: PM transition of the system being carried out. 1175 cpuidle_pause();
1164 * 1176 device_wakeup_arm_wake_irqs();
1165 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 1177 suspend_device_irqs();
1166 * handlers for all non-sysdev devices. 1178}
1167 */ 1179
1168int dpm_suspend_noirq(pm_message_t state) 1180int dpm_noirq_suspend_devices(pm_message_t state)
1169{ 1181{
1170 ktime_t starttime = ktime_get(); 1182 ktime_t starttime = ktime_get();
1171 int error = 0; 1183 int error = 0;
1172 1184
1173 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); 1185 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1174 cpuidle_pause();
1175 device_wakeup_arm_wake_irqs();
1176 suspend_device_irqs();
1177 mutex_lock(&dpm_list_mtx); 1186 mutex_lock(&dpm_list_mtx);
1178 pm_transition = state; 1187 pm_transition = state;
1179 async_error = 0; 1188 async_error = 0;
@@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state)
1208 if (error) { 1217 if (error) {
1209 suspend_stats.failed_suspend_noirq++; 1218 suspend_stats.failed_suspend_noirq++;
1210 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 1219 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1211 dpm_resume_noirq(resume_event(state));
1212 } else {
1213 dpm_show_time(starttime, state, "noirq");
1214 } 1220 }
1221 dpm_show_time(starttime, state, error, "noirq");
1215 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); 1222 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1216 return error; 1223 return error;
1217} 1224}
1218 1225
1219/** 1226/**
1227 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1228 * @state: PM transition of the system being carried out.
1229 *
1230 * Prevent device drivers' interrupt handlers from being called and invoke
1231 * "noirq" suspend callbacks for all non-sysdev devices.
1232 */
1233int dpm_suspend_noirq(pm_message_t state)
1234{
1235 int ret;
1236
1237 dpm_noirq_begin();
1238 ret = dpm_noirq_suspend_devices(state);
1239 if (ret)
1240 dpm_resume_noirq(resume_event(state));
1241
1242 return ret;
1243}
1244
1245/**
1220 * device_suspend_late - Execute a "late suspend" callback for given device. 1246 * device_suspend_late - Execute a "late suspend" callback for given device.
1221 * @dev: Device to handle. 1247 * @dev: Device to handle.
1222 * @state: PM transition of the system being carried out. 1248 * @state: PM transition of the system being carried out.
@@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
1350 suspend_stats.failed_suspend_late++; 1376 suspend_stats.failed_suspend_late++;
1351 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1377 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1352 dpm_resume_early(resume_event(state)); 1378 dpm_resume_early(resume_event(state));
1353 } else {
1354 dpm_show_time(starttime, state, "late");
1355 } 1379 }
1380 dpm_show_time(starttime, state, error, "late");
1356 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); 1381 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1357 return error; 1382 return error;
1358} 1383}
@@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
1618 if (error) { 1643 if (error) {
1619 suspend_stats.failed_suspend++; 1644 suspend_stats.failed_suspend++;
1620 dpm_save_failed_step(SUSPEND_SUSPEND); 1645 dpm_save_failed_step(SUSPEND_SUSPEND);
1621 } else 1646 }
1622 dpm_show_time(starttime, state, NULL); 1647 dpm_show_time(starttime, state, error, NULL);
1623 trace_suspend_resume(TPS("dpm_suspend"), state.event, false); 1648 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1624 return error; 1649 return error;
1625} 1650}
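
The noirq suspend phase is split above into dpm_noirq_begin()/dpm_noirq_suspend_devices(), with dpm_noirq_resume_devices()/dpm_noirq_end() as the counterparts, and dpm_suspend_noirq() now simply composes them. A hedged sketch of a caller that keeps the noirq bracket open around a platform-specific step (roughly what the suspend-to-idle rework elsewhere in this series appears to want, per the diffstat) could be; the helpers are assumed to be declared in linux/pm.h, and enter_platform_idle() is hypothetical:

#include <linux/pm.h>
#include <linux/suspend.h>

static int enter_platform_idle(void)	/* hypothetical placeholder */
{
	return 0;
}

static int example_noirq_phase(void)
{
	int error;

	dpm_noirq_begin();	/* pause cpuidle, arm wake IRQs, suspend device IRQs */

	error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
	if (!error)
		error = enter_platform_idle();	/* hypothetical platform step */

	dpm_noirq_resume_devices(PMSG_RESUME);
	dpm_noirq_end();	/* resume device IRQs, disarm wake IRQs, resume cpuidle */

	return error;
}
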
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index b49efe33099e..cdd6f256da59 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -865,7 +865,7 @@ bool pm_wakeup_pending(void)
865void pm_system_wakeup(void) 865void pm_system_wakeup(void)
866{ 866{
867 atomic_inc(&pm_abort_suspend); 867 atomic_inc(&pm_abort_suspend);
868 freeze_wake(); 868 s2idle_wake();
869} 869}
870EXPORT_SYMBOL_GPL(pm_system_wakeup); 870EXPORT_SYMBOL_GPL(pm_system_wakeup);
871 871
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 60bb64f4329d..484cc8909d5c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
77 struct cpuidle_device *dev, 77 struct cpuidle_device *dev,
78 unsigned int max_latency, 78 unsigned int max_latency,
79 unsigned int forbidden_flags, 79 unsigned int forbidden_flags,
80 bool freeze) 80 bool s2idle)
81{ 81{
82 unsigned int latency_req = 0; 82 unsigned int latency_req = 0;
83 int i, ret = 0; 83 int i, ret = 0;
@@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
89 if (s->disabled || su->disable || s->exit_latency <= latency_req 89 if (s->disabled || su->disable || s->exit_latency <= latency_req
90 || s->exit_latency > max_latency 90 || s->exit_latency > max_latency
91 || (s->flags & forbidden_flags) 91 || (s->flags & forbidden_flags)
92 || (freeze && !s->enter_freeze)) 92 || (s2idle && !s->enter_s2idle))
93 continue; 93 continue;
94 94
95 latency_req = s->exit_latency; 95 latency_req = s->exit_latency;
@@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
128} 128}
129 129
130#ifdef CONFIG_SUSPEND 130#ifdef CONFIG_SUSPEND
131static void enter_freeze_proper(struct cpuidle_driver *drv, 131static void enter_s2idle_proper(struct cpuidle_driver *drv,
132 struct cpuidle_device *dev, int index) 132 struct cpuidle_device *dev, int index)
133{ 133{
134 /* 134 /*
@@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
143 * suspended is generally unsafe. 143 * suspended is generally unsafe.
144 */ 144 */
145 stop_critical_timings(); 145 stop_critical_timings();
146 drv->states[index].enter_freeze(dev, drv, index); 146 drv->states[index].enter_s2idle(dev, drv, index);
147 WARN_ON(!irqs_disabled()); 147 WARN_ON(!irqs_disabled());
148 /* 148 /*
149 * timekeeping_resume() that will be called by tick_unfreeze() for the 149 * timekeeping_resume() that will be called by tick_unfreeze() for the
@@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
155} 155}
156 156
157/** 157/**
158 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. 158 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
159 * @drv: cpuidle driver for the given CPU. 159 * @drv: cpuidle driver for the given CPU.
160 * @dev: cpuidle device for the given CPU. 160 * @dev: cpuidle device for the given CPU.
161 * 161 *
162 * If there are states with the ->enter_freeze callback, find the deepest of 162 * If there are states with the ->enter_s2idle callback, find the deepest of
163 * them and enter it with frozen tick. 163 * them and enter it with frozen tick.
164 */ 164 */
165int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) 165int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
166{ 166{
167 int index; 167 int index;
168 168
169 /* 169 /*
170 * Find the deepest state with ->enter_freeze present, which guarantees 170 * Find the deepest state with ->enter_s2idle present, which guarantees
171 * that interrupts won't be enabled when it exits and allows the tick to 171 * that interrupts won't be enabled when it exits and allows the tick to
172 * be frozen safely. 172 * be frozen safely.
173 */ 173 */
174 index = find_deepest_state(drv, dev, UINT_MAX, 0, true); 174 index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
175 if (index > 0) 175 if (index > 0)
176 enter_freeze_proper(drv, dev, index); 176 enter_s2idle_proper(drv, dev, index);
177 177
178 return index; 178 return index;
179} 179}
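
cpuidle_enter_s2idle() is the renamed entry point used while suspended to idle: it picks the deepest state that provides ->enter_s2idle() and enters it with the tick frozen. A hedged sketch of an idle-loop caller; s2idle_requested is a hypothetical stand-in for "suspend-to-idle is in progress", only cpuidle_enter_s2idle() itself comes from the hunk above:

#include <linux/cpuidle.h>

static bool s2idle_requested;	/* hypothetical flag */

static void idle_enter_sketch(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev)
{
	if (s2idle_requested) {
		/* Deepest state providing ->enter_s2idle, tick frozen. */
		cpuidle_enter_s2idle(drv, dev);
		return;
	}

	/* Otherwise the governor picks a state and ->enter() is used. */
}
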
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index bafd4dbf55d4..53342b7f1010 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state,
41 /* 41 /*
42 * Since this is not a "coupled" state, it's safe to assume interrupts 42 * Since this is not a "coupled" state, it's safe to assume interrupts
43 * won't be enabled when it exits allowing the tick to be frozen 43 * won't be enabled when it exits allowing the tick to be frozen
44 * safely. So enter() can be also enter_freeze() callback. 44 * safely. So enter() can be also enter_s2idle() callback.
45 */ 45 */
46 idle_state->enter_freeze = match_id->data; 46 idle_state->enter_s2idle = match_id->data;
47 47
48 err = of_property_read_u32(state_node, "wakeup-latency-us", 48 err = of_property_read_u32(state_node, "wakeup-latency-us",
49 &idle_state->exit_latency); 49 &idle_state->exit_latency);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 7bf8739e33bc..f0b06b14e782 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -97,7 +97,7 @@ static const struct idle_cpu *icpu;
97static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 97static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
98static int intel_idle(struct cpuidle_device *dev, 98static int intel_idle(struct cpuidle_device *dev,
99 struct cpuidle_driver *drv, int index); 99 struct cpuidle_driver *drv, int index);
100static void intel_idle_freeze(struct cpuidle_device *dev, 100static void intel_idle_s2idle(struct cpuidle_device *dev,
101 struct cpuidle_driver *drv, int index); 101 struct cpuidle_driver *drv, int index);
102static struct cpuidle_state *cpuidle_state_table; 102static struct cpuidle_state *cpuidle_state_table;
103 103
@@ -132,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = {
132 .exit_latency = 3, 132 .exit_latency = 3,
133 .target_residency = 6, 133 .target_residency = 6,
134 .enter = &intel_idle, 134 .enter = &intel_idle,
135 .enter_freeze = intel_idle_freeze, }, 135 .enter_s2idle = intel_idle_s2idle, },
136 { 136 {
137 .name = "C1E", 137 .name = "C1E",
138 .desc = "MWAIT 0x01", 138 .desc = "MWAIT 0x01",
@@ -140,7 +140,7 @@ static struct cpuidle_state nehalem_cstates[] = {
140 .exit_latency = 10, 140 .exit_latency = 10,
141 .target_residency = 20, 141 .target_residency = 20,
142 .enter = &intel_idle, 142 .enter = &intel_idle,
143 .enter_freeze = intel_idle_freeze, }, 143 .enter_s2idle = intel_idle_s2idle, },
144 { 144 {
145 .name = "C3", 145 .name = "C3",
146 .desc = "MWAIT 0x10", 146 .desc = "MWAIT 0x10",
@@ -148,7 +148,7 @@ static struct cpuidle_state nehalem_cstates[] = {
148 .exit_latency = 20, 148 .exit_latency = 20,
149 .target_residency = 80, 149 .target_residency = 80,
150 .enter = &intel_idle, 150 .enter = &intel_idle,
151 .enter_freeze = intel_idle_freeze, }, 151 .enter_s2idle = intel_idle_s2idle, },
152 { 152 {
153 .name = "C6", 153 .name = "C6",
154 .desc = "MWAIT 0x20", 154 .desc = "MWAIT 0x20",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] = {
156 .exit_latency = 200, 156 .exit_latency = 200,
157 .target_residency = 800, 157 .target_residency = 800,
158 .enter = &intel_idle, 158 .enter = &intel_idle,
159 .enter_freeze = intel_idle_freeze, }, 159 .enter_s2idle = intel_idle_s2idle, },
160 { 160 {
161 .enter = NULL } 161 .enter = NULL }
162}; 162};
@@ -169,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = {
169 .exit_latency = 2, 169 .exit_latency = 2,
170 .target_residency = 2, 170 .target_residency = 2,
171 .enter = &intel_idle, 171 .enter = &intel_idle,
172 .enter_freeze = intel_idle_freeze, }, 172 .enter_s2idle = intel_idle_s2idle, },
173 { 173 {
174 .name = "C1E", 174 .name = "C1E",
175 .desc = "MWAIT 0x01", 175 .desc = "MWAIT 0x01",
@@ -177,7 +177,7 @@ static struct cpuidle_state snb_cstates[] = {
177 .exit_latency = 10, 177 .exit_latency = 10,
178 .target_residency = 20, 178 .target_residency = 20,
179 .enter = &intel_idle, 179 .enter = &intel_idle,
180 .enter_freeze = intel_idle_freeze, }, 180 .enter_s2idle = intel_idle_s2idle, },
181 { 181 {
182 .name = "C3", 182 .name = "C3",
183 .desc = "MWAIT 0x10", 183 .desc = "MWAIT 0x10",
@@ -185,7 +185,7 @@ static struct cpuidle_state snb_cstates[] = {
185 .exit_latency = 80, 185 .exit_latency = 80,
186 .target_residency = 211, 186 .target_residency = 211,
187 .enter = &intel_idle, 187 .enter = &intel_idle,
188 .enter_freeze = intel_idle_freeze, }, 188 .enter_s2idle = intel_idle_s2idle, },
189 { 189 {
190 .name = "C6", 190 .name = "C6",
191 .desc = "MWAIT 0x20", 191 .desc = "MWAIT 0x20",
@@ -193,7 +193,7 @@ static struct cpuidle_state snb_cstates[] = {
193 .exit_latency = 104, 193 .exit_latency = 104,
194 .target_residency = 345, 194 .target_residency = 345,
195 .enter = &intel_idle, 195 .enter = &intel_idle,
196 .enter_freeze = intel_idle_freeze, }, 196 .enter_s2idle = intel_idle_s2idle, },
197 { 197 {
198 .name = "C7", 198 .name = "C7",
199 .desc = "MWAIT 0x30", 199 .desc = "MWAIT 0x30",
@@ -201,7 +201,7 @@ static struct cpuidle_state snb_cstates[] = {
201 .exit_latency = 109, 201 .exit_latency = 109,
202 .target_residency = 345, 202 .target_residency = 345,
203 .enter = &intel_idle, 203 .enter = &intel_idle,
204 .enter_freeze = intel_idle_freeze, }, 204 .enter_s2idle = intel_idle_s2idle, },
205 { 205 {
206 .enter = NULL } 206 .enter = NULL }
207}; 207};
@@ -214,7 +214,7 @@ static struct cpuidle_state byt_cstates[] = {
214 .exit_latency = 1, 214 .exit_latency = 1,
215 .target_residency = 1, 215 .target_residency = 1,
216 .enter = &intel_idle, 216 .enter = &intel_idle,
217 .enter_freeze = intel_idle_freeze, }, 217 .enter_s2idle = intel_idle_s2idle, },
218 { 218 {
219 .name = "C6N", 219 .name = "C6N",
220 .desc = "MWAIT 0x58", 220 .desc = "MWAIT 0x58",
@@ -222,7 +222,7 @@ static struct cpuidle_state byt_cstates[] = {
222 .exit_latency = 300, 222 .exit_latency = 300,
223 .target_residency = 275, 223 .target_residency = 275,
224 .enter = &intel_idle, 224 .enter = &intel_idle,
225 .enter_freeze = intel_idle_freeze, }, 225 .enter_s2idle = intel_idle_s2idle, },
226 { 226 {
227 .name = "C6S", 227 .name = "C6S",
228 .desc = "MWAIT 0x52", 228 .desc = "MWAIT 0x52",
@@ -230,7 +230,7 @@ static struct cpuidle_state byt_cstates[] = {
230 .exit_latency = 500, 230 .exit_latency = 500,
231 .target_residency = 560, 231 .target_residency = 560,
232 .enter = &intel_idle, 232 .enter = &intel_idle,
233 .enter_freeze = intel_idle_freeze, }, 233 .enter_s2idle = intel_idle_s2idle, },
234 { 234 {
235 .name = "C7", 235 .name = "C7",
236 .desc = "MWAIT 0x60", 236 .desc = "MWAIT 0x60",
@@ -238,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
238 .exit_latency = 1200, 238 .exit_latency = 1200,
239 .target_residency = 4000, 239 .target_residency = 4000,
240 .enter = &intel_idle, 240 .enter = &intel_idle,
241 .enter_freeze = intel_idle_freeze, }, 241 .enter_s2idle = intel_idle_s2idle, },
242 { 242 {
243 .name = "C7S", 243 .name = "C7S",
244 .desc = "MWAIT 0x64", 244 .desc = "MWAIT 0x64",
@@ -246,7 +246,7 @@ static struct cpuidle_state byt_cstates[] = {
246 .exit_latency = 10000, 246 .exit_latency = 10000,
247 .target_residency = 20000, 247 .target_residency = 20000,
248 .enter = &intel_idle, 248 .enter = &intel_idle,
249 .enter_freeze = intel_idle_freeze, }, 249 .enter_s2idle = intel_idle_s2idle, },
250 { 250 {
251 .enter = NULL } 251 .enter = NULL }
252}; 252};
@@ -259,7 +259,7 @@ static struct cpuidle_state cht_cstates[] = {
259 .exit_latency = 1, 259 .exit_latency = 1,
260 .target_residency = 1, 260 .target_residency = 1,
261 .enter = &intel_idle, 261 .enter = &intel_idle,
262 .enter_freeze = intel_idle_freeze, }, 262 .enter_s2idle = intel_idle_s2idle, },
263 { 263 {
264 .name = "C6N", 264 .name = "C6N",
265 .desc = "MWAIT 0x58", 265 .desc = "MWAIT 0x58",
@@ -267,7 +267,7 @@ static struct cpuidle_state cht_cstates[] = {
267 .exit_latency = 80, 267 .exit_latency = 80,
268 .target_residency = 275, 268 .target_residency = 275,
269 .enter = &intel_idle, 269 .enter = &intel_idle,
270 .enter_freeze = intel_idle_freeze, }, 270 .enter_s2idle = intel_idle_s2idle, },
271 { 271 {
272 .name = "C6S", 272 .name = "C6S",
273 .desc = "MWAIT 0x52", 273 .desc = "MWAIT 0x52",
@@ -275,7 +275,7 @@ static struct cpuidle_state cht_cstates[] = {
275 .exit_latency = 200, 275 .exit_latency = 200,
276 .target_residency = 560, 276 .target_residency = 560,
277 .enter = &intel_idle, 277 .enter = &intel_idle,
278 .enter_freeze = intel_idle_freeze, }, 278 .enter_s2idle = intel_idle_s2idle, },
279 { 279 {
280 .name = "C7", 280 .name = "C7",
281 .desc = "MWAIT 0x60", 281 .desc = "MWAIT 0x60",
@@ -283,7 +283,7 @@ static struct cpuidle_state cht_cstates[] = {
283 .exit_latency = 1200, 283 .exit_latency = 1200,
284 .target_residency = 4000, 284 .target_residency = 4000,
285 .enter = &intel_idle, 285 .enter = &intel_idle,
286 .enter_freeze = intel_idle_freeze, }, 286 .enter_s2idle = intel_idle_s2idle, },
287 { 287 {
288 .name = "C7S", 288 .name = "C7S",
289 .desc = "MWAIT 0x64", 289 .desc = "MWAIT 0x64",
@@ -291,7 +291,7 @@ static struct cpuidle_state cht_cstates[] = {
291 .exit_latency = 10000, 291 .exit_latency = 10000,
292 .target_residency = 20000, 292 .target_residency = 20000,
293 .enter = &intel_idle, 293 .enter = &intel_idle,
294 .enter_freeze = intel_idle_freeze, }, 294 .enter_s2idle = intel_idle_s2idle, },
295 { 295 {
296 .enter = NULL } 296 .enter = NULL }
297}; 297};
@@ -304,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = {
304 .exit_latency = 1, 304 .exit_latency = 1,
305 .target_residency = 1, 305 .target_residency = 1,
306 .enter = &intel_idle, 306 .enter = &intel_idle,
307 .enter_freeze = intel_idle_freeze, }, 307 .enter_s2idle = intel_idle_s2idle, },
308 { 308 {
309 .name = "C1E", 309 .name = "C1E",
310 .desc = "MWAIT 0x01", 310 .desc = "MWAIT 0x01",
@@ -312,7 +312,7 @@ static struct cpuidle_state ivb_cstates[] = {
312 .exit_latency = 10, 312 .exit_latency = 10,
313 .target_residency = 20, 313 .target_residency = 20,
314 .enter = &intel_idle, 314 .enter = &intel_idle,
315 .enter_freeze = intel_idle_freeze, }, 315 .enter_s2idle = intel_idle_s2idle, },
316 { 316 {
317 .name = "C3", 317 .name = "C3",
318 .desc = "MWAIT 0x10", 318 .desc = "MWAIT 0x10",
@@ -320,7 +320,7 @@ static struct cpuidle_state ivb_cstates[] = {
320 .exit_latency = 59, 320 .exit_latency = 59,
321 .target_residency = 156, 321 .target_residency = 156,
322 .enter = &intel_idle, 322 .enter = &intel_idle,
323 .enter_freeze = intel_idle_freeze, }, 323 .enter_s2idle = intel_idle_s2idle, },
324 { 324 {
325 .name = "C6", 325 .name = "C6",
326 .desc = "MWAIT 0x20", 326 .desc = "MWAIT 0x20",
@@ -328,7 +328,7 @@ static struct cpuidle_state ivb_cstates[] = {
328 .exit_latency = 80, 328 .exit_latency = 80,
329 .target_residency = 300, 329 .target_residency = 300,
330 .enter = &intel_idle, 330 .enter = &intel_idle,
331 .enter_freeze = intel_idle_freeze, }, 331 .enter_s2idle = intel_idle_s2idle, },
332 { 332 {
333 .name = "C7", 333 .name = "C7",
334 .desc = "MWAIT 0x30", 334 .desc = "MWAIT 0x30",
@@ -336,7 +336,7 @@ static struct cpuidle_state ivb_cstates[] = {
336 .exit_latency = 87, 336 .exit_latency = 87,
337 .target_residency = 300, 337 .target_residency = 300,
338 .enter = &intel_idle, 338 .enter = &intel_idle,
339 .enter_freeze = intel_idle_freeze, }, 339 .enter_s2idle = intel_idle_s2idle, },
340 { 340 {
341 .enter = NULL } 341 .enter = NULL }
342}; 342};
@@ -349,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = {
349 .exit_latency = 1, 349 .exit_latency = 1,
350 .target_residency = 1, 350 .target_residency = 1,
351 .enter = &intel_idle, 351 .enter = &intel_idle,
352 .enter_freeze = intel_idle_freeze, }, 352 .enter_s2idle = intel_idle_s2idle, },
353 { 353 {
354 .name = "C1E", 354 .name = "C1E",
355 .desc = "MWAIT 0x01", 355 .desc = "MWAIT 0x01",
@@ -357,7 +357,7 @@ static struct cpuidle_state ivt_cstates[] = {
357 .exit_latency = 10, 357 .exit_latency = 10,
358 .target_residency = 80, 358 .target_residency = 80,
359 .enter = &intel_idle, 359 .enter = &intel_idle,
360 .enter_freeze = intel_idle_freeze, }, 360 .enter_s2idle = intel_idle_s2idle, },
361 { 361 {
362 .name = "C3", 362 .name = "C3",
363 .desc = "MWAIT 0x10", 363 .desc = "MWAIT 0x10",
@@ -365,7 +365,7 @@ static struct cpuidle_state ivt_cstates[] = {
365 .exit_latency = 59, 365 .exit_latency = 59,
366 .target_residency = 156, 366 .target_residency = 156,
367 .enter = &intel_idle, 367 .enter = &intel_idle,
368 .enter_freeze = intel_idle_freeze, }, 368 .enter_s2idle = intel_idle_s2idle, },
369 { 369 {
370 .name = "C6", 370 .name = "C6",
371 .desc = "MWAIT 0x20", 371 .desc = "MWAIT 0x20",
@@ -373,7 +373,7 @@ static struct cpuidle_state ivt_cstates[] = {
373 .exit_latency = 82, 373 .exit_latency = 82,
374 .target_residency = 300, 374 .target_residency = 300,
375 .enter = &intel_idle, 375 .enter = &intel_idle,
376 .enter_freeze = intel_idle_freeze, }, 376 .enter_s2idle = intel_idle_s2idle, },
377 { 377 {
378 .enter = NULL } 378 .enter = NULL }
379}; 379};
@@ -386,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
386 .exit_latency = 1, 386 .exit_latency = 1,
387 .target_residency = 1, 387 .target_residency = 1,
388 .enter = &intel_idle, 388 .enter = &intel_idle,
389 .enter_freeze = intel_idle_freeze, }, 389 .enter_s2idle = intel_idle_s2idle, },
390 { 390 {
391 .name = "C1E", 391 .name = "C1E",
392 .desc = "MWAIT 0x01", 392 .desc = "MWAIT 0x01",
@@ -394,7 +394,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
394 .exit_latency = 10, 394 .exit_latency = 10,
395 .target_residency = 250, 395 .target_residency = 250,
396 .enter = &intel_idle, 396 .enter = &intel_idle,
397 .enter_freeze = intel_idle_freeze, }, 397 .enter_s2idle = intel_idle_s2idle, },
398 { 398 {
399 .name = "C3", 399 .name = "C3",
400 .desc = "MWAIT 0x10", 400 .desc = "MWAIT 0x10",
@@ -402,7 +402,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
402 .exit_latency = 59, 402 .exit_latency = 59,
403 .target_residency = 300, 403 .target_residency = 300,
404 .enter = &intel_idle, 404 .enter = &intel_idle,
405 .enter_freeze = intel_idle_freeze, }, 405 .enter_s2idle = intel_idle_s2idle, },
406 { 406 {
407 .name = "C6", 407 .name = "C6",
408 .desc = "MWAIT 0x20", 408 .desc = "MWAIT 0x20",
@@ -410,7 +410,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
410 .exit_latency = 84, 410 .exit_latency = 84,
411 .target_residency = 400, 411 .target_residency = 400,
412 .enter = &intel_idle, 412 .enter = &intel_idle,
413 .enter_freeze = intel_idle_freeze, }, 413 .enter_s2idle = intel_idle_s2idle, },
414 { 414 {
415 .enter = NULL } 415 .enter = NULL }
416}; 416};
@@ -423,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
423 .exit_latency = 1, 423 .exit_latency = 1,
424 .target_residency = 1, 424 .target_residency = 1,
425 .enter = &intel_idle, 425 .enter = &intel_idle,
426 .enter_freeze = intel_idle_freeze, }, 426 .enter_s2idle = intel_idle_s2idle, },
427 { 427 {
428 .name = "C1E", 428 .name = "C1E",
429 .desc = "MWAIT 0x01", 429 .desc = "MWAIT 0x01",
@@ -431,7 +431,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
431 .exit_latency = 10, 431 .exit_latency = 10,
432 .target_residency = 500, 432 .target_residency = 500,
433 .enter = &intel_idle, 433 .enter = &intel_idle,
434 .enter_freeze = intel_idle_freeze, }, 434 .enter_s2idle = intel_idle_s2idle, },
435 { 435 {
436 .name = "C3", 436 .name = "C3",
437 .desc = "MWAIT 0x10", 437 .desc = "MWAIT 0x10",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
439 .exit_latency = 59, 439 .exit_latency = 59,
440 .target_residency = 600, 440 .target_residency = 600,
441 .enter = &intel_idle, 441 .enter = &intel_idle,
442 .enter_freeze = intel_idle_freeze, }, 442 .enter_s2idle = intel_idle_s2idle, },
443 { 443 {
444 .name = "C6", 444 .name = "C6",
445 .desc = "MWAIT 0x20", 445 .desc = "MWAIT 0x20",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
447 .exit_latency = 88, 447 .exit_latency = 88,
448 .target_residency = 700, 448 .target_residency = 700,
449 .enter = &intel_idle, 449 .enter = &intel_idle,
450 .enter_freeze = intel_idle_freeze, }, 450 .enter_s2idle = intel_idle_s2idle, },
451 { 451 {
452 .enter = NULL } 452 .enter = NULL }
453}; 453};
@@ -460,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = {
460 .exit_latency = 2, 460 .exit_latency = 2,
461 .target_residency = 2, 461 .target_residency = 2,
462 .enter = &intel_idle, 462 .enter = &intel_idle,
463 .enter_freeze = intel_idle_freeze, }, 463 .enter_s2idle = intel_idle_s2idle, },
464 { 464 {
465 .name = "C1E", 465 .name = "C1E",
466 .desc = "MWAIT 0x01", 466 .desc = "MWAIT 0x01",
@@ -468,7 +468,7 @@ static struct cpuidle_state hsw_cstates[] = {
468 .exit_latency = 10, 468 .exit_latency = 10,
469 .target_residency = 20, 469 .target_residency = 20,
470 .enter = &intel_idle, 470 .enter = &intel_idle,
471 .enter_freeze = intel_idle_freeze, }, 471 .enter_s2idle = intel_idle_s2idle, },
472 { 472 {
473 .name = "C3", 473 .name = "C3",
474 .desc = "MWAIT 0x10", 474 .desc = "MWAIT 0x10",
@@ -476,7 +476,7 @@ static struct cpuidle_state hsw_cstates[] = {
476 .exit_latency = 33, 476 .exit_latency = 33,
477 .target_residency = 100, 477 .target_residency = 100,
478 .enter = &intel_idle, 478 .enter = &intel_idle,
479 .enter_freeze = intel_idle_freeze, }, 479 .enter_s2idle = intel_idle_s2idle, },
480 { 480 {
481 .name = "C6", 481 .name = "C6",
482 .desc = "MWAIT 0x20", 482 .desc = "MWAIT 0x20",
@@ -484,7 +484,7 @@ static struct cpuidle_state hsw_cstates[] = {
484 .exit_latency = 133, 484 .exit_latency = 133,
485 .target_residency = 400, 485 .target_residency = 400,
486 .enter = &intel_idle, 486 .enter = &intel_idle,
487 .enter_freeze = intel_idle_freeze, }, 487 .enter_s2idle = intel_idle_s2idle, },
488 { 488 {
489 .name = "C7s", 489 .name = "C7s",
490 .desc = "MWAIT 0x32", 490 .desc = "MWAIT 0x32",
@@ -492,7 +492,7 @@ static struct cpuidle_state hsw_cstates[] = {
492 .exit_latency = 166, 492 .exit_latency = 166,
493 .target_residency = 500, 493 .target_residency = 500,
494 .enter = &intel_idle, 494 .enter = &intel_idle,
495 .enter_freeze = intel_idle_freeze, }, 495 .enter_s2idle = intel_idle_s2idle, },
496 { 496 {
497 .name = "C8", 497 .name = "C8",
498 .desc = "MWAIT 0x40", 498 .desc = "MWAIT 0x40",
@@ -500,7 +500,7 @@ static struct cpuidle_state hsw_cstates[] = {
500 .exit_latency = 300, 500 .exit_latency = 300,
501 .target_residency = 900, 501 .target_residency = 900,
502 .enter = &intel_idle, 502 .enter = &intel_idle,
503 .enter_freeze = intel_idle_freeze, }, 503 .enter_s2idle = intel_idle_s2idle, },
504 { 504 {
505 .name = "C9", 505 .name = "C9",
506 .desc = "MWAIT 0x50", 506 .desc = "MWAIT 0x50",
@@ -508,7 +508,7 @@ static struct cpuidle_state hsw_cstates[] = {
508 .exit_latency = 600, 508 .exit_latency = 600,
509 .target_residency = 1800, 509 .target_residency = 1800,
510 .enter = &intel_idle, 510 .enter = &intel_idle,
511 .enter_freeze = intel_idle_freeze, }, 511 .enter_s2idle = intel_idle_s2idle, },
512 { 512 {
513 .name = "C10", 513 .name = "C10",
514 .desc = "MWAIT 0x60", 514 .desc = "MWAIT 0x60",
@@ -516,7 +516,7 @@ static struct cpuidle_state hsw_cstates[] = {
516 .exit_latency = 2600, 516 .exit_latency = 2600,
517 .target_residency = 7700, 517 .target_residency = 7700,
518 .enter = &intel_idle, 518 .enter = &intel_idle,
519 .enter_freeze = intel_idle_freeze, }, 519 .enter_s2idle = intel_idle_s2idle, },
520 { 520 {
521 .enter = NULL } 521 .enter = NULL }
522}; 522};
@@ -528,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = {
528 .exit_latency = 2, 528 .exit_latency = 2,
529 .target_residency = 2, 529 .target_residency = 2,
530 .enter = &intel_idle, 530 .enter = &intel_idle,
531 .enter_freeze = intel_idle_freeze, }, 531 .enter_s2idle = intel_idle_s2idle, },
532 { 532 {
533 .name = "C1E", 533 .name = "C1E",
534 .desc = "MWAIT 0x01", 534 .desc = "MWAIT 0x01",
@@ -536,7 +536,7 @@ static struct cpuidle_state bdw_cstates[] = {
536 .exit_latency = 10, 536 .exit_latency = 10,
537 .target_residency = 20, 537 .target_residency = 20,
538 .enter = &intel_idle, 538 .enter = &intel_idle,
539 .enter_freeze = intel_idle_freeze, }, 539 .enter_s2idle = intel_idle_s2idle, },
540 { 540 {
541 .name = "C3", 541 .name = "C3",
542 .desc = "MWAIT 0x10", 542 .desc = "MWAIT 0x10",
@@ -544,7 +544,7 @@ static struct cpuidle_state bdw_cstates[] = {
544 .exit_latency = 40, 544 .exit_latency = 40,
545 .target_residency = 100, 545 .target_residency = 100,
546 .enter = &intel_idle, 546 .enter = &intel_idle,
547 .enter_freeze = intel_idle_freeze, }, 547 .enter_s2idle = intel_idle_s2idle, },
548 { 548 {
549 .name = "C6", 549 .name = "C6",
550 .desc = "MWAIT 0x20", 550 .desc = "MWAIT 0x20",
@@ -552,7 +552,7 @@ static struct cpuidle_state bdw_cstates[] = {
552 .exit_latency = 133, 552 .exit_latency = 133,
553 .target_residency = 400, 553 .target_residency = 400,
554 .enter = &intel_idle, 554 .enter = &intel_idle,
555 .enter_freeze = intel_idle_freeze, }, 555 .enter_s2idle = intel_idle_s2idle, },
556 { 556 {
557 .name = "C7s", 557 .name = "C7s",
558 .desc = "MWAIT 0x32", 558 .desc = "MWAIT 0x32",
@@ -560,7 +560,7 @@ static struct cpuidle_state bdw_cstates[] = {
560 .exit_latency = 166, 560 .exit_latency = 166,
561 .target_residency = 500, 561 .target_residency = 500,
562 .enter = &intel_idle, 562 .enter = &intel_idle,
563 .enter_freeze = intel_idle_freeze, }, 563 .enter_s2idle = intel_idle_s2idle, },
564 { 564 {
565 .name = "C8", 565 .name = "C8",
566 .desc = "MWAIT 0x40", 566 .desc = "MWAIT 0x40",
@@ -568,7 +568,7 @@ static struct cpuidle_state bdw_cstates[] = {
568 .exit_latency = 300, 568 .exit_latency = 300,
569 .target_residency = 900, 569 .target_residency = 900,
570 .enter = &intel_idle, 570 .enter = &intel_idle,
571 .enter_freeze = intel_idle_freeze, }, 571 .enter_s2idle = intel_idle_s2idle, },
572 { 572 {
573 .name = "C9", 573 .name = "C9",
574 .desc = "MWAIT 0x50", 574 .desc = "MWAIT 0x50",
@@ -576,7 +576,7 @@ static struct cpuidle_state bdw_cstates[] = {
576 .exit_latency = 600, 576 .exit_latency = 600,
577 .target_residency = 1800, 577 .target_residency = 1800,
578 .enter = &intel_idle, 578 .enter = &intel_idle,
579 .enter_freeze = intel_idle_freeze, }, 579 .enter_s2idle = intel_idle_s2idle, },
580 { 580 {
581 .name = "C10", 581 .name = "C10",
582 .desc = "MWAIT 0x60", 582 .desc = "MWAIT 0x60",
@@ -584,7 +584,7 @@ static struct cpuidle_state bdw_cstates[] = {
584 .exit_latency = 2600, 584 .exit_latency = 2600,
585 .target_residency = 7700, 585 .target_residency = 7700,
586 .enter = &intel_idle, 586 .enter = &intel_idle,
587 .enter_freeze = intel_idle_freeze, }, 587 .enter_s2idle = intel_idle_s2idle, },
588 { 588 {
589 .enter = NULL } 589 .enter = NULL }
590}; 590};
@@ -597,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = {
597 .exit_latency = 2, 597 .exit_latency = 2,
598 .target_residency = 2, 598 .target_residency = 2,
599 .enter = &intel_idle, 599 .enter = &intel_idle,
600 .enter_freeze = intel_idle_freeze, }, 600 .enter_s2idle = intel_idle_s2idle, },
601 { 601 {
602 .name = "C1E", 602 .name = "C1E",
603 .desc = "MWAIT 0x01", 603 .desc = "MWAIT 0x01",
@@ -605,7 +605,7 @@ static struct cpuidle_state skl_cstates[] = {
605 .exit_latency = 10, 605 .exit_latency = 10,
606 .target_residency = 20, 606 .target_residency = 20,
607 .enter = &intel_idle, 607 .enter = &intel_idle,
608 .enter_freeze = intel_idle_freeze, }, 608 .enter_s2idle = intel_idle_s2idle, },
609 { 609 {
610 .name = "C3", 610 .name = "C3",
611 .desc = "MWAIT 0x10", 611 .desc = "MWAIT 0x10",
@@ -613,7 +613,7 @@ static struct cpuidle_state skl_cstates[] = {
613 .exit_latency = 70, 613 .exit_latency = 70,
614 .target_residency = 100, 614 .target_residency = 100,
615 .enter = &intel_idle, 615 .enter = &intel_idle,
616 .enter_freeze = intel_idle_freeze, }, 616 .enter_s2idle = intel_idle_s2idle, },
617 { 617 {
618 .name = "C6", 618 .name = "C6",
619 .desc = "MWAIT 0x20", 619 .desc = "MWAIT 0x20",
@@ -621,7 +621,7 @@ static struct cpuidle_state skl_cstates[] = {
621 .exit_latency = 85, 621 .exit_latency = 85,
622 .target_residency = 200, 622 .target_residency = 200,
623 .enter = &intel_idle, 623 .enter = &intel_idle,
624 .enter_freeze = intel_idle_freeze, }, 624 .enter_s2idle = intel_idle_s2idle, },
625 { 625 {
626 .name = "C7s", 626 .name = "C7s",
627 .desc = "MWAIT 0x33", 627 .desc = "MWAIT 0x33",
@@ -629,7 +629,7 @@ static struct cpuidle_state skl_cstates[] = {
629 .exit_latency = 124, 629 .exit_latency = 124,
630 .target_residency = 800, 630 .target_residency = 800,
631 .enter = &intel_idle, 631 .enter = &intel_idle,
632 .enter_freeze = intel_idle_freeze, }, 632 .enter_s2idle = intel_idle_s2idle, },
633 { 633 {
634 .name = "C8", 634 .name = "C8",
635 .desc = "MWAIT 0x40", 635 .desc = "MWAIT 0x40",
@@ -637,7 +637,7 @@ static struct cpuidle_state skl_cstates[] = {
637 .exit_latency = 200, 637 .exit_latency = 200,
638 .target_residency = 800, 638 .target_residency = 800,
639 .enter = &intel_idle, 639 .enter = &intel_idle,
640 .enter_freeze = intel_idle_freeze, }, 640 .enter_s2idle = intel_idle_s2idle, },
641 { 641 {
642 .name = "C9", 642 .name = "C9",
643 .desc = "MWAIT 0x50", 643 .desc = "MWAIT 0x50",
@@ -645,7 +645,7 @@ static struct cpuidle_state skl_cstates[] = {
645 .exit_latency = 480, 645 .exit_latency = 480,
646 .target_residency = 5000, 646 .target_residency = 5000,
647 .enter = &intel_idle, 647 .enter = &intel_idle,
648 .enter_freeze = intel_idle_freeze, }, 648 .enter_s2idle = intel_idle_s2idle, },
649 { 649 {
650 .name = "C10", 650 .name = "C10",
651 .desc = "MWAIT 0x60", 651 .desc = "MWAIT 0x60",
@@ -653,7 +653,7 @@ static struct cpuidle_state skl_cstates[] = {
653 .exit_latency = 890, 653 .exit_latency = 890,
654 .target_residency = 5000, 654 .target_residency = 5000,
655 .enter = &intel_idle, 655 .enter = &intel_idle,
656 .enter_freeze = intel_idle_freeze, }, 656 .enter_s2idle = intel_idle_s2idle, },
657 { 657 {
658 .enter = NULL } 658 .enter = NULL }
659}; 659};
@@ -666,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = {
666 .exit_latency = 2, 666 .exit_latency = 2,
667 .target_residency = 2, 667 .target_residency = 2,
668 .enter = &intel_idle, 668 .enter = &intel_idle,
669 .enter_freeze = intel_idle_freeze, }, 669 .enter_s2idle = intel_idle_s2idle, },
670 { 670 {
671 .name = "C1E", 671 .name = "C1E",
672 .desc = "MWAIT 0x01", 672 .desc = "MWAIT 0x01",
@@ -674,7 +674,7 @@ static struct cpuidle_state skx_cstates[] = {
674 .exit_latency = 10, 674 .exit_latency = 10,
675 .target_residency = 20, 675 .target_residency = 20,
676 .enter = &intel_idle, 676 .enter = &intel_idle,
677 .enter_freeze = intel_idle_freeze, }, 677 .enter_s2idle = intel_idle_s2idle, },
678 { 678 {
679 .name = "C6", 679 .name = "C6",
680 .desc = "MWAIT 0x20", 680 .desc = "MWAIT 0x20",
@@ -682,7 +682,7 @@ static struct cpuidle_state skx_cstates[] = {
682 .exit_latency = 133, 682 .exit_latency = 133,
683 .target_residency = 600, 683 .target_residency = 600,
684 .enter = &intel_idle, 684 .enter = &intel_idle,
685 .enter_freeze = intel_idle_freeze, }, 685 .enter_s2idle = intel_idle_s2idle, },
686 { 686 {
687 .enter = NULL } 687 .enter = NULL }
688}; 688};
@@ -695,7 +695,7 @@ static struct cpuidle_state atom_cstates[] = {
695 .exit_latency = 10, 695 .exit_latency = 10,
696 .target_residency = 20, 696 .target_residency = 20,
697 .enter = &intel_idle, 697 .enter = &intel_idle,
698 .enter_freeze = intel_idle_freeze, }, 698 .enter_s2idle = intel_idle_s2idle, },
699 { 699 {
700 .name = "C2", 700 .name = "C2",
701 .desc = "MWAIT 0x10", 701 .desc = "MWAIT 0x10",
@@ -703,7 +703,7 @@ static struct cpuidle_state atom_cstates[] = {
703 .exit_latency = 20, 703 .exit_latency = 20,
704 .target_residency = 80, 704 .target_residency = 80,
705 .enter = &intel_idle, 705 .enter = &intel_idle,
706 .enter_freeze = intel_idle_freeze, }, 706 .enter_s2idle = intel_idle_s2idle, },
707 { 707 {
708 .name = "C4", 708 .name = "C4",
709 .desc = "MWAIT 0x30", 709 .desc = "MWAIT 0x30",
@@ -711,7 +711,7 @@ static struct cpuidle_state atom_cstates[] = {
711 .exit_latency = 100, 711 .exit_latency = 100,
712 .target_residency = 400, 712 .target_residency = 400,
713 .enter = &intel_idle, 713 .enter = &intel_idle,
714 .enter_freeze = intel_idle_freeze, }, 714 .enter_s2idle = intel_idle_s2idle, },
715 { 715 {
716 .name = "C6", 716 .name = "C6",
717 .desc = "MWAIT 0x52", 717 .desc = "MWAIT 0x52",
@@ -719,7 +719,7 @@ static struct cpuidle_state atom_cstates[] = {
719 .exit_latency = 140, 719 .exit_latency = 140,
720 .target_residency = 560, 720 .target_residency = 560,
721 .enter = &intel_idle, 721 .enter = &intel_idle,
722 .enter_freeze = intel_idle_freeze, }, 722 .enter_s2idle = intel_idle_s2idle, },
723 { 723 {
724 .enter = NULL } 724 .enter = NULL }
725}; 725};
@@ -731,7 +731,7 @@ static struct cpuidle_state tangier_cstates[] = {
731 .exit_latency = 1, 731 .exit_latency = 1,
732 .target_residency = 4, 732 .target_residency = 4,
733 .enter = &intel_idle, 733 .enter = &intel_idle,
734 .enter_freeze = intel_idle_freeze, }, 734 .enter_s2idle = intel_idle_s2idle, },
735 { 735 {
736 .name = "C4", 736 .name = "C4",
737 .desc = "MWAIT 0x30", 737 .desc = "MWAIT 0x30",
@@ -739,7 +739,7 @@ static struct cpuidle_state tangier_cstates[] = {
739 .exit_latency = 100, 739 .exit_latency = 100,
740 .target_residency = 400, 740 .target_residency = 400,
741 .enter = &intel_idle, 741 .enter = &intel_idle,
742 .enter_freeze = intel_idle_freeze, }, 742 .enter_s2idle = intel_idle_s2idle, },
743 { 743 {
744 .name = "C6", 744 .name = "C6",
745 .desc = "MWAIT 0x52", 745 .desc = "MWAIT 0x52",
@@ -747,7 +747,7 @@ static struct cpuidle_state tangier_cstates[] = {
747 .exit_latency = 140, 747 .exit_latency = 140,
748 .target_residency = 560, 748 .target_residency = 560,
749 .enter = &intel_idle, 749 .enter = &intel_idle,
750 .enter_freeze = intel_idle_freeze, }, 750 .enter_s2idle = intel_idle_s2idle, },
751 { 751 {
752 .name = "C7", 752 .name = "C7",
753 .desc = "MWAIT 0x60", 753 .desc = "MWAIT 0x60",
@@ -755,7 +755,7 @@ static struct cpuidle_state tangier_cstates[] = {
755 .exit_latency = 1200, 755 .exit_latency = 1200,
756 .target_residency = 4000, 756 .target_residency = 4000,
757 .enter = &intel_idle, 757 .enter = &intel_idle,
758 .enter_freeze = intel_idle_freeze, }, 758 .enter_s2idle = intel_idle_s2idle, },
759 { 759 {
760 .name = "C9", 760 .name = "C9",
761 .desc = "MWAIT 0x64", 761 .desc = "MWAIT 0x64",
@@ -763,7 +763,7 @@ static struct cpuidle_state tangier_cstates[] = {
763 .exit_latency = 10000, 763 .exit_latency = 10000,
764 .target_residency = 20000, 764 .target_residency = 20000,
765 .enter = &intel_idle, 765 .enter = &intel_idle,
766 .enter_freeze = intel_idle_freeze, }, 766 .enter_s2idle = intel_idle_s2idle, },
767 { 767 {
768 .enter = NULL } 768 .enter = NULL }
769}; 769};
@@ -775,7 +775,7 @@ static struct cpuidle_state avn_cstates[] = {
775 .exit_latency = 2, 775 .exit_latency = 2,
776 .target_residency = 2, 776 .target_residency = 2,
777 .enter = &intel_idle, 777 .enter = &intel_idle,
778 .enter_freeze = intel_idle_freeze, }, 778 .enter_s2idle = intel_idle_s2idle, },
779 { 779 {
780 .name = "C6", 780 .name = "C6",
781 .desc = "MWAIT 0x51", 781 .desc = "MWAIT 0x51",
@@ -783,7 +783,7 @@ static struct cpuidle_state avn_cstates[] = {
783 .exit_latency = 15, 783 .exit_latency = 15,
784 .target_residency = 45, 784 .target_residency = 45,
785 .enter = &intel_idle, 785 .enter = &intel_idle,
786 .enter_freeze = intel_idle_freeze, }, 786 .enter_s2idle = intel_idle_s2idle, },
787 { 787 {
788 .enter = NULL } 788 .enter = NULL }
789}; 789};
@@ -795,7 +795,7 @@ static struct cpuidle_state knl_cstates[] = {
795 .exit_latency = 1, 795 .exit_latency = 1,
796 .target_residency = 2, 796 .target_residency = 2,
797 .enter = &intel_idle, 797 .enter = &intel_idle,
798 .enter_freeze = intel_idle_freeze }, 798 .enter_s2idle = intel_idle_s2idle },
799 { 799 {
800 .name = "C6", 800 .name = "C6",
801 .desc = "MWAIT 0x10", 801 .desc = "MWAIT 0x10",
@@ -803,7 +803,7 @@ static struct cpuidle_state knl_cstates[] = {
803 .exit_latency = 120, 803 .exit_latency = 120,
804 .target_residency = 500, 804 .target_residency = 500,
805 .enter = &intel_idle, 805 .enter = &intel_idle,
806 .enter_freeze = intel_idle_freeze }, 806 .enter_s2idle = intel_idle_s2idle },
807 { 807 {
808 .enter = NULL } 808 .enter = NULL }
809}; 809};
@@ -816,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = {
816 .exit_latency = 2, 816 .exit_latency = 2,
817 .target_residency = 2, 817 .target_residency = 2,
818 .enter = &intel_idle, 818 .enter = &intel_idle,
819 .enter_freeze = intel_idle_freeze, }, 819 .enter_s2idle = intel_idle_s2idle, },
820 { 820 {
821 .name = "C1E", 821 .name = "C1E",
822 .desc = "MWAIT 0x01", 822 .desc = "MWAIT 0x01",
@@ -824,7 +824,7 @@ static struct cpuidle_state bxt_cstates[] = {
824 .exit_latency = 10, 824 .exit_latency = 10,
825 .target_residency = 20, 825 .target_residency = 20,
826 .enter = &intel_idle, 826 .enter = &intel_idle,
827 .enter_freeze = intel_idle_freeze, }, 827 .enter_s2idle = intel_idle_s2idle, },
828 { 828 {
829 .name = "C6", 829 .name = "C6",
830 .desc = "MWAIT 0x20", 830 .desc = "MWAIT 0x20",
@@ -832,7 +832,7 @@ static struct cpuidle_state bxt_cstates[] = {
832 .exit_latency = 133, 832 .exit_latency = 133,
833 .target_residency = 133, 833 .target_residency = 133,
834 .enter = &intel_idle, 834 .enter = &intel_idle,
835 .enter_freeze = intel_idle_freeze, }, 835 .enter_s2idle = intel_idle_s2idle, },
836 { 836 {
837 .name = "C7s", 837 .name = "C7s",
838 .desc = "MWAIT 0x31", 838 .desc = "MWAIT 0x31",
@@ -840,7 +840,7 @@ static struct cpuidle_state bxt_cstates[] = {
840 .exit_latency = 155, 840 .exit_latency = 155,
841 .target_residency = 155, 841 .target_residency = 155,
842 .enter = &intel_idle, 842 .enter = &intel_idle,
843 .enter_freeze = intel_idle_freeze, }, 843 .enter_s2idle = intel_idle_s2idle, },
844 { 844 {
845 .name = "C8", 845 .name = "C8",
846 .desc = "MWAIT 0x40", 846 .desc = "MWAIT 0x40",
@@ -848,7 +848,7 @@ static struct cpuidle_state bxt_cstates[] = {
848 .exit_latency = 1000, 848 .exit_latency = 1000,
849 .target_residency = 1000, 849 .target_residency = 1000,
850 .enter = &intel_idle, 850 .enter = &intel_idle,
851 .enter_freeze = intel_idle_freeze, }, 851 .enter_s2idle = intel_idle_s2idle, },
852 { 852 {
853 .name = "C9", 853 .name = "C9",
854 .desc = "MWAIT 0x50", 854 .desc = "MWAIT 0x50",
@@ -856,7 +856,7 @@ static struct cpuidle_state bxt_cstates[] = {
856 .exit_latency = 2000, 856 .exit_latency = 2000,
857 .target_residency = 2000, 857 .target_residency = 2000,
858 .enter = &intel_idle, 858 .enter = &intel_idle,
859 .enter_freeze = intel_idle_freeze, }, 859 .enter_s2idle = intel_idle_s2idle, },
860 { 860 {
861 .name = "C10", 861 .name = "C10",
862 .desc = "MWAIT 0x60", 862 .desc = "MWAIT 0x60",
@@ -864,7 +864,7 @@ static struct cpuidle_state bxt_cstates[] = {
864 .exit_latency = 10000, 864 .exit_latency = 10000,
865 .target_residency = 10000, 865 .target_residency = 10000,
866 .enter = &intel_idle, 866 .enter = &intel_idle,
867 .enter_freeze = intel_idle_freeze, }, 867 .enter_s2idle = intel_idle_s2idle, },
868 { 868 {
869 .enter = NULL } 869 .enter = NULL }
870}; 870};
@@ -877,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = {
877 .exit_latency = 2, 877 .exit_latency = 2,
878 .target_residency = 2, 878 .target_residency = 2,
879 .enter = &intel_idle, 879 .enter = &intel_idle,
880 .enter_freeze = intel_idle_freeze, }, 880 .enter_s2idle = intel_idle_s2idle, },
881 { 881 {
882 .name = "C1E", 882 .name = "C1E",
883 .desc = "MWAIT 0x01", 883 .desc = "MWAIT 0x01",
@@ -885,7 +885,7 @@ static struct cpuidle_state dnv_cstates[] = {
885 .exit_latency = 10, 885 .exit_latency = 10,
886 .target_residency = 20, 886 .target_residency = 20,
887 .enter = &intel_idle, 887 .enter = &intel_idle,
888 .enter_freeze = intel_idle_freeze, }, 888 .enter_s2idle = intel_idle_s2idle, },
889 { 889 {
890 .name = "C6", 890 .name = "C6",
891 .desc = "MWAIT 0x20", 891 .desc = "MWAIT 0x20",
@@ -893,7 +893,7 @@ static struct cpuidle_state dnv_cstates[] = {
893 .exit_latency = 50, 893 .exit_latency = 50,
894 .target_residency = 500, 894 .target_residency = 500,
895 .enter = &intel_idle, 895 .enter = &intel_idle,
896 .enter_freeze = intel_idle_freeze, }, 896 .enter_s2idle = intel_idle_s2idle, },
897 { 897 {
898 .enter = NULL } 898 .enter = NULL }
899}; 899};
@@ -936,12 +936,12 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
936} 936}
937 937
938/** 938/**
939 * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle 939 * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
940 * @dev: cpuidle_device 940 * @dev: cpuidle_device
941 * @drv: cpuidle driver 941 * @drv: cpuidle driver
942 * @index: state index 942 * @index: state index
943 */ 943 */
944static void intel_idle_freeze(struct cpuidle_device *dev, 944static void intel_idle_s2idle(struct cpuidle_device *dev,
945 struct cpuidle_driver *drv, int index) 945 struct cpuidle_driver *drv, int index)
946{ 946{
947 unsigned long ecx = 1; /* break on interrupt flag */ 947 unsigned long ecx = 1; /* break on interrupt flag */
@@ -1338,7 +1338,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
1338 int num_substates, mwait_hint, mwait_cstate; 1338 int num_substates, mwait_hint, mwait_cstate;
1339 1339
1340 if ((cpuidle_state_table[cstate].enter == NULL) && 1340 if ((cpuidle_state_table[cstate].enter == NULL) &&
1341 (cpuidle_state_table[cstate].enter_freeze == NULL)) 1341 (cpuidle_state_table[cstate].enter_s2idle == NULL))
1342 break; 1342 break;
1343 1343
1344 if (cstate + 1 > max_cstate) { 1344 if (cstate + 1 > max_cstate) {
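
Each hardware C-state that should also be usable for suspend-to-idle now carries the renamed ->enter_s2idle callback next to its regular ->enter handler, and intel_idle_cpuidle_driver_init() keeps scanning the table as long as either pointer is set. A minimal sketch of one such table row, with illustrative names and latency numbers rather than values from a real table:

#include <linux/cpuidle.h>

static struct cpuidle_state my_cstates[] = {	/* illustrative table */
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,			/* regular idle entry */
		.enter_s2idle = intel_idle_s2idle, },	/* suspend-to-idle entry */
	{
		.enter = NULL }		/* terminator: both callbacks NULL */
};
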
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 8519e0f97bdd..a782c78e7c63 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -203,15 +203,26 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
203 acpi_status status; 203 acpi_status status;
204 204
205 if (priv->wakeup_mode) { 205 if (priv->wakeup_mode) {
206 /*
207 * Needed for wakeup from suspend-to-idle to work on some
208 * platforms that don't expose the 5-button array, but still
209 * send notifies with the power button event code to this
210 * device object on power button actions while suspended.
211 */
212 if (event == 0xce)
213 goto wakeup;
214
206 /* Wake up on 5-button array events only. */ 215 /* Wake up on 5-button array events only. */
207 if (event == 0xc0 || !priv->array) 216 if (event == 0xc0 || !priv->array)
208 return; 217 return;
209 218
210 if (sparse_keymap_entry_from_scancode(priv->array, event)) 219 if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
211 pm_wakeup_hard_event(&device->dev);
212 else
213 dev_info(&device->dev, "unknown event 0x%x\n", event); 220 dev_info(&device->dev, "unknown event 0x%x\n", event);
221 return;
222 }
214 223
224wakeup:
225 pm_wakeup_hard_event(&device->dev);
215 return; 226 return;
216 } 227 }
217 228
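
Both the 0xce power-button notify and a recognized 5-button scancode now end up in pm_wakeup_hard_event(), which registers a wakeup that cannot be dropped while a transition to suspend-to-idle is in flight. A hedged sketch of that reporting pattern in isolation, using the pm_suspend_target_state export added elsewhere in this patch; my_notify and its context pointer are illustrative, not the driver's actual code:

#include <linux/acpi.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>

static void my_notify(acpi_handle handle, u32 event, void *context)
{
	struct acpi_device *device = context;

	if (pm_suspend_target_state != PM_SUSPEND_ON) {
		/* Suspending or suspended: report a wakeup that must stick. */
		pm_wakeup_hard_event(&device->dev);
		return;
	}
	/* ... normal runtime handling of the event ... */
}
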
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 96bf75458da5..860480ecf2be 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -150,7 +150,7 @@ static void of_get_regulation_constraints(struct device_node *np,
150 suspend_state = &constraints->state_disk; 150 suspend_state = &constraints->state_disk;
151 break; 151 break;
152 case PM_SUSPEND_ON: 152 case PM_SUSPEND_ON:
153 case PM_SUSPEND_FREEZE: 153 case PM_SUSPEND_TO_IDLE:
154 case PM_SUSPEND_STANDBY: 154 case PM_SUSPEND_STANDBY:
155 default: 155 default:
156 continue; 156 continue;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 5baacd3a0559..8f7788d23b57 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -52,11 +52,11 @@ struct cpuidle_state {
52 int (*enter_dead) (struct cpuidle_device *dev, int index); 52 int (*enter_dead) (struct cpuidle_device *dev, int index);
53 53
54 /* 54 /*
55 * CPUs execute ->enter_freeze with the local tick or entire timekeeping 55 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
56 * suspended, so it must not re-enable interrupts at any point (even 56 * suspended, so it must not re-enable interrupts at any point (even
57 * temporarily) or attempt to change states of clock event devices. 57 * temporarily) or attempt to change states of clock event devices.
58 */ 58 */
59 void (*enter_freeze) (struct cpuidle_device *dev, 59 void (*enter_s2idle) (struct cpuidle_device *dev,
60 struct cpuidle_driver *drv, 60 struct cpuidle_driver *drv,
61 int index); 61 int index);
62}; 62};
@@ -198,14 +198,14 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
198#ifdef CONFIG_CPU_IDLE 198#ifdef CONFIG_CPU_IDLE
199extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 199extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
200 struct cpuidle_device *dev); 200 struct cpuidle_device *dev);
201extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, 201extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
202 struct cpuidle_device *dev); 202 struct cpuidle_device *dev);
203extern void cpuidle_use_deepest_state(bool enable); 203extern void cpuidle_use_deepest_state(bool enable);
204#else 204#else
205static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 205static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
206 struct cpuidle_device *dev) 206 struct cpuidle_device *dev)
207{return -ENODEV; } 207{return -ENODEV; }
208static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, 208static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
209 struct cpuidle_device *dev) 209 struct cpuidle_device *dev)
210{return -ENODEV; } 210{return -ENODEV; }
211static inline void cpuidle_use_deepest_state(bool enable) 211static inline void cpuidle_use_deepest_state(bool enable)
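
The comment above is the whole contract: an ->enter_s2idle callback runs with the tick (or all of timekeeping) suspended, so it must park the CPU without re-enabling interrupts or reprogramming clock event devices. A hedged sketch of such a callback for an MWAIT-capable x86 CPU; my_c1_s2idle and the fixed hint are illustrative, not taken from a particular driver:

#include <linux/cpuidle.h>
#include <asm/mwait.h>

static void my_c1_s2idle(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	unsigned long eax = 0x00;	/* MWAIT hint for C1 in this sketch */
	unsigned long ecx = 1;		/* break on interrupt flag */

	/* Waits for a pending interrupt without ever enabling interrupts. */
	mwait_idle_with_hints(eax, ecx);
}
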
diff --git a/include/linux/pm.h b/include/linux/pm.h
index b8b4df09fd8f..47ded8aa8a5d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -689,6 +689,8 @@ struct dev_pm_domain {
689extern void device_pm_lock(void); 689extern void device_pm_lock(void);
690extern void dpm_resume_start(pm_message_t state); 690extern void dpm_resume_start(pm_message_t state);
691extern void dpm_resume_end(pm_message_t state); 691extern void dpm_resume_end(pm_message_t state);
692extern void dpm_noirq_resume_devices(pm_message_t state);
693extern void dpm_noirq_end(void);
692extern void dpm_resume_noirq(pm_message_t state); 694extern void dpm_resume_noirq(pm_message_t state);
693extern void dpm_resume_early(pm_message_t state); 695extern void dpm_resume_early(pm_message_t state);
694extern void dpm_resume(pm_message_t state); 696extern void dpm_resume(pm_message_t state);
@@ -697,6 +699,8 @@ extern void dpm_complete(pm_message_t state);
697extern void device_pm_unlock(void); 699extern void device_pm_unlock(void);
698extern int dpm_suspend_end(pm_message_t state); 700extern int dpm_suspend_end(pm_message_t state);
699extern int dpm_suspend_start(pm_message_t state); 701extern int dpm_suspend_start(pm_message_t state);
702extern void dpm_noirq_begin(void);
703extern int dpm_noirq_suspend_devices(pm_message_t state);
700extern int dpm_suspend_noirq(pm_message_t state); 704extern int dpm_suspend_noirq(pm_message_t state);
701extern int dpm_suspend_late(pm_message_t state); 705extern int dpm_suspend_late(pm_message_t state);
702extern int dpm_suspend(pm_message_t state); 706extern int dpm_suspend(pm_message_t state);
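
The four new exports expose the two halves of the noirq phase (begin/suspend and resume/end) separately, and the suspend-to-idle loop added later in this patch drives them directly. A condensed sketch of the intended pairing; my_noirq_cycle is an illustrative helper, not kernel code:

#include <linux/pm.h>

static int my_noirq_cycle(void)
{
	int error;

	dpm_noirq_begin();				/* open the noirq phase */
	error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
	if (!error) {
		/* ... leave the CPUs idle here until a wakeup kicks them ... */
	}
	dpm_noirq_resume_devices(PMSG_RESUME);
	dpm_noirq_end();				/* close the noirq phase */
	return error;
}
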
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d10b7980799d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -33,10 +33,10 @@ static inline void pm_restore_console(void)
33typedef int __bitwise suspend_state_t; 33typedef int __bitwise suspend_state_t;
34 34
35#define PM_SUSPEND_ON ((__force suspend_state_t) 0) 35#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
36#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1) 36#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1)
37#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2) 37#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
38#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 38#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
39#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE 39#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
40#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) 40#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
41 41
42enum suspend_stat_step { 42enum suspend_stat_step {
@@ -186,7 +186,7 @@ struct platform_suspend_ops {
186 void (*recover)(void); 186 void (*recover)(void);
187}; 187};
188 188
189struct platform_freeze_ops { 189struct platform_s2idle_ops {
190 int (*begin)(void); 190 int (*begin)(void);
191 int (*prepare)(void); 191 int (*prepare)(void);
192 void (*wake)(void); 192 void (*wake)(void);
@@ -196,6 +196,9 @@ struct platform_freeze_ops {
196}; 196};
197 197
198#ifdef CONFIG_SUSPEND 198#ifdef CONFIG_SUSPEND
199extern suspend_state_t mem_sleep_current;
200extern suspend_state_t mem_sleep_default;
201
199/** 202/**
200 * suspend_set_ops - set platform dependent suspend operations 203 * suspend_set_ops - set platform dependent suspend operations
201 * @ops: The new suspend operations to set. 204 * @ops: The new suspend operations to set.
@@ -234,22 +237,22 @@ static inline bool pm_resume_via_firmware(void)
234} 237}
235 238
236 /* Suspend-to-idle state machine. */ 239 /* Suspend-to-idle state machine. */
237enum freeze_state { 240enum s2idle_states {
238 FREEZE_STATE_NONE, /* Not suspended/suspending. */ 241 S2IDLE_STATE_NONE, /* Not suspended/suspending. */
239 FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */ 242 S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */
240 FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */ 243 S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */
241}; 244};
242 245
243extern enum freeze_state __read_mostly suspend_freeze_state; 246extern enum s2idle_states __read_mostly s2idle_state;
244 247
245static inline bool idle_should_freeze(void) 248static inline bool idle_should_enter_s2idle(void)
246{ 249{
247 return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); 250 return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
248} 251}
249 252
250extern void __init pm_states_init(void); 253extern void __init pm_states_init(void);
251extern void freeze_set_ops(const struct platform_freeze_ops *ops); 254extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
252extern void freeze_wake(void); 255extern void s2idle_wake(void);
253 256
254/** 257/**
255 * arch_suspend_disable_irqs - disable IRQs for suspend 258 * arch_suspend_disable_irqs - disable IRQs for suspend
@@ -281,10 +284,10 @@ static inline bool pm_resume_via_firmware(void) { return false; }
281 284
282static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 285static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
283static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 286static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
284static inline bool idle_should_freeze(void) { return false; } 287static inline bool idle_should_enter_s2idle(void) { return false; }
285static inline void __init pm_states_init(void) {} 288static inline void __init pm_states_init(void) {}
286static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} 289static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
287static inline void freeze_wake(void) {} 290static inline void s2idle_wake(void) {}
288#endif /* !CONFIG_SUSPEND */ 291#endif /* !CONFIG_SUSPEND */
289 292
290/* struct pbe is used for creating lists of pages that should be restored 293/* struct pbe is used for creating lists of pages that should be restored
@@ -427,6 +430,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
427/* drivers/base/power/wakeup.c */ 430/* drivers/base/power/wakeup.c */
428extern bool events_check_enabled; 431extern bool events_check_enabled;
429extern unsigned int pm_wakeup_irq; 432extern unsigned int pm_wakeup_irq;
433extern suspend_state_t pm_suspend_target_state;
430 434
431extern bool pm_wakeup_pending(void); 435extern bool pm_wakeup_pending(void);
432extern void pm_system_wakeup(void); 436extern void pm_system_wakeup(void);
@@ -491,10 +495,24 @@ static inline void unlock_system_sleep(void) {}
491 495
492#ifdef CONFIG_PM_SLEEP_DEBUG 496#ifdef CONFIG_PM_SLEEP_DEBUG
493extern bool pm_print_times_enabled; 497extern bool pm_print_times_enabled;
498extern bool pm_debug_messages_on;
499extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
494#else 500#else
495#define pm_print_times_enabled (false) 501#define pm_print_times_enabled (false)
502#define pm_debug_messages_on (false)
503
504#include <linux/printk.h>
505
506#define __pm_pr_dbg(defer, fmt, ...) \
507 no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
496#endif 508#endif
497 509
510#define pm_pr_dbg(fmt, ...) \
511 __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
512
513#define pm_deferred_pr_dbg(fmt, ...) \
514 __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
515
498#ifdef CONFIG_PM_AUTOSLEEP 516#ifdef CONFIG_PM_AUTOSLEEP
499 517
500/* kernel/power/autosleep.c */ 518/* kernel/power/autosleep.c */
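
With the rename, platform code provides its suspend-to-idle hooks through struct platform_s2idle_ops and registers them with s2idle_set_ops() (formerly freeze_set_ops()). A minimal registration sketch; my_s2idle_ops and its callbacks are illustrative, not an existing platform:

#include <linux/init.h>
#include <linux/suspend.h>

static int my_s2idle_begin(void)	{ return 0; }
static int my_s2idle_prepare(void)	{ return 0; }
static void my_s2idle_wake(void)	{ }
static void my_s2idle_end(void)		{ }

static const struct platform_s2idle_ops my_s2idle_ops = {
	.begin	 = my_s2idle_begin,
	.prepare = my_s2idle_prepare,
	.wake	 = my_s2idle_wake,
	.end	 = my_s2idle_end,
};

static int __init my_platform_pm_init(void)
{
	s2idle_set_ops(&my_s2idle_ops);
	return 0;
}
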
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index e1914c7b85b1..a5c36e9c56a6 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -651,7 +651,7 @@ static int load_image_and_restore(void)
651 int error; 651 int error;
652 unsigned int flags; 652 unsigned int flags;
653 653
654 pr_debug("Loading hibernation image.\n"); 654 pm_pr_dbg("Loading hibernation image.\n");
655 655
656 lock_device_hotplug(); 656 lock_device_hotplug();
657 error = create_basic_memory_bitmaps(); 657 error = create_basic_memory_bitmaps();
@@ -681,7 +681,7 @@ int hibernate(void)
681 bool snapshot_test = false; 681 bool snapshot_test = false;
682 682
683 if (!hibernation_available()) { 683 if (!hibernation_available()) {
684 pr_debug("Hibernation not available.\n"); 684 pm_pr_dbg("Hibernation not available.\n");
685 return -EPERM; 685 return -EPERM;
686 } 686 }
687 687
@@ -692,6 +692,7 @@ int hibernate(void)
692 goto Unlock; 692 goto Unlock;
693 } 693 }
694 694
695 pr_info("hibernation entry\n");
695 pm_prepare_console(); 696 pm_prepare_console();
696 error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); 697 error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
697 if (error) { 698 if (error) {
@@ -727,7 +728,7 @@ int hibernate(void)
727 else 728 else
728 flags |= SF_CRC32_MODE; 729 flags |= SF_CRC32_MODE;
729 730
730 pr_debug("Writing image.\n"); 731 pm_pr_dbg("Writing image.\n");
731 error = swsusp_write(flags); 732 error = swsusp_write(flags);
732 swsusp_free(); 733 swsusp_free();
733 if (!error) { 734 if (!error) {
@@ -739,7 +740,7 @@ int hibernate(void)
739 in_suspend = 0; 740 in_suspend = 0;
740 pm_restore_gfp_mask(); 741 pm_restore_gfp_mask();
741 } else { 742 } else {
742 pr_debug("Image restored successfully.\n"); 743 pm_pr_dbg("Image restored successfully.\n");
743 } 744 }
744 745
745 Free_bitmaps: 746 Free_bitmaps:
@@ -747,7 +748,7 @@ int hibernate(void)
747 Thaw: 748 Thaw:
748 unlock_device_hotplug(); 749 unlock_device_hotplug();
749 if (snapshot_test) { 750 if (snapshot_test) {
750 pr_debug("Checking hibernation image\n"); 751 pm_pr_dbg("Checking hibernation image\n");
751 error = swsusp_check(); 752 error = swsusp_check();
752 if (!error) 753 if (!error)
753 error = load_image_and_restore(); 754 error = load_image_and_restore();
@@ -762,6 +763,8 @@ int hibernate(void)
762 atomic_inc(&snapshot_device_available); 763 atomic_inc(&snapshot_device_available);
763 Unlock: 764 Unlock:
764 unlock_system_sleep(); 765 unlock_system_sleep();
766 pr_info("hibernation exit\n");
767
765 return error; 768 return error;
766} 769}
767 770
@@ -811,7 +814,7 @@ static int software_resume(void)
811 goto Unlock; 814 goto Unlock;
812 } 815 }
813 816
814 pr_debug("Checking hibernation image partition %s\n", resume_file); 817 pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
815 818
816 if (resume_delay) { 819 if (resume_delay) {
817 pr_info("Waiting %dsec before reading resume device ...\n", 820 pr_info("Waiting %dsec before reading resume device ...\n",
@@ -853,10 +856,10 @@ static int software_resume(void)
853 } 856 }
854 857
855 Check_image: 858 Check_image:
856 pr_debug("Hibernation image partition %d:%d present\n", 859 pm_pr_dbg("Hibernation image partition %d:%d present\n",
857 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); 860 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
858 861
859 pr_debug("Looking for hibernation image.\n"); 862 pm_pr_dbg("Looking for hibernation image.\n");
860 error = swsusp_check(); 863 error = swsusp_check();
861 if (error) 864 if (error)
862 goto Unlock; 865 goto Unlock;
@@ -868,6 +871,7 @@ static int software_resume(void)
868 goto Unlock; 871 goto Unlock;
869 } 872 }
870 873
874 pr_info("resume from hibernation\n");
871 pm_prepare_console(); 875 pm_prepare_console();
872 error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); 876 error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
873 if (error) { 877 if (error) {
@@ -875,7 +879,7 @@ static int software_resume(void)
875 goto Close_Finish; 879 goto Close_Finish;
876 } 880 }
877 881
878 pr_debug("Preparing processes for restore.\n"); 882 pm_pr_dbg("Preparing processes for restore.\n");
879 error = freeze_processes(); 883 error = freeze_processes();
880 if (error) 884 if (error)
881 goto Close_Finish; 885 goto Close_Finish;
@@ -884,11 +888,12 @@ static int software_resume(void)
884 Finish: 888 Finish:
885 __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); 889 __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
886 pm_restore_console(); 890 pm_restore_console();
891 pr_info("resume from hibernation failed (%d)\n", error);
887 atomic_inc(&snapshot_device_available); 892 atomic_inc(&snapshot_device_available);
888 /* For success case, the suspend path will release the lock */ 893 /* For success case, the suspend path will release the lock */
889 Unlock: 894 Unlock:
890 mutex_unlock(&pm_mutex); 895 mutex_unlock(&pm_mutex);
891 pr_debug("Hibernation image not present or could not be loaded.\n"); 896 pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
892 return error; 897 return error;
893 Close_Finish: 898 Close_Finish:
894 swsusp_close(FMODE_READ); 899 swsusp_close(FMODE_READ);
@@ -1012,8 +1017,8 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
1012 error = -EINVAL; 1017 error = -EINVAL;
1013 1018
1014 if (!error) 1019 if (!error)
1015 pr_debug("Hibernation mode set to '%s'\n", 1020 pm_pr_dbg("Hibernation mode set to '%s'\n",
1016 hibernation_modes[mode]); 1021 hibernation_modes[mode]);
1017 unlock_system_sleep(); 1022 unlock_system_sleep();
1018 return error ? error : n; 1023 return error ? error : n;
1019} 1024}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 42bd800a6755..3a2ca9066583 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -150,7 +150,7 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
150power_attr(mem_sleep); 150power_attr(mem_sleep);
151#endif /* CONFIG_SUSPEND */ 151#endif /* CONFIG_SUSPEND */
152 152
153#ifdef CONFIG_PM_DEBUG 153#ifdef CONFIG_PM_SLEEP_DEBUG
154int pm_test_level = TEST_NONE; 154int pm_test_level = TEST_NONE;
155 155
156static const char * const pm_tests[__TEST_AFTER_LAST] = { 156static const char * const pm_tests[__TEST_AFTER_LAST] = {
@@ -211,7 +211,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
211} 211}
212 212
213power_attr(pm_test); 213power_attr(pm_test);
214#endif /* CONFIG_PM_DEBUG */ 214#endif /* CONFIG_PM_SLEEP_DEBUG */
215 215
216#ifdef CONFIG_DEBUG_FS 216#ifdef CONFIG_DEBUG_FS
217static char *suspend_step_name(enum suspend_stat_step step) 217static char *suspend_step_name(enum suspend_stat_step step)
@@ -361,6 +361,61 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
361 361
362power_attr_ro(pm_wakeup_irq); 362power_attr_ro(pm_wakeup_irq);
363 363
364bool pm_debug_messages_on __read_mostly;
365
366static ssize_t pm_debug_messages_show(struct kobject *kobj,
367 struct kobj_attribute *attr, char *buf)
368{
369 return sprintf(buf, "%d\n", pm_debug_messages_on);
370}
371
372static ssize_t pm_debug_messages_store(struct kobject *kobj,
373 struct kobj_attribute *attr,
374 const char *buf, size_t n)
375{
376 unsigned long val;
377
378 if (kstrtoul(buf, 10, &val))
379 return -EINVAL;
380
381 if (val > 1)
382 return -EINVAL;
383
384 pm_debug_messages_on = !!val;
385 return n;
386}
387
388power_attr(pm_debug_messages);
389
390/**
391 * __pm_pr_dbg - Print a suspend debug message to the kernel log.
392 * @defer: Whether or not to use printk_deferred() to print the message.
393 * @fmt: Message format.
394 *
395 * The message will be emitted if enabled through the pm_debug_messages
396 * sysfs attribute.
397 */
398void __pm_pr_dbg(bool defer, const char *fmt, ...)
399{
400 struct va_format vaf;
401 va_list args;
402
403 if (!pm_debug_messages_on)
404 return;
405
406 va_start(args, fmt);
407
408 vaf.fmt = fmt;
409 vaf.va = &args;
410
411 if (defer)
412 printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
413 else
414 printk(KERN_DEBUG "PM: %pV", &vaf);
415
416 va_end(args);
417}
418
364#else /* !CONFIG_PM_SLEEP_DEBUG */ 419#else /* !CONFIG_PM_SLEEP_DEBUG */
365static inline void pm_print_times_init(void) {} 420static inline void pm_print_times_init(void) {}
366#endif /* CONFIG_PM_SLEEP_DEBUG */ 421#endif /* CONFIG_PM_SLEEP_DEBUG */
@@ -691,12 +746,11 @@ static struct attribute * g[] = {
691 &wake_lock_attr.attr, 746 &wake_lock_attr.attr,
692 &wake_unlock_attr.attr, 747 &wake_unlock_attr.attr,
693#endif 748#endif
694#ifdef CONFIG_PM_DEBUG
695 &pm_test_attr.attr,
696#endif
697#ifdef CONFIG_PM_SLEEP_DEBUG 749#ifdef CONFIG_PM_SLEEP_DEBUG
750 &pm_test_attr.attr,
698 &pm_print_times_attr.attr, 751 &pm_print_times_attr.attr,
699 &pm_wakeup_irq_attr.attr, 752 &pm_wakeup_irq_attr.attr,
753 &pm_debug_messages_attr.attr,
700#endif 754#endif
701#endif 755#endif
702#ifdef CONFIG_FREEZER 756#ifdef CONFIG_FREEZER
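
The new attribute gates __pm_pr_dbg() at run time: nothing is printed until "1" is written to /sys/power/pm_debug_messages, and every message carries the "PM:" prefix. Callers are expected to go through the wrapper macros from linux/suspend.h; a small usage sketch with a hypothetical my_suspend_step():

#include <linux/suspend.h>

static void my_suspend_step(int step)
{
	pm_pr_dbg("entering step %d\n", step);		/* regular printk path */
	pm_deferred_pr_dbg("step %d done\n", step);	/* printk_deferred() path */
}
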
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 7fdc40d31b7d..1d2d761e3c25 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -192,7 +192,6 @@ extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
192extern const char * const pm_labels[]; 192extern const char * const pm_labels[];
193extern const char *pm_states[]; 193extern const char *pm_states[];
194extern const char *mem_sleep_states[]; 194extern const char *mem_sleep_states[];
195extern suspend_state_t mem_sleep_current;
196 195
197extern int suspend_devices_and_enter(suspend_state_t state); 196extern int suspend_devices_and_enter(suspend_state_t state);
198#else /* !CONFIG_SUSPEND */ 197#else /* !CONFIG_SUSPEND */
@@ -245,7 +244,11 @@ enum {
245#define TEST_FIRST TEST_NONE 244#define TEST_FIRST TEST_NONE
246#define TEST_MAX (__TEST_AFTER_LAST - 1) 245#define TEST_MAX (__TEST_AFTER_LAST - 1)
247 246
247#ifdef CONFIG_PM_SLEEP_DEBUG
248extern int pm_test_level; 248extern int pm_test_level;
249#else
250#define pm_test_level (TEST_NONE)
251#endif
249 252
250#ifdef CONFIG_SUSPEND_FREEZER 253#ifdef CONFIG_SUSPEND_FREEZER
251static inline int suspend_freeze_processes(void) 254static inline int suspend_freeze_processes(void)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3ecf275d7e44..3e2b4f519009 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -8,6 +8,8 @@
8 * This file is released under the GPLv2. 8 * This file is released under the GPLv2.
9 */ 9 */
10 10
11#define pr_fmt(fmt) "PM: " fmt
12
11#include <linux/string.h> 13#include <linux/string.h>
12#include <linux/delay.h> 14#include <linux/delay.h>
13#include <linux/errno.h> 15#include <linux/errno.h>
@@ -33,53 +35,55 @@
33#include "power.h" 35#include "power.h"
34 36
35const char * const pm_labels[] = { 37const char * const pm_labels[] = {
36 [PM_SUSPEND_FREEZE] = "freeze", 38 [PM_SUSPEND_TO_IDLE] = "freeze",
37 [PM_SUSPEND_STANDBY] = "standby", 39 [PM_SUSPEND_STANDBY] = "standby",
38 [PM_SUSPEND_MEM] = "mem", 40 [PM_SUSPEND_MEM] = "mem",
39}; 41};
40const char *pm_states[PM_SUSPEND_MAX]; 42const char *pm_states[PM_SUSPEND_MAX];
41static const char * const mem_sleep_labels[] = { 43static const char * const mem_sleep_labels[] = {
42 [PM_SUSPEND_FREEZE] = "s2idle", 44 [PM_SUSPEND_TO_IDLE] = "s2idle",
43 [PM_SUSPEND_STANDBY] = "shallow", 45 [PM_SUSPEND_STANDBY] = "shallow",
44 [PM_SUSPEND_MEM] = "deep", 46 [PM_SUSPEND_MEM] = "deep",
45}; 47};
46const char *mem_sleep_states[PM_SUSPEND_MAX]; 48const char *mem_sleep_states[PM_SUSPEND_MAX];
47 49
48suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; 50suspend_state_t mem_sleep_current = PM_SUSPEND_TO_IDLE;
49static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM; 51suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
52suspend_state_t pm_suspend_target_state;
53EXPORT_SYMBOL_GPL(pm_suspend_target_state);
50 54
51unsigned int pm_suspend_global_flags; 55unsigned int pm_suspend_global_flags;
52EXPORT_SYMBOL_GPL(pm_suspend_global_flags); 56EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
53 57
54static const struct platform_suspend_ops *suspend_ops; 58static const struct platform_suspend_ops *suspend_ops;
55static const struct platform_freeze_ops *freeze_ops; 59static const struct platform_s2idle_ops *s2idle_ops;
56static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); 60static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head);
57 61
58enum freeze_state __read_mostly suspend_freeze_state; 62enum s2idle_states __read_mostly s2idle_state;
59static DEFINE_SPINLOCK(suspend_freeze_lock); 63static DEFINE_SPINLOCK(s2idle_lock);
60 64
61void freeze_set_ops(const struct platform_freeze_ops *ops) 65void s2idle_set_ops(const struct platform_s2idle_ops *ops)
62{ 66{
63 lock_system_sleep(); 67 lock_system_sleep();
64 freeze_ops = ops; 68 s2idle_ops = ops;
65 unlock_system_sleep(); 69 unlock_system_sleep();
66} 70}
67 71
68static void freeze_begin(void) 72static void s2idle_begin(void)
69{ 73{
70 suspend_freeze_state = FREEZE_STATE_NONE; 74 s2idle_state = S2IDLE_STATE_NONE;
71} 75}
72 76
73static void freeze_enter(void) 77static void s2idle_enter(void)
74{ 78{
75 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true); 79 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
76 80
77 spin_lock_irq(&suspend_freeze_lock); 81 spin_lock_irq(&s2idle_lock);
78 if (pm_wakeup_pending()) 82 if (pm_wakeup_pending())
79 goto out; 83 goto out;
80 84
81 suspend_freeze_state = FREEZE_STATE_ENTER; 85 s2idle_state = S2IDLE_STATE_ENTER;
82 spin_unlock_irq(&suspend_freeze_lock); 86 spin_unlock_irq(&s2idle_lock);
83 87
84 get_online_cpus(); 88 get_online_cpus();
85 cpuidle_resume(); 89 cpuidle_resume();
@@ -87,56 +91,75 @@ static void freeze_enter(void)
87 /* Push all the CPUs into the idle loop. */ 91 /* Push all the CPUs into the idle loop. */
88 wake_up_all_idle_cpus(); 92 wake_up_all_idle_cpus();
89 /* Make the current CPU wait so it can enter the idle loop too. */ 93 /* Make the current CPU wait so it can enter the idle loop too. */
90 wait_event(suspend_freeze_wait_head, 94 wait_event(s2idle_wait_head,
91 suspend_freeze_state == FREEZE_STATE_WAKE); 95 s2idle_state == S2IDLE_STATE_WAKE);
92 96
93 cpuidle_pause(); 97 cpuidle_pause();
94 put_online_cpus(); 98 put_online_cpus();
95 99
96 spin_lock_irq(&suspend_freeze_lock); 100 spin_lock_irq(&s2idle_lock);
97 101
98 out: 102 out:
99 suspend_freeze_state = FREEZE_STATE_NONE; 103 s2idle_state = S2IDLE_STATE_NONE;
100 spin_unlock_irq(&suspend_freeze_lock); 104 spin_unlock_irq(&s2idle_lock);
101 105
102 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false); 106 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
103} 107}
104 108
105static void s2idle_loop(void) 109static void s2idle_loop(void)
106{ 110{
107 pr_debug("PM: suspend-to-idle\n"); 111 pm_pr_dbg("suspend-to-idle\n");
112
113 for (;;) {
114 int error;
115
116 dpm_noirq_begin();
117
118 /*
119 * Suspend-to-idle equals
120 * frozen processes + suspended devices + idle processors.
121 * Thus s2idle_enter() should be called right after
122 * all devices have been suspended.
123 */
124 error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
125 if (!error)
126 s2idle_enter();
127
128 dpm_noirq_resume_devices(PMSG_RESUME);
129 if (error && (error != -EBUSY || !pm_wakeup_pending())) {
130 dpm_noirq_end();
131 break;
132 }
108 133
109 do { 134 if (s2idle_ops && s2idle_ops->wake)
110 freeze_enter(); 135 s2idle_ops->wake();
111 136
112 if (freeze_ops && freeze_ops->wake) 137 dpm_noirq_end();
113 freeze_ops->wake();
114 138
115 dpm_resume_noirq(PMSG_RESUME); 139 if (s2idle_ops && s2idle_ops->sync)
116 if (freeze_ops && freeze_ops->sync) 140 s2idle_ops->sync();
117 freeze_ops->sync();
118 141
119 if (pm_wakeup_pending()) 142 if (pm_wakeup_pending())
120 break; 143 break;
121 144
122 pm_wakeup_clear(false); 145 pm_wakeup_clear(false);
123 } while (!dpm_suspend_noirq(PMSG_SUSPEND)); 146 }
124 147
125 pr_debug("PM: resume from suspend-to-idle\n"); 148 pm_pr_dbg("resume from suspend-to-idle\n");
126} 149}
127 150
128void freeze_wake(void) 151void s2idle_wake(void)
129{ 152{
130 unsigned long flags; 153 unsigned long flags;
131 154
132 spin_lock_irqsave(&suspend_freeze_lock, flags); 155 spin_lock_irqsave(&s2idle_lock, flags);
133 if (suspend_freeze_state > FREEZE_STATE_NONE) { 156 if (s2idle_state > S2IDLE_STATE_NONE) {
134 suspend_freeze_state = FREEZE_STATE_WAKE; 157 s2idle_state = S2IDLE_STATE_WAKE;
135 wake_up(&suspend_freeze_wait_head); 158 wake_up(&s2idle_wait_head);
136 } 159 }
137 spin_unlock_irqrestore(&suspend_freeze_lock, flags); 160 spin_unlock_irqrestore(&s2idle_lock, flags);
138} 161}
139EXPORT_SYMBOL_GPL(freeze_wake); 162EXPORT_SYMBOL_GPL(s2idle_wake);
140 163
141static bool valid_state(suspend_state_t state) 164static bool valid_state(suspend_state_t state)
142{ 165{
@@ -152,19 +175,19 @@ void __init pm_states_init(void)
152{ 175{
153 /* "mem" and "freeze" are always present in /sys/power/state. */ 176 /* "mem" and "freeze" are always present in /sys/power/state. */
154 pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM]; 177 pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
155 pm_states[PM_SUSPEND_FREEZE] = pm_labels[PM_SUSPEND_FREEZE]; 178 pm_states[PM_SUSPEND_TO_IDLE] = pm_labels[PM_SUSPEND_TO_IDLE];
156 /* 179 /*
157 * Suspend-to-idle should be supported even without any suspend_ops, 180 * Suspend-to-idle should be supported even without any suspend_ops,
158 * initialize mem_sleep_states[] accordingly here. 181 * initialize mem_sleep_states[] accordingly here.
159 */ 182 */
160 mem_sleep_states[PM_SUSPEND_FREEZE] = mem_sleep_labels[PM_SUSPEND_FREEZE]; 183 mem_sleep_states[PM_SUSPEND_TO_IDLE] = mem_sleep_labels[PM_SUSPEND_TO_IDLE];
161} 184}
162 185
163static int __init mem_sleep_default_setup(char *str) 186static int __init mem_sleep_default_setup(char *str)
164{ 187{
165 suspend_state_t state; 188 suspend_state_t state;
166 189
167 for (state = PM_SUSPEND_FREEZE; state <= PM_SUSPEND_MEM; state++) 190 for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)
168 if (mem_sleep_labels[state] && 191 if (mem_sleep_labels[state] &&
169 !strcmp(str, mem_sleep_labels[state])) { 192 !strcmp(str, mem_sleep_labels[state])) {
170 mem_sleep_default = state; 193 mem_sleep_default = state;
@@ -193,7 +216,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
193 } 216 }
194 if (valid_state(PM_SUSPEND_MEM)) { 217 if (valid_state(PM_SUSPEND_MEM)) {
195 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; 218 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
196 if (mem_sleep_default == PM_SUSPEND_MEM) 219 if (mem_sleep_default >= PM_SUSPEND_MEM)
197 mem_sleep_current = PM_SUSPEND_MEM; 220 mem_sleep_current = PM_SUSPEND_MEM;
198 } 221 }
199 222
@@ -216,49 +239,49 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
216 239
217static bool sleep_state_supported(suspend_state_t state) 240static bool sleep_state_supported(suspend_state_t state)
218{ 241{
219 return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter); 242 return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter);
220} 243}
221 244
222static int platform_suspend_prepare(suspend_state_t state) 245static int platform_suspend_prepare(suspend_state_t state)
223{ 246{
224 return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ? 247 return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare ?
225 suspend_ops->prepare() : 0; 248 suspend_ops->prepare() : 0;
226} 249}
227 250
228static int platform_suspend_prepare_late(suspend_state_t state) 251static int platform_suspend_prepare_late(suspend_state_t state)
229{ 252{
230 return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ? 253 return state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->prepare ?
231 freeze_ops->prepare() : 0; 254 s2idle_ops->prepare() : 0;
232} 255}
233 256
234static int platform_suspend_prepare_noirq(suspend_state_t state) 257static int platform_suspend_prepare_noirq(suspend_state_t state)
235{ 258{
236 return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ? 259 return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late ?
237 suspend_ops->prepare_late() : 0; 260 suspend_ops->prepare_late() : 0;
238} 261}
239 262
240static void platform_resume_noirq(suspend_state_t state) 263static void platform_resume_noirq(suspend_state_t state)
241{ 264{
242 if (state != PM_SUSPEND_FREEZE && suspend_ops->wake) 265 if (state != PM_SUSPEND_TO_IDLE && suspend_ops->wake)
243 suspend_ops->wake(); 266 suspend_ops->wake();
244} 267}
245 268
246static void platform_resume_early(suspend_state_t state) 269static void platform_resume_early(suspend_state_t state)
247{ 270{
248 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore) 271 if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->restore)
249 freeze_ops->restore(); 272 s2idle_ops->restore();
250} 273}
251 274
252static void platform_resume_finish(suspend_state_t state) 275static void platform_resume_finish(suspend_state_t state)
253{ 276{
254 if (state != PM_SUSPEND_FREEZE && suspend_ops->finish) 277 if (state != PM_SUSPEND_TO_IDLE && suspend_ops->finish)
255 suspend_ops->finish(); 278 suspend_ops->finish();
256} 279}
257 280
258static int platform_suspend_begin(suspend_state_t state) 281static int platform_suspend_begin(suspend_state_t state)
259{ 282{
260 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) 283 if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->begin)
261 return freeze_ops->begin(); 284 return s2idle_ops->begin();
262 else if (suspend_ops && suspend_ops->begin) 285 else if (suspend_ops && suspend_ops->begin)
263 return suspend_ops->begin(state); 286 return suspend_ops->begin(state);
264 else 287 else
@@ -267,21 +290,21 @@ static int platform_suspend_begin(suspend_state_t state)
267 290
268static void platform_resume_end(suspend_state_t state) 291static void platform_resume_end(suspend_state_t state)
269{ 292{
270 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) 293 if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->end)
271 freeze_ops->end(); 294 s2idle_ops->end();
272 else if (suspend_ops && suspend_ops->end) 295 else if (suspend_ops && suspend_ops->end)
273 suspend_ops->end(); 296 suspend_ops->end();
274} 297}
275 298
276static void platform_recover(suspend_state_t state) 299static void platform_recover(suspend_state_t state)
277{ 300{
278 if (state != PM_SUSPEND_FREEZE && suspend_ops->recover) 301 if (state != PM_SUSPEND_TO_IDLE && suspend_ops->recover)
279 suspend_ops->recover(); 302 suspend_ops->recover();
280} 303}
281 304
282static bool platform_suspend_again(suspend_state_t state) 305static bool platform_suspend_again(suspend_state_t state)
283{ 306{
284 return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ? 307 return state != PM_SUSPEND_TO_IDLE && suspend_ops->suspend_again ?
285 suspend_ops->suspend_again() : false; 308 suspend_ops->suspend_again() : false;
286} 309}
287 310
@@ -370,16 +393,21 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
370 393
371 error = dpm_suspend_late(PMSG_SUSPEND); 394 error = dpm_suspend_late(PMSG_SUSPEND);
372 if (error) { 395 if (error) {
373 pr_err("PM: late suspend of devices failed\n"); 396 pr_err("late suspend of devices failed\n");
374 goto Platform_finish; 397 goto Platform_finish;
375 } 398 }
376 error = platform_suspend_prepare_late(state); 399 error = platform_suspend_prepare_late(state);
377 if (error) 400 if (error)
378 goto Devices_early_resume; 401 goto Devices_early_resume;
379 402
403 if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
404 s2idle_loop();
405 goto Platform_early_resume;
406 }
407
380 error = dpm_suspend_noirq(PMSG_SUSPEND); 408 error = dpm_suspend_noirq(PMSG_SUSPEND);
381 if (error) { 409 if (error) {
382 pr_err("PM: noirq suspend of devices failed\n"); 410 pr_err("noirq suspend of devices failed\n");
383 goto Platform_early_resume; 411 goto Platform_early_resume;
384 } 412 }
385 error = platform_suspend_prepare_noirq(state); 413 error = platform_suspend_prepare_noirq(state);
@@ -389,17 +417,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
389 if (suspend_test(TEST_PLATFORM)) 417 if (suspend_test(TEST_PLATFORM))
390 goto Platform_wake; 418 goto Platform_wake;
391 419
392 /*
393 * PM_SUSPEND_FREEZE equals
394 * frozen processes + suspended devices + idle processors.
395 * Thus we should invoke freeze_enter() soon after
396 * all the devices are suspended.
397 */
398 if (state == PM_SUSPEND_FREEZE) {
399 s2idle_loop();
400 goto Platform_early_resume;
401 }
402
403 error = disable_nonboot_cpus(); 420 error = disable_nonboot_cpus();
404 if (error || suspend_test(TEST_CPUS)) 421 if (error || suspend_test(TEST_CPUS))
405 goto Enable_cpus; 422 goto Enable_cpus;
@@ -456,6 +473,8 @@ int suspend_devices_and_enter(suspend_state_t state)
456 if (!sleep_state_supported(state)) 473 if (!sleep_state_supported(state))
457 return -ENOSYS; 474 return -ENOSYS;
458 475
476 pm_suspend_target_state = state;
477
459 error = platform_suspend_begin(state); 478 error = platform_suspend_begin(state);
460 if (error) 479 if (error)
461 goto Close; 480 goto Close;
@@ -464,7 +483,7 @@ int suspend_devices_and_enter(suspend_state_t state)
464 suspend_test_start(); 483 suspend_test_start();
465 error = dpm_suspend_start(PMSG_SUSPEND); 484 error = dpm_suspend_start(PMSG_SUSPEND);
466 if (error) { 485 if (error) {
467 pr_err("PM: Some devices failed to suspend, or early wake event detected\n"); 486 pr_err("Some devices failed to suspend, or early wake event detected\n");
468 goto Recover_platform; 487 goto Recover_platform;
469 } 488 }
470 suspend_test_finish("suspend devices"); 489 suspend_test_finish("suspend devices");
@@ -485,6 +504,7 @@ int suspend_devices_and_enter(suspend_state_t state)
485 504
486 Close: 505 Close:
487 platform_resume_end(state); 506 platform_resume_end(state);
507 pm_suspend_target_state = PM_SUSPEND_ON;
488 return error; 508 return error;
489 509
490 Recover_platform: 510 Recover_platform:
@@ -518,10 +538,10 @@ static int enter_state(suspend_state_t state)
518 int error; 538 int error;
519 539
520 trace_suspend_resume(TPS("suspend_enter"), state, true); 540 trace_suspend_resume(TPS("suspend_enter"), state, true);
521 if (state == PM_SUSPEND_FREEZE) { 541 if (state == PM_SUSPEND_TO_IDLE) {
522#ifdef CONFIG_PM_DEBUG 542#ifdef CONFIG_PM_DEBUG
523 if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) { 543 if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
524 pr_warn("PM: Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n"); 544 pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
525 return -EAGAIN; 545 return -EAGAIN;
526 } 546 }
527#endif 547#endif
@@ -531,18 +551,18 @@ static int enter_state(suspend_state_t state)
531 if (!mutex_trylock(&pm_mutex)) 551 if (!mutex_trylock(&pm_mutex))
532 return -EBUSY; 552 return -EBUSY;
533 553
534 if (state == PM_SUSPEND_FREEZE) 554 if (state == PM_SUSPEND_TO_IDLE)
535 freeze_begin(); 555 s2idle_begin();
536 556
537#ifndef CONFIG_SUSPEND_SKIP_SYNC 557#ifndef CONFIG_SUSPEND_SKIP_SYNC
538 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 558 trace_suspend_resume(TPS("sync_filesystems"), 0, true);
539 pr_info("PM: Syncing filesystems ... "); 559 pr_info("Syncing filesystems ... ");
540 sys_sync(); 560 sys_sync();
541 pr_cont("done.\n"); 561 pr_cont("done.\n");
542 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 562 trace_suspend_resume(TPS("sync_filesystems"), 0, false);
543#endif 563#endif
544 564
545 pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]); 565 pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
546 pm_suspend_clear_flags(); 566 pm_suspend_clear_flags();
547 error = suspend_prepare(state); 567 error = suspend_prepare(state);
548 if (error) 568 if (error)
@@ -552,13 +572,13 @@ static int enter_state(suspend_state_t state)
552 goto Finish; 572 goto Finish;
553 573
554 trace_suspend_resume(TPS("suspend_enter"), state, false); 574 trace_suspend_resume(TPS("suspend_enter"), state, false);
555 pr_debug("PM: Suspending system (%s)\n", pm_states[state]); 575 pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
556 pm_restrict_gfp_mask(); 576 pm_restrict_gfp_mask();
557 error = suspend_devices_and_enter(state); 577 error = suspend_devices_and_enter(state);
558 pm_restore_gfp_mask(); 578 pm_restore_gfp_mask();
559 579
560 Finish: 580 Finish:
561 pr_debug("PM: Finishing wakeup.\n"); 581 pm_pr_dbg("Finishing wakeup.\n");
562 suspend_finish(); 582 suspend_finish();
563 Unlock: 583 Unlock:
564 mutex_unlock(&pm_mutex); 584 mutex_unlock(&pm_mutex);
@@ -579,6 +599,7 @@ int pm_suspend(suspend_state_t state)
579 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) 599 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
580 return -EINVAL; 600 return -EINVAL;
581 601
602 pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
582 error = enter_state(state); 603 error = enter_state(state);
583 if (error) { 604 if (error) {
584 suspend_stats.fail++; 605 suspend_stats.fail++;
@@ -586,6 +607,7 @@ int pm_suspend(suspend_state_t state)
586 } else { 607 } else {
587 suspend_stats.success++; 608 suspend_stats.success++;
588 } 609 }
610 pr_info("suspend exit\n");
589 return error; 611 return error;
590} 612}
591EXPORT_SYMBOL(pm_suspend); 613EXPORT_SYMBOL(pm_suspend);
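
s2idle_wake() only flips the state machine to S2IDLE_STATE_WAKE and wakes the CPUs parked in s2idle_enter(); whether the loop actually exits is still decided by pm_wakeup_pending(), so without a registered wakeup event the loop resumes the noirq devices, runs ->wake() and ->sync(), and simply starts the next cycle. A hedged sketch of a platform-side caller; my_platform_event_handler is illustrative:

#include <linux/suspend.h>

static void my_platform_event_handler(void)
{
	/*
	 * Kick the CPUs out of s2idle_enter() so s2idle_loop() can
	 * re-evaluate; a genuine wakeup also needs a wakeup event
	 * (e.g. pm_wakeup_hard_event()) to make pm_wakeup_pending() true.
	 */
	s2idle_wake();
}
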
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 5db217051232..6a897e8b2a88 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -104,9 +104,9 @@ repeat:
104 printk(info_test, pm_states[state]); 104 printk(info_test, pm_states[state]);
105 status = pm_suspend(state); 105 status = pm_suspend(state);
106 if (status < 0) 106 if (status < 0)
107 state = PM_SUSPEND_FREEZE; 107 state = PM_SUSPEND_TO_IDLE;
108 } 108 }
109 if (state == PM_SUSPEND_FREEZE) { 109 if (state == PM_SUSPEND_TO_IDLE) {
110 printk(info_test, pm_states[state]); 110 printk(info_test, pm_states[state]);
111 status = pm_suspend(state); 111 status = pm_suspend(state);
112 } 112 }
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6c23e30c0e5c..257f4f0b4532 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -158,7 +158,7 @@ static void cpuidle_idle_call(void)
158 } 158 }
159 159
160 /* 160 /*
161 * Suspend-to-idle ("freeze") is a system state in which all user space 161 * Suspend-to-idle ("s2idle") is a system state in which all user space
162 * has been frozen, all I/O devices have been suspended and the only 162 * has been frozen, all I/O devices have been suspended and the only
163 * activity happens here and in interrupts (if any). In that case bypass 163 * activity happens here and in interrupts (if any). In that case bypass
164 * the cpuidle governor and go straight for the deepest idle state 164 * the cpuidle governor and go straight for the deepest idle state
@@ -167,9 +167,9 @@ static void cpuidle_idle_call(void)
167 * until a proper wakeup interrupt happens. 167 * until a proper wakeup interrupt happens.
168 */ 168 */
169 169
170 if (idle_should_freeze() || dev->use_deepest_state) { 170 if (idle_should_enter_s2idle() || dev->use_deepest_state) {
171 if (idle_should_freeze()) { 171 if (idle_should_enter_s2idle()) {
172 entered_state = cpuidle_enter_freeze(drv, dev); 172 entered_state = cpuidle_enter_s2idle(drv, dev);
173 if (entered_state > 0) { 173 if (entered_state > 0) {
174 local_irq_enable(); 174 local_irq_enable();
175 goto exit_idle; 175 goto exit_idle;
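
cpuidle_enter_s2idle() (the renamed cpuidle_enter_freeze()) is what makes the bypass above work: the cpuidle core picks the deepest state that provides an ->enter_s2idle callback and runs it directly, without consulting the governor. A condensed, hedged sketch of that selection, not the literal drivers/cpuidle/cpuidle.c code:

#include <linux/cpuidle.h>
#include <linux/errno.h>

static int my_enter_s2idle_sketch(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev)
{
	int i, index = -ENODEV;

	/* Deepest (last) state with an ->enter_s2idle callback wins. */
	for (i = 1; i < drv->state_count; i++)
		if (drv->states[i].enter_s2idle)
			index = i;

	if (index > 0)
		drv->states[index].enter_s2idle(dev, drv, index);

	return index;
}
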
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 38bc4d2208e8..0754cadfa9e6 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/suspend.h>
22#include <linux/time.h> 23#include <linux/time.h>
23 24
24#include "timekeeping_internal.h" 25#include "timekeeping_internal.h"
@@ -75,7 +76,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
75 int bin = min(fls(t->tv_sec), NUM_BINS-1); 76 int bin = min(fls(t->tv_sec), NUM_BINS-1);
76 77
77 sleep_time_bin[bin]++; 78 sleep_time_bin[bin]++;
78 printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n", 79 pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
79 (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC); 80 (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
80} 81}
81 82