author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-11-12 19:41:26 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-11-12 19:41:26 -0500
commit		1efef68262dc567f0c09da9d11924e8287cd3a8b (patch)
tree		ef4534f2683ea2e5bb6b1091d3b910f8d0181fbf
parent		05d658b5b57214944067fb4f62bce59200bf496f (diff)
parent		05087360fd7acf2cc9b7bbb243c12765c44c7693 (diff)
Merge branch 'pm-core'
* pm-core:
  ACPI / PM: Take SMART_SUSPEND driver flag into account
  PCI / PM: Take SMART_SUSPEND driver flag into account
  PCI / PM: Drop unnecessary invocations of pcibios_pm_ops callbacks
  PM / core: Add SMART_SUSPEND driver flag
  PCI / PM: Use the NEVER_SKIP driver flag
  PM / core: Add NEVER_SKIP and SMART_PREPARE driver flags
  PM / core: Convert timers to use timer_setup()
  PM / core: Fix kerneldoc comments of four functions
  PM / core: Drop legacy class suspend/resume operations
-rw-r--r--	Documentation/driver-api/pm/devices.rst		 34
-rw-r--r--	Documentation/power/pci.txt			 33
-rw-r--r--	drivers/acpi/acpi_lpss.c			 13
-rw-r--r--	drivers/acpi/device_pm.c			124
-rw-r--r--	drivers/base/dd.c				  2
-rw-r--r--	drivers/base/power/main.c			 53
-rw-r--r--	drivers/base/power/runtime.c			  7
-rw-r--r--	drivers/base/power/wakeup.c			 11
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c			  2
-rw-r--r--	drivers/misc/mei/pci-me.c			  2
-rw-r--r--	drivers/misc/mei/pci-txe.c			  2
-rw-r--r--	drivers/pci/pci-driver.c			124
-rw-r--r--	drivers/pci/pci.c				  3
-rw-r--r--	include/linux/acpi.h				 10
-rw-r--r--	include/linux/device.h				 15
-rw-r--r--	include/linux/pci.h				  7
-rw-r--r--	include/linux/pm.h				 30
17 files changed, 370 insertions(+), 102 deletions(-)
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index b5d7d4948e93..53c1b0b06da5 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -354,6 +354,20 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
    is because all such devices are initially set to runtime-suspended with
    runtime PM disabled.
 
+This feature can also be controlled by device drivers by using the
+``DPM_FLAG_NEVER_SKIP`` and ``DPM_FLAG_SMART_PREPARE`` driver power
+management flags. [Typically, they are set at the time the driver is
+probed against the device in question by passing them to the
+:c:func:`dev_pm_set_driver_flags` helper function.] If the first of
+these flags is set, the PM core will not apply the direct-complete
+procedure described above to the given device and, consequently, to any
+of its ancestors. The second flag, when set, informs the middle layer
+code (bus types, device types, PM domains, classes) that it should take
+the return value of the ``->prepare`` callback provided by the driver
+into account and it may only return a positive value from its own
+``->prepare`` callback if the driver's one also has returned a positive
+value.
+
 2. The ``->suspend`` methods should quiesce the device to stop it from
    performing I/O. They also may save the device registers and put it into
    the appropriate low-power state, depending on the bus type the device is
@@ -752,6 +766,26 @@ the state of devices (possibly except for resuming them from runtime suspend)
 from their ``->prepare`` and ``->suspend`` callbacks (or equivalent) *before*
 invoking device drivers' ``->suspend`` callbacks (or equivalent).
 
+Some bus types and PM domains have a policy to resume all devices from runtime
+suspend upfront in their ``->suspend`` callbacks, but that may not be really
+necessary if the driver of the device can cope with runtime-suspended devices.
+The driver can indicate that by setting ``DPM_FLAG_SMART_SUSPEND`` in
+:c:member:`power.driver_flags` at the probe time, by passing it to the
+:c:func:`dev_pm_set_driver_flags` helper. That also may cause middle-layer code
+(bus types, PM domains etc.) to skip the ``->suspend_late`` and
+``->suspend_noirq`` callbacks provided by the driver if the device remains in
+runtime suspend at the beginning of the ``suspend_late`` phase of system-wide
+suspend (or in the ``poweroff_late`` phase of hibernation), when runtime PM
+has been disabled for it, under the assumption that its state should not change
+after that point until the system-wide transition is over. If that happens, the
+driver's system-wide resume callbacks, if present, may still be invoked during
+the subsequent system-wide resume transition and the device's runtime power
+management status may be set to "active" before enabling runtime PM for it,
+so the driver must be prepared to cope with the invocation of its system-wide
+resume callbacks back-to-back with its ``->runtime_suspend`` one (without the
+intervening ``->runtime_resume`` and so on) and the final state of the device
+must reflect the "active" status for runtime PM in that case.
+
 During system-wide resume from a sleep state it's easiest to put devices into
 the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
 Refer to that document for more information regarding this particular issue as
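As a rough illustration of the documentation added above, a driver opting in to these flags would set them once at probe time and (for SMART_PREPARE) give its ->prepare callback a meaningful return value. This is only a sketch under assumed names: the "foo" driver, its private data and the condition tested in ->prepare are invented for illustration, not part of this merge.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>

/* Hypothetical driver data; only the wakeup-related field matters here. */
struct foo_priv {
	bool needs_reconfig_for_wakeup;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, priv);

	/*
	 * Set the flags once at probe time; the driver core clears them
	 * again when the driver is unbound (see the drivers/base/dd.c
	 * hunks later in this diff).
	 */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
	return 0;
}

static int foo_prepare(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/*
	 * With DPM_FLAG_SMART_PREPARE the middle layer may only return a
	 * positive value (allowing direct-complete) if this callback also
	 * returns a positive value; returning 0 vetoes it dynamically.
	 */
	return priv->needs_reconfig_for_wakeup ? 0 : 1;
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare = foo_prepare,
};

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo-example",
		.pm = &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");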
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index a1b7f7158930..304162ea377e 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -961,6 +961,39 @@ dev_pm_ops to indicate that one suspend routine is to be pointed to by the
 .suspend(), .freeze(), and .poweroff() members and one resume routine is to
 be pointed to by the .resume(), .thaw(), and .restore() members.
 
+3.1.19. Driver Flags for Power Management
+
+The PM core allows device drivers to set flags that influence the handling of
+power management for the devices by the core itself and by middle layer code
+including the PCI bus type. The flags should be set once at driver probe
+time with the help of the dev_pm_set_driver_flags() function and they should
+not be updated directly afterwards.
+
+The DPM_FLAG_NEVER_SKIP flag prevents the PM core from using the direct-complete
+mechanism allowing device suspend/resume callbacks to be skipped if the device
+is in runtime suspend when the system suspend starts. That also affects all of
+the ancestors of the device, so this flag should only be used if absolutely
+necessary.
+
+The DPM_FLAG_SMART_PREPARE flag instructs the PCI bus type to only return a
+positive value from pci_pm_prepare() if the ->prepare callback provided by the
+driver of the device returns a positive value. That allows the driver to opt
+out from using the direct-complete mechanism dynamically.
+
+The DPM_FLAG_SMART_SUSPEND flag tells the PCI bus type that from the driver's
+perspective the device can be safely left in runtime suspend during system
+suspend. That causes pci_pm_suspend(), pci_pm_freeze() and pci_pm_poweroff()
+to skip resuming the device from runtime suspend unless there are PCI-specific
+reasons for doing that. Also, it causes pci_pm_suspend_late/noirq(),
+pci_pm_freeze_late/noirq() and pci_pm_poweroff_late/noirq() to return early
+if the device remains in runtime suspend at the beginning of the "late" phase
+of the system-wide transition under way. Moreover, if the device is in
+runtime suspend in pci_pm_resume_noirq() or pci_pm_restore_noirq(), its runtime
+power management status will be changed to "active" (as it is going to be put
+into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
+the function will set the power.direct_complete flag for it (to make the PM core
+skip the subsequent "thaw" callbacks for it) and return.
+
 3.2. Device Runtime Power Management
 ------------------------------------
 In addition to providing device power management callbacks PCI device drivers
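A hedged sketch of what the SMART_SUSPEND contract described above means for a PCI driver: the names (bar_probe, bar_resume, the PCI IDs) are invented and the restore logic is a placeholder, but the shape follows the text above — set the flag at probe, and write the system-wide resume path so it works even if it runs right after ->runtime_suspend with the runtime PM status already set to "active".

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int bar_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pcim_enable_device(pdev);

	if (ret)
		return ret;

	/*
	 * Tell the PCI bus type that this driver's system suspend callbacks
	 * can cope with a runtime-suspended device, so pci_pm_suspend() and
	 * friends do not have to resume it upfront.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);
	return 0;
}

static int __maybe_unused bar_resume(struct device *dev)
{
	/*
	 * This may run back-to-back with ->runtime_suspend, without an
	 * intervening ->runtime_resume, and the runtime PM status has
	 * already been set to "active" during the noirq phase.  Reprogram
	 * the hardware unconditionally instead of assuming it is still in
	 * the state ->suspend left it in.
	 */
	/* ... reinitialize registers here (placeholder) ... */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, bar_resume)
};

static const struct pci_device_id bar_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up IDs */
	{ }
};

static struct pci_driver bar_driver = {
	.name = "bar-example",
	.id_table = bar_ids,
	.probe = bar_probe,
	.driver.pm = &bar_pm_ops,
};
module_pci_driver(bar_driver);
MODULE_LICENSE("GPL");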
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 04d32bdb5a95..de7385b824e1 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -849,8 +849,12 @@ static int acpi_lpss_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int acpi_lpss_suspend_late(struct device *dev)
 {
-	int ret = pm_generic_suspend_late(dev);
+	int ret;
+
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
 
+	ret = pm_generic_suspend_late(dev);
 	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
 }
 
@@ -889,10 +893,17 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
 		.complete = acpi_subsys_complete,
 		.suspend = acpi_subsys_suspend,
 		.suspend_late = acpi_lpss_suspend_late,
+		.suspend_noirq = acpi_subsys_suspend_noirq,
+		.resume_noirq = acpi_subsys_resume_noirq,
 		.resume_early = acpi_lpss_resume_early,
 		.freeze = acpi_subsys_freeze,
+		.freeze_late = acpi_subsys_freeze_late,
+		.freeze_noirq = acpi_subsys_freeze_noirq,
+		.thaw_noirq = acpi_subsys_thaw_noirq,
 		.poweroff = acpi_subsys_suspend,
 		.poweroff_late = acpi_lpss_suspend_late,
+		.poweroff_noirq = acpi_subsys_suspend_noirq,
+		.restore_noirq = acpi_subsys_resume_noirq,
 		.restore_early = acpi_lpss_resume_early,
 #endif
 		.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 69ffd1dc1de7..e4ffaeec9ec2 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -939,7 +939,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
 	u32 sys_target = acpi_target_system_state();
 	int ret, state;
 
-	if (device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
+	if (!pm_runtime_suspended(dev) || !adev ||
+	    device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
 		return true;
 
 	if (sys_target == ACPI_STATE_S0)
@@ -962,14 +963,16 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
 int acpi_subsys_prepare(struct device *dev)
 {
 	struct acpi_device *adev = ACPI_COMPANION(dev);
-	int ret;
 
-	ret = pm_generic_prepare(dev);
-	if (ret < 0)
-		return ret;
+	if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
+		int ret = dev->driver->pm->prepare(dev);
 
-	if (!adev || !pm_runtime_suspended(dev))
-		return 0;
+		if (ret < 0)
+			return ret;
+
+		if (!ret && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+			return 0;
+	}
 
 	return !acpi_dev_needs_resume(dev, adev);
 }
@@ -996,12 +999,17 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete);
  * acpi_subsys_suspend - Run the device driver's suspend callback.
  * @dev: Device to handle.
  *
- * Follow PCI and resume devices suspended at run time before running their
- * system suspend callbacks.
+ * Follow PCI and resume devices from runtime suspend before running their
+ * system suspend callbacks, unless the driver can cope with runtime-suspended
+ * devices during system suspend and there are no ACPI-specific reasons for
+ * resuming them.
  */
 int acpi_subsys_suspend(struct device *dev)
 {
-	pm_runtime_resume(dev);
+	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+	    acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
+		pm_runtime_resume(dev);
+
 	return pm_generic_suspend(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
@@ -1015,12 +1023,48 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
  */
 int acpi_subsys_suspend_late(struct device *dev)
 {
-	int ret = pm_generic_suspend_late(dev);
+	int ret;
+
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	ret = pm_generic_suspend_late(dev);
 	return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
 
 /**
+ * acpi_subsys_suspend_noirq - Run the device driver's "noirq" suspend callback.
+ * @dev: Device to suspend.
+ */
+int acpi_subsys_suspend_noirq(struct device *dev)
+{
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	return pm_generic_suspend_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
+
+/**
+ * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_resume_noirq(struct device *dev)
+{
+	/*
+	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+	 * during system suspend, so update their runtime PM status to "active"
+	 * as they will be put into D0 going forward.
+	 */
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		pm_runtime_set_active(dev);
+
+	return pm_generic_resume_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
+
+/**
  * acpi_subsys_resume_early - Resume device using ACPI.
  * @dev: Device to Resume.
  *
@@ -1047,11 +1091,60 @@ int acpi_subsys_freeze(struct device *dev)
 	 * runtime-suspended devices should not be touched during freeze/thaw
 	 * transitions.
 	 */
-	pm_runtime_resume(dev);
+	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+		pm_runtime_resume(dev);
+
 	return pm_generic_freeze(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
 
+/**
+ * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_late(struct device *dev)
+{
+
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	return pm_generic_freeze_late(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);
+
+/**
+ * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_noirq(struct device *dev)
+{
+
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	return pm_generic_freeze_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
+
+/**
+ * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_thaw_noirq(struct device *dev)
+{
+	/*
+	 * If the device is in runtime suspend, the "thaw" code may not work
+	 * correctly with it, so skip the driver callback and make the PM core
+	 * skip all of the subsequent "thaw" callbacks for the device.
+	 */
+	if (dev_pm_smart_suspend_and_suspended(dev)) {
+		dev->power.direct_complete = true;
+		return 0;
+	}
+
+	return pm_generic_thaw_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
 #endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_domain acpi_general_pm_domain = {
@@ -1063,10 +1156,17 @@ static struct dev_pm_domain acpi_general_pm_domain = {
 		.complete = acpi_subsys_complete,
 		.suspend = acpi_subsys_suspend,
 		.suspend_late = acpi_subsys_suspend_late,
+		.suspend_noirq = acpi_subsys_suspend_noirq,
+		.resume_noirq = acpi_subsys_resume_noirq,
 		.resume_early = acpi_subsys_resume_early,
 		.freeze = acpi_subsys_freeze,
+		.freeze_late = acpi_subsys_freeze_late,
+		.freeze_noirq = acpi_subsys_freeze_noirq,
+		.thaw_noirq = acpi_subsys_thaw_noirq,
 		.poweroff = acpi_subsys_suspend,
 		.poweroff_late = acpi_subsys_suspend_late,
+		.poweroff_noirq = acpi_subsys_suspend_noirq,
+		.restore_noirq = acpi_subsys_resume_noirq,
 		.restore_early = acpi_subsys_resume_early,
 #endif
 	},
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ad44b40fe284..45575e134696 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -464,6 +464,7 @@ pinctrl_bind_failed:
 	if (dev->pm_domain && dev->pm_domain->dismiss)
 		dev->pm_domain->dismiss(dev);
 	pm_runtime_reinit(dev);
+	dev_pm_set_driver_flags(dev, 0);
 
 	switch (ret) {
 	case -EPROBE_DEFER:
@@ -869,6 +870,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 	if (dev->pm_domain && dev->pm_domain->dismiss)
 		dev->pm_domain->dismiss(dev);
 	pm_runtime_reinit(dev);
+	dev_pm_set_driver_flags(dev, 0);
 
 	klist_remove(&dev->p->knode_driver);
 	device_pm_check_callbacks(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 770b1539a083..6c6f1c74c24c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -528,7 +528,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 /*------------------------- Resume routines -------------------------*/
 
 /**
- * device_resume_noirq - Execute an "early resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
@@ -848,16 +848,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 		goto Driver;
 	}
 
-	if (dev->class) {
-		if (dev->class->pm) {
-			info = "class ";
-			callback = pm_op(dev->class->pm, state);
-			goto Driver;
-		} else if (dev->class->resume) {
-			info = "legacy class ";
-			callback = dev->class->resume;
-			goto End;
-		}
+	if (dev->class && dev->class->pm) {
+		info = "class ";
+		callback = pm_op(dev->class->pm, state);
+		goto Driver;
 	}
 
 	if (dev->bus) {
@@ -1083,7 +1077,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
 }
 
 /**
- * device_suspend_noirq - Execute a "late suspend" callback for given device.
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being suspended asynchronously.
@@ -1243,7 +1237,7 @@ int dpm_suspend_noirq(pm_message_t state)
 }
 
 /**
- * device_suspend_late - Execute a "late suspend" callback for given device.
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being suspended asynchronously.
@@ -1445,7 +1439,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev)
 }
 
 /**
- * device_suspend - Execute "suspend" callbacks for given device.
+ * __device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being suspended asynchronously.
@@ -1508,17 +1502,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto Run;
 	}
 
-	if (dev->class) {
-		if (dev->class->pm) {
-			info = "class ";
-			callback = pm_op(dev->class->pm, state);
-			goto Run;
-		} else if (dev->class->suspend) {
-			pm_dev_dbg(dev, state, "legacy class ");
-			error = legacy_suspend(dev, state, dev->class->suspend,
-						"legacy class ");
-			goto End;
-		}
+	if (dev->class && dev->class->pm) {
+		info = "class ";
+		callback = pm_op(dev->class->pm, state);
+		goto Run;
 	}
 
 	if (dev->bus) {
@@ -1665,6 +1652,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		return 0;
 
+	WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+		!pm_runtime_enabled(dev));
+
 	/*
 	 * If a device's parent goes into runtime suspend at the wrong time,
 	 * it won't be possible to resume the device. To prevent this we
@@ -1713,7 +1703,9 @@ unlock:
 	 * applies to suspend transitions, however.
 	 */
 	spin_lock_irq(&dev->power.lock);
-	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+		pm_runtime_suspended(dev) && ret > 0 &&
+		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
 	spin_unlock_irq(&dev->power.lock);
 	return 0;
 }
@@ -1862,11 +1854,16 @@ void device_pm_check_callbacks(struct device *dev)
 	dev->power.no_pm_callbacks =
 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
 		 !dev->bus->suspend && !dev->bus->resume)) &&
-		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
-		 !dev->class->suspend && !dev->class->resume)) &&
+		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
 		 !dev->driver->suspend && !dev->driver->resume));
 	spin_unlock_irq(&dev->power.lock);
 }
+
+bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+{
+	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+		pm_runtime_status_suspended(dev);
+}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 13e015905543..e57b5d2f81cf 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -894,9 +894,9 @@ static void pm_runtime_work(struct work_struct *work)
  *
  * Check if the time is right and queue a suspend request.
  */
-static void pm_suspend_timer_fn(unsigned long data)
+static void pm_suspend_timer_fn(struct timer_list *t)
 {
-	struct device *dev = (struct device *)data;
+	struct device *dev = from_timer(dev, t, power.suspend_timer);
 	unsigned long flags;
 	unsigned long expires;
 
@@ -1499,8 +1499,7 @@ void pm_runtime_init(struct device *dev)
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
-	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
-			(unsigned long)dev);
+	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
 
 	init_waitqueue_head(&dev->power.wait_queue);
 }
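The two conversions above (and the wakeup.c ones that follow) all use the same timer_setup()/from_timer() pattern from the timer API rework: the callback receives the struct timer_list pointer and recovers its containing object with from_timer() instead of casting an unsigned long data argument. A minimal self-contained sketch, with an invented example struct, looks like this:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_ctx {
	struct timer_list timer;
	int pending;
};

static void example_timer_fn(struct timer_list *t)
{
	/* Recover the containing object from the timer_list pointer. */
	struct example_ctx *ctx = from_timer(ctx, t, timer);

	ctx->pending = 0;
}

static void example_init(struct example_ctx *ctx)
{
	/* No object pointer is passed as an unsigned long 'data' anymore. */
	timer_setup(&ctx->timer, example_timer_fn, 0);
}

static void example_arm(struct example_ctx *ctx)
{
	ctx->pending = 1;
	mod_timer(&ctx->timer, jiffies + HZ);
}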
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cdd6f256da59..680ee1d36ac9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -54,7 +54,7 @@ static unsigned int saved_count;
 
 static DEFINE_SPINLOCK(events_lock);
 
-static void pm_wakeup_timer_fn(unsigned long data);
+static void pm_wakeup_timer_fn(struct timer_list *t);
 
 static LIST_HEAD(wakeup_sources);
 
@@ -176,7 +176,7 @@ void wakeup_source_add(struct wakeup_source *ws)
 		return;
 
 	spin_lock_init(&ws->lock);
-	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 	ws->active = false;
 	ws->last_time = ktime_get();
 
@@ -481,8 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
 	 * Use timer struct to check if the given source is initialized
 	 * by wakeup_source_add.
 	 */
-	return ws->timer.function != pm_wakeup_timer_fn ||
-	       ws->timer.data != (unsigned long)ws;
+	return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
 }
 
 /*
@@ -724,9 +723,9 @@ EXPORT_SYMBOL_GPL(pm_relax);
  * in @data if it is currently active and its timer has not been canceled and
  * the expiration time of the timer is not in future.
  */
-static void pm_wakeup_timer_fn(unsigned long data)
+static void pm_wakeup_timer_fn(struct timer_list *t)
 {
-	struct wakeup_source *ws = (struct wakeup_source *)data;
+	struct wakeup_source *ws = from_timer(ws, t, timer);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9f45cfeae775..f124de3a0668 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1304,7 +1304,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * becaue the HDA driver may require us to enable the audio power
 	 * domain during system suspend.
 	 */
-	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
 
 	ret = i915_driver_init_early(dev_priv, ent);
 	if (ret < 0)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 78b3172c8e6e..f4f17552c9b8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -225,7 +225,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * MEI requires to resume from runtime suspend mode
 	 * in order to perform link reset flow upon system suspend.
 	 */
-	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
 
 	/*
 	 * ME maps runtime suspend/resume to D0i states,
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 0566f9bfa7de..e1b909123fb0 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -141,7 +141,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * MEI requires to resume from runtime suspend mode
 	 * in order to perform link reset flow upon system suspend.
 	 */
-	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
 
 	/*
 	 * TXE maps runtime suspend/resume to own power gating states,
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9be563067c0c..07b8a9b385ab 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -682,8 +682,11 @@ static int pci_pm_prepare(struct device *dev)
 
 	if (drv && drv->pm && drv->pm->prepare) {
 		int error = drv->pm->prepare(dev);
-		if (error)
+		if (error < 0)
 			return error;
+
+		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+			return 0;
 	}
 	return pci_dev_keep_suspended(to_pci_dev(dev));
 }
@@ -724,18 +727,25 @@ static int pci_pm_suspend(struct device *dev)
 
 	if (!pm) {
 		pci_pm_default_suspend(pci_dev);
-		goto Fixup;
+		return 0;
 	}
 
 	/*
-	 * PCI devices suspended at run time need to be resumed at this point,
-	 * because in general it is necessary to reconfigure them for system
-	 * suspend. Namely, if the device is supposed to wake up the system
-	 * from the sleep state, we may need to reconfigure it for this purpose.
-	 * In turn, if the device is not supposed to wake up the system from the
-	 * sleep state, we'll have to prevent it from signaling wake-up.
+	 * PCI devices suspended at run time may need to be resumed at this
+	 * point, because in general it may be necessary to reconfigure them for
+	 * system suspend. Namely, if the device is expected to wake up the
+	 * system from the sleep state, it may have to be reconfigured for this
+	 * purpose, or if the device is not expected to wake up the system from
+	 * the sleep state, it should be prevented from signaling wakeup events
+	 * going forward.
+	 *
+	 * Also if the driver of the device does not indicate that its system
+	 * suspend callbacks can cope with runtime-suspended devices, it is
+	 * better to resume the device from runtime suspend here.
 	 */
-	pm_runtime_resume(dev);
+	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+	    !pci_dev_keep_suspended(pci_dev))
+		pm_runtime_resume(dev);
 
 	pci_dev->state_saved = false;
 	if (pm->suspend) {
@@ -755,17 +765,27 @@ static int pci_pm_suspend(struct device *dev)
 		}
 	}
 
- Fixup:
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
-
 	return 0;
 }
 
+static int pci_pm_suspend_late(struct device *dev)
+{
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+	return pm_generic_suspend_late(dev);
+}
+
 static int pci_pm_suspend_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
 
@@ -827,6 +847,14 @@ static int pci_pm_resume_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	/*
+	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+	 * during system suspend, so update their runtime PM status to "active"
+	 * as they are going to be put into D0 shortly.
+	 */
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		pm_runtime_set_active(dev);
+
 	pci_pm_default_resume_early(pci_dev);
 
 	if (pci_has_legacy_pm_support(pci_dev))
@@ -869,6 +897,7 @@ static int pci_pm_resume(struct device *dev)
 #else /* !CONFIG_SUSPEND */
 
 #define pci_pm_suspend		NULL
+#define pci_pm_suspend_late	NULL
 #define pci_pm_suspend_noirq	NULL
 #define pci_pm_resume		NULL
 #define pci_pm_resume_noirq	NULL
@@ -903,7 +932,8 @@ static int pci_pm_freeze(struct device *dev)
 	 * devices should not be touched during freeze/thaw transitions,
 	 * however.
 	 */
-	pm_runtime_resume(dev);
+	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+		pm_runtime_resume(dev);
 
 	pci_dev->state_saved = false;
 	if (pm->freeze) {
@@ -915,17 +945,25 @@ static int pci_pm_freeze(struct device *dev)
 		return error;
 	}
 
-	if (pcibios_pm_ops.freeze)
-		return pcibios_pm_ops.freeze(dev);
-
 	return 0;
 }
 
+static int pci_pm_freeze_late(struct device *dev)
+{
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
+	return pm_generic_freeze_late(dev);
+}
+
 static int pci_pm_freeze_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct device_driver *drv = dev->driver;
 
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_FREEZE);
 
@@ -955,6 +993,16 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	/*
+	 * If the device is in runtime suspend, the code below may not work
+	 * correctly with it, so skip that code and make the PM core skip all of
+	 * the subsequent "thaw" callbacks for the device.
+	 */
+	if (dev_pm_smart_suspend_and_suspended(dev)) {
+		dev->power.direct_complete = true;
+		return 0;
+	}
+
 	if (pcibios_pm_ops.thaw_noirq) {
 		error = pcibios_pm_ops.thaw_noirq(dev);
 		if (error)
@@ -979,12 +1027,6 @@ static int pci_pm_thaw(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
-	if (pcibios_pm_ops.thaw) {
-		error = pcibios_pm_ops.thaw(dev);
-		if (error)
-			return error;
-	}
-
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
@@ -1010,11 +1052,13 @@ static int pci_pm_poweroff(struct device *dev)
 
 	if (!pm) {
 		pci_pm_default_suspend(pci_dev);
-		goto Fixup;
+		return 0;
 	}
 
 	/* The reason to do that is the same as in pci_pm_suspend(). */
-	pm_runtime_resume(dev);
+	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+	    !pci_dev_keep_suspended(pci_dev))
+		pm_runtime_resume(dev);
 
 	pci_dev->state_saved = false;
 	if (pm->poweroff) {
@@ -1026,13 +1070,17 @@ static int pci_pm_poweroff(struct device *dev)
 		return error;
 	}
 
- Fixup:
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
+	return 0;
+}
 
-	if (pcibios_pm_ops.poweroff)
-		return pcibios_pm_ops.poweroff(dev);
+static int pci_pm_poweroff_late(struct device *dev)
+{
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
 
-	return 0;
+	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+	return pm_generic_poweroff_late(dev);
 }
 
 static int pci_pm_poweroff_noirq(struct device *dev)
@@ -1040,6 +1088,9 @@ static int pci_pm_poweroff_noirq(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct device_driver *drv = dev->driver;
 
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		return 0;
+
 	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
 		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
 
@@ -1081,6 +1132,10 @@ static int pci_pm_restore_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	/* This is analogous to the pci_pm_resume_noirq() case. */
+	if (dev_pm_smart_suspend_and_suspended(dev))
+		pm_runtime_set_active(dev);
+
 	if (pcibios_pm_ops.restore_noirq) {
 		error = pcibios_pm_ops.restore_noirq(dev);
 		if (error)
@@ -1104,12 +1159,6 @@ static int pci_pm_restore(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int error = 0;
 
-	if (pcibios_pm_ops.restore) {
-		error = pcibios_pm_ops.restore(dev);
-		if (error)
-			return error;
-	}
-
 	/*
 	 * This is necessary for the hibernation error path in which restore is
 	 * called without restoring the standard config registers of the device.
@@ -1135,10 +1184,12 @@ static int pci_pm_restore(struct device *dev)
 #else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define pci_pm_freeze		NULL
+#define pci_pm_freeze_late	NULL
 #define pci_pm_freeze_noirq	NULL
 #define pci_pm_thaw		NULL
 #define pci_pm_thaw_noirq	NULL
 #define pci_pm_poweroff		NULL
+#define pci_pm_poweroff_late	NULL
 #define pci_pm_poweroff_noirq	NULL
 #define pci_pm_restore		NULL
 #define pci_pm_restore_noirq	NULL
@@ -1254,10 +1305,13 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
 	.prepare = pci_pm_prepare,
 	.complete = pci_pm_complete,
 	.suspend = pci_pm_suspend,
+	.suspend_late = pci_pm_suspend_late,
 	.resume = pci_pm_resume,
 	.freeze = pci_pm_freeze,
+	.freeze_late = pci_pm_freeze_late,
 	.thaw = pci_pm_thaw,
 	.poweroff = pci_pm_poweroff,
+	.poweroff_late = pci_pm_poweroff_late,
 	.restore = pci_pm_restore,
 	.suspend_noirq = pci_pm_suspend_noirq,
 	.resume_noirq = pci_pm_resume_noirq,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6078dfc11b11..374f5686e2bc 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2166,8 +2166,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 
 	if (!pm_runtime_suspended(dev)
 	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
-	    || platform_pci_need_resume(pci_dev)
-	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
+	    || platform_pci_need_resume(pci_dev))
 		return false;
 
 	/*
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0ada2a948b44..dc1ebfeeb5ec 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -885,17 +885,27 @@ int acpi_dev_suspend_late(struct device *dev);
 int acpi_subsys_prepare(struct device *dev);
 void acpi_subsys_complete(struct device *dev);
 int acpi_subsys_suspend_late(struct device *dev);
+int acpi_subsys_suspend_noirq(struct device *dev);
+int acpi_subsys_resume_noirq(struct device *dev);
 int acpi_subsys_resume_early(struct device *dev);
 int acpi_subsys_suspend(struct device *dev);
 int acpi_subsys_freeze(struct device *dev);
+int acpi_subsys_freeze_late(struct device *dev);
+int acpi_subsys_freeze_noirq(struct device *dev);
+int acpi_subsys_thaw_noirq(struct device *dev);
 #else
 static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
 static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
 static inline void acpi_subsys_complete(struct device *dev) {}
 static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
 static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
 static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
 static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
 #endif
 
 #ifdef CONFIG_ACPI
diff --git a/include/linux/device.h b/include/linux/device.h
index 66fe271c2544..fb9451599aca 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -370,9 +370,6 @@ int subsys_virtual_register(struct bus_type *subsys,
  * @devnode:	Callback to provide the devtmpfs.
  * @class_release: Called to release this class.
  * @dev_release: Called to release the device.
- * @suspend:	Used to put the device to sleep mode, usually to a low power
- *		state.
- * @resume:	Used to bring the device from the sleep mode.
  * @shutdown_pre: Called at shut-down time before driver shutdown.
  * @ns_type:	Callbacks so sysfs can detemine namespaces.
  * @namespace:	Namespace of the device belongs to this class.
@@ -400,8 +397,6 @@ struct class {
 	void (*class_release)(struct class *class);
 	void (*dev_release)(struct device *dev);
 
-	int (*suspend)(struct device *dev, pm_message_t state);
-	int (*resume)(struct device *dev);
 	int (*shutdown_pre)(struct device *dev);
 
 	const struct kobj_ns_type_operations *ns_type;
@@ -1075,6 +1070,16 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val)
 #endif
 }
 
+static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
+{
+	dev->power.driver_flags = flags;
+}
+
+static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
+{
+	return !!(dev->power.driver_flags & flags);
+}
+
 static inline void device_lock(struct device *dev)
 {
 	mutex_lock(&dev->mutex);
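One detail worth noting about the helpers added above: dev_pm_set_driver_flags() assigns power.driver_flags rather than OR-ing into it, so a driver wanting several flags should pass them combined in a single call. A minimal sketch (the function name is invented; the helpers and flags are the ones introduced by this merge):

#include <linux/device.h>
#include <linux/pm.h>

static void example_set_flags(struct device *dev)
{
	/*
	 * dev_pm_set_driver_flags() assigns, it does not OR, so combine all
	 * desired flags in one call made once at probe time.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);

	/* dev_pm_test_driver_flags() is true if any of the passed bits are set. */
	if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		dev_dbg(dev, "SMART_SUSPEND is set\n");
}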
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d16a7c037ec0..e920a2527797 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -206,13 +206,8 @@ enum pci_dev_flags {
 	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
 	/* Do not use FLR even if device advertises PCI_AF_CAP */
 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
-	/*
-	 * Resume before calling the driver's system suspend hooks, disabling
-	 * the direct_complete optimization.
-	 */
-	PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
 	/* Don't use Relaxed Ordering for TLPs directed at this device */
-	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
+	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
 };
 
 enum pci_irq_reroute_variant {
diff --git a/include/linux/pm.h b/include/linux/pm.h
index a0ceeccf2846..65d39115f06d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -550,6 +550,33 @@ struct pm_subsys_data {
 #endif
 };
 
+/*
+ * Driver flags to control system suspend/resume behavior.
+ *
+ * These flags can be set by device drivers at probe time. They need not be
+ * cleared by the drivers as the driver core will take care of that.
+ *
+ * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * SMART_PREPARE: Check the return value of the driver's ->prepare callback.
+ * SMART_SUSPEND: No need to resume the device from runtime suspend.
+ *
+ * Setting SMART_PREPARE instructs bus types and PM domains which may want
+ * system suspend/resume callbacks to be skipped for the device to return 0 from
+ * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in
+ * other words, the system suspend/resume callbacks can only be skipped for the
+ * device if its driver doesn't object to that). This flag has no effect
+ * if NEVER_SKIP is set.
+ *
+ * Setting SMART_SUSPEND instructs bus types and PM domains which may want to
+ * runtime resume the device upfront during system suspend that doing so is not
+ * necessary from the driver's perspective. It also may cause them to skip
+ * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
+ * the driver if they decide to leave the device in runtime suspend.
+ */
+#define DPM_FLAG_NEVER_SKIP	BIT(0)
+#define DPM_FLAG_SMART_PREPARE	BIT(1)
+#define DPM_FLAG_SMART_SUSPEND	BIT(2)
+
 struct dev_pm_info {
 	pm_message_t power_state;
 	unsigned int can_wakeup:1;
@@ -561,6 +588,7 @@ struct dev_pm_info {
 	bool is_late_suspended:1;
 	bool early_init:1;	/* Owned by the PM core */
 	bool direct_complete:1;	/* Owned by the PM core */
+	u32 driver_flags;
 	spinlock_t lock;
 #ifdef CONFIG_PM_SLEEP
 	struct list_head entry;
@@ -737,6 +765,8 @@ extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);
 
+extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
+
 #else /* !CONFIG_PM_SLEEP */
 
 #define device_pm_lock() do {} while (0)