 Documentation/driver-api/pm/devices.rst |  27
 Documentation/power/pci.txt             |  11
 drivers/acpi/device_pm.c                |  27
 drivers/base/power/main.c               | 102
 drivers/base/power/sysfs.c              | 182
 drivers/pci/pci-driver.c                |  19
 include/linux/pm.h                      |  16
 7 files changed, 252 insertions(+), 132 deletions(-)
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 53c1b0b06da5..b0fe63c91f8d 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -788,6 +788,29 @@ must reflect the "active" status for runtime PM in that case.

 During system-wide resume from a sleep state it's easiest to put devices into
 the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
-Refer to that document for more information regarding this particular issue as
-well as for information on the device runtime power management framework in
-general.
+[Refer to that document for more information regarding this particular issue as
+well as for information on the device runtime power management framework in
+general.]
+
+However, it often is desirable to leave devices in suspend after system
+transitions to the working state, especially if those devices had been in
+runtime suspend before the preceding system-wide suspend (or analogous)
+transition.  Device drivers can use the ``DPM_FLAG_LEAVE_SUSPENDED`` flag to
+indicate to the PM core (and middle-layer code) that they prefer the specific
+devices handled by them to be left suspended and they have no problems with
+skipping their system-wide resume callbacks for this reason.  Whether or not the
+devices will actually be left in suspend may depend on their state before the
+given system suspend-resume cycle and on the type of the system transition under
+way.  In particular, devices are not left suspended if that transition is a
+restore from hibernation, as device states are not guaranteed to be reflected
+by the information stored in the hibernation image in that case.
+
+The middle-layer code involved in the handling of the device is expected to
+indicate to the PM core if the device may be left in suspend by setting its
+:c:member:`power.may_skip_resume` status bit which is checked by the PM core
+during the "noirq" phase of the preceding system-wide suspend (or analogous)
+transition.  The middle layer is then responsible for handling the device as
+appropriate in its "noirq" resume callback, which is executed regardless of
+whether or not the device is left suspended, but the other resume callbacks
+(except for ``->complete``) will be skipped automatically by the PM core if the
+device really can be left in suspend.
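
As an aside, here is a minimal sketch (not itself part of this patch) of how a
driver might opt in to the behavior described above.  The foo_probe() function
and its platform device are hypothetical; dev_pm_set_driver_flags(),
pm_runtime_enable() and the DPM_FLAG_* constants are the real interfaces
involved:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/*
	 * Tell the PM core (and the middle layer) that this driver can cope
	 * with its system-wide resume callbacks being skipped when the
	 * device can stay suspended across a suspend-resume cycle.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);

	pm_runtime_enable(dev);	/* both flags assume runtime PM is enabled */
	return 0;
}

Combining the two flags as above is typical but not required; either flag can
be set on its own.
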
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 704cd36079b8..8eaf9ee24d43 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -994,6 +994,17 @@ into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
 the function will set the power.direct_complete flag for it (to make the PM core
 skip the subsequent "thaw" callbacks for it) and return.

+Setting the DPM_FLAG_LEAVE_SUSPENDED flag means that the driver prefers the
+device to be left in suspend after system-wide transitions to the working state.
+This flag is checked by the PM core, but the PCI bus type informs the PM core
+which devices may be left in suspend from its perspective (that happens during
+the "noirq" phase of system-wide suspend and analogous transitions) and next it
+uses the dev_pm_may_skip_resume() helper to decide whether or not to return from
+pci_pm_resume_noirq() early, as the PM core will skip the remaining resume
+callbacks for the device during the transition under way and will set its
+runtime PM status to "suspended" if dev_pm_may_skip_resume() returns "true" for
+it.
+
 3.2. Device Runtime Power Management
 ------------------------------------
 In addition to providing device power management callbacks PCI device drivers
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a4c8ad98560d..c4d0a1c912f0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
 	 * the sleep state it is going out of and it has never been resumed till
 	 * now, resume it in case the firmware powered it up.
 	 */
-	if (dev->power.direct_complete && pm_resume_via_firmware())
+	if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
 		pm_request_resume(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
  */
 int acpi_subsys_suspend_noirq(struct device *dev)
 {
-	if (dev_pm_smart_suspend_and_suspended(dev))
+	int ret;
+
+	if (dev_pm_smart_suspend_and_suspended(dev)) {
+		dev->power.may_skip_resume = true;
 		return 0;
+	}
+
+	ret = pm_generic_suspend_noirq(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * If the target system sleep state is suspend-to-idle, it is sufficient
+	 * to check whether or not the device's wakeup settings are good for
+	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
+	 * acpi_subsys_complete() to take care of fixing up the device's state
+	 * anyway, if need be.
+	 */
+	dev->power.may_skip_resume = device_may_wakeup(dev) ||
+					!device_can_wakeup(dev);

-	return pm_generic_suspend_noirq(dev);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);

@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
  */
 int acpi_subsys_resume_noirq(struct device *dev)
 {
+	if (dev_pm_may_skip_resume(dev))
+		return 0;
+
 	/*
 	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
 	 * during system suspend, so update their runtime PM status to "active"
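
The same middle-layer pattern recurs in the PCI changes further down.
Roughly, with a hypothetical foo_bus type standing in for ACPI or PCI, a
subsystem's "noirq" resume callback now looks like this:

static int foo_bus_resume_noirq(struct device *dev)
{
	/* The PM core keeps the device suspended and skips the remaining
	 * resume phases for it when this returns "true". */
	if (dev_pm_may_skip_resume(dev))
		return 0;

	/* Devices left in runtime suspend for the whole system suspend
	 * need their runtime PM status set back to "active" first. */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	return pm_generic_resume_noirq(dev);
}
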
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 08744b572af6..6e8cc5de93fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
  */

 #include <linux/device.h>
-#include <linux/kallsyms.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/pm.h>
@@ -541,6 +540,18 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
 }

 /**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+/**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -588,6 +599,18 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 	error = dpm_run_callback(callback, dev, state, info);
 	dev->power.is_noirq_suspended = false;

+	if (dev_pm_may_skip_resume(dev)) {
+		/*
+		 * The device is going to be left in suspend, but it might not
+		 * have been in runtime suspend before the system suspended, so
+		 * its runtime PM status needs to be updated to avoid confusing
+		 * the runtime PM framework when runtime PM is enabled for the
+		 * device again.
+		 */
+		pm_runtime_set_suspended(dev);
+		dev_pm_skip_next_resume_phases(dev);
+	}
+
  Out:
 	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
@@ -1089,6 +1112,22 @@ static pm_message_t resume_event(pm_message_t sleep_state)
 	return PMSG_ON;
 }

+static void dpm_superior_set_must_resume(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	if (dev->parent)
+		dev->parent->power.must_resume = true;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		link->supplier->power.must_resume = true;
+
+	device_links_read_unlock(idx);
+}
+
 /**
  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1140,10 +1179,28 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	}

 	error = dpm_run_callback(callback, dev, state, info);
-	if (!error)
-		dev->power.is_noirq_suspended = true;
-	else
+	if (error) {
 		async_error = error;
+		goto Complete;
+	}
+
+	dev->power.is_noirq_suspended = true;
+
+	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+		/*
+		 * The only safe strategy here is to require that if the device
+		 * may not be left in suspend, resume callbacks must be invoked
+		 * for it.
+		 */
+		dev->power.must_resume = dev->power.must_resume ||
+					!dev->power.may_skip_resume ||
+					atomic_read(&dev->power.usage_count) > 1;
+	} else {
+		dev->power.must_resume = true;
+	}
+
+	if (dev->power.must_resume)
+		dpm_superior_set_must_resume(dev);

 Complete:
 	complete_all(&dev->power.completion);
@@ -1435,6 +1492,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	return error;
 }

+static void dpm_propagate_to_parent(struct device *dev)
+{
+	struct device *parent = dev->parent;
+
+	if (!parent)
+		return;
+
+	spin_lock_irq(&parent->power.lock);
+
+	parent->power.direct_complete = false;
+	if (dev->power.wakeup_path && !parent->power.ignore_children)
+		parent->power.wakeup_path = true;
+
+	spin_unlock_irq(&parent->power.lock);
+}
+
 static void dpm_clear_suppliers_direct_complete(struct device *dev)
 {
 	struct device_link *link;
@@ -1500,6 +1573,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		dev->power.direct_complete = false;
 	}

+	dev->power.may_skip_resume = false;
+	dev->power.must_resume = false;
+
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);

@@ -1543,19 +1619,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)

  End:
 	if (!error) {
-		struct device *parent = dev->parent;
-
 		dev->power.is_suspended = true;
-		if (parent) {
-			spin_lock_irq(&parent->power.lock);
-
-			dev->parent->power.direct_complete = false;
-			if (dev->power.wakeup_path
-			    && !dev->parent->power.ignore_children)
-				dev->parent->power.wakeup_path = true;
-
-			spin_unlock_irq(&parent->power.lock);
-		}
+		dpm_propagate_to_parent(dev);
 		dpm_clear_suppliers_direct_complete(dev);
 	}

@@ -1665,8 +1730,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		return 0;

-	WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
-		!pm_runtime_enabled(dev));
+	WARN_ON(!pm_runtime_enabled(dev) &&
+		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+					      DPM_FLAG_LEAVE_SUSPENDED));

 	/*
 	 * If a device's parent goes into runtime suspend at the wrong time,
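
Taken together, the above means a device's remaining resume callbacks are
skipped only when all of the conditions below hold.  This helper is
illustrative shorthand rather than code from the patch (pm_transition, for
one, is local to main.c), and it compresses checks that actually happen partly
at suspend time and partly at resume time:

static bool device_left_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED) &&
	       dev->power.may_skip_resume &&	/* the middle layer agreed */
	       atomic_read(&dev->power.usage_count) <= 1 && /* no extra PM references */
	       !dev->power.must_resume &&	/* no child or consumer needs the device */
	       pm_transition.event != PM_EVENT_RESTORE;	/* not a restore from hibernation */
}

The must_resume propagation via dpm_superior_set_must_resume() is what makes
the fourth condition work: a device whose child or consumer must be resumed
cannot itself stay suspended.
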
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
 static ssize_t control_store(struct device * dev, struct device_attribute *attr,
			     const char * buf, size_t n)
 {
-	char *cp;
-	int len = n;
-
-	cp = memchr(buf, '\n', n);
-	if (cp)
-		len = cp - buf;
 	device_lock(dev);
-	if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+	if (sysfs_streq(buf, ctrl_auto))
 		pm_runtime_allow(dev);
-	else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+	else if (sysfs_streq(buf, ctrl_on))
 		pm_runtime_forbid(dev);
 	else
 		n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 	return n;
 }

-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);

-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
 	int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
 	return ret;
 }

-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);

-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
 	int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
 	return ret;
 }

-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);

-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
 	const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
 	return sprintf(buf, p);
 }

-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);

 static ssize_t autosuspend_delay_ms_show(struct device *dev,
		struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 	return n;
 }

-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
-		autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);

-static ssize_t pm_qos_resume_latency_show(struct device *dev,
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
 {
 	s32 value = dev_pm_qos_requested_resume_latency(dev);

 	if (value == 0)
 		return sprintf(buf, "n/a\n");
-	else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+	if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
 		value = 0;

 	return sprintf(buf, "%d\n", value);
 }

-static ssize_t pm_qos_resume_latency_store(struct device *dev,
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t n)
 {
 	s32 value;
 	int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,

 		if (value == 0)
 			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-	} else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+	} else if (sysfs_streq(buf, "n/a")) {
 		value = 0;
 	} else {
 		return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
 	return ret < 0 ? ret : n;
 }

-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-		   pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
 {
 	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

 	if (value < 0)
 		return sprintf(buf, "auto\n");
-	else if (value == PM_QOS_LATENCY_ANY)
+	if (value == PM_QOS_LATENCY_ANY)
 		return sprintf(buf, "any\n");

 	return sprintf(buf, "%d\n", value);
 }

-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t n)
 {
 	s32 value;
 	int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
 		if (value < 0)
 			return -EINVAL;
 	} else {
-		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+		if (sysfs_streq(buf, "auto"))
 			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
-		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+		else if (sysfs_streq(buf, "any"))
 			value = PM_QOS_LATENCY_ANY;
 		else
 			return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
 	return ret < 0 ? ret : n;
 }

-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
-		   pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

 static ssize_t pm_qos_no_power_off_show(struct device *dev,
					struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
 	return ret < 0 ? ret : n;
 }

-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
-		   pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);

 #ifdef CONFIG_PM_SLEEP
 static const char _enabled[] = "enabled";
 static const char _disabled[] = "disabled";

-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
 {
 	return sprintf(buf, "%s\n", device_can_wakeup(dev)
		? (device_may_wakeup(dev) ? _enabled : _disabled)
		: "");
 }

-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
-	   const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t n)
 {
-	char *cp;
-	int len = n;
-
 	if (!device_can_wakeup(dev))
 		return -EINVAL;

-	cp = memchr(buf, '\n', n);
-	if (cp)
-		len = cp - buf;
-	if (len == sizeof _enabled - 1
-			&& strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+	if (sysfs_streq(buf, _enabled))
 		device_set_wakeup_enable(dev, 1);
-	else if (len == sizeof _disabled - 1
-			&& strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+	else if (sysfs_streq(buf, _disabled))
 		device_set_wakeup_enable(dev, 0);
 	else
 		return -EINVAL;
 	return n;
 }

-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);

 static ssize_t wakeup_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
 	unsigned long count = 0;
 	bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);

 static ssize_t wakeup_active_count_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+					struct device_attribute *attr,
+					char *buf)
 {
 	unsigned long count = 0;
 	bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);

 static ssize_t wakeup_abort_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
 {
 	unsigned long count = 0;
 	bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);

 static ssize_t wakeup_expire_count_show(struct device *dev,
					struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);

 static ssize_t wakeup_active_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
 	unsigned int active = 0;
 	bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
 	return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);

-static ssize_t wakeup_total_time_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
 {
 	s64 msec = 0;
 	bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);

-static ssize_t wakeup_max_time_show(struct device *dev,
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
 	s64 msec = 0;
 	bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);

-static ssize_t wakeup_last_time_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
 {
 	s64 msec = 0;
 	bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);

 #ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
 {
 	s64 msec = 0;
 	bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
 	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }

-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
-		   wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
 #endif /* CONFIG_PM_AUTOSLEEP */
 #endif /* CONFIG_PM_SLEEP */

 #ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
+static ssize_t runtime_usage_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
 }
+static DEVICE_ATTR_RO(runtime_usage);

-static ssize_t rtpm_children_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
 {
 	return sprintf(buf, "%d\n", dev->power.ignore_children ?
		0 : atomic_read(&dev->power.child_count));
 }
+static DEVICE_ATTR_RO(runtime_active_kids);

-static ssize_t rtpm_enabled_show(struct device *dev,
+static ssize_t runtime_enabled_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
-	if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+	if (dev->power.disable_depth && (dev->power.runtime_auto == false))
 		return sprintf(buf, "disabled & forbidden\n");
-	else if (dev->power.disable_depth)
+	if (dev->power.disable_depth)
 		return sprintf(buf, "disabled\n");
-	else if (dev->power.runtime_auto == false)
+	if (dev->power.runtime_auto == false)
 		return sprintf(buf, "forbidden\n");
 	return sprintf(buf, "enabled\n");
 }
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);

 #ifdef CONFIG_PM_SLEEP
 static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
 static ssize_t async_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t n)
 {
-	char *cp;
-	int len = n;
-
-	cp = memchr(buf, '\n', n);
-	if (cp)
-		len = cp - buf;
-	if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+	if (sysfs_streq(buf, _enabled))
 		device_enable_async_suspend(dev);
-	else if (len == sizeof _disabled - 1 &&
-		 strncmp(buf, _disabled, len) == 0)
+	else if (sysfs_streq(buf, _disabled))
 		device_disable_async_suspend(dev);
 	else
 		return -EINVAL;
 	return n;
 }

-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);

 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
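
A note on the many renames in this file (rtpm_status_show() becoming
runtime_status_show(), and so on): they are not cosmetic.  The
DEVICE_ATTR_RO() and DEVICE_ATTR_RW() helpers derive the callback names from
the attribute name, roughly as follows (paraphrasing include/linux/device.h
and include/linux/sysfs.h):

/* DEVICE_ATTR_RO(runtime_status) only links if runtime_status_show() exists. */
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

#define __ATTR_RO(_name) {						\
	.attr	= { .name = __stringify(_name), .mode = 0444 },		\
	.show	= _name##_show,						\
}

So each show/store routine has to match the name of the attribute it backs,
which is what the renames accomplish.
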
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 945099d49f8f..9e53e51b91f3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
 	pm_generic_complete(dev);

 	/* Resume device if platform firmware has put it in reset-power-on */
-	if (dev->power.direct_complete && pm_resume_via_firmware()) {
+	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
 		pci_power_t pre_sleep_state = pci_dev->current_state;

 		pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

-	if (dev_pm_smart_suspend_and_suspended(dev))
+	if (dev_pm_smart_suspend_and_suspended(dev)) {
+		dev->power.may_skip_resume = true;
 		return 0;
+	}

 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
 Fixup:
 	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

+	/*
+	 * If the target system sleep state is suspend-to-idle, it is sufficient
+	 * to check whether or not the device's wakeup settings are good for
+	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
+	 * pci_pm_complete() to take care of fixing up the device's state
+	 * anyway, if need be.
+	 */
+	dev->power.may_skip_resume = device_may_wakeup(dev) ||
+					!device_can_wakeup(dev);
+
 	return 0;
 }

@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;

+	if (dev_pm_may_skip_resume(dev))
+		return 0;
+
 	/*
 	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
 	 * during system suspend, so update their runtime PM status to "active"
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 492ed473ba7e..e723b78d8357 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -556,9 +556,10 @@ struct pm_subsys_data {
  * These flags can be set by device drivers at the probe time. They need not be
  * cleared by the drivers as the driver core will take care of that.
  *
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
  * SMART_PREPARE: Check the return value of the driver's ->prepare callback.
  * SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
  *
  * Setting SMART_PREPARE instructs bus types and PM domains which may want
  * system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
  * necessary from the driver's perspective.  It also may cause them to skip
  * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
  * the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
  */
 #define DPM_FLAG_NEVER_SKIP	BIT(0)
 #define DPM_FLAG_SMART_PREPARE	BIT(1)
 #define DPM_FLAG_SMART_SUSPEND	BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED	BIT(3)

 struct dev_pm_info {
 	pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
 	bool wakeup_path:1;
 	bool syscore:1;
 	bool no_pm_callbacks:1;	/* Owned by the PM core */
+	unsigned int must_resume:1;	/* Owned by the PM core */
+	unsigned int may_skip_resume:1;	/* Set by subsystems */
 #else
 	unsigned int should_wakeup:1;
 #endif
@@ -766,6 +773,7 @@ extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);

 extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
 extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);

 #else /* !CONFIG_PM_SLEEP */