author      Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2017-12-11 08:25:48 -0500
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2017-12-11 08:25:48 -0500
commit      4f86a21fae38242811d5c84f86df45e0eed6cd84 (patch)
tree        84f71f318bf98f9803b8ed34812163d56979c944 /drivers
parent      31eb7431805493e10f4731f366cf4d4e3e952035 (diff)
parent      1172ee31259b51a9b2d83b05f01161fd5938b15d (diff)
Merge back earlier PM core material for v4.16.
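The material merged here adds DPM_FLAG_LEAVE_SUSPENDED handling to the PM core (the may_skip_resume/must_resume bookkeeping and the dev_pm_may_skip_resume() helper), wires it up in the ACPI and PCI bus types, and converts the device power sysfs attributes to DEVICE_ATTR_RO()/DEVICE_ATTR_RW() and sysfs_streq(). For orientation, a driver that wants its device left suspended across system resume would opt in roughly as in the sketch below; foo_probe() and its device are made up for illustration, only the flag names and dev_pm_set_driver_flags() come from the kernel.

    #include <linux/pci.h>
    #include <linux/pm.h>

    /* Hypothetical driver opting in to both flags from its probe routine. */
    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            /*
             * SMART_SUSPEND: a runtime-suspended device may skip its system
             * suspend callbacks; LEAVE_SUSPENDED: it may also stay suspended
             * across the following system resume unless something sets
             * must_resume for it.
             */
            dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                                DPM_FLAG_LEAVE_SUSPENDED);
            return 0;
    }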
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/acpi/device_pm.c   |  27
-rw-r--r--   drivers/base/power/main.c  | 102
-rw-r--r--   drivers/base/power/sysfs.c | 182
-rw-r--r--   drivers/pci/pci-driver.c   |  19
4 files changed, 205 insertions, 125 deletions
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..5cfe794c36bd 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
  * the sleep state it is going out of and it has never been resumed till
  * now, resume it in case the firmware powered it up.
  */
-        if (dev->power.direct_complete && pm_resume_via_firmware())
+        if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
                 pm_request_resume(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
  */
 int acpi_subsys_suspend_noirq(struct device *dev)
 {
-        if (dev_pm_smart_suspend_and_suspended(dev))
+        int ret;
+
+        if (dev_pm_smart_suspend_and_suspended(dev)) {
+                dev->power.may_skip_resume = true;
                 return 0;
+        }
+
+        ret = pm_generic_suspend_noirq(dev);
+        if (ret)
+                return ret;
+
+        /*
+         * If the target system sleep state is suspend-to-idle, it is sufficient
+         * to check whether or not the device's wakeup settings are good for
+         * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+         * acpi_subsys_complete() to take care of fixing up the device's state
+         * anyway, if need be.
+         */
+        dev->power.may_skip_resume = device_may_wakeup(dev) ||
+                                        !device_can_wakeup(dev);
 
-        return pm_generic_suspend_noirq(dev);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
 
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
  */
 int acpi_subsys_resume_noirq(struct device *dev)
 {
+        if (dev_pm_may_skip_resume(dev))
+                return 0;
+
         /*
          * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
          * during system suspend, so update their runtime PM status to "active"
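Both acpi_subsys_suspend_noirq() above and pci_pm_suspend_noirq() further down key off dev_pm_smart_suspend_and_suspended(); in this kernel series that helper is roughly the inline below (approximate sketch for reference, see include/linux/pm_runtime.h), which is why a device whose suspend callbacks were skipped can immediately be marked as allowed to skip resume as well.

    /* Approximate definition, shown only to make the hunks above easier to follow. */
    static inline bool dev_pm_smart_suspend_and_suspended(struct device *dev)
    {
            return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
                    pm_runtime_status_suspended(dev);
    }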
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..c0d5f4a3611d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,6 +526,18 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 /*------------------------- Resume routines -------------------------*/
 
 /**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+        return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+/**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -573,6 +585,19 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
         error = dpm_run_callback(callback, dev, state, info);
         dev->power.is_noirq_suspended = false;
 
+        if (dev_pm_may_skip_resume(dev)) {
+                /*
+                 * The device is going to be left in suspend, but it might not
+                 * have been in runtime suspend before the system suspended, so
+                 * its runtime PM status needs to be updated to avoid confusing
+                 * the runtime PM framework when runtime PM is enabled for the
+                 * device again.
+                 */
+                pm_runtime_set_suspended(dev);
+                dev->power.is_late_suspended = false;
+                dev->power.is_suspended = false;
+        }
+
  Out:
         complete_all(&dev->power.completion);
         TRACE_RESUME(error);
@@ -1074,6 +1099,22 @@ static pm_message_t resume_event(pm_message_t sleep_state)
         return PMSG_ON;
 }
 
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+        struct device_link *link;
+        int idx;
+
+        if (dev->parent)
+                dev->parent->power.must_resume = true;
+
+        idx = device_links_read_lock();
+
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+                link->supplier->power.must_resume = true;
+
+        device_links_read_unlock(idx);
+}
+
 /**
  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1125,10 +1166,28 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
         }
 
         error = dpm_run_callback(callback, dev, state, info);
-        if (!error)
-                dev->power.is_noirq_suspended = true;
-        else
+        if (error) {
                 async_error = error;
+                goto Complete;
+        }
+
+        dev->power.is_noirq_suspended = true;
+
+        if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+                /*
+                 * The only safe strategy here is to require that if the device
+                 * may not be left in suspend, resume callbacks must be invoked
+                 * for it.
+                 */
+                dev->power.must_resume = dev->power.must_resume ||
+                                        !dev->power.may_skip_resume ||
+                                        atomic_read(&dev->power.usage_count) > 1;
+        } else {
+                dev->power.must_resume = true;
+        }
+
+        if (dev->power.must_resume)
+                dpm_superior_set_must_resume(dev);
 
 Complete:
         complete_all(&dev->power.completion);
@@ -1420,6 +1479,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
         return error;
 }
 
+static void dpm_propagate_to_parent(struct device *dev)
+{
+        struct device *parent = dev->parent;
+
+        if (!parent)
+                return;
+
+        spin_lock_irq(&parent->power.lock);
+
+        parent->power.direct_complete = false;
+        if (dev->power.wakeup_path && !parent->power.ignore_children)
+                parent->power.wakeup_path = true;
+
+        spin_unlock_irq(&parent->power.lock);
+}
+
 static void dpm_clear_suppliers_direct_complete(struct device *dev)
 {
         struct device_link *link;
@@ -1485,6 +1560,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                 dev->power.direct_complete = false;
         }
 
+        dev->power.may_skip_resume = false;
+        dev->power.must_resume = false;
+
         dpm_watchdog_set(&wd, dev);
         device_lock(dev);
 
@@ -1528,19 +1606,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
  End:
         if (!error) {
-                struct device *parent = dev->parent;
-
                 dev->power.is_suspended = true;
-                if (parent) {
-                        spin_lock_irq(&parent->power.lock);
-
-                        dev->parent->power.direct_complete = false;
-                        if (dev->power.wakeup_path
-                            && !dev->parent->power.ignore_children)
-                                dev->parent->power.wakeup_path = true;
-
-                        spin_unlock_irq(&parent->power.lock);
-                }
+                dpm_propagate_to_parent(dev);
                 dpm_clear_suppliers_direct_complete(dev);
         }
 
@@ -1650,8 +1717,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
         if (dev->power.syscore)
                 return 0;
 
-        WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
-                !pm_runtime_enabled(dev));
+        WARN_ON(!pm_runtime_enabled(dev) &&
+                dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+                                          DPM_FLAG_LEAVE_SUSPENDED));
 
         /*
          * If a device's parent goes into runtime suspend at the wrong time,
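The sysfs.c changes that follow rely on DEVICE_ATTR_RO()/DEVICE_ATTR_RW() deriving the callback names from the attribute name, which is why every rtpm_*/wake_* helper below is renamed to <attribute>_show or <attribute>_store. For reference, the macros expand roughly as sketched here; the real definitions live in include/linux/device.h and include/linux/sysfs.h.

    /* Approximate expansion, for orientation only. */
    #define DEVICE_ATTR_RO(_name) \
            struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
    #define DEVICE_ATTR_RW(_name) \
            struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
    /*
     * __ATTR_RO(_name) wires up _name##_show with read-only permissions and
     * __ATTR_RW(_name) wires up _name##_show/_name##_store with 0644, so the
     * attribute name and the helper names must match.
     */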
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
 static ssize_t control_store(struct device * dev, struct device_attribute *attr,
                              const char * buf, size_t n)
 {
-        char *cp;
-        int len = n;
-
-        cp = memchr(buf, '\n', n);
-        if (cp)
-                len = cp - buf;
         device_lock(dev);
-        if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+        if (sysfs_streq(buf, ctrl_auto))
                 pm_runtime_allow(dev);
-        else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+        else if (sysfs_streq(buf, ctrl_on))
                 pm_runtime_forbid(dev);
         else
                 n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
         return n;
 }
 
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
 
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
         return ret;
 }
 
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
 
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
         return ret;
 }
 
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
 
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
         return sprintf(buf, p);
 }
 
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
 
 static ssize_t autosuspend_delay_ms_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
         return n;
 }
 
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
-                autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
 
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
         s32 value = dev_pm_qos_requested_resume_latency(dev);
 
         if (value == 0)
                 return sprintf(buf, "n/a\n");
-        else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+        if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                 value = 0;
 
         return sprintf(buf, "%d\n", value);
 }
 
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t n)
 {
         s32 value;
         int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
 
                 if (value == 0)
                         value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-        } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+        } else if (sysfs_streq(buf, "n/a")) {
                 value = 0;
         } else {
                 return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
         return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-        pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
 
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
         s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
 
         if (value < 0)
                 return sprintf(buf, "auto\n");
-        else if (value == PM_QOS_LATENCY_ANY)
+        if (value == PM_QOS_LATENCY_ANY)
                 return sprintf(buf, "any\n");
 
         return sprintf(buf, "%d\n", value);
 }
 
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t n)
 {
         s32 value;
         int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
                 if (value < 0)
                         return -EINVAL;
         } else {
-                if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+                if (sysfs_streq(buf, "auto"))
                         value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
-                else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+                else if (sysfs_streq(buf, "any"))
                         value = PM_QOS_LATENCY_ANY;
                 else
                         return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
         return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
-        pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
 
 static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                 struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
         return ret < 0 ? ret : n;
 }
 
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
-        pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
 
 #ifdef CONFIG_PM_SLEEP
 static const char _enabled[] = "enabled";
 static const char _disabled[] = "disabled";
 
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
 {
         return sprintf(buf, "%s\n", device_can_wakeup(dev)
                 ? (device_may_wakeup(dev) ? _enabled : _disabled)
                 : "");
 }
 
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
-           const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t n)
 {
-        char *cp;
-        int len = n;
-
         if (!device_can_wakeup(dev))
                 return -EINVAL;
 
-        cp = memchr(buf, '\n', n);
-        if (cp)
-                len = cp - buf;
-        if (len == sizeof _enabled - 1
-            && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+        if (sysfs_streq(buf, _enabled))
                 device_set_wakeup_enable(dev, 1);
-        else if (len == sizeof _disabled - 1
-                 && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+        else if (sysfs_streq(buf, _disabled))
                 device_set_wakeup_enable(dev, 0);
         else
                 return -EINVAL;
         return n;
 }
 
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
 
 static ssize_t wakeup_count_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
         unsigned long count = 0;
         bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
         return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
 
 static ssize_t wakeup_active_count_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+                                struct device_attribute *attr,
+                                char *buf)
 {
         unsigned long count = 0;
         bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
         return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
 
 static ssize_t wakeup_abort_count_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
         unsigned long count = 0;
         bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
         return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
 
 static ssize_t wakeup_expire_count_show(struct device *dev,
                                 struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
         return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
 
 static ssize_t wakeup_active_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         unsigned int active = 0;
         bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
         return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
 
-static ssize_t wakeup_total_time_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
 {
         s64 msec = 0;
         bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
         return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
 
-static ssize_t wakeup_max_time_show(struct device *dev,
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         s64 msec = 0;
         bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
         return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
 
-static ssize_t wakeup_last_time_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
 {
         s64 msec = 0;
         bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
         return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
 
 #ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
         s64 msec = 0;
         bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
         return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
-        wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
 #endif /* CONFIG_PM_AUTOSLEEP */
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
+static ssize_t runtime_usage_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
         return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
 }
+static DEVICE_ATTR_RO(runtime_usage);
 
-static ssize_t rtpm_children_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
 {
         return sprintf(buf, "%d\n", dev->power.ignore_children ?
                 0 : atomic_read(&dev->power.child_count));
 }
+static DEVICE_ATTR_RO(runtime_active_kids);
 
-static ssize_t rtpm_enabled_show(struct device *dev,
+static ssize_t runtime_enabled_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
-        if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+        if (dev->power.disable_depth && (dev->power.runtime_auto == false))
                 return sprintf(buf, "disabled & forbidden\n");
-        else if (dev->power.disable_depth)
+        if (dev->power.disable_depth)
                 return sprintf(buf, "disabled\n");
-        else if (dev->power.runtime_auto == false)
+        if (dev->power.runtime_auto == false)
                 return sprintf(buf, "forbidden\n");
         return sprintf(buf, "enabled\n");
 }
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
 
 #ifdef CONFIG_PM_SLEEP
 static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
 static ssize_t async_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t n)
 {
-        char *cp;
-        int len = n;
-
-        cp = memchr(buf, '\n', n);
-        if (cp)
-                len = cp - buf;
-        if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+        if (sysfs_streq(buf, _enabled))
                 device_enable_async_suspend(dev);
-        else if (len == sizeof _disabled - 1 &&
-                 strncmp(buf, _disabled, len) == 0)
+        else if (sysfs_streq(buf, _disabled))
                 device_disable_async_suspend(dev);
         else
                 return -EINVAL;
         return n;
 }
 
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
 
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
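The PCI hunks below mirror the ACPI ones above: suspend_noirq records whether the device may stay suspended, and resume_noirq bails out early when the core says the resume may be skipped. The generic shape of that handshake, using placeholder foo_* names rather than a real bus type, is:

    /* Placeholder bus-type callbacks illustrating the may_skip_resume handshake. */
    static int foo_suspend_noirq(struct device *dev)
    {
            if (dev_pm_smart_suspend_and_suspended(dev)) {
                    dev->power.may_skip_resume = true;     /* callbacks skipped */
                    return 0;
            }

            /* ... invoke the driver's noirq suspend callback here ... */

            /* Wakeup settings usable for runtime PM => resume may be skipped. */
            dev->power.may_skip_resume = device_may_wakeup(dev) ||
                                            !device_can_wakeup(dev);
            return 0;
    }

    static int foo_resume_noirq(struct device *dev)
    {
            if (dev_pm_may_skip_resume(dev))
                    return 0;

            /* ... normal noirq resume work ... */
            return 0;
    }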
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..3cf2da22acf2 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
         pm_generic_complete(dev);
 
         /* Resume device if platform firmware has put it in reset-power-on */
-        if (dev->power.direct_complete && pm_resume_via_firmware()) {
+        if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
                 pci_power_t pre_sleep_state = pci_dev->current_state;
 
                 pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
         struct pci_dev *pci_dev = to_pci_dev(dev);
         const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-        if (dev_pm_smart_suspend_and_suspended(dev))
+        if (dev_pm_smart_suspend_and_suspended(dev)) {
+                dev->power.may_skip_resume = true;
                 return 0;
+        }
 
         if (pci_has_legacy_pm_support(pci_dev))
                 return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
 Fixup:
         pci_fixup_device(pci_fixup_suspend_late, pci_dev);
 
+        /*
+         * If the target system sleep state is suspend-to-idle, it is sufficient
+         * to check whether or not the device's wakeup settings are good for
+         * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+         * pci_pm_complete() to take care of fixing up the device's state
+         * anyway, if need be.
+         */
+        dev->power.may_skip_resume = device_may_wakeup(dev) ||
+                                        !device_can_wakeup(dev);
+
         return 0;
 }
 
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
         struct device_driver *drv = dev->driver;
         int error = 0;
 
+        if (dev_pm_may_skip_resume(dev))
+                return 0;
+
         /*
          * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
          * during system suspend, so update their runtime PM status to "active"