author    Linus Torvalds <torvalds@linux-foundation.org>    2012-05-23 17:07:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-05-23 17:07:06 -0400
commit    468f4d1a855f8039dabf441b8bf68cae264033ff (patch)
tree      303ac5bc1ac3f86f136a30f9356e84f20dcbf13f /drivers/base
parent    eb2689e06b3526c7684b09beecf26070f05ee825 (diff)
parent    8714c8d74d313c3ba27bf9c2aaacb1ad71c644f8 (diff)
Merge tag 'pm-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:

 - Implementation of opportunistic suspend (autosleep) and user space
   interface for manipulating wakeup sources.

 - Hibernate updates from Bojan Smojver and Minho Ban.

 - Updates of the runtime PM core and generic PM domains framework
   related to PM QoS.

 - Assorted fixes.

* tag 'pm-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (25 commits)
  epoll: Fix user space breakage related to EPOLLWAKEUP
  PM / Domains: Make it possible to add devices to inactive domains
  PM / Hibernate: Use get_gendisk to verify partition if resume_file is integer format
  PM / Domains: Fix computation of maximum domain off time
  PM / Domains: Fix link checking when add subdomain
  PM / Sleep: User space wakeup sources garbage collector Kconfig option
  PM / Sleep: Make the limit of user space wakeup sources configurable
  PM / Documentation: suspend-and-cpuhotplug.txt: Fix typo
  PM / Domains: Cache device stop and domain power off governor results, v3
  PM / Domains: Make device removal more straightforward
  PM / Sleep: Fix a mistake in a conditional in autosleep_store()
  epoll: Add a flag, EPOLLWAKEUP, to prevent suspend while epoll events are ready
  PM / QoS: Create device constraints objects on notifier registration
  PM / Runtime: Remove device fields related to suspend time, v2
  PM / Domains: Rework default domain power off governor function, v2
  PM / Domains: Rework default device stop governor function, v2
  PM / Sleep: Add user space interface for manipulating wakeup sources, v3
  PM / Sleep: Add "prevent autosleep time" statistics to wakeup sources
  PM / Sleep: Implement opportunistic sleep, v2
  PM / Sleep: Add wakeup_source_activate and wakeup_source_deactivate tracepoints
  ...
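Context for the two epoll entries in the shortlog: EPOLLWAKEUP lets a process hold a wakeup source while an epoll event is ready, so opportunistic suspend cannot race with event delivery. A minimal user space sketch; the flag needs the block-suspend capability (called CAP_EPOLLWAKEUP when this was merged, renamed CAP_BLOCK_SUSPEND shortly after), and per the fix above the kernel silently drops the flag without it rather than failing epoll_ctl():

    #include <sys/epoll.h>
    #include <unistd.h>
    #include <stdio.h>

    #ifndef EPOLLWAKEUP             /* older libc headers may lack it */
    #define EPOLLWAKEUP (1u << 29)
    #endif

    int main(void)
    {
            int pipefd[2];
            int epfd = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN | EPOLLWAKEUP };

            if (epfd < 0 || pipe(pipefd) < 0)
                    return 1;

            ev.data.fd = pipefd[0];
            if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
                    perror("epoll_ctl");
                    return 1;
            }
            /* epoll_wait() now holds a wakeup source while events are ready. */
            return 0;
    }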
Diffstat (limited to 'drivers/base')

 -rw-r--r--  drivers/base/power/domain.c           176
 -rw-r--r--  drivers/base/power/domain_governor.c  166
 -rw-r--r--  drivers/base/power/main.c              10
 -rw-r--r--  drivers/base/power/qos.c               19
 -rw-r--r--  drivers/base/power/runtime.c          103
 -rw-r--r--  drivers/base/power/sysfs.c             54
 -rw-r--r--  drivers/base/power/wakeup.c           174

 7 files changed, 449 insertions(+), 253 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 73ce9fbe9839..83aa694a8efe 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sched.h>
@@ -38,11 +39,13 @@
 	ktime_t __start = ktime_get();					\
 	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);	\
 	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));	\
-	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);	\
-	if (__elapsed > __gpd_data->td.field) {				\
-		__gpd_data->td.field = __elapsed;			\
+	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;		\
+	if (!__retval && __elapsed > __td->field) {			\
+		__td->field = __elapsed;				\
 		dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
 			__elapsed);					\
+		genpd->max_off_time_changed = true;			\
+		__td->constraint_changed = true;			\
 	}								\
 	__retval;							\
 })
@@ -211,6 +214,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 	if (elapsed_ns > genpd->power_on_latency_ns) {
 		genpd->power_on_latency_ns = elapsed_ns;
+		genpd->max_off_time_changed = true;
 		if (genpd->name)
 			pr_warning("%s: Power-on latency exceeded, "
 				"new value %lld ns\n", genpd->name,
@@ -247,6 +251,53 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+				     unsigned long val, void *ptr)
+{
+	struct generic_pm_domain_data *gpd_data;
+	struct device *dev;
+
+	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+
+	mutex_lock(&gpd_data->lock);
+	dev = gpd_data->base.dev;
+	if (!dev) {
+		mutex_unlock(&gpd_data->lock);
+		return NOTIFY_DONE;
+	}
+	mutex_unlock(&gpd_data->lock);
+
+	for (;;) {
+		struct generic_pm_domain *genpd;
+		struct pm_domain_data *pdd;
+
+		spin_lock_irq(&dev->power.lock);
+
+		pdd = dev->power.subsys_data ?
+				dev->power.subsys_data->domain_data : NULL;
+		if (pdd) {
+			to_gpd_data(pdd)->td.constraint_changed = true;
+			genpd = dev_to_genpd(dev);
+		} else {
+			genpd = ERR_PTR(-ENODATA);
+		}
+
+		spin_unlock_irq(&dev->power.lock);
+
+		if (!IS_ERR(genpd)) {
+			mutex_lock(&genpd->lock);
+			genpd->max_off_time_changed = true;
+			mutex_unlock(&genpd->lock);
+		}
+
+		dev = dev->parent;
+		if (!dev || dev->power.ignore_children)
+			break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
  * @pdd: Domain data of the device to save the state of.
@@ -435,6 +486,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 	if (elapsed_ns > genpd->power_off_latency_ns) {
 		genpd->power_off_latency_ns = elapsed_ns;
+		genpd->max_off_time_changed = true;
 		if (genpd->name)
 			pr_warning("%s: Power-off latency exceeded, "
 				"new value %lld ns\n", genpd->name,
@@ -443,17 +495,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	genpd->power_off_time = ktime_get();
-
-	/* Update PM QoS information for devices in the domain. */
-	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
-		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
-
-		pm_runtime_update_max_time_suspended(pdd->dev,
-					td->start_latency_ns +
-					td->restore_state_latency_ns +
-					genpd->power_on_latency_ns);
-	}
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
@@ -514,9 +555,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (ret)
 		return ret;
 
-	pm_runtime_update_max_time_suspended(dev,
-			dev_gpd_data(dev)->td.start_latency_ns);
-
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
 	 * off, so it can't use mutexes.
@@ -613,6 +651,12 @@ void pm_genpd_poweroff_unused(void)
 
 #else
 
+static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+					    unsigned long val, void *ptr)
+{
+	return NOTIFY_DONE;
+}
+
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #define pm_genpd_runtime_suspend	NULL
@@ -1209,12 +1253,15 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	genpd_acquire_lock(genpd);
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data)
+		return -ENOMEM;
 
-	if (genpd->status == GPD_STATE_POWER_OFF) {
-		ret = -EINVAL;
-		goto out;
-	}
+	mutex_init(&gpd_data->lock);
+	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+	genpd_acquire_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -1227,26 +1274,35 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 		goto out;
 	}
 
-	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
-	if (!gpd_data) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	genpd->device_count++;
+	genpd->max_off_time_changed = true;
 
-	dev->pm_domain = &genpd->domain;
 	dev_pm_get_subsys_data(dev);
+
+	mutex_lock(&gpd_data->lock);
+	spin_lock_irq(&dev->power.lock);
+	dev->pm_domain = &genpd->domain;
 	dev->power.subsys_data->domain_data = &gpd_data->base;
 	gpd_data->base.dev = dev;
-	gpd_data->need_restore = false;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
 	if (td)
 		gpd_data->td = *td;
 
+	gpd_data->td.constraint_changed = true;
+	gpd_data->td.effective_constraint_ns = -1;
+	spin_unlock_irq(&dev->power.lock);
+	mutex_unlock(&gpd_data->lock);
+
+	genpd_release_lock(genpd);
+
+	return 0;
+
  out:
 	genpd_release_lock(genpd);
 
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
 	return ret;
 }
 
@@ -1290,12 +1346,15 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
+	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
-	int ret = -EINVAL;
+	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
+	    ||  IS_ERR_OR_NULL(dev->pm_domain)
+	    ||  pd_to_genpd(dev->pm_domain) != genpd)
 		return -EINVAL;
 
 	genpd_acquire_lock(genpd);
@@ -1305,21 +1364,27 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		if (pdd->dev != dev)
-			continue;
+	genpd->device_count--;
+	genpd->max_off_time_changed = true;
 
-		list_del_init(&pdd->list_node);
-		pdd->dev = NULL;
-		dev_pm_put_subsys_data(dev);
-		dev->pm_domain = NULL;
-		kfree(to_gpd_data(pdd));
+	spin_lock_irq(&dev->power.lock);
+	dev->pm_domain = NULL;
+	pdd = dev->power.subsys_data->domain_data;
+	list_del_init(&pdd->list_node);
+	dev->power.subsys_data->domain_data = NULL;
+	spin_unlock_irq(&dev->power.lock);
 
-		genpd->device_count--;
+	gpd_data = to_gpd_data(pdd);
+	mutex_lock(&gpd_data->lock);
+	pdd->dev = NULL;
+	mutex_unlock(&gpd_data->lock);
 
-		ret = 0;
-		break;
-	}
+	genpd_release_lock(genpd);
+
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
+	dev_pm_put_subsys_data(dev);
+	return 0;
 
  out:
 	genpd_release_lock(genpd);
@@ -1348,6 +1413,26 @@ void pm_genpd_dev_always_on(struct device *dev, bool val)
 EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
 
 /**
+ * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "need restore" flag.
+ */
+void pm_genpd_dev_need_restore(struct device *dev, bool val)
+{
+	struct pm_subsys_data *psd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	psd = dev_to_psd(dev);
+	if (psd && psd->domain_data)
+		to_gpd_data(psd->domain_data)->need_restore = val;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
+
+/**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
  * @subdomain: Subdomain to be added.
@@ -1378,7 +1463,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
@@ -1690,6 +1775,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->max_off_time_ns = -1;
+	genpd->max_off_time_changed = true;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
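The new genpd_dev_pm_qos_notifier() above is attached to each device through dev_pm_qos_add_notifier() so that a QoS change invalidates the cached governor results up the parent chain. A minimal sketch of a driver using that same notifier API (the foo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/notifier.h>
    #include <linux/pm_qos.h>

    struct foo_ctx {
            struct notifier_block nb;   /* embedded; recovered via container_of() */
    };

    static int foo_qos_notify(struct notifier_block *nb, unsigned long val,
                              void *ptr)
    {
            /* 'val' carries the new aggregate constraint for the device. */
            pr_info("foo: new PM QoS target %lu us\n", val);
            return NOTIFY_DONE;
    }

    static int foo_watch_qos(struct device *dev, struct foo_ctx *ctx)
    {
            ctx->nb.notifier_call = foo_qos_notify;
            /* With the qos.c hunk below, this also allocates the device's
             * constraints object on demand instead of silently doing nothing. */
            return dev_pm_qos_add_notifier(dev, &ctx->nb);
    }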
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 66a265bf5867..28dee3053f1f 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,6 +14,31 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+	s64 *constraint_ns_p = data;
+	s32 constraint_ns = -1;
+
+	if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+		constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+
+	if (constraint_ns < 0) {
+		constraint_ns = dev_pm_qos_read_value(dev);
+		constraint_ns *= NSEC_PER_USEC;
+	}
+	if (constraint_ns == 0)
+		return 0;
+
+	/*
+	 * constraint_ns cannot be negative here, because the device has been
+	 * suspended.
+	 */
+	if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+		*constraint_ns_p = constraint_ns;
+
+	return 0;
+}
+
 /**
  * default_stop_ok - Default PM domain governor routine for stopping devices.
  * @dev: Device to check.
@@ -21,14 +46,52 @@
 bool default_stop_ok(struct device *dev)
 {
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	unsigned long flags;
+	s64 constraint_ns;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
-		return true;
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!td->constraint_changed) {
+		bool ret = td->cached_stop_ok;
 
-	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
-		&& td->break_even_ns < dev->power.max_time_suspended_ns;
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+		return ret;
+	}
+	td->constraint_changed = false;
+	td->cached_stop_ok = false;
+	td->effective_constraint_ns = -1;
+	constraint_ns = __dev_pm_qos_read_value(dev);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (constraint_ns < 0)
+		return false;
+
+	constraint_ns *= NSEC_PER_USEC;
+	/*
+	 * We can walk the children without any additional locking, because
+	 * they all have been suspended at this point and their
+	 * effective_constraint_ns fields won't be modified in parallel with us.
+	 */
+	if (!dev->power.ignore_children)
+		device_for_each_child(dev, &constraint_ns,
+				      dev_update_qos_constraint);
+
+	if (constraint_ns > 0) {
+		constraint_ns -= td->start_latency_ns;
+		if (constraint_ns == 0)
+			return false;
+	}
+	td->effective_constraint_ns = constraint_ns;
+	td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
+				constraint_ns == 0;
+	/*
+	 * The children have been suspended already, so we don't need to take
+	 * their stop latencies into account here.
+	 */
+	return td->cached_stop_ok;
 }
 
 /**
@@ -42,9 +105,27 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 	struct generic_pm_domain *genpd = pd_to_genpd(pd);
 	struct gpd_link *link;
 	struct pm_domain_data *pdd;
-	s64 min_dev_off_time_ns;
+	s64 min_off_time_ns;
 	s64 off_on_time_ns;
-	ktime_t time_now = ktime_get();
+
+	if (genpd->max_off_time_changed) {
+		struct gpd_link *link;
+
+		/*
+		 * We have to invalidate the cached results for the masters, so
+		 * use the observation that default_power_down_ok() is not
+		 * going to be called for any master until this instance
+		 * returns.
+		 */
+		list_for_each_entry(link, &genpd->slave_links, slave_node)
+			link->master->max_off_time_changed = true;
+
+		genpd->max_off_time_changed = false;
+		genpd->cached_power_down_ok = false;
+		genpd->max_off_time_ns = -1;
+	} else {
+		return genpd->cached_power_down_ok;
+	}
 
 	off_on_time_ns = genpd->power_off_latency_ns +
 		genpd->power_on_latency_ns;
@@ -61,6 +142,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 			to_gpd_data(pdd)->td.save_state_latency_ns;
 	}
 
+	min_off_time_ns = -1;
 	/*
 	 * Check if subdomains can be off for enough time.
 	 *
@@ -73,8 +155,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		if (sd_max_off_ns < 0)
 			continue;
 
-		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
-						       sd->power_off_time));
 		/*
 		 * Check if the subdomain is allowed to be off long enough for
 		 * the current domain to turn off and on (that's how much time
@@ -82,60 +162,64 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		 */
 		if (sd_max_off_ns <= off_on_time_ns)
 			return false;
+
+		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+			min_off_time_ns = sd_max_off_ns;
 	}
 
 	/*
 	 * Check if the devices in the domain can be off enough time.
 	 */
-	min_dev_off_time_ns = -1;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 		struct gpd_timing_data *td;
-		struct device *dev = pdd->dev;
-		s64 dev_off_time_ns;
+		s64 constraint_ns;
 
-		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+		if (!pdd->dev->driver)
 			continue;
 
+		/*
+		 * Check if the device is allowed to be off long enough for the
+		 * domain to turn off and on (that's how much time it will
+		 * have to wait worst case).
+		 */
 		td = &to_gpd_data(pdd)->td;
-		dev_off_time_ns = dev->power.max_time_suspended_ns -
-			(td->start_latency_ns + td->restore_state_latency_ns +
-				ktime_to_ns(ktime_sub(time_now,
-						      dev->power.suspend_time)));
-		if (dev_off_time_ns <= off_on_time_ns)
-			return false;
-
-		if (min_dev_off_time_ns > dev_off_time_ns
-		    || min_dev_off_time_ns < 0)
-			min_dev_off_time_ns = dev_off_time_ns;
-	}
+		constraint_ns = td->effective_constraint_ns;
+		/* default_stop_ok() need not be called before us. */
+		if (constraint_ns < 0) {
+			constraint_ns = dev_pm_qos_read_value(pdd->dev);
+			constraint_ns *= NSEC_PER_USEC;
+		}
+		if (constraint_ns == 0)
+			continue;
 
-	if (min_dev_off_time_ns < 0) {
 		/*
-		 * There are no latency constraints, so the domain can spend
-		 * arbitrary time in the "off" state.
+		 * constraint_ns cannot be negative here, because the device has
+		 * been suspended.
 		 */
-		genpd->max_off_time_ns = -1;
-		return true;
+		constraint_ns -= td->restore_state_latency_ns;
+		if (constraint_ns <= off_on_time_ns)
+			return false;
+
+		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+			min_off_time_ns = constraint_ns;
 	}
 
+	genpd->cached_power_down_ok = true;
+
 	/*
-	 * The difference between the computed minimum delta and the time needed
-	 * to turn the domain on is the maximum theoretical time this domain can
-	 * spend in the "off" state.
+	 * If the computed minimum device off time is negative, there are no
+	 * latency constraints, so the domain can spend arbitrary time in the
+	 * "off" state.
 	 */
-	min_dev_off_time_ns -= genpd->power_on_latency_ns;
+	if (min_off_time_ns < 0)
+		return true;
 
 	/*
-	 * If the difference between the computed minimum delta and the time
-	 * needed to turn the domain off and back on on is smaller than the
-	 * domain's power break even time, removing power from the domain is not
-	 * worth it.
+	 * The difference between the computed minimum subdomain or device off
+	 * time and the time needed to turn the domain on is the maximum
+	 * theoretical time this domain can spend in the "off" state.
 	 */
-	if (genpd->break_even_ns >
-	    min_dev_off_time_ns - genpd->power_off_latency_ns)
-		return false;
-
-	genpd->max_off_time_ns = min_dev_off_time_ns;
+	genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
 	return true;
 }
 
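default_stop_ok() and default_power_down_ok() are exposed through simple_qos_governor, so a platform opts into this QoS-driven policy when it initializes its domain. A hedged sketch; my_pd and its callbacks are hypothetical placeholders:

    #include <linux/pm_domain.h>

    static int my_pd_power_on(struct generic_pm_domain *genpd)  { return 0; }
    static int my_pd_power_off(struct generic_pm_domain *genpd) { return 0; }

    static struct generic_pm_domain my_pd = {
            .name      = "my_pd",
            .power_on  = my_pd_power_on,
            .power_off = my_pd_power_off,
    };

    static int __init my_pd_setup(void)
    {
            /*
             * simple_qos_governor bundles default_stop_ok() and
             * default_power_down_ok(); after this merge both cache their
             * verdicts until a constraint or a measured latency changes.
             */
            pm_genpd_init(&my_pd, &simple_qos_governor, true /* is_off */);
            return 0;
    }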
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b462c0e341cb..e0fb5b0435a3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -889,6 +889,11 @@ static int dpm_suspend_noirq(pm_message_t state)
 		if (!list_empty(&dev->power.entry))
 			list_move(&dev->power.entry, &dpm_noirq_list);
 		put_device(dev);
+
+		if (pm_wakeup_pending()) {
+			error = -EBUSY;
+			break;
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	if (error)
@@ -962,6 +967,11 @@ static int dpm_suspend_late(pm_message_t state)
 		if (!list_empty(&dev->power.entry))
 			list_move(&dev->power.entry, &dpm_late_early_list);
 		put_device(dev);
+
+		if (pm_wakeup_pending()) {
+			error = -EBUSY;
+			break;
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	if (error)
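The two hunks above make the noirq and late suspend loops poll pm_wakeup_pending() after each device, so a wakeup event reported mid-transition aborts the whole sequence. A sketch of the reporting side, as a hypothetical wakeup interrupt handler:

    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    static irqreturn_t foo_wake_irq(int irq, void *data)
    {
            struct device *dev = data;

            /*
             * Registers a wakeup event that stays pending for ~100 ms;
             * dpm_suspend_noirq()/dpm_suspend_late() now notice it via
             * pm_wakeup_pending() and bail out with -EBUSY.
             */
            pm_wakeup_event(dev, 100);
            return IRQ_HANDLED;
    }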
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 71855570922d..fd849a2c4fa8 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -352,21 +352,26 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
  *
  * Will register the notifier into a notification chain that gets called
  * upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or error code will be returned if that fails).
  */
 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 {
-	int retval = 0;
+	int ret = 0;
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	/* Silently return if the constraints object is not present. */
-	if (dev->power.constraints)
-		retval = blocking_notifier_chain_register(
-				dev->power.constraints->notifiers,
-				notifier);
+	if (!dev->power.constraints)
+		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
+			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+
+	if (!ret)
+		ret = blocking_notifier_chain_register(
+				dev->power.constraints->notifiers, notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
-	return retval;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
 
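For reference, the requests that feed this notifier chain are added with dev_pm_qos_add_request(); the 3.5-era signature takes the device, a caller-owned request object, and a value in microseconds. A hedged sketch (foo_req and foo_constrain() are hypothetical):

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request foo_req;

    static int foo_constrain(struct device *dev)
    {
            int ret;

            /* Cap the device's acceptable latency at 200 us. */
            ret = dev_pm_qos_add_request(dev, &foo_req, 200);
            if (ret < 0)
                    return ret;

            /* Tightening it later fires the notifiers registered above. */
            return dev_pm_qos_update_request(&foo_req, 100);
    }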
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bd0f3949bcf9..59894873a3b3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -282,47 +282,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 	return retval != -EACCES ? retval : -EIO;
 }
 
-struct rpm_qos_data {
-	ktime_t time_now;
-	s64 constraint_ns;
-};
-
-/**
- * rpm_update_qos_constraint - Update a given PM QoS constraint data.
- * @dev: Device whose timing data to use.
- * @data: PM QoS constraint data to update.
- *
- * Use the suspend timing data of @dev to update PM QoS constraint data pointed
- * to by @data.
- */
-static int rpm_update_qos_constraint(struct device *dev, void *data)
-{
-	struct rpm_qos_data *qos = data;
-	unsigned long flags;
-	s64 delta_ns;
-	int ret = 0;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	if (dev->power.max_time_suspended_ns < 0)
-		goto out;
-
-	delta_ns = dev->power.max_time_suspended_ns -
-		ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
-	if (delta_ns <= 0) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
-		qos->constraint_ns = delta_ns;
-
- out:
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return ret;
-}
-
 /**
  * rpm_suspend - Carry out runtime suspend of given device.
  * @dev: Device to suspend.
@@ -349,7 +308,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
 	struct device *parent = NULL;
-	struct rpm_qos_data qos;
 	int retval;
 
 	trace_rpm_suspend(dev, rpmflags);
@@ -445,38 +403,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
-	qos.constraint_ns = __dev_pm_qos_read_value(dev);
-	if (qos.constraint_ns < 0) {
-		/* Negative constraint means "never suspend". */
+	if (__dev_pm_qos_read_value(dev) < 0) {
+		/* Negative PM QoS constraint means "never suspend". */
 		retval = -EPERM;
 		goto out;
 	}
-	qos.constraint_ns *= NSEC_PER_USEC;
-	qos.time_now = ktime_get();
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (!dev->power.ignore_children) {
-		if (dev->power.irq_safe)
-			spin_unlock(&dev->power.lock);
-		else
-			spin_unlock_irq(&dev->power.lock);
-
-		retval = device_for_each_child(dev, &qos,
-					       rpm_update_qos_constraint);
-
-		if (dev->power.irq_safe)
-			spin_lock(&dev->power.lock);
-		else
-			spin_lock_irq(&dev->power.lock);
-
-		if (retval)
-			goto fail;
-	}
-
-	dev->power.suspend_time = qos.time_now;
-	dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
-
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
@@ -529,8 +463,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
  fail:
 	__update_runtime_status(dev, RPM_ACTIVE);
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
 	dev->power.deferred_resume = false;
 	wake_up_all(&dev->power.wait_queue);
 
@@ -704,9 +636,6 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
-
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pm_domain)
@@ -1369,9 +1298,6 @@ void pm_runtime_init(struct device *dev)
 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
 			(unsigned long)dev);
 
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
-
 	init_waitqueue_head(&dev->power.wait_queue);
 }
 
@@ -1389,28 +1315,3 @@ void pm_runtime_remove(struct device *dev)
 	if (dev->power.irq_safe && dev->parent)
 		pm_runtime_put_sync(dev->parent);
 }
-
-/**
- * pm_runtime_update_max_time_suspended - Update device's suspend time data.
- * @dev: Device to handle.
- * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
- *
- * Update the device's power.max_time_suspended_ns field by subtracting
- * @delta_ns from it.  The resulting value of power.max_time_suspended_ns is
- * never negative.
- */
-void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
-		if (dev->power.max_time_suspended_ns > delta_ns)
-			dev->power.max_time_suspended_ns -= delta_ns;
-		else
-			dev->power.max_time_suspended_ns = 0;
-	}
-
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-}
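With the suspend-time bookkeeping removed, the only QoS input left in rpm_suspend() is the sign of the aggregate constraint. A sketch of the resulting rule, factored out purely for illustration (foo_may_runtime_suspend() is hypothetical):

    #include <linux/pm_qos.h>

    static bool foo_may_runtime_suspend(struct device *dev)
    {
            /* A negative aggregate constraint now simply means
             * "never suspend"; anything else permits suspend. */
            return dev_pm_qos_read_value(dev) >= 0;
    }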
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95c12f6cb5b9..48be2ad4dd2c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -314,22 +314,41 @@ static ssize_t wakeup_active_count_show(struct device *dev,
 
 static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
 
-static ssize_t wakeup_hit_count_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t wakeup_abort_count_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	unsigned long count = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		count = dev->power.wakeup->wakeup_count;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
 {
 	unsigned long count = 0;
 	bool enabled = false;
 
 	spin_lock_irq(&dev->power.lock);
 	if (dev->power.wakeup) {
-		count = dev->power.wakeup->hit_count;
+		count = dev->power.wakeup->expire_count;
 		enabled = true;
 	}
 	spin_unlock_irq(&dev->power.lock);
 	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
 }
 
-static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
+static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
 
 static ssize_t wakeup_active_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -398,6 +417,27 @@ static ssize_t wakeup_last_time_show(struct device *dev,
 }
 
 static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	s64 msec = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
+		   wakeup_prevent_sleep_time_show, NULL);
+#endif /* CONFIG_PM_AUTOSLEEP */
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_ADVANCED_DEBUG
@@ -486,11 +526,15 @@ static struct attribute *wakeup_attrs[] = {
 	&dev_attr_wakeup.attr,
 	&dev_attr_wakeup_count.attr,
 	&dev_attr_wakeup_active_count.attr,
-	&dev_attr_wakeup_hit_count.attr,
+	&dev_attr_wakeup_abort_count.attr,
+	&dev_attr_wakeup_expire_count.attr,
 	&dev_attr_wakeup_active.attr,
 	&dev_attr_wakeup_total_time_ms.attr,
 	&dev_attr_wakeup_max_time_ms.attr,
 	&dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+	&dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
 #endif
 	NULL,
 };
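The renamed and added attributes surface under each device's power/ directory in sysfs. A small user space reader, assuming a hypothetical device path:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical device; attribute names match the hunks above. */
            const char *attr =
                    "/sys/devices/platform/foo/power/wakeup_expire_count";
            char buf[32];
            FILE *f = fopen(attr, "r");

            if (!f)
                    return 1;
            /* Prints an empty line when wakeup is disabled for the device. */
            if (fgets(buf, sizeof(buf), f))
                    printf("expire_count: %s", buf);
            fclose(f);
            return 0;
    }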
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2a3e581b8dcd..cbb463b3a750 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -14,16 +14,15 @@
 #include <linux/suspend.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <trace/events/power.h>
 
 #include "power.h"
 
-#define TIMEOUT		100
-
 /*
  * If set, the suspend/hibernate code will abort transitions to a sleep state
  * if wakeup events are registered during or immediately before the transition.
  */
-bool events_check_enabled;
+bool events_check_enabled __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -52,6 +51,8 @@ static void pm_wakeup_timer_fn(unsigned long data);
 
 static LIST_HEAD(wakeup_sources);
 
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
 /**
  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
  * @ws: Wakeup source to prepare.
@@ -132,6 +133,7 @@ void wakeup_source_add(struct wakeup_source *ws)
 	spin_lock_init(&ws->lock);
 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
 	ws->active = false;
+	ws->last_time = ktime_get();
 
 	spin_lock_irq(&events_lock);
 	list_add_rcu(&ws->entry, &wakeup_sources);
@@ -374,12 +376,33 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
  */
 static void wakeup_source_activate(struct wakeup_source *ws)
 {
+	unsigned int cec;
+
 	ws->active = true;
 	ws->active_count++;
 	ws->last_time = ktime_get();
+	if (ws->autosleep_enabled)
+		ws->start_prevent_time = ws->last_time;
 
 	/* Increment the counter of events in progress. */
-	atomic_inc(&combined_event_count);
+	cec = atomic_inc_return(&combined_event_count);
+
+	trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws)
+{
+	ws->event_count++;
+	/* This is racy, but the counter is approximate anyway. */
+	if (events_check_enabled)
+		ws->wakeup_count++;
+
+	if (!ws->active)
+		wakeup_source_activate(ws);
 }
 
 /**
@@ -397,10 +420,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
 
 	spin_lock_irqsave(&ws->lock, flags);
 
-	ws->event_count++;
-	if (!ws->active)
-		wakeup_source_activate(ws);
-
+	wakeup_source_report_event(ws);
 	del_timer(&ws->timer);
 	ws->timer_expires = 0;
 
@@ -432,6 +452,17 @@ void pm_stay_awake(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_stay_awake);
 
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+					     ktime_t now) {}
+#endif
+
 /**
  * wakup_source_deactivate - Mark given wakeup source as inactive.
  * @ws: Wakeup source to handle.
@@ -442,6 +473,7 @@ EXPORT_SYMBOL_GPL(pm_stay_awake);
  */
 static void wakeup_source_deactivate(struct wakeup_source *ws)
 {
+	unsigned int cnt, inpr, cec;
 	ktime_t duration;
 	ktime_t now;
 
@@ -468,14 +500,23 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
 		ws->max_time = duration;
 
+	ws->last_time = now;
 	del_timer(&ws->timer);
 	ws->timer_expires = 0;
 
+	if (ws->autosleep_enabled)
+		update_prevent_sleep_time(ws, now);
+
 	/*
 	 * Increment the counter of registered wakeup events and decrement the
 	 * couter of wakeup events in progress simultaneously.
 	 */
-	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
+	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+	trace_wakeup_source_deactivate(ws->name, cec);
+
+	split_counters(&cnt, &inpr);
+	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+		wake_up(&wakeup_count_wait_queue);
 }
 
 /**
@@ -536,8 +577,10 @@ static void pm_wakeup_timer_fn(unsigned long data)
 	spin_lock_irqsave(&ws->lock, flags);
 
 	if (ws->active && ws->timer_expires
-	    && time_after_eq(jiffies, ws->timer_expires))
+	    && time_after_eq(jiffies, ws->timer_expires)) {
 		wakeup_source_deactivate(ws);
+		ws->expire_count++;
+	}
 
 	spin_unlock_irqrestore(&ws->lock, flags);
 }
@@ -564,9 +607,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
 
 	spin_lock_irqsave(&ws->lock, flags);
 
-	ws->event_count++;
-	if (!ws->active)
-		wakeup_source_activate(ws);
+	wakeup_source_report_event(ws);
 
 	if (!msec) {
 		wakeup_source_deactivate(ws);
@@ -609,24 +650,6 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
 /**
- * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
- */
-static void pm_wakeup_update_hit_counts(void)
-{
-	unsigned long flags;
-	struct wakeup_source *ws;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
-		spin_lock_irqsave(&ws->lock, flags);
-		if (ws->active)
-			ws->hit_count++;
-		spin_unlock_irqrestore(&ws->lock, flags);
-	}
-	rcu_read_unlock();
-}
-
-/**
  * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
  * Compare the current number of registered wakeup events with its preserved
@@ -648,32 +671,38 @@ bool pm_wakeup_pending(void)
 		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
-	if (ret)
-		pm_wakeup_update_hit_counts();
 	return ret;
 }
 
 /**
  * pm_get_wakeup_count - Read the number of registered wakeup events.
  * @count: Address to store the value at.
+ * @block: Whether or not to block.
  *
- * Store the number of registered wakeup events at the address in @count. Block
- * if the current number of wakeup events being processed is nonzero.
+ * Store the number of registered wakeup events at the address in @count. If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
 *
- * Return 'false' if the wait for the number of wakeup events being processed to
- * drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return 'true'.
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero. Otherwise return 'true'.
 */
-bool pm_get_wakeup_count(unsigned int *count)
+bool pm_get_wakeup_count(unsigned int *count, bool block)
 {
 	unsigned int cnt, inpr;
 
-	for (;;) {
-		split_counters(&cnt, &inpr);
-		if (inpr == 0 || signal_pending(current))
-			break;
-		pm_wakeup_update_hit_counts();
-		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+	if (block) {
+		DEFINE_WAIT(wait);
+
+		for (;;) {
+			prepare_to_wait(&wakeup_count_wait_queue, &wait,
+					TASK_INTERRUPTIBLE);
+			split_counters(&cnt, &inpr);
+			if (inpr == 0 || signal_pending(current))
+				break;
+
+			schedule();
+		}
+		finish_wait(&wakeup_count_wait_queue, &wait);
 	}
 
 	split_counters(&cnt, &inpr);
@@ -703,11 +732,37 @@ bool pm_save_wakeup_count(unsigned int count)
 		events_check_enabled = true;
 	}
 	spin_unlock_irq(&events_lock);
-	if (!events_check_enabled)
-		pm_wakeup_update_hit_counts();
 	return events_check_enabled;
 }
 
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @enabled: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+	struct wakeup_source *ws;
+	ktime_t now = ktime_get();
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		spin_lock_irq(&ws->lock);
+		if (ws->autosleep_enabled != set) {
+			ws->autosleep_enabled = set;
+			if (ws->active) {
+				if (set)
+					ws->start_prevent_time = now;
+				else
+					update_prevent_sleep_time(ws, now);
+			}
+		}
+		spin_unlock_irq(&ws->lock);
+	}
+	rcu_read_unlock();
}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
 static struct dentry *wakeup_sources_stats_dentry;
 
 /**
@@ -723,27 +778,37 @@ static int print_wakeup_source_stats(struct seq_file *m,
 	ktime_t max_time;
 	unsigned long active_count;
 	ktime_t active_time;
+	ktime_t prevent_sleep_time;
 	int ret;
 
 	spin_lock_irqsave(&ws->lock, flags);
 
 	total_time = ws->total_time;
 	max_time = ws->max_time;
+	prevent_sleep_time = ws->prevent_sleep_time;
 	active_count = ws->active_count;
 	if (ws->active) {
-		active_time = ktime_sub(ktime_get(), ws->last_time);
+		ktime_t now = ktime_get();
+
+		active_time = ktime_sub(now, ws->last_time);
 		total_time = ktime_add(total_time, active_time);
 		if (active_time.tv64 > max_time.tv64)
 			max_time = active_time;
+
+		if (ws->autosleep_enabled)
+			prevent_sleep_time = ktime_add(prevent_sleep_time,
+				ktime_sub(now, ws->start_prevent_time));
 	} else {
 		active_time = ktime_set(0, 0);
 	}
 
-	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
-			"%lld\t\t%lld\t\t%lld\t\t%lld\n",
-			ws->name, active_count, ws->event_count, ws->hit_count,
+	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
+			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+			ws->name, active_count, ws->event_count,
+			ws->wakeup_count, ws->expire_count,
			ktime_to_ms(active_time), ktime_to_ms(total_time),
-			ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
+			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+			ktime_to_ms(prevent_sleep_time));
 
 	spin_unlock_irqrestore(&ws->lock, flags);
 
@@ -758,8 +823,9 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
 {
 	struct wakeup_source *ws;
 
-	seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
-		"active_since\ttotal_time\tmax_time\tlast_change\n");
+	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+		"expire_count\tactive_since\ttotal_time\tmax_time\t"
+		"last_change\tprevent_suspend_time\n");
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
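Taken together, these hunks replace hit-count polling with a wait queue and add autosleep statistics. A sketch of the producer side of that machinery, using the 3.5-era wakeup source API (the foo_* names are hypothetical):

    #include <linux/errno.h>
    #include <linux/pm_wakeup.h>

    static struct wakeup_source *foo_ws;

    static int foo_init(void)
    {
            foo_ws = wakeup_source_register("foo");
            return foo_ws ? 0 : -ENOMEM;
    }

    static void foo_drain_events(void)
    {
            __pm_stay_awake(foo_ws);        /* counted in active_count */
            /* ... hand the pending data to user space ... */
            __pm_relax(foo_ws);             /* the last deactivation wakes
                                               pm_get_wakeup_count(.., true) */
    }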