author	Rafael J. Wysocki <rjw@sisk.pl>	2012-05-11 15:14:31 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-05-11 15:14:31 -0400
commit	e6d18093ea3d1d30a4de9e29cb1676c1f4b55147 (patch)
tree	d780bacd1d41022d527c52be41bd007044bda8c6
parent	3a9da04aa0463c9ee86e2b3df6f9e55cb69cb880 (diff)
parent	b723b0eb91e08a0ee9a401c0b22c0d52966d9daa (diff)
Merge branch 'pm-domains'

* pm-domains:
  PM / Domains: Fix computation of maximum domain off time
  PM / Domains: Fix link checking when add subdomain
  PM / Domains: Cache device stop and domain power off governor results, v3
  PM / Domains: Make device removal more straightforward
  PM / QoS: Create device constraints objects on notifier registration
  PM / Runtime: Remove device fields related to suspend time, v2
  PM / Domains: Rework default domain power off governor function, v2
  PM / Domains: Rework default device stop governor function, v2
-rw-r--r--	drivers/base/power/domain.c          | 151
-rw-r--r--	drivers/base/power/domain_governor.c | 166
-rw-r--r--	drivers/base/power/qos.c             |  19
-rw-r--r--	drivers/base/power/runtime.c         | 103
-rw-r--r--	include/linux/pm.h                   |   2
-rw-r--r--	include/linux/pm_domain.h            |  11
-rw-r--r--	include/linux/pm_runtime.h           |   3

7 files changed, 258 insertions(+), 197 deletions(-)
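
Note on the governor rework above: the "Cache device stop and domain power off governor results, v3" patch makes default_stop_ok() and default_power_down_ok() return a cached verdict until a PM QoS notifier flags the constraints as changed. The snippet below is a minimal standalone sketch of that caching pattern (plain C, invented field values and helper), not the kernel code itself; the real implementation is in the domain_governor.c hunks further down.

/*
 * Illustrative-only sketch of the "recompute only when flagged" caching idea.
 * Field names mirror gpd_timing_data from the diff; everything else here
 * (the helper, the numbers) is made up for demonstration.
 */
#include <stdbool.h>
#include <stdio.h>

struct timing_data {
	long long stop_latency_ns;
	long long start_latency_ns;
	long long effective_constraint_ns;
	bool constraint_changed;	/* would be set by a QoS notifier */
	bool cached_stop_ok;		/* last computed verdict */
};

static bool stop_ok(struct timing_data *td, long long qos_constraint_ns)
{
	if (!td->constraint_changed)
		return td->cached_stop_ok;	/* fast path: nothing changed */

	/* Slow path: recompute and cache the decision. */
	td->constraint_changed = false;
	td->effective_constraint_ns = qos_constraint_ns - td->start_latency_ns;
	td->cached_stop_ok = td->effective_constraint_ns > td->stop_latency_ns;
	return td->cached_stop_ok;
}

int main(void)
{
	struct timing_data td = {
		.stop_latency_ns = 1000,
		.start_latency_ns = 2000,
		.constraint_changed = true,
	};

	printf("stop_ok = %d\n", stop_ok(&td, 10000));	/* recomputed */
	printf("stop_ok = %d\n", stop_ok(&td, 0));	/* served from cache */
	return 0;
}

The kernel variant additionally takes dev->power.lock around the cached fields and folds child-device constraints into the result.
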
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 73ce9fbe9839..c3eaa08a8f96 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sched.h>
@@ -38,11 +39,13 @@
 	ktime_t __start = ktime_get();					\
 	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);	\
 	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));	\
-	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);	\
-	if (__elapsed > __gpd_data->td.field) {				\
-		__gpd_data->td.field = __elapsed;			\
+	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;		\
+	if (!__retval && __elapsed > __td->field) {			\
+		__td->field = __elapsed;				\
 		dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
 			__elapsed);					\
+		genpd->max_off_time_changed = true;			\
+		__td->constraint_changed = true;			\
 	}								\
 	__retval;							\
 })
@@ -211,6 +214,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 	if (elapsed_ns > genpd->power_on_latency_ns) {
 		genpd->power_on_latency_ns = elapsed_ns;
+		genpd->max_off_time_changed = true;
 		if (genpd->name)
 			pr_warning("%s: Power-on latency exceeded, "
 				"new value %lld ns\n", genpd->name,
@@ -247,6 +251,53 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+				     unsigned long val, void *ptr)
+{
+	struct generic_pm_domain_data *gpd_data;
+	struct device *dev;
+
+	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+
+	mutex_lock(&gpd_data->lock);
+	dev = gpd_data->base.dev;
+	if (!dev) {
+		mutex_unlock(&gpd_data->lock);
+		return NOTIFY_DONE;
+	}
+	mutex_unlock(&gpd_data->lock);
+
+	for (;;) {
+		struct generic_pm_domain *genpd;
+		struct pm_domain_data *pdd;
+
+		spin_lock_irq(&dev->power.lock);
+
+		pdd = dev->power.subsys_data ?
+				dev->power.subsys_data->domain_data : NULL;
+		if (pdd) {
+			to_gpd_data(pdd)->td.constraint_changed = true;
+			genpd = dev_to_genpd(dev);
+		} else {
+			genpd = ERR_PTR(-ENODATA);
+		}
+
+		spin_unlock_irq(&dev->power.lock);
+
+		if (!IS_ERR(genpd)) {
+			mutex_lock(&genpd->lock);
+			genpd->max_off_time_changed = true;
+			mutex_unlock(&genpd->lock);
+		}
+
+		dev = dev->parent;
+		if (!dev || dev->power.ignore_children)
+			break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
  * @pdd: Domain data of the device to save the state of.
@@ -435,6 +486,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 	if (elapsed_ns > genpd->power_off_latency_ns) {
 		genpd->power_off_latency_ns = elapsed_ns;
+		genpd->max_off_time_changed = true;
 		if (genpd->name)
 			pr_warning("%s: Power-off latency exceeded, "
 				"new value %lld ns\n", genpd->name,
@@ -443,17 +495,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	genpd->power_off_time = ktime_get();
-
-	/* Update PM QoS information for devices in the domain. */
-	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
-		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
-
-		pm_runtime_update_max_time_suspended(pdd->dev,
-					td->start_latency_ns +
-					td->restore_state_latency_ns +
-					genpd->power_on_latency_ns);
-	}
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
@@ -514,9 +555,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (ret)
 		return ret;
 
-	pm_runtime_update_max_time_suspended(dev,
-			dev_gpd_data(dev)->td.start_latency_ns);
-
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
 	 * off, so it can't use mutexes.
@@ -613,6 +651,12 @@ void pm_genpd_poweroff_unused(void)
 
 #else
 
+static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+					    unsigned long val, void *ptr)
+{
+	return NOTIFY_DONE;
+}
+
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #define pm_genpd_runtime_suspend	NULL
@@ -1209,6 +1253,14 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data)
+		return -ENOMEM;
+
+	mutex_init(&gpd_data->lock);
+	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
 	genpd_acquire_lock(genpd);
 
 	if (genpd->status == GPD_STATE_POWER_OFF) {
@@ -1227,26 +1279,35 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 		goto out;
 	}
 
-	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
-	if (!gpd_data) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	genpd->device_count++;
+	genpd->max_off_time_changed = true;
 
-	dev->pm_domain = &genpd->domain;
 	dev_pm_get_subsys_data(dev);
+
+	mutex_lock(&gpd_data->lock);
+	spin_lock_irq(&dev->power.lock);
+	dev->pm_domain = &genpd->domain;
 	dev->power.subsys_data->domain_data = &gpd_data->base;
 	gpd_data->base.dev = dev;
-	gpd_data->need_restore = false;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	gpd_data->need_restore = false;
 	if (td)
 		gpd_data->td = *td;
 
+	gpd_data->td.constraint_changed = true;
+	gpd_data->td.effective_constraint_ns = -1;
+	spin_unlock_irq(&dev->power.lock);
+	mutex_unlock(&gpd_data->lock);
+
+	genpd_release_lock(genpd);
+
+	return 0;
+
  out:
 	genpd_release_lock(genpd);
 
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
 	return ret;
 }
 
@@ -1290,12 +1351,15 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
+	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
-	int ret = -EINVAL;
+	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
+	    ||  IS_ERR_OR_NULL(dev->pm_domain)
+	    ||  pd_to_genpd(dev->pm_domain) != genpd)
 		return -EINVAL;
 
 	genpd_acquire_lock(genpd);
@@ -1305,21 +1369,27 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		if (pdd->dev != dev)
-			continue;
+	genpd->device_count--;
+	genpd->max_off_time_changed = true;
 
-		list_del_init(&pdd->list_node);
-		pdd->dev = NULL;
-		dev_pm_put_subsys_data(dev);
-		dev->pm_domain = NULL;
-		kfree(to_gpd_data(pdd));
+	spin_lock_irq(&dev->power.lock);
+	dev->pm_domain = NULL;
+	pdd = dev->power.subsys_data->domain_data;
+	list_del_init(&pdd->list_node);
+	dev->power.subsys_data->domain_data = NULL;
+	spin_unlock_irq(&dev->power.lock);
 
-		genpd->device_count--;
+	gpd_data = to_gpd_data(pdd);
+	mutex_lock(&gpd_data->lock);
+	pdd->dev = NULL;
+	mutex_unlock(&gpd_data->lock);
 
-		ret = 0;
-		break;
-	}
+	genpd_release_lock(genpd);
+
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
+	dev_pm_put_subsys_data(dev);
+	return 0;
 
  out:
 	genpd_release_lock(genpd);
@@ -1378,7 +1448,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
@@ -1690,6 +1760,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->max_off_time_ns = -1;
+	genpd->max_off_time_changed = true;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 66a265bf5867..28dee3053f1f 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,6 +14,31 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+	s64 *constraint_ns_p = data;
+	s32 constraint_ns = -1;
+
+	if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+		constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+
+	if (constraint_ns < 0) {
+		constraint_ns = dev_pm_qos_read_value(dev);
+		constraint_ns *= NSEC_PER_USEC;
+	}
+	if (constraint_ns == 0)
+		return 0;
+
+	/*
+	 * constraint_ns cannot be negative here, because the device has been
+	 * suspended.
+	 */
+	if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+		*constraint_ns_p = constraint_ns;
+
+	return 0;
+}
+
 /**
  * default_stop_ok - Default PM domain governor routine for stopping devices.
  * @dev: Device to check.
@@ -21,14 +46,52 @@
 bool default_stop_ok(struct device *dev)
 {
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	unsigned long flags;
+	s64 constraint_ns;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
-		return true;
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!td->constraint_changed) {
+		bool ret = td->cached_stop_ok;
 
-	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
-		&& td->break_even_ns < dev->power.max_time_suspended_ns;
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+		return ret;
+	}
+	td->constraint_changed = false;
+	td->cached_stop_ok = false;
+	td->effective_constraint_ns = -1;
+	constraint_ns = __dev_pm_qos_read_value(dev);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (constraint_ns < 0)
+		return false;
+
+	constraint_ns *= NSEC_PER_USEC;
+	/*
+	 * We can walk the children without any additional locking, because
+	 * they all have been suspended at this point and their
+	 * effective_constraint_ns fields won't be modified in parallel with us.
+	 */
+	if (!dev->power.ignore_children)
+		device_for_each_child(dev, &constraint_ns,
+				      dev_update_qos_constraint);
+
+	if (constraint_ns > 0) {
+		constraint_ns -= td->start_latency_ns;
+		if (constraint_ns == 0)
+			return false;
+	}
+	td->effective_constraint_ns = constraint_ns;
+	td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
+				constraint_ns == 0;
+	/*
+	 * The children have been suspended already, so we don't need to take
+	 * their stop latencies into account here.
+	 */
+	return td->cached_stop_ok;
 }
 
 /**
@@ -42,9 +105,27 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 	struct generic_pm_domain *genpd = pd_to_genpd(pd);
 	struct gpd_link *link;
 	struct pm_domain_data *pdd;
-	s64 min_dev_off_time_ns;
+	s64 min_off_time_ns;
 	s64 off_on_time_ns;
-	ktime_t time_now = ktime_get();
+
+	if (genpd->max_off_time_changed) {
+		struct gpd_link *link;
+
+		/*
+		 * We have to invalidate the cached results for the masters, so
+		 * use the observation that default_power_down_ok() is not
+		 * going to be called for any master until this instance
+		 * returns.
+		 */
+		list_for_each_entry(link, &genpd->slave_links, slave_node)
+			link->master->max_off_time_changed = true;
+
+		genpd->max_off_time_changed = false;
+		genpd->cached_power_down_ok = false;
+		genpd->max_off_time_ns = -1;
+	} else {
+		return genpd->cached_power_down_ok;
+	}
 
 	off_on_time_ns = genpd->power_off_latency_ns +
 		genpd->power_on_latency_ns;
@@ -61,6 +142,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 			to_gpd_data(pdd)->td.save_state_latency_ns;
 	}
 
+	min_off_time_ns = -1;
 	/*
 	 * Check if subdomains can be off for enough time.
 	 *
@@ -73,8 +155,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		if (sd_max_off_ns < 0)
 			continue;
 
-		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
-						       sd->power_off_time));
 		/*
 		 * Check if the subdomain is allowed to be off long enough for
 		 * the current domain to turn off and on (that's how much time
@@ -82,60 +162,64 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 	 */
 		if (sd_max_off_ns <= off_on_time_ns)
 			return false;
+
+		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+			min_off_time_ns = sd_max_off_ns;
 	}
 
 	/*
 	 * Check if the devices in the domain can be off enough time.
 	 */
-	min_dev_off_time_ns = -1;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 		struct gpd_timing_data *td;
-		struct device *dev = pdd->dev;
-		s64 dev_off_time_ns;
+		s64 constraint_ns;
 
-		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+		if (!pdd->dev->driver)
 			continue;
 
+		/*
+		 * Check if the device is allowed to be off long enough for the
+		 * domain to turn off and on (that's how much time it will
+		 * have to wait worst case).
+		 */
 		td = &to_gpd_data(pdd)->td;
-		dev_off_time_ns = dev->power.max_time_suspended_ns -
-			(td->start_latency_ns + td->restore_state_latency_ns +
-				ktime_to_ns(ktime_sub(time_now,
-						      dev->power.suspend_time)));
-		if (dev_off_time_ns <= off_on_time_ns)
-			return false;
-
-		if (min_dev_off_time_ns > dev_off_time_ns
-		    || min_dev_off_time_ns < 0)
-			min_dev_off_time_ns = dev_off_time_ns;
-	}
+		constraint_ns = td->effective_constraint_ns;
+		/* default_stop_ok() need not be called before us. */
+		if (constraint_ns < 0) {
+			constraint_ns = dev_pm_qos_read_value(pdd->dev);
+			constraint_ns *= NSEC_PER_USEC;
+		}
+		if (constraint_ns == 0)
+			continue;
 
-	if (min_dev_off_time_ns < 0) {
 		/*
-		 * There are no latency constraints, so the domain can spend
-		 * arbitrary time in the "off" state.
+		 * constraint_ns cannot be negative here, because the device has
+		 * been suspended.
 		 */
-		genpd->max_off_time_ns = -1;
-		return true;
+		constraint_ns -= td->restore_state_latency_ns;
+		if (constraint_ns <= off_on_time_ns)
+			return false;
+
+		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+			min_off_time_ns = constraint_ns;
 	}
 
+	genpd->cached_power_down_ok = true;
+
 	/*
-	 * The difference between the computed minimum delta and the time needed
-	 * to turn the domain on is the maximum theoretical time this domain can
-	 * spend in the "off" state.
+	 * If the computed minimum device off time is negative, there are no
+	 * latency constraints, so the domain can spend arbitrary time in the
+	 * "off" state.
 	 */
-	min_dev_off_time_ns -= genpd->power_on_latency_ns;
+	if (min_off_time_ns < 0)
+		return true;
 
 	/*
-	 * If the difference between the computed minimum delta and the time
-	 * needed to turn the domain off and back on on is smaller than the
-	 * domain's power break even time, removing power from the domain is not
-	 * worth it.
+	 * The difference between the computed minimum subdomain or device off
+	 * time and the time needed to turn the domain on is the maximum
+	 * theoretical time this domain can spend in the "off" state.
 	 */
-	if (genpd->break_even_ns >
-	    min_dev_off_time_ns - genpd->power_off_latency_ns)
-		return false;
-
-	genpd->max_off_time_ns = min_dev_off_time_ns;
+	genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
 	return true;
 }
 
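
For reference, the arithmetic that the reworked default_power_down_ok() performs above can be followed with a small standalone example. This is an illustrative sketch only (plain C with made-up latencies and constraints), not the kernel function: each suspended device must tolerate the domain's combined power-off plus power-on latency after its restore latency is subtracted, and the smallest remaining slack minus the power-on latency becomes genpd->max_off_time_ns.

/*
 * Illustrative-only arithmetic for the reworked power-off governor.
 * Field names mirror the diff; the numeric values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	long long power_off_latency_ns = 30000;
	long long power_on_latency_ns  = 50000;
	long long off_on_time_ns = power_off_latency_ns + power_on_latency_ns;

	/* Per-device effective QoS constraints and restore latencies. */
	long long dev_constraint_ns[] = { 500000, 250000 };
	long long restore_latency_ns[] = { 20000, 10000 };
	long long min_off_time_ns = -1;
	bool power_down_ok = true;

	for (int i = 0; i < 2; i++) {
		long long c = dev_constraint_ns[i] - restore_latency_ns[i];

		if (c <= off_on_time_ns) {	/* not enough slack: stay on */
			power_down_ok = false;
			break;
		}
		if (min_off_time_ns < 0 || c < min_off_time_ns)
			min_off_time_ns = c;
	}

	if (power_down_ok)
		printf("max_off_time_ns = %lld\n",
		       min_off_time_ns - power_on_latency_ns);
	else
		printf("power down not allowed\n");
	return 0;
}

With these numbers the tightest device slack is 240000 ns, well above the 80000 ns round-trip cost, so the domain may power down with max_off_time_ns = 190000.
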
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 71855570922d..fd849a2c4fa8 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -352,21 +352,26 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
  *
  * Will register the notifier into a notification chain that gets called
  * upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or error code will be returned if that fails).
  */
 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 {
-	int retval = 0;
+	int ret = 0;
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	/* Silently return if the constraints object is not present. */
-	if (dev->power.constraints)
-		retval = blocking_notifier_chain_register(
-				dev->power.constraints->notifiers,
-				notifier);
+	if (!dev->power.constraints)
+		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
+			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+
+	if (!ret)
+		ret = blocking_notifier_chain_register(
+				dev->power.constraints->notifiers, notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
-	return retval;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bd0f3949bcf9..59894873a3b3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -282,47 +282,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 	return retval != -EACCES ? retval : -EIO;
 }
 
-struct rpm_qos_data {
-	ktime_t time_now;
-	s64 constraint_ns;
-};
-
-/**
- * rpm_update_qos_constraint - Update a given PM QoS constraint data.
- * @dev: Device whose timing data to use.
- * @data: PM QoS constraint data to update.
- *
- * Use the suspend timing data of @dev to update PM QoS constraint data pointed
- * to by @data.
- */
-static int rpm_update_qos_constraint(struct device *dev, void *data)
-{
-	struct rpm_qos_data *qos = data;
-	unsigned long flags;
-	s64 delta_ns;
-	int ret = 0;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	if (dev->power.max_time_suspended_ns < 0)
-		goto out;
-
-	delta_ns = dev->power.max_time_suspended_ns -
-		ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
-	if (delta_ns <= 0) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
-		qos->constraint_ns = delta_ns;
-
- out:
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return ret;
-}
-
 /**
  * rpm_suspend - Carry out runtime suspend of given device.
  * @dev: Device to suspend.
@@ -349,7 +308,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
 	struct device *parent = NULL;
-	struct rpm_qos_data qos;
 	int retval;
 
 	trace_rpm_suspend(dev, rpmflags);
@@ -445,38 +403,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
-	qos.constraint_ns = __dev_pm_qos_read_value(dev);
-	if (qos.constraint_ns < 0) {
-		/* Negative constraint means "never suspend". */
+	if (__dev_pm_qos_read_value(dev) < 0) {
+		/* Negative PM QoS constraint means "never suspend". */
 		retval = -EPERM;
 		goto out;
 	}
-	qos.constraint_ns *= NSEC_PER_USEC;
-	qos.time_now = ktime_get();
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (!dev->power.ignore_children) {
-		if (dev->power.irq_safe)
-			spin_unlock(&dev->power.lock);
-		else
-			spin_unlock_irq(&dev->power.lock);
-
-		retval = device_for_each_child(dev, &qos,
-				rpm_update_qos_constraint);
-
-		if (dev->power.irq_safe)
-			spin_lock(&dev->power.lock);
-		else
-			spin_lock_irq(&dev->power.lock);
-
-		if (retval)
-			goto fail;
-	}
-
-	dev->power.suspend_time = qos.time_now;
-	dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
-
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
@@ -529,8 +463,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
  fail:
 	__update_runtime_status(dev, RPM_ACTIVE);
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
 	dev->power.deferred_resume = false;
 	wake_up_all(&dev->power.wait_queue);
 
@@ -704,9 +636,6 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
-
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pm_domain)
@@ -1369,9 +1298,6 @@ void pm_runtime_init(struct device *dev)
 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
 			(unsigned long)dev);
 
-	dev->power.suspend_time = ktime_set(0, 0);
-	dev->power.max_time_suspended_ns = -1;
-
 	init_waitqueue_head(&dev->power.wait_queue);
 }
 
@@ -1389,28 +1315,3 @@ void pm_runtime_remove(struct device *dev)
 	if (dev->power.irq_safe && dev->parent)
 		pm_runtime_put_sync(dev->parent);
 }
-
-/**
- * pm_runtime_update_max_time_suspended - Update device's suspend time data.
- * @dev: Device to handle.
- * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
- *
- * Update the device's power.max_time_suspended_ns field by subtracting
- * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
- * never negative.
- */
-void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
-		if (dev->power.max_time_suspended_ns > delta_ns)
-			dev->power.max_time_suspended_ns -= delta_ns;
-		else
-			dev->power.max_time_suspended_ns = 0;
-	}
-
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-}
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 715305e05123..f067e60a3832 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -544,8 +544,6 @@ struct dev_pm_info {
 	unsigned long		active_jiffies;
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
-	ktime_t			suspend_time;
-	s64			max_time_suspended_ns;
 	struct dev_pm_qos_request *pq_req;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 91f8286106ea..1e994eeacdf3 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -14,6 +14,7 @@
 #include <linux/pm.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/notifier.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -70,9 +71,9 @@ struct generic_pm_domain {
 	int (*power_on)(struct generic_pm_domain *domain);
 	s64 power_on_latency_ns;
 	struct gpd_dev_ops dev_ops;
-	s64 break_even_ns;	/* Power break even for the entire domain. */
 	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
-	ktime_t power_off_time;
+	bool max_off_time_changed;
+	bool cached_power_down_ok;
 	struct device_node *of_node; /* Node in device tree */
 };
 
@@ -93,13 +94,17 @@ struct gpd_timing_data {
 	s64 start_latency_ns;
 	s64 save_state_latency_ns;
 	s64 restore_state_latency_ns;
-	s64 break_even_ns;
+	s64 effective_constraint_ns;
+	bool constraint_changed;
+	bool cached_stop_ok;
 };
 
 struct generic_pm_domain_data {
 	struct pm_domain_data base;
 	struct gpd_dev_ops ops;
 	struct gpd_timing_data td;
+	struct notifier_block nb;
+	struct mutex lock;
 	bool need_restore;
 	bool always_on;
 };
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 609daae7a014..f271860c78d5 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -150,9 +150,6 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
-static inline void pm_runtime_update_max_time_suspended(struct device *dev,
-							s64 delta_ns) {}
-
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)