Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/Makefile  |   3
-rw-r--r--  drivers/base/power/domain.c  |   2
-rw-r--r--  drivers/base/power/power.h   |   4
-rw-r--r--  drivers/base/power/qos.c     | 220
-rw-r--r--  drivers/base/power/runtime.c | 162
-rw-r--r--  drivers/base/power/sysfs.c   |  97
6 files changed, 368 insertions(+), 120 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c0..1cb8544598d5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o runtime.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o domain_governor.o
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index bfb8955c406c..dc127e5dec4b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -42,7 +42,7 @@
 	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;		\
 	if (!__retval && __elapsed > __td->field) {			\
 		__td->field = __elapsed;				\
-		dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+		dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
 			__elapsed);					\
 		genpd->max_off_time_changed = true;			\
 		__td->constraint_changed = true;			\
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index cfc3226ec492..a21223d95926 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
 extern void rpm_sysfs_remove(struct device *dev);
 extern int wakeup_sysfs_add(struct device *dev);
 extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
 extern int pm_qos_sysfs_add_flags(struct device *dev);
 extern void pm_qos_sysfs_remove_flags(struct device *dev);
 
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5c1361a9e5dd..36b9eb4862cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->latency);
+		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
 }
 
 /**
111/** 111/**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	int ret;
 
 	switch(req->type) {
-	case DEV_PM_QOS_LATENCY:
-		ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
-					   action, value);
+	case DEV_PM_QOS_RESUME_LATENCY:
+		ret = pm_qos_update_target(&qos->resume_latency,
+					   &req->data.pnode, action, value);
 		if (ret) {
-			value = pm_qos_read_value(&qos->latency);
+			value = pm_qos_read_value(&qos->resume_latency);
 			blocking_notifier_call_chain(&dev_pm_notifiers,
 						     (unsigned long)value,
 						     req);
 		}
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		ret = pm_qos_update_target(&qos->latency_tolerance,
+					   &req->data.pnode, action, value);
+		if (ret) {
+			value = pm_qos_read_value(&qos->latency_tolerance);
+			req->dev->power.set_latency_tolerance(req->dev, value);
+		}
+		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	}
 	BLOCKING_INIT_NOTIFIER_HEAD(n);
 
-	c = &qos->latency;
+	c = &qos->resume_latency;
 	plist_head_init(&c->list);
-	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
-	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
 	c->type = PM_QOS_MIN;
 	c->notifiers = n;
 
+	c = &qos->latency_tolerance;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_resume_latency(dev);
 	pm_qos_sysfs_remove_flags(dev);
 
 	mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		goto out;
 
 	/* Flush the constraints lists for the device. */
-	c = &qos->latency;
+	c = &qos->resume_latency;
 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
 		/*
 		 * Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
+	c = &qos->latency_tolerance;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+				       struct dev_pm_qos_request *req)
+{
+	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+			&& !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret = 0;
+
+	if (!dev || dev_pm_qos_invalid_request(dev, req))
+		return -EINVAL;
+
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+	if (!ret) {
+		req->dev = dev;
+		req->type = type;
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+	}
+	return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   enum dev_pm_qos_req_type type, s32 value)
 {
-	int ret = 0;
-
-	if (!dev || !req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(dev_pm_qos_request_active(req),
-		 "%s() called for already added request\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (IS_ERR(dev->power.qos))
-		ret = -ENODEV;
-	else if (!dev->power.qos)
-		ret = dev_pm_qos_constraints_allocate(dev);
-
-	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-	if (!ret) {
-		req->dev = dev;
-		req->type = type;
-		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-	}
-
+	ret = __dev_pm_qos_add_request(dev, req, type, value);
 	mutex_unlock(&dev_pm_qos_mtx);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 		return -ENODEV;
 
 	switch(req->type) {
-	case DEV_PM_QOS_LATENCY:
+	case DEV_PM_QOS_RESUME_LATENCY:
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		curr_value = req->data.pnode.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
-		ret = blocking_notifier_chain_register(
-				dev->power.qos->latency.notifiers, notifier);
+		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+						       notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 
 	/* Silently return if the constraints object is not present. */
 	if (!IS_ERR_OR_NULL(dev->power.qos))
-		retval = blocking_notifier_chain_unregister(
-				dev->power.qos->latency.notifiers,
-				notifier);
+		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+							    notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
 	return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
  * @value: Constraint latency value.
  */
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-				    struct dev_pm_qos_request *req, s32 value)
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
 {
 	struct device *ancestor = dev->parent;
 	int ret = -ENODEV;
 
-	while (ancestor && !ancestor->power.ignore_children)
-		ancestor = ancestor->parent;
+	switch (type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+		while (ancestor && !ancestor->power.ignore_children)
+			ancestor = ancestor->parent;
 
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		while (ancestor && !ancestor->power.set_latency_tolerance)
+			ancestor = ancestor->parent;
+
+		break;
+	default:
+		ancestor = NULL;
+	}
 	if (ancestor)
-		ret = dev_pm_qos_add_request(ancestor, req,
-					     DEV_PM_QOS_LATENCY, value);
+		ret = dev_pm_qos_add_request(ancestor, req, type, value);
 
 	if (ret < 0)
 		req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 	struct dev_pm_qos_request *req = NULL;
 
 	switch(type) {
-	case DEV_PM_QOS_LATENCY:
-		req = dev->power.qos->latency_req;
-		dev->power.qos->latency_req = NULL;
+	case DEV_PM_QOS_RESUME_LATENCY:
+		req = dev->power.qos->resume_latency_req;
+		dev->power.qos->resume_latency_req = NULL;
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		req = dev->power.qos->latency_tolerance_req;
+		dev->power.qos->latency_tolerance_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
 		req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!req)
 		return -ENOMEM;
 
-	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
 	if (ret < 0) {
 		kfree(req);
 		return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
 		ret = -ENODEV;
-	else if (dev->power.qos->latency_req)
+	else if (dev->power.qos->resume_latency_req)
 		ret = -EEXIST;
 
 	if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-	dev->power.qos->latency_req = req;
+	dev->power.qos->resume_latency_req = req;
 
 	mutex_unlock(&dev_pm_qos_mtx);
 
-	ret = pm_qos_sysfs_add_latency(dev);
+	ret = pm_qos_sysfs_add_resume_latency(dev);
 	if (ret)
-		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 
  out:
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 }
 
 /**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
 	mutex_lock(&dev_pm_qos_sysfs_mtx);
 
-	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_resume_latency(dev);
 
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_put(dev);
 	return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+	s32 ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = IS_ERR_OR_NULL(dev->power.qos)
+		|| !dev->power.qos->latency_tolerance_req ?
+			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}
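A minimal sketch (not part of the patch) of how a driver might opt in to the new DEV_PM_QOS_LATENCY_TOLERANCE type: per dev_pm_qos_invalid_request() above, tolerance requests are only valid once the device publishes a set_latency_tolerance() callback. All foo_* names are hypothetical.

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical hardware hooks. */
static void foo_hw_use_default_thresholds(struct device *dev) { }
static void foo_hw_program_threshold(struct device *dev, s32 usecs) { }

/* Invoked by the PM QoS core whenever the aggregate tolerance changes. */
static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
		foo_hw_use_default_thresholds(dev);
	else
		foo_hw_program_threshold(dev, val);	/* val in usecs */
}

static int foo_probe(struct device *dev)
{
	/*
	 * Publishing the callback makes latency tolerance requests valid
	 * for this device and lets dpm_sysfs_add() expose the
	 * pm_qos_latency_tolerance_us attribute (see sysfs.c below).
	 */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
	return 0;
}
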
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 72e00e66ecc5..4776cf528d08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
 #include <trace/events/rpm.h>
 #include "power.h"
 
+#define RPM_GET_CALLBACK(dev, cb)				\
+({								\
+	int (*__rpm_cb)(struct device *__d);			\
+								\
+	if (dev->pm_domain)					\
+		__rpm_cb = dev->pm_domain->ops.cb;		\
+	else if (dev->type && dev->type->pm)			\
+		__rpm_cb = dev->type->pm->cb;			\
+	else if (dev->class && dev->class->pm)			\
+		__rpm_cb = dev->class->pm->cb;			\
+	else if (dev->bus && dev->bus->pm)			\
+		__rpm_cb = dev->bus->pm->cb;			\
+	else							\
+		__rpm_cb = NULL;				\
+								\
+	if (!__rpm_cb && dev->driver && dev->driver->pm)	\
+		__rpm_cb = dev->driver->pm->cb;			\
+								\
+	__rpm_cb;						\
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
 static int rpm_resume(struct device *dev, int rpmflags);
 static int rpm_suspend(struct device *dev, int rpmflags);
 
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_idle;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_idle;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_idle;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_idle;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_idle;
+	callback = rpm_get_idle_cb(dev);
 
 	if (callback)
 		retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_suspend;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_suspend;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_suspend;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_suspend;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_suspend;
+	callback = rpm_get_suspend_cb(dev);
 
 	retval = rpm_callback(callback, dev);
 	if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_RESUMING);
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_resume;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_resume;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_resume;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_resume;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_resume;
+	callback = rpm_get_resume_cb(dev);
 
 	retval = rpm_callback(callback, dev);
 	if (retval) {
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
 	if (dev->power.irq_safe && dev->parent)
 		pm_runtime_put(dev->parent);
 }
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring it into
+ * suspend state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to
+ * make sure the device is put into low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret = 0;
+
+	pm_runtime_disable(dev);
+
+	/*
+	 * Note that pm_runtime_status_suspended() returns false while
+	 * !CONFIG_PM_RUNTIME, which means the device will be put into low
+	 * power state.
+	 */
+	if (pm_runtime_status_suspended(dev))
+		return 0;
+
+	callback = rpm_get_suspend_cb(dev);
+
+	if (!callback) {
+		ret = -ENOSYS;
+		goto err;
+	}
+
+	ret = callback(dev);
+	if (ret)
+		goto err;
+
+	pm_runtime_set_suspended(dev);
+	return 0;
+err:
+	pm_runtime_enable(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the
+ * device into low power state by a call to pm_runtime_force_suspend(). Here
+ * we reverse those actions and bring the device into full power. We update
+ * the runtime PM status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to
+ * make sure the device is put into full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret = 0;
+
+	callback = rpm_get_resume_cb(dev);
+
+	if (!callback) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
+	ret = callback(dev);
+	if (ret)
+		goto out;
+
+	pm_runtime_set_active(dev);
+	pm_runtime_mark_last_busy(dev);
+out:
+	pm_runtime_enable(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
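A minimal usage sketch (not part of the patch): the two new helpers are meant to be plugged into a driver's system sleep ops so the runtime PM path is reused across system suspend/resume. The foo_* names are hypothetical.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks/power; also reached via pm_runtime_force_suspend(). */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Ungate clocks and restore context. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
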
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 03e089ade5ce..95b181d1ca6d 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
 		autosuspend_delay_ms_store);
 
-static ssize_t pm_qos_latency_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+	return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
 }
 
-static ssize_t pm_qos_latency_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t n)
 {
 	s32 value;
 	int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
 	if (value < 0)
 		return -EINVAL;
 
-	ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+					value);
 	return ret < 0 ? ret : n;
 }
 
 static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-		   pm_qos_latency_show, pm_qos_latency_store);
+		   pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+	if (value < 0)
+		return sprintf(buf, "auto\n");
+	else if (value == PM_QOS_LATENCY_ANY)
+		return sprintf(buf, "any\n");
+
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t n)
+{
+	s32 value;
+	int ret;
+
+	if (kstrtos32(buf, 0, &value)) {
+		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+			value = PM_QOS_LATENCY_ANY;
+	}
+	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+	return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+		   pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
 
 static ssize_t pm_qos_no_power_off_show(struct device *dev,
 					struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
 	.attrs = runtime_attrs,
 };
 
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_pm_qos_resume_latency_us.attr,
 #endif /* CONFIG_PM_RUNTIME */
 	NULL,
 };
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+	.name = power_group_name,
+	.attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+	&dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+	NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
 	.name = power_group_name,
-	.attrs = pm_qos_latency_attrs,
+	.attrs = pm_qos_latency_tolerance_attrs,
 };
 
 static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
 		if (rc)
 			goto err_out;
 	}
-
 	if (device_can_wakeup(dev)) {
 		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
-		if (rc) {
-			if (pm_runtime_callbacks_present(dev))
-				sysfs_unmerge_group(&dev->kobj,
-						    &pm_runtime_attr_group);
-			goto err_out;
-		}
+		if (rc)
+			goto err_runtime;
+	}
+	if (dev->power.set_latency_tolerance) {
+		rc = sysfs_merge_group(&dev->kobj,
+				       &pm_qos_latency_tolerance_attr_group);
+		if (rc)
+			goto err_wakeup;
 	}
 	return 0;
 
+ err_wakeup:
+	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
 err_out:
 	sysfs_remove_group(&dev->kobj, &pm_attr_group);
 	return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 }
 
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
 {
-	return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
 {
-	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
 int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
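A hypothetical user-space sketch (not part of the patch) exercising the new pm_qos_latency_tolerance_us attribute, which accepts "auto", "any", or a tolerance in microseconds; the device path is made up for illustration.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; any device publishing set_latency_tolerance(). */
	const char *attr =
		"/sys/devices/foo/power/pm_qos_latency_tolerance_us";
	FILE *f = fopen(attr, "w");

	if (!f)
		return 1;
	fputs("100\n", f);	/* or "auto" / "any" */
	return fclose(f) != 0;
}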