Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--  drivers/base/power/qos.c | 220
1 file changed, 166 insertions(+), 54 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5c1361a9e5dd..36b9eb4862cb 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->latency);
+		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
 }
 
 /**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	int ret;
 
 	switch(req->type) {
-	case DEV_PM_QOS_LATENCY:
-		ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
-					   action, value);
+	case DEV_PM_QOS_RESUME_LATENCY:
+		ret = pm_qos_update_target(&qos->resume_latency,
+					   &req->data.pnode, action, value);
 		if (ret) {
-			value = pm_qos_read_value(&qos->latency);
+			value = pm_qos_read_value(&qos->resume_latency);
 			blocking_notifier_call_chain(&dev_pm_notifiers,
 						     (unsigned long)value,
 						     req);
 		}
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		ret = pm_qos_update_target(&qos->latency_tolerance,
+					   &req->data.pnode, action, value);
+		if (ret) {
+			value = pm_qos_read_value(&qos->latency_tolerance);
+			req->dev->power.set_latency_tolerance(req->dev, value);
+		}
+		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
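
The new DEV_PM_QOS_LATENCY_TOLERANCE case delivers the aggregate value differently from the resume-latency one: instead of fanning out through a notifier chain, it is pushed straight into the driver via the new dev->power.set_latency_tolerance() callback. A driver opts in by providing that callback; a minimal sketch of what one might look like (the foo_* names and the register-write helper are illustrative, not part of this patch):

	static void foo_set_latency_tolerance(struct device *dev, s32 val)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		/*
		 * val is the aggregate latency tolerance in microseconds;
		 * a negative value means "no specific constraint".
		 */
		if (val < 0)
			val = FOO_LTR_MAX_US;	/* illustrative maximum */

		foo_write_ltr_reg(priv, val);	/* hypothetical helper */
	}

	/* Set at probe time, before any requests can target the device: */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;

Note that apply_constraint() invokes the callback unconditionally here; that is safe because latency-tolerance requests are rejected up front for devices that do not provide it (see dev_pm_qos_invalid_request() below).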
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	}
 	BLOCKING_INIT_NOTIFIER_HEAD(n);
 
-	c = &qos->latency;
+	c = &qos->resume_latency;
 	plist_head_init(&c->list);
-	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
-	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
 	c->type = PM_QOS_MIN;
 	c->notifiers = n;
 
+	c = &qos->latency_tolerance;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
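
The no_constraint_value field gives each constraint list an explicit "empty list" answer, and the two lists need different ones: an empty resume-latency list behaves like the default, while an empty latency-tolerance list must report "no constraint" rather than a valid tolerance of 0. For reference, the core aggregation in kernel/power/qos.c (from the companion patch in this series, paraphrased here, not part of this diff) consumes the field roughly like this:

	static s32 pm_qos_get_value(struct pm_qos_constraints *c)
	{
		if (plist_head_empty(&c->list))
			return c->no_constraint_value;

		switch (c->type) {
		case PM_QOS_MIN:
			return plist_first(&c->list)->prio;
		case PM_QOS_MAX:
			return plist_last(&c->list)->prio;
		default:
			BUG();	/* unreachable for a valid pm_qos type */
			return PM_QOS_DEFAULT_VALUE;
		}
	}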
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_resume_latency(dev);
 	pm_qos_sysfs_remove_flags(dev);
 
 	mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		goto out;
 
 	/* Flush the constraints lists for the device. */
-	c = &qos->latency;
+	c = &qos->resume_latency;
 	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
 		/*
 		 * Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
+	c = &qos->latency_tolerance;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+				       struct dev_pm_qos_request *req)
+{
+	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+			&& !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret = 0;
+
+	if (!dev || dev_pm_qos_invalid_request(dev, req))
+		return -EINVAL;
+
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+	if (!ret) {
+		req->dev = dev;
+		req->type = type;
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+	}
+	return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   enum dev_pm_qos_req_type type, s32 value)
 {
-	int ret = 0;
-
-	if (!dev || !req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(dev_pm_qos_request_active(req),
-		 "%s() called for already added request\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (IS_ERR(dev->power.qos))
-		ret = -ENODEV;
-	else if (!dev->power.qos)
-		ret = dev_pm_qos_constraints_allocate(dev);
-
-	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-	if (!ret) {
-		req->dev = dev;
-		req->type = type;
-		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-	}
-
+	ret = __dev_pm_qos_add_request(dev, req, type, value);
 	mutex_unlock(&dev_pm_qos_mtx);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
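
With the guards, allocation, and tracing now factored into __dev_pm_qos_add_request(), the exported wrapper reduces to lock/call/unlock, and the unexported variant can be reused by callers that already hold dev_pm_qos_mtx (as dev_pm_qos_update_user_latency_tolerance() does below). Typical kernel-side usage of the exported API (caller-side names and the 100 us value are illustrative):

	static struct dev_pm_qos_request foo_ltr_req;

	/* Ask for at most 100 us of latency tolerance for dev. */
	int ret = dev_pm_qos_add_request(dev, &foo_ltr_req,
					 DEV_PM_QOS_LATENCY_TOLERANCE, 100);
	if (ret < 0)
		return ret;	/* -EINVAL if dev lacks set_latency_tolerance() */

	/* ... later, once the constraint is no longer needed: */
	dev_pm_qos_remove_request(&foo_ltr_req);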
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 		return -ENODEV;
 
 	switch(req->type) {
-	case DEV_PM_QOS_LATENCY:
+	case DEV_PM_QOS_RESUME_LATENCY:
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		curr_value = req->data.pnode.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
-		ret = blocking_notifier_chain_register(
-				dev->power.qos->latency.notifiers, notifier);
+		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+						       notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 
 	/* Silently return if the constraints object is not present. */
 	if (!IS_ERR_OR_NULL(dev->power.qos))
-		retval = blocking_notifier_chain_unregister(
-				dev->power.qos->latency.notifiers,
-				notifier);
+		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+							    notifier);
 
 	mutex_unlock(&dev_pm_qos_mtx);
 	return retval;
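
Both notifier entry points now operate on the renamed resume_latency list; the notification semantics are unchanged. A client that wants to react to aggregate resume-latency changes registers an ordinary notifier_block (the foo_* names are illustrative):

	static int foo_latency_notify(struct notifier_block *nb,
				      unsigned long value, void *data)
	{
		/* value is the new aggregate resume latency, in usecs */
		pr_debug("resume latency constraint now %lu us\n", value);
		return NOTIFY_DONE;
	}

	static struct notifier_block foo_latency_nb = {
		.notifier_call = foo_latency_notify,
	};

	ret = dev_pm_qos_add_notifier(dev, &foo_latency_nb);
	/* ... */
	ret = dev_pm_qos_remove_notifier(dev, &foo_latency_nb);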
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
  * @value: Constraint latency value.
  */
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-				    struct dev_pm_qos_request *req, s32 value)
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
 {
 	struct device *ancestor = dev->parent;
 	int ret = -ENODEV;
 
-	while (ancestor && !ancestor->power.ignore_children)
-		ancestor = ancestor->parent;
+	switch (type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+		while (ancestor && !ancestor->power.ignore_children)
+			ancestor = ancestor->parent;
 
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		while (ancestor && !ancestor->power.set_latency_tolerance)
+			ancestor = ancestor->parent;
+
+		break;
+	default:
+		ancestor = NULL;
+	}
 	if (ancestor)
-		ret = dev_pm_qos_add_request(ancestor, req,
-					     DEV_PM_QOS_LATENCY, value);
+		ret = dev_pm_qos_add_request(ancestor, req, type, value);
 
 	if (ret < 0)
 		req->dev = NULL;
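
The type argument also changes which ancestor gets selected: resume-latency requests climb to the first ancestor with ignore_children clear (the one whose runtime-PM state actually gates this device), while latency-tolerance requests climb to the first ancestor that can enforce a tolerance at all. A caller sketch, modeled on how an I2C client driver might constrain its controller (names and the 50 us value are illustrative):

	/* in probe(), with 'ltr_req' preallocated in the driver's state: */
	ret = dev_pm_qos_add_ancestor_request(&client->dev, &priv->ltr_req,
					      DEV_PM_QOS_LATENCY_TOLERANCE, 50);
	if (ret < 0)	/* -ENODEV: no ancestor accepts latency tolerance */
		dev_warn(&client->dev, "latency tolerance not supported\n");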
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 	struct dev_pm_qos_request *req = NULL;
 
 	switch(type) {
-	case DEV_PM_QOS_LATENCY:
-		req = dev->power.qos->latency_req;
-		dev->power.qos->latency_req = NULL;
+	case DEV_PM_QOS_RESUME_LATENCY:
+		req = dev->power.qos->resume_latency_req;
+		dev->power.qos->resume_latency_req = NULL;
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		req = dev->power.qos->latency_tolerance_req;
+		dev->power.qos->latency_tolerance_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
 		req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!req)
 		return -ENOMEM;
 
-	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
 	if (ret < 0) {
 		kfree(req);
 		return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
 		ret = -ENODEV;
-	else if (dev->power.qos->latency_req)
+	else if (dev->power.qos->resume_latency_req)
 		ret = -EEXIST;
 
 	if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-	dev->power.qos->latency_req = req;
+	dev->power.qos->resume_latency_req = req;
 
 	mutex_unlock(&dev_pm_qos_mtx);
 
-	ret = pm_qos_sysfs_add_latency(dev);
+	ret = pm_qos_sysfs_add_resume_latency(dev);
 	if (ret)
-		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 
  out:
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 }
 
 /**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
 	mutex_lock(&dev_pm_qos_sysfs_mtx);
 
-	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_resume_latency(dev);
 
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_put(dev);
 	return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+	s32 ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = IS_ERR_OR_NULL(dev->power.qos)
+		|| !dev->power.qos->latency_tolerance_req ?
+			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}
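
These two helpers exist for the sysfs interface added elsewhere in this series: a per-device latency_tolerance_us attribute whose store method funnels into dev_pm_qos_update_user_latency_tolerance(). A simplified sketch of such a store handler (the real one in drivers/base/power/sysfs.c also accepts the strings "auto" and "any"; error handling is trimmed here):

	static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
							 struct device_attribute *attr,
							 const char *buf, size_t n)
	{
		s32 value;
		int ret;

		if (kstrtos32(buf, 0, &value) || value < 0)
			return -EINVAL;	/* simplified: no "auto"/"any" */

		ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
		return ret < 0 ? ret : n;
	}

Negative values are reserved for those special keywords, which is why dev_pm_qos_update_user_latency_tolerance() treats val < 0 as "drop the user space request" when one exists and as -EINVAL when none does.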