path: root/drivers/base/power/qos.c
Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--  drivers/base/power/qos.c  218
1 file changed, 124 insertions(+), 94 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index d21349544ce5..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -91,6 +92,7 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 
 /**
  * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
@@ -100,7 +102,8 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -197,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -225,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
+	mutex_lock(&dev_pm_qos_mtx);
+
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
 
-	mutex_lock(&dev_pm_qos_mtx);
-
-	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -256,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
@@ -300,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
 
-	req->dev = dev;
-
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos) {
-		if (dev->power.power_state.event == PM_EVENT_INVALID) {
-			/* The device has been removed from the system. */
-			req->dev = NULL;
-			ret = -ENODEV;
-			goto out;
-		} else {
-			/*
-			 * Allocate the constraints data on the first call to
-			 * add_request, i.e. only if the data is not already
-			 * allocated and if the device has not been removed.
-			 */
-			ret = dev_pm_qos_constraints_allocate(dev);
-		}
-	}
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret) {
+		req->dev = dev;
 		req->type = type;
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 	}
 
- out:
 	mutex_unlock(&dev_pm_qos_mtx);
 
 	return ret;
@@ -343,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -385,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -392,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
 
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -417,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -461,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -492,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 						dev->power.qos->latency.notifiers,
 						notifier);
@@ -562,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -587,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -633,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+		pm_qos_sysfs_remove_flags(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	}
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-	}
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -682,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -696,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
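
For context, a minimal sketch (not part of this commit) of a driver-side caller of the dev_pm_qos_* API whose error semantics the patch above tightens. The foo_* names and the latency values are hypothetical; only the functions and the DEV_PM_QOS_LATENCY type used by the diff itself are assumed. Because dev->power.qos is now set to ERR_PTR(-ENODEV) on teardown, requests against a device that has already gone through dev_pm_qos_constraints_destroy() fail cleanly with -ENODEV instead of racing with the teardown path.

/* Illustrative only: hypothetical foo_* consumer of device PM QoS. */
#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_latency_req;

static int foo_start(struct device *dev)
{
	int ret;

	/* Returns -ENODEV if the device's constraints were already
	 * destroyed; a positive return only means the aggregate changed. */
	ret = dev_pm_qos_add_request(dev, &foo_latency_req,
				     DEV_PM_QOS_LATENCY, 100 /* example value */);
	if (ret < 0)
		return ret;

	/* Later adjustment; -ENODEV again means the device went away. */
	ret = dev_pm_qos_update_request(&foo_latency_req, 50);
	if (ret < 0 && ret != -ENODEV)
		dev_warn(dev, "PM QoS update failed: %d\n", ret);

	return 0;
}

static void foo_stop(struct device *dev)
{
	/* Safe after constraint teardown as well: returns -ENODEV. */
	dev_pm_qos_remove_request(&foo_latency_req);
}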