author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-03-03 16:48:14 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-03-04 08:23:11 -0500
commit	b81ea1b5ac4d3c6a628158b736dd4a98c46c29d9
tree	a2dd6b230ac5cc2197ef6da7042d892c405ad32d	/drivers/base/power/qos.c
parent	f5f43dcfff3a3c7f7de4a0cfca0106a0ccd58bd7
PM / QoS: Fix concurrency issues and memory leaks in device PM QoS
The current device PM QoS code assumes that certain functions will never be called in parallel with each other (for example, it is assumed that dev_pm_qos_expose_flags() won't be called in parallel with dev_pm_qos_hide_flags() for the same device and analogously for the latency limit), which may be overly optimistic.

Moreover, dev_pm_qos_expose_flags() and dev_pm_qos_expose_latency_limit() leak memory in error code paths (req needs to be freed on errors) and __dev_pm_qos_drop_user_request() forgets to free the request.

To fix the above issues put more things under the device PM QoS mutex to make them mutually exclusive and add the missing freeing of memory.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--	drivers/base/power/qos.c	129
1 file changed, 87 insertions(+), 42 deletions(-)
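The race being closed is a classic check-then-act pattern: the old dev_pm_qos_expose_flags() tested dev->power.qos->flags_req before taking dev_pm_qos_mtx, so two concurrent callers could both see the slot empty, and the loser's freshly allocated request was leaked or clobbered the winner's. Below is a minimal userspace sketch of the before/after pattern, with pthreads standing in for the kernel mutex; the names expose_racy, expose_fixed, and slot are illustrative, not taken from the kernel source.

/* Illustrative sketch only: models the expose/expose race and the
 * error-path leak fixed by this commit. "slot" plays the role of
 * dev->power.qos->flags_req, "lock" the role of dev_pm_qos_mtx.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* like dev->power.qos->flags_req */

/* Old pattern: the -EEXIST check runs before the lock is taken, so two
 * racing callers can both pass it; one request is then leaked. */
static int expose_racy(void)
{
	if (slot)
		return -1;		/* models -EEXIST, decided too early */
	void *req = malloc(16);		/* models kzalloc() of the request */
	if (!req)
		return -1;		/* models -ENOMEM */
	slot = req;			/* may clobber a concurrent winner */
	return 0;
}

/* New pattern, as in the patch: allocate first, then re-check and
 * publish under the lock, freeing the request on every error path. */
static int expose_fixed(void)
{
	void *req = malloc(16);
	if (!req)
		return -1;		/* models -ENOMEM */
	pthread_mutex_lock(&lock);
	if (slot) {			/* -EEXIST decided under the lock */
		pthread_mutex_unlock(&lock);
		free(req);		/* the leak fix: req freed on error */
		return -1;
	}
	slot = req;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	int first = expose_fixed();
	int second = expose_fixed();	/* slot taken: fails, nothing leaks */
	printf("first expose: %d, second expose: %d\n", first, second);
	return 0;
}

The real expose paths additionally have to unlink the re-checked request from the constraints list via __dev_pm_qos_remove_request() before freeing it, since dev_pm_qos_add_request() has already linked it in; the sketch above omits that step.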
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..2159d62c858a 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -344,6 +344,13 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
 	if (!req->dev->power.qos)
 		return -ENODEV;
 
@@ -386,6 +393,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret = 0;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -393,13 +411,15 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
-
+	if (req->dev->power.qos) {
+		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+				       PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	} else {
+		ret = -ENODEV;
+	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +438,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -563,16 +567,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -588,22 +596,36 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (!dev->power.qos)
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
@@ -614,10 +636,14 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
+	mutex_lock(&dev_pm_qos_mtx);
+
 	if (dev->power.qos && dev->power.qos->latency_req) {
 		pm_qos_sysfs_remove_latency(dev);
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,24 +660,37 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (!dev->power.qos)
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
@@ -663,12 +702,16 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
  */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
 	if (dev->power.qos && dev->power.qos->flags_req) {
 		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-		pm_runtime_put(dev);
 	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +726,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (!dev->power.qos || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -697,9 +742,9 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-
 	return ret;
 }
 #endif /* CONFIG_PM_RUNTIME */