diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-04-04 11:41:10 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-04-04 11:41:10 -0400 |
commit | 6cb437acd93a01fdbd6bac9c78ac8159c644e576 (patch) | |
tree | dcea21b85b8beb7fc4ac12420cea34f4307507e3 /drivers | |
parent | fc1a7fe8b6442a941a8af0cdcc9ff6b666707d60 (diff) | |
parent | beb0ff390e208e71668a1389e3db02631a88cff5 (diff) |
Merge branch 'pm-fixes' into fixes
* pm-fixes:
cpufreq: Correct header guards typo
cpufreq: check OF node /cpus presence before dereferencing it
PM / devfreq: Fix compiler warnings for CONFIG_PM_DEVFREQ unset
PM / QoS: Avoid possible deadlock related to sysfs access
USB / PM: Don't try to hide PM QoS flags from usb_port_device_release()
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/base/power/qos.c | 60 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq-cpu0.c | 10 | ||||
-rw-r--r-- | drivers/cpufreq/cpufreq_governor.h | 6 | ||||
-rw-r--r-- | drivers/usb/core/port.c | 1 |
4 files changed, 58 insertions(+), 19 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef258..71671c42ef45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "power.h" | 46 | #include "power.h" |
47 | 47 | ||
48 | static DEFINE_MUTEX(dev_pm_qos_mtx); | 48 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
49 | static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); | ||
49 | 50 | ||
50 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 51 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
51 | 52 | ||
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
216 | struct pm_qos_constraints *c; | 217 | struct pm_qos_constraints *c; |
217 | struct pm_qos_flags *f; | 218 | struct pm_qos_flags *f; |
218 | 219 | ||
219 | mutex_lock(&dev_pm_qos_mtx); | 220 | mutex_lock(&dev_pm_qos_sysfs_mtx); |
220 | 221 | ||
221 | /* | 222 | /* |
222 | * If the device's PM QoS resume latency limit or PM QoS flags have been | 223 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
223 | * exposed to user space, they have to be hidden at this point. | 224 | * exposed to user space, they have to be hidden at this point. |
224 | */ | 225 | */ |
226 | pm_qos_sysfs_remove_latency(dev); | ||
227 | pm_qos_sysfs_remove_flags(dev); | ||
228 | |||
229 | mutex_lock(&dev_pm_qos_mtx); | ||
230 | |||
225 | __dev_pm_qos_hide_latency_limit(dev); | 231 | __dev_pm_qos_hide_latency_limit(dev); |
226 | __dev_pm_qos_hide_flags(dev); | 232 | __dev_pm_qos_hide_flags(dev); |
227 | 233 | ||
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
254 | 260 | ||
255 | out: | 261 | out: |
256 | mutex_unlock(&dev_pm_qos_mtx); | 262 | mutex_unlock(&dev_pm_qos_mtx); |
263 | |||
264 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
257 | } | 265 | } |
258 | 266 | ||
259 | /** | 267 | /** |
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, | |||
558 | kfree(req); | 566 | kfree(req); |
559 | } | 567 | } |
560 | 568 | ||
569 | static void dev_pm_qos_drop_user_request(struct device *dev, | ||
570 | enum dev_pm_qos_req_type type) | ||
571 | { | ||
572 | mutex_lock(&dev_pm_qos_mtx); | ||
573 | __dev_pm_qos_drop_user_request(dev, type); | ||
574 | mutex_unlock(&dev_pm_qos_mtx); | ||
575 | } | ||
576 | |||
561 | /** | 577 | /** |
562 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | 578 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. |
563 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | 579 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. |
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
581 | return ret; | 597 | return ret; |
582 | } | 598 | } |
583 | 599 | ||
600 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
601 | |||
584 | mutex_lock(&dev_pm_qos_mtx); | 602 | mutex_lock(&dev_pm_qos_mtx); |
585 | 603 | ||
586 | if (IS_ERR_OR_NULL(dev->power.qos)) | 604 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
591 | if (ret < 0) { | 609 | if (ret < 0) { |
592 | __dev_pm_qos_remove_request(req); | 610 | __dev_pm_qos_remove_request(req); |
593 | kfree(req); | 611 | kfree(req); |
612 | mutex_unlock(&dev_pm_qos_mtx); | ||
594 | goto out; | 613 | goto out; |
595 | } | 614 | } |
596 | |||
597 | dev->power.qos->latency_req = req; | 615 | dev->power.qos->latency_req = req; |
616 | |||
617 | mutex_unlock(&dev_pm_qos_mtx); | ||
618 | |||
598 | ret = pm_qos_sysfs_add_latency(dev); | 619 | ret = pm_qos_sysfs_add_latency(dev); |
599 | if (ret) | 620 | if (ret) |
600 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 621 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
601 | 622 | ||
602 | out: | 623 | out: |
603 | mutex_unlock(&dev_pm_qos_mtx); | 624 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
604 | return ret; | 625 | return ret; |
605 | } | 626 | } |
606 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | 627 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); |
607 | 628 | ||
608 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) | 629 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) |
609 | { | 630 | { |
610 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { | 631 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) |
611 | pm_qos_sysfs_remove_latency(dev); | ||
612 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 632 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
613 | } | ||
614 | } | 633 | } |
615 | 634 | ||
616 | /** | 635 | /** |
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) | |||
619 | */ | 638 | */ |
620 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 639 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
621 | { | 640 | { |
641 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
642 | |||
643 | pm_qos_sysfs_remove_latency(dev); | ||
644 | |||
622 | mutex_lock(&dev_pm_qos_mtx); | 645 | mutex_lock(&dev_pm_qos_mtx); |
623 | __dev_pm_qos_hide_latency_limit(dev); | 646 | __dev_pm_qos_hide_latency_limit(dev); |
624 | mutex_unlock(&dev_pm_qos_mtx); | 647 | mutex_unlock(&dev_pm_qos_mtx); |
648 | |||
649 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
625 | } | 650 | } |
626 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 651 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
627 | 652 | ||
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
649 | } | 674 | } |
650 | 675 | ||
651 | pm_runtime_get_sync(dev); | 676 | pm_runtime_get_sync(dev); |
677 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
678 | |||
652 | mutex_lock(&dev_pm_qos_mtx); | 679 | mutex_lock(&dev_pm_qos_mtx); |
653 | 680 | ||
654 | if (IS_ERR_OR_NULL(dev->power.qos)) | 681 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
659 | if (ret < 0) { | 686 | if (ret < 0) { |
660 | __dev_pm_qos_remove_request(req); | 687 | __dev_pm_qos_remove_request(req); |
661 | kfree(req); | 688 | kfree(req); |
689 | mutex_unlock(&dev_pm_qos_mtx); | ||
662 | goto out; | 690 | goto out; |
663 | } | 691 | } |
664 | |||
665 | dev->power.qos->flags_req = req; | 692 | dev->power.qos->flags_req = req; |
693 | |||
694 | mutex_unlock(&dev_pm_qos_mtx); | ||
695 | |||
666 | ret = pm_qos_sysfs_add_flags(dev); | 696 | ret = pm_qos_sysfs_add_flags(dev); |
667 | if (ret) | 697 | if (ret) |
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 698 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
669 | 699 | ||
670 | out: | 700 | out: |
671 | mutex_unlock(&dev_pm_qos_mtx); | 701 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
672 | pm_runtime_put(dev); | 702 | pm_runtime_put(dev); |
673 | return ret; | 703 | return ret; |
674 | } | 704 | } |
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | |||
676 | 706 | ||
677 | static void __dev_pm_qos_hide_flags(struct device *dev) | 707 | static void __dev_pm_qos_hide_flags(struct device *dev) |
678 | { | 708 | { |
679 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { | 709 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) |
680 | pm_qos_sysfs_remove_flags(dev); | ||
681 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 710 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
682 | } | ||
683 | } | 711 | } |
684 | 712 | ||
685 | /** | 713 | /** |
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) | |||
689 | void dev_pm_qos_hide_flags(struct device *dev) | 717 | void dev_pm_qos_hide_flags(struct device *dev) |
690 | { | 718 | { |
691 | pm_runtime_get_sync(dev); | 719 | pm_runtime_get_sync(dev); |
720 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
721 | |||
722 | pm_qos_sysfs_remove_flags(dev); | ||
723 | |||
692 | mutex_lock(&dev_pm_qos_mtx); | 724 | mutex_lock(&dev_pm_qos_mtx); |
693 | __dev_pm_qos_hide_flags(dev); | 725 | __dev_pm_qos_hide_flags(dev); |
694 | mutex_unlock(&dev_pm_qos_mtx); | 726 | mutex_unlock(&dev_pm_qos_mtx); |
727 | |||
728 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
695 | pm_runtime_put(dev); | 729 | pm_runtime_put(dev); |
696 | } | 730 | } |
697 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | 731 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
178 | 178 | ||
179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
180 | { | 180 | { |
181 | struct device_node *np; | 181 | struct device_node *np, *parent; |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | for_each_child_of_node(of_find_node_by_path("/cpus"), np) { | 184 | parent = of_find_node_by_path("/cpus"); |
185 | if (!parent) { | ||
186 | pr_err("failed to find OF /cpus\n"); | ||
187 | return -ENOENT; | ||
188 | } | ||
189 | |||
190 | for_each_child_of_node(parent, np) { | ||
185 | if (of_get_property(np, "operating-points", NULL)) | 191 | if (of_get_property(np, "operating-points", NULL)) |
186 | break; | 192 | break; |
187 | } | 193 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -14,8 +14,8 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef _CPUFREQ_GOVERNER_H | 17 | #ifndef _CPUFREQ_GOVERNOR_H |
18 | #define _CPUFREQ_GOVERNER_H | 18 | #define _CPUFREQ_GOVERNOR_H |
19 | 19 | ||
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/kobject.h> | 21 | #include <linux/kobject.h> |
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, | |||
175 | unsigned int sampling_rate); | 175 | unsigned int sampling_rate); |
176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | 176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, |
177 | struct cpufreq_policy *policy, unsigned int event); | 177 | struct cpufreq_policy *policy, unsigned int event); |
178 | #endif /* _CPUFREQ_GOVERNER_H */ | 178 | #endif /* _CPUFREQ_GOVERNOR_H */ |
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c | |||
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) | |||
67 | { | 67 | { |
68 | struct usb_port *port_dev = to_usb_port(dev); | 68 | struct usb_port *port_dev = to_usb_port(dev); |
69 | 69 | ||
70 | dev_pm_qos_hide_flags(dev); | ||
71 | kfree(port_dev); | 70 | kfree(port_dev); |
72 | } | 71 | } |
73 | 72 | ||