diff options
author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-02-15 07:59:07 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-02-15 07:59:07 -0500 |
commit | 4419fbd4b408c3a8634b3a8dd952055d0f0b601f (patch) | |
tree | dfa48db949d2b092a92a5adb3c070db6287a37be /drivers/cpufreq/cpufreq.c | |
parent | 95ecb407699825278f4031f153dbbe0f0713ff28 (diff) | |
parent | 191e5edf96dc4939f5db0605cc65de9f4d88d155 (diff) |
Merge branch 'pm-cpufreq'
* pm-cpufreq: (55 commits)
cpufreq / intel_pstate: Fix 32 bit build
cpufreq: conservative: Fix typos in comments
cpufreq: ondemand: Fix typos in comments
cpufreq: exynos: simplify .init() for setting policy->cpus
cpufreq: kirkwood: Add a cpufreq driver for Marvell Kirkwood SoCs
cpufreq/x86: Add P-state driver for sandy bridge.
cpufreq_stats: do not remove sysfs files if frequency table is not present
cpufreq: Do not track governor name for scaling drivers with internal governors.
cpufreq: Only call cpufreq_out_of_sync() for driver that implement cpufreq_driver.target()
cpufreq: Retrieve current frequency from scaling drivers with internal governors
cpufreq: Fix locking issues
cpufreq: Create a macro for unlock_policy_rwsem{read,write}
cpufreq: Remove unused HOTPLUG_CPU code
cpufreq: governors: Fix WARN_ON() for multi-policy platforms
cpufreq: ondemand: Replace down_differential tuner with adj_up_threshold
cpufreq / stats: Get rid of CPUFREQ_STATDEVICE_ATTR
cpufreq: Don't check cpu_online(policy->cpu)
cpufreq: add imx6q-cpufreq driver
cpufreq: Don't remove sysfs link for policy->cpu
cpufreq: Remove unnecessary use of policy->shared_type
...
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 459 |
1 file changed, 215 insertions, 244 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 99faadf454ec..b02824d092e7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock); | |||
59 | * mode before doing so. | 59 | * mode before doing so. |
60 | * | 60 | * |
61 | * Additional rules: | 61 | * Additional rules: |
62 | * - All holders of the lock should check to make sure that the CPU they | ||
63 | * are concerned with are online after they get the lock. | ||
64 | * - Governor routines that can be called in cpufreq hotplug path should not | 62 | * - Governor routines that can be called in cpufreq hotplug path should not |
65 | * take this sem as top level hotplug notifier handler takes this. | 63 | * take this sem as top level hotplug notifier handler takes this. |
66 | * - Lock should not be held across | 64 | * - Lock should not be held across |
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu); | |||
70 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | 68 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); |
71 | 69 | ||
72 | #define lock_policy_rwsem(mode, cpu) \ | 70 | #define lock_policy_rwsem(mode, cpu) \ |
73 | static int lock_policy_rwsem_##mode \ | 71 | static int lock_policy_rwsem_##mode(int cpu) \ |
74 | (int cpu) \ | ||
75 | { \ | 72 | { \ |
76 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ | 73 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ |
77 | BUG_ON(policy_cpu == -1); \ | 74 | BUG_ON(policy_cpu == -1); \ |
78 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | 75 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ |
79 | if (unlikely(!cpu_online(cpu))) { \ | ||
80 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
81 | return -1; \ | ||
82 | } \ | ||
83 | \ | 76 | \ |
84 | return 0; \ | 77 | return 0; \ |
85 | } | 78 | } |
86 | 79 | ||
87 | lock_policy_rwsem(read, cpu); | 80 | lock_policy_rwsem(read, cpu); |
88 | |||
89 | lock_policy_rwsem(write, cpu); | 81 | lock_policy_rwsem(write, cpu); |
90 | 82 | ||
91 | static void unlock_policy_rwsem_read(int cpu) | 83 | #define unlock_policy_rwsem(mode, cpu) \ |
92 | { | 84 | static void unlock_policy_rwsem_##mode(int cpu) \ |
93 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); | 85 | { \ |
94 | BUG_ON(policy_cpu == -1); | 86 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ |
95 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); | 87 | BUG_ON(policy_cpu == -1); \ |
96 | } | 88 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ |
97 | |||
98 | static void unlock_policy_rwsem_write(int cpu) | ||
99 | { | ||
100 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); | ||
101 | BUG_ON(policy_cpu == -1); | ||
102 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
103 | } | 89 | } |
104 | 90 | ||
91 | unlock_policy_rwsem(read, cpu); | ||
92 | unlock_policy_rwsem(write, cpu); | ||
105 | 93 | ||
106 | /* internal prototypes */ | 94 | /* internal prototypes */ |
107 | static int __cpufreq_governor(struct cpufreq_policy *policy, | 95 | static int __cpufreq_governor(struct cpufreq_policy *policy, |
@@ -180,6 +168,9 @@ err_out: | |||
180 | 168 | ||
181 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 169 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
182 | { | 170 | { |
171 | if (cpufreq_disabled()) | ||
172 | return NULL; | ||
173 | |||
183 | return __cpufreq_cpu_get(cpu, false); | 174 | return __cpufreq_cpu_get(cpu, false); |
184 | } | 175 | } |
185 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | 176 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); |
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) | |||
198 | 189 | ||
199 | void cpufreq_cpu_put(struct cpufreq_policy *data) | 190 | void cpufreq_cpu_put(struct cpufreq_policy *data) |
200 | { | 191 | { |
192 | if (cpufreq_disabled()) | ||
193 | return; | ||
194 | |||
201 | __cpufreq_cpu_put(data, false); | 195 | __cpufreq_cpu_put(data, false); |
202 | } | 196 | } |
203 | EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | 197 | EXPORT_SYMBOL_GPL(cpufreq_cpu_put); |
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
261 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | 255 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) |
262 | { | 256 | { |
263 | struct cpufreq_policy *policy; | 257 | struct cpufreq_policy *policy; |
258 | unsigned long flags; | ||
264 | 259 | ||
265 | BUG_ON(irqs_disabled()); | 260 | BUG_ON(irqs_disabled()); |
266 | 261 | ||
262 | if (cpufreq_disabled()) | ||
263 | return; | ||
264 | |||
267 | freqs->flags = cpufreq_driver->flags; | 265 | freqs->flags = cpufreq_driver->flags; |
268 | pr_debug("notification %u of frequency transition to %u kHz\n", | 266 | pr_debug("notification %u of frequency transition to %u kHz\n", |
269 | state, freqs->new); | 267 | state, freqs->new); |
270 | 268 | ||
269 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
271 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); | 270 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); |
271 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
272 | |||
272 | switch (state) { | 273 | switch (state) { |
273 | 274 | ||
274 | case CPUFREQ_PRECHANGE: | 275 | case CPUFREQ_PRECHANGE: |
@@ -542,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf) | |||
542 | */ | 543 | */ |
543 | static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) | 544 | static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) |
544 | { | 545 | { |
545 | if (cpumask_empty(policy->related_cpus)) | ||
546 | return show_cpus(policy->cpus, buf); | ||
547 | return show_cpus(policy->related_cpus, buf); | 546 | return show_cpus(policy->related_cpus, buf); |
548 | } | 547 | } |
549 | 548 | ||
@@ -699,87 +698,6 @@ static struct kobj_type ktype_cpufreq = { | |||
699 | .release = cpufreq_sysfs_release, | 698 | .release = cpufreq_sysfs_release, |
700 | }; | 699 | }; |
701 | 700 | ||
702 | /* | ||
703 | * Returns: | ||
704 | * Negative: Failure | ||
705 | * 0: Success | ||
706 | * Positive: When we have a managed CPU and the sysfs got symlinked | ||
707 | */ | ||
708 | static int cpufreq_add_dev_policy(unsigned int cpu, | ||
709 | struct cpufreq_policy *policy, | ||
710 | struct device *dev) | ||
711 | { | ||
712 | int ret = 0; | ||
713 | #ifdef CONFIG_SMP | ||
714 | unsigned long flags; | ||
715 | unsigned int j; | ||
716 | #ifdef CONFIG_HOTPLUG_CPU | ||
717 | struct cpufreq_governor *gov; | ||
718 | |||
719 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); | ||
720 | if (gov) { | ||
721 | policy->governor = gov; | ||
722 | pr_debug("Restoring governor %s for cpu %d\n", | ||
723 | policy->governor->name, cpu); | ||
724 | } | ||
725 | #endif | ||
726 | |||
727 | for_each_cpu(j, policy->cpus) { | ||
728 | struct cpufreq_policy *managed_policy; | ||
729 | |||
730 | if (cpu == j) | ||
731 | continue; | ||
732 | |||
733 | /* Check for existing affected CPUs. | ||
734 | * They may not be aware of it due to CPU Hotplug. | ||
735 | * cpufreq_cpu_put is called when the device is removed | ||
736 | * in __cpufreq_remove_dev() | ||
737 | */ | ||
738 | managed_policy = cpufreq_cpu_get(j); | ||
739 | if (unlikely(managed_policy)) { | ||
740 | |||
741 | /* Set proper policy_cpu */ | ||
742 | unlock_policy_rwsem_write(cpu); | ||
743 | per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu; | ||
744 | |||
745 | if (lock_policy_rwsem_write(cpu) < 0) { | ||
746 | /* Should not go through policy unlock path */ | ||
747 | if (cpufreq_driver->exit) | ||
748 | cpufreq_driver->exit(policy); | ||
749 | cpufreq_cpu_put(managed_policy); | ||
750 | return -EBUSY; | ||
751 | } | ||
752 | |||
753 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
754 | cpumask_copy(managed_policy->cpus, policy->cpus); | ||
755 | per_cpu(cpufreq_cpu_data, cpu) = managed_policy; | ||
756 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
757 | |||
758 | pr_debug("CPU already managed, adding link\n"); | ||
759 | ret = sysfs_create_link(&dev->kobj, | ||
760 | &managed_policy->kobj, | ||
761 | "cpufreq"); | ||
762 | if (ret) | ||
763 | cpufreq_cpu_put(managed_policy); | ||
764 | /* | ||
765 | * Success. We only needed to be added to the mask. | ||
766 | * Call driver->exit() because only the cpu parent of | ||
767 | * the kobj needed to call init(). | ||
768 | */ | ||
769 | if (cpufreq_driver->exit) | ||
770 | cpufreq_driver->exit(policy); | ||
771 | |||
772 | if (!ret) | ||
773 | return 1; | ||
774 | else | ||
775 | return ret; | ||
776 | } | ||
777 | } | ||
778 | #endif | ||
779 | return ret; | ||
780 | } | ||
781 | |||
782 | |||
783 | /* symlink affected CPUs */ | 701 | /* symlink affected CPUs */ |
784 | static int cpufreq_add_dev_symlink(unsigned int cpu, | 702 | static int cpufreq_add_dev_symlink(unsigned int cpu, |
785 | struct cpufreq_policy *policy) | 703 | struct cpufreq_policy *policy) |
@@ -793,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, | |||
793 | 711 | ||
794 | if (j == cpu) | 712 | if (j == cpu) |
795 | continue; | 713 | continue; |
796 | if (!cpu_online(j)) | ||
797 | continue; | ||
798 | 714 | ||
799 | pr_debug("CPU %u already managed, adding link\n", j); | 715 | pr_debug("CPU %u already managed, adding link\n", j); |
800 | managed_policy = cpufreq_cpu_get(cpu); | 716 | managed_policy = cpufreq_cpu_get(cpu); |
@@ -851,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu, | |||
851 | 767 | ||
852 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 768 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
853 | for_each_cpu(j, policy->cpus) { | 769 | for_each_cpu(j, policy->cpus) { |
854 | if (!cpu_online(j)) | ||
855 | continue; | ||
856 | per_cpu(cpufreq_cpu_data, j) = policy; | 770 | per_cpu(cpufreq_cpu_data, j) = policy; |
857 | per_cpu(cpufreq_policy_cpu, j) = policy->cpu; | 771 | per_cpu(cpufreq_policy_cpu, j) = policy->cpu; |
858 | } | 772 | } |
@@ -884,6 +798,42 @@ err_out_kobj_put: | |||
884 | return ret; | 798 | return ret; |
885 | } | 799 | } |
886 | 800 | ||
801 | #ifdef CONFIG_HOTPLUG_CPU | ||
802 | static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, | ||
803 | struct device *dev) | ||
804 | { | ||
805 | struct cpufreq_policy *policy; | ||
806 | int ret = 0; | ||
807 | unsigned long flags; | ||
808 | |||
809 | policy = cpufreq_cpu_get(sibling); | ||
810 | WARN_ON(!policy); | ||
811 | |||
812 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | ||
813 | |||
814 | lock_policy_rwsem_write(sibling); | ||
815 | |||
816 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
817 | |||
818 | cpumask_set_cpu(cpu, policy->cpus); | ||
819 | per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; | ||
820 | per_cpu(cpufreq_cpu_data, cpu) = policy; | ||
821 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
822 | |||
823 | unlock_policy_rwsem_write(sibling); | ||
824 | |||
825 | __cpufreq_governor(policy, CPUFREQ_GOV_START); | ||
826 | __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); | ||
827 | |||
828 | ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); | ||
829 | if (ret) { | ||
830 | cpufreq_cpu_put(policy); | ||
831 | return ret; | ||
832 | } | ||
833 | |||
834 | return 0; | ||
835 | } | ||
836 | #endif | ||
887 | 837 | ||
888 | /** | 838 | /** |
889 | * cpufreq_add_dev - add a CPU device | 839 | * cpufreq_add_dev - add a CPU device |
@@ -896,12 +846,12 @@ err_out_kobj_put: | |||
896 | */ | 846 | */ |
897 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | 847 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) |
898 | { | 848 | { |
899 | unsigned int cpu = dev->id; | 849 | unsigned int j, cpu = dev->id; |
900 | int ret = 0, found = 0; | 850 | int ret = -ENOMEM; |
901 | struct cpufreq_policy *policy; | 851 | struct cpufreq_policy *policy; |
902 | unsigned long flags; | 852 | unsigned long flags; |
903 | unsigned int j; | ||
904 | #ifdef CONFIG_HOTPLUG_CPU | 853 | #ifdef CONFIG_HOTPLUG_CPU |
854 | struct cpufreq_governor *gov; | ||
905 | int sibling; | 855 | int sibling; |
906 | #endif | 856 | #endif |
907 | 857 | ||
@@ -918,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
918 | cpufreq_cpu_put(policy); | 868 | cpufreq_cpu_put(policy); |
919 | return 0; | 869 | return 0; |
920 | } | 870 | } |
871 | |||
872 | #ifdef CONFIG_HOTPLUG_CPU | ||
873 | /* Check if this cpu was hot-unplugged earlier and has siblings */ | ||
874 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
875 | for_each_online_cpu(sibling) { | ||
876 | struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); | ||
877 | if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { | ||
878 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
879 | return cpufreq_add_policy_cpu(cpu, sibling, dev); | ||
880 | } | ||
881 | } | ||
882 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
883 | #endif | ||
921 | #endif | 884 | #endif |
922 | 885 | ||
923 | if (!try_module_get(cpufreq_driver->owner)) { | 886 | if (!try_module_get(cpufreq_driver->owner)) { |
@@ -925,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
925 | goto module_out; | 888 | goto module_out; |
926 | } | 889 | } |
927 | 890 | ||
928 | ret = -ENOMEM; | ||
929 | policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); | 891 | policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); |
930 | if (!policy) | 892 | if (!policy) |
931 | goto nomem_out; | 893 | goto nomem_out; |
@@ -937,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
937 | goto err_free_cpumask; | 899 | goto err_free_cpumask; |
938 | 900 | ||
939 | policy->cpu = cpu; | 901 | policy->cpu = cpu; |
902 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
940 | cpumask_copy(policy->cpus, cpumask_of(cpu)); | 903 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
941 | 904 | ||
942 | /* Initially set CPU itself as the policy_cpu */ | 905 | /* Initially set CPU itself as the policy_cpu */ |
943 | per_cpu(cpufreq_policy_cpu, cpu) = cpu; | 906 | per_cpu(cpufreq_policy_cpu, cpu) = cpu; |
944 | ret = (lock_policy_rwsem_write(cpu) < 0); | ||
945 | WARN_ON(ret); | ||
946 | 907 | ||
947 | init_completion(&policy->kobj_unregister); | 908 | init_completion(&policy->kobj_unregister); |
948 | INIT_WORK(&policy->update, handle_update); | 909 | INIT_WORK(&policy->update, handle_update); |
949 | 910 | ||
950 | /* Set governor before ->init, so that driver could check it */ | ||
951 | #ifdef CONFIG_HOTPLUG_CPU | ||
952 | for_each_online_cpu(sibling) { | ||
953 | struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); | ||
954 | if (cp && cp->governor && | ||
955 | (cpumask_test_cpu(cpu, cp->related_cpus))) { | ||
956 | policy->governor = cp->governor; | ||
957 | found = 1; | ||
958 | break; | ||
959 | } | ||
960 | } | ||
961 | #endif | ||
962 | if (!found) | ||
963 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
964 | /* call driver. From then on the cpufreq must be able | 911 | /* call driver. From then on the cpufreq must be able |
965 | * to accept all calls to ->verify and ->setpolicy for this CPU | 912 | * to accept all calls to ->verify and ->setpolicy for this CPU |
966 | */ | 913 | */ |
967 | ret = cpufreq_driver->init(policy); | 914 | ret = cpufreq_driver->init(policy); |
968 | if (ret) { | 915 | if (ret) { |
969 | pr_debug("initialization failed\n"); | 916 | pr_debug("initialization failed\n"); |
970 | goto err_unlock_policy; | 917 | goto err_set_policy_cpu; |
971 | } | 918 | } |
919 | |||
920 | /* related cpus should atleast have policy->cpus */ | ||
921 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | ||
922 | |||
923 | /* | ||
924 | * affected cpus must always be the one, which are online. We aren't | ||
925 | * managing offline cpus here. | ||
926 | */ | ||
927 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | ||
928 | |||
972 | policy->user_policy.min = policy->min; | 929 | policy->user_policy.min = policy->min; |
973 | policy->user_policy.max = policy->max; | 930 | policy->user_policy.max = policy->max; |
974 | 931 | ||
975 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 932 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
976 | CPUFREQ_START, policy); | 933 | CPUFREQ_START, policy); |
977 | 934 | ||
978 | ret = cpufreq_add_dev_policy(cpu, policy, dev); | 935 | #ifdef CONFIG_HOTPLUG_CPU |
979 | if (ret) { | 936 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); |
980 | if (ret > 0) | 937 | if (gov) { |
981 | /* This is a managed cpu, symlink created, | 938 | policy->governor = gov; |
982 | exit with 0 */ | 939 | pr_debug("Restoring governor %s for cpu %d\n", |
983 | ret = 0; | 940 | policy->governor->name, cpu); |
984 | goto err_unlock_policy; | ||
985 | } | 941 | } |
942 | #endif | ||
986 | 943 | ||
987 | ret = cpufreq_add_dev_interface(cpu, policy, dev); | 944 | ret = cpufreq_add_dev_interface(cpu, policy, dev); |
988 | if (ret) | 945 | if (ret) |
989 | goto err_out_unregister; | 946 | goto err_out_unregister; |
990 | 947 | ||
991 | unlock_policy_rwsem_write(cpu); | ||
992 | |||
993 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 948 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
994 | module_put(cpufreq_driver->owner); | 949 | module_put(cpufreq_driver->owner); |
995 | pr_debug("initialization complete\n"); | 950 | pr_debug("initialization complete\n"); |
996 | 951 | ||
997 | return 0; | 952 | return 0; |
998 | 953 | ||
999 | |||
1000 | err_out_unregister: | 954 | err_out_unregister: |
1001 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 955 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1002 | for_each_cpu(j, policy->cpus) | 956 | for_each_cpu(j, policy->cpus) |
@@ -1006,8 +960,8 @@ err_out_unregister: | |||
1006 | kobject_put(&policy->kobj); | 960 | kobject_put(&policy->kobj); |
1007 | wait_for_completion(&policy->kobj_unregister); | 961 | wait_for_completion(&policy->kobj_unregister); |
1008 | 962 | ||
1009 | err_unlock_policy: | 963 | err_set_policy_cpu: |
1010 | unlock_policy_rwsem_write(cpu); | 964 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
1011 | free_cpumask_var(policy->related_cpus); | 965 | free_cpumask_var(policy->related_cpus); |
1012 | err_free_cpumask: | 966 | err_free_cpumask: |
1013 | free_cpumask_var(policy->cpus); | 967 | free_cpumask_var(policy->cpus); |
@@ -1019,6 +973,22 @@ module_out: | |||
1019 | return ret; | 973 | return ret; |
1020 | } | 974 | } |
1021 | 975 | ||
976 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) | ||
977 | { | ||
978 | int j; | ||
979 | |||
980 | policy->last_cpu = policy->cpu; | ||
981 | policy->cpu = cpu; | ||
982 | |||
983 | for_each_cpu(j, policy->cpus) | ||
984 | per_cpu(cpufreq_policy_cpu, j) = cpu; | ||
985 | |||
986 | #ifdef CONFIG_CPU_FREQ_TABLE | ||
987 | cpufreq_frequency_table_update_policy_cpu(policy); | ||
988 | #endif | ||
989 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | ||
990 | CPUFREQ_UPDATE_POLICY_CPU, policy); | ||
991 | } | ||
1022 | 992 | ||
1023 | /** | 993 | /** |
1024 | * __cpufreq_remove_dev - remove a CPU device | 994 | * __cpufreq_remove_dev - remove a CPU device |
@@ -1029,129 +999,103 @@ module_out: | |||
1029 | */ | 999 | */ |
1030 | static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | 1000 | static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) |
1031 | { | 1001 | { |
1032 | unsigned int cpu = dev->id; | 1002 | unsigned int cpu = dev->id, ret, cpus; |
1033 | unsigned long flags; | 1003 | unsigned long flags; |
1034 | struct cpufreq_policy *data; | 1004 | struct cpufreq_policy *data; |
1035 | struct kobject *kobj; | 1005 | struct kobject *kobj; |
1036 | struct completion *cmp; | 1006 | struct completion *cmp; |
1037 | #ifdef CONFIG_SMP | ||
1038 | struct device *cpu_dev; | 1007 | struct device *cpu_dev; |
1039 | unsigned int j; | ||
1040 | #endif | ||
1041 | 1008 | ||
1042 | pr_debug("unregistering CPU %u\n", cpu); | 1009 | pr_debug("%s: unregistering CPU %u\n", __func__, cpu); |
1043 | 1010 | ||
1044 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 1011 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1012 | |||
1045 | data = per_cpu(cpufreq_cpu_data, cpu); | 1013 | data = per_cpu(cpufreq_cpu_data, cpu); |
1014 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | ||
1015 | |||
1016 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1046 | 1017 | ||
1047 | if (!data) { | 1018 | if (!data) { |
1048 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1019 | pr_debug("%s: No cpu_data found\n", __func__); |
1049 | unlock_policy_rwsem_write(cpu); | ||
1050 | return -EINVAL; | 1020 | return -EINVAL; |
1051 | } | 1021 | } |
1052 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | ||
1053 | 1022 | ||
1023 | if (cpufreq_driver->target) | ||
1024 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | ||
1054 | 1025 | ||
1055 | #ifdef CONFIG_SMP | 1026 | #ifdef CONFIG_HOTPLUG_CPU |
1056 | /* if this isn't the CPU which is the parent of the kobj, we | 1027 | if (!cpufreq_driver->setpolicy) |
1057 | * only need to unlink, put and exit | 1028 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), |
1058 | */ | 1029 | data->governor->name, CPUFREQ_NAME_LEN); |
1059 | if (unlikely(cpu != data->cpu)) { | ||
1060 | pr_debug("removing link\n"); | ||
1061 | cpumask_clear_cpu(cpu, data->cpus); | ||
1062 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1063 | kobj = &dev->kobj; | ||
1064 | cpufreq_cpu_put(data); | ||
1065 | unlock_policy_rwsem_write(cpu); | ||
1066 | sysfs_remove_link(kobj, "cpufreq"); | ||
1067 | return 0; | ||
1068 | } | ||
1069 | #endif | 1030 | #endif |
1070 | 1031 | ||
1071 | #ifdef CONFIG_SMP | 1032 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1033 | cpus = cpumask_weight(data->cpus); | ||
1034 | cpumask_clear_cpu(cpu, data->cpus); | ||
1035 | unlock_policy_rwsem_write(cpu); | ||
1072 | 1036 | ||
1073 | #ifdef CONFIG_HOTPLUG_CPU | 1037 | if (cpu != data->cpu) { |
1074 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name, | 1038 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
1075 | CPUFREQ_NAME_LEN); | 1039 | } else if (cpus > 1) { |
1076 | #endif | 1040 | /* first sibling now owns the new sysfs dir */ |
1041 | cpu_dev = get_cpu_device(cpumask_first(data->cpus)); | ||
1042 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); | ||
1043 | ret = kobject_move(&data->kobj, &cpu_dev->kobj); | ||
1044 | if (ret) { | ||
1045 | pr_err("%s: Failed to move kobj: %d", __func__, ret); | ||
1077 | 1046 | ||
1078 | /* if we have other CPUs still registered, we need to unlink them, | 1047 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1079 | * or else wait_for_completion below will lock up. Clean the | 1048 | cpumask_set_cpu(cpu, data->cpus); |
1080 | * per_cpu(cpufreq_cpu_data) while holding the lock, and remove | ||
1081 | * the sysfs links afterwards. | ||
1082 | */ | ||
1083 | if (unlikely(cpumask_weight(data->cpus) > 1)) { | ||
1084 | for_each_cpu(j, data->cpus) { | ||
1085 | if (j == cpu) | ||
1086 | continue; | ||
1087 | per_cpu(cpufreq_cpu_data, j) = NULL; | ||
1088 | } | ||
1089 | } | ||
1090 | 1049 | ||
1091 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1050 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1051 | per_cpu(cpufreq_cpu_data, cpu) = data; | ||
1052 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1092 | 1053 | ||
1093 | if (unlikely(cpumask_weight(data->cpus) > 1)) { | ||
1094 | for_each_cpu(j, data->cpus) { | ||
1095 | if (j == cpu) | ||
1096 | continue; | ||
1097 | pr_debug("removing link for cpu %u\n", j); | ||
1098 | #ifdef CONFIG_HOTPLUG_CPU | ||
1099 | strncpy(per_cpu(cpufreq_cpu_governor, j), | ||
1100 | data->governor->name, CPUFREQ_NAME_LEN); | ||
1101 | #endif | ||
1102 | cpu_dev = get_cpu_device(j); | ||
1103 | kobj = &cpu_dev->kobj; | ||
1104 | unlock_policy_rwsem_write(cpu); | 1054 | unlock_policy_rwsem_write(cpu); |
1105 | sysfs_remove_link(kobj, "cpufreq"); | ||
1106 | lock_policy_rwsem_write(cpu); | ||
1107 | cpufreq_cpu_put(data); | ||
1108 | } | ||
1109 | } | ||
1110 | #else | ||
1111 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1112 | #endif | ||
1113 | 1055 | ||
1114 | if (cpufreq_driver->target) | 1056 | ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, |
1115 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 1057 | "cpufreq"); |
1058 | return -EINVAL; | ||
1059 | } | ||
1116 | 1060 | ||
1117 | kobj = &data->kobj; | 1061 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1118 | cmp = &data->kobj_unregister; | 1062 | update_policy_cpu(data, cpu_dev->id); |
1119 | unlock_policy_rwsem_write(cpu); | 1063 | unlock_policy_rwsem_write(cpu); |
1120 | kobject_put(kobj); | 1064 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", |
1065 | __func__, cpu_dev->id, cpu); | ||
1066 | } | ||
1121 | 1067 | ||
1122 | /* we need to make sure that the underlying kobj is actually | 1068 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); |
1123 | * not referenced anymore by anybody before we proceed with | 1069 | cpufreq_cpu_put(data); |
1124 | * unloading. | ||
1125 | */ | ||
1126 | pr_debug("waiting for dropping of refcount\n"); | ||
1127 | wait_for_completion(cmp); | ||
1128 | pr_debug("wait complete\n"); | ||
1129 | 1070 | ||
1130 | lock_policy_rwsem_write(cpu); | 1071 | /* If cpu is last user of policy, free policy */ |
1131 | if (cpufreq_driver->exit) | 1072 | if (cpus == 1) { |
1132 | cpufreq_driver->exit(data); | 1073 | lock_policy_rwsem_read(cpu); |
1133 | unlock_policy_rwsem_write(cpu); | 1074 | kobj = &data->kobj; |
1075 | cmp = &data->kobj_unregister; | ||
1076 | unlock_policy_rwsem_read(cpu); | ||
1077 | kobject_put(kobj); | ||
1078 | |||
1079 | /* we need to make sure that the underlying kobj is actually | ||
1080 | * not referenced anymore by anybody before we proceed with | ||
1081 | * unloading. | ||
1082 | */ | ||
1083 | pr_debug("waiting for dropping of refcount\n"); | ||
1084 | wait_for_completion(cmp); | ||
1085 | pr_debug("wait complete\n"); | ||
1134 | 1086 | ||
1135 | #ifdef CONFIG_HOTPLUG_CPU | 1087 | if (cpufreq_driver->exit) |
1136 | /* when the CPU which is the parent of the kobj is hotplugged | 1088 | cpufreq_driver->exit(data); |
1137 | * offline, check for siblings, and create cpufreq sysfs interface | ||
1138 | * and symlinks | ||
1139 | */ | ||
1140 | if (unlikely(cpumask_weight(data->cpus) > 1)) { | ||
1141 | /* first sibling now owns the new sysfs dir */ | ||
1142 | cpumask_clear_cpu(cpu, data->cpus); | ||
1143 | cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL); | ||
1144 | 1089 | ||
1145 | /* finally remove our own symlink */ | 1090 | free_cpumask_var(data->related_cpus); |
1146 | lock_policy_rwsem_write(cpu); | 1091 | free_cpumask_var(data->cpus); |
1147 | __cpufreq_remove_dev(dev, sif); | 1092 | kfree(data); |
1093 | } else if (cpufreq_driver->target) { | ||
1094 | __cpufreq_governor(data, CPUFREQ_GOV_START); | ||
1095 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | ||
1148 | } | 1096 | } |
1149 | #endif | ||
1150 | |||
1151 | free_cpumask_var(data->related_cpus); | ||
1152 | free_cpumask_var(data->cpus); | ||
1153 | kfree(data); | ||
1154 | 1097 | ||
1098 | per_cpu(cpufreq_policy_cpu, cpu) = -1; | ||
1155 | return 0; | 1099 | return 0; |
1156 | } | 1100 | } |
1157 | 1101 | ||
@@ -1164,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | |||
1164 | if (cpu_is_offline(cpu)) | 1108 | if (cpu_is_offline(cpu)) |
1165 | return 0; | 1109 | return 0; |
1166 | 1110 | ||
1167 | if (unlikely(lock_policy_rwsem_write(cpu))) | ||
1168 | BUG(); | ||
1169 | |||
1170 | retval = __cpufreq_remove_dev(dev, sif); | 1111 | retval = __cpufreq_remove_dev(dev, sif); |
1171 | return retval; | 1112 | return retval; |
1172 | } | 1113 | } |
@@ -1215,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, | |||
1215 | */ | 1156 | */ |
1216 | unsigned int cpufreq_quick_get(unsigned int cpu) | 1157 | unsigned int cpufreq_quick_get(unsigned int cpu) |
1217 | { | 1158 | { |
1218 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1159 | struct cpufreq_policy *policy; |
1219 | unsigned int ret_freq = 0; | 1160 | unsigned int ret_freq = 0; |
1220 | 1161 | ||
1162 | if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) | ||
1163 | return cpufreq_driver->get(cpu); | ||
1164 | |||
1165 | policy = cpufreq_cpu_get(cpu); | ||
1221 | if (policy) { | 1166 | if (policy) { |
1222 | ret_freq = policy->cur; | 1167 | ret_freq = policy->cur; |
1223 | cpufreq_cpu_put(policy); | 1168 | cpufreq_cpu_put(policy); |
@@ -1385,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = { | |||
1385 | .resume = cpufreq_bp_resume, | 1330 | .resume = cpufreq_bp_resume, |
1386 | }; | 1331 | }; |
1387 | 1332 | ||
1333 | /** | ||
1334 | * cpufreq_get_current_driver - return current driver's name | ||
1335 | * | ||
1336 | * Return the name string of the currently loaded cpufreq driver | ||
1337 | * or NULL, if none. | ||
1338 | */ | ||
1339 | const char *cpufreq_get_current_driver(void) | ||
1340 | { | ||
1341 | if (cpufreq_driver) | ||
1342 | return cpufreq_driver->name; | ||
1343 | |||
1344 | return NULL; | ||
1345 | } | ||
1346 | EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); | ||
1388 | 1347 | ||
1389 | /********************************************************************* | 1348 | /********************************************************************* |
1390 | * NOTIFIER LISTS INTERFACE * | 1349 | * NOTIFIER LISTS INTERFACE * |
@@ -1407,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | |||
1407 | { | 1366 | { |
1408 | int ret; | 1367 | int ret; |
1409 | 1368 | ||
1369 | if (cpufreq_disabled()) | ||
1370 | return -EINVAL; | ||
1371 | |||
1410 | WARN_ON(!init_cpufreq_transition_notifier_list_called); | 1372 | WARN_ON(!init_cpufreq_transition_notifier_list_called); |
1411 | 1373 | ||
1412 | switch (list) { | 1374 | switch (list) { |
@@ -1441,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | |||
1441 | { | 1403 | { |
1442 | int ret; | 1404 | int ret; |
1443 | 1405 | ||
1406 | if (cpufreq_disabled()) | ||
1407 | return -EINVAL; | ||
1408 | |||
1444 | switch (list) { | 1409 | switch (list) { |
1445 | case CPUFREQ_TRANSITION_NOTIFIER: | 1410 | case CPUFREQ_TRANSITION_NOTIFIER: |
1446 | ret = srcu_notifier_chain_unregister( | 1411 | ret = srcu_notifier_chain_unregister( |
@@ -1486,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1486 | if (target_freq == policy->cur) | 1451 | if (target_freq == policy->cur) |
1487 | return 0; | 1452 | return 0; |
1488 | 1453 | ||
1489 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | 1454 | if (cpufreq_driver->target) |
1490 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1455 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1491 | 1456 | ||
1492 | return retval; | 1457 | return retval; |
@@ -1521,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) | |||
1521 | { | 1486 | { |
1522 | int ret = 0; | 1487 | int ret = 0; |
1523 | 1488 | ||
1524 | if (!(cpu_online(cpu) && cpufreq_driver->getavg)) | 1489 | if (cpufreq_disabled()) |
1490 | return ret; | ||
1491 | |||
1492 | if (!cpufreq_driver->getavg) | ||
1525 | return 0; | 1493 | return 0; |
1526 | 1494 | ||
1527 | policy = cpufreq_cpu_get(policy->cpu); | 1495 | policy = cpufreq_cpu_get(policy->cpu); |
@@ -1576,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, | |||
1576 | policy->cpu, event); | 1544 | policy->cpu, event); |
1577 | ret = policy->governor->governor(policy, event); | 1545 | ret = policy->governor->governor(policy, event); |
1578 | 1546 | ||
1547 | if (event == CPUFREQ_GOV_START) | ||
1548 | policy->governor->initialized++; | ||
1549 | else if (event == CPUFREQ_GOV_STOP) | ||
1550 | policy->governor->initialized--; | ||
1551 | |||
1579 | /* we keep one module reference alive for | 1552 | /* we keep one module reference alive for |
1580 | each CPU governed by this CPU */ | 1553 | each CPU governed by this CPU */ |
1581 | if ((event != CPUFREQ_GOV_START) || ret) | 1554 | if ((event != CPUFREQ_GOV_START) || ret) |
@@ -1599,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor) | |||
1599 | 1572 | ||
1600 | mutex_lock(&cpufreq_governor_mutex); | 1573 | mutex_lock(&cpufreq_governor_mutex); |
1601 | 1574 | ||
1575 | governor->initialized = 0; | ||
1602 | err = -EBUSY; | 1576 | err = -EBUSY; |
1603 | if (__find_governor(governor->name) == NULL) { | 1577 | if (__find_governor(governor->name) == NULL) { |
1604 | err = 0; | 1578 | err = 0; |
@@ -1796,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1796 | pr_debug("Driver did not initialize current freq"); | 1770 | pr_debug("Driver did not initialize current freq"); |
1797 | data->cur = policy.cur; | 1771 | data->cur = policy.cur; |
1798 | } else { | 1772 | } else { |
1799 | if (data->cur != policy.cur) | 1773 | if (data->cur != policy.cur && cpufreq_driver->target) |
1800 | cpufreq_out_of_sync(cpu, data->cur, | 1774 | cpufreq_out_of_sync(cpu, data->cur, |
1801 | policy.cur); | 1775 | policy.cur); |
1802 | } | 1776 | } |
@@ -1828,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1828 | break; | 1802 | break; |
1829 | case CPU_DOWN_PREPARE: | 1803 | case CPU_DOWN_PREPARE: |
1830 | case CPU_DOWN_PREPARE_FROZEN: | 1804 | case CPU_DOWN_PREPARE_FROZEN: |
1831 | if (unlikely(lock_policy_rwsem_write(cpu))) | ||
1832 | BUG(); | ||
1833 | |||
1834 | __cpufreq_remove_dev(dev, NULL); | 1805 | __cpufreq_remove_dev(dev, NULL); |
1835 | break; | 1806 | break; |
1836 | case CPU_DOWN_FAILED: | 1807 | case CPU_DOWN_FAILED: |