Diffstat (limited to 'drivers/cpufreq/cpufreq.c')

 drivers/cpufreq/cpufreq.c | 460 ++++++++++++-----------
 1 file changed, 215 insertions(+), 245 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  * mode before doing so.
  *
  * Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- *   are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
  * - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)					\
-static int lock_policy_rwsem_##mode					\
-(int cpu)								\
+static int lock_policy_rwsem_##mode(int cpu)				\
 {									\
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
 	BUG_ON(policy_cpu == -1);					\
 	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
-	if (unlikely(!cpu_online(cpu))) {				\
-		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
-		return -1;						\
-	}								\
 									\
 	return 0;							\
 }
 
 lock_policy_rwsem(read, cpu);
-
 lock_policy_rwsem(write, cpu);
 
-static void unlock_policy_rwsem_read(int cpu)
-{
-	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
-	BUG_ON(policy_cpu == -1);
-	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
-	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
-	BUG_ON(policy_cpu == -1);
-	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
+#define unlock_policy_rwsem(mode, cpu)					\
+static void unlock_policy_rwsem_##mode(int cpu)			\
+{									\
+	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
+	BUG_ON(policy_cpu == -1);					\
+	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
+}
 
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
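The hunk above replaces the two open-coded unlock helpers with a generating macro, matching the existing lock_policy_rwsem() pattern. A minimal userspace sketch of the same token-pasting technique, using POSIX rwlocks purely for illustration (names here are hypothetical, not the kernel's):

	#include <pthread.h>

	static pthread_rwlock_t policy_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* One macro stamps out a family of wrappers; ##mode is pasted into
	 * both the function name and the pthread call it forwards to. */
	#define define_lock_policy(mode)			\
	static void lock_policy_##mode(void)			\
	{							\
		pthread_rwlock_##mode##lock(&policy_lock);	\
	}

	define_lock_policy(rd);	/* expands to lock_policy_rd(): pthread_rwlock_rdlock */
	define_lock_policy(wr);	/* expands to lock_policy_wr(): pthread_rwlock_wrlock */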
@@ -180,6 +168,9 @@ err_out:
 
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
+	if (cpufreq_disabled())
+		return NULL;
+
 	return __cpufreq_cpu_get(cpu, false);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
 
 void cpufreq_cpu_put(struct cpufreq_policy *data)
 {
+	if (cpufreq_disabled())
+		return;
+
 	__cpufreq_cpu_put(data, false);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 {
 	struct cpufreq_policy *policy;
+	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
 
+	if (cpufreq_disabled())
+		return;
+
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 	switch (state) {
 
 	case CPUFREQ_PRECHANGE:
@@ -294,7 +295,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
 		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
 			(unsigned long)freqs->cpu);
-		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
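With the per-CPU policy pointer now read under cpufreq_driver_lock and the duplicate power tracepoint dropped, clients still observe transitions exactly as before, through the SRCU notifier chain. A hedged sketch of a module consuming these PRECHANGE/POSTCHANGE events via the notifier API this file exports (module name and printout are illustrative):

	#include <linux/module.h>
	#include <linux/cpufreq.h>

	static int freq_transition_cb(struct notifier_block *nb,
				      unsigned long state, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		/* POSTCHANGE fires after the hardware switch completed */
		if (state == CPUFREQ_POSTCHANGE)
			pr_info("cpu%u: %u -> %u kHz\n",
				freqs->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block freq_nb = {
		.notifier_call = freq_transition_cb,
	};

	static int __init freq_watch_init(void)
	{
		return cpufreq_register_notifier(&freq_nb,
						 CPUFREQ_TRANSITION_NOTIFIER);
	}
	module_init(freq_watch_init);

	static void __exit freq_watch_exit(void)
	{
		cpufreq_unregister_notifier(&freq_nb,
					    CPUFREQ_TRANSITION_NOTIFIER);
	}
	module_exit(freq_watch_exit);
	MODULE_LICENSE("GPL");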
@@ -543,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	if (cpumask_empty(policy->related_cpus))
-		return show_cpus(policy->cpus, buf);
 	return show_cpus(policy->related_cpus, buf);
 }
 
@@ -700,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-/*
- * Returns:
- *   Negative: Failure
- *   0:        Success
- *   Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
-				  struct cpufreq_policy *policy,
-				  struct device *dev)
-{
-	int ret = 0;
-#ifdef CONFIG_SMP
-	unsigned long flags;
-	unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
-	struct cpufreq_governor *gov;
-
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-	if (gov) {
-		policy->governor = gov;
-		pr_debug("Restoring governor %s for cpu %d\n",
-		       policy->governor->name, cpu);
-	}
-#endif
-
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-
-		if (cpu == j)
-			continue;
-
-		/* Check for existing affected CPUs.
-		 * They may not be aware of it due to CPU Hotplug.
-		 * cpufreq_cpu_put is called when the device is removed
-		 * in __cpufreq_remove_dev()
-		 */
-		managed_policy = cpufreq_cpu_get(j);
-		if (unlikely(managed_policy)) {
-
-			/* Set proper policy_cpu */
-			unlock_policy_rwsem_write(cpu);
-			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
-			if (lock_policy_rwsem_write(cpu) < 0) {
-				/* Should not go through policy unlock path */
-				if (cpufreq_driver->exit)
-					cpufreq_driver->exit(policy);
-				cpufreq_cpu_put(managed_policy);
-				return -EBUSY;
-			}
-
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			cpumask_copy(managed_policy->cpus, policy->cpus);
-			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-			pr_debug("CPU already managed, adding link\n");
-			ret = sysfs_create_link(&dev->kobj,
-						&managed_policy->kobj,
-						"cpufreq");
-			if (ret)
-				cpufreq_cpu_put(managed_policy);
-			/*
-			 * Success. We only needed to be added to the mask.
-			 * Call driver->exit() because only the cpu parent of
-			 * the kobj needed to call init().
-			 */
-			if (cpufreq_driver->exit)
-				cpufreq_driver->exit(policy);
-
-			if (!ret)
-				return 1;
-			else
-				return ret;
-		}
-	}
-#endif
-	return ret;
-}
-
-
 /* symlink affected CPUs */
 static int cpufreq_add_dev_symlink(unsigned int cpu,
 				   struct cpufreq_policy *policy)
@@ -794,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
 
 		if (j == cpu)
 			continue;
-		if (!cpu_online(j))
-			continue;
 
 		pr_debug("CPU %u already managed, adding link\n", j);
 		managed_policy = cpufreq_cpu_get(cpu);
@@ -852,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
-		if (!cpu_online(j))
-			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 	}
@@ -885,6 +798,42 @@ err_out_kobj_put:
 	return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+				  struct device *dev)
+{
+	struct cpufreq_policy *policy;
+	int ret = 0;
+	unsigned long flags;
+
+	policy = cpufreq_cpu_get(sibling);
+	WARN_ON(!policy);
+
+	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+	lock_policy_rwsem_write(sibling);
+
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+	cpumask_set_cpu(cpu, policy->cpus);
+	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+	per_cpu(cpufreq_cpu_data, cpu) = policy;
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	unlock_policy_rwsem_write(sibling);
+
+	__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	if (ret) {
+		cpufreq_cpu_put(policy);
+		return ret;
+	}
+
+	return 0;
+}
+#endif
 
 /**
  * cpufreq_add_dev - add a CPU device
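cpufreq_add_policy_cpu() is the new fast path for a CPU that comes back from hot-unplug while a sibling still holds the cluster's policy: the governor is stopped, the CPU is added to policy->cpus and the per-CPU lookup tables under cpufreq_driver_lock, the governor is restarted, and only a sysfs symlink is created. A distilled userspace model of that shared-policy bookkeeping (all names illustrative):

	#include <pthread.h>
	#include <stdint.h>

	#define NR_CPUS 8

	struct policy {
		uint32_t cpus;	/* bitmask of CPUs sharing this policy */
		int owner;	/* CPU owning the sysfs directory */
	};

	static struct policy *cpu_policy[NR_CPUS];
	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Attach 'cpu' to the policy a 'sibling' already uses: mask and
	 * table updates under the lock, nothing allocated or re-initialized. */
	static void add_policy_cpu(int cpu, int sibling)
	{
		struct policy *p;

		pthread_mutex_lock(&table_lock);
		p = cpu_policy[sibling];
		p->cpus |= 1u << cpu;
		cpu_policy[cpu] = p;
		pthread_mutex_unlock(&table_lock);
	}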
@@ -897,12 +846,12 @@ err_out_kobj_put:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id;
-	int ret = 0, found = 0;
+	unsigned int j, cpu = dev->id;
+	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
-	unsigned int j;
 #ifdef CONFIG_HOTPLUG_CPU
+	struct cpufreq_governor *gov;
 	int sibling;
 #endif
 
@@ -919,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		cpufreq_cpu_put(policy);
 		return 0;
 	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/* Check if this cpu was hot-unplugged earlier and has siblings */
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_online_cpu(sibling) {
+		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			return cpufreq_add_policy_cpu(cpu, sibling, dev);
+		}
+	}
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
 #endif
 
 	if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		goto module_out;
 	}
 
-	ret = -ENOMEM;
 	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
 	if (!policy)
 		goto nomem_out;
@@ -938,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		goto err_free_cpumask;
 
 	policy->cpu = cpu;
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
 
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
-	/* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
-	for_each_online_cpu(sibling) {
-		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
-		if (cp && cp->governor &&
-		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
-			policy->governor = cp->governor;
-			found = 1;
-			break;
-		}
-	}
-#endif
-	if (!found)
-		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
 	 */
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		pr_debug("initialization failed\n");
-		goto err_unlock_policy;
+		goto err_set_policy_cpu;
 	}
+
+	/* related cpus should atleast have policy->cpus */
+	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+	/*
+	 * affected cpus must always be the one, which are online. We aren't
+	 * managing offline cpus here.
+	 */
+	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
 	policy->user_policy.min = policy->min;
 	policy->user_policy.max = policy->max;
 
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	ret = cpufreq_add_dev_policy(cpu, policy, dev);
-	if (ret) {
-		if (ret > 0)
-			/* This is a managed cpu, symlink created,
-			   exit with 0 */
-			ret = 0;
-		goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+	if (gov) {
+		policy->governor = gov;
+		pr_debug("Restoring governor %s for cpu %d\n",
+		       policy->governor->name, cpu);
 	}
+#endif
 
 	ret = cpufreq_add_dev_interface(cpu, policy, dev);
 	if (ret)
 		goto err_out_unregister;
 
-	unlock_policy_rwsem_write(cpu);
-
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	module_put(cpufreq_driver->owner);
 	pr_debug("initialization complete\n");
 
 	return 0;
 
-
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
@@ -1007,8 +960,8 @@ err_out_unregister:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
-err_unlock_policy:
-	unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+	per_cpu(cpufreq_policy_cpu, cpu) = -1;
 	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
 	free_cpumask_var(policy->cpus);
@@ -1020,6 +973,22 @@ module_out:
 	return ret;
 }
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+	int j;
+
+	policy->last_cpu = policy->cpu;
+	policy->cpu = cpu;
+
+	for_each_cpu(j, policy->cpus)
+		per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+	cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
 
 /**
  * __cpufreq_remove_dev - remove a CPU device
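update_policy_cpu() is the other half of the new ownership model: when the policy-owning CPU is removed below, ownership migrates to the first remaining sibling, and policy->last_cpu plus the new CPUFREQ_UPDATE_POLICY_CPU event let notifier users re-home any per-policy state. A hedged sketch of such a policy-notifier callback (handler body illustrative; registration would go through cpufreq_register_notifier() with CPUFREQ_POLICY_NOTIFIER):

	#include <linux/cpufreq.h>

	static int policy_cpu_moved(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		/* Both the old and the new owner are published on the
		 * policy itself, so per-policy state can be migrated. */
		if (event == CPUFREQ_UPDATE_POLICY_CPU)
			pr_info("policy moved: cpu%u -> cpu%u\n",
				policy->last_cpu, policy->cpu);

		return NOTIFY_OK;
	}

	static struct notifier_block policy_nb = {
		.notifier_call = policy_cpu_moved,
	};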
@@ -1030,129 +999,103 @@ module_out:
  */
 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id;
+	unsigned int cpu = dev->id, ret, cpus;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
-#ifdef CONFIG_SMP
 	struct device *cpu_dev;
-	unsigned int j;
-#endif
 
-	pr_debug("unregistering CPU %u\n", cpu);
+	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
 	data = per_cpu(cpufreq_cpu_data, cpu);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!data) {
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		unlock_policy_rwsem_write(cpu);
+		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
 	}
-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
+	if (cpufreq_driver->target)
+		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-#ifdef CONFIG_SMP
-	/* if this isn't the CPU which is the parent of the kobj, we
-	 * only need to unlink, put and exit
-	 */
-	if (unlikely(cpu != data->cpu)) {
-		pr_debug("removing link\n");
-		cpumask_clear_cpu(cpu, data->cpus);
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		kobj = &dev->kobj;
-		cpufreq_cpu_put(data);
-		unlock_policy_rwsem_write(cpu);
-		sysfs_remove_link(kobj, "cpufreq");
-		return 0;
-	}
+#ifdef CONFIG_HOTPLUG_CPU
+	if (!cpufreq_driver->setpolicy)
+		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-#ifdef CONFIG_SMP
+	WARN_ON(lock_policy_rwsem_write(cpu));
+	cpus = cpumask_weight(data->cpus);
+	cpumask_clear_cpu(cpu, data->cpus);
+	unlock_policy_rwsem_write(cpu);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
-			CPUFREQ_NAME_LEN);
-#endif
+	if (cpu != data->cpu) {
+		sysfs_remove_link(&dev->kobj, "cpufreq");
+	} else if (cpus > 1) {
+		/* first sibling now owns the new sysfs dir */
+		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+		if (ret) {
+			pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-	/* if we have other CPUs still registered, we need to unlink them,
-	 * or else wait_for_completion below will lock up. Clean the
-	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
-	 * the sysfs links afterwards.
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			per_cpu(cpufreq_cpu_data, j) = NULL;
-		}
-	}
+			WARN_ON(lock_policy_rwsem_write(cpu));
+			cpumask_set_cpu(cpu, data->cpus);
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			spin_lock_irqsave(&cpufreq_driver_lock, flags);
+			per_cpu(cpufreq_cpu_data, cpu) = data;
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
-			strncpy(per_cpu(cpufreq_cpu_governor, j),
-				data->governor->name, CPUFREQ_NAME_LEN);
-#endif
-			cpu_dev = get_cpu_device(j);
-			kobj = &cpu_dev->kobj;
 			unlock_policy_rwsem_write(cpu);
-			sysfs_remove_link(kobj, "cpufreq");
-			lock_policy_rwsem_write(cpu);
-			cpufreq_cpu_put(data);
-		}
-	}
-#else
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
 
-	if (cpufreq_driver->target)
-		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+					"cpufreq");
+			return -EINVAL;
+		}
 
-	kobj = &data->kobj;
-	cmp = &data->kobj_unregister;
-	unlock_policy_rwsem_write(cpu);
-	kobject_put(kobj);
+		WARN_ON(lock_policy_rwsem_write(cpu));
+		update_policy_cpu(data, cpu_dev->id);
+		unlock_policy_rwsem_write(cpu);
+		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+				__func__, cpu_dev->id, cpu);
+	}
 
-	/* we need to make sure that the underlying kobj is actually
-	 * not referenced anymore by anybody before we proceed with
-	 * unloading.
-	 */
-	pr_debug("waiting for dropping of refcount\n");
-	wait_for_completion(cmp);
-	pr_debug("wait complete\n");
+	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+	cpufreq_cpu_put(data);
 
-	lock_policy_rwsem_write(cpu);
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(data);
-	unlock_policy_rwsem_write(cpu);
+	/* If cpu is last user of policy, free policy */
+	if (cpus == 1) {
+		lock_policy_rwsem_read(cpu);
+		kobj = &data->kobj;
+		cmp = &data->kobj_unregister;
+		unlock_policy_rwsem_read(cpu);
+		kobject_put(kobj);
+
+		/* we need to make sure that the underlying kobj is actually
+		 * not referenced anymore by anybody before we proceed with
+		 * unloading.
+		 */
+		pr_debug("waiting for dropping of refcount\n");
+		wait_for_completion(cmp);
+		pr_debug("wait complete\n");
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* when the CPU which is the parent of the kobj is hotplugged
-	 * offline, check for siblings, and create cpufreq sysfs interface
-	 * and symlinks
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		/* first sibling now owns the new sysfs dir */
-		cpumask_clear_cpu(cpu, data->cpus);
-		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(data);
 
-		/* finally remove our own symlink */
-		lock_policy_rwsem_write(cpu);
-		__cpufreq_remove_dev(dev, sif);
+		free_cpumask_var(data->related_cpus);
+		free_cpumask_var(data->cpus);
+		kfree(data);
+	} else if (cpufreq_driver->target) {
+		__cpufreq_governor(data, CPUFREQ_GOV_START);
+		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
 	}
-#endif
-
-	free_cpumask_var(data->related_cpus);
-	free_cpumask_var(data->cpus);
-	kfree(data);
 
+	per_cpu(cpufreq_policy_cpu, cpu) = -1;
 	return 0;
 }
 
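The rewritten removal path takes the sibling count up front (cpus = cpumask_weight(data->cpus)) and branches once at the end: the last user tears down the kobject and frees the policy, while any earlier departure just drops its link and restarts the governor for the remaining CPUs. A compilable toy model of that last-user-frees rule (illustrative names, no locking):

	#include <stdio.h>
	#include <stdlib.h>

	struct shared_policy {
		unsigned int cpus;	/* bitmask of CPUs still attached */
	};

	/* Detach 'cpu'; free the policy only when it was the last user. */
	static void detach_cpu(struct shared_policy *p, unsigned int cpu)
	{
		unsigned int users = __builtin_popcount(p->cpus);

		p->cpus &= ~(1u << cpu);
		if (users == 1) {
			free(p);	/* last user: full teardown */
			printf("cpu%u: policy freed\n", cpu);
		} else {
			printf("cpu%u: %u users left, governor restarted\n",
			       cpu, users - 1);
		}
	}

	int main(void)
	{
		struct shared_policy *p = malloc(sizeof(*p));

		p->cpus = 0x3;		/* CPUs 0 and 1 share one policy */
		detach_cpu(p, 1);	/* sibling remains, policy survives */
		detach_cpu(p, 0);	/* last user, policy freed */
		return 0;
	}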
@@ -1165,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	if (unlikely(lock_policy_rwsem_write(cpu)))
-		BUG();
-
 	retval = __cpufreq_remove_dev(dev, sif);
 	return retval;
 }
@@ -1216,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
  */
 unsigned int cpufreq_quick_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy;
 	unsigned int ret_freq = 0;
 
+	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+		return cpufreq_driver->get(cpu);
+
+	policy = cpufreq_cpu_get(cpu);
 	if (policy) {
 		ret_freq = policy->cur;
 		cpufreq_cpu_put(policy);
@@ -1386,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
 	.resume		= cpufreq_bp_resume,
 };
 
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+	if (cpufreq_driver)
+		return cpufreq_driver->name;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
 
 /*********************************************************************
  *                     NOTIFIER LISTS INTERFACE                      *
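cpufreq_get_current_driver() above gives other kernel code a cheap way to ask whether some cpufreq driver is already registered, without poking at the cpufreq_driver pointer directly. A sketch of a plausible caller (the surrounding function is hypothetical):

	#include <linux/cpufreq.h>

	static bool cpufreq_driver_already_loaded(void)
	{
		const char *name = cpufreq_get_current_driver();

		if (name) {
			pr_info("cpufreq driver in charge: %s\n", name);
			return true;
		}
		return false;	/* no driver registered yet */
	}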
@@ -1408,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
+	if (cpufreq_disabled())
+		return -EINVAL;
+
 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
 
 	switch (list) {
@@ -1442,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
+	if (cpufreq_disabled())
+		return -EINVAL;
+
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
 		ret = srcu_notifier_chain_unregister(
@@ -1487,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (target_freq == policy->cur)
 		return 0;
 
-	if (cpu_online(policy->cpu) && cpufreq_driver->target)
+	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
 	return retval;
@@ -1522,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
 
-	if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+	if (cpufreq_disabled())
+		return ret;
+
+	if (!cpufreq_driver->getavg)
 		return 0;
 
 	policy = cpufreq_cpu_get(policy->cpu);
@@ -1577,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 						policy->cpu, event);
 	ret = policy->governor->governor(policy, event);
 
+	if (event == CPUFREQ_GOV_START)
+		policy->governor->initialized++;
+	else if (event == CPUFREQ_GOV_STOP)
+		policy->governor->initialized--;
+
 	/* we keep one module reference alive for
 			each CPU governed by this CPU */
 	if ((event != CPUFREQ_GOV_START) || ret)
@@ -1600,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 
 	mutex_lock(&cpufreq_governor_mutex);
 
+	governor->initialized = 0;
 	err = -EBUSY;
 	if (__find_governor(governor->name) == NULL) {
 		err = 0;
@@ -1797,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
 			pr_debug("Driver did not initialize current freq");
 			data->cur = policy.cur;
 		} else {
-			if (data->cur != policy.cur)
+			if (data->cur != policy.cur && cpufreq_driver->target)
 				cpufreq_out_of_sync(cpu, data->cur,
 								policy.cur);
 		}
@@ -1829,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		if (unlikely(lock_policy_rwsem_write(cpu)))
-			BUG();
-
 		__cpufreq_remove_dev(dev, NULL);
 		break;
 	case CPU_DOWN_FAILED: