Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--  drivers/cpufreq/cpufreq.c | 145
1 file changed, 90 insertions(+), 55 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b02824d092e7..1b8a48eaf90f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
-static DEFINE_SPINLOCK(cpufreq_driver_lock);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
 
 /*
  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -128,6 +128,11 @@ void disable_cpufreq(void)
 static LIST_HEAD(cpufreq_governor_list);
 static DEFINE_MUTEX(cpufreq_governor_mutex);
 
+bool have_governor_per_policy(void)
+{
+	return cpufreq_driver->have_governor_per_policy;
+}
+
 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 {
 	struct cpufreq_policy *data;
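A hedged aside, not part of the patch itself: the point of have_governor_per_policy() is to let governor code decide whether its tunables live under each policy's kobject or under the single global cpufreq kobject. A minimal sketch of that decision follows; the helper name get_parent_kobj() is invented for illustration, while cpufreq_global_kobject is the existing global kobject exported by cpufreq.c.

#include <linux/cpufreq.h>

/* Illustrative only: pick the kobject a governor's sysfs tunables hang off. */
static struct kobject *get_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;		/* one set of tunables per policy */

	return cpufreq_global_kobject;		/* one global set of tunables */
}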
@@ -137,7 +142,7 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 		goto err_out;
 
 	/* get the cpufreq driver */
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	if (!cpufreq_driver)
 		goto err_out_unlock;
@@ -155,13 +160,13 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 	if (!sysfs && !kobject_get(&data->kobj))
 		goto err_out_put_module;
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return data;
 
 err_out_put_module:
 	module_put(cpufreq_driver->owner);
 err_out_unlock:
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 err_out:
 	return NULL;
 }
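For orientation only: converting cpufreq_driver_lock from DEFINE_SPINLOCK to DEFINE_RWLOCK lets lookups like the one above run concurrently, while updates still take the lock exclusively. A minimal sketch of the two sides of that pattern; the lock and function names here are illustrative, not taken from the patch.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* illustrative stand-in for cpufreq_driver_lock */

static void example_lookup(void)
{
	unsigned long flags;

	read_lock_irqsave(&example_lock, flags);	/* many readers may hold this at once */
	/* ... read shared state, e.g. a per-CPU policy pointer ... */
	read_unlock_irqrestore(&example_lock, flags);
}

static void example_update(void)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);	/* exclusive: blocks readers and writers */
	/* ... modify shared state ... */
	write_unlock_irqrestore(&example_lock, flags);
}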
@@ -244,19 +249,9 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 #endif
 
 
-/**
- * cpufreq_notify_transition - call notifier chain and adjust_jiffies
- * on frequency transition.
- *
- * This function calls the transition notifiers and the "adjust_jiffies"
- * function. It is called twice on all CPU frequency changes that have
- * external effects.
- */
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
+void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, unsigned int state)
 {
-	struct cpufreq_policy *policy;
-	unsigned long flags;
-
 	BUG_ON(irqs_disabled());
 
 	if (cpufreq_disabled())
@@ -266,10 +261,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	switch (state) {
 
 	case CPUFREQ_PRECHANGE:
@@ -303,6 +294,20 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 		break;
 	}
 }
+/**
+ * cpufreq_notify_transition - call notifier chain and adjust_jiffies
+ * on frequency transition.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, unsigned int state)
+{
+	for_each_cpu(freqs->cpu, policy->cpus)
+		__cpufreq_notify_transition(policy, freqs, state);
+}
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 
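For context, a hedged sketch of what the new prototype means for callers: a scaling driver's ->target() routine now hands its policy to cpufreq_notify_transition() instead of filling in freqs.cpu itself, and the core walks policy->cpus. example_target() below is a hypothetical driver fragment, not code from this patch.

#include <linux/cpufreq.h>

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.old = policy->cur;	/* no freqs.cpu assignment needed any more */
	freqs.new = target_freq;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to target_freq here ... */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return 0;
}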
@@ -765,12 +770,12 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 			goto err_out_kobj_put;
 	}
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 	}
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	ret = cpufreq_add_dev_symlink(cpu, policy);
 	if (ret)
@@ -803,27 +808,30 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
 				  struct device *dev)
 {
 	struct cpufreq_policy *policy;
-	int ret = 0;
+	int ret = 0, has_target = !!cpufreq_driver->target;
 	unsigned long flags;
 
 	policy = cpufreq_cpu_get(sibling);
 	WARN_ON(!policy);
 
-	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+	if (has_target)
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
 	lock_policy_rwsem_write(sibling);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	cpumask_set_cpu(cpu, policy->cpus);
 	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
 	per_cpu(cpufreq_cpu_data, cpu) = policy;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	unlock_policy_rwsem_write(sibling);
 
-	__cpufreq_governor(policy, CPUFREQ_GOV_START);
-	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	if (has_target) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	}
 
 	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 	if (ret) {
@@ -871,15 +879,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* Check if this cpu was hot-unplugged earlier and has siblings */
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_online_cpu(sibling) {
 		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
 		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 			return cpufreq_add_policy_cpu(cpu, sibling, dev);
 		}
 	}
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 #endif
 
@@ -952,10 +960,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return 0;
 
 err_out_unregister:
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
@@ -1008,12 +1016,12 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	data = per_cpu(cpufreq_cpu_data, cpu);
 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!data) {
 		pr_debug("%s: No cpu_data found\n", __func__);
@@ -1031,7 +1039,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 
 	WARN_ON(lock_policy_rwsem_write(cpu));
 	cpus = cpumask_weight(data->cpus);
-	cpumask_clear_cpu(cpu, data->cpus);
+
+	if (cpus > 1)
+		cpumask_clear_cpu(cpu, data->cpus);
 	unlock_policy_rwsem_write(cpu);
 
 	if (cpu != data->cpu) {
@@ -1047,9 +1057,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 		WARN_ON(lock_policy_rwsem_write(cpu));
 		cpumask_set_cpu(cpu, data->cpus);
 
-		spin_lock_irqsave(&cpufreq_driver_lock, flags);
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
 		per_cpu(cpufreq_cpu_data, cpu) = data;
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 		unlock_policy_rwsem_write(cpu);
 
@@ -1070,6 +1080,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
+		if (cpufreq_driver->target)
+			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
 		lock_policy_rwsem_read(cpu);
 		kobj = &data->kobj;
 		cmp = &data->kobj_unregister;
@@ -1134,16 +1147,23 @@ static void handle_update(struct work_struct *work)
 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 				unsigned int new_freq)
 {
+	struct cpufreq_policy *policy;
 	struct cpufreq_freqs freqs;
+	unsigned long flags;
+
 
 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
 	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
 
-	freqs.cpu = cpu;
 	freqs.old = old_freq;
 	freqs.new = new_freq;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 }
 
 
@@ -1544,10 +1564,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 						policy->cpu, event);
 	ret = policy->governor->governor(policy, event);
 
-	if (event == CPUFREQ_GOV_START)
-		policy->governor->initialized++;
-	else if (event == CPUFREQ_GOV_STOP)
-		policy->governor->initialized--;
+	if (!ret) {
+		if (event == CPUFREQ_GOV_POLICY_INIT)
+			policy->governor->initialized++;
+		else if (event == CPUFREQ_GOV_POLICY_EXIT)
+			policy->governor->initialized--;
+	}
 
 	/* we keep one module reference alive for
 			each CPU governed by this CPU */
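As a hedged summary of the accounting change: governor->initialized now counts policies for which a governor instance exists (POLICY_INIT/POLICY_EXIT) rather than how many are started, and only successful calls are counted. Pieced together from this patch alone, the per-policy event order a governor can expect is roughly the sequence below; __cpufreq_governor() is static to cpufreq.c, so this is a sketch of the ordering, not callable code.

#include <linux/cpufreq.h>

/* Illustrative only: per-policy governor lifecycle as used elsewhere in this patch. */
static void example_governor_lifecycle(struct cpufreq_policy *policy)
{
	__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);	/* initialized++ on success */
	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	/* ... START/STOP may repeat across CPU hotplug or governor switches ... */
	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
	__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);	/* initialized-- on success */
}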
@@ -1651,7 +1673,7 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy)
 {
-	int ret = 0;
+	int ret = 0, failed = 1;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
 		policy->min, policy->max);
@@ -1705,18 +1727,31 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 		pr_debug("governor switch\n");
 
 		/* end old governor */
-		if (data->governor)
+		if (data->governor) {
 			__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+			__cpufreq_governor(data,
+					CPUFREQ_GOV_POLICY_EXIT);
+		}
 
 		/* start new governor */
 		data->governor = policy->governor;
-		if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+		if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
+			if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+				failed = 0;
+			else
+				__cpufreq_governor(data,
+						CPUFREQ_GOV_POLICY_EXIT);
+		}
+
+		if (failed) {
 			/* new governor failed, so re-start old one */
 			pr_debug("starting governor %s failed\n",
 							data->governor->name);
 			if (old_gov) {
 				data->governor = old_gov;
 				__cpufreq_governor(data,
+						CPUFREQ_GOV_POLICY_INIT);
+				__cpufreq_governor(data,
 						   CPUFREQ_GOV_START);
 			}
 			ret = -EINVAL;
@@ -1848,13 +1883,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	if (driver_data->setpolicy)
 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		return -EBUSY;
 	}
 	cpufreq_driver = driver_data;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
@@ -1886,9 +1921,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
 err_null_driver:
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -1914,9 +1949,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	subsys_interface_unregister(&cpufreq_interface);
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	return 0;
 }