Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
 drivers/cpufreq/cpufreq.c | 322
 1 file changed, 175 insertions(+), 147 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04548f7023af..02d534da22dd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list);
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- *   do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- *   the policy altogether (eg. CPU hotplug), will hold this lock in write
- *   mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- *   take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu)					\
-static int lock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
-	BUG_ON(!policy);						\
-	down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-									\
-	return 0;							\
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu)					\
-static void unlock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
-	BUG_ON(!policy);						\
-	up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-}
-
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
+static inline bool has_target(void)
+{
+	return cpufreq_driver->target_index || cpufreq_driver->target;
+}
+
 /*
  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
  * sections
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
 
 bool have_governor_per_policy(void)
 {
-	return cpufreq_driver->have_governor_per_policy;
+	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table,
+		unsigned int transition_latency)
+{
+	int ret;
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+		return ret;
+	}
+
+	policy->cpuinfo.transition_latency = transition_latency;
+
+	/*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the clock and voltage.
+	 */
+	cpumask_setall(policy->cpus);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
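As an illustration of the new helper (not part of this patch; the foo_* names and the 300 us latency are hypothetical), a driver's ->init() callback might reduce to:

	static struct cpufreq_frequency_table foo_freq_table[] = {
		{ .frequency = 400000 },		/* kHz */
		{ .frequency = 800000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* validates/publishes the table, records the latency (in ns)
		 * and marks all CPUs as sharing this policy */
		return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
	}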
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 			*policy = CPUFREQ_POLICY_POWERSAVE;
 			err = 0;
 		}
-	} else if (cpufreq_driver->target) {
+	} else if (has_target()) {
 		struct cpufreq_governor *t;
 
 		mutex_lock(&cpufreq_governor_mutex);
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		struct cpufreq_policy *new_policy);
 
 /**
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	ret = __cpufreq_set_policy(policy, &new_policy);		\
+	ret = cpufreq_set_policy(policy, &new_policy);			\
 	policy->user_policy.object = policy->object;			\
 									\
 	return ret ? ret : count;					\
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 						&new_policy.governor))
 		return -EINVAL;
 
-	/*
-	 * Do not use cpufreq_set_policy here or the user_policy.max
-	 * will be wrongly overridden
-	 */
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 	ssize_t i = 0;
 	struct cpufreq_governor *t;
 
-	if (!cpufreq_driver->target) {
+	if (!has_target()) {
 		i += sprintf(buf, "performance powersave");
 		goto out;
 	}
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
-	ssize_t ret = -EINVAL;
+	ssize_t ret;
 
 	if (!down_read_trylock(&cpufreq_rwsem))
-		goto exit;
+		return -EINVAL;
 
-	if (lock_policy_rwsem_read(policy->cpu) < 0)
-		goto up_read;
+	down_read(&policy->rwsem);
 
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
-exit:
+
 	return ret;
 }
 
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	if (!down_read_trylock(&cpufreq_rwsem))
 		goto unlock;
 
-	if (lock_policy_rwsem_write(policy->cpu) < 0)
-		goto up_read;
+	down_write(&policy->rwsem);
 
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-up_read:
 	up_read(&cpufreq_rwsem);
 unlock:
 	put_online_cpus();
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 		if (ret)
 			goto err_out_kobj_put;
 	}
-	if (cpufreq_driver->target) {
+	if (has_target()) {
 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 		if (ret)
 			goto err_out_kobj_put;
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
-	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	/* assure that the starting sequence is run in cpufreq_set_policy */
 	policy->governor = NULL;
 
 	/* set default policy */
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
 
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 			unsigned int cpu, struct device *dev,
 			bool frozen)
 {
-	int ret = 0, has_target = !!cpufreq_driver->target;
+	int ret = 0;
 	unsigned long flags;
 
-	if (has_target) {
+	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			pr_err("%s: Failed to stop governor\n", __func__);
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}
 
-	lock_policy_rwsem_write(policy->cpu);
+	down_write(&policy->rwsem);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	per_cpu(cpufreq_cpu_data, cpu) = policy;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-	if (has_target) {
+	if (has_target()) {
 		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 			pr_err("%s: Failed to start governor\n", __func__);
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 		goto err_free_cpumask;
 
 	INIT_LIST_HEAD(&policy->policy_list);
+	init_rwsem(&policy->rwsem);
+
 	return policy;
 
 err_free_cpumask:
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
-	if (cpu == policy->cpu)
+	if (WARN_ON(cpu == policy->cpu))
 		return;
 
-	/*
-	 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
-	 * Also lock for last cpu is enough here as contention will happen only
-	 * after policy->cpu is changed and after it is changed, other threads
-	 * will try to acquire lock for new cpu. And policy is already updated
-	 * by then.
-	 */
-	down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+	down_write(&policy->rwsem);
 
 	policy->last_cpu = policy->cpu;
 	policy->cpu = cpu;
 
-	up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+	up_write(&policy->rwsem);
 
-#ifdef CONFIG_CPU_FREQ_TABLE
 	cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
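The conversion is mechanical: each former lock_policy_rwsem_read/write(cpu) and unlock pair becomes a plain down/up on policy->rwsem. An illustrative reader/writer pair (not from this patch; the foo_* names are hypothetical) showing the resulting idiom:

	/* readers of policy fields may run concurrently */
	static unsigned int foo_read_min(struct cpufreq_policy *policy)
	{
		unsigned int min;

		down_read(&policy->rwsem);
		min = policy->min;
		up_read(&policy->rwsem);

		return min;
	}

	/* writers are exclusive against readers and other writers */
	static void foo_write_limits(struct cpufreq_policy *policy,
				     unsigned int min, unsigned int max)
	{
		down_write(&policy->rwsem);
		policy->min = min;
		policy->max = max;
		up_write(&policy->rwsem);
	}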
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		goto err_set_policy_cpu;
 	}
 
+	if (cpufreq_driver->get) {
+		policy->cur = cpufreq_driver->get(policy->cpu);
+		if (!policy->cur) {
+			pr_err("%s: ->get() failed\n", __func__);
+			goto err_get_freq;
+		}
+	}
+
 	/* related cpus should atleast have policy->cpus */
 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1107,6 +1092,9 @@ err_out_unregister:
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_get_freq:
+	if (cpufreq_driver->exit)
+		cpufreq_driver->exit(policy);
err_set_policy_cpu:
 	cpufreq_policy_free(policy);
 nomem_out:
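With __cpufreq_add_dev() now caching the current frequency and treating a zero return as failure (unwound through the new err_get_freq label), a driver's ->get() callback is expected to report a non-zero rate in kHz. A hedged sketch, assuming a driver-private clock handle:

	static struct clk *foo_clk;	/* assumed driver-private clock */

	static unsigned int foo_get(unsigned int cpu)
	{
		/* current rate in kHz; returning 0 signals failure */
		return clk_get_rate(foo_clk) / 1000;
	}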
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	if (ret) {
 		pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-		WARN_ON(lock_policy_rwsem_write(old_cpu));
+		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
-		unlock_policy_rwsem_write(old_cpu);
+		up_write(&policy->rwsem);
 
 		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 					"cpufreq");
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		return -EINVAL;
 	}
 
-	if (cpufreq_driver->target) {
+	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			pr_err("%s: Failed to stop governor\n", __func__);
@@ -1200,22 +1188,21 @@
 			policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-	lock_policy_rwsem_read(cpu);
+	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
-	unlock_policy_rwsem_read(cpu);
+	up_read(&policy->rwsem);
 
 	if (cpu != policy->cpu) {
 		if (!frozen)
 			sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-
 		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
 			if (!frozen) {
-				pr_debug("%s: policy Kobject moved to cpu: %d "
-					 "from: %d\n",__func__, new_cpu, cpu);
+				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+						__func__, new_cpu, cpu);
 			}
 		}
 	}
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		return -EINVAL;
 	}
 
-	WARN_ON(lock_policy_rwsem_write(cpu));
+	down_write(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
 
 	if (cpus > 1)
 		cpumask_clear_cpu(cpu, policy->cpus);
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
-		if (cpufreq_driver->target) {
+		if (has_target()) {
 			ret = __cpufreq_governor(policy,
 					CPUFREQ_GOV_POLICY_EXIT);
 			if (ret) {
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	}
 
 	if (!frozen) {
-		lock_policy_rwsem_read(cpu);
+		down_read(&policy->rwsem);
 		kobj = &policy->kobj;
 		cmp = &policy->kobj_unregister;
-		unlock_policy_rwsem_read(cpu);
+		up_read(&policy->rwsem);
 		kobject_put(kobj);
 
 		/*
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		if (!frozen)
 			cpufreq_policy_free(policy);
 	} else {
-		if (cpufreq_driver->target) {
+		if (has_target()) {
 			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 			    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 				pr_err("%s: Failed to start governor\n",
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 }
 
 /**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
  */
-static inline int __cpufreq_remove_dev(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
-{
-	int ret;
-
-	ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
-	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
-	return ret;
-}
-
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
-	int retval;
+	int ret;
 
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	retval = __cpufreq_remove_dev(dev, sif, false);
-	return retval;
+	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+	if (!ret)
+		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+	return ret;
 }
 
 static void handle_update(struct work_struct *work)
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 	unsigned int ret_freq = 0;
 
 	if (cpufreq_disabled() || !cpufreq_driver)
 		return -ENOENT;
 
+	BUG_ON(!policy);
+
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 
-	if (unlikely(lock_policy_rwsem_read(cpu)))
-		goto out_policy;
+	down_read(&policy->rwsem);
 
 	ret_freq = __cpufreq_get(cpu);
 
-	unlock_policy_rwsem_read(cpu);
-
-out_policy:
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
 
 	return ret_freq;
@@ -1681,12 +1656,75 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 			policy->cpu, target_freq, relation, old_target_freq);
 
+	/*
+	 * This might look like a redundant call, as we are checking it again
+	 * after finding the index. But it is left intentionally for cases
+	 * where exactly the same frequency is requested again, so that we can
+	 * save a few function calls.
+	 */
 	if (target_freq == policy->cur)
 		return 0;
 
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
+	else if (cpufreq_driver->target_index) {
+		struct cpufreq_frequency_table *freq_table;
+		struct cpufreq_freqs freqs;
+		bool notify;
+		int index;
+
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (unlikely(!freq_table)) {
+			pr_err("%s: Unable to find freq_table\n", __func__);
+			goto out;
+		}
+
+		retval = cpufreq_frequency_table_target(policy, freq_table,
+				target_freq, relation, &index);
+		if (unlikely(retval)) {
+			pr_err("%s: Unable to find matching freq\n", __func__);
+			goto out;
+		}
+
+		if (freq_table[index].frequency == policy->cur) {
+			retval = 0;
+			goto out;
+		}
+
+		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+
+		if (notify) {
+			freqs.old = policy->cur;
+			freqs.new = freq_table[index].frequency;
+			freqs.flags = 0;
+
+			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+					__func__, policy->cpu, freqs.old,
+					freqs.new);
+
+			cpufreq_notify_transition(policy, &freqs,
+					CPUFREQ_PRECHANGE);
+		}
+
+		retval = cpufreq_driver->target_index(policy, index);
+		if (retval)
+			pr_err("%s: Failed to change cpu frequency: %d\n",
+					__func__, retval);
+
+		if (notify) {
+			/*
+			 * Notify with old freq in case we failed to change
+			 * frequency
+			 */
+			if (retval)
+				freqs.new = freqs.old;
+
+			cpufreq_notify_transition(policy, &freqs,
+					CPUFREQ_POSTCHANGE);
+		}
+	}
 
+out:
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
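Since the core now performs the table lookup and the PRECHANGE/POSTCHANGE notifications (unless the driver sets CPUFREQ_ASYNC_NOTIFICATION), a ->target_index() implementation shrinks to the actual hardware switch. A sketch reusing the hypothetical foo_* names from above:

	static int foo_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
	{
		/* 'index' was validated against the frequency table and the
		 * PRECHANGE notification was already sent by the core */
		return clk_set_rate(foo_clk,
			(unsigned long)foo_freq_table[index].frequency * 1000);
	}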
@@ -1697,14 +1735,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int ret = -EINVAL;
 
-	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-		goto fail;
+	down_write(&policy->rwsem);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
-fail:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1907,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 /*
- * data   : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
 	int ret = 0, failed = 1;
@@ -1934,10 +1970,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
 	/* end old governor */
 	if (policy->governor) {
 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		unlock_policy_rwsem_write(new_policy->cpu);
+		up_write(&policy->rwsem);
 		__cpufreq_governor(policy,
 				CPUFREQ_GOV_POLICY_EXIT);
-		lock_policy_rwsem_write(new_policy->cpu);
+		down_write(&policy->rwsem);
 	}
 
 	/* start new governor */
@@ -1946,10 +1982,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
 			failed = 0;
 		} else {
-			unlock_policy_rwsem_write(new_policy->cpu);
+			up_write(&policy->rwsem);
 			__cpufreq_governor(policy,
 					CPUFREQ_GOV_POLICY_EXIT);
-			lock_policy_rwsem_write(new_policy->cpu);
+			down_write(&policy->rwsem);
 		}
 	}
 
@@ -1995,10 +2031,7 @@ int cpufreq_update_policy(unsigned int cpu)
 		goto no_policy;
 	}
 
-	if (unlikely(lock_policy_rwsem_write(cpu))) {
-		ret = -EINVAL;
-		goto fail;
-	}
+	down_write(&policy->rwsem);
 
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
@@ -2017,17 +2050,16 @@
 			pr_debug("Driver did not initialize current freq");
 			policy->cur = new_policy.cur;
 		} else {
-			if (policy->cur != new_policy.cur && cpufreq_driver->target)
+			if (policy->cur != new_policy.cur && has_target())
 				cpufreq_out_of_sync(cpu, policy->cur,
 								new_policy.cur);
 		}
 	}
 
-	ret = __cpufreq_set_policy(policy, &new_policy);
+	ret = cpufreq_set_policy(policy, &new_policy);
 
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
-fail:
 	cpufreq_cpu_put(policy);
 no_policy:
 	return ret;
@@ -2096,7 +2128,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		return -ENODEV;
 
 	if (!driver_data || !driver_data->verify || !driver_data->init ||
-	    ((!driver_data->setpolicy) && (!driver_data->target)))
+	    !(driver_data->setpolicy || driver_data->target_index ||
+		    driver_data->target))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
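Under the relaxed check, a driver supplying only ->target_index (with ->verify and ->init) registers cleanly. A sketch tying the hypothetical foo_* pieces together; cpufreq_generic_frequency_table_verify is assumed to be available alongside this series:

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name		= "foo-cpufreq",
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_target_index,
		.get		= foo_get,
		.init		= foo_cpufreq_init,
	};

	static int __init foo_cpufreq_probe(void)
	{
		return cpufreq_register_driver(&foo_cpufreq_driver);
	}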
@@ -2183,14 +2216,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
 static int __init cpufreq_core_init(void)
 {
-	int cpu;
-
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	for_each_possible_cpu(cpu)
-		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);