author     Viresh Kumar <viresh.kumar@linaro.org>          2013-01-14 08:23:03 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2013-02-01 18:01:14 -0500
commit     b8eed8af94f9203e0cc39245ea335f4b8dc1ed31
tree       39c9e57baa6daf91295c331c6b995c9d6286b2bc
parent     f85178048c083520bd920921744dd2c4a797fbc5
cpufreq: Simplify __cpufreq_remove_dev()
__cpufreq_remove_dev() is called on two occasions: cpufreq driver
unregistration and CPU removal.

The current implementation of this routine is more complex than it needs
to be. If the CPU being removed is policy->cpu, we remove the policy
first, add all the other CPUs from policy->cpus back again, and then call
__cpufreq_remove_dev() a second time to remove the CPU that is actually
going away.

There is a simpler way to remove a CPU:
- keep using the old policy structure
- update its fields (policy->cpu, etc.)
- notify any cpufreq users that depend on policy->cpu changing

This patch implements that approach. It has been tested on the ARM
big.LITTLE TC2 SoC, which has 5 cores (2 A15s and 3 A7s); both A15s share
one policy structure and all three A7s share another.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
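In outline, the patch's handover works like this: when the CPU that owns a
shared policy is removed, the first remaining sibling takes over and the
previous owner is remembered in last_cpu. The toy userspace sketch below
models that flow; the struct, the bitmask representation and main() are
illustrative only, and the kernel version additionally moves the policy
kobject in sysfs and fires a CPUFREQ_UPDATE_POLICY_CPU notifier, as the
diff below shows.

/* Toy model of the ownership handover (illustrative only, not kernel code). */
#include <stdio.h>

struct policy {
	unsigned int cpu;       /* current owner, mirrors policy->cpu */
	unsigned int last_cpu;  /* previous owner, mirrors policy->last_cpu */
	unsigned long cpus;     /* bitmask of CPUs sharing this policy */
};

static void update_policy_cpu(struct policy *p, unsigned int new_cpu)
{
	/* remember the old owner and install the new one; the kernel
	 * helper also fixes up per-CPU tables and calls the notifier */
	p->last_cpu = p->cpu;
	p->cpu = new_cpu;
}

static void remove_cpu(struct policy *p, unsigned int cpu)
{
	p->cpus &= ~(1UL << cpu);

	if (cpu == p->cpu && p->cpus) {
		/* first remaining sibling becomes the new owner */
		update_policy_cpu(p, (unsigned int)__builtin_ctzl(p->cpus));
	} else if (!p->cpus) {
		printf("last user gone, policy would be freed\n");
	}
}

int main(void)
{
	struct policy p = { .cpu = 0, .last_cpu = 0, .cpus = 0x3 }; /* CPUs 0 and 1 */

	remove_cpu(&p, 0);
	printf("owner is now CPU %u (was CPU %u)\n", p.cpu, p.last_cpu);
	return 0;
}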
-rw-r--r--  drivers/cpufreq/cpufreq.c        | 161
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c  |  27
-rw-r--r--  drivers/cpufreq/freq_table.c     |   9
-rw-r--r--  include/linux/cpufreq.h          |  14
4 files changed, 113 insertions, 98 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 034d1836884b..9af14a8bbcdb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1036,6 +1036,25 @@ module_out:
 	return ret;
 }
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+	int j;
+
+	policy->last_cpu = policy->cpu;
+	policy->cpu = cpu;
+
+	for_each_cpu(j, policy->cpus) {
+		if (!cpu_online(j))
+			continue;
+		per_cpu(cpufreq_policy_cpu, j) = cpu;
+	}
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+	cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
 
 /**
  * __cpufreq_remove_dev - remove a CPU device
@@ -1046,132 +1065,92 @@ module_out:
  */
 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id;
+	unsigned int cpu = dev->id, ret, cpus;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
-#ifdef CONFIG_SMP
 	struct device *cpu_dev;
-	unsigned int j;
-#endif
 
-	pr_debug("unregistering CPU %u\n", cpu);
+	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	data = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!data) {
+		pr_debug("%s: No cpu_data found\n", __func__);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		unlock_policy_rwsem_write(cpu);
 		return -EINVAL;
 	}
-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
-#ifdef CONFIG_SMP
-	/* if this isn't the CPU which is the parent of the kobj, we
-	 * only need to unlink, put and exit
-	 */
-	if (unlikely(cpu != data->cpu)) {
-		pr_debug("removing link\n");
+	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-		cpumask_clear_cpu(cpu, data->cpus);
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-		__cpufreq_governor(data, CPUFREQ_GOV_START);
-		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
-
-		kobj = &dev->kobj;
-		cpufreq_cpu_put(data);
-		unlock_policy_rwsem_write(cpu);
-		sysfs_remove_link(kobj, "cpufreq");
-		return 0;
-	}
-#endif
-
-#ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
 	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
 			CPUFREQ_NAME_LEN);
 #endif
 
-	/* if we have other CPUs still registered, we need to unlink them,
-	 * or else wait_for_completion below will lock up. Clean the
-	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
-	 * the sysfs links afterwards.
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			per_cpu(cpufreq_cpu_data, j) = NULL;
-		}
-	}
-
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;
+	cpus = cpumask_weight(data->cpus);
+	cpumask_clear_cpu(cpu, data->cpus);
 
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
-			strncpy(per_cpu(cpufreq_cpu_governor, j),
-				data->governor->name, CPUFREQ_NAME_LEN);
-#endif
-			cpu_dev = get_cpu_device(j);
-			kobj = &cpu_dev->kobj;
+	if (unlikely((cpu == data->cpu) && (cpus > 1))) {
+		/* first sibling now owns the new sysfs dir */
+		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+		if (ret) {
+			pr_err("%s: Failed to move kobj: %d", __func__, ret);
+			cpumask_set_cpu(cpu, data->cpus);
+			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+					"cpufreq");
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 			unlock_policy_rwsem_write(cpu);
-			sysfs_remove_link(kobj, "cpufreq");
-			lock_policy_rwsem_write(cpu);
-			cpufreq_cpu_put(data);
+			return -EINVAL;
 		}
+
+		update_policy_cpu(data, cpu_dev->id);
+		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+				__func__, cpu_dev->id, cpu);
 	}
-#else
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
 
-	if (cpufreq_driver->target)
-		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	kobj = &data->kobj;
-	cmp = &data->kobj_unregister;
+	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+	cpufreq_cpu_put(data);
 	unlock_policy_rwsem_write(cpu);
-	kobject_put(kobj);
+	sysfs_remove_link(&dev->kobj, "cpufreq");
 
-	/* we need to make sure that the underlying kobj is actually
-	 * not referenced anymore by anybody before we proceed with
-	 * unloading.
-	 */
-	pr_debug("waiting for dropping of refcount\n");
-	wait_for_completion(cmp);
-	pr_debug("wait complete\n");
-
-	lock_policy_rwsem_write(cpu);
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(data);
-	unlock_policy_rwsem_write(cpu);
+	/* If cpu is last user of policy, free policy */
+	if (cpus == 1) {
+		lock_policy_rwsem_write(cpu);
+		kobj = &data->kobj;
+		cmp = &data->kobj_unregister;
+		unlock_policy_rwsem_write(cpu);
+		kobject_put(kobj);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* when the CPU which is the parent of the kobj is hotplugged
-	 * offline, check for siblings, and create cpufreq sysfs interface
-	 * and symlinks
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		/* first sibling now owns the new sysfs dir */
-		cpumask_clear_cpu(cpu, data->cpus);
-		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+		/* we need to make sure that the underlying kobj is actually
+		 * not referenced anymore by anybody before we proceed with
+		 * unloading.
+		 */
+		pr_debug("waiting for dropping of refcount\n");
+		wait_for_completion(cmp);
+		pr_debug("wait complete\n");
 
-		/* finally remove our own symlink */
 		lock_policy_rwsem_write(cpu);
-		__cpufreq_remove_dev(dev, sif);
-	}
-#endif
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(data);
+		unlock_policy_rwsem_write(cpu);
 
-	free_cpumask_var(data->related_cpus);
-	free_cpumask_var(data->cpus);
-	kfree(data);
+		free_cpumask_var(data->related_cpus);
+		free_cpumask_var(data->cpus);
+		kfree(data);
+	} else if (cpufreq_driver->target) {
+		__cpufreq_governor(data, CPUFREQ_GOV_START);
+		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+	}
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 9d7732b81044..beef6b54382b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -170,11 +170,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 static void cpufreq_stats_free_table(unsigned int cpu)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
 	if (stat) {
+		pr_debug("%s: Free stat table\n", __func__);
 		kfree(stat->time_in_state);
 		kfree(stat);
+		per_cpu(cpufreq_stats_table, cpu) = NULL;
 	}
-	per_cpu(cpufreq_stats_table, cpu) = NULL;
 }
 
 /* must be called early in the CPU removal sequence (before
@@ -183,8 +185,10 @@ static void cpufreq_stats_free_table(unsigned int cpu)
 static void cpufreq_stats_free_sysfs(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	if (policy && policy->cpu == cpu)
+	if (policy && (cpumask_weight(policy->cpus) == 1)) {
+		pr_debug("%s: Free sysfs stat\n", __func__);
 		sysfs_remove_group(&policy->kobj, &stats_attr_group);
+	}
 	if (policy)
 		cpufreq_cpu_put(policy);
 }
@@ -262,6 +266,19 @@ error_get_fail:
 	return ret;
 }
 
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+			policy->last_cpu);
+
+	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+			policy->cpu, policy->last_cpu);
+	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+			policy->last_cpu);
+	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+	stat->cpu = policy->cpu;
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
@@ -269,6 +286,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 	struct cpufreq_policy *policy = data;
 	struct cpufreq_frequency_table *table;
 	unsigned int cpu = policy->cpu;
+
+	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+		cpufreq_stats_update_policy_cpu(policy);
+		return 0;
+	}
+
 	if (val != CPUFREQ_NOTIFY)
 		return 0;
 	table = cpufreq_frequency_get_table(cpu);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..aa5bd39d129e 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -227,6 +227,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+	pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+			policy->cpu, policy->last_cpu);
+	per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+			policy->last_cpu);
+	per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
 	return per_cpu(cpufreq_show_table, cpu);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a55b88eaf96a..52be2d0c994a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -93,7 +93,9 @@ struct cpufreq_policy {
 	cpumask_var_t		related_cpus; /* CPUs with any coordination */
 	unsigned int		shared_type; /* ANY or ALL affected CPUs
 						should set cpufreq */
-	unsigned int		cpu;    /* cpu nr of registered CPU */
+	unsigned int		cpu;    /* cpu nr of CPU managing this policy */
+	unsigned int		last_cpu; /* cpu nr of previous CPU that managed
+					   * this policy */
 	struct cpufreq_cpuinfo	cpuinfo;/* see above */
 
 	unsigned int		min;    /* in kHz */
@@ -112,10 +114,11 @@ struct cpufreq_policy {
 	struct completion	kobj_unregister;
 };
 
-#define CPUFREQ_ADJUST		(0)
-#define CPUFREQ_INCOMPATIBLE	(1)
-#define CPUFREQ_NOTIFY		(2)
-#define CPUFREQ_START		(3)
+#define CPUFREQ_ADJUST			(0)
+#define CPUFREQ_INCOMPATIBLE		(1)
+#define CPUFREQ_NOTIFY			(2)
+#define CPUFREQ_START			(3)
+#define CPUFREQ_UPDATE_POLICY_CPU	(4)
 
 #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
@@ -405,6 +408,7 @@ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 				      unsigned int cpu);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
 #endif /* _LINUX_CPUFREQ_H */