diff options
author | Viresh Kumar <viresh.kumar@linaro.org> | 2017-06-23 05:25:33 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-07-17 11:32:05 -0400 |
commit | 62de1161e220bc6ded7806ef0d149560f06152b3 (patch) | |
tree | 7464f3c8901afc0f63ffdeab27156ec0a4d03d6b | |
parent | 805df2966f67a6b1a228c8e580e230b6c849b41e (diff) |
arch_topology: Localize cap_parsing_failed to topology_parse_cpu_capacity()
cap_parsing_failed is only required in topology_parse_cpu_capacity() to
know if we have already tried to allocate raw_capacity and failed, or if
at least one of the cpu_nodes didn't have the required
"capacity-dmips-mhz" property.
All other users can use raw_capacity instead of cap_parsing_failed.
Make sure we set raw_capacity to NULL after we free it.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Juri Lelli <juri.lelli@arm.com>
Tested-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | drivers/base/arch_topology.c | 24 |
1 files changed, 13 insertions, 11 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 5728e2fbb765..9e4d2107f4fa 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c | |||
@@ -95,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl); | |||
95 | 95 | ||
96 | static u32 capacity_scale; | 96 | static u32 capacity_scale; |
97 | static u32 *raw_capacity; | 97 | static u32 *raw_capacity; |
98 | static bool cap_parsing_failed; | 98 | |
99 | static int __init free_raw_capacity(void) | ||
100 | { | ||
101 | kfree(raw_capacity); | ||
102 | raw_capacity = NULL; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
99 | 106 | ||
100 | void topology_normalize_cpu_scale(void) | 107 | void topology_normalize_cpu_scale(void) |
101 | { | 108 | { |
102 | u64 capacity; | 109 | u64 capacity; |
103 | int cpu; | 110 | int cpu; |
104 | 111 | ||
105 | if (!raw_capacity || cap_parsing_failed) | 112 | if (!raw_capacity) |
106 | return; | 113 | return; |
107 | 114 | ||
108 | pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); | 115 | pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); |
@@ -121,6 +128,7 @@ void topology_normalize_cpu_scale(void) | |||
121 | 128 | ||
122 | bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) | 129 | bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) |
123 | { | 130 | { |
131 | static bool cap_parsing_failed; | ||
124 | int ret; | 132 | int ret; |
125 | u32 cpu_capacity; | 133 | u32 cpu_capacity; |
126 | 134 | ||
@@ -151,7 +159,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) | |||
151 | pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); | 159 | pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); |
152 | } | 160 | } |
153 | cap_parsing_failed = true; | 161 | cap_parsing_failed = true; |
154 | kfree(raw_capacity); | 162 | free_raw_capacity(); |
155 | } | 163 | } |
156 | 164 | ||
157 | return !ret; | 165 | return !ret; |
@@ -171,7 +179,7 @@ init_cpu_capacity_callback(struct notifier_block *nb, | |||
171 | struct cpufreq_policy *policy = data; | 179 | struct cpufreq_policy *policy = data; |
172 | int cpu; | 180 | int cpu; |
173 | 181 | ||
174 | if (cap_parsing_failed || cap_parsing_done) | 182 | if (!raw_capacity || cap_parsing_done) |
175 | return 0; | 183 | return 0; |
176 | 184 | ||
177 | if (val != CPUFREQ_NOTIFY) | 185 | if (val != CPUFREQ_NOTIFY) |
@@ -191,7 +199,7 @@ init_cpu_capacity_callback(struct notifier_block *nb, | |||
191 | 199 | ||
192 | if (cpumask_empty(cpus_to_visit)) { | 200 | if (cpumask_empty(cpus_to_visit)) { |
193 | topology_normalize_cpu_scale(); | 201 | topology_normalize_cpu_scale(); |
194 | kfree(raw_capacity); | 202 | free_raw_capacity(); |
195 | pr_debug("cpu_capacity: parsing done\n"); | 203 | pr_debug("cpu_capacity: parsing done\n"); |
196 | cap_parsing_done = true; | 204 | cap_parsing_done = true; |
197 | schedule_work(&parsing_done_work); | 205 | schedule_work(&parsing_done_work); |
@@ -233,11 +241,5 @@ static void parsing_done_workfn(struct work_struct *work) | |||
233 | } | 241 | } |
234 | 242 | ||
235 | #else | 243 | #else |
236 | static int __init free_raw_capacity(void) | ||
237 | { | ||
238 | kfree(raw_capacity); | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | core_initcall(free_raw_capacity); | 244 | core_initcall(free_raw_capacity); |
243 | #endif | 245 | #endif |