author	Viresh Kumar <viresh.kumar@linaro.org>	2014-08-28 01:52:28 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-09-08 19:44:41 -0400
commit	d2f31f1da54f83c4eb2738402284c49cd51798d1 (patch)
tree	91b299799d8b0293c0e37a39b4280d315e7f1a16
parent	fbd48ca5911b3cd70da57c3313d13004e40aea54 (diff)
cpufreq: cpu0: Move per-cluster initialization code to ->init()
Currently this driver only supports platforms on which all CPUs share clock & voltage lines, and there is a requirement to support platforms which have separate clock & voltage lines for groups of CPUs, like Qualcomm's Krait and ARM's big LITTLE.

Each group of CPUs sharing clock/voltage lines is represented by a 'struct cpufreq_policy' in the cpufreq framework, and the core calls ->init() once for each policy. Currently we do all initialization/allocation from probe(), which wouldn't work for the above scenario. To make it work for these platforms, the first step is to move all initialization/allocation to ->init() and add an ->exit() callback to undo it. Also, remove all global variables and allocate space for them at runtime. This patch creates 'struct private_data' to keep all such information; a pointer to it is stored in policy->driver_data.

The changed probe() routine now only checks whether the regulator and clock are available or whether probing must be deferred. If they are available, it registers the cpufreq driver; otherwise it returns -EPROBE_DEFER.

We still *don't* support platforms with separate clock/voltage lines for CPUs; that will be done in a separate patch later.

Tested-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--	drivers/cpufreq/cpufreq-cpu0.c	189
1 file changed, 136 insertions(+), 53 deletions(-)
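
Reduced to its essentials, the pattern this patch adopts is per-policy private data: allocate state in ->init(), store a pointer to it in policy->driver_data, and undo the work in ->exit(). The sketch below only illustrates that shape; the struct fields, the "example_" names and the simplified error handling are hypothetical and are not the driver code in the diff that follows.

/*
 * Minimal sketch of per-policy private data (hypothetical "example_" names,
 * not the cpufreq-cpu0 code below): allocate in ->init(), park the pointer
 * in policy->driver_data, free it again in ->exit().
 */
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>

struct example_private_data {
        struct device *cpu_dev;                 /* device of the policy's CPU */
        unsigned int voltage_tolerance;         /* in percentage */
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        struct example_private_data *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->cpu_dev = get_cpu_device(policy->cpu);
        policy->driver_data = priv;     /* retrieved again in other callbacks */

        return 0;
}

static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
        kfree(policy->driver_data);     /* reverse of ->init() */
        return 0;
}

Because the cpufreq core invokes ->init()/->exit() once per policy, this layout extends naturally to platforms where each cluster gets its own policy, which is why the patch moves all allocation out of probe().
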
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 741ff220f9bf..03e352b627dd 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -28,18 +28,21 @@
 #include <linux/slab.h>
 #include <linux/thermal.h>
 
-static unsigned int transition_latency;
-static unsigned int voltage_tolerance; /* in percentage */
-
-static struct device *cpu_dev;
-static struct clk *cpu_clk;
-static struct regulator *cpu_reg;
-static struct cpufreq_frequency_table *freq_table;
-static struct thermal_cooling_device *cdev;
+struct private_data {
+        struct device *cpu_dev;
+        struct regulator *cpu_reg;
+        struct thermal_cooling_device *cdev;
+        unsigned int voltage_tolerance; /* in percentage */
+};
 
 static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
         struct dev_pm_opp *opp;
+        struct cpufreq_frequency_table *freq_table = policy->freq_table;
+        struct clk *cpu_clk = policy->clk;
+        struct private_data *priv = policy->driver_data;
+        struct device *cpu_dev = priv->cpu_dev;
+        struct regulator *cpu_reg = priv->cpu_reg;
         unsigned long volt = 0, volt_old = 0, tol = 0;
         unsigned int old_freq, new_freq;
         long freq_Hz, freq_exact;
@@ -64,7 +67,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
                 }
                 volt = dev_pm_opp_get_voltage(opp);
                 rcu_read_unlock();
-                tol = volt * voltage_tolerance / 100;
+                tol = volt * priv->voltage_tolerance / 100;
                 volt_old = regulator_get_voltage(cpu_reg);
         }
 
@@ -103,26 +106,13 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
         return ret;
 }
 
-static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
-{
-        policy->clk = cpu_clk;
-        return cpufreq_generic_init(policy, freq_table, transition_latency);
-}
-
-static struct cpufreq_driver cpu0_cpufreq_driver = {
-        .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-        .verify = cpufreq_generic_frequency_table_verify,
-        .target_index = cpu0_set_target,
-        .get = cpufreq_generic_get,
-        .init = cpu0_cpufreq_init,
-        .name = "generic_cpu0",
-        .attr = cpufreq_generic_attr,
-};
-
-static int cpu0_cpufreq_probe(struct platform_device *pdev)
+static int allocate_resources(struct device **cdev,
+                              struct regulator **creg, struct clk **cclk)
 {
-        struct device_node *np;
-        int ret;
+        struct device *cpu_dev;
+        struct regulator *cpu_reg;
+        struct clk *cpu_clk;
+        int ret = 0;
 
         cpu_dev = get_cpu_device(0);
         if (!cpu_dev) {
@@ -130,12 +120,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                 return -ENODEV;
         }
 
-        np = of_node_get(cpu_dev->of_node);
-        if (!np) {
-                dev_err(cpu_dev, "failed to find cpu0 node\n");
-                return -ENOENT;
-        }
-
         cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
         if (IS_ERR(cpu_reg)) {
                 /*
@@ -144,8 +128,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                  */
                 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
                         dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
-                        ret = -EPROBE_DEFER;
-                        goto out_put_node;
+                        return -EPROBE_DEFER;
                 }
                 dev_warn(cpu_dev, "failed to get cpu0 regulator: %ld\n",
                          PTR_ERR(cpu_reg));
@@ -153,6 +136,10 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
         cpu_clk = clk_get(cpu_dev, NULL);
         if (IS_ERR(cpu_clk)) {
+                /* put regulator */
+                if (!IS_ERR(cpu_reg))
+                        regulator_put(cpu_reg);
+
                 ret = PTR_ERR(cpu_clk);
 
                 /*
@@ -163,8 +150,39 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                         dev_dbg(cpu_dev, "cpu0 clock not ready, retry\n");
                 else
                         dev_err(cpu_dev, "failed to get cpu0 clock: %d\n", ret);
+        } else {
+                *cdev = cpu_dev;
+                *creg = cpu_reg;
+                *cclk = cpu_clk;
+        }
+
+        return ret;
+}
+
+static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
+{
+        struct cpufreq_frequency_table *freq_table;
+        struct thermal_cooling_device *cdev;
+        struct device_node *np;
+        struct private_data *priv;
+        struct device *cpu_dev;
+        struct regulator *cpu_reg;
+        struct clk *cpu_clk;
+        unsigned int transition_latency;
+        int ret;
+
+        /* We only support cpu0 currently */
+        ret = allocate_resources(&cpu_dev, &cpu_reg, &cpu_clk);
+        if (ret) {
+                pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
+                return ret;
+        }
 
-                goto out_put_reg;
+        np = of_node_get(cpu_dev->of_node);
+        if (!np) {
+                dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
+                ret = -ENOENT;
+                goto out_put_reg_clk;
         }
 
         /* OPPs might be populated at runtime, don't check for error here */
@@ -173,10 +191,16 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
         ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
         if (ret) {
                 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-                goto out_put_clk;
+                goto out_put_node;
+        }
+
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (!priv) {
+                ret = -ENOMEM;
+                goto out_free_table;
         }
 
-        of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
 
         if (of_property_read_u32(np, "clock-latency", &transition_latency))
                 transition_latency = CPUFREQ_ETERNAL;
@@ -206,12 +230,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                 transition_latency += ret * 1000;
         }
 
-        ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
-        if (ret) {
-                dev_err(cpu_dev, "failed to register driver: %d\n", ret);
-                goto out_free_table;
-        }
-
         /*
          * For now, just loading the cooling device;
          * thermal DT code takes care of matching them.
@@ -222,29 +240,94 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                         dev_err(cpu_dev,
                                 "running cpufreq without cooling device: %ld\n",
                                 PTR_ERR(cdev));
+                else
+                        priv->cdev = cdev;
         }
-
         of_node_put(np);
+
+        priv->cpu_dev = cpu_dev;
+        priv->cpu_reg = cpu_reg;
+        policy->driver_data = priv;
+
+        policy->clk = cpu_clk;
+        ret = cpufreq_generic_init(policy, freq_table, transition_latency);
+        if (ret)
+                goto out_cooling_unregister;
+
         return 0;
 
+out_cooling_unregister:
+        cpufreq_cooling_unregister(priv->cdev);
+        kfree(priv);
 out_free_table:
         dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_put_clk:
+out_put_node:
+        of_node_put(np);
+out_put_reg_clk:
         clk_put(cpu_clk);
-out_put_reg:
         if (!IS_ERR(cpu_reg))
                 regulator_put(cpu_reg);
-out_put_node:
-        of_node_put(np);
+
+        return ret;
+}
+
+static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
+{
+        struct private_data *priv = policy->driver_data;
+
+        cpufreq_cooling_unregister(priv->cdev);
+        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+        clk_put(policy->clk);
+        if (!IS_ERR(priv->cpu_reg))
+                regulator_put(priv->cpu_reg);
+        kfree(priv);
+
+        return 0;
+}
+
+static struct cpufreq_driver cpu0_cpufreq_driver = {
+        .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+        .verify = cpufreq_generic_frequency_table_verify,
+        .target_index = cpu0_set_target,
+        .get = cpufreq_generic_get,
+        .init = cpu0_cpufreq_init,
+        .exit = cpu0_cpufreq_exit,
+        .name = "generic_cpu0",
+        .attr = cpufreq_generic_attr,
+};
+
+static int cpu0_cpufreq_probe(struct platform_device *pdev)
+{
+        struct device *cpu_dev;
+        struct regulator *cpu_reg;
+        struct clk *cpu_clk;
+        int ret;
+
+        /*
+         * All per-cluster (CPUs sharing clock/voltages) initialization is done
+         * from ->init(). In probe(), we just need to make sure that clk and
+         * regulators are available. Else defer probe and retry.
+         *
+         * FIXME: Is checking this only for CPU0 sufficient ?
+         */
+        ret = allocate_resources(&cpu_dev, &cpu_reg, &cpu_clk);
+        if (ret)
+                return ret;
+
+        clk_put(cpu_clk);
+        if (!IS_ERR(cpu_reg))
+                regulator_put(cpu_reg);
+
+        ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
+        if (ret)
+                dev_err(cpu_dev, "failed register driver: %d\n", ret);
+
         return ret;
 }
 
 static int cpu0_cpufreq_remove(struct platform_device *pdev)
 {
-        cpufreq_cooling_unregister(cdev);
         cpufreq_unregister_driver(&cpu0_cpufreq_driver);
-        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-
         return 0;
 }
 