author		Viresh Kumar <viresh.kumar@linaro.org>	2017-09-26 18:12:40 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-10-02 20:45:12 -0400
commit		7813dd6fc75fb375d4caf002e7f80a826fc3153a (patch)
tree		37326fb95800c931382c3abda492d207c81aef41 /drivers/opp
parent		9e66317d3c92ddaab330c125dfe9d06eee268aff (diff)
PM / OPP: Move the OPP directory out of power/
The drivers/base/power/ directory is special and contains code related
to power management core like system suspend/resume, hibernation, etc.
It was fine to keep the OPP code inside it when we had just one file for
it, but it is growing now and already has a directory for itself.
Let's move it directly under the drivers/ directory, just like cpufreq and
cpuidle.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/opp')
-rw-r--r--	drivers/opp/Kconfig	13
-rw-r--r--	drivers/opp/Makefile	4
-rw-r--r--	drivers/opp/core.c	1747
-rw-r--r--	drivers/opp/cpu.c	236
-rw-r--r--	drivers/opp/debugfs.c	249
-rw-r--r--	drivers/opp/of.c	633
-rw-r--r--	drivers/opp/opp.h	222
7 files changed, 3104 insertions, 0 deletions
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
new file mode 100644
index 000000000000..a7fbb93f302c
--- /dev/null
+++ b/drivers/opp/Kconfig
@@ -0,0 +1,13 @@
1 | config PM_OPP | ||
2 | bool | ||
3 | select SRCU | ||
4 | ---help--- | ||
5 | SOCs have a standard set of tuples consisting of frequency and | ||
6 | voltage pairs that the device will support per voltage domain. This | ||
7 | is called Operating Performance Point or OPP. The actual definitions | ||
8 | of OPP vary over silicon within the same family of devices. | ||
9 | |||
10 | OPP layer organizes the data internally using device pointers | ||
11 | representing individual voltage domains and provides SOC | ||
12 | implementations a ready to use framework to manage OPPs. | ||
13 | For more information, read <file:Documentation/power/opp.txt> | ||
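For illustration, here is a minimal sketch (not part of this patch) of what the Kconfig help describes: a platform registering a few frequency/voltage tuples for a device using the dev_pm_opp_add() API added in core.c below. The device and the OPP values are made up, and <linux/pm_opp.h> is assumed to be included.

/* Illustrative only: the frequencies and voltages below are invented. */
static int example_register_opps(struct device *dev)
{
	int ret;

	/* Each OPP is a (frequency in Hz, voltage in uV) tuple. */
	ret = dev_pm_opp_add(dev, 500000000, 950000);
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 800000000, 1100000);
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1000000000, 1250000);
}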
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
new file mode 100644
index 000000000000..e70ceb406fe9
--- /dev/null
+++ b/drivers/opp/Makefile
@@ -0,0 +1,4 @@
1 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | ||
2 | obj-y += core.o cpu.o | ||
3 | obj-$(CONFIG_OF) += of.o | ||
4 | obj-$(CONFIG_DEBUG_FS) += debugfs.o | ||
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
new file mode 100644
index 000000000000..a6de32530693
--- /dev/null
+++ b/drivers/opp/core.c
@@ -0,0 +1,1747 @@
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/export.h> | ||
22 | #include <linux/regulator/consumer.h> | ||
23 | |||
24 | #include "opp.h" | ||
25 | |||
26 | /* | ||
27 | * The root of the list of all opp-tables. All opp_table structures branch off | ||
28 | * from here, with each opp_table containing the list of opps it supports in | ||
29 | * various states of availability. | ||
30 | */ | ||
31 | LIST_HEAD(opp_tables); | ||
32 | /* Lock to allow exclusive modification to the device and opp lists */ | ||
33 | DEFINE_MUTEX(opp_table_lock); | ||
34 | |||
35 | static void dev_pm_opp_get(struct dev_pm_opp *opp); | ||
36 | |||
37 | static struct opp_device *_find_opp_dev(const struct device *dev, | ||
38 | struct opp_table *opp_table) | ||
39 | { | ||
40 | struct opp_device *opp_dev; | ||
41 | |||
42 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) | ||
43 | if (opp_dev->dev == dev) | ||
44 | return opp_dev; | ||
45 | |||
46 | return NULL; | ||
47 | } | ||
48 | |||
49 | static struct opp_table *_find_opp_table_unlocked(struct device *dev) | ||
50 | { | ||
51 | struct opp_table *opp_table; | ||
52 | |||
53 | list_for_each_entry(opp_table, &opp_tables, node) { | ||
54 | if (_find_opp_dev(dev, opp_table)) { | ||
55 | _get_opp_table_kref(opp_table); | ||
56 | |||
57 | return opp_table; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | return ERR_PTR(-ENODEV); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * _find_opp_table() - find opp_table struct using device pointer | ||
66 | * @dev: device pointer used to lookup OPP table | ||
67 | * | ||
68 | * Search OPP table for one containing matching device. | ||
69 | * | ||
70 | * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or | ||
71 | * -EINVAL based on type of error. | ||
72 | * | ||
73 | * The callers must call dev_pm_opp_put_opp_table() after the table is used. | ||
74 | */ | ||
75 | struct opp_table *_find_opp_table(struct device *dev) | ||
76 | { | ||
77 | struct opp_table *opp_table; | ||
78 | |||
79 | if (IS_ERR_OR_NULL(dev)) { | ||
80 | pr_err("%s: Invalid parameters\n", __func__); | ||
81 | return ERR_PTR(-EINVAL); | ||
82 | } | ||
83 | |||
84 | mutex_lock(&opp_table_lock); | ||
85 | opp_table = _find_opp_table_unlocked(dev); | ||
86 | mutex_unlock(&opp_table_lock); | ||
87 | |||
88 | return opp_table; | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp | ||
93 | * @opp: opp for which voltage has to be returned | ||
94 | * | ||
95 | * Return: voltage in micro volt corresponding to the opp, else | ||
96 | * return 0 | ||
97 | * | ||
98 | * This is useful only for devices with single power supply. | ||
99 | */ | ||
100 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) | ||
101 | { | ||
102 | if (IS_ERR_OR_NULL(opp)) { | ||
103 | pr_err("%s: Invalid parameters\n", __func__); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | return opp->supplies[0].u_volt; | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); | ||
110 | |||
111 | /** | ||
112 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp | ||
113 | * @opp: opp for which frequency has to be returned | ||
114 | * | ||
115 | * Return: frequency in hertz corresponding to the opp, else | ||
116 | * return 0 | ||
117 | */ | ||
118 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) | ||
119 | { | ||
120 | if (IS_ERR_OR_NULL(opp) || !opp->available) { | ||
121 | pr_err("%s: Invalid parameters\n", __func__); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | return opp->rate; | ||
126 | } | ||
127 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); | ||
128 | |||
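As a hedged usage sketch (not from this patch), a driver that already holds a referenced dev_pm_opp could read the tuple back with the two getters above:

/* Minimal sketch: 'opp' is assumed to be a valid, referenced dev_pm_opp. */
static void example_print_opp(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned long freq = dev_pm_opp_get_freq(opp);
	unsigned long volt = dev_pm_opp_get_voltage(opp);

	dev_info(dev, "OPP: %lu Hz @ %lu uV\n", freq, volt);
}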
129 | /** | ||
130 | * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not | ||
131 | * @opp: opp for which turbo mode is being verified | ||
132 | * | ||
133 | * Turbo OPPs are not for normal use, and can be enabled (under certain | ||
134 | * conditions) for short duration of times to finish high throughput work | ||
135 | * quickly. Running on them for longer times may overheat the chip. | ||
136 | * | ||
137 | * Return: true if opp is turbo opp, else false. | ||
138 | */ | ||
139 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) | ||
140 | { | ||
141 | if (IS_ERR_OR_NULL(opp) || !opp->available) { | ||
142 | pr_err("%s: Invalid parameters\n", __func__); | ||
143 | return false; | ||
144 | } | ||
145 | |||
146 | return opp->turbo; | ||
147 | } | ||
148 | EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); | ||
149 | |||
150 | /** | ||
151 | * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds | ||
152 | * @dev: device for which we do this operation | ||
153 | * | ||
154 | * Return: This function returns the max clock latency in nanoseconds. | ||
155 | */ | ||
156 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) | ||
157 | { | ||
158 | struct opp_table *opp_table; | ||
159 | unsigned long clock_latency_ns; | ||
160 | |||
161 | opp_table = _find_opp_table(dev); | ||
162 | if (IS_ERR(opp_table)) | ||
163 | return 0; | ||
164 | |||
165 | clock_latency_ns = opp_table->clock_latency_ns_max; | ||
166 | |||
167 | dev_pm_opp_put_opp_table(opp_table); | ||
168 | |||
169 | return clock_latency_ns; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); | ||
172 | |||
173 | /** | ||
174 | * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds | ||
175 | * @dev: device for which we do this operation | ||
176 | * | ||
177 | * Return: This function returns the max voltage latency in nanoseconds. | ||
178 | */ | ||
179 | unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | ||
180 | { | ||
181 | struct opp_table *opp_table; | ||
182 | struct dev_pm_opp *opp; | ||
183 | struct regulator *reg; | ||
184 | unsigned long latency_ns = 0; | ||
185 | int ret, i, count; | ||
186 | struct { | ||
187 | unsigned long min; | ||
188 | unsigned long max; | ||
189 | } *uV; | ||
190 | |||
191 | opp_table = _find_opp_table(dev); | ||
192 | if (IS_ERR(opp_table)) | ||
193 | return 0; | ||
194 | |||
195 | count = opp_table->regulator_count; | ||
196 | |||
197 | /* Regulator may not be required for the device */ | ||
198 | if (!count) | ||
199 | goto put_opp_table; | ||
200 | |||
201 | uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); | ||
202 | if (!uV) | ||
203 | goto put_opp_table; | ||
204 | |||
205 | mutex_lock(&opp_table->lock); | ||
206 | |||
207 | for (i = 0; i < count; i++) { | ||
208 | uV[i].min = ~0; | ||
209 | uV[i].max = 0; | ||
210 | |||
211 | list_for_each_entry(opp, &opp_table->opp_list, node) { | ||
212 | if (!opp->available) | ||
213 | continue; | ||
214 | |||
215 | if (opp->supplies[i].u_volt_min < uV[i].min) | ||
216 | uV[i].min = opp->supplies[i].u_volt_min; | ||
217 | if (opp->supplies[i].u_volt_max > uV[i].max) | ||
218 | uV[i].max = opp->supplies[i].u_volt_max; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | mutex_unlock(&opp_table->lock); | ||
223 | |||
224 | /* | ||
225 | * The caller needs to ensure that opp_table (and hence the regulator) | ||
226 | * isn't freed, while we are executing this routine. | ||
227 | */ | ||
228 | for (i = 0; i < count; i++) { | ||
229 | reg = opp_table->regulators[i]; | ||
230 | ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); | ||
231 | if (ret > 0) | ||
232 | latency_ns += ret * 1000; | ||
233 | } | ||
234 | |||
235 | kfree(uV); | ||
236 | put_opp_table: | ||
237 | dev_pm_opp_put_opp_table(opp_table); | ||
238 | |||
239 | return latency_ns; | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency); | ||
242 | |||
243 | /** | ||
244 | * dev_pm_opp_get_max_transition_latency() - Get max transition latency in | ||
245 | * nanoseconds | ||
246 | * @dev: device for which we do this operation | ||
247 | * | ||
248 | * Return: This function returns the max transition latency, in nanoseconds, to | ||
249 | * switch from one OPP to other. | ||
250 | */ | ||
251 | unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) | ||
252 | { | ||
253 | return dev_pm_opp_get_max_volt_latency(dev) + | ||
254 | dev_pm_opp_get_max_clock_latency(dev); | ||
255 | } | ||
256 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); | ||
257 | |||
258 | /** | ||
259 | * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz | ||
260 | * @dev: device for which we do this operation | ||
261 | * | ||
262 | * Return: This function returns the frequency of the OPP marked as suspend_opp | ||
263 | * if one is available, else returns 0; | ||
264 | */ | ||
265 | unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) | ||
266 | { | ||
267 | struct opp_table *opp_table; | ||
268 | unsigned long freq = 0; | ||
269 | |||
270 | opp_table = _find_opp_table(dev); | ||
271 | if (IS_ERR(opp_table)) | ||
272 | return 0; | ||
273 | |||
274 | if (opp_table->suspend_opp && opp_table->suspend_opp->available) | ||
275 | freq = dev_pm_opp_get_freq(opp_table->suspend_opp); | ||
276 | |||
277 | dev_pm_opp_put_opp_table(opp_table); | ||
278 | |||
279 | return freq; | ||
280 | } | ||
281 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); | ||
282 | |||
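A hypothetical governor-side sketch using the latency and suspend-OPP helpers above; the values are only queried here, nothing is configured, and the caller device is an assumption of the example.

static void example_query_limits(struct device *dev)
{
	unsigned long clk_lat = dev_pm_opp_get_max_clock_latency(dev);
	unsigned long volt_lat = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long total_lat = dev_pm_opp_get_max_transition_latency(dev);
	unsigned long suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);

	dev_dbg(dev, "clk: %lu ns, volt: %lu ns, total: %lu ns, suspend: %lu Hz\n",
		clk_lat, volt_lat, total_lat, suspend_freq);
}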
283 | /** | ||
284 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table | ||
285 | * @dev: device for which we do this operation | ||
286 | * | ||
287 | * Return: This function returns the number of available opps if there are any, | ||
288 | * else returns 0 if none or the corresponding error value. | ||
289 | */ | ||
290 | int dev_pm_opp_get_opp_count(struct device *dev) | ||
291 | { | ||
292 | struct opp_table *opp_table; | ||
293 | struct dev_pm_opp *temp_opp; | ||
294 | int count = 0; | ||
295 | |||
296 | opp_table = _find_opp_table(dev); | ||
297 | if (IS_ERR(opp_table)) { | ||
298 | count = PTR_ERR(opp_table); | ||
299 | dev_err(dev, "%s: OPP table not found (%d)\n", | ||
300 | __func__, count); | ||
301 | return count; | ||
302 | } | ||
303 | |||
304 | mutex_lock(&opp_table->lock); | ||
305 | |||
306 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
307 | if (temp_opp->available) | ||
308 | count++; | ||
309 | } | ||
310 | |||
311 | mutex_unlock(&opp_table->lock); | ||
312 | dev_pm_opp_put_opp_table(opp_table); | ||
313 | |||
314 | return count; | ||
315 | } | ||
316 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); | ||
317 | |||
318 | /** | ||
319 | * dev_pm_opp_find_freq_exact() - search for an exact frequency | ||
320 | * @dev: device for which we do this operation | ||
321 | * @freq: frequency to search for | ||
322 | * @available: true/false - match for available opp | ||
323 | * | ||
324 | * Return: Searches for exact match in the opp table and returns pointer to the | ||
325 | * matching opp if found, else returns ERR_PTR in case of error and should | ||
326 | * be handled using IS_ERR. Error return values can be: | ||
327 | * EINVAL: for bad pointer | ||
328 | * ERANGE: no match found for search | ||
329 | * ENODEV: if device not found in list of registered devices | ||
330 | * | ||
331 | * Note: available is a modifier for the search. If available=true, then the | ||
332 | * match is for an exact frequency which is available in the stored OPP | ||
333 | * table. If false, the match is for an exact frequency which is not available. | ||
334 | * | ||
335 | * This provides a mechanism to enable an opp which is not available currently | ||
336 | * or the opposite as well. | ||
337 | * | ||
338 | * The callers are required to call dev_pm_opp_put() for the returned OPP after | ||
339 | * use. | ||
340 | */ | ||
341 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | ||
342 | unsigned long freq, | ||
343 | bool available) | ||
344 | { | ||
345 | struct opp_table *opp_table; | ||
346 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | ||
347 | |||
348 | opp_table = _find_opp_table(dev); | ||
349 | if (IS_ERR(opp_table)) { | ||
350 | int r = PTR_ERR(opp_table); | ||
351 | |||
352 | dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); | ||
353 | return ERR_PTR(r); | ||
354 | } | ||
355 | |||
356 | mutex_lock(&opp_table->lock); | ||
357 | |||
358 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
359 | if (temp_opp->available == available && | ||
360 | temp_opp->rate == freq) { | ||
361 | opp = temp_opp; | ||
362 | |||
363 | /* Increment the reference count of OPP */ | ||
364 | dev_pm_opp_get(opp); | ||
365 | break; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | mutex_unlock(&opp_table->lock); | ||
370 | dev_pm_opp_put_opp_table(opp_table); | ||
371 | |||
372 | return opp; | ||
373 | } | ||
374 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); | ||
375 | |||
376 | static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, | ||
377 | unsigned long *freq) | ||
378 | { | ||
379 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | ||
380 | |||
381 | mutex_lock(&opp_table->lock); | ||
382 | |||
383 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
384 | if (temp_opp->available && temp_opp->rate >= *freq) { | ||
385 | opp = temp_opp; | ||
386 | *freq = opp->rate; | ||
387 | |||
388 | /* Increment the reference count of OPP */ | ||
389 | dev_pm_opp_get(opp); | ||
390 | break; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | mutex_unlock(&opp_table->lock); | ||
395 | |||
396 | return opp; | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq | ||
401 | * @dev: device for which we do this operation | ||
402 | * @freq: Start frequency | ||
403 | * | ||
404 | * Search for the matching ceil *available* OPP from a starting freq | ||
405 | * for a device. | ||
406 | * | ||
407 | * Return: matching *opp and refreshes *freq accordingly, else returns | ||
408 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | ||
409 | * values can be: | ||
410 | * EINVAL: for bad pointer | ||
411 | * ERANGE: no match found for search | ||
412 | * ENODEV: if device not found in list of registered devices | ||
413 | * | ||
414 | * The callers are required to call dev_pm_opp_put() for the returned OPP after | ||
415 | * use. | ||
416 | */ | ||
417 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | ||
418 | unsigned long *freq) | ||
419 | { | ||
420 | struct opp_table *opp_table; | ||
421 | struct dev_pm_opp *opp; | ||
422 | |||
423 | if (!dev || !freq) { | ||
424 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
425 | return ERR_PTR(-EINVAL); | ||
426 | } | ||
427 | |||
428 | opp_table = _find_opp_table(dev); | ||
429 | if (IS_ERR(opp_table)) | ||
430 | return ERR_CAST(opp_table); | ||
431 | |||
432 | opp = _find_freq_ceil(opp_table, freq); | ||
433 | |||
434 | dev_pm_opp_put_opp_table(opp_table); | ||
435 | |||
436 | return opp; | ||
437 | } | ||
438 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); | ||
439 | |||
440 | /** | ||
441 | * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq | ||
442 | * @dev: device for which we do this operation | ||
443 | * @freq: Start frequency | ||
444 | * | ||
445 | * Search for the matching floor *available* OPP from a starting freq | ||
446 | * for a device. | ||
447 | * | ||
448 | * Return: matching *opp and refreshes *freq accordingly, else returns | ||
449 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | ||
450 | * values can be: | ||
451 | * EINVAL: for bad pointer | ||
452 | * ERANGE: no match found for search | ||
453 | * ENODEV: if device not found in list of registered devices | ||
454 | * | ||
455 | * The callers are required to call dev_pm_opp_put() for the returned OPP after | ||
456 | * use. | ||
457 | */ | ||
458 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | ||
459 | unsigned long *freq) | ||
460 | { | ||
461 | struct opp_table *opp_table; | ||
462 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | ||
463 | |||
464 | if (!dev || !freq) { | ||
465 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
466 | return ERR_PTR(-EINVAL); | ||
467 | } | ||
468 | |||
469 | opp_table = _find_opp_table(dev); | ||
470 | if (IS_ERR(opp_table)) | ||
471 | return ERR_CAST(opp_table); | ||
472 | |||
473 | mutex_lock(&opp_table->lock); | ||
474 | |||
475 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
476 | if (temp_opp->available) { | ||
477 | /* go to the next node, before choosing prev */ | ||
478 | if (temp_opp->rate > *freq) | ||
479 | break; | ||
480 | else | ||
481 | opp = temp_opp; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | /* Increment the reference count of OPP */ | ||
486 | if (!IS_ERR(opp)) | ||
487 | dev_pm_opp_get(opp); | ||
488 | mutex_unlock(&opp_table->lock); | ||
489 | dev_pm_opp_put_opp_table(opp_table); | ||
490 | |||
491 | if (!IS_ERR(opp)) | ||
492 | *freq = opp->rate; | ||
493 | |||
494 | return opp; | ||
495 | } | ||
496 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); | ||
497 | |||
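A minimal sketch of the lookup pattern the kernel-doc above describes: the ceil/floor helpers refresh *freq and return a referenced OPP that the caller must release with dev_pm_opp_put(). The function name and target frequency are illustrative only.

static int example_pick_opp(struct device *dev, unsigned long target_freq)
{
	struct dev_pm_opp *opp;
	unsigned long freq = target_freq;

	/* Find the lowest available OPP at or above target_freq. */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_dbg(dev, "selected %lu Hz @ %lu uV\n", freq,
		dev_pm_opp_get_voltage(opp));

	/* Drop the reference taken by the find helper. */
	dev_pm_opp_put(opp);

	return 0;
}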
498 | static int _set_opp_voltage(struct device *dev, struct regulator *reg, | ||
499 | struct dev_pm_opp_supply *supply) | ||
500 | { | ||
501 | int ret; | ||
502 | |||
503 | /* Regulator not available for device */ | ||
504 | if (IS_ERR(reg)) { | ||
505 | dev_dbg(dev, "%s: regulator not available: %ld\n", __func__, | ||
506 | PTR_ERR(reg)); | ||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__, | ||
511 | supply->u_volt_min, supply->u_volt, supply->u_volt_max); | ||
512 | |||
513 | ret = regulator_set_voltage_triplet(reg, supply->u_volt_min, | ||
514 | supply->u_volt, supply->u_volt_max); | ||
515 | if (ret) | ||
516 | dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n", | ||
517 | __func__, supply->u_volt_min, supply->u_volt, | ||
518 | supply->u_volt_max, ret); | ||
519 | |||
520 | return ret; | ||
521 | } | ||
522 | |||
523 | static inline int | ||
524 | _generic_set_opp_clk_only(struct device *dev, struct clk *clk, | ||
525 | unsigned long old_freq, unsigned long freq) | ||
526 | { | ||
527 | int ret; | ||
528 | |||
529 | ret = clk_set_rate(clk, freq); | ||
530 | if (ret) { | ||
531 | dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, | ||
532 | ret); | ||
533 | } | ||
534 | |||
535 | return ret; | ||
536 | } | ||
537 | |||
538 | static int _generic_set_opp_regulator(const struct opp_table *opp_table, | ||
539 | struct device *dev, | ||
540 | unsigned long old_freq, | ||
541 | unsigned long freq, | ||
542 | struct dev_pm_opp_supply *old_supply, | ||
543 | struct dev_pm_opp_supply *new_supply) | ||
544 | { | ||
545 | struct regulator *reg = opp_table->regulators[0]; | ||
546 | int ret; | ||
547 | |||
548 | /* This function only supports single regulator per device */ | ||
549 | if (WARN_ON(opp_table->regulator_count > 1)) { | ||
550 | dev_err(dev, "multiple regulators are not supported\n"); | ||
551 | return -EINVAL; | ||
552 | } | ||
553 | |||
554 | /* Scaling up? Scale voltage before frequency */ | ||
555 | if (freq > old_freq) { | ||
556 | ret = _set_opp_voltage(dev, reg, new_supply); | ||
557 | if (ret) | ||
558 | goto restore_voltage; | ||
559 | } | ||
560 | |||
561 | /* Change frequency */ | ||
562 | ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq); | ||
563 | if (ret) | ||
564 | goto restore_voltage; | ||
565 | |||
566 | /* Scaling down? Scale voltage after frequency */ | ||
567 | if (freq < old_freq) { | ||
568 | ret = _set_opp_voltage(dev, reg, new_supply); | ||
569 | if (ret) | ||
570 | goto restore_freq; | ||
571 | } | ||
572 | |||
573 | return 0; | ||
574 | |||
575 | restore_freq: | ||
576 | if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq)) | ||
577 | dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", | ||
578 | __func__, old_freq); | ||
579 | restore_voltage: | ||
580 | /* This shouldn't harm even if the voltages weren't updated earlier */ | ||
581 | if (old_supply) | ||
582 | _set_opp_voltage(dev, reg, old_supply); | ||
583 | |||
584 | return ret; | ||
585 | } | ||
586 | |||
587 | /** | ||
588 | * dev_pm_opp_set_rate() - Configure new OPP based on frequency | ||
589 | * @dev: device for which we do this operation | ||
590 | * @target_freq: frequency to achieve | ||
591 | * | ||
592 | * This configures the power-supplies and clock source to the levels specified | ||
593 | * by the OPP corresponding to the target_freq. | ||
594 | */ | ||
595 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | ||
596 | { | ||
597 | struct opp_table *opp_table; | ||
598 | unsigned long freq, old_freq; | ||
599 | struct dev_pm_opp *old_opp, *opp; | ||
600 | struct clk *clk; | ||
601 | int ret, size; | ||
602 | |||
603 | if (unlikely(!target_freq)) { | ||
604 | dev_err(dev, "%s: Invalid target frequency %lu\n", __func__, | ||
605 | target_freq); | ||
606 | return -EINVAL; | ||
607 | } | ||
608 | |||
609 | opp_table = _find_opp_table(dev); | ||
610 | if (IS_ERR(opp_table)) { | ||
611 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); | ||
612 | return PTR_ERR(opp_table); | ||
613 | } | ||
614 | |||
615 | clk = opp_table->clk; | ||
616 | if (IS_ERR(clk)) { | ||
617 | dev_err(dev, "%s: No clock available for the device\n", | ||
618 | __func__); | ||
619 | ret = PTR_ERR(clk); | ||
620 | goto put_opp_table; | ||
621 | } | ||
622 | |||
623 | freq = clk_round_rate(clk, target_freq); | ||
624 | if ((long)freq <= 0) | ||
625 | freq = target_freq; | ||
626 | |||
627 | old_freq = clk_get_rate(clk); | ||
628 | |||
629 | /* Return early if nothing to do */ | ||
630 | if (old_freq == freq) { | ||
631 | dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", | ||
632 | __func__, freq); | ||
633 | ret = 0; | ||
634 | goto put_opp_table; | ||
635 | } | ||
636 | |||
637 | old_opp = _find_freq_ceil(opp_table, &old_freq); | ||
638 | if (IS_ERR(old_opp)) { | ||
639 | dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n", | ||
640 | __func__, old_freq, PTR_ERR(old_opp)); | ||
641 | } | ||
642 | |||
643 | opp = _find_freq_ceil(opp_table, &freq); | ||
644 | if (IS_ERR(opp)) { | ||
645 | ret = PTR_ERR(opp); | ||
646 | dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", | ||
647 | __func__, freq, ret); | ||
648 | goto put_old_opp; | ||
649 | } | ||
650 | |||
651 | dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, | ||
652 | old_freq, freq); | ||
653 | |||
654 | /* Only frequency scaling */ | ||
655 | if (!opp_table->regulators) { | ||
656 | ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); | ||
657 | } else if (!opp_table->set_opp) { | ||
658 | ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq, | ||
659 | IS_ERR(old_opp) ? NULL : old_opp->supplies, | ||
660 | opp->supplies); | ||
661 | } else { | ||
662 | struct dev_pm_set_opp_data *data; | ||
663 | |||
664 | data = opp_table->set_opp_data; | ||
665 | data->regulators = opp_table->regulators; | ||
666 | data->regulator_count = opp_table->regulator_count; | ||
667 | data->clk = clk; | ||
668 | data->dev = dev; | ||
669 | |||
670 | data->old_opp.rate = old_freq; | ||
671 | size = sizeof(*opp->supplies) * opp_table->regulator_count; | ||
672 | if (IS_ERR(old_opp)) | ||
673 | memset(data->old_opp.supplies, 0, size); | ||
674 | else | ||
675 | memcpy(data->old_opp.supplies, old_opp->supplies, size); | ||
676 | |||
677 | data->new_opp.rate = freq; | ||
678 | memcpy(data->new_opp.supplies, opp->supplies, size); | ||
679 | |||
680 | ret = opp_table->set_opp(data); | ||
681 | } | ||
682 | |||
683 | dev_pm_opp_put(opp); | ||
684 | put_old_opp: | ||
685 | if (!IS_ERR(old_opp)) | ||
686 | dev_pm_opp_put(old_opp); | ||
687 | put_opp_table: | ||
688 | dev_pm_opp_put_opp_table(opp_table); | ||
689 | return ret; | ||
690 | } | ||
691 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); | ||
692 | |||
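For context, a hedged sketch of how a consumer (e.g. a cpufreq or devfreq driver) would typically call dev_pm_opp_set_rate(); the policy clamp against max_freq is invented for this example and is not part of the OPP core.

static int example_set_target(struct device *dev, unsigned long target_freq,
			      unsigned long max_freq)
{
	/* Hypothetical policy clamp, applied by the caller. */
	if (target_freq > max_freq)
		target_freq = max_freq;

	/*
	 * The OPP core rounds the request via clk_round_rate(), looks up the
	 * matching OPP and scales the regulator(s) and clock in the right
	 * order, as implemented above.
	 */
	return dev_pm_opp_set_rate(dev, target_freq);
}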
693 | /* OPP-dev Helpers */ | ||
694 | static void _remove_opp_dev(struct opp_device *opp_dev, | ||
695 | struct opp_table *opp_table) | ||
696 | { | ||
697 | opp_debug_unregister(opp_dev, opp_table); | ||
698 | list_del(&opp_dev->node); | ||
699 | kfree(opp_dev); | ||
700 | } | ||
701 | |||
702 | struct opp_device *_add_opp_dev(const struct device *dev, | ||
703 | struct opp_table *opp_table) | ||
704 | { | ||
705 | struct opp_device *opp_dev; | ||
706 | int ret; | ||
707 | |||
708 | opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); | ||
709 | if (!opp_dev) | ||
710 | return NULL; | ||
711 | |||
712 | /* Initialize opp-dev */ | ||
713 | opp_dev->dev = dev; | ||
714 | list_add(&opp_dev->node, &opp_table->dev_list); | ||
715 | |||
716 | /* Create debugfs entries for the opp_table */ | ||
717 | ret = opp_debug_register(opp_dev, opp_table); | ||
718 | if (ret) | ||
719 | dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", | ||
720 | __func__, ret); | ||
721 | |||
722 | return opp_dev; | ||
723 | } | ||
724 | |||
725 | static struct opp_table *_allocate_opp_table(struct device *dev) | ||
726 | { | ||
727 | struct opp_table *opp_table; | ||
728 | struct opp_device *opp_dev; | ||
729 | int ret; | ||
730 | |||
731 | /* | ||
732 | * Allocate a new OPP table. In the infrequent case where a new | ||
733 | * device needs to be added, we pay this penalty. | ||
734 | */ | ||
735 | opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); | ||
736 | if (!opp_table) | ||
737 | return NULL; | ||
738 | |||
739 | INIT_LIST_HEAD(&opp_table->dev_list); | ||
740 | |||
741 | opp_dev = _add_opp_dev(dev, opp_table); | ||
742 | if (!opp_dev) { | ||
743 | kfree(opp_table); | ||
744 | return NULL; | ||
745 | } | ||
746 | |||
747 | _of_init_opp_table(opp_table, dev); | ||
748 | |||
749 | /* Find clk for the device */ | ||
750 | opp_table->clk = clk_get(dev, NULL); | ||
751 | if (IS_ERR(opp_table->clk)) { | ||
752 | ret = PTR_ERR(opp_table->clk); | ||
753 | if (ret != -EPROBE_DEFER) | ||
754 | dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, | ||
755 | ret); | ||
756 | } | ||
757 | |||
758 | BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); | ||
759 | INIT_LIST_HEAD(&opp_table->opp_list); | ||
760 | mutex_init(&opp_table->lock); | ||
761 | kref_init(&opp_table->kref); | ||
762 | |||
763 | /* Secure the device table modification */ | ||
764 | list_add(&opp_table->node, &opp_tables); | ||
765 | return opp_table; | ||
766 | } | ||
767 | |||
768 | void _get_opp_table_kref(struct opp_table *opp_table) | ||
769 | { | ||
770 | kref_get(&opp_table->kref); | ||
771 | } | ||
772 | |||
773 | struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) | ||
774 | { | ||
775 | struct opp_table *opp_table; | ||
776 | |||
777 | /* Hold our table modification lock here */ | ||
778 | mutex_lock(&opp_table_lock); | ||
779 | |||
780 | opp_table = _find_opp_table_unlocked(dev); | ||
781 | if (!IS_ERR(opp_table)) | ||
782 | goto unlock; | ||
783 | |||
784 | opp_table = _allocate_opp_table(dev); | ||
785 | |||
786 | unlock: | ||
787 | mutex_unlock(&opp_table_lock); | ||
788 | |||
789 | return opp_table; | ||
790 | } | ||
791 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); | ||
792 | |||
793 | static void _opp_table_kref_release(struct kref *kref) | ||
794 | { | ||
795 | struct opp_table *opp_table = container_of(kref, struct opp_table, kref); | ||
796 | struct opp_device *opp_dev; | ||
797 | |||
798 | /* Release clk */ | ||
799 | if (!IS_ERR(opp_table->clk)) | ||
800 | clk_put(opp_table->clk); | ||
801 | |||
802 | opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device, | ||
803 | node); | ||
804 | |||
805 | _remove_opp_dev(opp_dev, opp_table); | ||
806 | |||
807 | /* dev_list must be empty now */ | ||
808 | WARN_ON(!list_empty(&opp_table->dev_list)); | ||
809 | |||
810 | mutex_destroy(&opp_table->lock); | ||
811 | list_del(&opp_table->node); | ||
812 | kfree(opp_table); | ||
813 | |||
814 | mutex_unlock(&opp_table_lock); | ||
815 | } | ||
816 | |||
817 | void dev_pm_opp_put_opp_table(struct opp_table *opp_table) | ||
818 | { | ||
819 | kref_put_mutex(&opp_table->kref, _opp_table_kref_release, | ||
820 | &opp_table_lock); | ||
821 | } | ||
822 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); | ||
823 | |||
824 | void _opp_free(struct dev_pm_opp *opp) | ||
825 | { | ||
826 | kfree(opp); | ||
827 | } | ||
828 | |||
829 | static void _opp_kref_release(struct kref *kref) | ||
830 | { | ||
831 | struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); | ||
832 | struct opp_table *opp_table = opp->opp_table; | ||
833 | |||
834 | /* | ||
835 | * Notify the changes in the availability of the operable | ||
836 | * frequency/voltage list. | ||
837 | */ | ||
838 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); | ||
839 | opp_debug_remove_one(opp); | ||
840 | list_del(&opp->node); | ||
841 | kfree(opp); | ||
842 | |||
843 | mutex_unlock(&opp_table->lock); | ||
844 | dev_pm_opp_put_opp_table(opp_table); | ||
845 | } | ||
846 | |||
847 | static void dev_pm_opp_get(struct dev_pm_opp *opp) | ||
848 | { | ||
849 | kref_get(&opp->kref); | ||
850 | } | ||
851 | |||
852 | void dev_pm_opp_put(struct dev_pm_opp *opp) | ||
853 | { | ||
854 | kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); | ||
855 | } | ||
856 | EXPORT_SYMBOL_GPL(dev_pm_opp_put); | ||
857 | |||
858 | /** | ||
859 | * dev_pm_opp_remove() - Remove an OPP from OPP table | ||
860 | * @dev: device for which we do this operation | ||
861 | * @freq: OPP to remove with matching 'freq' | ||
862 | * | ||
863 | * This function removes an opp from the opp table. | ||
864 | */ | ||
865 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) | ||
866 | { | ||
867 | struct dev_pm_opp *opp; | ||
868 | struct opp_table *opp_table; | ||
869 | bool found = false; | ||
870 | |||
871 | opp_table = _find_opp_table(dev); | ||
872 | if (IS_ERR(opp_table)) | ||
873 | return; | ||
874 | |||
875 | mutex_lock(&opp_table->lock); | ||
876 | |||
877 | list_for_each_entry(opp, &opp_table->opp_list, node) { | ||
878 | if (opp->rate == freq) { | ||
879 | found = true; | ||
880 | break; | ||
881 | } | ||
882 | } | ||
883 | |||
884 | mutex_unlock(&opp_table->lock); | ||
885 | |||
886 | if (found) { | ||
887 | dev_pm_opp_put(opp); | ||
888 | } else { | ||
889 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", | ||
890 | __func__, freq); | ||
891 | } | ||
892 | |||
893 | dev_pm_opp_put_opp_table(opp_table); | ||
894 | } | ||
895 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | ||
896 | |||
897 | struct dev_pm_opp *_opp_allocate(struct opp_table *table) | ||
898 | { | ||
899 | struct dev_pm_opp *opp; | ||
900 | int count, supply_size; | ||
901 | |||
902 | /* Allocate space for at least one supply */ | ||
903 | count = table->regulator_count ? table->regulator_count : 1; | ||
904 | supply_size = sizeof(*opp->supplies) * count; | ||
905 | |||
906 | /* allocate new OPP node and supplies structures */ | ||
907 | opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); | ||
908 | if (!opp) | ||
909 | return NULL; | ||
910 | |||
911 | /* Put the supplies at the end of the OPP structure as an empty array */ | ||
912 | opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); | ||
913 | INIT_LIST_HEAD(&opp->node); | ||
914 | |||
915 | return opp; | ||
916 | } | ||
917 | |||
918 | static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, | ||
919 | struct opp_table *opp_table) | ||
920 | { | ||
921 | struct regulator *reg; | ||
922 | int i; | ||
923 | |||
924 | for (i = 0; i < opp_table->regulator_count; i++) { | ||
925 | reg = opp_table->regulators[i]; | ||
926 | |||
927 | if (!regulator_is_supported_voltage(reg, | ||
928 | opp->supplies[i].u_volt_min, | ||
929 | opp->supplies[i].u_volt_max)) { | ||
930 | pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", | ||
931 | __func__, opp->supplies[i].u_volt_min, | ||
932 | opp->supplies[i].u_volt_max); | ||
933 | return false; | ||
934 | } | ||
935 | } | ||
936 | |||
937 | return true; | ||
938 | } | ||
939 | |||
940 | /* | ||
941 | * Returns: | ||
942 | * 0: On success. And appropriate error message for duplicate OPPs. | ||
943 | * -EBUSY: For OPP with same freq/volt and is available. The callers of | ||
944 | * _opp_add() must return 0 if they receive -EBUSY from it. This is to make | ||
945 | * sure we don't print error messages unnecessarily if different parts of | ||
946 | * kernel try to initialize the OPP table. | ||
947 | * -EEXIST: For OPP with same freq but different volt or is unavailable. This | ||
948 | * should be considered an error by the callers of _opp_add(). | ||
949 | */ | ||
950 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | ||
951 | struct opp_table *opp_table) | ||
952 | { | ||
953 | struct dev_pm_opp *opp; | ||
954 | struct list_head *head; | ||
955 | int ret; | ||
956 | |||
957 | /* | ||
958 | * Insert new OPP in order of increasing frequency and discard if | ||
959 | * already present. | ||
960 | * | ||
961 | * Need to use &opp_table->opp_list in the condition part of the 'for' | ||
962 | * loop, don't replace it with head otherwise it will become an infinite | ||
963 | * loop. | ||
964 | */ | ||
965 | mutex_lock(&opp_table->lock); | ||
966 | head = &opp_table->opp_list; | ||
967 | |||
968 | list_for_each_entry(opp, &opp_table->opp_list, node) { | ||
969 | if (new_opp->rate > opp->rate) { | ||
970 | head = &opp->node; | ||
971 | continue; | ||
972 | } | ||
973 | |||
974 | if (new_opp->rate < opp->rate) | ||
975 | break; | ||
976 | |||
977 | /* Duplicate OPPs */ | ||
978 | dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", | ||
979 | __func__, opp->rate, opp->supplies[0].u_volt, | ||
980 | opp->available, new_opp->rate, | ||
981 | new_opp->supplies[0].u_volt, new_opp->available); | ||
982 | |||
983 | /* Should we compare voltages for all regulators here ? */ | ||
984 | ret = opp->available && | ||
985 | new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; | ||
986 | |||
987 | mutex_unlock(&opp_table->lock); | ||
988 | return ret; | ||
989 | } | ||
990 | |||
991 | list_add(&new_opp->node, head); | ||
992 | mutex_unlock(&opp_table->lock); | ||
993 | |||
994 | new_opp->opp_table = opp_table; | ||
995 | kref_init(&new_opp->kref); | ||
996 | |||
997 | /* Get a reference to the OPP table */ | ||
998 | _get_opp_table_kref(opp_table); | ||
999 | |||
1000 | ret = opp_debug_create_one(new_opp, opp_table); | ||
1001 | if (ret) | ||
1002 | dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", | ||
1003 | __func__, ret); | ||
1004 | |||
1005 | if (!_opp_supported_by_regulators(new_opp, opp_table)) { | ||
1006 | new_opp->available = false; | ||
1007 | dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n", | ||
1008 | __func__, new_opp->rate); | ||
1009 | } | ||
1010 | |||
1011 | return 0; | ||
1012 | } | ||
1013 | |||
1014 | /** | ||
1015 | * _opp_add_v1() - Allocate a OPP based on v1 bindings. | ||
1016 | * @opp_table: OPP table | ||
1017 | * @dev: device for which we do this operation | ||
1018 | * @freq: Frequency in Hz for this OPP | ||
1019 | * @u_volt: Voltage in uVolts for this OPP | ||
1020 | * @dynamic: Dynamically added OPPs. | ||
1021 | * | ||
1022 | * This function adds an opp definition to the opp table and returns status. | ||
1023 | * The opp is made available by default and it can be controlled using | ||
1024 | * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. | ||
1025 | * | ||
1026 | * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table | ||
1027 | * and freed by dev_pm_opp_of_remove_table. | ||
1028 | * | ||
1029 | * Return: | ||
1030 | * 0 On success OR | ||
1031 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
1032 | * -EEXIST Freq are same and volt are different OR | ||
1033 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
1034 | * -ENOMEM Memory allocation failure | ||
1035 | */ | ||
1036 | int _opp_add_v1(struct opp_table *opp_table, struct device *dev, | ||
1037 | unsigned long freq, long u_volt, bool dynamic) | ||
1038 | { | ||
1039 | struct dev_pm_opp *new_opp; | ||
1040 | unsigned long tol; | ||
1041 | int ret; | ||
1042 | |||
1043 | new_opp = _opp_allocate(opp_table); | ||
1044 | if (!new_opp) | ||
1045 | return -ENOMEM; | ||
1046 | |||
1047 | /* populate the opp table */ | ||
1048 | new_opp->rate = freq; | ||
1049 | tol = u_volt * opp_table->voltage_tolerance_v1 / 100; | ||
1050 | new_opp->supplies[0].u_volt = u_volt; | ||
1051 | new_opp->supplies[0].u_volt_min = u_volt - tol; | ||
1052 | new_opp->supplies[0].u_volt_max = u_volt + tol; | ||
1053 | new_opp->available = true; | ||
1054 | new_opp->dynamic = dynamic; | ||
1055 | |||
1056 | ret = _opp_add(dev, new_opp, opp_table); | ||
1057 | if (ret) { | ||
1058 | /* Don't return error for duplicate OPPs */ | ||
1059 | if (ret == -EBUSY) | ||
1060 | ret = 0; | ||
1061 | goto free_opp; | ||
1062 | } | ||
1063 | |||
1064 | /* | ||
1065 | * Notify the changes in the availability of the operable | ||
1066 | * frequency/voltage list. | ||
1067 | */ | ||
1068 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); | ||
1069 | return 0; | ||
1070 | |||
1071 | free_opp: | ||
1072 | _opp_free(new_opp); | ||
1073 | |||
1074 | return ret; | ||
1075 | } | ||
1076 | |||
1077 | /** | ||
1078 | * dev_pm_opp_set_supported_hw() - Set supported platforms | ||
1079 | * @dev: Device for which supported-hw has to be set. | ||
1080 | * @versions: Array of hierarchy of versions to match. | ||
1081 | * @count: Number of elements in the array. | ||
1082 | * | ||
1083 | * This is required only for the V2 bindings, and it enables a platform to | ||
1084 | * specify the hierarchy of versions it supports. OPP layer will then enable | ||
1085 | * OPPs, which are available for those versions, based on its 'opp-supported-hw' | ||
1086 | * property. | ||
1087 | */ | ||
1088 | struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, | ||
1089 | const u32 *versions, unsigned int count) | ||
1090 | { | ||
1091 | struct opp_table *opp_table; | ||
1092 | int ret; | ||
1093 | |||
1094 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1095 | if (!opp_table) | ||
1096 | return ERR_PTR(-ENOMEM); | ||
1097 | |||
1098 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1099 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1100 | |||
1101 | /* Do we already have a version hierarchy associated with opp_table? */ | ||
1102 | if (opp_table->supported_hw) { | ||
1103 | dev_err(dev, "%s: Already have supported hardware list\n", | ||
1104 | __func__); | ||
1105 | ret = -EBUSY; | ||
1106 | goto err; | ||
1107 | } | ||
1108 | |||
1109 | opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), | ||
1110 | GFP_KERNEL); | ||
1111 | if (!opp_table->supported_hw) { | ||
1112 | ret = -ENOMEM; | ||
1113 | goto err; | ||
1114 | } | ||
1115 | |||
1116 | opp_table->supported_hw_count = count; | ||
1117 | |||
1118 | return opp_table; | ||
1119 | |||
1120 | err: | ||
1121 | dev_pm_opp_put_opp_table(opp_table); | ||
1122 | |||
1123 | return ERR_PTR(ret); | ||
1124 | } | ||
1125 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); | ||
1126 | |||
1127 | /** | ||
1128 | * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw | ||
1129 | * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw(). | ||
1130 | * | ||
1131 | * This is required only for the V2 bindings, and is called for a matching | ||
1132 | * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure | ||
1133 | * will not be freed. | ||
1134 | */ | ||
1135 | void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) | ||
1136 | { | ||
1137 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1138 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1139 | |||
1140 | if (!opp_table->supported_hw) { | ||
1141 | pr_err("%s: Doesn't have supported hardware list\n", | ||
1142 | __func__); | ||
1143 | return; | ||
1144 | } | ||
1145 | |||
1146 | kfree(opp_table->supported_hw); | ||
1147 | opp_table->supported_hw = NULL; | ||
1148 | opp_table->supported_hw_count = 0; | ||
1149 | |||
1150 | dev_pm_opp_put_opp_table(opp_table); | ||
1151 | } | ||
1152 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); | ||
1153 | |||
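A hedged example of the supported-hw flow: the version array below is hypothetical (say, a cut and a speed-bin read from an efuse), and the matching 'opp-supported-hw' device-tree property is evaluated by of.c in this same series.

static struct opp_table *example_init_supported_hw(struct device *dev)
{
	/* Hypothetical version hierarchy: <cut speed-bin>, read from hardware. */
	static const u32 versions[] = { 2, 1 };

	/* Undone later with dev_pm_opp_put_supported_hw(). */
	return dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
}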
1154 | /** | ||
1155 | * dev_pm_opp_set_prop_name() - Set prop-extn name | ||
1156 | * @dev: Device for which the prop-name has to be set. | ||
1157 | * @name: name to postfix to properties. | ||
1158 | * | ||
1159 | * This is required only for the V2 bindings, and it enables a platform to | ||
1160 | * specify the extn to be used for certain property names. The properties to | ||
1161 | * which the extension will apply are opp-microvolt and opp-microamp. OPP core | ||
1162 | * should postfix the property name with -<name> while looking for them. | ||
1163 | */ | ||
1164 | struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) | ||
1165 | { | ||
1166 | struct opp_table *opp_table; | ||
1167 | int ret; | ||
1168 | |||
1169 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1170 | if (!opp_table) | ||
1171 | return ERR_PTR(-ENOMEM); | ||
1172 | |||
1173 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1174 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1175 | |||
1176 | /* Do we already have a prop-name associated with opp_table? */ | ||
1177 | if (opp_table->prop_name) { | ||
1178 | dev_err(dev, "%s: Already have prop-name %s\n", __func__, | ||
1179 | opp_table->prop_name); | ||
1180 | ret = -EBUSY; | ||
1181 | goto err; | ||
1182 | } | ||
1183 | |||
1184 | opp_table->prop_name = kstrdup(name, GFP_KERNEL); | ||
1185 | if (!opp_table->prop_name) { | ||
1186 | ret = -ENOMEM; | ||
1187 | goto err; | ||
1188 | } | ||
1189 | |||
1190 | return opp_table; | ||
1191 | |||
1192 | err: | ||
1193 | dev_pm_opp_put_opp_table(opp_table); | ||
1194 | |||
1195 | return ERR_PTR(ret); | ||
1196 | } | ||
1197 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); | ||
1198 | |||
1199 | /** | ||
1200 | * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name | ||
1201 | * @opp_table: OPP table returned by dev_pm_opp_set_prop_name(). | ||
1202 | * | ||
1203 | * This is required only for the V2 bindings, and is called for a matching | ||
1204 | * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure | ||
1205 | * will not be freed. | ||
1206 | */ | ||
1207 | void dev_pm_opp_put_prop_name(struct opp_table *opp_table) | ||
1208 | { | ||
1209 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1210 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1211 | |||
1212 | if (!opp_table->prop_name) { | ||
1213 | pr_err("%s: Doesn't have a prop-name\n", __func__); | ||
1214 | return; | ||
1215 | } | ||
1216 | |||
1217 | kfree(opp_table->prop_name); | ||
1218 | opp_table->prop_name = NULL; | ||
1219 | |||
1220 | dev_pm_opp_put_opp_table(opp_table); | ||
1221 | } | ||
1222 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); | ||
1223 | |||
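A short sketch of the prop-name extension: with the made-up name "slow" below, the OPP core would look for properties such as opp-microvolt-slow instead of opp-microvolt when parsing the table.

static struct opp_table *example_init_prop_name(struct device *dev)
{
	/* "slow" is a hypothetical speed-grade suffix for this example. */
	struct opp_table *table = dev_pm_opp_set_prop_name(dev, "slow");

	/* Callers undo this later with dev_pm_opp_put_prop_name(table). */
	return table;
}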
1224 | static int _allocate_set_opp_data(struct opp_table *opp_table) | ||
1225 | { | ||
1226 | struct dev_pm_set_opp_data *data; | ||
1227 | int len, count = opp_table->regulator_count; | ||
1228 | |||
1229 | if (WARN_ON(!count)) | ||
1230 | return -EINVAL; | ||
1231 | |||
1232 | /* space for set_opp_data */ | ||
1233 | len = sizeof(*data); | ||
1234 | |||
1235 | /* space for old_opp.supplies and new_opp.supplies */ | ||
1236 | len += 2 * sizeof(struct dev_pm_opp_supply) * count; | ||
1237 | |||
1238 | data = kzalloc(len, GFP_KERNEL); | ||
1239 | if (!data) | ||
1240 | return -ENOMEM; | ||
1241 | |||
1242 | data->old_opp.supplies = (void *)(data + 1); | ||
1243 | data->new_opp.supplies = data->old_opp.supplies + count; | ||
1244 | |||
1245 | opp_table->set_opp_data = data; | ||
1246 | |||
1247 | return 0; | ||
1248 | } | ||
1249 | |||
1250 | static void _free_set_opp_data(struct opp_table *opp_table) | ||
1251 | { | ||
1252 | kfree(opp_table->set_opp_data); | ||
1253 | opp_table->set_opp_data = NULL; | ||
1254 | } | ||
1255 | |||
1256 | /** | ||
1257 | * dev_pm_opp_set_regulators() - Set regulator names for the device | ||
1258 | * @dev: Device for which regulator name is being set. | ||
1259 | * @names: Array of pointers to the names of the regulator. | ||
1260 | * @count: Number of regulators. | ||
1261 | * | ||
1262 | * In order to support OPP switching, OPP layer needs to know the name of the | ||
1263 | * device's regulators, as the core would be required to switch voltages as | ||
1264 | * well. | ||
1265 | * | ||
1266 | * This must be called before any OPPs are initialized for the device. | ||
1267 | */ | ||
1268 | struct opp_table *dev_pm_opp_set_regulators(struct device *dev, | ||
1269 | const char * const names[], | ||
1270 | unsigned int count) | ||
1271 | { | ||
1272 | struct opp_table *opp_table; | ||
1273 | struct regulator *reg; | ||
1274 | int ret, i; | ||
1275 | |||
1276 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1277 | if (!opp_table) | ||
1278 | return ERR_PTR(-ENOMEM); | ||
1279 | |||
1280 | /* This should be called before OPPs are initialized */ | ||
1281 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { | ||
1282 | ret = -EBUSY; | ||
1283 | goto err; | ||
1284 | } | ||
1285 | |||
1286 | /* Already have regulators set */ | ||
1287 | if (opp_table->regulators) { | ||
1288 | ret = -EBUSY; | ||
1289 | goto err; | ||
1290 | } | ||
1291 | |||
1292 | opp_table->regulators = kmalloc_array(count, | ||
1293 | sizeof(*opp_table->regulators), | ||
1294 | GFP_KERNEL); | ||
1295 | if (!opp_table->regulators) { | ||
1296 | ret = -ENOMEM; | ||
1297 | goto err; | ||
1298 | } | ||
1299 | |||
1300 | for (i = 0; i < count; i++) { | ||
1301 | reg = regulator_get_optional(dev, names[i]); | ||
1302 | if (IS_ERR(reg)) { | ||
1303 | ret = PTR_ERR(reg); | ||
1304 | if (ret != -EPROBE_DEFER) | ||
1305 | dev_err(dev, "%s: no regulator (%s) found: %d\n", | ||
1306 | __func__, names[i], ret); | ||
1307 | goto free_regulators; | ||
1308 | } | ||
1309 | |||
1310 | opp_table->regulators[i] = reg; | ||
1311 | } | ||
1312 | |||
1313 | opp_table->regulator_count = count; | ||
1314 | |||
1315 | /* Allocate block only once to pass to set_opp() routines */ | ||
1316 | ret = _allocate_set_opp_data(opp_table); | ||
1317 | if (ret) | ||
1318 | goto free_regulators; | ||
1319 | |||
1320 | return opp_table; | ||
1321 | |||
1322 | free_regulators: | ||
1323 | while (i != 0) | ||
1324 | regulator_put(opp_table->regulators[--i]); | ||
1325 | |||
1326 | kfree(opp_table->regulators); | ||
1327 | opp_table->regulators = NULL; | ||
1328 | opp_table->regulator_count = 0; | ||
1329 | err: | ||
1330 | dev_pm_opp_put_opp_table(opp_table); | ||
1331 | |||
1332 | return ERR_PTR(ret); | ||
1333 | } | ||
1334 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators); | ||
1335 | |||
1336 | /** | ||
1337 | * dev_pm_opp_put_regulators() - Releases resources blocked for regulator | ||
1338 | * @opp_table: OPP table returned from dev_pm_opp_set_regulators(). | ||
1339 | */ | ||
1340 | void dev_pm_opp_put_regulators(struct opp_table *opp_table) | ||
1341 | { | ||
1342 | int i; | ||
1343 | |||
1344 | if (!opp_table->regulators) { | ||
1345 | pr_err("%s: Doesn't have regulators set\n", __func__); | ||
1346 | return; | ||
1347 | } | ||
1348 | |||
1349 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1350 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1351 | |||
1352 | for (i = opp_table->regulator_count - 1; i >= 0; i--) | ||
1353 | regulator_put(opp_table->regulators[i]); | ||
1354 | |||
1355 | _free_set_opp_data(opp_table); | ||
1356 | |||
1357 | kfree(opp_table->regulators); | ||
1358 | opp_table->regulators = NULL; | ||
1359 | opp_table->regulator_count = 0; | ||
1360 | |||
1361 | dev_pm_opp_put_opp_table(opp_table); | ||
1362 | } | ||
1363 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); | ||
1364 | |||
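A minimal sketch of telling the OPP core which supply to scale, called before any OPPs are added; the regulator name "vdd-cpu" is an assumption of this example and must match the device's board/DT data.

static struct opp_table *example_init_regulators(struct device *dev)
{
	/* Hypothetical supply name for this device. */
	static const char * const names[] = { "vdd-cpu" };

	/* Undone later with dev_pm_opp_put_regulators(). */
	return dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
}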
1365 | /** | ||
1366 | * dev_pm_opp_set_clkname() - Set clk name for the device | ||
1367 | * @dev: Device for which clk name is being set. | ||
1368 | * @name: Clk name. | ||
1369 | * | ||
1370 | * In order to support OPP switching, OPP layer needs to get pointer to the | ||
1371 | * clock for the device. Simple cases work fine without using this routine (i.e. | ||
1372 | * by passing connection-id as NULL), but for a device with multiple clocks | ||
1373 | * available, the OPP core needs to know the exact name of the clk to use. | ||
1374 | * | ||
1375 | * This must be called before any OPPs are initialized for the device. | ||
1376 | */ | ||
1377 | struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) | ||
1378 | { | ||
1379 | struct opp_table *opp_table; | ||
1380 | int ret; | ||
1381 | |||
1382 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1383 | if (!opp_table) | ||
1384 | return ERR_PTR(-ENOMEM); | ||
1385 | |||
1386 | /* This should be called before OPPs are initialized */ | ||
1387 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { | ||
1388 | ret = -EBUSY; | ||
1389 | goto err; | ||
1390 | } | ||
1391 | |||
1392 | /* Already have default clk set, free it */ | ||
1393 | if (!IS_ERR(opp_table->clk)) | ||
1394 | clk_put(opp_table->clk); | ||
1395 | |||
1396 | /* Find clk for the device */ | ||
1397 | opp_table->clk = clk_get(dev, name); | ||
1398 | if (IS_ERR(opp_table->clk)) { | ||
1399 | ret = PTR_ERR(opp_table->clk); | ||
1400 | if (ret != -EPROBE_DEFER) { | ||
1401 | dev_err(dev, "%s: Couldn't find clock: %d\n", __func__, | ||
1402 | ret); | ||
1403 | } | ||
1404 | goto err; | ||
1405 | } | ||
1406 | |||
1407 | return opp_table; | ||
1408 | |||
1409 | err: | ||
1410 | dev_pm_opp_put_opp_table(opp_table); | ||
1411 | |||
1412 | return ERR_PTR(ret); | ||
1413 | } | ||
1414 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname); | ||
1415 | |||
1416 | /** | ||
1417 | * dev_pm_opp_put_clkname() - Releases resources blocked for clk. | ||
1418 | * @opp_table: OPP table returned from dev_pm_opp_set_clkname(). | ||
1419 | */ | ||
1420 | void dev_pm_opp_put_clkname(struct opp_table *opp_table) | ||
1421 | { | ||
1422 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1423 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1424 | |||
1425 | clk_put(opp_table->clk); | ||
1426 | opp_table->clk = ERR_PTR(-EINVAL); | ||
1427 | |||
1428 | dev_pm_opp_put_opp_table(opp_table); | ||
1429 | } | ||
1430 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname); | ||
1431 | |||
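Similarly, for a device with more than one clock, a hedged sketch of selecting the OPP clock by connection id; the id "core" is illustrative only.

static struct opp_table *example_init_clkname(struct device *dev)
{
	/* "core" is a hypothetical clock connection id for this device. */
	return dev_pm_opp_set_clkname(dev, "core");
	/* Released later with dev_pm_opp_put_clkname(). */
}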
1432 | /** | ||
1433 | * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper | ||
1434 | * @dev: Device for which the helper is getting registered. | ||
1435 | * @set_opp: Custom set OPP helper. | ||
1436 | * | ||
1437 | * This is useful to support complex platforms (like platforms with multiple | ||
1438 | * regulators per device), instead of the generic OPP set rate helper. | ||
1439 | * | ||
1440 | * This must be called before any OPPs are initialized for the device. | ||
1441 | */ | ||
1442 | struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, | ||
1443 | int (*set_opp)(struct dev_pm_set_opp_data *data)) | ||
1444 | { | ||
1445 | struct opp_table *opp_table; | ||
1446 | int ret; | ||
1447 | |||
1448 | if (!set_opp) | ||
1449 | return ERR_PTR(-EINVAL); | ||
1450 | |||
1451 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1452 | if (!opp_table) | ||
1453 | return ERR_PTR(-ENOMEM); | ||
1454 | |||
1455 | /* This should be called before OPPs are initialized */ | ||
1456 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { | ||
1457 | ret = -EBUSY; | ||
1458 | goto err; | ||
1459 | } | ||
1460 | |||
1461 | /* Already have custom set_opp helper */ | ||
1462 | if (WARN_ON(opp_table->set_opp)) { | ||
1463 | ret = -EBUSY; | ||
1464 | goto err; | ||
1465 | } | ||
1466 | |||
1467 | opp_table->set_opp = set_opp; | ||
1468 | |||
1469 | return opp_table; | ||
1470 | |||
1471 | err: | ||
1472 | dev_pm_opp_put_opp_table(opp_table); | ||
1473 | |||
1474 | return ERR_PTR(ret); | ||
1475 | } | ||
1476 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); | ||
1477 | |||
1478 | /** | ||
1479 | * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for | ||
1480 | * set_opp helper | ||
1481 | * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper(). | ||
1482 | * | ||
1483 | * Release resources blocked for platform specific set_opp helper. | ||
1484 | */ | ||
1485 | void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) | ||
1486 | { | ||
1487 | if (!opp_table->set_opp) { | ||
1488 | pr_err("%s: Doesn't have custom set_opp helper set\n", | ||
1489 | __func__); | ||
1490 | return; | ||
1491 | } | ||
1492 | |||
1493 | /* Make sure there are no concurrent readers while updating opp_table */ | ||
1494 | WARN_ON(!list_empty(&opp_table->opp_list)); | ||
1495 | |||
1496 | opp_table->set_opp = NULL; | ||
1497 | |||
1498 | dev_pm_opp_put_opp_table(opp_table); | ||
1499 | } | ||
1500 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); | ||
1501 | |||
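Finally, a hedged sketch of the kind of platform-specific set_opp helper registered above, passed to dev_pm_opp_register_set_opp_helper(). It only shows the data the callback receives; a real implementation would handle multiple supplies and error recovery.

/* Illustrative helper only: real platforms add their own sequencing here. */
static int example_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *new_supply = &data->new_opp.supplies[0];
	int ret;

	/* Scaling up: raise the voltage before the clock rate. */
	if (data->new_opp.rate > data->old_opp.rate) {
		ret = regulator_set_voltage_triplet(data->regulators[0],
						    new_supply->u_volt_min,
						    new_supply->u_volt,
						    new_supply->u_volt_max);
		if (ret)
			return ret;
	}

	ret = clk_set_rate(data->clk, data->new_opp.rate);
	if (ret)
		return ret;

	/* Scaling down: lower the voltage after the clock rate. */
	if (data->new_opp.rate < data->old_opp.rate)
		ret = regulator_set_voltage_triplet(data->regulators[0],
						    new_supply->u_volt_min,
						    new_supply->u_volt,
						    new_supply->u_volt_max);

	return ret;
}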
1502 | /** | ||
1503 | * dev_pm_opp_add() - Add an OPP table from a table definitions | ||
1504 | * @dev: device for which we do this operation | ||
1505 | * @freq: Frequency in Hz for this OPP | ||
1506 | * @u_volt: Voltage in uVolts for this OPP | ||
1507 | * | ||
1508 | * This function adds an opp definition to the opp table and returns status. | ||
1509 | * The opp is made available by default and it can be controlled using | ||
1510 | * dev_pm_opp_enable/disable functions. | ||
1511 | * | ||
1512 | * Return: | ||
1513 | * 0 On success OR | ||
1514 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
1515 | * -EEXIST Freq are same and volt are different OR | ||
1516 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
1517 | * -ENOMEM Memory allocation failure | ||
1518 | */ | ||
1519 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | ||
1520 | { | ||
1521 | struct opp_table *opp_table; | ||
1522 | int ret; | ||
1523 | |||
1524 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
1525 | if (!opp_table) | ||
1526 | return -ENOMEM; | ||
1527 | |||
1528 | ret = _opp_add_v1(opp_table, dev, freq, u_volt, true); | ||
1529 | |||
1530 | dev_pm_opp_put_opp_table(opp_table); | ||
1531 | return ret; | ||
1532 | } | ||
1533 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); | ||
1534 | |||
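A minimal, hypothetical use of dev_pm_opp_add() from a driver's probe path could look like the sketch below; the frequencies and voltages are made up, and the matching cleanup with dev_pm_opp_remove_table() is shown for symmetry:

#include <linux/pm_opp.h>

static int example_register_opps(struct device *dev)
{
	int ret;

	/* 500 MHz @ 1.0 V */
	ret = dev_pm_opp_add(dev, 500000000, 1000000);
	if (ret)
		return ret;

	/* 1 GHz @ 1.2 V */
	ret = dev_pm_opp_add(dev, 1000000000, 1200000);
	if (ret)
		dev_pm_opp_remove_table(dev);	/* also drops the 500 MHz entry */

	return ret;
}

static void example_unregister_opps(struct device *dev)
{
	/* Frees both static and dynamically added OPPs of @dev */
	dev_pm_opp_remove_table(dev);
}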
1535 | /** | ||
1536 | * _opp_set_availability() - helper to set the availability of an opp | ||
1537 | * @dev: device for which we do this operation | ||
1538 | * @freq: OPP frequency to modify availability | ||
1539 | * @availability_req: availability status requested for this opp | ||
1540 | * | ||
1541 | * Set the availability of an OPP; opp_{enable,disable} share this common | ||
1542 | * logic, which is isolated here. | ||
1543 | * | ||
1544 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
1545 | * copy operation, returns 0 if no modification was done OR modification was | ||
1546 | * successful. | ||
1547 | */ | ||
1548 | static int _opp_set_availability(struct device *dev, unsigned long freq, | ||
1549 | bool availability_req) | ||
1550 | { | ||
1551 | struct opp_table *opp_table; | ||
1552 | struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); | ||
1553 | int r = 0; | ||
1554 | |||
1555 | /* Find the opp_table */ | ||
1556 | opp_table = _find_opp_table(dev); | ||
1557 | if (IS_ERR(opp_table)) { | ||
1558 | r = PTR_ERR(opp_table); | ||
1559 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
1560 | return r; | ||
1561 | } | ||
1562 | |||
1563 | mutex_lock(&opp_table->lock); | ||
1564 | |||
1565 | /* Do we have the frequency? */ | ||
1566 | list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { | ||
1567 | if (tmp_opp->rate == freq) { | ||
1568 | opp = tmp_opp; | ||
1569 | break; | ||
1570 | } | ||
1571 | } | ||
1572 | |||
1573 | if (IS_ERR(opp)) { | ||
1574 | r = PTR_ERR(opp); | ||
1575 | goto unlock; | ||
1576 | } | ||
1577 | |||
1578 | /* Is update really needed? */ | ||
1579 | if (opp->available == availability_req) | ||
1580 | goto unlock; | ||
1581 | |||
1582 | opp->available = availability_req; | ||
1583 | |||
1584 | dev_pm_opp_get(opp); | ||
1585 | mutex_unlock(&opp_table->lock); | ||
1586 | |||
1587 | /* Notify the change of the OPP availability */ | ||
1588 | if (availability_req) | ||
1589 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, | ||
1590 | opp); | ||
1591 | else | ||
1592 | blocking_notifier_call_chain(&opp_table->head, | ||
1593 | OPP_EVENT_DISABLE, opp); | ||
1594 | |||
1595 | dev_pm_opp_put(opp); | ||
1596 | goto put_table; | ||
1597 | |||
1598 | unlock: | ||
1599 | mutex_unlock(&opp_table->lock); | ||
1600 | put_table: | ||
1601 | dev_pm_opp_put_opp_table(opp_table); | ||
1602 | return r; | ||
1603 | } | ||
1604 | |||
1605 | /** | ||
1606 | * dev_pm_opp_enable() - Enable a specific OPP | ||
1607 | * @dev: device for which we do this operation | ||
1608 | * @freq: OPP frequency to enable | ||
1609 | * | ||
1610 | * Enables a provided opp. If the operation is valid, this returns 0, else the | ||
1611 | * corresponding error value. It is meant to be used by users to make an OPP | ||
1612 | * available again after it was temporarily made unavailable with dev_pm_opp_disable. | ||
1613 | * | ||
1614 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
1615 | * copy operation, returns 0 if no modification was done OR modification was | ||
1616 | * successful. | ||
1617 | */ | ||
1618 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) | ||
1619 | { | ||
1620 | return _opp_set_availability(dev, freq, true); | ||
1621 | } | ||
1622 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | ||
1623 | |||
1624 | /** | ||
1625 | * dev_pm_opp_disable() - Disable a specific OPP | ||
1626 | * @dev: device for which we do this operation | ||
1627 | * @freq: OPP frequency to disable | ||
1628 | * | ||
1629 | * Disables a provided opp. If the operation is valid, this returns | ||
1630 | * 0, else the corresponding error value. It is meant to be a temporary | ||
1631 | * control by users to make this OPP not available until the circumstances are | ||
1632 | * right to make it available again (with a call to dev_pm_opp_enable). | ||
1633 | * | ||
1634 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
1635 | * copy operation, returns 0 if no modification was done OR modification was | ||
1636 | * successful. | ||
1637 | */ | ||
1638 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) | ||
1639 | { | ||
1640 | return _opp_set_availability(dev, freq, false); | ||
1641 | } | ||
1642 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); | ||
1643 | |||
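For instance, a hypothetical thermal handler could temporarily hide the boost OPP and restore it later; both calls only toggle availability and leave the table itself intact:

#include <linux/pm_opp.h>

#define EXAMPLE_BOOST_FREQ	1200000000UL	/* made-up boost frequency */

static int example_thermal_throttle(struct device *dev, bool hot)
{
	/*
	 * When hot, hide the boost OPP from future frequency selection;
	 * once cool again, make it selectable once more.
	 */
	if (hot)
		return dev_pm_opp_disable(dev, EXAMPLE_BOOST_FREQ);

	return dev_pm_opp_enable(dev, EXAMPLE_BOOST_FREQ);
}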
1644 | /** | ||
1645 | * dev_pm_opp_register_notifier() - Register OPP notifier for the device | ||
1646 | * @dev: Device for which notifier needs to be registered | ||
1647 | * @nb: Notifier block to be registered | ||
1648 | * | ||
1649 | * Return: 0 on success or a negative error value. | ||
1650 | */ | ||
1651 | int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) | ||
1652 | { | ||
1653 | struct opp_table *opp_table; | ||
1654 | int ret; | ||
1655 | |||
1656 | opp_table = _find_opp_table(dev); | ||
1657 | if (IS_ERR(opp_table)) | ||
1658 | return PTR_ERR(opp_table); | ||
1659 | |||
1660 | ret = blocking_notifier_chain_register(&opp_table->head, nb); | ||
1661 | |||
1662 | dev_pm_opp_put_opp_table(opp_table); | ||
1663 | |||
1664 | return ret; | ||
1665 | } | ||
1666 | EXPORT_SYMBOL(dev_pm_opp_register_notifier); | ||
1667 | |||
1668 | /** | ||
1669 | * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device | ||
1670 | * @dev: Device for which notifier needs to be unregistered | ||
1671 | * @nb: Notifier block to be unregistered | ||
1672 | * | ||
1673 | * Return: 0 on success or a negative error value. | ||
1674 | */ | ||
1675 | int dev_pm_opp_unregister_notifier(struct device *dev, | ||
1676 | struct notifier_block *nb) | ||
1677 | { | ||
1678 | struct opp_table *opp_table; | ||
1679 | int ret; | ||
1680 | |||
1681 | opp_table = _find_opp_table(dev); | ||
1682 | if (IS_ERR(opp_table)) | ||
1683 | return PTR_ERR(opp_table); | ||
1684 | |||
1685 | ret = blocking_notifier_chain_unregister(&opp_table->head, nb); | ||
1686 | |||
1687 | dev_pm_opp_put_opp_table(opp_table); | ||
1688 | |||
1689 | return ret; | ||
1690 | } | ||
1691 | EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); | ||
1692 | |||
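The notifier callback receives one of the OPP_EVENT_* codes together with the affected struct dev_pm_opp as its payload. A hedged sketch of a listener that merely logs availability changes (registration is assumed to happen once the device's OPP table exists):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>

static int example_opp_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct dev_pm_opp *opp = data;
	unsigned long freq = dev_pm_opp_get_freq(opp);

	switch (event) {
	case OPP_EVENT_ENABLE:
		pr_info("OPP %lu Hz enabled\n", freq);
		break;
	case OPP_EVENT_DISABLE:
		pr_info("OPP %lu Hz disabled\n", freq);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_opp_nb = {
	.notifier_call = example_opp_notify,
};

/* In the driver: dev_pm_opp_register_notifier(dev, &example_opp_nb); */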
1693 | /* | ||
1694 | * Free OPPs created from static entries present in DT and, if remove_all is | ||
1695 | * set, the dynamically added entries as well. | ||
1696 | */ | ||
1697 | void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, | ||
1698 | bool remove_all) | ||
1699 | { | ||
1700 | struct dev_pm_opp *opp, *tmp; | ||
1701 | |||
1702 | /* Find if opp_table manages a single device */ | ||
1703 | if (list_is_singular(&opp_table->dev_list)) { | ||
1704 | /* Free static OPPs */ | ||
1705 | list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { | ||
1706 | if (remove_all || !opp->dynamic) | ||
1707 | dev_pm_opp_put(opp); | ||
1708 | } | ||
1709 | } else { | ||
1710 | _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table); | ||
1711 | } | ||
1712 | } | ||
1713 | |||
1714 | void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all) | ||
1715 | { | ||
1716 | struct opp_table *opp_table; | ||
1717 | |||
1718 | /* Check for existing table for 'dev' */ | ||
1719 | opp_table = _find_opp_table(dev); | ||
1720 | if (IS_ERR(opp_table)) { | ||
1721 | int error = PTR_ERR(opp_table); | ||
1722 | |||
1723 | if (error != -ENODEV) | ||
1724 | WARN(1, "%s: opp_table: %d\n", | ||
1725 | IS_ERR_OR_NULL(dev) ? | ||
1726 | "Invalid device" : dev_name(dev), | ||
1727 | error); | ||
1728 | return; | ||
1729 | } | ||
1730 | |||
1731 | _dev_pm_opp_remove_table(opp_table, dev, remove_all); | ||
1732 | |||
1733 | dev_pm_opp_put_opp_table(opp_table); | ||
1734 | } | ||
1735 | |||
1736 | /** | ||
1737 | * dev_pm_opp_remove_table() - Free all OPPs associated with the device | ||
1738 | * @dev: device pointer used to lookup OPP table. | ||
1739 | * | ||
1740 | * Free both OPPs created using static entries present in DT and the | ||
1741 | * dynamically added entries. | ||
1742 | */ | ||
1743 | void dev_pm_opp_remove_table(struct device *dev) | ||
1744 | { | ||
1745 | _dev_pm_opp_find_and_remove_table(dev, true); | ||
1746 | } | ||
1747 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); | ||
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c new file mode 100644 index 000000000000..2d87bc1adf38 --- /dev/null +++ b/drivers/opp/cpu.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Generic OPP helper interface for CPU device | ||
3 | * | ||
4 | * Copyright (C) 2009-2014 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/cpufreq.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/export.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #include "opp.h" | ||
24 | |||
25 | #ifdef CONFIG_CPU_FREQ | ||
26 | |||
27 | /** | ||
28 | * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device | ||
29 | * @dev: device for which we do this operation | ||
30 | * @table: Cpufreq table returned back to caller | ||
31 | * | ||
32 | * Generate a cpufreq table for a provided device - this assumes that the | ||
33 | * opp table is already initialized and ready for usage. | ||
34 | * | ||
35 | * This function allocates required memory for the cpufreq table. It is | ||
36 | * expected that the caller does the required maintenance such as freeing | ||
37 | * the table as required. | ||
38 | * | ||
39 | * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM | ||
40 | * if no memory available for the operation (table is not populated), returns 0 | ||
41 | * if successful and table is populated. | ||
42 | * | ||
43 | * WARNING: It is important for the callers to refresh their copy of the | ||
44 | * table if OPPs get added or removed for the device in the interim. | ||
45 | */ | ||
46 | int dev_pm_opp_init_cpufreq_table(struct device *dev, | ||
47 | struct cpufreq_frequency_table **table) | ||
48 | { | ||
49 | struct dev_pm_opp *opp; | ||
50 | struct cpufreq_frequency_table *freq_table = NULL; | ||
51 | int i, max_opps, ret = 0; | ||
52 | unsigned long rate; | ||
53 | |||
54 | max_opps = dev_pm_opp_get_opp_count(dev); | ||
55 | if (max_opps <= 0) | ||
56 | return max_opps ? max_opps : -ENODATA; | ||
57 | |||
58 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); | ||
59 | if (!freq_table) | ||
60 | return -ENOMEM; | ||
61 | |||
62 | for (i = 0, rate = 0; i < max_opps; i++, rate++) { | ||
63 | /* find next rate */ | ||
64 | opp = dev_pm_opp_find_freq_ceil(dev, &rate); | ||
65 | if (IS_ERR(opp)) { | ||
66 | ret = PTR_ERR(opp); | ||
67 | goto out; | ||
68 | } | ||
69 | freq_table[i].driver_data = i; | ||
70 | freq_table[i].frequency = rate / 1000; | ||
71 | |||
72 | /* Is Boost/turbo opp ? */ | ||
73 | if (dev_pm_opp_is_turbo(opp)) | ||
74 | freq_table[i].flags = CPUFREQ_BOOST_FREQ; | ||
75 | |||
76 | dev_pm_opp_put(opp); | ||
77 | } | ||
78 | |||
79 | freq_table[i].driver_data = i; | ||
80 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
81 | |||
82 | *table = &freq_table[0]; | ||
83 | |||
84 | out: | ||
85 | if (ret) | ||
86 | kfree(freq_table); | ||
87 | |||
88 | return ret; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); | ||
91 | |||
92 | /** | ||
93 | * dev_pm_opp_free_cpufreq_table() - free the cpufreq table | ||
94 | * @dev: device for which we do this operation | ||
95 | * @table: table to free | ||
96 | * | ||
97 | * Free up the table allocated by dev_pm_opp_init_cpufreq_table | ||
98 | */ | ||
99 | void dev_pm_opp_free_cpufreq_table(struct device *dev, | ||
100 | struct cpufreq_frequency_table **table) | ||
101 | { | ||
102 | if (!table) | ||
103 | return; | ||
104 | |||
105 | kfree(*table); | ||
106 | *table = NULL; | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); | ||
109 | #endif /* CONFIG_CPU_FREQ */ | ||
110 | |||
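A cpufreq driver's ->init() callback is the usual consumer of these two helpers. A trimmed sketch, assuming the CPU's OPPs are already registered and skipping driver_data/latency handling:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Build the table from whatever OPPs are registered for this CPU */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret)
		return ret;

	policy->freq_table = freq_table;
	return 0;
}

static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
	dev_pm_opp_free_cpufreq_table(get_cpu_device(policy->cpu),
				      &policy->freq_table);
	return 0;
}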
111 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) | ||
112 | { | ||
113 | struct device *cpu_dev; | ||
114 | int cpu; | ||
115 | |||
116 | WARN_ON(cpumask_empty(cpumask)); | ||
117 | |||
118 | for_each_cpu(cpu, cpumask) { | ||
119 | cpu_dev = get_cpu_device(cpu); | ||
120 | if (!cpu_dev) { | ||
121 | pr_err("%s: failed to get cpu%d device\n", __func__, | ||
122 | cpu); | ||
123 | continue; | ||
124 | } | ||
125 | |||
126 | if (of) | ||
127 | dev_pm_opp_of_remove_table(cpu_dev); | ||
128 | else | ||
129 | dev_pm_opp_remove_table(cpu_dev); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask | ||
135 | * @cpumask: cpumask for which OPP table needs to be removed | ||
136 | * | ||
137 | * This removes the OPP tables for CPUs present in the @cpumask. | ||
138 | * This should be used to remove all the OPP entries associated with | ||
139 | * the CPUs in @cpumask. | ||
140 | */ | ||
141 | void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) | ||
142 | { | ||
143 | _dev_pm_opp_cpumask_remove_table(cpumask, false); | ||
144 | } | ||
145 | EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); | ||
146 | |||
147 | /** | ||
148 | * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs | ||
149 | * @cpu_dev: CPU device for which we do this operation | ||
150 | * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev | ||
151 | * | ||
152 | * This marks OPP table of the @cpu_dev as shared by the CPUs present in | ||
153 | * @cpumask. | ||
154 | * | ||
155 | * Returns -ENODEV if OPP table isn't already present. | ||
156 | */ | ||
157 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, | ||
158 | const struct cpumask *cpumask) | ||
159 | { | ||
160 | struct opp_device *opp_dev; | ||
161 | struct opp_table *opp_table; | ||
162 | struct device *dev; | ||
163 | int cpu, ret = 0; | ||
164 | |||
165 | opp_table = _find_opp_table(cpu_dev); | ||
166 | if (IS_ERR(opp_table)) | ||
167 | return PTR_ERR(opp_table); | ||
168 | |||
169 | for_each_cpu(cpu, cpumask) { | ||
170 | if (cpu == cpu_dev->id) | ||
171 | continue; | ||
172 | |||
173 | dev = get_cpu_device(cpu); | ||
174 | if (!dev) { | ||
175 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | ||
176 | __func__, cpu); | ||
177 | continue; | ||
178 | } | ||
179 | |||
180 | opp_dev = _add_opp_dev(dev, opp_table); | ||
181 | if (!opp_dev) { | ||
182 | dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n", | ||
183 | __func__, cpu); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | /* Mark opp-table as multiple CPUs are sharing it now */ | ||
188 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; | ||
189 | } | ||
190 | |||
191 | dev_pm_opp_put_opp_table(opp_table); | ||
192 | |||
193 | return ret; | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); | ||
196 | |||
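A platform cpufreq driver that already knows (for example from platform data) which CPUs sit in the same clock/voltage domain marks the sharing explicitly, as sketched below; the OPP table for the lead CPU must already exist:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_mark_shared(unsigned int cpu, const struct cpumask *siblings)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Every CPU in @siblings will reuse cpu_dev's OPP table */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, siblings);
}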
197 | /** | ||
198 | * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev | ||
199 | * @cpu_dev: CPU device for which we do this operation | ||
200 | * @cpumask: cpumask to update with information of sharing CPUs | ||
201 | * | ||
202 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | ||
203 | * | ||
204 | * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP | ||
205 | * table's status is access-unknown. | ||
206 | */ | ||
207 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | ||
208 | { | ||
209 | struct opp_device *opp_dev; | ||
210 | struct opp_table *opp_table; | ||
211 | int ret = 0; | ||
212 | |||
213 | opp_table = _find_opp_table(cpu_dev); | ||
214 | if (IS_ERR(opp_table)) | ||
215 | return PTR_ERR(opp_table); | ||
216 | |||
217 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { | ||
218 | ret = -EINVAL; | ||
219 | goto put_opp_table; | ||
220 | } | ||
221 | |||
222 | cpumask_clear(cpumask); | ||
223 | |||
224 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { | ||
225 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) | ||
226 | cpumask_set_cpu(opp_dev->dev->id, cpumask); | ||
227 | } else { | ||
228 | cpumask_set_cpu(cpu_dev->id, cpumask); | ||
229 | } | ||
230 | |||
231 | put_opp_table: | ||
232 | dev_pm_opp_put_opp_table(opp_table); | ||
233 | |||
234 | return ret; | ||
235 | } | ||
236 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); | ||
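Conversely, a driver can ask the OPP core which CPUs already share a table, for instance to populate a cpufreq policy's cpumask; a minimal sketch:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_fill_policy_cpus(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Fails with -EINVAL while the table's sharing status is still unknown */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}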
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c new file mode 100644 index 000000000000..81cf120fcf43 --- /dev/null +++ b/drivers/opp/debugfs.c | |||
@@ -0,0 +1,249 @@ | |||
1 | /* | ||
2 | * Generic OPP debugfs interface | ||
3 | * | ||
4 | * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/limits.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include "opp.h" | ||
21 | |||
22 | static struct dentry *rootdir; | ||
23 | |||
24 | static void opp_set_dev_name(const struct device *dev, char *name) | ||
25 | { | ||
26 | if (dev->parent) | ||
27 | snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent), | ||
28 | dev_name(dev)); | ||
29 | else | ||
30 | snprintf(name, NAME_MAX, "%s", dev_name(dev)); | ||
31 | } | ||
32 | |||
33 | void opp_debug_remove_one(struct dev_pm_opp *opp) | ||
34 | { | ||
35 | debugfs_remove_recursive(opp->dentry); | ||
36 | } | ||
37 | |||
38 | static bool opp_debug_create_supplies(struct dev_pm_opp *opp, | ||
39 | struct opp_table *opp_table, | ||
40 | struct dentry *pdentry) | ||
41 | { | ||
42 | struct dentry *d; | ||
43 | int i; | ||
44 | char *name; | ||
45 | |||
46 | for (i = 0; i < opp_table->regulator_count; i++) { | ||
47 | name = kasprintf(GFP_KERNEL, "supply-%d", i); | ||
48 | |||
49 | /* Create per-opp directory */ | ||
50 | d = debugfs_create_dir(name, pdentry); | ||
51 | |||
52 | kfree(name); | ||
53 | |||
54 | if (!d) | ||
55 | return false; | ||
56 | |||
57 | if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, | ||
58 | &opp->supplies[i].u_volt)) | ||
59 | return false; | ||
60 | |||
61 | if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, | ||
62 | &opp->supplies[i].u_volt_min)) | ||
63 | return false; | ||
64 | |||
65 | if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, | ||
66 | &opp->supplies[i].u_volt_max)) | ||
67 | return false; | ||
68 | |||
69 | if (!debugfs_create_ulong("u_amp", S_IRUGO, d, | ||
70 | &opp->supplies[i].u_amp)) | ||
71 | return false; | ||
72 | } | ||
73 | |||
74 | return true; | ||
75 | } | ||
76 | |||
77 | int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) | ||
78 | { | ||
79 | struct dentry *pdentry = opp_table->dentry; | ||
80 | struct dentry *d; | ||
81 | char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */ | ||
82 | |||
83 | /* Rate is unique to each OPP, use it to give opp-name */ | ||
84 | snprintf(name, sizeof(name), "opp:%lu", opp->rate); | ||
85 | |||
86 | /* Create per-opp directory */ | ||
87 | d = debugfs_create_dir(name, pdentry); | ||
88 | if (!d) | ||
89 | return -ENOMEM; | ||
90 | |||
91 | if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) | ||
92 | return -ENOMEM; | ||
93 | |||
94 | if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) | ||
95 | return -ENOMEM; | ||
96 | |||
97 | if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) | ||
98 | return -ENOMEM; | ||
99 | |||
100 | if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) | ||
104 | return -ENOMEM; | ||
105 | |||
106 | if (!opp_debug_create_supplies(opp, opp_table, d)) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, | ||
110 | &opp->clock_latency_ns)) | ||
111 | return -ENOMEM; | ||
112 | |||
113 | opp->dentry = d; | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int opp_list_debug_create_dir(struct opp_device *opp_dev, | ||
118 | struct opp_table *opp_table) | ||
119 | { | ||
120 | const struct device *dev = opp_dev->dev; | ||
121 | struct dentry *d; | ||
122 | |||
123 | opp_set_dev_name(dev, opp_table->dentry_name); | ||
124 | |||
125 | /* Create device specific directory */ | ||
126 | d = debugfs_create_dir(opp_table->dentry_name, rootdir); | ||
127 | if (!d) { | ||
128 | dev_err(dev, "%s: Failed to create debugfs dir\n", __func__); | ||
129 | return -ENOMEM; | ||
130 | } | ||
131 | |||
132 | opp_dev->dentry = d; | ||
133 | opp_table->dentry = d; | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int opp_list_debug_create_link(struct opp_device *opp_dev, | ||
139 | struct opp_table *opp_table) | ||
140 | { | ||
141 | const struct device *dev = opp_dev->dev; | ||
142 | char name[NAME_MAX]; | ||
143 | struct dentry *d; | ||
144 | |||
145 | opp_set_dev_name(opp_dev->dev, name); | ||
146 | |||
147 | /* Create device specific directory link */ | ||
148 | d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name); | ||
149 | if (!d) { | ||
150 | dev_err(dev, "%s: Failed to create link\n", __func__); | ||
151 | return -ENOMEM; | ||
152 | } | ||
153 | |||
154 | opp_dev->dentry = d; | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * opp_debug_register - add a device opp node to the debugfs 'opp' directory | ||
161 | * @opp_dev: opp-dev pointer for device | ||
162 | * @opp_table: the device-opp being added | ||
163 | * | ||
164 | * Dynamically adds a device-specific directory in the debugfs 'opp' directory. If the | ||
165 | * device-opp is shared with other devices, then links will be created for all | ||
166 | * devices except the first. | ||
167 | * | ||
168 | * Return: 0 on success, otherwise negative error. | ||
169 | */ | ||
170 | int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) | ||
171 | { | ||
172 | if (!rootdir) { | ||
173 | pr_debug("%s: Uninitialized rootdir\n", __func__); | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | |||
177 | if (opp_table->dentry) | ||
178 | return opp_list_debug_create_link(opp_dev, opp_table); | ||
179 | |||
180 | return opp_list_debug_create_dir(opp_dev, opp_table); | ||
181 | } | ||
182 | |||
183 | static void opp_migrate_dentry(struct opp_device *opp_dev, | ||
184 | struct opp_table *opp_table) | ||
185 | { | ||
186 | struct opp_device *new_dev; | ||
187 | const struct device *dev; | ||
188 | struct dentry *dentry; | ||
189 | |||
190 | /* Look for next opp-dev */ | ||
191 | list_for_each_entry(new_dev, &opp_table->dev_list, node) | ||
192 | if (new_dev != opp_dev) | ||
193 | break; | ||
194 | |||
195 | /* new_dev is guaranteed to be valid here */ | ||
196 | dev = new_dev->dev; | ||
197 | debugfs_remove_recursive(new_dev->dentry); | ||
198 | |||
199 | opp_set_dev_name(dev, opp_table->dentry_name); | ||
200 | |||
201 | dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, | ||
202 | opp_table->dentry_name); | ||
203 | if (!dentry) { | ||
204 | dev_err(dev, "%s: Failed to rename link from: %s to %s\n", | ||
205 | __func__, dev_name(opp_dev->dev), dev_name(dev)); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | new_dev->dentry = dentry; | ||
210 | opp_table->dentry = dentry; | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * opp_debug_unregister - remove a device opp node from debugfs opp directory | ||
215 | * @opp_dev: opp-dev pointer for device | ||
216 | * @opp_table: the device-opp being removed | ||
217 | * | ||
218 | * Dynamically removes the device-specific directory from the debugfs 'opp' directory. | ||
219 | */ | ||
220 | void opp_debug_unregister(struct opp_device *opp_dev, | ||
221 | struct opp_table *opp_table) | ||
222 | { | ||
223 | if (opp_dev->dentry == opp_table->dentry) { | ||
224 | /* Move the real dentry object under another device */ | ||
225 | if (!list_is_singular(&opp_table->dev_list)) { | ||
226 | opp_migrate_dentry(opp_dev, opp_table); | ||
227 | goto out; | ||
228 | } | ||
229 | opp_table->dentry = NULL; | ||
230 | } | ||
231 | |||
232 | debugfs_remove_recursive(opp_dev->dentry); | ||
233 | |||
234 | out: | ||
235 | opp_dev->dentry = NULL; | ||
236 | } | ||
237 | |||
238 | static int __init opp_debug_init(void) | ||
239 | { | ||
240 | /* Create /sys/kernel/debug/opp directory */ | ||
241 | rootdir = debugfs_create_dir("opp", NULL); | ||
242 | if (!rootdir) { | ||
243 | pr_err("%s: Failed to create root directory\n", __func__); | ||
244 | return -ENOMEM; | ||
245 | } | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | core_initcall(opp_debug_init); | ||
diff --git a/drivers/opp/of.c b/drivers/opp/of.c new file mode 100644 index 000000000000..0b718886479b --- /dev/null +++ b/drivers/opp/of.c | |||
@@ -0,0 +1,633 @@ | |||
1 | /* | ||
2 | * Generic OPP OF helpers | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/export.h> | ||
22 | |||
23 | #include "opp.h" | ||
24 | |||
25 | static struct opp_table *_managed_opp(const struct device_node *np) | ||
26 | { | ||
27 | struct opp_table *opp_table, *managed_table = NULL; | ||
28 | |||
29 | mutex_lock(&opp_table_lock); | ||
30 | |||
31 | list_for_each_entry(opp_table, &opp_tables, node) { | ||
32 | if (opp_table->np == np) { | ||
33 | /* | ||
34 | * Multiple devices can point to the same OPP table and | ||
35 | * so will have the same node-pointer, np. | ||
36 | * | ||
37 | * But the OPPs will be considered as shared only if the | ||
38 | * OPP table contains an "opp-shared" property. | ||
39 | */ | ||
40 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { | ||
41 | _get_opp_table_kref(opp_table); | ||
42 | managed_table = opp_table; | ||
43 | } | ||
44 | |||
45 | break; | ||
46 | } | ||
47 | } | ||
48 | |||
49 | mutex_unlock(&opp_table_lock); | ||
50 | |||
51 | return managed_table; | ||
52 | } | ||
53 | |||
54 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) | ||
55 | { | ||
56 | struct device_node *np; | ||
57 | |||
58 | /* | ||
59 | * Only required for backward compatibility with v1 bindings, but isn't | ||
60 | * harmful for other cases. And so we do it unconditionally. | ||
61 | */ | ||
62 | np = of_node_get(dev->of_node); | ||
63 | if (np) { | ||
64 | u32 val; | ||
65 | |||
66 | if (!of_property_read_u32(np, "clock-latency", &val)) | ||
67 | opp_table->clock_latency_ns_max = val; | ||
68 | of_property_read_u32(np, "voltage-tolerance", | ||
69 | &opp_table->voltage_tolerance_v1); | ||
70 | of_node_put(np); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, | ||
75 | struct device_node *np) | ||
76 | { | ||
77 | unsigned int count = opp_table->supported_hw_count; | ||
78 | u32 version; | ||
79 | int ret; | ||
80 | |||
81 | if (!opp_table->supported_hw) { | ||
82 | /* | ||
83 | * In the case that no supported_hw has been set by the | ||
84 | * platform but there is an opp-supported-hw value set for | ||
85 | * an OPP then the OPP should not be enabled as there is | ||
86 | * no way to see if the hardware supports it. | ||
87 | */ | ||
88 | if (of_find_property(np, "opp-supported-hw", NULL)) | ||
89 | return false; | ||
90 | else | ||
91 | return true; | ||
92 | } | ||
93 | |||
94 | while (count--) { | ||
95 | ret = of_property_read_u32_index(np, "opp-supported-hw", count, | ||
96 | &version); | ||
97 | if (ret) { | ||
98 | dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", | ||
99 | __func__, count, ret); | ||
100 | return false; | ||
101 | } | ||
102 | |||
103 | /* Both of these are bitwise masks of the versions */ | ||
104 | if (!(version & opp_table->supported_hw[count])) | ||
105 | return false; | ||
106 | } | ||
107 | |||
108 | return true; | ||
109 | } | ||
110 | |||
111 | static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, | ||
112 | struct opp_table *opp_table) | ||
113 | { | ||
114 | u32 *microvolt, *microamp = NULL; | ||
115 | int supplies, vcount, icount, ret, i, j; | ||
116 | struct property *prop = NULL; | ||
117 | char name[NAME_MAX]; | ||
118 | |||
119 | supplies = opp_table->regulator_count ? opp_table->regulator_count : 1; | ||
120 | |||
121 | /* Search for "opp-microvolt-<name>" */ | ||
122 | if (opp_table->prop_name) { | ||
123 | snprintf(name, sizeof(name), "opp-microvolt-%s", | ||
124 | opp_table->prop_name); | ||
125 | prop = of_find_property(opp->np, name, NULL); | ||
126 | } | ||
127 | |||
128 | if (!prop) { | ||
129 | /* Search for "opp-microvolt" */ | ||
130 | sprintf(name, "opp-microvolt"); | ||
131 | prop = of_find_property(opp->np, name, NULL); | ||
132 | |||
133 | /* Missing property isn't a problem, but an invalid entry is */ | ||
134 | if (!prop) { | ||
135 | if (!opp_table->regulator_count) | ||
136 | return 0; | ||
137 | |||
138 | dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n", | ||
139 | __func__); | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | vcount = of_property_count_u32_elems(opp->np, name); | ||
145 | if (vcount < 0) { | ||
146 | dev_err(dev, "%s: Invalid %s property (%d)\n", | ||
147 | __func__, name, vcount); | ||
148 | return vcount; | ||
149 | } | ||
150 | |||
151 | /* There can be one or three elements per supply */ | ||
152 | if (vcount != supplies && vcount != supplies * 3) { | ||
153 | dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", | ||
154 | __func__, name, vcount, supplies); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL); | ||
159 | if (!microvolt) | ||
160 | return -ENOMEM; | ||
161 | |||
162 | ret = of_property_read_u32_array(opp->np, name, microvolt, vcount); | ||
163 | if (ret) { | ||
164 | dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); | ||
165 | ret = -EINVAL; | ||
166 | goto free_microvolt; | ||
167 | } | ||
168 | |||
169 | /* Search for "opp-microamp-<name>" */ | ||
170 | prop = NULL; | ||
171 | if (opp_table->prop_name) { | ||
172 | snprintf(name, sizeof(name), "opp-microamp-%s", | ||
173 | opp_table->prop_name); | ||
174 | prop = of_find_property(opp->np, name, NULL); | ||
175 | } | ||
176 | |||
177 | if (!prop) { | ||
178 | /* Search for "opp-microamp" */ | ||
179 | sprintf(name, "opp-microamp"); | ||
180 | prop = of_find_property(opp->np, name, NULL); | ||
181 | } | ||
182 | |||
183 | if (prop) { | ||
184 | icount = of_property_count_u32_elems(opp->np, name); | ||
185 | if (icount < 0) { | ||
186 | dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, | ||
187 | name, icount); | ||
188 | ret = icount; | ||
189 | goto free_microvolt; | ||
190 | } | ||
191 | |||
192 | if (icount != supplies) { | ||
193 | dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", | ||
194 | __func__, name, icount, supplies); | ||
195 | ret = -EINVAL; | ||
196 | goto free_microvolt; | ||
197 | } | ||
198 | |||
199 | microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL); | ||
200 | if (!microamp) { | ||
201 | ret = -EINVAL; | ||
202 | goto free_microvolt; | ||
203 | } | ||
204 | |||
205 | ret = of_property_read_u32_array(opp->np, name, microamp, | ||
206 | icount); | ||
207 | if (ret) { | ||
208 | dev_err(dev, "%s: error parsing %s: %d\n", __func__, | ||
209 | name, ret); | ||
210 | ret = -EINVAL; | ||
211 | goto free_microamp; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | for (i = 0, j = 0; i < supplies; i++) { | ||
216 | opp->supplies[i].u_volt = microvolt[j++]; | ||
217 | |||
218 | if (vcount == supplies) { | ||
219 | opp->supplies[i].u_volt_min = opp->supplies[i].u_volt; | ||
220 | opp->supplies[i].u_volt_max = opp->supplies[i].u_volt; | ||
221 | } else { | ||
222 | opp->supplies[i].u_volt_min = microvolt[j++]; | ||
223 | opp->supplies[i].u_volt_max = microvolt[j++]; | ||
224 | } | ||
225 | |||
226 | if (microamp) | ||
227 | opp->supplies[i].u_amp = microamp[i]; | ||
228 | } | ||
229 | |||
230 | free_microamp: | ||
231 | kfree(microamp); | ||
232 | free_microvolt: | ||
233 | kfree(microvolt); | ||
234 | |||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT | ||
240 | * entries | ||
241 | * @dev: device pointer used to lookup OPP table. | ||
242 | * | ||
243 | * Free OPPs created using static entries present in DT. | ||
244 | */ | ||
245 | void dev_pm_opp_of_remove_table(struct device *dev) | ||
246 | { | ||
247 | _dev_pm_opp_find_and_remove_table(dev, false); | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); | ||
250 | |||
251 | /* Returns opp descriptor node for a device node, caller must | ||
252 | * do of_node_put() */ | ||
253 | static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) | ||
254 | { | ||
255 | /* | ||
256 | * There should be only ONE phandle present in "operating-points-v2" | ||
257 | * property. | ||
258 | */ | ||
259 | |||
260 | return of_parse_phandle(np, "operating-points-v2", 0); | ||
261 | } | ||
262 | |||
263 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | ||
264 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) | ||
265 | { | ||
266 | return _opp_of_get_opp_desc_node(dev->of_node); | ||
267 | } | ||
268 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); | ||
269 | |||
270 | /** | ||
271 | * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) | ||
272 | * @opp_table: OPP table | ||
273 | * @dev: device for which we do this operation | ||
274 | * @np: device node | ||
275 | * | ||
276 | * This function adds an opp definition to the opp table and returns status. The | ||
277 | * opp can be controlled using dev_pm_opp_enable/disable functions and may be | ||
278 | * removed by dev_pm_opp_remove. | ||
279 | * | ||
280 | * Return: | ||
281 | * 0 On success OR | ||
282 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
283 | * -EEXIST Freq are same and volt are different OR | ||
284 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
285 | * -ENOMEM Memory allocation failure | ||
286 | * -EINVAL Failed parsing the OPP node | ||
287 | */ | ||
288 | static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev, | ||
289 | struct device_node *np) | ||
290 | { | ||
291 | struct dev_pm_opp *new_opp; | ||
292 | u64 rate; | ||
293 | u32 val; | ||
294 | int ret; | ||
295 | |||
296 | new_opp = _opp_allocate(opp_table); | ||
297 | if (!new_opp) | ||
298 | return -ENOMEM; | ||
299 | |||
300 | ret = of_property_read_u64(np, "opp-hz", &rate); | ||
301 | if (ret < 0) { | ||
302 | dev_err(dev, "%s: opp-hz not found\n", __func__); | ||
303 | goto free_opp; | ||
304 | } | ||
305 | |||
306 | /* Check if the OPP supports hardware's hierarchy of versions or not */ | ||
307 | if (!_opp_is_supported(dev, opp_table, np)) { | ||
308 | dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); | ||
309 | goto free_opp; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Rate is defined as an unsigned long in the clk API, so cast | ||
314 | * explicitly to that type. This must be fixed once rate is | ||
315 | * guaranteed to be 64 bit in the clk API. | ||
316 | */ | ||
317 | new_opp->rate = (unsigned long)rate; | ||
318 | new_opp->turbo = of_property_read_bool(np, "turbo-mode"); | ||
319 | |||
320 | new_opp->np = np; | ||
321 | new_opp->dynamic = false; | ||
322 | new_opp->available = true; | ||
323 | |||
324 | if (!of_property_read_u32(np, "clock-latency-ns", &val)) | ||
325 | new_opp->clock_latency_ns = val; | ||
326 | |||
327 | ret = opp_parse_supplies(new_opp, dev, opp_table); | ||
328 | if (ret) | ||
329 | goto free_opp; | ||
330 | |||
331 | ret = _opp_add(dev, new_opp, opp_table); | ||
332 | if (ret) { | ||
333 | /* Don't return error for duplicate OPPs */ | ||
334 | if (ret == -EBUSY) | ||
335 | ret = 0; | ||
336 | goto free_opp; | ||
337 | } | ||
338 | |||
339 | /* OPP to select on device suspend */ | ||
340 | if (of_property_read_bool(np, "opp-suspend")) { | ||
341 | if (opp_table->suspend_opp) { | ||
342 | dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", | ||
343 | __func__, opp_table->suspend_opp->rate, | ||
344 | new_opp->rate); | ||
345 | } else { | ||
346 | new_opp->suspend = true; | ||
347 | opp_table->suspend_opp = new_opp; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) | ||
352 | opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; | ||
353 | |||
354 | pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", | ||
355 | __func__, new_opp->turbo, new_opp->rate, | ||
356 | new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, | ||
357 | new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns); | ||
358 | |||
359 | /* | ||
360 | * Notify the changes in the availability of the operable | ||
361 | * frequency/voltage list. | ||
362 | */ | ||
363 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); | ||
364 | return 0; | ||
365 | |||
366 | free_opp: | ||
367 | _opp_free(new_opp); | ||
368 | |||
369 | return ret; | ||
370 | } | ||
371 | |||
372 | /* Initializes OPP tables based on new bindings */ | ||
373 | static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | ||
374 | { | ||
375 | struct device_node *np; | ||
376 | struct opp_table *opp_table; | ||
377 | int ret = 0, count = 0; | ||
378 | |||
379 | opp_table = _managed_opp(opp_np); | ||
380 | if (opp_table) { | ||
381 | /* OPPs are already managed */ | ||
382 | if (!_add_opp_dev(dev, opp_table)) | ||
383 | ret = -ENOMEM; | ||
384 | goto put_opp_table; | ||
385 | } | ||
386 | |||
387 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
388 | if (!opp_table) | ||
389 | return -ENOMEM; | ||
390 | |||
391 | /* We have opp-table node now, iterate over it and add OPPs */ | ||
392 | for_each_available_child_of_node(opp_np, np) { | ||
393 | count++; | ||
394 | |||
395 | ret = _opp_add_static_v2(opp_table, dev, np); | ||
396 | if (ret) { | ||
397 | dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, | ||
398 | ret); | ||
399 | _dev_pm_opp_remove_table(opp_table, dev, false); | ||
400 | goto put_opp_table; | ||
401 | } | ||
402 | } | ||
403 | |||
404 | /* There should be one or more OPPs defined */ | ||
405 | if (WARN_ON(!count)) { | ||
406 | ret = -ENOENT; | ||
407 | goto put_opp_table; | ||
408 | } | ||
409 | |||
410 | opp_table->np = opp_np; | ||
411 | if (of_property_read_bool(opp_np, "opp-shared")) | ||
412 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; | ||
413 | else | ||
414 | opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; | ||
415 | |||
416 | put_opp_table: | ||
417 | dev_pm_opp_put_opp_table(opp_table); | ||
418 | |||
419 | return ret; | ||
420 | } | ||
421 | |||
422 | /* Initializes OPP tables based on old-deprecated bindings */ | ||
423 | static int _of_add_opp_table_v1(struct device *dev) | ||
424 | { | ||
425 | struct opp_table *opp_table; | ||
426 | const struct property *prop; | ||
427 | const __be32 *val; | ||
428 | int nr, ret = 0; | ||
429 | |||
430 | prop = of_find_property(dev->of_node, "operating-points", NULL); | ||
431 | if (!prop) | ||
432 | return -ENODEV; | ||
433 | if (!prop->value) | ||
434 | return -ENODATA; | ||
435 | |||
436 | /* | ||
437 | * Each OPP is a tuple consisting of a frequency and a | ||
438 | * voltage, like <freq-kHz vol-uV>. | ||
439 | */ | ||
440 | nr = prop->length / sizeof(u32); | ||
441 | if (nr % 2) { | ||
442 | dev_err(dev, "%s: Invalid OPP table\n", __func__); | ||
443 | return -EINVAL; | ||
444 | } | ||
445 | |||
446 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
447 | if (!opp_table) | ||
448 | return -ENOMEM; | ||
449 | |||
450 | val = prop->value; | ||
451 | while (nr) { | ||
452 | unsigned long freq = be32_to_cpup(val++) * 1000; | ||
453 | unsigned long volt = be32_to_cpup(val++); | ||
454 | |||
455 | ret = _opp_add_v1(opp_table, dev, freq, volt, false); | ||
456 | if (ret) { | ||
457 | dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", | ||
458 | __func__, freq, ret); | ||
459 | _dev_pm_opp_remove_table(opp_table, dev, false); | ||
460 | break; | ||
461 | } | ||
462 | nr -= 2; | ||
463 | } | ||
464 | |||
465 | dev_pm_opp_put_opp_table(opp_table); | ||
466 | return ret; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * dev_pm_opp_of_add_table() - Initialize opp table from device tree | ||
471 | * @dev: device pointer used to lookup OPP table. | ||
472 | * | ||
473 | * Register the initial OPP table with the OPP library for given device. | ||
474 | * | ||
475 | * Return: | ||
476 | * 0 On success OR | ||
477 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
478 | * -EEXIST Freq are same and volt are different OR | ||
479 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
480 | * -ENOMEM Memory allocation failure | ||
481 | * -ENODEV when the 'operating-points' property is not found or contains | ||
482 | * invalid data in the device node. | ||
483 | * -ENODATA when empty 'operating-points' property is found | ||
484 | * -EINVAL when invalid entries are found in opp-v2 table | ||
485 | */ | ||
486 | int dev_pm_opp_of_add_table(struct device *dev) | ||
487 | { | ||
488 | struct device_node *opp_np; | ||
489 | int ret; | ||
490 | |||
491 | /* | ||
492 | * OPPs have two versions of bindings now. The older one is deprecated, | ||
493 | * try for the new binding first. | ||
494 | */ | ||
495 | opp_np = dev_pm_opp_of_get_opp_desc_node(dev); | ||
496 | if (!opp_np) { | ||
497 | /* | ||
498 | * Try old-deprecated bindings for backward compatibility with | ||
499 | * older dtbs. | ||
500 | */ | ||
501 | return _of_add_opp_table_v1(dev); | ||
502 | } | ||
503 | |||
504 | ret = _of_add_opp_table_v2(dev, opp_np); | ||
505 | of_node_put(opp_np); | ||
506 | |||
507 | return ret; | ||
508 | } | ||
509 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); | ||
510 | |||
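For a device whose OPPs come from DT (either v1 "operating-points" or v2 "operating-points-v2"), the driver side reduces to a pair of calls around the device's lifetime; a hedged sketch with hypothetical function names:

#include <linux/pm_opp.h>

static int example_of_probe(struct device *dev)
{
	int ret;

	/* Parse operating-points / operating-points-v2 from dev->of_node */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	/* ... use dev_pm_opp_find_freq_ceil(), dev_pm_opp_set_rate(), ... */
	return 0;
}

static void example_of_remove(struct device *dev)
{
	/* Drops only the static, DT-created entries */
	dev_pm_opp_of_remove_table(dev);
}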
511 | /* CPU device specific helpers */ | ||
512 | |||
513 | /** | ||
514 | * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask | ||
515 | * @cpumask: cpumask for which OPP table needs to be removed | ||
516 | * | ||
517 | * This removes the OPP tables for CPUs present in the @cpumask. | ||
518 | * This should be used only to remove static entries created from DT. | ||
519 | */ | ||
520 | void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) | ||
521 | { | ||
522 | _dev_pm_opp_cpumask_remove_table(cpumask, true); | ||
523 | } | ||
524 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); | ||
525 | |||
526 | /** | ||
527 | * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask | ||
528 | * @cpumask: cpumask for which OPP table needs to be added. | ||
529 | * | ||
530 | * This adds the OPP tables for CPUs present in the @cpumask. | ||
531 | */ | ||
532 | int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) | ||
533 | { | ||
534 | struct device *cpu_dev; | ||
535 | int cpu, ret = 0; | ||
536 | |||
537 | WARN_ON(cpumask_empty(cpumask)); | ||
538 | |||
539 | for_each_cpu(cpu, cpumask) { | ||
540 | cpu_dev = get_cpu_device(cpu); | ||
541 | if (!cpu_dev) { | ||
542 | pr_err("%s: failed to get cpu%d device\n", __func__, | ||
543 | cpu); | ||
544 | continue; | ||
545 | } | ||
546 | |||
547 | ret = dev_pm_opp_of_add_table(cpu_dev); | ||
548 | if (ret) { | ||
549 | /* | ||
550 | * OPP may get registered dynamically, don't print error | ||
551 | * message here. | ||
552 | */ | ||
553 | pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", | ||
554 | __func__, cpu, ret); | ||
555 | |||
556 | /* Free all other OPPs */ | ||
557 | dev_pm_opp_of_cpumask_remove_table(cpumask); | ||
558 | break; | ||
559 | } | ||
560 | } | ||
561 | |||
562 | return ret; | ||
563 | } | ||
564 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); | ||
565 | |||
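A CPU-centric caller typically hands a whole cluster's cpumask to this helper and unwinds with the matching remove if a later setup step fails; names below are hypothetical:

#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_setup_cluster_opps(const struct cpumask *cpus)
{
	int ret;

	/* Parse the DT OPP tables for every CPU in @cpus */
	ret = dev_pm_opp_of_cpumask_add_table(cpus);
	if (ret)
		return ret;

	/* ... register the cpufreq driver, etc ... */
	return 0;
}

static void example_teardown_cluster_opps(const struct cpumask *cpus)
{
	dev_pm_opp_of_cpumask_remove_table(cpus);
}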
566 | /* | ||
567 | * Works only for OPP v2 bindings. | ||
568 | * | ||
569 | * Returns -ENOENT if operating-points-v2 bindings aren't supported. | ||
570 | */ | ||
571 | /** | ||
572 | * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with | ||
573 | * @cpu_dev using operating-points-v2 | ||
574 | * bindings. | ||
575 | * | ||
576 | * @cpu_dev: CPU device for which we do this operation | ||
577 | * @cpumask: cpumask to update with information of sharing CPUs | ||
578 | * | ||
579 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | ||
580 | * | ||
581 | * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. | ||
582 | */ | ||
583 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | ||
584 | struct cpumask *cpumask) | ||
585 | { | ||
586 | struct device_node *np, *tmp_np, *cpu_np; | ||
587 | int cpu, ret = 0; | ||
588 | |||
589 | /* Get OPP descriptor node */ | ||
590 | np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); | ||
591 | if (!np) { | ||
592 | dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__); | ||
593 | return -ENOENT; | ||
594 | } | ||
595 | |||
596 | cpumask_set_cpu(cpu_dev->id, cpumask); | ||
597 | |||
598 | /* OPPs are shared ? */ | ||
599 | if (!of_property_read_bool(np, "opp-shared")) | ||
600 | goto put_cpu_node; | ||
601 | |||
602 | for_each_possible_cpu(cpu) { | ||
603 | if (cpu == cpu_dev->id) | ||
604 | continue; | ||
605 | |||
606 | cpu_np = of_get_cpu_node(cpu, NULL); | ||
607 | if (!cpu_np) { | ||
608 | dev_err(cpu_dev, "%s: failed to get cpu%d node\n", | ||
609 | __func__, cpu); | ||
610 | ret = -ENOENT; | ||
611 | goto put_cpu_node; | ||
612 | } | ||
613 | |||
614 | /* Get OPP descriptor node */ | ||
615 | tmp_np = _opp_of_get_opp_desc_node(cpu_np); | ||
616 | if (!tmp_np) { | ||
617 | pr_err("%pOF: Couldn't find opp node\n", cpu_np); | ||
618 | ret = -ENOENT; | ||
619 | goto put_cpu_node; | ||
620 | } | ||
621 | |||
622 | /* CPUs are sharing opp node */ | ||
623 | if (np == tmp_np) | ||
624 | cpumask_set_cpu(cpu, cpumask); | ||
625 | |||
626 | of_node_put(tmp_np); | ||
627 | } | ||
628 | |||
629 | put_cpu_node: | ||
630 | of_node_put(np); | ||
631 | return ret; | ||
632 | } | ||
633 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); | ||
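Drivers such as cpufreq-dt combine this helper with dev_pm_opp_set_sharing_cpus(): discover the sharing from DT when operating-points-v2 is present, otherwise tell the OPP core explicitly. A simplified sketch of that pattern, assuming @cpus is pre-populated for the v1 fallback and the OPP table already exists:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_init_sharing(struct device *cpu_dev, struct cpumask *cpus)
{
	int ret;

	/* Preferred: operating-points-v2 plus "opp-shared" describe the domain */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
	if (!ret)
		return 0;

	if (ret != -ENOENT)
		return ret;

	/*
	 * Old v1 bindings: the platform must already know the sharing
	 * (assumed to be in @cpus) and report it to the OPP core.
	 */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
}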
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h new file mode 100644 index 000000000000..166eef990599 --- /dev/null +++ b/drivers/opp/opp.h | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #ifndef __DRIVER_OPP_H__ | ||
15 | #define __DRIVER_OPP_H__ | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/kref.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/limits.h> | ||
22 | #include <linux/pm_opp.h> | ||
23 | #include <linux/notifier.h> | ||
24 | |||
25 | struct clk; | ||
26 | struct regulator; | ||
27 | |||
28 | /* Lock to allow exclusive modification to the device and opp lists */ | ||
29 | extern struct mutex opp_table_lock; | ||
30 | |||
31 | extern struct list_head opp_tables; | ||
32 | |||
33 | /* | ||
34 | * Internal data structure organization within the OPP library is as | ||
35 | * follows: | ||
36 | * opp_tables (root) | ||
37 | * |- device 1 (represents voltage domain 1) | ||
38 | * | |- opp 1 (availability, freq, voltage) | ||
39 | * | |- opp 2 .. | ||
40 | * ... ... | ||
41 | * | `- opp n .. | ||
42 | * |- device 2 (represents the next voltage domain) | ||
43 | * ... | ||
44 | * `- device m (represents mth voltage domain) | ||
45 | * device 1, 2.. are represented by opp_table structure while each opp | ||
46 | * is represented by the opp structure. | ||
47 | */ | ||
48 | |||
49 | /** | ||
50 | * struct dev_pm_opp - Generic OPP description structure | ||
51 | * @node: opp table node. The nodes are maintained throughout the lifetime | ||
53 | * of boot. It is expected that only an optimal set of OPPs is | ||
53 | * added to the library by the SoC framework. | ||
54 | * IMPORTANT: the opp nodes should be maintained in increasing | ||
55 | * order. | ||
56 | * @kref: for reference count of the OPP. | ||
57 | * @available: true/false - marks if this OPP as available or not | ||
58 | * @dynamic: not-created from static DT entries. | ||
59 | * @turbo: true if turbo (boost) OPP | ||
60 | * @suspend: true if suspend OPP | ||
61 | * @rate: Frequency in hertz | ||
62 | * @supplies: Power supplies voltage/current values | ||
63 | * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's | ||
64 | * frequency from any other OPP's frequency. | ||
65 | * @opp_table: points back to the opp_table struct this opp belongs to | ||
66 | * @np: OPP's device node. | ||
67 | * @dentry: debugfs dentry pointer (per opp) | ||
68 | * | ||
69 | * This structure stores the OPP information for a given device. | ||
70 | */ | ||
71 | struct dev_pm_opp { | ||
72 | struct list_head node; | ||
73 | struct kref kref; | ||
74 | |||
75 | bool available; | ||
76 | bool dynamic; | ||
77 | bool turbo; | ||
78 | bool suspend; | ||
79 | unsigned long rate; | ||
80 | |||
81 | struct dev_pm_opp_supply *supplies; | ||
82 | |||
83 | unsigned long clock_latency_ns; | ||
84 | |||
85 | struct opp_table *opp_table; | ||
86 | |||
87 | struct device_node *np; | ||
88 | |||
89 | #ifdef CONFIG_DEBUG_FS | ||
90 | struct dentry *dentry; | ||
91 | #endif | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * struct opp_device - devices managed by 'struct opp_table' | ||
96 | * @node: list node | ||
97 | * @dev: device to which the struct object belongs | ||
98 | * @dentry: debugfs dentry pointer (per device) | ||
99 | * | ||
100 | * This is an internal data structure maintaining the devices that are managed | ||
101 | * by 'struct opp_table'. | ||
102 | */ | ||
103 | struct opp_device { | ||
104 | struct list_head node; | ||
105 | const struct device *dev; | ||
106 | |||
107 | #ifdef CONFIG_DEBUG_FS | ||
108 | struct dentry *dentry; | ||
109 | #endif | ||
110 | }; | ||
111 | |||
112 | enum opp_table_access { | ||
113 | OPP_TABLE_ACCESS_UNKNOWN = 0, | ||
114 | OPP_TABLE_ACCESS_EXCLUSIVE = 1, | ||
115 | OPP_TABLE_ACCESS_SHARED = 2, | ||
116 | }; | ||
117 | |||
118 | /** | ||
119 | * struct opp_table - Device opp structure | ||
120 | * @node: table node - contains the devices with OPPs that | ||
121 | * have been registered. Nodes once added are not modified in this | ||
122 | * table. | ||
123 | * @head: notifier head to notify the OPP availability changes. | ||
124 | * @dev_list: list of devices that share these OPPs | ||
125 | * @opp_list: table of opps | ||
126 | * @kref: for reference count of the table. | ||
127 | * @lock: mutex protecting the opp_list. | ||
128 | * @np: struct device_node pointer for opp's DT node. | ||
129 | * @clock_latency_ns_max: Max clock latency in nanoseconds. | ||
130 | * @shared_opp: OPP is shared between multiple devices. | ||
131 | * @suspend_opp: Pointer to OPP to be used during device suspend. | ||
132 | * @supported_hw: Array of version number to support. | ||
133 | * @supported_hw_count: Number of elements in supported_hw array. | ||
134 | * @prop_name: A name to postfix to many DT properties, while parsing them. | ||
135 | * @clk: Device's clock handle | ||
136 | * @regulators: Supply regulators | ||
137 | * @regulator_count: Number of power supply regulators | ||
138 | * @set_opp: Platform specific set_opp callback | ||
139 | * @set_opp_data: Data to be passed to set_opp callback | ||
140 | * @dentry: debugfs dentry pointer of the real device directory (not links). | ||
141 | * @dentry_name: Name of the real dentry. | ||
142 | * | ||
143 | * @voltage_tolerance_v1: In percentage, for v1 bindings only. | ||
144 | * | ||
145 | * This is an internal data structure maintaining the link to opps attached to | ||
146 | * a device. This structure is not meant to be shared with users as it is | ||
147 | * meant for bookkeeping and is private to the OPP library. | ||
148 | */ | ||
149 | struct opp_table { | ||
150 | struct list_head node; | ||
151 | |||
152 | struct blocking_notifier_head head; | ||
153 | struct list_head dev_list; | ||
154 | struct list_head opp_list; | ||
155 | struct kref kref; | ||
156 | struct mutex lock; | ||
157 | |||
158 | struct device_node *np; | ||
159 | unsigned long clock_latency_ns_max; | ||
160 | |||
161 | /* For backward compatibility with v1 bindings */ | ||
162 | unsigned int voltage_tolerance_v1; | ||
163 | |||
164 | enum opp_table_access shared_opp; | ||
165 | struct dev_pm_opp *suspend_opp; | ||
166 | |||
167 | unsigned int *supported_hw; | ||
168 | unsigned int supported_hw_count; | ||
169 | const char *prop_name; | ||
170 | struct clk *clk; | ||
171 | struct regulator **regulators; | ||
172 | unsigned int regulator_count; | ||
173 | |||
174 | int (*set_opp)(struct dev_pm_set_opp_data *data); | ||
175 | struct dev_pm_set_opp_data *set_opp_data; | ||
176 | |||
177 | #ifdef CONFIG_DEBUG_FS | ||
178 | struct dentry *dentry; | ||
179 | char dentry_name[NAME_MAX]; | ||
180 | #endif | ||
181 | }; | ||
182 | |||
183 | /* Routines internal to opp core */ | ||
184 | void _get_opp_table_kref(struct opp_table *opp_table); | ||
185 | struct opp_table *_find_opp_table(struct device *dev); | ||
186 | struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); | ||
187 | void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all); | ||
188 | void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all); | ||
189 | struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table); | ||
190 | void _opp_free(struct dev_pm_opp *opp); | ||
191 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); | ||
192 | int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); | ||
193 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); | ||
194 | struct opp_table *_add_opp_table(struct device *dev); | ||
195 | |||
196 | #ifdef CONFIG_OF | ||
197 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev); | ||
198 | #else | ||
199 | static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {} | ||
200 | #endif | ||
201 | |||
202 | #ifdef CONFIG_DEBUG_FS | ||
203 | void opp_debug_remove_one(struct dev_pm_opp *opp); | ||
204 | int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); | ||
205 | int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); | ||
206 | void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table); | ||
207 | #else | ||
208 | static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} | ||
209 | |||
210 | static inline int opp_debug_create_one(struct dev_pm_opp *opp, | ||
211 | struct opp_table *opp_table) | ||
212 | { return 0; } | ||
213 | static inline int opp_debug_register(struct opp_device *opp_dev, | ||
214 | struct opp_table *opp_table) | ||
215 | { return 0; } | ||
216 | |||
217 | static inline void opp_debug_unregister(struct opp_device *opp_dev, | ||
218 | struct opp_table *opp_table) | ||
219 | { } | ||
220 | #endif /* DEBUG_FS */ | ||
221 | |||
222 | #endif /* __DRIVER_OPP_H__ */ | ||