author      Nishanth Menon <nm@ti.com>          2010-10-12 18:13:10 -0400
committer   Rafael J. Wysocki <rjw@sisk.pl>     2010-10-16 19:57:50 -0400
commit      e1f60b292ffd61151403327aa19ff7a1871820bd (patch)
tree        63a01a1ab04e54b1ed859728b594e40123c80fc3 /drivers/base
parent      d33ac60beaf2c7dee5cd90aba7c1eb385dd70937 (diff)
PM: Introduce library for device-specific OPPs (v7)
SoCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. These
are called Operating Performance Points or OPPs. The actual
definitions of OPP vary over silicon versions. For a specific domain,
we can have a set of {frequency, voltage} pairs. As the kernel boots
and more information becomes available, a default set of these is
activated based on the precise nature of the device. Further, during
operation, based on conditions prevailing in the system (such as
temperature), the availability of some OPPs may be temporarily
controlled by the SoC frameworks.
To implement an OPP, some sort of power management support is
necessary; hence this library depends on CONFIG_PM.
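To illustrate the interfaces introduced by this patch, here is a minimal sketch of how an SoC framework might register OPPs for a device and how a consumer might look one up. The example_* functions, the device pointer and the frequency/voltage values are hypothetical and used only for illustration; only the opp_add(), opp_find_freq_ceil() and opp_get_voltage() calls come from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

/* SoC framework side: populate the table early, in process context
 * (opp_add() takes a mutex, so no RCU read-side lock may be held). */
static int example_register_opps(struct device *dev)
{
        int ret;

        ret = opp_add(dev, 300000000, 1012500);  /* 300 MHz @ 1.0125 V */
        if (ret)
                return ret;
        return opp_add(dev, 600000000, 1200000); /* 600 MHz @ 1.2 V */
}

/* Consumer side: find the lowest available OPP at or above target_hz
 * and read its voltage, all inside one RCU read-side section. */
static unsigned long example_voltage_for(struct device *dev,
                                         unsigned long target_hz)
{
        struct opp *opp;
        unsigned long u_volt = 0;

        rcu_read_lock();
        opp = opp_find_freq_ceil(dev, &target_hz);
        if (!IS_ERR(opp))
                u_volt = opp_get_voltage(opp);
        rcu_read_unlock();

        return u_volt;  /* 0 if no suitable OPP was found */
}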
Contributions include:
Sanjeev Premi for the initial concept:
http://patchwork.kernel.org/patch/50998/
Kevin Hilman for converting original design to device-based.
Kevin Hilman and Paul Walmsley for cleaning up many of the function
abstractions, improvements and data structure handling.
Romit Dasgupta for using enums instead of opp pointers.
Thara Gopinath, Eduardo Valentin and Vishwanath BS for fixes and
cleanups.
Linus Walleij for recommending this layer be made generic for usage
in other architectures beyond OMAP and ARM.
Mark Brown, Andrew Morton, Rafael J. Wysocki, Paul E. McKenney for
valuable improvements.
Discussions and comments from the following threads have been incorporated:
http://marc.info/?l=linux-omap&m=126033945313269&w=2
http://marc.info/?l=linux-omap&m=125482970102327&w=2
http://marc.info/?t=125809247500002&r=1&w=2
http://marc.info/?l=linux-omap&m=126025973426007&w=2
http://marc.info/?t=128152609200064&r=1&w=2
http://marc.info/?t=128468723000002&r=1&w=2
v1: http://marc.info/?t=128468723000002&r=1&w=2
Signed-off-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/Makefile |   1 +
-rw-r--r--  drivers/base/power/opp.c    | 628 ++++++++++++++++++++++++++++++++
2 files changed, 629 insertions(+), 0 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index cbccf9a3cee4..abe46edfe5b4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP)  += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)       += runtime.o
 obj-$(CONFIG_PM_OPS)           += generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)     += trace.o
+obj-$(CONFIG_PM_OPP)           += opp.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE)   += -DDEBUG
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
new file mode 100644
index 000000000000..2bb9b4cf59d7
--- /dev/null
+++ b/drivers/base/power/opp.c
@@ -0,0 +1,628 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *      Nishanth Menon
+ *      Romit Dasgupta
+ *      Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/opp.h>
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ *   |- device 1 (represents voltage domain 1)
+ *   |    |- opp 1 (availability, freq, voltage)
+ *   |    |- opp 2 ..
+ *   ...  ...
+ *   |    `- opp n ..
+ *   |- device 2 (represents the next voltage domain)
+ *   ...
+ *   `- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct opp - Generic OPP description structure
+ * @node:       opp list node. The nodes are maintained throughout the lifetime
+ *              of boot. It is expected only an optimal set of OPPs are
+ *              added to the library by the SoC framework.
+ *              RCU usage: opp list is traversed with RCU locks. node
+ *              modification is possible realtime, hence the modifications
+ *              are protected by the dev_opp_list_lock for integrity.
+ *              IMPORTANT: the opp nodes should be maintained in increasing
+ *              order.
+ * @available:  true/false - marks if this OPP is available or not
+ * @rate:       Frequency in hertz
+ * @u_volt:     Nominal voltage in microvolts corresponding to this OPP
+ * @dev_opp:    points back to the device_opp struct this opp belongs to
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct opp {
+        struct list_head node;
+
+        bool available;
+        unsigned long rate;
+        unsigned long u_volt;
+
+        struct device_opp *dev_opp;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node:       list node - contains the devices with OPPs that
+ *              have been registered. Nodes once added are not modified in this
+ *              list.
+ *              RCU usage: nodes are not modified in the list of device_opp,
+ *              however addition is possible and is secured by dev_opp_list_lock
+ * @dev:        device pointer
+ * @opp_list:   list of opps
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for bookkeeping and is private to the OPP library.
+ */
+struct device_opp {
+        struct list_head node;
+
+        struct device *dev;
+        struct list_head opp_list;
+};
+
+/*
+ * The root of the list of all devices. All device_opp structures branch off
+ * from here, with each device_opp containing the list of opps it supports in
+ * various states of availability.
+ */
+static LIST_HEAD(dev_opp_list);
+/* Lock to allow exclusive modification to the device and opp lists */
+static DEFINE_MUTEX(dev_opp_list_lock);
+
+/**
+ * find_device_opp() - find device_opp struct using device pointer
+ * @dev:        device pointer used to lookup device OPPs
+ *
+ * Search list of device OPPs for one containing matching device. Does an RCU
+ * reader operation to grab the pointer needed.
+ *
+ * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * -EINVAL based on type of error.
+ *
+ * Locking: This function must be called under rcu_read_lock(). device_opp
+ * is an RCU protected pointer. This means that device_opp is valid as long
+ * as we are under RCU lock.
+ */
+static struct device_opp *find_device_opp(struct device *dev)
+{
+        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+
+        if (unlikely(IS_ERR_OR_NULL(dev))) {
+                pr_err("%s: Invalid parameters\n", __func__);
+                return ERR_PTR(-EINVAL);
+        }
+
+        list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
+                if (tmp_dev_opp->dev == dev) {
+                        dev_opp = tmp_dev_opp;
+                        break;
+                }
+        }
+
+        return dev_opp;
+}
+
+/**
+ * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * @opp:        opp for which the voltage has to be returned
+ *
+ * Return voltage in micro volt corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_voltage(struct opp *opp)
+{
+        struct opp *tmp_opp;
+        unsigned long v = 0;
+
+        tmp_opp = rcu_dereference(opp);
+        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+                pr_err("%s: Invalid parameters\n", __func__);
+        else
+                v = tmp_opp->u_volt;
+
+        return v;
+}
+
+/**
+ * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * @opp:        opp for which the frequency has to be returned
+ *
+ * Return frequency in hertz corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_freq(struct opp *opp)
+{
+        struct opp *tmp_opp;
+        unsigned long f = 0;
+
+        tmp_opp = rcu_dereference(opp);
+        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+                pr_err("%s: Invalid parameters\n", __func__);
+        else
+                f = tmp_opp->rate;
+
+        return f;
+}
+
+/**
+ * opp_get_opp_count() - Get number of opps available in the opp list
+ * @dev:        device for which we do this operation
+ *
+ * This function returns the number of available opps if there are any,
+ * 0 if none, or the corresponding error value.
+ *
+ * Locking: This function must be called under rcu_read_lock(). This function
+ * internally references two RCU protected structures: device_opp and opp which
+ * are safe as long as we are under a common RCU locked section.
+ */
+int opp_get_opp_count(struct device *dev)
+{
+        struct device_opp *dev_opp;
+        struct opp *temp_opp;
+        int count = 0;
+
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp)) {
+                int r = PTR_ERR(dev_opp);
+                dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+                return r;
+        }
+
+        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+                if (temp_opp->available)
+                        count++;
+        }
+
+        return count;
+}
+
+/**
+ * opp_find_freq_exact() - search for an exact frequency
+ * @dev:                device for which we do this operation
+ * @freq:               frequency to search for
+ * @is_available:       true/false - match for available opp
+ *
+ * Searches for exact match in the opp list and returns pointer to the matching
+ * opp if found, else returns ERR_PTR in case of error and should be handled
+ * using IS_ERR.
+ *
+ * Note: available is a modifier for the search. If available=true, then the
+ * match is for an exact matching frequency which is available in the stored
+ * OPP table. If false, the match is for an exact frequency which is not available.
+ *
+ * This provides a mechanism to enable an opp which is not available currently
+ * or the opposite as well.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
+                                bool available)
+{
+        struct device_opp *dev_opp;
+        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp)) {
+                int r = PTR_ERR(dev_opp);
+                dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+                return ERR_PTR(r);
+        }
+
+        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+                if (temp_opp->available == available &&
+                    temp_opp->rate == freq) {
+                        opp = temp_opp;
+                        break;
+                }
+        }
+
+        return opp;
+}
+
+/**
+ * opp_find_freq_ceil() - Search for a rounded ceil freq
+ * @dev:        device for which we do this operation
+ * @freq:       Start frequency
+ *
+ * Search for the matching ceil *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+{
+        struct device_opp *dev_opp;
+        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+        if (!dev || !freq) {
+                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+                return ERR_PTR(-EINVAL);
+        }
+
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp))
+                return opp;
+
+        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+                if (temp_opp->available && temp_opp->rate >= *freq) {
+                        opp = temp_opp;
+                        *freq = opp->rate;
+                        break;
+                }
+        }
+
+        return opp;
+}
+
+/**
+ * opp_find_freq_floor() - Search for a rounded floor freq
+ * @dev:        device for which we do this operation
+ * @freq:       Start frequency
+ *
+ * Search for the matching floor *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+{
+        struct device_opp *dev_opp;
+        struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+        if (!dev || !freq) {
+                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+                return ERR_PTR(-EINVAL);
+        }
+
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp))
+                return opp;
+
+        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+                if (temp_opp->available) {
+                        /* go to the next node, before choosing prev */
+                        if (temp_opp->rate > *freq)
+                                break;
+                        else
+                                opp = temp_opp;
+                }
+        }
+        if (!IS_ERR(opp))
+                *freq = opp->rate;
+
+        return opp;
+}
+
+/**
+ * opp_add() - Add an OPP table entry from a table definition
+ * @dev:        device for which we do this operation
+ * @freq:       Frequency in Hz for this OPP
+ * @u_volt:     Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+        struct device_opp *dev_opp = NULL;
+        struct opp *opp, *new_opp;
+        struct list_head *head;
+
+        /* allocate new OPP node */
+        new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+        if (!new_opp) {
+                dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+                return -ENOMEM;
+        }
+
+        /* Hold our list modification lock here */
+        mutex_lock(&dev_opp_list_lock);
+
+        /* Check for existing list for 'dev' */
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp)) {
+                /*
+                 * Allocate a new device OPP table. In the infrequent case
+                 * where a new device needs to be added, we pay this
+                 * penalty.
+                 */
+                dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+                if (!dev_opp) {
+                        mutex_unlock(&dev_opp_list_lock);
+                        kfree(new_opp);
+                        dev_warn(dev,
+                                "%s: Unable to create device OPP structure\n",
+                                __func__);
+                        return -ENOMEM;
+                }
+
+                dev_opp->dev = dev;
+                INIT_LIST_HEAD(&dev_opp->opp_list);
+
+                /* Secure the device list modification */
+                list_add_rcu(&dev_opp->node, &dev_opp_list);
+        }
+
+        /* populate the opp table */
+        new_opp->dev_opp = dev_opp;
+        new_opp->rate = freq;
+        new_opp->u_volt = u_volt;
+        new_opp->available = true;
+
+        /* Insert new OPP in order of increasing frequency */
+        head = &dev_opp->opp_list;
+        list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+                if (new_opp->rate < opp->rate)
+                        break;
+                else
+                        head = &opp->node;
+        }
+
+        list_add_rcu(&new_opp->node, head);
+        mutex_unlock(&dev_opp_list_lock);
+
+        return 0;
+}
+
+/**
+ * opp_set_availability() - helper to set the availability of an opp
+ * @dev:                device for which we do this operation
+ * @freq:               OPP frequency to modify availability
+ * @availability_req:   availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+static int opp_set_availability(struct device *dev, unsigned long freq,
+                bool availability_req)
+{
+        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+        struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+        int r = 0;
+
+        /* keep the node allocated */
+        new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+        if (!new_opp) {
+                dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+                return -ENOMEM;
+        }
+
+        mutex_lock(&dev_opp_list_lock);
+
+        /* Find the device_opp */
+        list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+                if (dev == tmp_dev_opp->dev) {
+                        dev_opp = tmp_dev_opp;
+                        break;
+                }
+        }
+        if (IS_ERR(dev_opp)) {
+                r = PTR_ERR(dev_opp);
+                dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+                goto unlock;
+        }
+
+        /* Do we have the frequency? */
+        list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+                if (tmp_opp->rate == freq) {
+                        opp = tmp_opp;
+                        break;
+                }
+        }
+        if (IS_ERR(opp)) {
+                r = PTR_ERR(opp);
+                goto unlock;
+        }
+
+        /* Is update really needed? */
+        if (opp->available == availability_req)
+                goto unlock;
+        /* copy the old data over */
+        *new_opp = *opp;
+
+        /* plug in new node */
+        new_opp->available = availability_req;
+
+        list_replace_rcu(&opp->node, &new_opp->node);
+        mutex_unlock(&dev_opp_list_lock);
+        synchronize_rcu();
+
+        /* clean up old opp */
+        new_opp = opp;
+        goto out;
+
+unlock:
+        mutex_unlock(&dev_opp_list_lock);
+out:
+        kfree(new_opp);
+        return r;
+}
+
+/**
+ * opp_enable() - Enable a specific OPP
+ * @dev:        device for which we do this operation
+ * @freq:       OPP frequency to enable
+ *
+ * Enables a provided opp. If the operation is valid, this returns 0, else the
+ * corresponding error value. It is meant to be used by users to make an OPP
+ * available again after it was temporarily made unavailable with opp_disable.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_enable(struct device *dev, unsigned long freq)
+{
+        return opp_set_availability(dev, freq, true);
+}
+
+/**
+ * opp_disable() - Disable a specific OPP
+ * @dev:        device for which we do this operation
+ * @freq:       OPP frequency to disable
+ *
+ * Disables a provided opp. If the operation is valid, this returns
+ * 0, else the corresponding error value. It is meant to be a temporary
+ * control by users to make this OPP not available until the circumstances are
+ * right to make it available again (with a call to opp_enable).
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_disable(struct device *dev, unsigned long freq)
+{
+        return opp_set_availability(dev, freq, false);
+}
+
+#ifdef CONFIG_CPU_FREQ
+/**
+ * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev:        device for which we do this operation
+ * @table:      Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * To simplify the logic, we pretend we are the updater and hold the relevant
+ * mutex here. Callers should ensure that this function is *NOT* called under
+ * RCU protection or in contexts where mutex locking cannot be used.
+ */
+int opp_init_cpufreq_table(struct device *dev,
+                        struct cpufreq_frequency_table **table)
+{
+        struct device_opp *dev_opp;
+        struct opp *opp;
+        struct cpufreq_frequency_table *freq_table;
+        int i = 0;
+
+        /* Pretend as if I am an updater */
+        mutex_lock(&dev_opp_list_lock);
+
+        dev_opp = find_device_opp(dev);
+        if (IS_ERR(dev_opp)) {
+                int r = PTR_ERR(dev_opp);
+                mutex_unlock(&dev_opp_list_lock);
+                dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+                return r;
+        }
+
+        freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+                             (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+        if (!freq_table) {
+                mutex_unlock(&dev_opp_list_lock);
+                dev_warn(dev, "%s: Unable to allocate frequency table\n",
+                        __func__);
+                return -ENOMEM;
+        }
+
+        list_for_each_entry(opp, &dev_opp->opp_list, node) {
+                if (opp->available) {
+                        freq_table[i].index = i;
+                        freq_table[i].frequency = opp->rate / 1000;
+                        i++;
+                }
+        }
+        mutex_unlock(&dev_opp_list_lock);
+
+        freq_table[i].index = i;
+        freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+        *table = &freq_table[0];
+
+        return 0;
+}
+#endif          /* CONFIG_CPU_FREQ */