aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base/power/opp/core.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/base/power/opp/core.c')
-rw-r--r--drivers/base/power/opp/core.c1300
1 files changed, 1300 insertions, 0 deletions
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
new file mode 100644
index 000000000000..d5c1149ff123
--- /dev/null
+++ b/drivers/base/power/opp/core.c
@@ -0,0 +1,1300 @@
1/*
2 * Generic OPP Interface
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/errno.h>
15#include <linux/err.h>
16#include <linux/slab.h>
17#include <linux/device.h>
18#include <linux/of.h>
19#include <linux/export.h>
20
21#include "opp.h"
22
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 *
 * Readers traverse this list under rcu_read_lock(); all modifications are
 * serialized by dev_opp_list_lock below (see opp_rcu_lockdep_assert()).
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
31
/*
 * Lockdep assertion for the OPP code's RCU discipline: the caller must be
 * inside an RCU read-side critical section or hold dev_opp_list_lock (the
 * updater-side lock) before walking the RCU-protected lists.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
				!lockdep_is_held(&dev_opp_list_lock),	\
			   "Missing rcu_read_lock() or "		\
			   "dev_opp_list_lock protection");		\
} while (0)
39
40static struct device_list_opp *_find_list_dev(const struct device *dev,
41 struct device_opp *dev_opp)
42{
43 struct device_list_opp *list_dev;
44
45 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
46 if (list_dev->dev == dev)
47 return list_dev;
48
49 return NULL;
50}
51
/*
 * _managed_opp() - Find an already-managed OPP table for a DT node
 * @np: device node of an OPP table
 *
 * Return: the device_opp already built from @np, but only when that table
 * is marked as shared; NULL otherwise (including when @np is unknown).
 *
 * NOTE(review): callers appear to run with dev_opp_list_lock held or under
 * rcu_read_lock() (list_for_each_entry_rcu below) — confirm at call sites.
 */
static struct device_opp *_managed_opp(const struct device_node *np)
{
	struct device_opp *dev_opp;

	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
		if (dev_opp->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			return dev_opp->shared_opp ? dev_opp : NULL;
		}
	}

	return NULL;
}
71
/**
 * _find_device_opp() - find device_opp struct using device pointer
 * @dev: device pointer used to lookup device OPPs
 *
 * Search list of device OPPs for one containing matching device. Does a RCU
 * reader operation to grab the pointer needed.
 *
 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is a RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
struct device_opp *_find_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	/* Reject NULL and ERR_PTR-encoded device pointers up front */
	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* A table matches when @dev is on its per-table device list */
	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
		if (_find_list_dev(dev, dev_opp))
			return dev_opp;

	return ERR_PTR(-ENODEV);
}
101
102/**
103 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
104 * @opp: opp for which voltage has to be returned for
105 *
106 * Return: voltage in micro volt corresponding to the opp, else
107 * return 0
108 *
109 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
110 * protected pointer. This means that opp which could have been fetched by
111 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
112 * under RCU lock. The pointer returned by the opp_find_freq family must be
113 * used in the same section as the usage of this function with the pointer
114 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
115 * pointer.
116 */
117unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
118{
119 struct dev_pm_opp *tmp_opp;
120 unsigned long v = 0;
121
122 opp_rcu_lockdep_assert();
123
124 tmp_opp = rcu_dereference(opp);
125 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
126 pr_err("%s: Invalid parameters\n", __func__);
127 else
128 v = tmp_opp->u_volt;
129
130 return v;
131}
132EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
133
134/**
135 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
136 * @opp: opp for which frequency has to be returned for
137 *
138 * Return: frequency in hertz corresponding to the opp, else
139 * return 0
140 *
141 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
142 * protected pointer. This means that opp which could have been fetched by
143 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
144 * under RCU lock. The pointer returned by the opp_find_freq family must be
145 * used in the same section as the usage of this function with the pointer
146 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
147 * pointer.
148 */
149unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
150{
151 struct dev_pm_opp *tmp_opp;
152 unsigned long f = 0;
153
154 opp_rcu_lockdep_assert();
155
156 tmp_opp = rcu_dereference(opp);
157 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
158 pr_err("%s: Invalid parameters\n", __func__);
159 else
160 f = tmp_opp->rate;
161
162 return f;
163}
164EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
165
166/**
167 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
168 * @opp: opp for which turbo mode is being verified
169 *
170 * Turbo OPPs are not for normal use, and can be enabled (under certain
171 * conditions) for short duration of times to finish high throughput work
172 * quickly. Running on them for longer times may overheat the chip.
173 *
174 * Return: true if opp is turbo opp, else false.
175 *
176 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
177 * protected pointer. This means that opp which could have been fetched by
178 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
179 * under RCU lock. The pointer returned by the opp_find_freq family must be
180 * used in the same section as the usage of this function with the pointer
181 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
182 * pointer.
183 */
184bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
185{
186 struct dev_pm_opp *tmp_opp;
187
188 opp_rcu_lockdep_assert();
189
190 tmp_opp = rcu_dereference(opp);
191 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
192 pr_err("%s: Invalid parameters\n", __func__);
193 return false;
194 }
195
196 return tmp_opp->turbo;
197}
198EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
199
200/**
201 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
202 * @dev: device for which we do this operation
203 *
204 * Return: This function returns the max clock latency in nanoseconds.
205 *
206 * Locking: This function takes rcu_read_lock().
207 */
208unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
209{
210 struct device_opp *dev_opp;
211 unsigned long clock_latency_ns;
212
213 rcu_read_lock();
214
215 dev_opp = _find_device_opp(dev);
216 if (IS_ERR(dev_opp))
217 clock_latency_ns = 0;
218 else
219 clock_latency_ns = dev_opp->clock_latency_ns_max;
220
221 rcu_read_unlock();
222 return clock_latency_ns;
223}
224EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
225
/**
 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 * @dev: device for which we do this operation
 *
 * Return: This function returns pointer to the suspend opp if it is
 * defined and available, otherwise it returns NULL.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
	struct device_opp *dev_opp;

	opp_rcu_lockdep_assert();

	/* No table, no suspend OPP set, or suspend OPP disabled -> NULL */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
	    !dev_opp->suspend_opp->available)
		return NULL;

	return dev_opp->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
253
254/**
255 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
256 * @dev: device for which we do this operation
257 *
258 * Return: This function returns the number of available opps if there are any,
259 * else returns 0 if none or the corresponding error value.
260 *
261 * Locking: This function takes rcu_read_lock().
262 */
263int dev_pm_opp_get_opp_count(struct device *dev)
264{
265 struct device_opp *dev_opp;
266 struct dev_pm_opp *temp_opp;
267 int count = 0;
268
269 rcu_read_lock();
270
271 dev_opp = _find_device_opp(dev);
272 if (IS_ERR(dev_opp)) {
273 count = PTR_ERR(dev_opp);
274 dev_err(dev, "%s: device OPP not found (%d)\n",
275 __func__, count);
276 goto out_unlock;
277 }
278
279 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
280 if (temp_opp->available)
281 count++;
282 }
283
284out_unlock:
285 rcu_read_unlock();
286 return count;
287}
288EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
289
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp list and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct device_opp *dev_opp;
	/* Default to -ERANGE: overwritten only when a match is found */
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	/* Both frequency and availability state must match */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
343
/**
 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	/*
	 * The list is sorted by ascending frequency (see _opp_add()), so the
	 * first available entry at or above *freq is the ceil match.
	 */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
393
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_rcu_lockdep_assert();

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp);

	/*
	 * The list is sorted ascending, so keep the last available entry
	 * whose rate does not exceed *freq; stop at the first one above it.
	 */
	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	/* Only update *freq when a floor match was actually found */
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
447
/* List-dev Helpers */

/*
 * RCU callback freeing a device_list_opp entry.
 *
 * NOTE(review): this calls kfree_rcu() from within an RCU callback, which
 * defers the actual kfree() by a further grace period — confirm this is the
 * intended behavior rather than a plain kfree().
 */
static void _kfree_list_dev_rcu(struct rcu_head *head)
{
	struct device_list_opp *list_dev;

	list_dev = container_of(head, struct device_list_opp, rcu_head);
	kfree_rcu(list_dev, rcu_head);
}
456
/*
 * Unlink @list_dev from @dev_opp's device list and schedule it for freeing
 * once the SRCU grace period of the table's notifier head has elapsed.
 */
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}
464
/*
 * _add_list_dev() - Register a device against an OPP table
 * @dev:	device to add
 * @dev_opp:	OPP table gaining the device
 *
 * Allocates a device_list_opp entry for @dev and links it onto @dev_opp's
 * dev_list via RCU publication.
 *
 * Return: the new entry, or NULL on allocation failure.
 */
struct device_list_opp *_add_list_dev(const struct device *dev,
				      struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
	if (!list_dev)
		return NULL;

	/* Initialize list-dev */
	list_dev->dev = dev;
	list_add_rcu(&list_dev->node, &dev_opp->dev_list);

	return list_dev;
}
480
/**
 * _add_device_opp() - Find device OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid device_opp pointer if success, else NULL.
 *
 * NOTE(review): the table is published to dev_opp_list with list_add_rcu();
 * callers presumably hold dev_opp_list_lock to serialize this — confirm at
 * call sites (e.g. _allocate_opp()).
 */
static struct device_opp *_add_device_opp(struct device *dev)
{
	struct device_opp *dev_opp;
	struct device_list_opp *list_dev;

	/* Check for existing list for 'dev' first */
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		return dev_opp;

	/*
	 * Allocate a new device OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
	if (!dev_opp)
		return NULL;

	INIT_LIST_HEAD(&dev_opp->dev_list);

	/* Roll back the table allocation if the device entry fails */
	list_dev = _add_list_dev(dev, dev_opp);
	if (!list_dev) {
		kfree(dev_opp);
		return NULL;
	}

	srcu_init_notifier_head(&dev_opp->srcu_head);
	INIT_LIST_HEAD(&dev_opp->opp_list);

	/* Secure the device list modification */
	list_add_rcu(&dev_opp->node, &dev_opp_list);
	return dev_opp;
}
523
/**
 * _kfree_device_rcu() - Free device_opp RCU handler
 * @head: RCU head
 *
 * NOTE(review): uses kfree_rcu() inside an RCU callback, deferring the
 * actual kfree() by one more grace period — confirm intended (same pattern
 * as _kfree_list_dev_rcu()).
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);

	kfree_rcu(device_opp, rcu_head);
}
534
/**
 * _remove_device_opp() - Removes a device OPP table
 * @dev_opp: device OPP table to be removed.
 *
 * Removes/frees device OPP table if it doesn't contain any OPPs.
 */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	/* The table stays alive while it still holds OPPs */
	if (!list_empty(&dev_opp->opp_list))
		return;

	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	/* Unpublish the table, then free it after an SRCU grace period */
	list_del_rcu(&dev_opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}
560
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head
 *
 * NOTE(review): uses kfree_rcu() inside an RCU callback, deferring the
 * actual kfree() by one more grace period — confirm intended.
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
571
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @dev_opp: points back to the device_opp struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP */
	_remove_device_opp(dev_opp);
}
598
/**
 * dev_pm_opp_remove() - Remove an OPP from OPP list
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_opp *dev_opp;
	bool found = false;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp))
		goto unlock;

	/* Plain (non-RCU) walk is fine: we hold the updater lock */
	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
		goto unlock;
	}

	_opp_remove(dev_opp, opp, true);
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
643
/*
 * _allocate_opp() - Allocate a blank OPP node and its owning table
 * @dev:	device the OPP belongs to
 * @dev_opp:	out parameter; set to the (found or newly created) OPP table
 *
 * Return: zeroed dev_pm_opp on success, NULL on allocation failure (in which
 * case *dev_opp is not valid).
 *
 * NOTE(review): calls _add_device_opp(), which publishes to the global list —
 * presumably runs with dev_opp_list_lock held; confirm at call sites.
 */
static struct dev_pm_opp *_allocate_opp(struct device *dev,
					struct device_opp **dev_opp)
{
	struct dev_pm_opp *opp;

	/* allocate new OPP node */
	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
	if (!opp)
		return NULL;

	INIT_LIST_HEAD(&opp->node);

	*dev_opp = _add_device_opp(dev);
	if (!*dev_opp) {
		kfree(opp);
		return NULL;
	}

	return opp;
}
664
/*
 * _opp_add() - Insert an OPP into its table in frequency order
 * @dev:	device (used only for diagnostics)
 * @new_opp:	fully populated OPP to insert
 * @dev_opp:	table to insert into
 *
 * Return: 0 on success, or 0/-EEXIST for duplicates (0 when the existing
 * entry is available with the same voltage, -EEXIST otherwise).
 */
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
		    struct device_opp *dev_opp)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &dev_opp->opp_list;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last entry we should insert after */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->dev_opp = dev_opp;
	list_add_rcu(&new_opp->node, head);

	return 0;
}
702
/**
 * _opp_add_dynamic() - Allocate a dynamic OPP.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
static int _opp_add_dynamic(struct device *dev, unsigned long freq,
			    long u_volt, bool dynamic)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* Also drops the table if this would have been its only OPP */
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
771
772/* TODO: Support multiple regulators */
773static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
774{
775 u32 microvolt[3] = {0};
776 int count, ret;
777
778 /* Missing property isn't a problem, but an invalid entry is */
779 if (!of_find_property(opp->np, "opp-microvolt", NULL))
780 return 0;
781
782 count = of_property_count_u32_elems(opp->np, "opp-microvolt");
783 if (count < 0) {
784 dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
785 __func__, count);
786 return count;
787 }
788
789 /* There can be one or three elements here */
790 if (count != 1 && count != 3) {
791 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
792 __func__, count);
793 return -EINVAL;
794 }
795
796 ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
797 count);
798 if (ret) {
799 dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
800 ret);
801 return -EINVAL;
802 }
803
804 opp->u_volt = microvolt[0];
805 opp->u_volt_min = microvolt[1];
806 opp->u_volt_max = microvolt[2];
807
808 return 0;
809}
810
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is the only mandatory property in the v2 binding */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_get_microvolt(new_opp, dev);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
		new_opp->u_amp = val;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			/* Keep the first suspend OPP; warn about the rest */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
916
/**
 * dev_pm_opp_add()  - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* Public entry point: dynamic OPPs are removable via dev_pm_opp_remove() */
	return _opp_add_dynamic(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
945
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU update: swap the node in-place, drop the updater lock, then
	 * free the old node after the SRCU grace period. Readers see either
	 * the old or the new node, never a torn state.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	/* Error or no-op: the preallocated node was never plugged in */
	kfree(new_opp);
	return r;
}
1027
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users to make an OPP
 * available again after being temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1052
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1078
/**
 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Return: pointer to notifier head if found, otherwise -ENODEV or
 * -EINVAL based on type of error casted as pointer. value must be checked
 * with IS_ERR to determine valid pointer or error result.
 *
 * Locking: This function must be called under rcu_read_lock(). dev_opp is a
 * RCU protected pointer, so the notifier head it embeds is only guaranteed
 * to remain valid while inside the read-side critical section. The returned
 * pointer must be used prior to unlocking with rcu_read_unlock() to maintain
 * its integrity.
 */
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = _find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1103
1104#ifdef CONFIG_OF
1105/**
1106 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
1107 * entries
1108 * @dev: device pointer used to lookup device OPPs.
1109 *
1110 * Free OPPs created using static entries present in DT.
1111 *
1112 * Locking: The internal device_opp and opp structures are RCU protected.
1113 * Hence this function indirectly uses RCU updater strategy with mutex locks
1114 * to keep the integrity of the internal data structures. Callers should ensure
1115 * that this function is *NOT* called under RCU protection or in contexts where
1116 * mutex cannot be locked.
1117 */
1118void dev_pm_opp_of_remove_table(struct device *dev)
1119{
1120 struct device_opp *dev_opp;
1121 struct dev_pm_opp *opp, *tmp;
1122
1123 /* Hold our list modification lock here */
1124 mutex_lock(&dev_opp_list_lock);
1125
1126 /* Check for existing list for 'dev' */
1127 dev_opp = _find_device_opp(dev);
1128 if (IS_ERR(dev_opp)) {
1129 int error = PTR_ERR(dev_opp);
1130
1131 if (error != -ENODEV)
1132 WARN(1, "%s: dev_opp: %d\n",
1133 IS_ERR_OR_NULL(dev) ?
1134 "Invalid device" : dev_name(dev),
1135 error);
1136 goto unlock;
1137 }
1138
1139 /* Find if dev_opp manages a single device */
1140 if (list_is_singular(&dev_opp->dev_list)) {
1141 /* Free static OPPs */
1142 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1143 if (!opp->dynamic)
1144 _opp_remove(dev_opp, opp, true);
1145 }
1146 } else {
1147 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1148 }
1149
1150unlock:
1151 mutex_unlock(&dev_opp_list_lock);
1152}
1153EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1154
1155/* Returns opp descriptor node for a device, caller must do of_node_put() */
1156struct device_node *_of_get_opp_desc_node(struct device *dev)
1157{
1158 /*
1159 * TODO: Support for multiple OPP tables.
1160 *
1161 * There should be only ONE phandle present in "operating-points-v2"
1162 * property.
1163 */
1164
1165 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1166}
1167
1168/* Initializes OPP tables based on new bindings */
1169static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1170{
1171 struct device_node *np;
1172 struct device_opp *dev_opp;
1173 int ret = 0, count = 0;
1174
1175 dev_opp = _managed_opp(opp_np);
1176 if (dev_opp) {
1177 /* OPPs are already managed */
1178 if (!_add_list_dev(dev, dev_opp))
1179 ret = -ENOMEM;
1180 return ret;
1181 }
1182
1183 /* We have opp-list node now, iterate over it and add OPPs */
1184 for_each_available_child_of_node(opp_np, np) {
1185 count++;
1186
1187 ret = _opp_add_static_v2(dev, np);
1188 if (ret) {
1189 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1190 ret);
1191 goto free_table;
1192 }
1193 }
1194
1195 /* There should be one of more OPP defined */
1196 if (WARN_ON(!count))
1197 return -ENOENT;
1198
1199 dev_opp = _find_device_opp(dev);
1200 if (WARN_ON(IS_ERR(dev_opp))) {
1201 ret = PTR_ERR(dev_opp);
1202 goto free_table;
1203 }
1204
1205 dev_opp->np = opp_np;
1206 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1207
1208 return 0;
1209
1210free_table:
1211 dev_pm_opp_of_remove_table(dev);
1212
1213 return ret;
1214}
1215
1216/* Initializes OPP tables based on old-deprecated bindings */
1217static int _of_add_opp_table_v1(struct device *dev)
1218{
1219 const struct property *prop;
1220 const __be32 *val;
1221 int nr;
1222
1223 prop = of_find_property(dev->of_node, "operating-points", NULL);
1224 if (!prop)
1225 return -ENODEV;
1226 if (!prop->value)
1227 return -ENODATA;
1228
1229 /*
1230 * Each OPP is a set of tuples consisting of frequency and
1231 * voltage like <freq-kHz vol-uV>.
1232 */
1233 nr = prop->length / sizeof(u32);
1234 if (nr % 2) {
1235 dev_err(dev, "%s: Invalid OPP list\n", __func__);
1236 return -EINVAL;
1237 }
1238
1239 val = prop->value;
1240 while (nr) {
1241 unsigned long freq = be32_to_cpup(val++) * 1000;
1242 unsigned long volt = be32_to_cpup(val++);
1243
1244 if (_opp_add_dynamic(dev, freq, volt, false))
1245 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1246 __func__, freq);
1247 nr -= 2;
1248 }
1249
1250 return 0;
1251}
1252
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup device OPPs.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np = _of_get_opp_desc_node(dev);
	int ret;

	/*
	 * Two binding versions exist. Prefer the new "operating-points-v2"
	 * descriptor; fall back to the deprecated "operating-points"
	 * property only when it is absent, so older dtbs keep working.
	 */
	if (!opp_np)
		return _of_add_opp_table_v1(dev);

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
1300#endif