aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2015-09-01 09:52:41 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2015-09-01 09:52:41 -0400
commitac2a29c8a4a641574febccd10169b26138a5d656 (patch)
tree05629149386bcfbcf2f4bf28c84ae99fcaf055b0
parent4ffe18c2556f2848c4e57457915b08a63dc00fd5 (diff)
parent50a3cb04a5f9cd5323a76db9ee409a7f3004259a (diff)
Merge branch 'pm-opp'
* pm-opp:
  PM / OPP: Drop unlikely before IS_ERR(_OR_NULL)
  PM / OPP: Fix static checker warning (broken 64bit big endian systems)
  PM / OPP: Free resources and properly return error on failure
  cpufreq-dt: make scaling_boost_freqs sysfs attr available when boost is enabled
  cpufreq: dt: Add support for turbo/boost mode
  cpufreq: dt: Add support for operating-points-v2 bindings
  cpufreq: Allow drivers to enable boost support after registering driver
  cpufreq: Update boost flag while initializing freq table from OPPs
  PM / OPP: add dev_pm_opp_is_turbo() helper
  PM / OPP: Add helpers for initializing CPU OPPs
  PM / OPP: Add support for opp-suspend
  PM / OPP: Add OPP sharing information to OPP library
  PM / OPP: Add clock-latency-ns support
  PM / OPP: Add support to parse "operating-points-v2" bindings
  PM / OPP: Break _opp_add_dynamic() into smaller functions
  PM / OPP: Allocate dev_opp from _add_device_opp()
  PM / OPP: Create _remove_device_opp() for freeing dev_opp
  PM / OPP: Relocate few routines
  PM / OPP: Create a directory for opp bindings
  PM / OPP: Update bindings to make opp-hz a 64 bit value
-rw-r--r--Documentation/devicetree/bindings/opp/opp.txt (renamed from Documentation/devicetree/bindings/power/opp.txt)40
-rw-r--r--drivers/base/power/opp.c1003
-rw-r--r--drivers/cpufreq/cpufreq-dt.c73
-rw-r--r--drivers/cpufreq/cpufreq.c68
-rw-r--r--drivers/cpufreq/cpufreq_opp.c4
-rw-r--r--drivers/cpufreq/freq_table.c15
-rw-r--r--include/linux/cpufreq.h13
-rw-r--r--include/linux/pm_opp.h36
8 files changed, 1020 insertions, 232 deletions
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 0d5e7c978121..0cb44dc21f97 100644
--- a/Documentation/devicetree/bindings/power/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -88,7 +88,7 @@ This defines voltage-current-frequency combinations along with other related
88properties. 88properties.
89 89
90Required properties: 90Required properties:
91- opp-hz: Frequency in Hz 91- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
92 92
93Optional properties: 93Optional properties:
94- opp-microvolt: voltage in micro Volts. 94- opp-microvolt: voltage in micro Volts.
@@ -158,20 +158,20 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
158 opp-shared; 158 opp-shared;
159 159
160 opp00 { 160 opp00 {
161 opp-hz = <1000000000>; 161 opp-hz = /bits/ 64 <1000000000>;
162 opp-microvolt = <970000 975000 985000>; 162 opp-microvolt = <970000 975000 985000>;
163 opp-microamp = <70000>; 163 opp-microamp = <70000>;
164 clock-latency-ns = <300000>; 164 clock-latency-ns = <300000>;
165 opp-suspend; 165 opp-suspend;
166 }; 166 };
167 opp01 { 167 opp01 {
168 opp-hz = <1100000000>; 168 opp-hz = /bits/ 64 <1100000000>;
169 opp-microvolt = <980000 1000000 1010000>; 169 opp-microvolt = <980000 1000000 1010000>;
170 opp-microamp = <80000>; 170 opp-microamp = <80000>;
171 clock-latency-ns = <310000>; 171 clock-latency-ns = <310000>;
172 }; 172 };
173 opp02 { 173 opp02 {
174 opp-hz = <1200000000>; 174 opp-hz = /bits/ 64 <1200000000>;
175 opp-microvolt = <1025000>; 175 opp-microvolt = <1025000>;
176 clock-latency-ns = <290000>; 176 clock-latency-ns = <290000>;
177 turbo-mode; 177 turbo-mode;
@@ -237,20 +237,20 @@ independently.
237 */ 237 */
238 238
239 opp00 { 239 opp00 {
240 opp-hz = <1000000000>; 240 opp-hz = /bits/ 64 <1000000000>;
241 opp-microvolt = <970000 975000 985000>; 241 opp-microvolt = <970000 975000 985000>;
242 opp-microamp = <70000>; 242 opp-microamp = <70000>;
243 clock-latency-ns = <300000>; 243 clock-latency-ns = <300000>;
244 opp-suspend; 244 opp-suspend;
245 }; 245 };
246 opp01 { 246 opp01 {
247 opp-hz = <1100000000>; 247 opp-hz = /bits/ 64 <1100000000>;
248 opp-microvolt = <980000 1000000 1010000>; 248 opp-microvolt = <980000 1000000 1010000>;
249 opp-microamp = <80000>; 249 opp-microamp = <80000>;
250 clock-latency-ns = <310000>; 250 clock-latency-ns = <310000>;
251 }; 251 };
252 opp02 { 252 opp02 {
253 opp-hz = <1200000000>; 253 opp-hz = /bits/ 64 <1200000000>;
254 opp-microvolt = <1025000>; 254 opp-microvolt = <1025000>;
255 opp-microamp = <90000>; 255 opp-microamp = <90000>;
256 clock-latency-ns = <290000>; 256 clock-latency-ns = <290000>;
@@ -313,20 +313,20 @@ DVFS state together.
313 opp-shared; 313 opp-shared;
314 314
315 opp00 { 315 opp00 {
316 opp-hz = <1000000000>; 316 opp-hz = /bits/ 64 <1000000000>;
317 opp-microvolt = <970000 975000 985000>; 317 opp-microvolt = <970000 975000 985000>;
318 opp-microamp = <70000>; 318 opp-microamp = <70000>;
319 clock-latency-ns = <300000>; 319 clock-latency-ns = <300000>;
320 opp-suspend; 320 opp-suspend;
321 }; 321 };
322 opp01 { 322 opp01 {
323 opp-hz = <1100000000>; 323 opp-hz = /bits/ 64 <1100000000>;
324 opp-microvolt = <980000 1000000 1010000>; 324 opp-microvolt = <980000 1000000 1010000>;
325 opp-microamp = <80000>; 325 opp-microamp = <80000>;
326 clock-latency-ns = <310000>; 326 clock-latency-ns = <310000>;
327 }; 327 };
328 opp02 { 328 opp02 {
329 opp-hz = <1200000000>; 329 opp-hz = /bits/ 64 <1200000000>;
330 opp-microvolt = <1025000>; 330 opp-microvolt = <1025000>;
331 opp-microamp = <90000>; 331 opp-microamp = <90000>;
332 clock-latency-ns = <290000>; 332 clock-latency-ns = <290000>;
@@ -339,20 +339,20 @@ DVFS state together.
339 opp-shared; 339 opp-shared;
340 340
341 opp10 { 341 opp10 {
342 opp-hz = <1300000000>; 342 opp-hz = /bits/ 64 <1300000000>;
343 opp-microvolt = <1045000 1050000 1055000>; 343 opp-microvolt = <1045000 1050000 1055000>;
344 opp-microamp = <95000>; 344 opp-microamp = <95000>;
345 clock-latency-ns = <400000>; 345 clock-latency-ns = <400000>;
346 opp-suspend; 346 opp-suspend;
347 }; 347 };
348 opp11 { 348 opp11 {
349 opp-hz = <1400000000>; 349 opp-hz = /bits/ 64 <1400000000>;
350 opp-microvolt = <1075000>; 350 opp-microvolt = <1075000>;
351 opp-microamp = <100000>; 351 opp-microamp = <100000>;
352 clock-latency-ns = <400000>; 352 clock-latency-ns = <400000>;
353 }; 353 };
354 opp12 { 354 opp12 {
355 opp-hz = <1500000000>; 355 opp-hz = /bits/ 64 <1500000000>;
356 opp-microvolt = <1010000 1100000 1110000>; 356 opp-microvolt = <1010000 1100000 1110000>;
357 opp-microamp = <95000>; 357 opp-microamp = <95000>;
358 clock-latency-ns = <400000>; 358 clock-latency-ns = <400000>;
@@ -379,7 +379,7 @@ Example 4: Handling multiple regulators
379 opp-shared; 379 opp-shared;
380 380
381 opp00 { 381 opp00 {
382 opp-hz = <1000000000>; 382 opp-hz = /bits/ 64 <1000000000>;
383 opp-microvolt = <970000>, /* Supply 0 */ 383 opp-microvolt = <970000>, /* Supply 0 */
384 <960000>, /* Supply 1 */ 384 <960000>, /* Supply 1 */
385 <960000>; /* Supply 2 */ 385 <960000>; /* Supply 2 */
@@ -392,7 +392,7 @@ Example 4: Handling multiple regulators
392 /* OR */ 392 /* OR */
393 393
394 opp00 { 394 opp00 {
395 opp-hz = <1000000000>; 395 opp-hz = /bits/ 64 <1000000000>;
396 opp-microvolt = <970000 975000 985000>, /* Supply 0 */ 396 opp-microvolt = <970000 975000 985000>, /* Supply 0 */
397 <960000 965000 975000>, /* Supply 1 */ 397 <960000 965000 975000>, /* Supply 1 */
398 <960000 965000 975000>; /* Supply 2 */ 398 <960000 965000 975000>; /* Supply 2 */
@@ -405,7 +405,7 @@ Example 4: Handling multiple regulators
405 /* OR */ 405 /* OR */
406 406
407 opp00 { 407 opp00 {
408 opp-hz = <1000000000>; 408 opp-hz = /bits/ 64 <1000000000>;
409 opp-microvolt = <970000 975000 985000>, /* Supply 0 */ 409 opp-microvolt = <970000 975000 985000>, /* Supply 0 */
410 <960000 965000 975000>, /* Supply 1 */ 410 <960000 965000 975000>, /* Supply 1 */
411 <960000 965000 975000>; /* Supply 2 */ 411 <960000 965000 975000>; /* Supply 2 */
@@ -437,12 +437,12 @@ Example 5: Multiple OPP tables
437 opp-shared; 437 opp-shared;
438 438
439 opp00 { 439 opp00 {
440 opp-hz = <600000000>; 440 opp-hz = /bits/ 64 <600000000>;
441 ... 441 ...
442 }; 442 };
443 443
444 opp01 { 444 opp01 {
445 opp-hz = <800000000>; 445 opp-hz = /bits/ 64 <800000000>;
446 ... 446 ...
447 }; 447 };
448 }; 448 };
@@ -453,12 +453,12 @@ Example 5: Multiple OPP tables
453 opp-shared; 453 opp-shared;
454 454
455 opp10 { 455 opp10 {
456 opp-hz = <1000000000>; 456 opp-hz = /bits/ 64 <1000000000>;
457 ... 457 ...
458 }; 458 };
459 459
460 opp11 { 460 opp11 {
461 opp-hz = <1100000000>; 461 opp-hz = /bits/ 64 <1100000000>;
462 ... 462 ...
463 }; 463 };
464 }; 464 };
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 677fb2843553..bb703b5ebaff 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -11,6 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/cpu.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/err.h> 17#include <linux/err.h>
@@ -51,10 +52,17 @@
51 * order. 52 * order.
52 * @dynamic: not-created from static DT entries. 53 * @dynamic: not-created from static DT entries.
53 * @available: true/false - marks if this OPP as available or not 54 * @available: true/false - marks if this OPP as available or not
55 * @turbo: true if turbo (boost) OPP
54 * @rate: Frequency in hertz 56 * @rate: Frequency in hertz
55 * @u_volt: Nominal voltage in microvolts corresponding to this OPP 57 * @u_volt: Target voltage in microvolts corresponding to this OPP
58 * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
59 * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
60 * @u_amp: Maximum current drawn by the device in microamperes
61 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
62 * frequency from any other OPP's frequency.
56 * @dev_opp: points back to the device_opp struct this opp belongs to 63 * @dev_opp: points back to the device_opp struct this opp belongs to
57 * @rcu_head: RCU callback head used for deferred freeing 64 * @rcu_head: RCU callback head used for deferred freeing
65 * @np: OPP's device node.
58 * 66 *
59 * This structure stores the OPP information for a given device. 67 * This structure stores the OPP information for a given device.
60 */ 68 */
@@ -63,11 +71,34 @@ struct dev_pm_opp {
63 71
64 bool available; 72 bool available;
65 bool dynamic; 73 bool dynamic;
74 bool turbo;
66 unsigned long rate; 75 unsigned long rate;
76
67 unsigned long u_volt; 77 unsigned long u_volt;
78 unsigned long u_volt_min;
79 unsigned long u_volt_max;
80 unsigned long u_amp;
81 unsigned long clock_latency_ns;
68 82
69 struct device_opp *dev_opp; 83 struct device_opp *dev_opp;
70 struct rcu_head rcu_head; 84 struct rcu_head rcu_head;
85
86 struct device_node *np;
87};
88
89/**
90 * struct device_list_opp - devices managed by 'struct device_opp'
91 * @node: list node
92 * @dev: device to which the struct object belongs
93 * @rcu_head: RCU callback head used for deferred freeing
94 *
95 * This is an internal data structure maintaining the list of devices that are
96 * managed by 'struct device_opp'.
97 */
98struct device_list_opp {
99 struct list_head node;
100 const struct device *dev;
101 struct rcu_head rcu_head;
71}; 102};
72 103
73/** 104/**
@@ -77,10 +108,12 @@ struct dev_pm_opp {
77 * list. 108 * list.
78 * RCU usage: nodes are not modified in the list of device_opp, 109 * RCU usage: nodes are not modified in the list of device_opp,
79 * however addition is possible and is secured by dev_opp_list_lock 110 * however addition is possible and is secured by dev_opp_list_lock
80 * @dev: device pointer
81 * @srcu_head: notifier head to notify the OPP availability changes. 111 * @srcu_head: notifier head to notify the OPP availability changes.
82 * @rcu_head: RCU callback head used for deferred freeing 112 * @rcu_head: RCU callback head used for deferred freeing
113 * @dev_list: list of devices that share these OPPs
83 * @opp_list: list of opps 114 * @opp_list: list of opps
115 * @np: struct device_node pointer for opp's DT node.
116 * @shared_opp: OPP is shared between multiple devices.
84 * 117 *
85 * This is an internal data structure maintaining the link to opps attached to 118 * This is an internal data structure maintaining the link to opps attached to
86 * a device. This structure is not meant to be shared to users as it is 119 * a device. This structure is not meant to be shared to users as it is
@@ -93,10 +126,15 @@ struct dev_pm_opp {
93struct device_opp { 126struct device_opp {
94 struct list_head node; 127 struct list_head node;
95 128
96 struct device *dev;
97 struct srcu_notifier_head srcu_head; 129 struct srcu_notifier_head srcu_head;
98 struct rcu_head rcu_head; 130 struct rcu_head rcu_head;
131 struct list_head dev_list;
99 struct list_head opp_list; 132 struct list_head opp_list;
133
134 struct device_node *np;
135 unsigned long clock_latency_ns_max;
136 bool shared_opp;
137 struct dev_pm_opp *suspend_opp;
100}; 138};
101 139
102/* 140/*
@@ -116,6 +154,38 @@ do { \
116 "dev_opp_list_lock protection"); \ 154 "dev_opp_list_lock protection"); \
117} while (0) 155} while (0)
118 156
157static struct device_list_opp *_find_list_dev(const struct device *dev,
158 struct device_opp *dev_opp)
159{
160 struct device_list_opp *list_dev;
161
162 list_for_each_entry(list_dev, &dev_opp->dev_list, node)
163 if (list_dev->dev == dev)
164 return list_dev;
165
166 return NULL;
167}
168
169static struct device_opp *_managed_opp(const struct device_node *np)
170{
171 struct device_opp *dev_opp;
172
173 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
174 if (dev_opp->np == np) {
175 /*
176 * Multiple devices can point to the same OPP table and
177 * so will have same node-pointer, np.
178 *
179 * But the OPPs will be considered as shared only if the
180 * OPP table contains a "opp-shared" property.
181 */
182 return dev_opp->shared_opp ? dev_opp : NULL;
183 }
184 }
185
186 return NULL;
187}
188
119/** 189/**
120 * _find_device_opp() - find device_opp struct using device pointer 190 * _find_device_opp() - find device_opp struct using device pointer
121 * @dev: device pointer used to lookup device OPPs 191 * @dev: device pointer used to lookup device OPPs
@@ -132,21 +202,18 @@ do { \
132 */ 202 */
133static struct device_opp *_find_device_opp(struct device *dev) 203static struct device_opp *_find_device_opp(struct device *dev)
134{ 204{
135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 205 struct device_opp *dev_opp;
136 206
137 if (unlikely(IS_ERR_OR_NULL(dev))) { 207 if (IS_ERR_OR_NULL(dev)) {
138 pr_err("%s: Invalid parameters\n", __func__); 208 pr_err("%s: Invalid parameters\n", __func__);
139 return ERR_PTR(-EINVAL); 209 return ERR_PTR(-EINVAL);
140 } 210 }
141 211
142 list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { 212 list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
143 if (tmp_dev_opp->dev == dev) { 213 if (_find_list_dev(dev, dev_opp))
144 dev_opp = tmp_dev_opp; 214 return dev_opp;
145 break;
146 }
147 }
148 215
149 return dev_opp; 216 return ERR_PTR(-ENODEV);
150} 217}
151 218
152/** 219/**
@@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
172 opp_rcu_lockdep_assert(); 239 opp_rcu_lockdep_assert();
173 240
174 tmp_opp = rcu_dereference(opp); 241 tmp_opp = rcu_dereference(opp);
175 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 242 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
176 pr_err("%s: Invalid parameters\n", __func__); 243 pr_err("%s: Invalid parameters\n", __func__);
177 else 244 else
178 v = tmp_opp->u_volt; 245 v = tmp_opp->u_volt;
@@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
204 opp_rcu_lockdep_assert(); 271 opp_rcu_lockdep_assert();
205 272
206 tmp_opp = rcu_dereference(opp); 273 tmp_opp = rcu_dereference(opp);
207 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 274 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
208 pr_err("%s: Invalid parameters\n", __func__); 275 pr_err("%s: Invalid parameters\n", __func__);
209 else 276 else
210 f = tmp_opp->rate; 277 f = tmp_opp->rate;
@@ -214,6 +281,66 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
214EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); 281EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
215 282
216/** 283/**
284 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
285 * @opp: opp for which turbo mode is being verified
286 *
287 * Turbo OPPs are not for normal use, and can be enabled (under certain
288 * conditions) for short duration of times to finish high throughput work
289 * quickly. Running on them for longer times may overheat the chip.
290 *
291 * Return: true if opp is turbo opp, else false.
292 *
293 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
294 * protected pointer. This means that opp which could have been fetched by
295 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
296 * under RCU lock. The pointer returned by the opp_find_freq family must be
297 * used in the same section as the usage of this function with the pointer
298 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
299 * pointer.
300 */
301bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
302{
303 struct dev_pm_opp *tmp_opp;
304
305 opp_rcu_lockdep_assert();
306
307 tmp_opp = rcu_dereference(opp);
308 if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
309 pr_err("%s: Invalid parameters\n", __func__);
310 return false;
311 }
312
313 return tmp_opp->turbo;
314}
315EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
316
317/**
318 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
319 * @dev: device for which we do this operation
320 *
321 * Return: This function returns the max clock latency in nanoseconds.
322 *
323 * Locking: This function takes rcu_read_lock().
324 */
325unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
326{
327 struct device_opp *dev_opp;
328 unsigned long clock_latency_ns;
329
330 rcu_read_lock();
331
332 dev_opp = _find_device_opp(dev);
333 if (IS_ERR(dev_opp))
334 clock_latency_ns = 0;
335 else
336 clock_latency_ns = dev_opp->clock_latency_ns_max;
337
338 rcu_read_unlock();
339 return clock_latency_ns;
340}
341EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
342
343/**
217 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list 344 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
218 * @dev: device for which we do this operation 345 * @dev: device for which we do this operation
219 * 346 *
@@ -407,18 +534,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
407} 534}
408EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 535EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
409 536
537/* List-dev Helpers */
538static void _kfree_list_dev_rcu(struct rcu_head *head)
539{
540 struct device_list_opp *list_dev;
541
542 list_dev = container_of(head, struct device_list_opp, rcu_head);
543 kfree_rcu(list_dev, rcu_head);
544}
545
546static void _remove_list_dev(struct device_list_opp *list_dev,
547 struct device_opp *dev_opp)
548{
549 list_del(&list_dev->node);
550 call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
551 _kfree_list_dev_rcu);
552}
553
554static struct device_list_opp *_add_list_dev(const struct device *dev,
555 struct device_opp *dev_opp)
556{
557 struct device_list_opp *list_dev;
558
559 list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
560 if (!list_dev)
561 return NULL;
562
563 /* Initialize list-dev */
564 list_dev->dev = dev;
565 list_add_rcu(&list_dev->node, &dev_opp->dev_list);
566
567 return list_dev;
568}
569
410/** 570/**
411 * _add_device_opp() - Allocate a new device OPP table 571 * _add_device_opp() - Find device OPP table or allocate a new one
412 * @dev: device for which we do this operation 572 * @dev: device for which we do this operation
413 * 573 *
414 * New device node which uses OPPs - used when multiple devices with OPP tables 574 * It tries to find an existing table first, if it couldn't find one, it
415 * are maintained. 575 * allocates a new OPP table and returns that.
416 * 576 *
417 * Return: valid device_opp pointer if success, else NULL. 577 * Return: valid device_opp pointer if success, else NULL.
418 */ 578 */
419static struct device_opp *_add_device_opp(struct device *dev) 579static struct device_opp *_add_device_opp(struct device *dev)
420{ 580{
421 struct device_opp *dev_opp; 581 struct device_opp *dev_opp;
582 struct device_list_opp *list_dev;
583
584 /* Check for existing list for 'dev' first */
585 dev_opp = _find_device_opp(dev);
586 if (!IS_ERR(dev_opp))
587 return dev_opp;
422 588
423 /* 589 /*
424 * Allocate a new device OPP table. In the infrequent case where a new 590 * Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +594,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
428 if (!dev_opp) 594 if (!dev_opp)
429 return NULL; 595 return NULL;
430 596
431 dev_opp->dev = dev; 597 INIT_LIST_HEAD(&dev_opp->dev_list);
598
599 list_dev = _add_list_dev(dev, dev_opp);
600 if (!list_dev) {
601 kfree(dev_opp);
602 return NULL;
603 }
604
432 srcu_init_notifier_head(&dev_opp->srcu_head); 605 srcu_init_notifier_head(&dev_opp->srcu_head);
433 INIT_LIST_HEAD(&dev_opp->opp_list); 606 INIT_LIST_HEAD(&dev_opp->opp_list);
434 607
@@ -438,6 +611,185 @@ static struct device_opp *_add_device_opp(struct device *dev)
438} 611}
439 612
440/** 613/**
614 * _kfree_device_rcu() - Free device_opp RCU handler
615 * @head: RCU head
616 */
617static void _kfree_device_rcu(struct rcu_head *head)
618{
619 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
620
621 kfree_rcu(device_opp, rcu_head);
622}
623
624/**
625 * _remove_device_opp() - Removes a device OPP table
626 * @dev_opp: device OPP table to be removed.
627 *
628 * Removes/frees device OPP table if it doesn't contain any OPPs.
629 */
630static void _remove_device_opp(struct device_opp *dev_opp)
631{
632 struct device_list_opp *list_dev;
633
634 if (!list_empty(&dev_opp->opp_list))
635 return;
636
637 list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
638 node);
639
640 _remove_list_dev(list_dev, dev_opp);
641
642 /* dev_list must be empty now */
643 WARN_ON(!list_empty(&dev_opp->dev_list));
644
645 list_del_rcu(&dev_opp->node);
646 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
647 _kfree_device_rcu);
648}
649
650/**
651 * _kfree_opp_rcu() - Free OPP RCU handler
652 * @head: RCU head
653 */
654static void _kfree_opp_rcu(struct rcu_head *head)
655{
656 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
657
658 kfree_rcu(opp, rcu_head);
659}
660
661/**
662 * _opp_remove() - Remove an OPP from a table definition
663 * @dev_opp: points back to the device_opp struct this opp belongs to
664 * @opp: pointer to the OPP to remove
665 * @notify: OPP_EVENT_REMOVE notification should be sent or not
666 *
667 * This function removes an opp definition from the opp list.
668 *
669 * Locking: The internal device_opp and opp structures are RCU protected.
670 * It is assumed that the caller holds required mutex for an RCU updater
671 * strategy.
672 */
673static void _opp_remove(struct device_opp *dev_opp,
674 struct dev_pm_opp *opp, bool notify)
675{
676 /*
677 * Notify the changes in the availability of the operable
678 * frequency/voltage list.
679 */
680 if (notify)
681 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
682 list_del_rcu(&opp->node);
683 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
684
685 _remove_device_opp(dev_opp);
686}
687
688/**
689 * dev_pm_opp_remove() - Remove an OPP from OPP list
690 * @dev: device for which we do this operation
691 * @freq: OPP to remove with matching 'freq'
692 *
693 * This function removes an opp from the opp list.
694 *
695 * Locking: The internal device_opp and opp structures are RCU protected.
696 * Hence this function internally uses RCU updater strategy with mutex locks
697 * to keep the integrity of the internal data structures. Callers should ensure
698 * that this function is *NOT* called under RCU protection or in contexts where
699 * mutex cannot be locked.
700 */
701void dev_pm_opp_remove(struct device *dev, unsigned long freq)
702{
703 struct dev_pm_opp *opp;
704 struct device_opp *dev_opp;
705 bool found = false;
706
707 /* Hold our list modification lock here */
708 mutex_lock(&dev_opp_list_lock);
709
710 dev_opp = _find_device_opp(dev);
711 if (IS_ERR(dev_opp))
712 goto unlock;
713
714 list_for_each_entry(opp, &dev_opp->opp_list, node) {
715 if (opp->rate == freq) {
716 found = true;
717 break;
718 }
719 }
720
721 if (!found) {
722 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
723 __func__, freq);
724 goto unlock;
725 }
726
727 _opp_remove(dev_opp, opp, true);
728unlock:
729 mutex_unlock(&dev_opp_list_lock);
730}
731EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
732
733static struct dev_pm_opp *_allocate_opp(struct device *dev,
734 struct device_opp **dev_opp)
735{
736 struct dev_pm_opp *opp;
737
738 /* allocate new OPP node */
739 opp = kzalloc(sizeof(*opp), GFP_KERNEL);
740 if (!opp)
741 return NULL;
742
743 INIT_LIST_HEAD(&opp->node);
744
745 *dev_opp = _add_device_opp(dev);
746 if (!*dev_opp) {
747 kfree(opp);
748 return NULL;
749 }
750
751 return opp;
752}
753
754static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
755 struct device_opp *dev_opp)
756{
757 struct dev_pm_opp *opp;
758 struct list_head *head = &dev_opp->opp_list;
759
760 /*
761 * Insert new OPP in order of increasing frequency and discard if
762 * already present.
763 *
764 * Need to use &dev_opp->opp_list in the condition part of the 'for'
765 * loop, don't replace it with head otherwise it will become an infinite
766 * loop.
767 */
768 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
769 if (new_opp->rate > opp->rate) {
770 head = &opp->node;
771 continue;
772 }
773
774 if (new_opp->rate < opp->rate)
775 break;
776
777 /* Duplicate OPPs */
778 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
779 __func__, opp->rate, opp->u_volt, opp->available,
780 new_opp->rate, new_opp->u_volt, new_opp->available);
781
782 return opp->available && new_opp->u_volt == opp->u_volt ?
783 0 : -EEXIST;
784 }
785
786 new_opp->dev_opp = dev_opp;
787 list_add_rcu(&new_opp->node, head);
788
789 return 0;
790}
791
792/**
441 * _opp_add_dynamic() - Allocate a dynamic OPP. 793 * _opp_add_dynamic() - Allocate a dynamic OPP.
442 * @dev: device for which we do this operation 794 * @dev: device for which we do this operation
443 * @freq: Frequency in Hz for this OPP 795 * @freq: Frequency in Hz for this OPP
@@ -467,64 +819,29 @@ static struct device_opp *_add_device_opp(struct device *dev)
467static int _opp_add_dynamic(struct device *dev, unsigned long freq, 819static int _opp_add_dynamic(struct device *dev, unsigned long freq,
468 long u_volt, bool dynamic) 820 long u_volt, bool dynamic)
469{ 821{
470 struct device_opp *dev_opp = NULL; 822 struct device_opp *dev_opp;
471 struct dev_pm_opp *opp, *new_opp; 823 struct dev_pm_opp *new_opp;
472 struct list_head *head;
473 int ret; 824 int ret;
474 825
475 /* allocate new OPP node */
476 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
477 if (!new_opp)
478 return -ENOMEM;
479
480 /* Hold our list modification lock here */ 826 /* Hold our list modification lock here */
481 mutex_lock(&dev_opp_list_lock); 827 mutex_lock(&dev_opp_list_lock);
482 828
829 new_opp = _allocate_opp(dev, &dev_opp);
830 if (!new_opp) {
831 ret = -ENOMEM;
832 goto unlock;
833 }
834
483 /* populate the opp table */ 835 /* populate the opp table */
484 new_opp->rate = freq; 836 new_opp->rate = freq;
485 new_opp->u_volt = u_volt; 837 new_opp->u_volt = u_volt;
486 new_opp->available = true; 838 new_opp->available = true;
487 new_opp->dynamic = dynamic; 839 new_opp->dynamic = dynamic;
488 840
489 /* Check for existing list for 'dev' */ 841 ret = _opp_add(dev, new_opp, dev_opp);
490 dev_opp = _find_device_opp(dev); 842 if (ret)
491 if (IS_ERR(dev_opp)) {
492 dev_opp = _add_device_opp(dev);
493 if (!dev_opp) {
494 ret = -ENOMEM;
495 goto free_opp;
496 }
497
498 head = &dev_opp->opp_list;
499 goto list_add;
500 }
501
502 /*
503 * Insert new OPP in order of increasing frequency
504 * and discard if already present
505 */
506 head = &dev_opp->opp_list;
507 list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
508 if (new_opp->rate <= opp->rate)
509 break;
510 else
511 head = &opp->node;
512 }
513
514 /* Duplicate OPPs ? */
515 if (new_opp->rate == opp->rate) {
516 ret = opp->available && new_opp->u_volt == opp->u_volt ?
517 0 : -EEXIST;
518
519 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
520 __func__, opp->rate, opp->u_volt, opp->available,
521 new_opp->rate, new_opp->u_volt, new_opp->available);
522 goto free_opp; 843 goto free_opp;
523 }
524 844
525list_add:
526 new_opp->dev_opp = dev_opp;
527 list_add_rcu(&new_opp->node, head);
528 mutex_unlock(&dev_opp_list_lock); 845 mutex_unlock(&dev_opp_list_lock);
529 846
530 /* 847 /*
@@ -535,20 +852,52 @@ list_add:
535 return 0; 852 return 0;
536 853
537free_opp: 854free_opp:
855 _opp_remove(dev_opp, new_opp, false);
856unlock:
538 mutex_unlock(&dev_opp_list_lock); 857 mutex_unlock(&dev_opp_list_lock);
539 kfree(new_opp);
540 return ret; 858 return ret;
541} 859}
542 860
861/* TODO: Support multiple regulators */
862static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
863{
864 u32 microvolt[3] = {0};
865 int count, ret;
866
867 count = of_property_count_u32_elems(opp->np, "opp-microvolt");
868 if (!count)
869 return 0;
870
871 /* There can be one or three elements here */
872 if (count != 1 && count != 3) {
873 dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
874 __func__, count);
875 return -EINVAL;
876 }
877
878 ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
879 count);
880 if (ret) {
881 dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
882 ret);
883 return -EINVAL;
884 }
885
886 opp->u_volt = microvolt[0];
887 opp->u_volt_min = microvolt[1];
888 opp->u_volt_max = microvolt[2];
889
890 return 0;
891}
892
543/** 893/**
544 * dev_pm_opp_add() - Add an OPP table from a table definitions 894 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
545 * @dev: device for which we do this operation 895 * @dev: device for which we do this operation
546 * @freq: Frequency in Hz for this OPP 896 * @np: device node
547 * @u_volt: Voltage in uVolts for this OPP
548 * 897 *
549 * This function adds an opp definition to the opp list and returns status. 898 * This function adds an opp definition to the opp list and returns status. The
550 * The opp is made available by default and it can be controlled using 899 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
551 * dev_pm_opp_enable/disable functions. 900 * removed by dev_pm_opp_remove.
552 * 901 *
553 * Locking: The internal device_opp and opp structures are RCU protected. 902 * Locking: The internal device_opp and opp structures are RCU protected.
554 * Hence this function internally uses RCU updater strategy with mutex locks 903 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -562,108 +911,119 @@ free_opp:
562 * -EEXIST Freq are same and volt are different OR 911 * -EEXIST Freq are same and volt are different OR
563 * Duplicate OPPs (both freq and volt are same) and !opp->available 912 * Duplicate OPPs (both freq and volt are same) and !opp->available
564 * -ENOMEM Memory allocation failure 913 * -ENOMEM Memory allocation failure
914 * -EINVAL Failed parsing the OPP node
565 */ 915 */
566int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 916static int _opp_add_static_v2(struct device *dev, struct device_node *np)
567{ 917{
568 return _opp_add_dynamic(dev, freq, u_volt, true); 918 struct device_opp *dev_opp;
569} 919 struct dev_pm_opp *new_opp;
570EXPORT_SYMBOL_GPL(dev_pm_opp_add); 920 u64 rate;
921 u32 val;
922 int ret;
571 923
572/** 924 /* Hold our list modification lock here */
573 * _kfree_opp_rcu() - Free OPP RCU handler 925 mutex_lock(&dev_opp_list_lock);
574 * @head: RCU head
575 */
576static void _kfree_opp_rcu(struct rcu_head *head)
577{
578 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
579 926
580 kfree_rcu(opp, rcu_head); 927 new_opp = _allocate_opp(dev, &dev_opp);
581} 928 if (!new_opp) {
929 ret = -ENOMEM;
930 goto unlock;
931 }
582 932
583/** 933 ret = of_property_read_u64(np, "opp-hz", &rate);
584 * _kfree_device_rcu() - Free device_opp RCU handler 934 if (ret < 0) {
585 * @head: RCU head 935 dev_err(dev, "%s: opp-hz not found\n", __func__);
586 */ 936 goto free_opp;
587static void _kfree_device_rcu(struct rcu_head *head) 937 }
588{
589 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
590 938
591 kfree_rcu(device_opp, rcu_head); 939 /*
592} 940 * Rate is defined as an unsigned long in clk API, and so casting
941 * explicitly to its type. Must be fixed once rate is 64 bit
942 * guaranteed in clk API.
943 */
944 new_opp->rate = (unsigned long)rate;
945 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
946
947 new_opp->np = np;
948 new_opp->dynamic = false;
949 new_opp->available = true;
950
951 if (!of_property_read_u32(np, "clock-latency-ns", &val))
952 new_opp->clock_latency_ns = val;
953
954 ret = opp_get_microvolt(new_opp, dev);
955 if (ret)
956 goto free_opp;
957
958 if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
959 new_opp->u_amp = val;
960
961 ret = _opp_add(dev, new_opp, dev_opp);
962 if (ret)
963 goto free_opp;
964
965 /* OPP to select on device suspend */
966 if (of_property_read_bool(np, "opp-suspend")) {
967 if (dev_opp->suspend_opp)
968 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
969 __func__, dev_opp->suspend_opp->rate,
970 new_opp->rate);
971 else
972 dev_opp->suspend_opp = new_opp;
973 }
974
975 if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
976 dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
977
978 mutex_unlock(&dev_opp_list_lock);
979
980 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
981 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
982 new_opp->u_volt_min, new_opp->u_volt_max,
983 new_opp->clock_latency_ns);
593 984
594/**
595 * _opp_remove() - Remove an OPP from a table definition
596 * @dev_opp: points back to the device_opp struct this opp belongs to
597 * @opp: pointer to the OPP to remove
598 *
599 * This function removes an opp definition from the opp list.
600 *
601 * Locking: The internal device_opp and opp structures are RCU protected.
602 * It is assumed that the caller holds required mutex for an RCU updater
603 * strategy.
604 */
605static void _opp_remove(struct device_opp *dev_opp,
606 struct dev_pm_opp *opp)
607{
608 /* 985 /*
609 * Notify the changes in the availability of the operable 986 * Notify the changes in the availability of the operable
610 * frequency/voltage list. 987 * frequency/voltage list.
611 */ 988 */
612 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); 989 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
613 list_del_rcu(&opp->node); 990 return 0;
614 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
615 991
616 if (list_empty(&dev_opp->opp_list)) { 992free_opp:
617 list_del_rcu(&dev_opp->node); 993 _opp_remove(dev_opp, new_opp, false);
618 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, 994unlock:
619 _kfree_device_rcu); 995 mutex_unlock(&dev_opp_list_lock);
620 } 996 return ret;
621} 997}
622 998
623/** 999/**
624 * dev_pm_opp_remove() - Remove an OPP from OPP list 1000 * dev_pm_opp_add() - Add an OPP table from a table definitions
625 * @dev: device for which we do this operation 1001 * @dev: device for which we do this operation
626 * @freq: OPP to remove with matching 'freq' 1002 * @freq: Frequency in Hz for this OPP
1003 * @u_volt: Voltage in uVolts for this OPP
627 * 1004 *
628 * This function removes an opp from the opp list. 1005 * This function adds an opp definition to the opp list and returns status.
1006 * The opp is made available by default and it can be controlled using
1007 * dev_pm_opp_enable/disable functions.
629 * 1008 *
630 * Locking: The internal device_opp and opp structures are RCU protected. 1009 * Locking: The internal device_opp and opp structures are RCU protected.
631 * Hence this function internally uses RCU updater strategy with mutex locks 1010 * Hence this function internally uses RCU updater strategy with mutex locks
632 * to keep the integrity of the internal data structures. Callers should ensure 1011 * to keep the integrity of the internal data structures. Callers should ensure
633 * that this function is *NOT* called under RCU protection or in contexts where 1012 * that this function is *NOT* called under RCU protection or in contexts where
634 * mutex cannot be locked. 1013 * mutex cannot be locked.
1014 *
1015 * Return:
1016 * 0 On success OR
1017 * Duplicate OPPs (both freq and volt are same) and opp->available
1018 * -EEXIST Freq are same and volt are different OR
1019 * Duplicate OPPs (both freq and volt are same) and !opp->available
1020 * -ENOMEM Memory allocation failure
635 */ 1021 */
636void dev_pm_opp_remove(struct device *dev, unsigned long freq) 1022int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
637{ 1023{
638 struct dev_pm_opp *opp; 1024 return _opp_add_dynamic(dev, freq, u_volt, true);
639 struct device_opp *dev_opp;
640 bool found = false;
641
642 /* Hold our list modification lock here */
643 mutex_lock(&dev_opp_list_lock);
644
645 dev_opp = _find_device_opp(dev);
646 if (IS_ERR(dev_opp))
647 goto unlock;
648
649 list_for_each_entry(opp, &dev_opp->opp_list, node) {
650 if (opp->rate == freq) {
651 found = true;
652 break;
653 }
654 }
655
656 if (!found) {
657 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
658 __func__, freq);
659 goto unlock;
660 }
661
662 _opp_remove(dev_opp, opp);
663unlock:
664 mutex_unlock(&dev_opp_list_lock);
665} 1025}
666EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1026EXPORT_SYMBOL_GPL(dev_pm_opp_add);
667 1027
668/** 1028/**
669 * _opp_set_availability() - helper to set the availability of an opp 1029 * _opp_set_availability() - helper to set the availability of an opp
@@ -825,28 +1185,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
825 1185
826#ifdef CONFIG_OF 1186#ifdef CONFIG_OF
827/** 1187/**
828 * of_init_opp_table() - Initialize opp table from device tree 1188 * of_free_opp_table() - Free OPP table entries created from static DT entries
829 * @dev: device pointer used to lookup device OPPs. 1189 * @dev: device pointer used to lookup device OPPs.
830 * 1190 *
831 * Register the initial OPP table with the OPP library for given device. 1191 * Free OPPs created using static entries present in DT.
832 * 1192 *
833 * Locking: The internal device_opp and opp structures are RCU protected. 1193 * Locking: The internal device_opp and opp structures are RCU protected.
834 * Hence this function indirectly uses RCU updater strategy with mutex locks 1194 * Hence this function indirectly uses RCU updater strategy with mutex locks
835 * to keep the integrity of the internal data structures. Callers should ensure 1195 * to keep the integrity of the internal data structures. Callers should ensure
836 * that this function is *NOT* called under RCU protection or in contexts where 1196 * that this function is *NOT* called under RCU protection or in contexts where
837 * mutex cannot be locked. 1197 * mutex cannot be locked.
838 *
839 * Return:
840 * 0 On success OR
841 * Duplicate OPPs (both freq and volt are same) and opp->available
842 * -EEXIST Freq are same and volt are different OR
843 * Duplicate OPPs (both freq and volt are same) and !opp->available
844 * -ENOMEM Memory allocation failure
845 * -ENODEV when 'operating-points' property is not found or is invalid data
846 * in device node.
847 * -ENODATA when empty 'operating-points' property is found
848 */ 1198 */
849int of_init_opp_table(struct device *dev) 1199void of_free_opp_table(struct device *dev)
1200{
1201 struct device_opp *dev_opp;
1202 struct dev_pm_opp *opp, *tmp;
1203
1204 /* Hold our list modification lock here */
1205 mutex_lock(&dev_opp_list_lock);
1206
1207 /* Check for existing list for 'dev' */
1208 dev_opp = _find_device_opp(dev);
1209 if (IS_ERR(dev_opp)) {
1210 int error = PTR_ERR(dev_opp);
1211
1212 if (error != -ENODEV)
1213 WARN(1, "%s: dev_opp: %d\n",
1214 IS_ERR_OR_NULL(dev) ?
1215 "Invalid device" : dev_name(dev),
1216 error);
1217 goto unlock;
1218 }
1219
1220 /* Find if dev_opp manages a single device */
1221 if (list_is_singular(&dev_opp->dev_list)) {
1222 /* Free static OPPs */
1223 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
1224 if (!opp->dynamic)
1225 _opp_remove(dev_opp, opp, true);
1226 }
1227 } else {
1228 _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
1229 }
1230
1231unlock:
1232 mutex_unlock(&dev_opp_list_lock);
1233}
1234EXPORT_SYMBOL_GPL(of_free_opp_table);
1235
1236void of_cpumask_free_opp_table(cpumask_var_t cpumask)
1237{
1238 struct device *cpu_dev;
1239 int cpu;
1240
1241 WARN_ON(cpumask_empty(cpumask));
1242
1243 for_each_cpu(cpu, cpumask) {
1244 cpu_dev = get_cpu_device(cpu);
1245 if (!cpu_dev) {
1246 pr_err("%s: failed to get cpu%d device\n", __func__,
1247 cpu);
1248 continue;
1249 }
1250
1251 of_free_opp_table(cpu_dev);
1252 }
1253}
1254EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
1255
1256/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
1257static struct device_node *
1258_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
1259{
1260 struct device_node *opp_np;
1261
1262 opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
1263 if (!opp_np) {
1264 dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
1265 __func__, prop->name);
1266 return ERR_PTR(-EINVAL);
1267 }
1268
1269 return opp_np;
1270}
1271
1272/* Returns opp descriptor node for a device. Caller must do of_node_put() */
1273static struct device_node *_of_get_opp_desc_node(struct device *dev)
1274{
1275 const struct property *prop;
1276
1277 prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
1278 if (!prop)
1279 return ERR_PTR(-ENODEV);
1280 if (!prop->value)
1281 return ERR_PTR(-ENODATA);
1282
1283 /*
1284 * TODO: Support for multiple OPP tables.
1285 *
1286 * There should be only ONE phandle present in "operating-points-v2"
1287 * property.
1288 */
1289 if (prop->length != sizeof(__be32)) {
1290 dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
1291 return ERR_PTR(-EINVAL);
1292 }
1293
1294 return _of_get_opp_desc_node_from_prop(dev, prop);
1295}
1296
1297/* Initializes OPP tables based on new bindings */
1298static int _of_init_opp_table_v2(struct device *dev,
1299 const struct property *prop)
1300{
1301 struct device_node *opp_np, *np;
1302 struct device_opp *dev_opp;
1303 int ret = 0, count = 0;
1304
1305 if (!prop->value)
1306 return -ENODATA;
1307
1308 /* Get opp node */
1309 opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
1310 if (IS_ERR(opp_np))
1311 return PTR_ERR(opp_np);
1312
1313 dev_opp = _managed_opp(opp_np);
1314 if (dev_opp) {
1315 /* OPPs are already managed */
1316 if (!_add_list_dev(dev, dev_opp))
1317 ret = -ENOMEM;
1318 goto put_opp_np;
1319 }
1320
1321 /* We have opp-list node now, iterate over it and add OPPs */
1322 for_each_available_child_of_node(opp_np, np) {
1323 count++;
1324
1325 ret = _opp_add_static_v2(dev, np);
1326 if (ret) {
1327 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1328 ret);
1329 goto free_table;
1330 }
1331 }
1332
 1333 /* There should be one or more OPPs defined */
1334 if (WARN_ON(!count)) {
1335 ret = -ENOENT;
1336 goto put_opp_np;
1337 }
1338
1339 dev_opp = _find_device_opp(dev);
1340 if (WARN_ON(IS_ERR(dev_opp))) {
1341 ret = PTR_ERR(dev_opp);
1342 goto free_table;
1343 }
1344
1345 dev_opp->np = opp_np;
1346 dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1347
1348 of_node_put(opp_np);
1349 return 0;
1350
1351free_table:
1352 of_free_opp_table(dev);
1353put_opp_np:
1354 of_node_put(opp_np);
1355
1356 return ret;
1357}
1358
1359/* Initializes OPP tables based on old-deprecated bindings */
1360static int _of_init_opp_table_v1(struct device *dev)
850{ 1361{
851 const struct property *prop; 1362 const struct property *prop;
852 const __be32 *val; 1363 const __be32 *val;
@@ -881,47 +1392,177 @@ int of_init_opp_table(struct device *dev)
881 1392
882 return 0; 1393 return 0;
883} 1394}
884EXPORT_SYMBOL_GPL(of_init_opp_table);
885 1395
886/** 1396/**
887 * of_free_opp_table() - Free OPP table entries created from static DT entries 1397 * of_init_opp_table() - Initialize opp table from device tree
888 * @dev: device pointer used to lookup device OPPs. 1398 * @dev: device pointer used to lookup device OPPs.
889 * 1399 *
890 * Free OPPs created using static entries present in DT. 1400 * Register the initial OPP table with the OPP library for given device.
891 * 1401 *
892 * Locking: The internal device_opp and opp structures are RCU protected. 1402 * Locking: The internal device_opp and opp structures are RCU protected.
893 * Hence this function indirectly uses RCU updater strategy with mutex locks 1403 * Hence this function indirectly uses RCU updater strategy with mutex locks
894 * to keep the integrity of the internal data structures. Callers should ensure 1404 * to keep the integrity of the internal data structures. Callers should ensure
895 * that this function is *NOT* called under RCU protection or in contexts where 1405 * that this function is *NOT* called under RCU protection or in contexts where
896 * mutex cannot be locked. 1406 * mutex cannot be locked.
1407 *
1408 * Return:
1409 * 0 On success OR
1410 * Duplicate OPPs (both freq and volt are same) and opp->available
1411 * -EEXIST Freq are same and volt are different OR
1412 * Duplicate OPPs (both freq and volt are same) and !opp->available
1413 * -ENOMEM Memory allocation failure
1414 * -ENODEV when 'operating-points' property is not found or is invalid data
1415 * in device node.
1416 * -ENODATA when empty 'operating-points' property is found
1417 * -EINVAL when invalid entries are found in opp-v2 table
897 */ 1418 */
898void of_free_opp_table(struct device *dev) 1419int of_init_opp_table(struct device *dev)
1420{
1421 const struct property *prop;
1422
1423 /*
1424 * OPPs have two version of bindings now. The older one is deprecated,
1425 * try for the new binding first.
1426 */
1427 prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
1428 if (!prop) {
1429 /*
1430 * Try old-deprecated bindings for backward compatibility with
1431 * older dtbs.
1432 */
1433 return _of_init_opp_table_v1(dev);
1434 }
1435
1436 return _of_init_opp_table_v2(dev, prop);
1437}
1438EXPORT_SYMBOL_GPL(of_init_opp_table);
1439
1440int of_cpumask_init_opp_table(cpumask_var_t cpumask)
1441{
1442 struct device *cpu_dev;
1443 int cpu, ret = 0;
1444
1445 WARN_ON(cpumask_empty(cpumask));
1446
1447 for_each_cpu(cpu, cpumask) {
1448 cpu_dev = get_cpu_device(cpu);
1449 if (!cpu_dev) {
1450 pr_err("%s: failed to get cpu%d device\n", __func__,
1451 cpu);
1452 continue;
1453 }
1454
1455 ret = of_init_opp_table(cpu_dev);
1456 if (ret) {
1457 pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
1458 __func__, cpu, ret);
1459
1460 /* Free all other OPPs */
1461 of_cpumask_free_opp_table(cpumask);
1462 break;
1463 }
1464 }
1465
1466 return ret;
1467}
1468EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
1469
1470/* Required only for V1 bindings, as v2 can manage it from DT itself */
1471int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
899{ 1472{
1473 struct device_list_opp *list_dev;
900 struct device_opp *dev_opp; 1474 struct device_opp *dev_opp;
901 struct dev_pm_opp *opp, *tmp; 1475 struct device *dev;
1476 int cpu, ret = 0;
902 1477
903 /* Check for existing list for 'dev' */ 1478 rcu_read_lock();
904 dev_opp = _find_device_opp(dev); 1479
1480 dev_opp = _find_device_opp(cpu_dev);
905 if (IS_ERR(dev_opp)) { 1481 if (IS_ERR(dev_opp)) {
906 int error = PTR_ERR(dev_opp); 1482 ret = -EINVAL;
907 if (error != -ENODEV) 1483 goto out_rcu_read_unlock;
908 WARN(1, "%s: dev_opp: %d\n",
909 IS_ERR_OR_NULL(dev) ?
910 "Invalid device" : dev_name(dev),
911 error);
912 return;
913 } 1484 }
914 1485
915 /* Hold our list modification lock here */ 1486 for_each_cpu(cpu, cpumask) {
916 mutex_lock(&dev_opp_list_lock); 1487 if (cpu == cpu_dev->id)
1488 continue;
917 1489
918 /* Free static OPPs */ 1490 dev = get_cpu_device(cpu);
919 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 1491 if (!dev) {
920 if (!opp->dynamic) 1492 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
921 _opp_remove(dev_opp, opp); 1493 __func__, cpu);
1494 continue;
1495 }
1496
1497 list_dev = _add_list_dev(dev, dev_opp);
1498 if (!list_dev) {
1499 dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
1500 __func__, cpu);
1501 continue;
1502 }
922 } 1503 }
1504out_rcu_read_unlock:
1505 rcu_read_unlock();
923 1506
924 mutex_unlock(&dev_opp_list_lock); 1507 return 0;
925} 1508}
926EXPORT_SYMBOL_GPL(of_free_opp_table); 1509EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
1510
1511/*
1512 * Works only for OPP v2 bindings.
1513 *
1514 * cpumask should be already set to mask of cpu_dev->id.
1515 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
1516 */
1517int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
1518{
1519 struct device_node *np, *tmp_np;
1520 struct device *tcpu_dev;
1521 int cpu, ret = 0;
1522
1523 /* Get OPP descriptor node */
1524 np = _of_get_opp_desc_node(cpu_dev);
1525 if (IS_ERR(np)) {
1526 dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
1527 PTR_ERR(np));
1528 return -ENOENT;
1529 }
1530
1531 /* OPPs are shared ? */
1532 if (!of_property_read_bool(np, "opp-shared"))
1533 goto put_cpu_node;
1534
1535 for_each_possible_cpu(cpu) {
1536 if (cpu == cpu_dev->id)
1537 continue;
1538
1539 tcpu_dev = get_cpu_device(cpu);
1540 if (!tcpu_dev) {
1541 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
1542 __func__, cpu);
1543 ret = -ENODEV;
1544 goto put_cpu_node;
1545 }
1546
1547 /* Get OPP descriptor node */
1548 tmp_np = _of_get_opp_desc_node(tcpu_dev);
1549 if (IS_ERR(tmp_np)) {
1550 dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
1551 __func__, PTR_ERR(tmp_np));
1552 ret = PTR_ERR(tmp_np);
1553 goto put_cpu_node;
1554 }
1555
1556 /* CPUs are sharing opp node */
1557 if (np == tmp_np)
1558 cpumask_set_cpu(cpu, cpumask);
1559
1560 of_node_put(tmp_np);
1561 }
1562
1563put_cpu_node:
1564 of_node_put(np);
1565 return ret;
1566}
1567EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
927#endif 1568#endif
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 528a82bf5038..c3583cdfadbd 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,6 +36,12 @@ struct private_data {
36 unsigned int voltage_tolerance; /* in percentage */ 36 unsigned int voltage_tolerance; /* in percentage */
37}; 37};
38 38
39static struct freq_attr *cpufreq_dt_attr[] = {
40 &cpufreq_freq_attr_scaling_available_freqs,
41 NULL, /* Extra space for boost-attr if required */
42 NULL,
43};
44
39static int set_target(struct cpufreq_policy *policy, unsigned int index) 45static int set_target(struct cpufreq_policy *policy, unsigned int index)
40{ 46{
41 struct dev_pm_opp *opp; 47 struct dev_pm_opp *opp;
@@ -184,7 +190,6 @@ try_again:
184 190
185static int cpufreq_init(struct cpufreq_policy *policy) 191static int cpufreq_init(struct cpufreq_policy *policy)
186{ 192{
187 struct cpufreq_dt_platform_data *pd;
188 struct cpufreq_frequency_table *freq_table; 193 struct cpufreq_frequency_table *freq_table;
189 struct device_node *np; 194 struct device_node *np;
190 struct private_data *priv; 195 struct private_data *priv;
@@ -193,6 +198,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
193 struct clk *cpu_clk; 198 struct clk *cpu_clk;
194 unsigned long min_uV = ~0, max_uV = 0; 199 unsigned long min_uV = ~0, max_uV = 0;
195 unsigned int transition_latency; 200 unsigned int transition_latency;
201 bool need_update = false;
196 int ret; 202 int ret;
197 203
198 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); 204 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@@ -208,8 +214,47 @@ static int cpufreq_init(struct cpufreq_policy *policy)
208 goto out_put_reg_clk; 214 goto out_put_reg_clk;
209 } 215 }
210 216
211 /* OPPs might be populated at runtime, don't check for error here */ 217 /* Get OPP-sharing information from "operating-points-v2" bindings */
212 of_init_opp_table(cpu_dev); 218 ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
219 if (ret) {
220 /*
221 * operating-points-v2 not supported, fallback to old method of
222 * finding shared-OPPs for backward compatibility.
223 */
224 if (ret == -ENOENT)
225 need_update = true;
226 else
227 goto out_node_put;
228 }
229
230 /*
231 * Initialize OPP tables for all policy->cpus. They will be shared by
232 * all CPUs which have marked their CPUs shared with OPP bindings.
233 *
234 * For platforms not using operating-points-v2 bindings, we do this
235 * before updating policy->cpus. Otherwise, we will end up creating
236 * duplicate OPPs for policy->cpus.
237 *
238 * OPPs might be populated at runtime, don't check for error here
239 */
240 of_cpumask_init_opp_table(policy->cpus);
241
242 if (need_update) {
243 struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
244
245 if (!pd || !pd->independent_clocks)
246 cpumask_setall(policy->cpus);
247
248 /*
249 * OPP tables are initialized only for policy->cpu, do it for
250 * others as well.
251 */
252 set_cpus_sharing_opps(cpu_dev, policy->cpus);
253
254 of_property_read_u32(np, "clock-latency", &transition_latency);
255 } else {
256 transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
257 }
213 258
214 /* 259 /*
215 * But we need OPP table to function so if it is not there let's 260 * But we need OPP table to function so if it is not there let's
@@ -230,7 +275,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
230 275
231 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 276 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
232 277
233 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 278 if (!transition_latency)
234 transition_latency = CPUFREQ_ETERNAL; 279 transition_latency = CPUFREQ_ETERNAL;
235 280
236 if (!IS_ERR(cpu_reg)) { 281 if (!IS_ERR(cpu_reg)) {
@@ -291,11 +336,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
291 goto out_free_cpufreq_table; 336 goto out_free_cpufreq_table;
292 } 337 }
293 338
294 policy->cpuinfo.transition_latency = transition_latency; 339 /* Support turbo/boost mode */
340 if (policy_has_boost_freq(policy)) {
341 /* This gets disabled by core on driver unregister */
342 ret = cpufreq_enable_boost_support();
343 if (ret)
344 goto out_free_cpufreq_table;
345 cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
346 }
295 347
296 pd = cpufreq_get_driver_data(); 348 policy->cpuinfo.transition_latency = transition_latency;
297 if (!pd || !pd->independent_clocks)
298 cpumask_setall(policy->cpus);
299 349
300 of_node_put(np); 350 of_node_put(np);
301 351
@@ -306,7 +356,8 @@ out_free_cpufreq_table:
306out_free_priv: 356out_free_priv:
307 kfree(priv); 357 kfree(priv);
308out_free_opp: 358out_free_opp:
309 of_free_opp_table(cpu_dev); 359 of_cpumask_free_opp_table(policy->cpus);
360out_node_put:
310 of_node_put(np); 361 of_node_put(np);
311out_put_reg_clk: 362out_put_reg_clk:
312 clk_put(cpu_clk); 363 clk_put(cpu_clk);
@@ -322,7 +373,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
322 373
323 cpufreq_cooling_unregister(priv->cdev); 374 cpufreq_cooling_unregister(priv->cdev);
324 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 375 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
325 of_free_opp_table(priv->cpu_dev); 376 of_cpumask_free_opp_table(policy->related_cpus);
326 clk_put(policy->clk); 377 clk_put(policy->clk);
327 if (!IS_ERR(priv->cpu_reg)) 378 if (!IS_ERR(priv->cpu_reg))
328 regulator_put(priv->cpu_reg); 379 regulator_put(priv->cpu_reg);
@@ -367,7 +418,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
367 .exit = cpufreq_exit, 418 .exit = cpufreq_exit,
368 .ready = cpufreq_ready, 419 .ready = cpufreq_ready,
369 .name = "cpufreq-dt", 420 .name = "cpufreq-dt",
370 .attr = cpufreq_generic_attr, 421 .attr = cpufreq_dt_attr,
371}; 422};
372 423
373static int dt_cpufreq_probe(struct platform_device *pdev) 424static int dt_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a05cc75cc45d..abb776827bb1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2412,6 +2412,49 @@ int cpufreq_boost_supported(void)
2412} 2412}
2413EXPORT_SYMBOL_GPL(cpufreq_boost_supported); 2413EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2414 2414
2415static int create_boost_sysfs_file(void)
2416{
2417 int ret;
2418
2419 if (!cpufreq_boost_supported())
2420 return 0;
2421
2422 /*
2423 * Check if driver provides function to enable boost -
2424 * if not, use cpufreq_boost_set_sw as default
2425 */
2426 if (!cpufreq_driver->set_boost)
2427 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2428
2429 ret = cpufreq_sysfs_create_file(&boost.attr);
2430 if (ret)
2431 pr_err("%s: cannot register global BOOST sysfs file\n",
2432 __func__);
2433
2434 return ret;
2435}
2436
2437static void remove_boost_sysfs_file(void)
2438{
2439 if (cpufreq_boost_supported())
2440 cpufreq_sysfs_remove_file(&boost.attr);
2441}
2442
2443int cpufreq_enable_boost_support(void)
2444{
2445 if (!cpufreq_driver)
2446 return -EINVAL;
2447
2448 if (cpufreq_boost_supported())
2449 return 0;
2450
2451 cpufreq_driver->boost_supported = true;
2452
2453 /* This will get removed on driver unregister */
2454 return create_boost_sysfs_file();
2455}
2456EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2457
2415int cpufreq_boost_enabled(void) 2458int cpufreq_boost_enabled(void)
2416{ 2459{
2417 return cpufreq_driver->boost_enabled; 2460 return cpufreq_driver->boost_enabled;
@@ -2465,21 +2508,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2465 if (driver_data->setpolicy) 2508 if (driver_data->setpolicy)
2466 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2509 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2467 2510
2468 if (cpufreq_boost_supported()) { 2511 ret = create_boost_sysfs_file();
2469 /* 2512 if (ret)
2470 * Check if driver provides function to enable boost - 2513 goto err_null_driver;
2471 * if not, use cpufreq_boost_set_sw as default
2472 */
2473 if (!cpufreq_driver->set_boost)
2474 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2475
2476 ret = cpufreq_sysfs_create_file(&boost.attr);
2477 if (ret) {
2478 pr_err("%s: cannot register global BOOST sysfs file\n",
2479 __func__);
2480 goto err_null_driver;
2481 }
2482 }
2483 2514
2484 ret = subsys_interface_register(&cpufreq_interface); 2515 ret = subsys_interface_register(&cpufreq_interface);
2485 if (ret) 2516 if (ret)
@@ -2503,8 +2534,7 @@ out:
2503err_if_unreg: 2534err_if_unreg:
2504 subsys_interface_unregister(&cpufreq_interface); 2535 subsys_interface_unregister(&cpufreq_interface);
2505err_boost_unreg: 2536err_boost_unreg:
2506 if (cpufreq_boost_supported()) 2537 remove_boost_sysfs_file();
2507 cpufreq_sysfs_remove_file(&boost.attr);
2508err_null_driver: 2538err_null_driver:
2509 write_lock_irqsave(&cpufreq_driver_lock, flags); 2539 write_lock_irqsave(&cpufreq_driver_lock, flags);
2510 cpufreq_driver = NULL; 2540 cpufreq_driver = NULL;
@@ -2533,9 +2563,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2533 /* Protect against concurrent cpu hotplug */ 2563 /* Protect against concurrent cpu hotplug */
2534 get_online_cpus(); 2564 get_online_cpus();
2535 subsys_interface_unregister(&cpufreq_interface); 2565 subsys_interface_unregister(&cpufreq_interface);
2536 if (cpufreq_boost_supported()) 2566 remove_boost_sysfs_file();
2537 cpufreq_sysfs_remove_file(&boost.attr);
2538
2539 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 2567 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2540 2568
2541 write_lock_irqsave(&cpufreq_driver_lock, flags); 2569 write_lock_irqsave(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index 773bcde893c0..0f5e6d5f6da0 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
75 } 75 }
76 freq_table[i].driver_data = i; 76 freq_table[i].driver_data = i;
77 freq_table[i].frequency = rate / 1000; 77 freq_table[i].frequency = rate / 1000;
78
79 /* Is Boost/turbo opp ? */
80 if (dev_pm_opp_is_turbo(opp))
81 freq_table[i].flags = CPUFREQ_BOOST_FREQ;
78 } 82 }
79 83
80 freq_table[i].driver_data = i; 84 freq_table[i].driver_data = i;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index dfbbf981ed56..a8f1daffc9bc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -18,6 +18,21 @@
18 * FREQUENCY TABLE HELPERS * 18 * FREQUENCY TABLE HELPERS *
19 *********************************************************************/ 19 *********************************************************************/
20 20
21bool policy_has_boost_freq(struct cpufreq_policy *policy)
22{
23 struct cpufreq_frequency_table *pos, *table = policy->freq_table;
24
25 if (!table)
26 return false;
27
28 cpufreq_for_each_valid_entry(pos, table)
29 if (pos->flags & CPUFREQ_BOOST_FREQ)
30 return true;
31
32 return false;
33}
34EXPORT_SYMBOL_GPL(policy_has_boost_freq);
35
21int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 36int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
22 struct cpufreq_frequency_table *table) 37 struct cpufreq_frequency_table *table)
23{ 38{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6ff6a4d95eea..430efcbea48e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -575,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
575int cpufreq_boost_trigger_state(int state); 575int cpufreq_boost_trigger_state(int state);
576int cpufreq_boost_supported(void); 576int cpufreq_boost_supported(void);
577int cpufreq_boost_enabled(void); 577int cpufreq_boost_enabled(void);
578int cpufreq_enable_boost_support(void);
579bool policy_has_boost_freq(struct cpufreq_policy *policy);
578#else 580#else
579static inline int cpufreq_boost_trigger_state(int state) 581static inline int cpufreq_boost_trigger_state(int state)
580{ 582{
@@ -588,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
588{ 590{
589 return 0; 591 return 0;
590} 592}
593
594static inline int cpufreq_enable_boost_support(void)
595{
596 return -EINVAL;
597}
598
599static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
600{
601 return false;
602}
591#endif 603#endif
592/* the following funtion is for cpufreq core use only */ 604/* the following funtion is for cpufreq core use only */
593struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); 605struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
594 606
595/* the following are really really optional */ 607/* the following are really really optional */
596extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 608extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
609extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
597extern struct freq_attr *cpufreq_generic_attr[]; 610extern struct freq_attr *cpufreq_generic_attr[];
598int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 611int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
599 struct cpufreq_frequency_table *table); 612 struct cpufreq_frequency_table *table);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540914..cab7ba55bedb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
30 30
31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
32 32
33bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34
33int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
34 37
35struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 38struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
36 unsigned long freq, 39 unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
62 return 0; 65 return 0;
63} 66}
64 67
68static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
69{
70 return false;
71}
72
65static inline int dev_pm_opp_get_opp_count(struct device *dev) 73static inline int dev_pm_opp_get_opp_count(struct device *dev)
66{ 74{
67 return 0; 75 return 0;
68} 76}
69 77
78static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
79{
80 return 0;
81}
82
70static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 83static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
71 unsigned long freq, bool available) 84 unsigned long freq, bool available)
72{ 85{
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 128#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
116int of_init_opp_table(struct device *dev); 129int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev); 130void of_free_opp_table(struct device *dev);
131int of_cpumask_init_opp_table(cpumask_var_t cpumask);
132void of_cpumask_free_opp_table(cpumask_var_t cpumask);
133int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
134int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
118#else 135#else
119static inline int of_init_opp_table(struct device *dev) 136static inline int of_init_opp_table(struct device *dev)
120{ 137{
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
124static inline void of_free_opp_table(struct device *dev) 141static inline void of_free_opp_table(struct device *dev)
125{ 142{
126} 143}
144
145static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
146{
147 return -ENOSYS;
148}
149
150static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
151{
152}
153
154static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
155{
156 return -ENOSYS;
157}
158
159static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
160{
161 return -ENOSYS;
162}
127#endif 163#endif
128 164
129#endif /* __LINUX_OPP_H__ */ 165#endif /* __LINUX_OPP_H__ */