author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-10-27 20:29:34 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-10-27 20:29:34 -0400
commit		93658cb8597ab76655220be43d3d7f74c66e9e4e (patch)
tree		e6da56aac9cda2c80315cc73ee31ef4c47af8472 /drivers
parent		6e0ca95aa3c83c47d13f9f400bfaaa853d0b224b (diff)
parent		ad7722dab7292dbc1c4586d701ac226b68122d39 (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq: (167 commits)
cpufreq: create per policy rwsem instead of per CPU cpu_policy_rwsem
intel_pstate: Add Baytrail support
intel_pstate: Refactor driver to support CPUs with different MSR layouts
cpufreq: Implement light weight ->target_index() routine
PM / OPP: rename header to linux/pm_opp.h
PM / OPP: rename data structures to dev_pm equivalents
PM / OPP: rename functions to dev_pm_opp*
cpufreq / governor: Remove fossil comment
cpufreq: exynos4210: Use the common clock framework to set APLL clock rate
cpufreq: exynos4x12: Use the common clock framework to set APLL clock rate
cpufreq: Detect spurious invocations of update_policy_cpu()
cpufreq: pmac64: enable cpufreq on iMac G5 (iSight) model
cpufreq: pmac64: provide cpufreq transition latency for older G5 models
cpufreq: pmac64: speed up frequency switch
cpufreq: highbank-cpufreq: Enable Midway/ECX-2000
exynos-cpufreq: fix false return check from "regulator_set_voltage"
speedstep-centrino: Remove unnecessary braces
acpi-cpufreq: Add comment under ACPI_ADR_SPACE_SYSTEM_IO case
cpufreq: arm-big-little: use clk_get instead of clk_get_sys
cpufreq: exynos: Show a list of available frequencies
...
Conflicts:
drivers/devfreq/exynos/exynos5_bus.c
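Two of the changes listed above account for most of the churn in the diff below: the OPP library moves its public API from the bare opp_* names in <linux/opp.h> to dev_pm_opp_* names in <linux/pm_opp.h>, and cpufreq drivers can switch from ->target() to the lighter ->target_index() callback. As a quick orientation, here is a minimal provider-side sketch using the renamed OPP calls; the device, frequencies and voltages are hypothetical and only illustrate the new naming.

#include <linux/device.h>
#include <linux/pm_opp.h>	/* was <linux/opp.h> before this merge */

/* Register two made-up operating points for a hypothetical SoC device. */
static int example_register_opps(struct device *dev)
{
	int ret;

	/* opp_add() is now dev_pm_opp_add(); arguments stay (dev, Hz, uV). */
	ret = dev_pm_opp_add(dev, 800000000, 1000000);
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1100000);
	if (ret)
		return ret;

	/* opp_disable()/opp_enable() become dev_pm_opp_disable()/_enable(). */
	return dev_pm_opp_disable(dev, 1000000000);
}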
Diffstat (limited to 'drivers')
71 files changed, 922 insertions, 2422 deletions
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c6043..fa4187418440 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/rculist.h> | 22 | #include <linux/rculist.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/opp.h> | 24 | #include <linux/pm_opp.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | 27 | ||
@@ -42,7 +42,7 @@ | |||
42 | */ | 42 | */ |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * struct opp - Generic OPP description structure | 45 | * struct dev_pm_opp - Generic OPP description structure |
46 | * @node: opp list node. The nodes are maintained throughout the lifetime | 46 | * @node: opp list node. The nodes are maintained throughout the lifetime |
47 | * of boot. It is expected only an optimal set of OPPs are | 47 | * of boot. It is expected only an optimal set of OPPs are |
48 | * added to the library by the SoC framework. | 48 | * added to the library by the SoC framework. |
@@ -59,7 +59,7 @@ | |||
59 | * | 59 | * |
60 | * This structure stores the OPP information for a given device. | 60 | * This structure stores the OPP information for a given device. |
61 | */ | 61 | */ |
62 | struct opp { | 62 | struct dev_pm_opp { |
63 | struct list_head node; | 63 | struct list_head node; |
64 | 64 | ||
65 | bool available; | 65 | bool available; |
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | /** | 138 | /** |
139 | * opp_get_voltage() - Gets the voltage corresponding to an available opp | 139 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp |
140 | * @opp: opp for which voltage has to be returned for | 140 | * @opp: opp for which voltage has to be returned for |
141 | * | 141 | * |
142 | * Return voltage in micro volt corresponding to the opp, else | 142 | * Return voltage in micro volt corresponding to the opp, else |
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev) | |||
150 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | 150 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the |
151 | * pointer. | 151 | * pointer. |
152 | */ | 152 | */ |
153 | unsigned long opp_get_voltage(struct opp *opp) | 153 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) |
154 | { | 154 | { |
155 | struct opp *tmp_opp; | 155 | struct dev_pm_opp *tmp_opp; |
156 | unsigned long v = 0; | 156 | unsigned long v = 0; |
157 | 157 | ||
158 | tmp_opp = rcu_dereference(opp); | 158 | tmp_opp = rcu_dereference(opp); |
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp) | |||
163 | 163 | ||
164 | return v; | 164 | return v; |
165 | } | 165 | } |
166 | EXPORT_SYMBOL_GPL(opp_get_voltage); | 166 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); |
167 | 167 | ||
168 | /** | 168 | /** |
169 | * opp_get_freq() - Gets the frequency corresponding to an available opp | 169 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp |
170 | * @opp: opp for which frequency has to be returned for | 170 | * @opp: opp for which frequency has to be returned for |
171 | * | 171 | * |
172 | * Return frequency in hertz corresponding to the opp, else | 172 | * Return frequency in hertz corresponding to the opp, else |
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage); | |||
180 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | 180 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the |
181 | * pointer. | 181 | * pointer. |
182 | */ | 182 | */ |
183 | unsigned long opp_get_freq(struct opp *opp) | 183 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
184 | { | 184 | { |
185 | struct opp *tmp_opp; | 185 | struct dev_pm_opp *tmp_opp; |
186 | unsigned long f = 0; | 186 | unsigned long f = 0; |
187 | 187 | ||
188 | tmp_opp = rcu_dereference(opp); | 188 | tmp_opp = rcu_dereference(opp); |
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp) | |||
193 | 193 | ||
194 | return f; | 194 | return f; |
195 | } | 195 | } |
196 | EXPORT_SYMBOL_GPL(opp_get_freq); | 196 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
197 | 197 | ||
198 | /** | 198 | /** |
199 | * opp_get_opp_count() - Get number of opps available in the opp list | 199 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list |
200 | * @dev: device for which we do this operation | 200 | * @dev: device for which we do this operation |
201 | * | 201 | * |
202 | * This function returns the number of available opps if there are any, | 202 | * This function returns the number of available opps if there are any, |
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq); | |||
206 | * internally references two RCU protected structures: device_opp and opp which | 206 | * internally references two RCU protected structures: device_opp and opp which |
207 | * are safe as long as we are under a common RCU locked section. | 207 | * are safe as long as we are under a common RCU locked section. |
208 | */ | 208 | */ |
209 | int opp_get_opp_count(struct device *dev) | 209 | int dev_pm_opp_get_opp_count(struct device *dev) |
210 | { | 210 | { |
211 | struct device_opp *dev_opp; | 211 | struct device_opp *dev_opp; |
212 | struct opp *temp_opp; | 212 | struct dev_pm_opp *temp_opp; |
213 | int count = 0; | 213 | int count = 0; |
214 | 214 | ||
215 | dev_opp = find_device_opp(dev); | 215 | dev_opp = find_device_opp(dev); |
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev) | |||
226 | 226 | ||
227 | return count; | 227 | return count; |
228 | } | 228 | } |
229 | EXPORT_SYMBOL_GPL(opp_get_opp_count); | 229 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); |
230 | 230 | ||
231 | /** | 231 | /** |
232 | * opp_find_freq_exact() - search for an exact frequency | 232 | * dev_pm_opp_find_freq_exact() - search for an exact frequency |
233 | * @dev: device for which we do this operation | 233 | * @dev: device for which we do this operation |
234 | * @freq: frequency to search for | 234 | * @freq: frequency to search for |
235 | * @available: true/false - match for available opp | 235 | * @available: true/false - match for available opp |
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count); | |||
254 | * under the locked area. The pointer returned must be used prior to unlocking | 254 | * under the locked area. The pointer returned must be used prior to unlocking |
255 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 255 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
256 | */ | 256 | */ |
257 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | 257 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
258 | bool available) | 258 | unsigned long freq, |
259 | bool available) | ||
259 | { | 260 | { |
260 | struct device_opp *dev_opp; | 261 | struct device_opp *dev_opp; |
261 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 262 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
262 | 263 | ||
263 | dev_opp = find_device_opp(dev); | 264 | dev_opp = find_device_opp(dev); |
264 | if (IS_ERR(dev_opp)) { | 265 | if (IS_ERR(dev_opp)) { |
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | |||
277 | 278 | ||
278 | return opp; | 279 | return opp; |
279 | } | 280 | } |
280 | EXPORT_SYMBOL_GPL(opp_find_freq_exact); | 281 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
281 | 282 | ||
282 | /** | 283 | /** |
283 | * opp_find_freq_ceil() - Search for an rounded ceil freq | 284 | * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq |
284 | * @dev: device for which we do this operation | 285 | * @dev: device for which we do this operation |
285 | * @freq: Start frequency | 286 | * @freq: Start frequency |
286 | * | 287 | * |
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact); | |||
300 | * under the locked area. The pointer returned must be used prior to unlocking | 301 | * under the locked area. The pointer returned must be used prior to unlocking |
301 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 302 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
302 | */ | 303 | */ |
303 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | 304 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, |
305 | unsigned long *freq) | ||
304 | { | 306 | { |
305 | struct device_opp *dev_opp; | 307 | struct device_opp *dev_opp; |
306 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 308 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
307 | 309 | ||
308 | if (!dev || !freq) { | 310 | if (!dev || !freq) { |
309 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 311 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | |||
324 | 326 | ||
325 | return opp; | 327 | return opp; |
326 | } | 328 | } |
327 | EXPORT_SYMBOL_GPL(opp_find_freq_ceil); | 329 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); |
328 | 330 | ||
329 | /** | 331 | /** |
330 | * opp_find_freq_floor() - Search for a rounded floor freq | 332 | * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq |
331 | * @dev: device for which we do this operation | 333 | * @dev: device for which we do this operation |
332 | * @freq: Start frequency | 334 | * @freq: Start frequency |
333 | * | 335 | * |
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil); | |||
347 | * under the locked area. The pointer returned must be used prior to unlocking | 349 | * under the locked area. The pointer returned must be used prior to unlocking |
348 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 350 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
349 | */ | 351 | */ |
350 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | 352 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, |
353 | unsigned long *freq) | ||
351 | { | 354 | { |
352 | struct device_opp *dev_opp; | 355 | struct device_opp *dev_opp; |
353 | struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 356 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
354 | 357 | ||
355 | if (!dev || !freq) { | 358 | if (!dev || !freq) { |
356 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 359 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | |||
375 | 378 | ||
376 | return opp; | 379 | return opp; |
377 | } | 380 | } |
378 | EXPORT_SYMBOL_GPL(opp_find_freq_floor); | 381 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
379 | 382 | ||
380 | /** | 383 | /** |
381 | * opp_add() - Add an OPP table from a table definitions | 384 | * dev_pm_opp_add() - Add an OPP table from a table definitions |
382 | * @dev: device for which we do this operation | 385 | * @dev: device for which we do this operation |
383 | * @freq: Frequency in Hz for this OPP | 386 | * @freq: Frequency in Hz for this OPP |
384 | * @u_volt: Voltage in uVolts for this OPP | 387 | * @u_volt: Voltage in uVolts for this OPP |
385 | * | 388 | * |
386 | * This function adds an opp definition to the opp list and returns status. | 389 | * This function adds an opp definition to the opp list and returns status. |
387 | * The opp is made available by default and it can be controlled using | 390 | * The opp is made available by default and it can be controlled using |
388 | * opp_enable/disable functions. | 391 | * dev_pm_opp_enable/disable functions. |
389 | * | 392 | * |
390 | * Locking: The internal device_opp and opp structures are RCU protected. | 393 | * Locking: The internal device_opp and opp structures are RCU protected. |
391 | * Hence this function internally uses RCU updater strategy with mutex locks | 394 | * Hence this function internally uses RCU updater strategy with mutex locks |
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor); | |||
393 | * that this function is *NOT* called under RCU protection or in contexts where | 396 | * that this function is *NOT* called under RCU protection or in contexts where |
394 | * mutex cannot be locked. | 397 | * mutex cannot be locked. |
395 | */ | 398 | */ |
396 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 399 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
397 | { | 400 | { |
398 | struct device_opp *dev_opp = NULL; | 401 | struct device_opp *dev_opp = NULL; |
399 | struct opp *opp, *new_opp; | 402 | struct dev_pm_opp *opp, *new_opp; |
400 | struct list_head *head; | 403 | struct list_head *head; |
401 | 404 | ||
402 | /* allocate new OPP node */ | 405 | /* allocate new OPP node */ |
403 | new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); | 406 | new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); |
404 | if (!new_opp) { | 407 | if (!new_opp) { |
405 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); | 408 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); |
406 | return -ENOMEM; | 409 | return -ENOMEM; |
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
460 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); | 463 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); |
461 | return 0; | 464 | return 0; |
462 | } | 465 | } |
463 | EXPORT_SYMBOL_GPL(opp_add); | 466 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
464 | 467 | ||
465 | /** | 468 | /** |
466 | * opp_set_availability() - helper to set the availability of an opp | 469 | * opp_set_availability() - helper to set the availability of an opp |
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
485 | bool availability_req) | 488 | bool availability_req) |
486 | { | 489 | { |
487 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); | 490 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); |
488 | struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | 491 | struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); |
489 | int r = 0; | 492 | int r = 0; |
490 | 493 | ||
491 | /* keep the node allocated */ | 494 | /* keep the node allocated */ |
492 | new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); | 495 | new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); |
493 | if (!new_opp) { | 496 | if (!new_opp) { |
494 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); | 497 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); |
495 | return -ENOMEM; | 498 | return -ENOMEM; |
@@ -552,13 +555,13 @@ unlock: | |||
552 | } | 555 | } |
553 | 556 | ||
554 | /** | 557 | /** |
555 | * opp_enable() - Enable a specific OPP | 558 | * dev_pm_opp_enable() - Enable a specific OPP |
556 | * @dev: device for which we do this operation | 559 | * @dev: device for which we do this operation |
557 | * @freq: OPP frequency to enable | 560 | * @freq: OPP frequency to enable |
558 | * | 561 | * |
559 | * Enables a provided opp. If the operation is valid, this returns 0, else the | 562 | * Enables a provided opp. If the operation is valid, this returns 0, else the |
560 | * corresponding error value. It is meant to be used for users an OPP available | 563 | * corresponding error value. It is meant to be used for users an OPP available |
561 | * after being temporarily made unavailable with opp_disable. | 564 | * after being temporarily made unavailable with dev_pm_opp_disable. |
562 | * | 565 | * |
563 | * Locking: The internal device_opp and opp structures are RCU protected. | 566 | * Locking: The internal device_opp and opp structures are RCU protected. |
564 | * Hence this function indirectly uses RCU and mutex locks to keep the | 567 | * Hence this function indirectly uses RCU and mutex locks to keep the |
@@ -566,21 +569,21 @@ unlock: | |||
566 | * this function is *NOT* called under RCU protection or in contexts where | 569 | * this function is *NOT* called under RCU protection or in contexts where |
567 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 570 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
568 | */ | 571 | */ |
569 | int opp_enable(struct device *dev, unsigned long freq) | 572 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
570 | { | 573 | { |
571 | return opp_set_availability(dev, freq, true); | 574 | return opp_set_availability(dev, freq, true); |
572 | } | 575 | } |
573 | EXPORT_SYMBOL_GPL(opp_enable); | 576 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); |
574 | 577 | ||
575 | /** | 578 | /** |
576 | * opp_disable() - Disable a specific OPP | 579 | * dev_pm_opp_disable() - Disable a specific OPP |
577 | * @dev: device for which we do this operation | 580 | * @dev: device for which we do this operation |
578 | * @freq: OPP frequency to disable | 581 | * @freq: OPP frequency to disable |
579 | * | 582 | * |
580 | * Disables a provided opp. If the operation is valid, this returns | 583 | * Disables a provided opp. If the operation is valid, this returns |
581 | * 0, else the corresponding error value. It is meant to be a temporary | 584 | * 0, else the corresponding error value. It is meant to be a temporary |
582 | * control by users to make this OPP not available until the circumstances are | 585 | * control by users to make this OPP not available until the circumstances are |
583 | * right to make it available again (with a call to opp_enable). | 586 | * right to make it available again (with a call to dev_pm_opp_enable). |
584 | * | 587 | * |
585 | * Locking: The internal device_opp and opp structures are RCU protected. | 588 | * Locking: The internal device_opp and opp structures are RCU protected. |
586 | * Hence this function indirectly uses RCU and mutex locks to keep the | 589 | * Hence this function indirectly uses RCU and mutex locks to keep the |
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable); | |||
588 | * this function is *NOT* called under RCU protection or in contexts where | 591 | * this function is *NOT* called under RCU protection or in contexts where |
589 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 592 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
590 | */ | 593 | */ |
591 | int opp_disable(struct device *dev, unsigned long freq) | 594 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
592 | { | 595 | { |
593 | return opp_set_availability(dev, freq, false); | 596 | return opp_set_availability(dev, freq, false); |
594 | } | 597 | } |
595 | EXPORT_SYMBOL_GPL(opp_disable); | 598 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
596 | 599 | ||
597 | #ifdef CONFIG_CPU_FREQ | 600 | #ifdef CONFIG_CPU_FREQ |
598 | /** | 601 | /** |
599 | * opp_init_cpufreq_table() - create a cpufreq table for a device | 602 | * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device |
600 | * @dev: device for which we do this operation | 603 | * @dev: device for which we do this operation |
601 | * @table: Cpufreq table returned back to caller | 604 | * @table: Cpufreq table returned back to caller |
602 | * | 605 | * |
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable); | |||
619 | * Callers should ensure that this function is *NOT* called under RCU protection | 622 | * Callers should ensure that this function is *NOT* called under RCU protection |
620 | * or in contexts where mutex locking cannot be used. | 623 | * or in contexts where mutex locking cannot be used. |
621 | */ | 624 | */ |
622 | int opp_init_cpufreq_table(struct device *dev, | 625 | int dev_pm_opp_init_cpufreq_table(struct device *dev, |
623 | struct cpufreq_frequency_table **table) | 626 | struct cpufreq_frequency_table **table) |
624 | { | 627 | { |
625 | struct device_opp *dev_opp; | 628 | struct device_opp *dev_opp; |
626 | struct opp *opp; | 629 | struct dev_pm_opp *opp; |
627 | struct cpufreq_frequency_table *freq_table; | 630 | struct cpufreq_frequency_table *freq_table; |
628 | int i = 0; | 631 | int i = 0; |
629 | 632 | ||
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev, | |||
639 | } | 642 | } |
640 | 643 | ||
641 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * | 644 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * |
642 | (opp_get_opp_count(dev) + 1), GFP_KERNEL); | 645 | (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL); |
643 | if (!freq_table) { | 646 | if (!freq_table) { |
644 | mutex_unlock(&dev_opp_list_lock); | 647 | mutex_unlock(&dev_opp_list_lock); |
645 | dev_warn(dev, "%s: Unable to allocate frequency table\n", | 648 | dev_warn(dev, "%s: Unable to allocate frequency table\n", |
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev, | |||
663 | 666 | ||
664 | return 0; | 667 | return 0; |
665 | } | 668 | } |
666 | EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); | 669 | EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); |
667 | 670 | ||
668 | /** | 671 | /** |
669 | * opp_free_cpufreq_table() - free the cpufreq table | 672 | * dev_pm_opp_free_cpufreq_table() - free the cpufreq table |
670 | * @dev: device for which we do this operation | 673 | * @dev: device for which we do this operation |
671 | * @table: table to free | 674 | * @table: table to free |
672 | * | 675 | * |
673 | * Free up the table allocated by opp_init_cpufreq_table | 676 | * Free up the table allocated by dev_pm_opp_init_cpufreq_table |
674 | */ | 677 | */ |
675 | void opp_free_cpufreq_table(struct device *dev, | 678 | void dev_pm_opp_free_cpufreq_table(struct device *dev, |
676 | struct cpufreq_frequency_table **table) | 679 | struct cpufreq_frequency_table **table) |
677 | { | 680 | { |
678 | if (!table) | 681 | if (!table) |
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev, | |||
681 | kfree(*table); | 684 | kfree(*table); |
682 | *table = NULL; | 685 | *table = NULL; |
683 | } | 686 | } |
684 | EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); | 687 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); |
685 | #endif /* CONFIG_CPU_FREQ */ | 688 | #endif /* CONFIG_CPU_FREQ */ |
686 | 689 | ||
687 | /** | 690 | /** |
688 | * opp_get_notifier() - find notifier_head of the device with opp | 691 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
689 | * @dev: device pointer used to lookup device OPPs. | 692 | * @dev: device pointer used to lookup device OPPs. |
690 | */ | 693 | */ |
691 | struct srcu_notifier_head *opp_get_notifier(struct device *dev) | 694 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) |
692 | { | 695 | { |
693 | struct device_opp *dev_opp = find_device_opp(dev); | 696 | struct device_opp *dev_opp = find_device_opp(dev); |
694 | 697 | ||
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev) | |||
732 | unsigned long freq = be32_to_cpup(val++) * 1000; | 735 | unsigned long freq = be32_to_cpup(val++) * 1000; |
733 | unsigned long volt = be32_to_cpup(val++); | 736 | unsigned long volt = be32_to_cpup(val++); |
734 | 737 | ||
735 | if (opp_add(dev, freq, volt)) { | 738 | if (dev_pm_opp_add(dev, freq, volt)) { |
736 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 739 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
737 | __func__, freq); | 740 | __func__, freq); |
738 | continue; | 741 | continue; |
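The kernel-doc retained above spells out the locking contract that survives the rename: the dev_pm_opp_find_freq_{exact,ceil,floor}() lookups must run under rcu_read_lock(), and the returned pointer may only be dereferenced (via dev_pm_opp_get_voltage()/dev_pm_opp_get_freq()) before rcu_read_unlock(). A minimal consumer-side sketch of that pattern, with a hypothetical device and a caller-supplied frequency, might look like this:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Pick the lowest available OPP at or above *freq and read it back. */
static int example_pick_opp(struct device *dev, unsigned long *freq,
			    unsigned long *u_volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();

	opp = dev_pm_opp_find_freq_ceil(dev, freq);	/* was opp_find_freq_ceil() */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}

	/* Both accessors must be used before rcu_read_unlock(). */
	*u_volt = dev_pm_opp_get_voltage(opp);	/* was opp_get_voltage() */
	*freq = dev_pm_opp_get_freq(opp);	/* was opp_get_freq() */

	rcu_read_unlock();

	return 0;
}

On the cpufreq side the same library still provides the table helpers, now as dev_pm_opp_init_cpufreq_table()/dev_pm_opp_free_cpufreq_table(); the arm_big_little.c hunks further down use exactly that pair.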
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb825153..38093e272377 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -17,15 +17,11 @@ config CPU_FREQ | |||
17 | 17 | ||
18 | if CPU_FREQ | 18 | if CPU_FREQ |
19 | 19 | ||
20 | config CPU_FREQ_TABLE | ||
21 | tristate | ||
22 | |||
23 | config CPU_FREQ_GOV_COMMON | 20 | config CPU_FREQ_GOV_COMMON |
24 | bool | 21 | bool |
25 | 22 | ||
26 | config CPU_FREQ_STAT | 23 | config CPU_FREQ_STAT |
27 | tristate "CPU frequency translation statistics" | 24 | tristate "CPU frequency translation statistics" |
28 | select CPU_FREQ_TABLE | ||
29 | default y | 25 | default y |
30 | help | 26 | help |
31 | This driver exports CPU frequency statistics information through sysfs | 27 | This driver exports CPU frequency statistics information through sysfs |
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE | |||
143 | 139 | ||
144 | config CPU_FREQ_GOV_ONDEMAND | 140 | config CPU_FREQ_GOV_ONDEMAND |
145 | tristate "'ondemand' cpufreq policy governor" | 141 | tristate "'ondemand' cpufreq policy governor" |
146 | select CPU_FREQ_TABLE | ||
147 | select CPU_FREQ_GOV_COMMON | 142 | select CPU_FREQ_GOV_COMMON |
148 | help | 143 | help |
149 | 'ondemand' - This driver adds a dynamic cpufreq policy governor. | 144 | 'ondemand' - This driver adds a dynamic cpufreq policy governor. |
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE | |||
187 | config GENERIC_CPUFREQ_CPU0 | 182 | config GENERIC_CPUFREQ_CPU0 |
188 | tristate "Generic CPU0 cpufreq driver" | 183 | tristate "Generic CPU0 cpufreq driver" |
189 | depends on HAVE_CLK && REGULATOR && PM_OPP && OF | 184 | depends on HAVE_CLK && REGULATOR && PM_OPP && OF |
190 | select CPU_FREQ_TABLE | ||
191 | help | 185 | help |
192 | This adds a generic cpufreq driver for CPU0 frequency management. | 186 | This adds a generic cpufreq driver for CPU0 frequency management. |
193 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) | 187 | It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) |
@@ -223,7 +217,6 @@ depends on IA64 | |||
223 | 217 | ||
224 | config IA64_ACPI_CPUFREQ | 218 | config IA64_ACPI_CPUFREQ |
225 | tristate "ACPI Processor P-States driver" | 219 | tristate "ACPI Processor P-States driver" |
226 | select CPU_FREQ_TABLE | ||
227 | depends on ACPI_PROCESSOR | 220 | depends on ACPI_PROCESSOR |
228 | help | 221 | help |
229 | This driver adds a CPUFreq driver which utilizes the ACPI | 222 | This driver adds a CPUFreq driver which utilizes the ACPI |
@@ -240,7 +233,6 @@ depends on MIPS | |||
240 | 233 | ||
241 | config LOONGSON2_CPUFREQ | 234 | config LOONGSON2_CPUFREQ |
242 | tristate "Loongson2 CPUFreq Driver" | 235 | tristate "Loongson2 CPUFreq Driver" |
243 | select CPU_FREQ_TABLE | ||
244 | help | 236 | help |
245 | This option adds a CPUFreq driver for loongson processors which | 237 | This option adds a CPUFreq driver for loongson processors which |
246 | support software configurable cpu frequency. | 238 | support software configurable cpu frequency. |
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers" | |||
262 | depends on SPARC64 | 254 | depends on SPARC64 |
263 | config SPARC_US3_CPUFREQ | 255 | config SPARC_US3_CPUFREQ |
264 | tristate "UltraSPARC-III CPU Frequency driver" | 256 | tristate "UltraSPARC-III CPU Frequency driver" |
265 | select CPU_FREQ_TABLE | ||
266 | help | 257 | help |
267 | This adds the CPUFreq driver for UltraSPARC-III processors. | 258 | This adds the CPUFreq driver for UltraSPARC-III processors. |
268 | 259 | ||
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ | |||
272 | 263 | ||
273 | config SPARC_US2E_CPUFREQ | 264 | config SPARC_US2E_CPUFREQ |
274 | tristate "UltraSPARC-IIe CPU Frequency driver" | 265 | tristate "UltraSPARC-IIe CPU Frequency driver" |
275 | select CPU_FREQ_TABLE | ||
276 | help | 266 | help |
277 | This adds the CPUFreq driver for UltraSPARC-IIe processors. | 267 | This adds the CPUFreq driver for UltraSPARC-IIe processors. |
278 | 268 | ||
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling" | |||
285 | depends on SUPERH | 275 | depends on SUPERH |
286 | config SH_CPU_FREQ | 276 | config SH_CPU_FREQ |
287 | tristate "SuperH CPU Frequency driver" | 277 | tristate "SuperH CPU Frequency driver" |
288 | select CPU_FREQ_TABLE | ||
289 | help | 278 | help |
290 | This adds the cpufreq driver for SuperH. Any CPU that supports | 279 | This adds the cpufreq driver for SuperH. Any CPU that supports |
291 | clock rate rounding through the clock framework can use this | 280 | clock rate rounding through the clock framework can use this |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0fa204b244bd..701ec95ce954 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,6 @@ | |||
5 | config ARM_BIG_LITTLE_CPUFREQ | 5 | config ARM_BIG_LITTLE_CPUFREQ |
6 | tristate "Generic ARM big LITTLE CPUfreq driver" | 6 | tristate "Generic ARM big LITTLE CPUfreq driver" |
7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK | 7 | depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK |
8 | select CPU_FREQ_TABLE | ||
9 | help | 8 | help |
10 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. | 9 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. |
11 | 10 | ||
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ | |||
18 | 17 | ||
19 | config ARM_EXYNOS_CPUFREQ | 18 | config ARM_EXYNOS_CPUFREQ |
20 | bool | 19 | bool |
21 | select CPU_FREQ_TABLE | ||
22 | 20 | ||
23 | config ARM_EXYNOS4210_CPUFREQ | 21 | config ARM_EXYNOS4210_CPUFREQ |
24 | bool "SAMSUNG EXYNOS4210" | 22 | bool "SAMSUNG EXYNOS4210" |
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ | |||
58 | depends on SOC_EXYNOS5440 | 56 | depends on SOC_EXYNOS5440 |
59 | depends on HAVE_CLK && PM_OPP && OF | 57 | depends on HAVE_CLK && PM_OPP && OF |
60 | default y | 58 | default y |
61 | select CPU_FREQ_TABLE | ||
62 | help | 59 | help |
63 | This adds the CPUFreq driver for Samsung EXYNOS5440 | 60 | This adds the CPUFreq driver for Samsung EXYNOS5440 |
64 | SoC. The nature of exynos5440 clock controller is | 61 | SoC. The nature of exynos5440 clock controller is |
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ | |||
85 | tristate "Freescale i.MX6Q cpufreq support" | 82 | tristate "Freescale i.MX6Q cpufreq support" |
86 | depends on SOC_IMX6Q | 83 | depends on SOC_IMX6Q |
87 | depends on REGULATOR_ANATOP | 84 | depends on REGULATOR_ANATOP |
88 | select CPU_FREQ_TABLE | ||
89 | help | 85 | help |
90 | This adds cpufreq driver support for Freescale i.MX6Q SOC. | 86 | This adds cpufreq driver support for Freescale i.MX6Q SOC. |
91 | 87 | ||
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR | |||
101 | 97 | ||
102 | config ARM_KIRKWOOD_CPUFREQ | 98 | config ARM_KIRKWOOD_CPUFREQ |
103 | def_bool ARCH_KIRKWOOD && OF | 99 | def_bool ARCH_KIRKWOOD && OF |
104 | select CPU_FREQ_TABLE | ||
105 | help | 100 | help |
106 | This adds the CPUFreq driver for Marvell Kirkwood | 101 | This adds the CPUFreq driver for Marvell Kirkwood |
107 | SoCs. | 102 | SoCs. |
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ | |||
110 | bool "TI OMAP2+" | 105 | bool "TI OMAP2+" |
111 | depends on ARCH_OMAP2PLUS | 106 | depends on ARCH_OMAP2PLUS |
112 | default ARCH_OMAP2PLUS | 107 | default ARCH_OMAP2PLUS |
113 | select CPU_FREQ_TABLE | ||
114 | 108 | ||
115 | config ARM_S3C_CPUFREQ | 109 | config ARM_S3C_CPUFREQ |
116 | bool | 110 | bool |
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ | |||
165 | config ARM_S3C2416_CPUFREQ | 159 | config ARM_S3C2416_CPUFREQ |
166 | bool "S3C2416 CPU Frequency scaling support" | 160 | bool "S3C2416 CPU Frequency scaling support" |
167 | depends on CPU_S3C2416 | 161 | depends on CPU_S3C2416 |
168 | select CPU_FREQ_TABLE | ||
169 | help | 162 | help |
170 | This adds the CPUFreq driver for the Samsung S3C2416 and | 163 | This adds the CPUFreq driver for the Samsung S3C2416 and |
171 | S3C2450 SoC. The S3C2416 supports changing the rate of the | 164 | S3C2450 SoC. The S3C2416 supports changing the rate of the |
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ | |||
196 | config ARM_S3C64XX_CPUFREQ | 189 | config ARM_S3C64XX_CPUFREQ |
197 | bool "Samsung S3C64XX" | 190 | bool "Samsung S3C64XX" |
198 | depends on CPU_S3C6410 | 191 | depends on CPU_S3C6410 |
199 | select CPU_FREQ_TABLE | ||
200 | default y | 192 | default y |
201 | help | 193 | help |
202 | This adds the CPUFreq driver for Samsung S3C6410 SoC. | 194 | This adds the CPUFreq driver for Samsung S3C6410 SoC. |
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ | |||
206 | config ARM_S5PV210_CPUFREQ | 198 | config ARM_S5PV210_CPUFREQ |
207 | bool "Samsung S5PV210 and S5PC110" | 199 | bool "Samsung S5PV210 and S5PC110" |
208 | depends on CPU_S5PV210 | 200 | depends on CPU_S5PV210 |
209 | select CPU_FREQ_TABLE | ||
210 | default y | 201 | default y |
211 | help | 202 | help |
212 | This adds the CPUFreq driver for Samsung S5PV210 and | 203 | This adds the CPUFreq driver for Samsung S5PV210 and |
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ | |||
223 | config ARM_SPEAR_CPUFREQ | 214 | config ARM_SPEAR_CPUFREQ |
224 | bool "SPEAr CPUFreq support" | 215 | bool "SPEAr CPUFreq support" |
225 | depends on PLAT_SPEAR | 216 | depends on PLAT_SPEAR |
226 | select CPU_FREQ_TABLE | ||
227 | default y | 217 | default y |
228 | help | 218 | help |
229 | This adds the CPUFreq driver support for SPEAr SOCs. | 219 | This adds the CPUFreq driver support for SPEAr SOCs. |
@@ -231,7 +221,6 @@ config ARM_SPEAR_CPUFREQ | |||
231 | config ARM_TEGRA_CPUFREQ | 221 | config ARM_TEGRA_CPUFREQ |
232 | bool "TEGRA CPUFreq support" | 222 | bool "TEGRA CPUFreq support" |
233 | depends on ARCH_TEGRA | 223 | depends on ARCH_TEGRA |
234 | select CPU_FREQ_TABLE | ||
235 | default y | 224 | default y |
236 | help | 225 | help |
237 | This adds the CPUFreq driver support for TEGRA SOCs. | 226 | This adds the CPUFreq driver support for TEGRA SOCs. |
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 25ca9db62e09..ca0021a96e19 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,7 +1,6 @@ | |||
1 | config CPU_FREQ_CBE | 1 | config CPU_FREQ_CBE |
2 | tristate "CBE frequency scaling" | 2 | tristate "CBE frequency scaling" |
3 | depends on CBE_RAS && PPC_CELL | 3 | depends on CBE_RAS && PPC_CELL |
4 | select CPU_FREQ_TABLE | ||
5 | default m | 4 | default m |
6 | help | 5 | help |
7 | This adds the cpufreq driver for Cell BE processors. | 6 | This adds the cpufreq driver for Cell BE processors. |
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI | |||
20 | config CPU_FREQ_MAPLE | 19 | config CPU_FREQ_MAPLE |
21 | bool "Support for Maple 970FX Evaluation Board" | 20 | bool "Support for Maple 970FX Evaluation Board" |
22 | depends on PPC_MAPLE | 21 | depends on PPC_MAPLE |
23 | select CPU_FREQ_TABLE | ||
24 | help | 22 | help |
25 | This adds support for frequency switching on Maple 970FX | 23 | This adds support for frequency switching on Maple 970FX |
26 | Evaluation Board and compatible boards (IBM JS2x blades). | 24 | Evaluation Board and compatible boards (IBM JS2x blades). |
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE | |||
28 | config PPC_CORENET_CPUFREQ | 26 | config PPC_CORENET_CPUFREQ |
29 | tristate "CPU frequency scaling driver for Freescale E500MC SoCs" | 27 | tristate "CPU frequency scaling driver for Freescale E500MC SoCs" |
30 | depends on PPC_E500MC && OF && COMMON_CLK | 28 | depends on PPC_E500MC && OF && COMMON_CLK |
31 | select CPU_FREQ_TABLE | ||
32 | select CLK_PPC_CORENET | 29 | select CLK_PPC_CORENET |
33 | help | 30 | help |
34 | This adds the CPUFreq driver support for Freescale e500mc, | 31 | This adds the CPUFreq driver support for Freescale e500mc, |
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ | |||
38 | config CPU_FREQ_PMAC | 35 | config CPU_FREQ_PMAC |
39 | bool "Support for Apple PowerBooks" | 36 | bool "Support for Apple PowerBooks" |
40 | depends on ADB_PMU && PPC32 | 37 | depends on ADB_PMU && PPC32 |
41 | select CPU_FREQ_TABLE | ||
42 | help | 38 | help |
43 | This adds support for frequency switching on Apple PowerBooks, | 39 | This adds support for frequency switching on Apple PowerBooks, |
44 | this currently includes some models of iBook & Titanium | 40 | this currently includes some models of iBook & Titanium |
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC | |||
47 | config CPU_FREQ_PMAC64 | 43 | config CPU_FREQ_PMAC64 |
48 | bool "Support for some Apple G5s" | 44 | bool "Support for some Apple G5s" |
49 | depends on PPC_PMAC && PPC64 | 45 | depends on PPC_PMAC && PPC64 |
50 | select CPU_FREQ_TABLE | ||
51 | help | 46 | help |
52 | This adds support for frequency switching on Apple iMac G5, | 47 | This adds support for frequency switching on Apple iMac G5, |
53 | and some of the more recent desktop G5 machines as well. | 48 | and some of the more recent desktop G5 machines as well. |
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64 | |||
55 | config PPC_PASEMI_CPUFREQ | 50 | config PPC_PASEMI_CPUFREQ |
56 | bool "Support for PA Semi PWRficient" | 51 | bool "Support for PA Semi PWRficient" |
57 | depends on PPC_PASEMI | 52 | depends on PPC_PASEMI |
58 | select CPU_FREQ_TABLE | ||
59 | default y | 53 | default y |
60 | help | 54 | help |
61 | This adds the support for frequency switching on PA Semi | 55 | This adds the support for frequency switching on PA Semi |
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index e2b6eabef221..6897ad85b046 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ | |||
31 | 31 | ||
32 | config X86_ACPI_CPUFREQ | 32 | config X86_ACPI_CPUFREQ |
33 | tristate "ACPI Processor P-States driver" | 33 | tristate "ACPI Processor P-States driver" |
34 | select CPU_FREQ_TABLE | ||
35 | depends on ACPI_PROCESSOR | 34 | depends on ACPI_PROCESSOR |
36 | help | 35 | help |
37 | This driver adds a CPUFreq driver which utilizes the ACPI | 36 | This driver adds a CPUFreq driver which utilizes the ACPI |
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB | |||
60 | 59 | ||
61 | config ELAN_CPUFREQ | 60 | config ELAN_CPUFREQ |
62 | tristate "AMD Elan SC400 and SC410" | 61 | tristate "AMD Elan SC400 and SC410" |
63 | select CPU_FREQ_TABLE | ||
64 | depends on MELAN | 62 | depends on MELAN |
65 | ---help--- | 63 | ---help--- |
66 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 | 64 | This adds the CPUFreq driver for AMD Elan SC400 and SC410 |
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ | |||
76 | 74 | ||
77 | config SC520_CPUFREQ | 75 | config SC520_CPUFREQ |
78 | tristate "AMD Elan SC520" | 76 | tristate "AMD Elan SC520" |
79 | select CPU_FREQ_TABLE | ||
80 | depends on MELAN | 77 | depends on MELAN |
81 | ---help--- | 78 | ---help--- |
82 | This adds the CPUFreq driver for AMD Elan SC520 processor. | 79 | This adds the CPUFreq driver for AMD Elan SC520 processor. |
@@ -88,7 +85,6 @@ config SC520_CPUFREQ | |||
88 | 85 | ||
89 | config X86_POWERNOW_K6 | 86 | config X86_POWERNOW_K6 |
90 | tristate "AMD Mobile K6-2/K6-3 PowerNow!" | 87 | tristate "AMD Mobile K6-2/K6-3 PowerNow!" |
91 | select CPU_FREQ_TABLE | ||
92 | depends on X86_32 | 88 | depends on X86_32 |
93 | help | 89 | help |
94 | This adds the CPUFreq driver for mobile AMD K6-2+ and mobile | 90 | This adds the CPUFreq driver for mobile AMD K6-2+ and mobile |
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6 | |||
100 | 96 | ||
101 | config X86_POWERNOW_K7 | 97 | config X86_POWERNOW_K7 |
102 | tristate "AMD Mobile Athlon/Duron PowerNow!" | 98 | tristate "AMD Mobile Athlon/Duron PowerNow!" |
103 | select CPU_FREQ_TABLE | ||
104 | depends on X86_32 | 99 | depends on X86_32 |
105 | help | 100 | help |
106 | This adds the CPUFreq driver for mobile AMD K7 mobile processors. | 101 | This adds the CPUFreq driver for mobile AMD K7 mobile processors. |
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI | |||
118 | 113 | ||
119 | config X86_POWERNOW_K8 | 114 | config X86_POWERNOW_K8 |
120 | tristate "AMD Opteron/Athlon64 PowerNow!" | 115 | tristate "AMD Opteron/Athlon64 PowerNow!" |
121 | select CPU_FREQ_TABLE | ||
122 | depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ | 116 | depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ |
123 | help | 117 | help |
124 | This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. | 118 | This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. |
@@ -132,7 +126,6 @@ config X86_POWERNOW_K8 | |||
132 | config X86_AMD_FREQ_SENSITIVITY | 126 | config X86_AMD_FREQ_SENSITIVITY |
133 | tristate "AMD frequency sensitivity feedback powersave bias" | 127 | tristate "AMD frequency sensitivity feedback powersave bias" |
134 | depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD | 128 | depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD |
135 | select CPU_FREQ_TABLE | ||
136 | help | 129 | help |
137 | This adds AMD-specific powersave bias function to the ondemand | 130 | This adds AMD-specific powersave bias function to the ondemand |
138 | governor, which allows it to make more power-conscious frequency | 131 | governor, which allows it to make more power-conscious frequency |
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD | |||
160 | 153 | ||
161 | config X86_SPEEDSTEP_CENTRINO | 154 | config X86_SPEEDSTEP_CENTRINO |
162 | tristate "Intel Enhanced SpeedStep (deprecated)" | 155 | tristate "Intel Enhanced SpeedStep (deprecated)" |
163 | select CPU_FREQ_TABLE | ||
164 | select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 | 156 | select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 |
165 | depends on X86_32 || (X86_64 && ACPI_PROCESSOR) | 157 | depends on X86_32 || (X86_64 && ACPI_PROCESSOR) |
166 | help | 158 | help |
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE | |||
190 | 182 | ||
191 | config X86_SPEEDSTEP_ICH | 183 | config X86_SPEEDSTEP_ICH |
192 | tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" | 184 | tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" |
193 | select CPU_FREQ_TABLE | ||
194 | depends on X86_32 | 185 | depends on X86_32 |
195 | help | 186 | help |
196 | This adds the CPUFreq driver for certain mobile Intel Pentium III | 187 | This adds the CPUFreq driver for certain mobile Intel Pentium III |
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH | |||
204 | 195 | ||
205 | config X86_SPEEDSTEP_SMI | 196 | config X86_SPEEDSTEP_SMI |
206 | tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" | 197 | tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" |
207 | select CPU_FREQ_TABLE | ||
208 | depends on X86_32 | 198 | depends on X86_32 |
209 | help | 199 | help |
210 | This adds the CPUFreq driver for certain mobile Intel Pentium III | 200 | This adds the CPUFreq driver for certain mobile Intel Pentium III |
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI | |||
217 | 207 | ||
218 | config X86_P4_CLOCKMOD | 208 | config X86_P4_CLOCKMOD |
219 | tristate "Intel Pentium 4 clock modulation" | 209 | tristate "Intel Pentium 4 clock modulation" |
220 | select CPU_FREQ_TABLE | ||
221 | help | 210 | help |
222 | This adds the CPUFreq driver for Intel Pentium 4 / XEON | 211 | This adds the CPUFreq driver for Intel Pentium 4 / XEON |
223 | processors. When enabled it will lower CPU temperature by skipping | 212 | processors. When enabled it will lower CPU temperature by skipping |
@@ -259,7 +248,6 @@ config X86_LONGRUN | |||
259 | 248 | ||
260 | config X86_LONGHAUL | 249 | config X86_LONGHAUL |
261 | tristate "VIA Cyrix III Longhaul" | 250 | tristate "VIA Cyrix III Longhaul" |
262 | select CPU_FREQ_TABLE | ||
263 | depends on X86_32 && ACPI_PROCESSOR | 251 | depends on X86_32 && ACPI_PROCESSOR |
264 | help | 252 | help |
265 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, | 253 | This adds the CPUFreq driver for VIA Samuel/CyrixIII, |
@@ -272,7 +260,6 @@ config X86_LONGHAUL | |||
272 | 260 | ||
273 | config X86_E_POWERSAVER | 261 | config X86_E_POWERSAVER |
274 | tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" | 262 | tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" |
275 | select CPU_FREQ_TABLE | ||
276 | depends on X86_32 && ACPI_PROCESSOR | 263 | depends on X86_32 && ACPI_PROCESSOR |
277 | help | 264 | help |
278 | This adds the CPUFreq driver for VIA C7 processors. However, this driver | 265 | This adds the CPUFreq driver for VIA C7 processors. However, this driver |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ad5866c2ada0..b7948bbbbf1f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@ | |||
1 | # CPUfreq core | 1 | # CPUfreq core |
2 | obj-$(CONFIG_CPU_FREQ) += cpufreq.o | 2 | obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o |
3 | # CPUfreq stats | 3 | # CPUfreq stats |
4 | obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o | 4 | obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o |
5 | 5 | ||
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o | |||
11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | 11 | obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o |
12 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o | 12 | obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o |
13 | 13 | ||
14 | # CPUfreq cross-arch helpers | ||
15 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | ||
16 | |||
17 | obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o | 14 | obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o |
18 | 15 | ||
19 | ################################################################################## | 16 | ################################################################################## |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 506fd23c7550..e4bc19552d2b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -424,17 +424,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, | |||
424 | } | 424 | } |
425 | 425 | ||
426 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | 426 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
427 | unsigned int target_freq, unsigned int relation) | 427 | unsigned int index) |
428 | { | 428 | { |
429 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 429 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
430 | struct acpi_processor_performance *perf; | 430 | struct acpi_processor_performance *perf; |
431 | struct cpufreq_freqs freqs; | 431 | struct cpufreq_freqs freqs; |
432 | struct drv_cmd cmd; | 432 | struct drv_cmd cmd; |
433 | unsigned int next_state = 0; /* Index into freq_table */ | ||
434 | unsigned int next_perf_state = 0; /* Index into perf table */ | 433 | unsigned int next_perf_state = 0; /* Index into perf table */ |
435 | int result = 0; | 434 | int result = 0; |
436 | 435 | ||
437 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | 436 | pr_debug("acpi_cpufreq_target %d (%d)\n", |
437 | data->freq_table[index].frequency, policy->cpu); | ||
438 | 438 | ||
439 | if (unlikely(data == NULL || | 439 | if (unlikely(data == NULL || |
440 | data->acpi_data == NULL || data->freq_table == NULL)) { | 440 | data->acpi_data == NULL || data->freq_table == NULL)) { |
@@ -442,16 +442,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
442 | } | 442 | } |
443 | 443 | ||
444 | perf = data->acpi_data; | 444 | perf = data->acpi_data; |
445 | result = cpufreq_frequency_table_target(policy, | 445 | next_perf_state = data->freq_table[index].driver_data; |
446 | data->freq_table, | ||
447 | target_freq, | ||
448 | relation, &next_state); | ||
449 | if (unlikely(result)) { | ||
450 | result = -ENODEV; | ||
451 | goto out; | ||
452 | } | ||
453 | |||
454 | next_perf_state = data->freq_table[next_state].driver_data; | ||
455 | if (perf->state == next_perf_state) { | 446 | if (perf->state == next_perf_state) { |
456 | if (unlikely(data->resume)) { | 447 | if (unlikely(data->resume)) { |
457 | pr_debug("Called after resume, resetting to P%d\n", | 448 | pr_debug("Called after resume, resetting to P%d\n", |
@@ -493,7 +484,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
493 | cmd.mask = cpumask_of(policy->cpu); | 484 | cmd.mask = cpumask_of(policy->cpu); |
494 | 485 | ||
495 | freqs.old = perf->states[perf->state].core_frequency * 1000; | 486 | freqs.old = perf->states[perf->state].core_frequency * 1000; |
496 | freqs.new = data->freq_table[next_state].frequency; | 487 | freqs.new = data->freq_table[index].frequency; |
497 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 488 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
498 | 489 | ||
499 | drv_write(&cmd); | 490 | drv_write(&cmd); |
@@ -516,15 +507,6 @@ out: | |||
516 | return result; | 507 | return result; |
517 | } | 508 | } |
518 | 509 | ||
519 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | ||
520 | { | ||
521 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | ||
522 | |||
523 | pr_debug("acpi_cpufreq_verify\n"); | ||
524 | |||
525 | return cpufreq_frequency_table_verify(policy, data->freq_table); | ||
526 | } | ||
527 | |||
528 | static unsigned long | 510 | static unsigned long |
529 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | 511 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) |
530 | { | 512 | { |
@@ -837,7 +819,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
837 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; | 819 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; |
838 | perf->state = 0; | 820 | perf->state = 0; |
839 | 821 | ||
840 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 822 | result = cpufreq_table_validate_and_show(policy, data->freq_table); |
841 | if (result) | 823 | if (result) |
842 | goto err_freqfree; | 824 | goto err_freqfree; |
843 | 825 | ||
@@ -846,12 +828,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
846 | 828 | ||
847 | switch (perf->control_register.space_id) { | 829 | switch (perf->control_register.space_id) { |
848 | case ACPI_ADR_SPACE_SYSTEM_IO: | 830 | case ACPI_ADR_SPACE_SYSTEM_IO: |
849 | /* Current speed is unknown and not detectable by IO port */ | 831 | /* |
832 | * The core will not set policy->cur, because | ||
833 | * cpufreq_driver->get is NULL, so we need to set it here. | ||
834 | * However, we have to guess it, because the current speed is | ||
835 | * unknown and not detectable via IO ports. | ||
836 | */ | ||
850 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | 837 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); |
851 | break; | 838 | break; |
852 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 839 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
853 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; | 840 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; |
854 | policy->cur = get_cur_freq_on_cpu(cpu); | ||
855 | break; | 841 | break; |
856 | default: | 842 | default: |
857 | break; | 843 | break; |
@@ -868,8 +854,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
868 | (u32) perf->states[i].power, | 854 | (u32) perf->states[i].power, |
869 | (u32) perf->states[i].transition_latency); | 855 | (u32) perf->states[i].transition_latency); |
870 | 856 | ||
871 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
872 | |||
873 | /* | 857 | /* |
874 | * the first call to ->target() should result in us actually | 858 | * the first call to ->target() should result in us actually |
875 | * writing something to the appropriate registers. | 859 | * writing something to the appropriate registers. |
@@ -929,8 +913,8 @@ static struct freq_attr *acpi_cpufreq_attr[] = { | |||
929 | }; | 913 | }; |
930 | 914 | ||
931 | static struct cpufreq_driver acpi_cpufreq_driver = { | 915 | static struct cpufreq_driver acpi_cpufreq_driver = { |
932 | .verify = acpi_cpufreq_verify, | 916 | .verify = cpufreq_generic_frequency_table_verify, |
933 | .target = acpi_cpufreq_target, | 917 | .target_index = acpi_cpufreq_target, |
934 | .bios_limit = acpi_processor_get_bios_limit, | 918 | .bios_limit = acpi_processor_get_bios_limit, |
935 | .init = acpi_cpufreq_cpu_init, | 919 | .init = acpi_cpufreq_cpu_init, |
936 | .exit = acpi_cpufreq_cpu_exit, | 920 | .exit = acpi_cpufreq_cpu_exit, |
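The acpi-cpufreq conversion above shows the shape of the new contract from the "light weight ->target_index()" commit in the log: the core resolves target_freq/relation against the table the driver registered, so the callback receives a pre-validated index and the driver no longer calls cpufreq_frequency_table_target() or carries its own ->verify(). A stripped-down, hypothetical skeleton following the same pattern (the driver name, table contents and hardware-programming step are invented for illustration):

#include <linux/cpufreq.h>

/* Hypothetical two-entry table; frequencies are in kHz. */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 500000 },
	{ .driver_data = 1, .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* 'index' is already validated by the core against the registered table. */
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	struct cpufreq_freqs freqs;

	freqs.old = policy->cur;
	freqs.new = example_freq_table[index].frequency;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to freqs.new here ... */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	return cpufreq_table_validate_and_show(policy, example_freq_table);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpu_init,
};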
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 3549f0784af1..163e3378fe17 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
28 | #include <linux/opp.h> | 28 | #include <linux/pm_opp.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/topology.h> | 30 | #include <linux/topology.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
@@ -47,38 +47,23 @@ static unsigned int bL_cpufreq_get(unsigned int cpu) | |||
47 | return clk_get_rate(clk[cur_cluster]) / 1000; | 47 | return clk_get_rate(clk[cur_cluster]) / 1000; |
48 | } | 48 | } |
49 | 49 | ||
50 | /* Validate policy frequency range */ | ||
51 | static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy) | ||
52 | { | ||
53 | u32 cur_cluster = cpu_to_cluster(policy->cpu); | ||
54 | |||
55 | return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]); | ||
56 | } | ||
57 | |||
58 | /* Set clock frequency */ | 50 | /* Set clock frequency */ |
59 | static int bL_cpufreq_set_target(struct cpufreq_policy *policy, | 51 | static int bL_cpufreq_set_target(struct cpufreq_policy *policy, |
60 | unsigned int target_freq, unsigned int relation) | 52 | unsigned int index) |
61 | { | 53 | { |
62 | struct cpufreq_freqs freqs; | 54 | struct cpufreq_freqs freqs; |
63 | u32 cpu = policy->cpu, freq_tab_idx, cur_cluster; | 55 | u32 cpu = policy->cpu, cur_cluster; |
64 | int ret = 0; | 56 | int ret = 0; |
65 | 57 | ||
66 | cur_cluster = cpu_to_cluster(policy->cpu); | 58 | cur_cluster = cpu_to_cluster(policy->cpu); |
67 | 59 | ||
68 | freqs.old = bL_cpufreq_get(policy->cpu); | 60 | freqs.old = bL_cpufreq_get(policy->cpu); |
69 | 61 | freqs.new = freq_table[cur_cluster][index].frequency; | |
70 | /* Determine valid target frequency using freq_table */ | ||
71 | cpufreq_frequency_table_target(policy, freq_table[cur_cluster], | ||
72 | target_freq, relation, &freq_tab_idx); | ||
73 | freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency; | ||
74 | 62 | ||
75 | pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", | 63 | pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n", |
76 | __func__, cpu, cur_cluster, freqs.old, target_freq, | 64 | __func__, cpu, cur_cluster, freqs.old, freqs.new, |
77 | freqs.new); | 65 | freqs.new); |
78 | 66 | ||
79 | if (freqs.old == freqs.new) | ||
80 | return 0; | ||
81 | |||
82 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 67 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
83 | 68 | ||
84 | ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); | 69 | ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000); |
@@ -98,7 +83,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev) | |||
98 | 83 | ||
99 | if (!atomic_dec_return(&cluster_usage[cluster])) { | 84 | if (!atomic_dec_return(&cluster_usage[cluster])) { |
100 | clk_put(clk[cluster]); | 85 | clk_put(clk[cluster]); |
101 | opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | 86 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); |
102 | dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); | 87 | dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); |
103 | } | 88 | } |
104 | } | 89 | } |
@@ -119,7 +104,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | |||
119 | goto atomic_dec; | 104 | goto atomic_dec; |
120 | } | 105 | } |
121 | 106 | ||
122 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); | 107 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]); |
123 | if (ret) { | 108 | if (ret) { |
124 | dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", | 109 | dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", |
125 | __func__, cpu_dev->id, ret); | 110 | __func__, cpu_dev->id, ret); |
@@ -127,7 +112,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | |||
127 | } | 112 | } |
128 | 113 | ||
129 | name[12] = cluster + '0'; | 114 | name[12] = cluster + '0'; |
130 | clk[cluster] = clk_get_sys(name, NULL); | 115 | clk[cluster] = clk_get(cpu_dev, name); |
131 | if (!IS_ERR(clk[cluster])) { | 116 | if (!IS_ERR(clk[cluster])) { |
132 | dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", | 117 | dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", |
133 | __func__, clk[cluster], freq_table[cluster], | 118 | __func__, clk[cluster], freq_table[cluster], |
@@ -138,7 +123,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev) | |||
138 | dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", | 123 | dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n", |
139 | __func__, cpu_dev->id, cluster); | 124 | __func__, cpu_dev->id, cluster); |
140 | ret = PTR_ERR(clk[cluster]); | 125 | ret = PTR_ERR(clk[cluster]); |
141 | opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); | 126 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); |
142 | 127 | ||
143 | atomic_dec: | 128 | atomic_dec: |
144 | atomic_dec(&cluster_usage[cluster]); | 129 | atomic_dec(&cluster_usage[cluster]); |
@@ -165,7 +150,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
165 | if (ret) | 150 | if (ret) |
166 | return ret; | 151 | return ret; |
167 | 152 | ||
168 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]); | 153 | ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); |
169 | if (ret) { | 154 | if (ret) { |
170 | dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", | 155 | dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", |
171 | policy->cpu, cur_cluster); | 156 | policy->cpu, cur_cluster); |
@@ -173,16 +158,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
173 | return ret; | 158 | return ret; |
174 | } | 159 | } |
175 | 160 | ||
176 | cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu); | ||
177 | |||
178 | if (arm_bL_ops->get_transition_latency) | 161 | if (arm_bL_ops->get_transition_latency) |
179 | policy->cpuinfo.transition_latency = | 162 | policy->cpuinfo.transition_latency = |
180 | arm_bL_ops->get_transition_latency(cpu_dev); | 163 | arm_bL_ops->get_transition_latency(cpu_dev); |
181 | else | 164 | else |
182 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 165 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
183 | 166 | ||
184 | policy->cur = bL_cpufreq_get(policy->cpu); | ||
185 | |||
186 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | 167 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); |
187 | 168 | ||
188 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); | 169 | dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); |
@@ -200,28 +181,23 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) | |||
200 | return -ENODEV; | 181 | return -ENODEV; |
201 | } | 182 | } |
202 | 183 | ||
184 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
203 | put_cluster_clk_and_freq_table(cpu_dev); | 185 | put_cluster_clk_and_freq_table(cpu_dev); |
204 | dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); | 186 | dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); |
205 | 187 | ||
206 | return 0; | 188 | return 0; |
207 | } | 189 | } |
208 | 190 | ||
209 | /* Export freq_table to sysfs */ | ||
210 | static struct freq_attr *bL_cpufreq_attr[] = { | ||
211 | &cpufreq_freq_attr_scaling_available_freqs, | ||
212 | NULL, | ||
213 | }; | ||
214 | |||
215 | static struct cpufreq_driver bL_cpufreq_driver = { | 191 | static struct cpufreq_driver bL_cpufreq_driver = { |
216 | .name = "arm-big-little", | 192 | .name = "arm-big-little", |
217 | .flags = CPUFREQ_STICKY, | 193 | .flags = CPUFREQ_STICKY | |
218 | .verify = bL_cpufreq_verify_policy, | 194 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, |
219 | .target = bL_cpufreq_set_target, | 195 | .verify = cpufreq_generic_frequency_table_verify, |
196 | .target_index = bL_cpufreq_set_target, | ||
220 | .get = bL_cpufreq_get, | 197 | .get = bL_cpufreq_get, |
221 | .init = bL_cpufreq_init, | 198 | .init = bL_cpufreq_init, |
222 | .exit = bL_cpufreq_exit, | 199 | .exit = bL_cpufreq_exit, |
223 | .have_governor_per_policy = true, | 200 | .attr = cpufreq_generic_attr, |
224 | .attr = bL_cpufreq_attr, | ||
225 | }; | 201 | }; |
226 | 202 | ||
227 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) | 203 | int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) |
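Besides the ->target_index() switch, the arm-big-little hunk above folds the old .have_governor_per_policy bool into a driver flag. A short sketch of how the flag is used after this series; the example_ names are made up, and the core-side test it feeds appears verbatim in the cpufreq.c hunk further below.

    #include <linux/cpufreq.h>

    /* Driver side: per-policy governor tunables are now requested with a
     * flag bit instead of the removed .have_governor_per_policy field. */
    static struct cpufreq_driver example_driver = {
    	.name	= "example",		/* hypothetical driver */
    	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
    	/* .verify / .target_index / .init filled in as in the sketch
    	 * after the acpi-cpufreq hunk above */
    };

    /* Core side (see the have_governor_per_policy() hunk below): the
     * registered driver's flags are simply tested. */
    static bool example_uses_per_policy_governors(void)
    {
    	return !!(example_driver.flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
    }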
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 480c0bd0468d..8d9d59108906 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
27 | #include <linux/opp.h> | 27 | #include <linux/pm_opp.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index e0c38d938997..81d07527bde6 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c | |||
@@ -19,18 +19,10 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | static struct clk *cpuclk; | 24 | static struct clk *cpuclk; |
24 | 25 | static struct cpufreq_frequency_table *freq_table; | |
25 | static int at32_verify_speed(struct cpufreq_policy *policy) | ||
26 | { | ||
27 | if (policy->cpu != 0) | ||
28 | return -EINVAL; | ||
29 | |||
30 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | ||
31 | policy->cpuinfo.max_freq); | ||
32 | return 0; | ||
33 | } | ||
34 | 26 | ||
35 | static unsigned int at32_get_speed(unsigned int cpu) | 27 | static unsigned int at32_get_speed(unsigned int cpu) |
36 | { | 28 | { |
@@ -43,25 +35,12 @@ static unsigned int at32_get_speed(unsigned int cpu) | |||
43 | static unsigned int ref_freq; | 35 | static unsigned int ref_freq; |
44 | static unsigned long loops_per_jiffy_ref; | 36 | static unsigned long loops_per_jiffy_ref; |
45 | 37 | ||
46 | static int at32_set_target(struct cpufreq_policy *policy, | 38 | static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) |
47 | unsigned int target_freq, | ||
48 | unsigned int relation) | ||
49 | { | 39 | { |
50 | struct cpufreq_freqs freqs; | 40 | struct cpufreq_freqs freqs; |
51 | long freq; | ||
52 | |||
53 | /* Convert target_freq from kHz to Hz */ | ||
54 | freq = clk_round_rate(cpuclk, target_freq * 1000); | ||
55 | |||
56 | /* Check if policy->min <= new_freq <= policy->max */ | ||
57 | if(freq < (policy->min * 1000) || freq > (policy->max * 1000)) | ||
58 | return -EINVAL; | ||
59 | |||
60 | pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); | ||
61 | 41 | ||
62 | freqs.old = at32_get_speed(0); | 42 | freqs.old = at32_get_speed(0); |
63 | freqs.new = (freq + 500) / 1000; | 43 | freqs.new = freq_table[index].frequency; |
64 | freqs.flags = 0; | ||
65 | 44 | ||
66 | if (!ref_freq) { | 45 | if (!ref_freq) { |
67 | ref_freq = freqs.old; | 46 | ref_freq = freqs.old; |
@@ -72,45 +51,82 @@ static int at32_set_target(struct cpufreq_policy *policy, | |||
72 | if (freqs.old < freqs.new) | 51 | if (freqs.old < freqs.new) |
73 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( | 52 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( |
74 | loops_per_jiffy_ref, ref_freq, freqs.new); | 53 | loops_per_jiffy_ref, ref_freq, freqs.new); |
75 | clk_set_rate(cpuclk, freq); | 54 | clk_set_rate(cpuclk, freqs.new * 1000); |
76 | if (freqs.new < freqs.old) | 55 | if (freqs.new < freqs.old) |
77 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( | 56 | boot_cpu_data.loops_per_jiffy = cpufreq_scale( |
78 | loops_per_jiffy_ref, ref_freq, freqs.new); | 57 | loops_per_jiffy_ref, ref_freq, freqs.new); |
79 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 58 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
80 | 59 | ||
81 | pr_debug("cpufreq: set frequency %lu Hz\n", freq); | 60 | pr_debug("cpufreq: set frequency %u Hz\n", freqs.new * 1000); |
82 | 61 | ||
83 | return 0; | 62 | return 0; |
84 | } | 63 | } |
85 | 64 | ||
86 | static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) | 65 | static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) |
87 | { | 66 | { |
67 | unsigned int frequency, rate, min_freq; | ||
68 | int retval, steps, i; | ||
69 | |||
88 | if (policy->cpu != 0) | 70 | if (policy->cpu != 0) |
89 | return -EINVAL; | 71 | return -EINVAL; |
90 | 72 | ||
91 | cpuclk = clk_get(NULL, "cpu"); | 73 | cpuclk = clk_get(NULL, "cpu"); |
92 | if (IS_ERR(cpuclk)) { | 74 | if (IS_ERR(cpuclk)) { |
93 | pr_debug("cpufreq: could not get CPU clk\n"); | 75 | pr_debug("cpufreq: could not get CPU clk\n"); |
94 | return PTR_ERR(cpuclk); | 76 | retval = PTR_ERR(cpuclk); |
77 | goto out_err; | ||
95 | } | 78 | } |
96 | 79 | ||
97 | policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; | 80 | min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; |
98 | policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 81 | frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
99 | policy->cpuinfo.transition_latency = 0; | 82 | policy->cpuinfo.transition_latency = 0; |
100 | policy->cur = at32_get_speed(0); | ||
101 | policy->min = policy->cpuinfo.min_freq; | ||
102 | policy->max = policy->cpuinfo.max_freq; | ||
103 | 83 | ||
104 | printk("cpufreq: AT32AP CPU frequency driver\n"); | 84 | /* |
85 | * AVR32 CPU frequency rate scales in power of two between maximum and | ||
86 | * minimum, also add space for the table end marker. | ||
87 | * | ||
88 | * Further validate that the frequency is usable, and append it to the | ||
89 | * frequency table. | ||
90 | */ | ||
91 | steps = fls(frequency / min_freq) + 1; | ||
92 | freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table), | ||
93 | GFP_KERNEL); | ||
94 | if (!freq_table) { | ||
95 | retval = -ENOMEM; | ||
96 | goto out_err_put_clk; | ||
97 | } | ||
105 | 98 | ||
106 | return 0; | 99 | for (i = 0; i < (steps - 1); i++) { |
100 | rate = clk_round_rate(cpuclk, frequency * 1000) / 1000; | ||
101 | |||
102 | if (rate != frequency) | ||
103 | freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
104 | else | ||
105 | freq_table[i].frequency = frequency; | ||
106 | |||
107 | frequency /= 2; | ||
108 | } | ||
109 | |||
110 | freq_table[steps - 1].frequency = CPUFREQ_TABLE_END; | ||
111 | |||
112 | retval = cpufreq_table_validate_and_show(policy, freq_table); | ||
113 | if (!retval) { | ||
114 | printk("cpufreq: AT32AP CPU frequency driver\n"); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | kfree(freq_table); | ||
119 | out_err_put_clk: | ||
120 | clk_put(cpuclk); | ||
121 | out_err: | ||
122 | return retval; | ||
107 | } | 123 | } |
108 | 124 | ||
109 | static struct cpufreq_driver at32_driver = { | 125 | static struct cpufreq_driver at32_driver = { |
110 | .name = "at32ap", | 126 | .name = "at32ap", |
111 | .init = at32_cpufreq_driver_init, | 127 | .init = at32_cpufreq_driver_init, |
112 | .verify = at32_verify_speed, | 128 | .verify = cpufreq_generic_frequency_table_verify, |
113 | .target = at32_set_target, | 129 | .target_index = at32_set_target, |
114 | .get = at32_get_speed, | 130 | .get = at32_get_speed, |
115 | .flags = CPUFREQ_STICKY, | 131 | .flags = CPUFREQ_STICKY, |
116 | }; | 132 | }; |
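The at32ap rewrite stops publishing raw cpuinfo limits and instead builds a cpufreq_frequency_table by repeatedly halving the maximum rate, marking rates the clock cannot actually produce as CPUFREQ_ENTRY_INVALID. As a worked example of the sizing arithmetic: with a maximum of 150000 kHz and a minimum of 18750 kHz, frequency / min_freq = 8, fls(8) = 4, so steps = 5 — four candidate power-of-two rates plus the CPUFREQ_TABLE_END sentinel. A condensed sketch of the same construction; probe_rate() is an invented stand-in for clk_round_rate() on the CPU clock.

    #include <linux/bitops.h>
    #include <linux/cpufreq.h>
    #include <linux/slab.h>

    /* Invented: return the rate (kHz) the clock would actually run at. */
    static unsigned int probe_rate(unsigned int khz)
    {
    	return khz;	/* assume every power-of-two step is reachable */
    }

    static struct cpufreq_frequency_table *build_table(unsigned int max_khz,
    						   unsigned int min_khz)
    {
    	struct cpufreq_frequency_table *table;
    	int steps = fls(max_khz / min_khz) + 1;	/* entries + END marker */
    	int i;

    	table = kcalloc(steps, sizeof(*table), GFP_KERNEL);
    	if (!table)
    		return NULL;

    	for (i = 0; i < steps - 1; i++) {
    		unsigned int rounded = probe_rate(max_khz);

    		table[i].frequency = (rounded == max_khz) ?
    				max_khz : CPUFREQ_ENTRY_INVALID;
    		max_khz /= 2;
    	}
    	table[steps - 1].frequency = CPUFREQ_TABLE_END;
    	return table;
    }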
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c index ef05978a7237..12528b28d45d 100644 --- a/drivers/cpufreq/blackfin-cpufreq.c +++ b/drivers/cpufreq/blackfin-cpufreq.c | |||
@@ -127,14 +127,11 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new) | |||
127 | } | 127 | } |
128 | #endif | 128 | #endif |
129 | 129 | ||
130 | static int bfin_target(struct cpufreq_policy *policy, | 130 | static int bfin_target(struct cpufreq_policy *policy, unsigned int index) |
131 | unsigned int target_freq, unsigned int relation) | ||
132 | { | 131 | { |
133 | #ifndef CONFIG_BF60x | 132 | #ifndef CONFIG_BF60x |
134 | unsigned int plldiv; | 133 | unsigned int plldiv; |
135 | #endif | 134 | #endif |
136 | unsigned int index; | ||
137 | unsigned long cclk_hz; | ||
138 | struct cpufreq_freqs freqs; | 135 | struct cpufreq_freqs freqs; |
139 | static unsigned long lpj_ref; | 136 | static unsigned long lpj_ref; |
140 | static unsigned int lpj_ref_freq; | 137 | static unsigned int lpj_ref_freq; |
@@ -144,17 +141,11 @@ static int bfin_target(struct cpufreq_policy *policy, | |||
144 | cycles_t cycles; | 141 | cycles_t cycles; |
145 | #endif | 142 | #endif |
146 | 143 | ||
147 | if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, | ||
148 | relation, &index)) | ||
149 | return -EINVAL; | ||
150 | |||
151 | cclk_hz = bfin_freq_table[index].frequency; | ||
152 | |||
153 | freqs.old = bfin_getfreq_khz(0); | 144 | freqs.old = bfin_getfreq_khz(0); |
154 | freqs.new = cclk_hz; | 145 | freqs.new = bfin_freq_table[index].frequency; |
155 | 146 | ||
156 | pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", | 147 | pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", |
157 | cclk_hz, target_freq, freqs.old); | 148 | freqs.new, freqs.new, freqs.old); |
158 | 149 | ||
159 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 150 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
160 | #ifndef CONFIG_BF60x | 151 | #ifndef CONFIG_BF60x |
@@ -191,11 +182,6 @@ static int bfin_target(struct cpufreq_policy *policy, | |||
191 | return ret; | 182 | return ret; |
192 | } | 183 | } |
193 | 184 | ||
194 | static int bfin_verify_speed(struct cpufreq_policy *policy) | ||
195 | { | ||
196 | return cpufreq_frequency_table_verify(policy, bfin_freq_table); | ||
197 | } | ||
198 | |||
199 | static int __bfin_cpu_init(struct cpufreq_policy *policy) | 185 | static int __bfin_cpu_init(struct cpufreq_policy *policy) |
200 | { | 186 | { |
201 | 187 | ||
@@ -209,23 +195,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy) | |||
209 | 195 | ||
210 | policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ | 196 | policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ |
211 | 197 | ||
212 | policy->cur = cclk; | 198 | return cpufreq_table_validate_and_show(policy, bfin_freq_table); |
213 | cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); | ||
214 | return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); | ||
215 | } | 199 | } |
216 | 200 | ||
217 | static struct freq_attr *bfin_freq_attr[] = { | ||
218 | &cpufreq_freq_attr_scaling_available_freqs, | ||
219 | NULL, | ||
220 | }; | ||
221 | |||
222 | static struct cpufreq_driver bfin_driver = { | 201 | static struct cpufreq_driver bfin_driver = { |
223 | .verify = bfin_verify_speed, | 202 | .verify = cpufreq_generic_frequency_table_verify, |
224 | .target = bfin_target, | 203 | .target_index = bfin_target, |
225 | .get = bfin_getfreq_khz, | 204 | .get = bfin_getfreq_khz, |
226 | .init = __bfin_cpu_init, | 205 | .init = __bfin_cpu_init, |
206 | .exit = cpufreq_generic_exit, | ||
227 | .name = "bfin cpufreq", | 207 | .name = "bfin cpufreq", |
228 | .attr = bfin_freq_attr, | 208 | .attr = cpufreq_generic_attr, |
229 | }; | 209 | }; |
230 | 210 | ||
231 | static int __init bfin_cpu_init(void) | 211 | static int __init bfin_cpu_init(void) |
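Note that at this point in the series the drivers converted to ->target_index() still post the transition notifications themselves; only the old == new short-circuit (visible being deleted in the arm_big_little and cpufreq-cpu0 hunks) moves into the core. The pattern shared by the blackfin, at32ap and big.LITTLE hunks reduces to the sketch below; the clk handling is simplified and the names are illustrative only.

    #include <linux/clk.h>
    #include <linux/cpufreq.h>

    static struct clk *cpu_clk;			/* acquired in init, not shown */
    static struct cpufreq_frequency_table *freq_table;	/* ditto */

    static int sketch_target_index(struct cpufreq_policy *policy,
    			       unsigned int index)
    {
    	struct cpufreq_freqs freqs;
    	int ret;

    	freqs.old = clk_get_rate(cpu_clk) / 1000;
    	freqs.new = freq_table[index].frequency;

    	/* Tell governors/listeners the switch is about to happen... */
    	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

    	ret = clk_set_rate(cpu_clk, freqs.new * 1000);
    	if (ret)
    		freqs.new = freqs.old;	/* report the unchanged rate */

    	/* ...and confirm (or retract) it afterwards. */
    	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

    	return ret;
    }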
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index c522a95c0e16..4dbe7efd86e5 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/opp.h> | 20 | #include <linux/pm_opp.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
@@ -30,34 +30,19 @@ static struct clk *cpu_clk; | |||
30 | static struct regulator *cpu_reg; | 30 | static struct regulator *cpu_reg; |
31 | static struct cpufreq_frequency_table *freq_table; | 31 | static struct cpufreq_frequency_table *freq_table; |
32 | 32 | ||
33 | static int cpu0_verify_speed(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
36 | } | ||
37 | |||
38 | static unsigned int cpu0_get_speed(unsigned int cpu) | 33 | static unsigned int cpu0_get_speed(unsigned int cpu) |
39 | { | 34 | { |
40 | return clk_get_rate(cpu_clk) / 1000; | 35 | return clk_get_rate(cpu_clk) / 1000; |
41 | } | 36 | } |
42 | 37 | ||
43 | static int cpu0_set_target(struct cpufreq_policy *policy, | 38 | static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) |
44 | unsigned int target_freq, unsigned int relation) | ||
45 | { | 39 | { |
46 | struct cpufreq_freqs freqs; | 40 | struct cpufreq_freqs freqs; |
47 | struct opp *opp; | 41 | struct dev_pm_opp *opp; |
48 | unsigned long volt = 0, volt_old = 0, tol = 0; | 42 | unsigned long volt = 0, volt_old = 0, tol = 0; |
49 | long freq_Hz, freq_exact; | 43 | long freq_Hz, freq_exact; |
50 | unsigned int index; | ||
51 | int ret; | 44 | int ret; |
52 | 45 | ||
53 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
54 | relation, &index); | ||
55 | if (ret) { | ||
56 | pr_err("failed to match target freqency %d: %d\n", | ||
57 | target_freq, ret); | ||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); | 46 | freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); |
62 | if (freq_Hz < 0) | 47 | if (freq_Hz < 0) |
63 | freq_Hz = freq_table[index].frequency * 1000; | 48 | freq_Hz = freq_table[index].frequency * 1000; |
@@ -65,14 +50,11 @@ static int cpu0_set_target(struct cpufreq_policy *policy, | |||
65 | freqs.new = freq_Hz / 1000; | 50 | freqs.new = freq_Hz / 1000; |
66 | freqs.old = clk_get_rate(cpu_clk) / 1000; | 51 | freqs.old = clk_get_rate(cpu_clk) / 1000; |
67 | 52 | ||
68 | if (freqs.old == freqs.new) | ||
69 | return 0; | ||
70 | |||
71 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 53 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
72 | 54 | ||
73 | if (!IS_ERR(cpu_reg)) { | 55 | if (!IS_ERR(cpu_reg)) { |
74 | rcu_read_lock(); | 56 | rcu_read_lock(); |
75 | opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); | 57 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); |
76 | if (IS_ERR(opp)) { | 58 | if (IS_ERR(opp)) { |
77 | rcu_read_unlock(); | 59 | rcu_read_unlock(); |
78 | pr_err("failed to find OPP for %ld\n", freq_Hz); | 60 | pr_err("failed to find OPP for %ld\n", freq_Hz); |
@@ -80,7 +62,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, | |||
80 | ret = PTR_ERR(opp); | 62 | ret = PTR_ERR(opp); |
81 | goto post_notify; | 63 | goto post_notify; |
82 | } | 64 | } |
83 | volt = opp_get_voltage(opp); | 65 | volt = dev_pm_opp_get_voltage(opp); |
84 | rcu_read_unlock(); | 66 | rcu_read_unlock(); |
85 | tol = volt * voltage_tolerance / 100; | 67 | tol = volt * voltage_tolerance / 100; |
86 | volt_old = regulator_get_voltage(cpu_reg); | 68 | volt_old = regulator_get_voltage(cpu_reg); |
@@ -127,50 +109,18 @@ post_notify: | |||
127 | 109 | ||
128 | static int cpu0_cpufreq_init(struct cpufreq_policy *policy) | 110 | static int cpu0_cpufreq_init(struct cpufreq_policy *policy) |
129 | { | 111 | { |
130 | int ret; | 112 | return cpufreq_generic_init(policy, freq_table, transition_latency); |
131 | |||
132 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
133 | if (ret) { | ||
134 | pr_err("invalid frequency table: %d\n", ret); | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | policy->cpuinfo.transition_latency = transition_latency; | ||
139 | policy->cur = clk_get_rate(cpu_clk) / 1000; | ||
140 | |||
141 | /* | ||
142 | * The driver only supports the SMP configuartion where all processors | ||
143 | * share the clock and voltage and clock. Use cpufreq affected_cpus | ||
144 | * interface to have all CPUs scaled together. | ||
145 | */ | ||
146 | cpumask_setall(policy->cpus); | ||
147 | |||
148 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
149 | |||
150 | return 0; | ||
151 | } | 113 | } |
152 | 114 | ||
153 | static int cpu0_cpufreq_exit(struct cpufreq_policy *policy) | ||
154 | { | ||
155 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static struct freq_attr *cpu0_cpufreq_attr[] = { | ||
161 | &cpufreq_freq_attr_scaling_available_freqs, | ||
162 | NULL, | ||
163 | }; | ||
164 | |||
165 | static struct cpufreq_driver cpu0_cpufreq_driver = { | 115 | static struct cpufreq_driver cpu0_cpufreq_driver = { |
166 | .flags = CPUFREQ_STICKY, | 116 | .flags = CPUFREQ_STICKY, |
167 | .verify = cpu0_verify_speed, | 117 | .verify = cpufreq_generic_frequency_table_verify, |
168 | .target = cpu0_set_target, | 118 | .target_index = cpu0_set_target, |
169 | .get = cpu0_get_speed, | 119 | .get = cpu0_get_speed, |
170 | .init = cpu0_cpufreq_init, | 120 | .init = cpu0_cpufreq_init, |
171 | .exit = cpu0_cpufreq_exit, | 121 | .exit = cpufreq_generic_exit, |
172 | .name = "generic_cpu0", | 122 | .name = "generic_cpu0", |
173 | .attr = cpu0_cpufreq_attr, | 123 | .attr = cpufreq_generic_attr, |
174 | }; | 124 | }; |
175 | 125 | ||
176 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 126 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
@@ -218,7 +168,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
218 | goto out_put_node; | 168 | goto out_put_node; |
219 | } | 169 | } |
220 | 170 | ||
221 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table); | 171 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
222 | if (ret) { | 172 | if (ret) { |
223 | pr_err("failed to init cpufreq table: %d\n", ret); | 173 | pr_err("failed to init cpufreq table: %d\n", ret); |
224 | goto out_put_node; | 174 | goto out_put_node; |
@@ -230,7 +180,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
230 | transition_latency = CPUFREQ_ETERNAL; | 180 | transition_latency = CPUFREQ_ETERNAL; |
231 | 181 | ||
232 | if (!IS_ERR(cpu_reg)) { | 182 | if (!IS_ERR(cpu_reg)) { |
233 | struct opp *opp; | 183 | struct dev_pm_opp *opp; |
234 | unsigned long min_uV, max_uV; | 184 | unsigned long min_uV, max_uV; |
235 | int i; | 185 | int i; |
236 | 186 | ||
@@ -242,12 +192,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
242 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) | 192 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) |
243 | ; | 193 | ; |
244 | rcu_read_lock(); | 194 | rcu_read_lock(); |
245 | opp = opp_find_freq_exact(cpu_dev, | 195 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
246 | freq_table[0].frequency * 1000, true); | 196 | freq_table[0].frequency * 1000, true); |
247 | min_uV = opp_get_voltage(opp); | 197 | min_uV = dev_pm_opp_get_voltage(opp); |
248 | opp = opp_find_freq_exact(cpu_dev, | 198 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
249 | freq_table[i-1].frequency * 1000, true); | 199 | freq_table[i-1].frequency * 1000, true); |
250 | max_uV = opp_get_voltage(opp); | 200 | max_uV = dev_pm_opp_get_voltage(opp); |
251 | rcu_read_unlock(); | 201 | rcu_read_unlock(); |
252 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); | 202 | ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); |
253 | if (ret > 0) | 203 | if (ret > 0) |
@@ -264,7 +214,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
264 | return 0; | 214 | return 0; |
265 | 215 | ||
266 | out_free_table: | 216 | out_free_table: |
267 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 217 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
268 | out_put_node: | 218 | out_put_node: |
269 | of_node_put(np); | 219 | of_node_put(np); |
270 | return ret; | 220 | return ret; |
@@ -273,7 +223,7 @@ out_put_node: | |||
273 | static int cpu0_cpufreq_remove(struct platform_device *pdev) | 223 | static int cpu0_cpufreq_remove(struct platform_device *pdev) |
274 | { | 224 | { |
275 | cpufreq_unregister_driver(&cpu0_cpufreq_driver); | 225 | cpufreq_unregister_driver(&cpu0_cpufreq_driver); |
276 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 226 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
277 | 227 | ||
278 | return 0; | 228 | return 0; |
279 | } | 229 | } |
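The cpufreq-cpu0 hunks also show the other large rename in this merge: linux/opp.h becomes linux/pm_opp.h, struct opp becomes struct dev_pm_opp, and the opp_* helpers gain a dev_pm_ prefix (opp_get_voltage() -> dev_pm_opp_get_voltage(), opp_find_freq_ceil() -> dev_pm_opp_find_freq_ceil(), and so on). The lookup itself is unchanged and still has to run under rcu_read_lock(), since the OPP pointer is only valid inside the RCU read side. A minimal sketch of the renamed API; cpu_dev and target_hz are placeholders supplied by the caller.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>

    /* Return the supply voltage (uV) needed for at least target_hz, or a
     * negative errno. cpu_dev is the CPU's struct device. */
    static long voltage_for_freq(struct device *cpu_dev, unsigned long target_hz)
    {
    	struct dev_pm_opp *opp;
    	unsigned long volt;

    	rcu_read_lock();
    	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &target_hz);
    	if (IS_ERR(opp)) {
    		rcu_read_unlock();
    		return PTR_ERR(opp);
    	}
    	volt = dev_pm_opp_get_voltage(opp);	/* valid only under RCU */
    	rcu_read_unlock();

    	return volt;
    }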
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index b83d45f68574..a05b876f375e 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c | |||
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy) | |||
303 | if (policy->min < (fsb_pol_max * fid * 100)) | 303 | if (policy->min < (fsb_pol_max * fid * 100)) |
304 | policy->max = (fsb_pol_max + 1) * fid * 100; | 304 | policy->max = (fsb_pol_max + 1) * fid * 100; |
305 | 305 | ||
306 | cpufreq_verify_within_limits(policy, | 306 | cpufreq_verify_within_cpu_limits(policy); |
307 | policy->cpuinfo.min_freq, | ||
308 | policy->cpuinfo.max_freq); | ||
309 | return 0; | 307 | return 0; |
310 | } | 308 | } |
311 | 309 | ||
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) | |||
362 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; | 360 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; |
363 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; | 361 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; |
364 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 362 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
365 | policy->cur = nforce2_get(policy->cpu); | ||
366 | 363 | ||
367 | return 0; | 364 | return 0; |
368 | } | 365 | } |
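nforce2 keeps its custom ->verify() because it is not table based, but the common clamp against cpuinfo.min_freq/max_freq is now a one-line helper. A sketch of how a non-table driver's verify routine looks with it; the driver-specific part is only indicated by a comment.

    #include <linux/cpufreq.h>

    static int sketch_verify(struct cpufreq_policy *policy)
    {
    	/* Driver-specific constraints would be applied to policy->min and
    	 * policy->max here, e.g. rounding to a supported FSB step. */

    	/* Then clamp the request into the hardware limits recorded in
    	 * policy->cpuinfo at init time. */
    	cpufreq_verify_within_cpu_limits(policy);
    	return 0;
    }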
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 04548f7023af..6c9cbb9ebd1f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list); | |||
47 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); | 47 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* | 50 | static inline bool has_target(void) |
51 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | 51 | { |
52 | * all cpufreq/hotplug/workqueue/etc related lock issues. | 52 | return cpufreq_driver->target_index || cpufreq_driver->target; |
53 | * | ||
54 | * The rules for this semaphore: | ||
55 | * - Any routine that wants to read from the policy structure will | ||
56 | * do a down_read on this semaphore. | ||
57 | * - Any routine that will write to the policy structure and/or may take away | ||
58 | * the policy altogether (eg. CPU hotplug), will hold this lock in write | ||
59 | * mode before doing so. | ||
60 | * | ||
61 | * Additional rules: | ||
62 | * - Governor routines that can be called in cpufreq hotplug path should not | ||
63 | * take this sem as top level hotplug notifier handler takes this. | ||
64 | * - Lock should not be held across | ||
65 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); | ||
66 | */ | ||
67 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | ||
68 | |||
69 | #define lock_policy_rwsem(mode, cpu) \ | ||
70 | static int lock_policy_rwsem_##mode(int cpu) \ | ||
71 | { \ | ||
72 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ | ||
73 | BUG_ON(!policy); \ | ||
74 | down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ | ||
75 | \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | lock_policy_rwsem(read, cpu); | ||
80 | lock_policy_rwsem(write, cpu); | ||
81 | |||
82 | #define unlock_policy_rwsem(mode, cpu) \ | ||
83 | static void unlock_policy_rwsem_##mode(int cpu) \ | ||
84 | { \ | ||
85 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \ | ||
86 | BUG_ON(!policy); \ | ||
87 | up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \ | ||
88 | } | 53 | } |
89 | 54 | ||
90 | unlock_policy_rwsem(read, cpu); | ||
91 | unlock_policy_rwsem(write, cpu); | ||
92 | |||
93 | /* | 55 | /* |
94 | * rwsem to guarantee that cpufreq driver module doesn't unload during critical | 56 | * rwsem to guarantee that cpufreq driver module doesn't unload during critical |
95 | * sections | 57 | * sections |
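The cpufreq.c hunk above removes the per-CPU cpu_policy_rwsem array together with its lock_policy_rwsem_read/write wrappers and adds has_target(); the replacement lock is a rwsem embedded in struct cpufreq_policy, initialised in cpufreq_policy_alloc() further down. The locking calls therefore become plain down_read/up_read (or down_write/up_write) on policy->rwsem, as in this reader-side sketch — the field name comes from the series, the function itself is made up.

    #include <linux/cpufreq.h>
    #include <linux/rwsem.h>

    /* Read a policy field consistently with concurrent policy updates. */
    static unsigned int sketch_read_cur(struct cpufreq_policy *policy)
    {
    	unsigned int cur;

    	down_read(&policy->rwsem);	/* was lock_policy_rwsem_read(cpu) */
    	cur = policy->cur;
    	up_read(&policy->rwsem);	/* was unlock_policy_rwsem_read(cpu) */

    	return cur;
    }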
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex); | |||
135 | 97 | ||
136 | bool have_governor_per_policy(void) | 98 | bool have_governor_per_policy(void) |
137 | { | 99 | { |
138 | return cpufreq_driver->have_governor_per_policy; | 100 | return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); |
139 | } | 101 | } |
140 | EXPORT_SYMBOL_GPL(have_governor_per_policy); | 102 | EXPORT_SYMBOL_GPL(have_governor_per_policy); |
141 | 103 | ||
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) | |||
183 | } | 145 | } |
184 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); | 146 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); |
185 | 147 | ||
148 | /* | ||
149 | * This is a generic cpufreq init() routine which can be used by cpufreq | ||
150 | * drivers of SMP systems. It will do the following: | ||
151 | * - validate & show the freq table passed | ||
152 | * - set the policy's transition latency | ||
153 | * - fill policy->cpus with all possible CPUs | ||
154 | */ | ||
155 | int cpufreq_generic_init(struct cpufreq_policy *policy, | ||
156 | struct cpufreq_frequency_table *table, | ||
157 | unsigned int transition_latency) | ||
158 | { | ||
159 | int ret; | ||
160 | |||
161 | ret = cpufreq_table_validate_and_show(policy, table); | ||
162 | if (ret) { | ||
163 | pr_err("%s: invalid frequency table: %d\n", __func__, ret); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | policy->cpuinfo.transition_latency = transition_latency; | ||
168 | |||
169 | /* | ||
170 | * The driver only supports the SMP configuration where all processors | ||
171 | * share the same clock and voltage. | ||
172 | */ | ||
173 | cpumask_setall(policy->cpus); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | EXPORT_SYMBOL_GPL(cpufreq_generic_init); | ||
178 | |||
186 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 179 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
187 | { | 180 | { |
188 | struct cpufreq_policy *policy = NULL; | 181 | struct cpufreq_policy *policy = NULL; |
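With cpufreq_generic_init() exported above, a driver whose CPUs all share one clock can reduce its ->init() callback to a single call, exactly as cpufreq-cpu0 does a few hunks back. A sketch; the table and latency value are placeholders.

    #include <linux/cpufreq.h>

    static struct cpufreq_frequency_table *sketch_table;	/* filled elsewhere */

    static int sketch_cpufreq_init(struct cpufreq_policy *policy)
    {
    	/* Validates and publishes the table, records the transition
    	 * latency and marks every CPU as sharing this policy. */
    	return cpufreq_generic_init(policy, sketch_table, 300000 /* ns */);
    }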
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
363 | *policy = CPUFREQ_POLICY_POWERSAVE; | 356 | *policy = CPUFREQ_POLICY_POWERSAVE; |
364 | err = 0; | 357 | err = 0; |
365 | } | 358 | } |
366 | } else if (cpufreq_driver->target) { | 359 | } else if (has_target()) { |
367 | struct cpufreq_governor *t; | 360 | struct cpufreq_governor *t; |
368 | 361 | ||
369 | mutex_lock(&cpufreq_governor_mutex); | 362 | mutex_lock(&cpufreq_governor_mutex); |
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min); | |||
414 | show_one(scaling_max_freq, max); | 407 | show_one(scaling_max_freq, max); |
415 | show_one(scaling_cur_freq, cur); | 408 | show_one(scaling_cur_freq, cur); |
416 | 409 | ||
417 | static int __cpufreq_set_policy(struct cpufreq_policy *policy, | 410 | static int cpufreq_set_policy(struct cpufreq_policy *policy, |
418 | struct cpufreq_policy *new_policy); | 411 | struct cpufreq_policy *new_policy); |
419 | 412 | ||
420 | /** | 413 | /** |
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \ | |||
435 | if (ret != 1) \ | 428 | if (ret != 1) \ |
436 | return -EINVAL; \ | 429 | return -EINVAL; \ |
437 | \ | 430 | \ |
438 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 431 | ret = cpufreq_set_policy(policy, &new_policy); \ |
439 | policy->user_policy.object = policy->object; \ | 432 | policy->user_policy.object = policy->object; \ |
440 | \ | 433 | \ |
441 | return ret ? ret : count; \ | 434 | return ret ? ret : count; \ |
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | |||
493 | &new_policy.governor)) | 486 | &new_policy.governor)) |
494 | return -EINVAL; | 487 | return -EINVAL; |
495 | 488 | ||
496 | /* | 489 | ret = cpufreq_set_policy(policy, &new_policy); |
497 | * Do not use cpufreq_set_policy here or the user_policy.max | ||
498 | * will be wrongly overridden | ||
499 | */ | ||
500 | ret = __cpufreq_set_policy(policy, &new_policy); | ||
501 | 490 | ||
502 | policy->user_policy.policy = policy->policy; | 491 | policy->user_policy.policy = policy->policy; |
503 | policy->user_policy.governor = policy->governor; | 492 | policy->user_policy.governor = policy->governor; |
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | |||
525 | ssize_t i = 0; | 514 | ssize_t i = 0; |
526 | struct cpufreq_governor *t; | 515 | struct cpufreq_governor *t; |
527 | 516 | ||
528 | if (!cpufreq_driver->target) { | 517 | if (!has_target()) { |
529 | i += sprintf(buf, "performance powersave"); | 518 | i += sprintf(buf, "performance powersave"); |
530 | goto out; | 519 | goto out; |
531 | } | 520 | } |
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | |||
653 | { | 642 | { |
654 | struct cpufreq_policy *policy = to_policy(kobj); | 643 | struct cpufreq_policy *policy = to_policy(kobj); |
655 | struct freq_attr *fattr = to_attr(attr); | 644 | struct freq_attr *fattr = to_attr(attr); |
656 | ssize_t ret = -EINVAL; | 645 | ssize_t ret; |
657 | 646 | ||
658 | if (!down_read_trylock(&cpufreq_rwsem)) | 647 | if (!down_read_trylock(&cpufreq_rwsem)) |
659 | goto exit; | 648 | return -EINVAL; |
660 | 649 | ||
661 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 650 | down_read(&policy->rwsem); |
662 | goto up_read; | ||
663 | 651 | ||
664 | if (fattr->show) | 652 | if (fattr->show) |
665 | ret = fattr->show(policy, buf); | 653 | ret = fattr->show(policy, buf); |
666 | else | 654 | else |
667 | ret = -EIO; | 655 | ret = -EIO; |
668 | 656 | ||
669 | unlock_policy_rwsem_read(policy->cpu); | 657 | up_read(&policy->rwsem); |
670 | |||
671 | up_read: | ||
672 | up_read(&cpufreq_rwsem); | 658 | up_read(&cpufreq_rwsem); |
673 | exit: | 659 | |
674 | return ret; | 660 | return ret; |
675 | } | 661 | } |
676 | 662 | ||
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, | |||
689 | if (!down_read_trylock(&cpufreq_rwsem)) | 675 | if (!down_read_trylock(&cpufreq_rwsem)) |
690 | goto unlock; | 676 | goto unlock; |
691 | 677 | ||
692 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 678 | down_write(&policy->rwsem); |
693 | goto up_read; | ||
694 | 679 | ||
695 | if (fattr->store) | 680 | if (fattr->store) |
696 | ret = fattr->store(policy, buf, count); | 681 | ret = fattr->store(policy, buf, count); |
697 | else | 682 | else |
698 | ret = -EIO; | 683 | ret = -EIO; |
699 | 684 | ||
700 | unlock_policy_rwsem_write(policy->cpu); | 685 | up_write(&policy->rwsem); |
701 | 686 | ||
702 | up_read: | ||
703 | up_read(&cpufreq_rwsem); | 687 | up_read(&cpufreq_rwsem); |
704 | unlock: | 688 | unlock: |
705 | put_online_cpus(); | 689 | put_online_cpus(); |
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, | |||
815 | if (ret) | 799 | if (ret) |
816 | goto err_out_kobj_put; | 800 | goto err_out_kobj_put; |
817 | } | 801 | } |
818 | if (cpufreq_driver->target) { | 802 | if (has_target()) { |
819 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 803 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
820 | if (ret) | 804 | if (ret) |
821 | goto err_out_kobj_put; | 805 | goto err_out_kobj_put; |
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
844 | int ret = 0; | 828 | int ret = 0; |
845 | 829 | ||
846 | memcpy(&new_policy, policy, sizeof(*policy)); | 830 | memcpy(&new_policy, policy, sizeof(*policy)); |
847 | /* assure that the starting sequence is run in __cpufreq_set_policy */ | 831 | /* assure that the starting sequence is run in cpufreq_set_policy */ |
848 | policy->governor = NULL; | 832 | policy->governor = NULL; |
849 | 833 | ||
850 | /* set default policy */ | 834 | /* set default policy */ |
851 | ret = __cpufreq_set_policy(policy, &new_policy); | 835 | ret = cpufreq_set_policy(policy, &new_policy); |
852 | policy->user_policy.policy = policy->policy; | 836 | policy->user_policy.policy = policy->policy; |
853 | policy->user_policy.governor = policy->governor; | 837 | policy->user_policy.governor = policy->governor; |
854 | 838 | ||
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
864 | unsigned int cpu, struct device *dev, | 848 | unsigned int cpu, struct device *dev, |
865 | bool frozen) | 849 | bool frozen) |
866 | { | 850 | { |
867 | int ret = 0, has_target = !!cpufreq_driver->target; | 851 | int ret = 0; |
868 | unsigned long flags; | 852 | unsigned long flags; |
869 | 853 | ||
870 | if (has_target) { | 854 | if (has_target()) { |
871 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 855 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
872 | if (ret) { | 856 | if (ret) { |
873 | pr_err("%s: Failed to stop governor\n", __func__); | 857 | pr_err("%s: Failed to stop governor\n", __func__); |
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
875 | } | 859 | } |
876 | } | 860 | } |
877 | 861 | ||
878 | lock_policy_rwsem_write(policy->cpu); | 862 | down_write(&policy->rwsem); |
879 | 863 | ||
880 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 864 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
881 | 865 | ||
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
883 | per_cpu(cpufreq_cpu_data, cpu) = policy; | 867 | per_cpu(cpufreq_cpu_data, cpu) = policy; |
884 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 868 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
885 | 869 | ||
886 | unlock_policy_rwsem_write(policy->cpu); | 870 | up_write(&policy->rwsem); |
887 | 871 | ||
888 | if (has_target) { | 872 | if (has_target()) { |
889 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || | 873 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || |
890 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { | 874 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { |
891 | pr_err("%s: Failed to start governor\n", __func__); | 875 | pr_err("%s: Failed to start governor\n", __func__); |
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) | |||
930 | goto err_free_cpumask; | 914 | goto err_free_cpumask; |
931 | 915 | ||
932 | INIT_LIST_HEAD(&policy->policy_list); | 916 | INIT_LIST_HEAD(&policy->policy_list); |
917 | init_rwsem(&policy->rwsem); | ||
918 | |||
933 | return policy; | 919 | return policy; |
934 | 920 | ||
935 | err_free_cpumask: | 921 | err_free_cpumask: |
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) | |||
949 | 935 | ||
950 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) | 936 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) |
951 | { | 937 | { |
952 | if (cpu == policy->cpu) | 938 | if (WARN_ON(cpu == policy->cpu)) |
953 | return; | 939 | return; |
954 | 940 | ||
955 | /* | 941 | down_write(&policy->rwsem); |
956 | * Take direct locks as lock_policy_rwsem_write wouldn't work here. | ||
957 | * Also lock for last cpu is enough here as contention will happen only | ||
958 | * after policy->cpu is changed and after it is changed, other threads | ||
959 | * will try to acquire lock for new cpu. And policy is already updated | ||
960 | * by then. | ||
961 | */ | ||
962 | down_write(&per_cpu(cpu_policy_rwsem, policy->cpu)); | ||
963 | 942 | ||
964 | policy->last_cpu = policy->cpu; | 943 | policy->last_cpu = policy->cpu; |
965 | policy->cpu = cpu; | 944 | policy->cpu = cpu; |
966 | 945 | ||
967 | up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu)); | 946 | up_write(&policy->rwsem); |
968 | 947 | ||
969 | #ifdef CONFIG_CPU_FREQ_TABLE | ||
970 | cpufreq_frequency_table_update_policy_cpu(policy); | 948 | cpufreq_frequency_table_update_policy_cpu(policy); |
971 | #endif | ||
972 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 949 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
973 | CPUFREQ_UPDATE_POLICY_CPU, policy); | 950 | CPUFREQ_UPDATE_POLICY_CPU, policy); |
974 | } | 951 | } |
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1053 | goto err_set_policy_cpu; | 1030 | goto err_set_policy_cpu; |
1054 | } | 1031 | } |
1055 | 1032 | ||
1033 | if (cpufreq_driver->get) { | ||
1034 | policy->cur = cpufreq_driver->get(policy->cpu); | ||
1035 | if (!policy->cur) { | ||
1036 | pr_err("%s: ->get() failed\n", __func__); | ||
1037 | goto err_get_freq; | ||
1038 | } | ||
1039 | } | ||
1040 | |||
1056 | /* related cpus should atleast have policy->cpus */ | 1041 | /* related cpus should atleast have policy->cpus */ |
1057 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | 1042 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); |
1058 | 1043 | ||
@@ -1107,6 +1092,9 @@ err_out_unregister: | |||
1107 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1092 | per_cpu(cpufreq_cpu_data, j) = NULL; |
1108 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1093 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1109 | 1094 | ||
1095 | err_get_freq: | ||
1096 | if (cpufreq_driver->exit) | ||
1097 | cpufreq_driver->exit(policy); | ||
1110 | err_set_policy_cpu: | 1098 | err_set_policy_cpu: |
1111 | cpufreq_policy_free(policy); | 1099 | cpufreq_policy_free(policy); |
1112 | nomem_out: | 1100 | nomem_out: |
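This hunk is why the policy->cur assignments disappear from the nforce2, at32ap, blackfin and cpufreq-cpu0 init routines earlier in the diff: the core now fills policy->cur itself from the driver's ->get() callback right after ->init(), and treats a zero return as a probe failure. A converted driver only has to supply an accurate ->get(), for example the clk-based one below; the clock handle is assumed to have been acquired in ->init().

    #include <linux/clk.h>
    #include <linux/cpufreq.h>

    static struct clk *cpu_clk;	/* acquired in the driver's ->init() */

    /* Current frequency in kHz for the given CPU; the core stores the
     * result in policy->cur during __cpufreq_add_dev(). */
    static unsigned int sketch_get(unsigned int cpu)
    {
    	return clk_get_rate(cpu_clk) / 1000;
    }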
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, | |||
1147 | if (ret) { | 1135 | if (ret) { |
1148 | pr_err("%s: Failed to move kobj: %d", __func__, ret); | 1136 | pr_err("%s: Failed to move kobj: %d", __func__, ret); |
1149 | 1137 | ||
1150 | WARN_ON(lock_policy_rwsem_write(old_cpu)); | 1138 | down_write(&policy->rwsem); |
1151 | cpumask_set_cpu(old_cpu, policy->cpus); | 1139 | cpumask_set_cpu(old_cpu, policy->cpus); |
1152 | unlock_policy_rwsem_write(old_cpu); | 1140 | up_write(&policy->rwsem); |
1153 | 1141 | ||
1154 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, | 1142 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, |
1155 | "cpufreq"); | 1143 | "cpufreq"); |
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1186 | return -EINVAL; | 1174 | return -EINVAL; |
1187 | } | 1175 | } |
1188 | 1176 | ||
1189 | if (cpufreq_driver->target) { | 1177 | if (has_target()) { |
1190 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 1178 | ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
1191 | if (ret) { | 1179 | if (ret) { |
1192 | pr_err("%s: Failed to stop governor\n", __func__); | 1180 | pr_err("%s: Failed to stop governor\n", __func__); |
@@ -1200,22 +1188,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1200 | policy->governor->name, CPUFREQ_NAME_LEN); | 1188 | policy->governor->name, CPUFREQ_NAME_LEN); |
1201 | #endif | 1189 | #endif |
1202 | 1190 | ||
1203 | lock_policy_rwsem_read(cpu); | 1191 | down_read(&policy->rwsem); |
1204 | cpus = cpumask_weight(policy->cpus); | 1192 | cpus = cpumask_weight(policy->cpus); |
1205 | unlock_policy_rwsem_read(cpu); | 1193 | up_read(&policy->rwsem); |
1206 | 1194 | ||
1207 | if (cpu != policy->cpu) { | 1195 | if (cpu != policy->cpu) { |
1208 | if (!frozen) | 1196 | if (!frozen) |
1209 | sysfs_remove_link(&dev->kobj, "cpufreq"); | 1197 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
1210 | } else if (cpus > 1) { | 1198 | } else if (cpus > 1) { |
1211 | |||
1212 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); | 1199 | new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); |
1213 | if (new_cpu >= 0) { | 1200 | if (new_cpu >= 0) { |
1214 | update_policy_cpu(policy, new_cpu); | 1201 | update_policy_cpu(policy, new_cpu); |
1215 | 1202 | ||
1216 | if (!frozen) { | 1203 | if (!frozen) { |
1217 | pr_debug("%s: policy Kobject moved to cpu: %d " | 1204 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", |
1218 | "from: %d\n",__func__, new_cpu, cpu); | 1205 | __func__, new_cpu, cpu); |
1219 | } | 1206 | } |
1220 | } | 1207 | } |
1221 | } | 1208 | } |
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1243 | return -EINVAL; | 1230 | return -EINVAL; |
1244 | } | 1231 | } |
1245 | 1232 | ||
1246 | WARN_ON(lock_policy_rwsem_write(cpu)); | 1233 | down_write(&policy->rwsem); |
1247 | cpus = cpumask_weight(policy->cpus); | 1234 | cpus = cpumask_weight(policy->cpus); |
1248 | 1235 | ||
1249 | if (cpus > 1) | 1236 | if (cpus > 1) |
1250 | cpumask_clear_cpu(cpu, policy->cpus); | 1237 | cpumask_clear_cpu(cpu, policy->cpus); |
1251 | unlock_policy_rwsem_write(cpu); | 1238 | up_write(&policy->rwsem); |
1252 | 1239 | ||
1253 | /* If cpu is last user of policy, free policy */ | 1240 | /* If cpu is last user of policy, free policy */ |
1254 | if (cpus == 1) { | 1241 | if (cpus == 1) { |
1255 | if (cpufreq_driver->target) { | 1242 | if (has_target()) { |
1256 | ret = __cpufreq_governor(policy, | 1243 | ret = __cpufreq_governor(policy, |
1257 | CPUFREQ_GOV_POLICY_EXIT); | 1244 | CPUFREQ_GOV_POLICY_EXIT); |
1258 | if (ret) { | 1245 | if (ret) { |
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1263 | } | 1250 | } |
1264 | 1251 | ||
1265 | if (!frozen) { | 1252 | if (!frozen) { |
1266 | lock_policy_rwsem_read(cpu); | 1253 | down_read(&policy->rwsem); |
1267 | kobj = &policy->kobj; | 1254 | kobj = &policy->kobj; |
1268 | cmp = &policy->kobj_unregister; | 1255 | cmp = &policy->kobj_unregister; |
1269 | unlock_policy_rwsem_read(cpu); | 1256 | up_read(&policy->rwsem); |
1270 | kobject_put(kobj); | 1257 | kobject_put(kobj); |
1271 | 1258 | ||
1272 | /* | 1259 | /* |
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1295 | if (!frozen) | 1282 | if (!frozen) |
1296 | cpufreq_policy_free(policy); | 1283 | cpufreq_policy_free(policy); |
1297 | } else { | 1284 | } else { |
1298 | if (cpufreq_driver->target) { | 1285 | if (has_target()) { |
1299 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || | 1286 | if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || |
1300 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { | 1287 | (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) { |
1301 | pr_err("%s: Failed to start governor\n", | 1288 | pr_err("%s: Failed to start governor\n", |
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1310 | } | 1297 | } |
1311 | 1298 | ||
1312 | /** | 1299 | /** |
1313 | * __cpufreq_remove_dev - remove a CPU device | 1300 | * cpufreq_remove_dev - remove a CPU device |
1314 | * | 1301 | * |
1315 | * Removes the cpufreq interface for a CPU device. | 1302 | * Removes the cpufreq interface for a CPU device. |
1316 | * Caller should already have policy_rwsem in write mode for this CPU. | ||
1317 | * This routine frees the rwsem before returning. | ||
1318 | */ | 1303 | */ |
1319 | static inline int __cpufreq_remove_dev(struct device *dev, | ||
1320 | struct subsys_interface *sif, | ||
1321 | bool frozen) | ||
1322 | { | ||
1323 | int ret; | ||
1324 | |||
1325 | ret = __cpufreq_remove_dev_prepare(dev, sif, frozen); | ||
1326 | |||
1327 | if (!ret) | ||
1328 | ret = __cpufreq_remove_dev_finish(dev, sif, frozen); | ||
1329 | |||
1330 | return ret; | ||
1331 | } | ||
1332 | |||
1333 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | 1304 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) |
1334 | { | 1305 | { |
1335 | unsigned int cpu = dev->id; | 1306 | unsigned int cpu = dev->id; |
1336 | int retval; | 1307 | int ret; |
1337 | 1308 | ||
1338 | if (cpu_is_offline(cpu)) | 1309 | if (cpu_is_offline(cpu)) |
1339 | return 0; | 1310 | return 0; |
1340 | 1311 | ||
1341 | retval = __cpufreq_remove_dev(dev, sif, false); | 1312 | ret = __cpufreq_remove_dev_prepare(dev, sif, false); |
1342 | return retval; | 1313 | |
1314 | if (!ret) | ||
1315 | ret = __cpufreq_remove_dev_finish(dev, sif, false); | ||
1316 | |||
1317 | return ret; | ||
1343 | } | 1318 | } |
1344 | 1319 | ||
1345 | static void handle_update(struct work_struct *work) | 1320 | static void handle_update(struct work_struct *work) |
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu) | |||
1458 | */ | 1433 | */ |
1459 | unsigned int cpufreq_get(unsigned int cpu) | 1434 | unsigned int cpufreq_get(unsigned int cpu) |
1460 | { | 1435 | { |
1436 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1461 | unsigned int ret_freq = 0; | 1437 | unsigned int ret_freq = 0; |
1462 | 1438 | ||
1463 | if (cpufreq_disabled() || !cpufreq_driver) | 1439 | if (cpufreq_disabled() || !cpufreq_driver) |
1464 | return -ENOENT; | 1440 | return -ENOENT; |
1465 | 1441 | ||
1442 | BUG_ON(!policy); | ||
1443 | |||
1466 | if (!down_read_trylock(&cpufreq_rwsem)) | 1444 | if (!down_read_trylock(&cpufreq_rwsem)) |
1467 | return 0; | 1445 | return 0; |
1468 | 1446 | ||
1469 | if (unlikely(lock_policy_rwsem_read(cpu))) | 1447 | down_read(&policy->rwsem); |
1470 | goto out_policy; | ||
1471 | 1448 | ||
1472 | ret_freq = __cpufreq_get(cpu); | 1449 | ret_freq = __cpufreq_get(cpu); |
1473 | 1450 | ||
1474 | unlock_policy_rwsem_read(cpu); | 1451 | up_read(&policy->rwsem); |
1475 | |||
1476 | out_policy: | ||
1477 | up_read(&cpufreq_rwsem); | 1452 | up_read(&cpufreq_rwsem); |
1478 | 1453 | ||
1479 | return ret_freq; | 1454 | return ret_freq; |
@@ -1681,12 +1656,41 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1681 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", | 1656 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", |
1682 | policy->cpu, target_freq, relation, old_target_freq); | 1657 | policy->cpu, target_freq, relation, old_target_freq); |
1683 | 1658 | ||
1659 | /* | ||
1660 | * This might look like a redundant call as we are checking it again | ||
1661 | * after finding index. But it is left intentionally for cases where | ||
1662 | * exactly same freq is called again and so we can save on few function | ||
1663 | * calls. | ||
1664 | */ | ||
1684 | if (target_freq == policy->cur) | 1665 | if (target_freq == policy->cur) |
1685 | return 0; | 1666 | return 0; |
1686 | 1667 | ||
1687 | if (cpufreq_driver->target) | 1668 | if (cpufreq_driver->target) |
1688 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1669 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1670 | else if (cpufreq_driver->target_index) { | ||
1671 | struct cpufreq_frequency_table *freq_table; | ||
1672 | int index; | ||
1673 | |||
1674 | freq_table = cpufreq_frequency_get_table(policy->cpu); | ||
1675 | if (unlikely(!freq_table)) { | ||
1676 | pr_err("%s: Unable to find freq_table\n", __func__); | ||
1677 | goto out; | ||
1678 | } | ||
1679 | |||
1680 | retval = cpufreq_frequency_table_target(policy, freq_table, | ||
1681 | target_freq, relation, &index); | ||
1682 | if (unlikely(retval)) { | ||
1683 | pr_err("%s: Unable to find matching freq\n", __func__); | ||
1684 | goto out; | ||
1685 | } | ||
1689 | 1686 | ||
1687 | if (freq_table[index].frequency == policy->cur) | ||
1688 | retval = 0; | ||
1689 | else | ||
1690 | retval = cpufreq_driver->target_index(policy, index); | ||
1691 | } | ||
1692 | |||
1693 | out: | ||
1690 | return retval; | 1694 | return retval; |
1691 | } | 1695 | } |
1692 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); | 1696 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); |
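From a governor's point of view nothing changes: it still calls __cpufreq_driver_target(policy, freq, relation), and the core now resolves the frequency to a table index via cpufreq_frequency_table_target() before invoking ->target_index(), falling back to the legacy ->target() when a driver still provides one. A caller-side sketch; the "1 GHz or the next lower supported rate" request is arbitrary, and governors call this with policy->rwsem already held.

    #include <linux/cpufreq.h>

    /* Ask for 1000000 kHz, or the closest supported frequency at or below
     * it; with a ->target_index() driver the core does the table lookup
     * and passes only the resulting index to the driver. */
    static int sketch_request_1ghz(struct cpufreq_policy *policy)
    {
    	return __cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_H);
    }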
@@ -1697,14 +1701,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1697 | { | 1701 | { |
1698 | int ret = -EINVAL; | 1702 | int ret = -EINVAL; |
1699 | 1703 | ||
1700 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) | 1704 | down_write(&policy->rwsem); |
1701 | goto fail; | ||
1702 | 1705 | ||
1703 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1706 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1704 | 1707 | ||
1705 | unlock_policy_rwsem_write(policy->cpu); | 1708 | up_write(&policy->rwsem); |
1706 | 1709 | ||
1707 | fail: | ||
1708 | return ret; | 1710 | return ret; |
1709 | } | 1711 | } |
1710 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1712 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
@@ -1871,10 +1873,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
1871 | EXPORT_SYMBOL(cpufreq_get_policy); | 1873 | EXPORT_SYMBOL(cpufreq_get_policy); |
1872 | 1874 | ||
1873 | /* | 1875 | /* |
1874 | * data : current policy. | 1876 | * policy : current policy. |
1875 | * policy : policy to be set. | 1877 | * new_policy: policy to be set. |
1876 | */ | 1878 | */ |
1877 | static int __cpufreq_set_policy(struct cpufreq_policy *policy, | 1879 | static int cpufreq_set_policy(struct cpufreq_policy *policy, |
1878 | struct cpufreq_policy *new_policy) | 1880 | struct cpufreq_policy *new_policy) |
1879 | { | 1881 | { |
1880 | int ret = 0, failed = 1; | 1882 | int ret = 0, failed = 1; |
@@ -1934,10 +1936,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, | |||
1934 | /* end old governor */ | 1936 | /* end old governor */ |
1935 | if (policy->governor) { | 1937 | if (policy->governor) { |
1936 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 1938 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
1937 | unlock_policy_rwsem_write(new_policy->cpu); | 1939 | up_write(&policy->rwsem); |
1938 | __cpufreq_governor(policy, | 1940 | __cpufreq_governor(policy, |
1939 | CPUFREQ_GOV_POLICY_EXIT); | 1941 | CPUFREQ_GOV_POLICY_EXIT); |
1940 | lock_policy_rwsem_write(new_policy->cpu); | 1942 | down_write(&policy->rwsem); |
1941 | } | 1943 | } |
1942 | 1944 | ||
1943 | /* start new governor */ | 1945 | /* start new governor */ |
@@ -1946,10 +1948,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy, | |||
1946 | if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { | 1948 | if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { |
1947 | failed = 0; | 1949 | failed = 0; |
1948 | } else { | 1950 | } else { |
1949 | unlock_policy_rwsem_write(new_policy->cpu); | 1951 | up_write(&policy->rwsem); |
1950 | __cpufreq_governor(policy, | 1952 | __cpufreq_governor(policy, |
1951 | CPUFREQ_GOV_POLICY_EXIT); | 1953 | CPUFREQ_GOV_POLICY_EXIT); |
1952 | lock_policy_rwsem_write(new_policy->cpu); | 1954 | down_write(&policy->rwsem); |
1953 | } | 1955 | } |
1954 | } | 1956 | } |
1955 | 1957 | ||
@@ -1995,10 +1997,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1995 | goto no_policy; | 1997 | goto no_policy; |
1996 | } | 1998 | } |
1997 | 1999 | ||
1998 | if (unlikely(lock_policy_rwsem_write(cpu))) { | 2000 | down_write(&policy->rwsem); |
1999 | ret = -EINVAL; | ||
2000 | goto fail; | ||
2001 | } | ||
2002 | 2001 | ||
2003 | pr_debug("updating policy for CPU %u\n", cpu); | 2002 | pr_debug("updating policy for CPU %u\n", cpu); |
2004 | memcpy(&new_policy, policy, sizeof(*policy)); | 2003 | memcpy(&new_policy, policy, sizeof(*policy)); |
@@ -2017,17 +2016,16 @@ int cpufreq_update_policy(unsigned int cpu) | |||
2017 | pr_debug("Driver did not initialize current freq"); | 2016 | pr_debug("Driver did not initialize current freq"); |
2018 | policy->cur = new_policy.cur; | 2017 | policy->cur = new_policy.cur; |
2019 | } else { | 2018 | } else { |
2020 | if (policy->cur != new_policy.cur && cpufreq_driver->target) | 2019 | if (policy->cur != new_policy.cur && has_target()) |
2021 | cpufreq_out_of_sync(cpu, policy->cur, | 2020 | cpufreq_out_of_sync(cpu, policy->cur, |
2022 | new_policy.cur); | 2021 | new_policy.cur); |
2023 | } | 2022 | } |
2024 | } | 2023 | } |
2025 | 2024 | ||
2026 | ret = __cpufreq_set_policy(policy, &new_policy); | 2025 | ret = cpufreq_set_policy(policy, &new_policy); |
2027 | 2026 | ||
2028 | unlock_policy_rwsem_write(cpu); | 2027 | up_write(&policy->rwsem); |
2029 | 2028 | ||
2030 | fail: | ||
2031 | cpufreq_cpu_put(policy); | 2029 | cpufreq_cpu_put(policy); |
2032 | no_policy: | 2030 | no_policy: |
2033 | return ret; | 2031 | return ret; |
@@ -2096,7 +2094,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2096 | return -ENODEV; | 2094 | return -ENODEV; |
2097 | 2095 | ||
2098 | if (!driver_data || !driver_data->verify || !driver_data->init || | 2096 | if (!driver_data || !driver_data->verify || !driver_data->init || |
2099 | ((!driver_data->setpolicy) && (!driver_data->target))) | 2097 | !(driver_data->setpolicy || driver_data->target_index || |
2098 | driver_data->target)) | ||
2100 | return -EINVAL; | 2099 | return -EINVAL; |
2101 | 2100 | ||
2102 | pr_debug("trying to register driver %s\n", driver_data->name); | 2101 | pr_debug("trying to register driver %s\n", driver_data->name); |
@@ -2183,14 +2182,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | |||
2183 | 2182 | ||
2184 | static int __init cpufreq_core_init(void) | 2183 | static int __init cpufreq_core_init(void) |
2185 | { | 2184 | { |
2186 | int cpu; | ||
2187 | |||
2188 | if (cpufreq_disabled()) | 2185 | if (cpufreq_disabled()) |
2189 | return -ENODEV; | 2186 | return -ENODEV; |
2190 | 2187 | ||
2191 | for_each_possible_cpu(cpu) | ||
2192 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | ||
2193 | |||
2194 | cpufreq_global_kobject = kobject_create(); | 2188 | cpufreq_global_kobject = kobject_create(); |
2195 | BUG_ON(!cpufreq_global_kobject); | 2189 | BUG_ON(!cpufreq_global_kobject); |
2196 | register_syscore_ops(&cpufreq_syscore_ops); | 2190 | register_syscore_ops(&cpufreq_syscore_ops); |
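With the per-policy rwsem and the new ->target_index() callback in place, the registration check above accepts a driver that supplies ->verify, ->init and any one of ->setpolicy, ->target or ->target_index. A minimal sketch of a driver structure that passes this check is shown below; the example_* symbols are placeholders and not part of the patch, while cpufreq_generic_frequency_table_verify() and cpufreq_generic_attr come from the freq_table.c hunks later in this series.

	/* Sketch only: example_* callbacks are illustrative placeholders. */
	static unsigned int example_get(unsigned int cpu);
	static int example_init(struct cpufreq_policy *policy);
	static int example_target_index(struct cpufreq_policy *policy, unsigned int index);

	static struct cpufreq_driver example_driver = {
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= example_target_index,	/* index into the driver's frequency table */
		.get		= example_get,
		.init		= example_init,
		.name		= "example",
		.attr		= cpufreq_generic_attr,
	};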
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 88cd39f7b0e9..b5f2b8618949 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -191,7 +191,10 @@ struct common_dbs_data { | |||
191 | struct attribute_group *attr_group_gov_sys; /* one governor - system */ | 191 | struct attribute_group *attr_group_gov_sys; /* one governor - system */ |
192 | struct attribute_group *attr_group_gov_pol; /* one governor - policy */ | 192 | struct attribute_group *attr_group_gov_pol; /* one governor - policy */ |
193 | 193 | ||
194 | /* Common data for platforms that don't set have_governor_per_policy */ | 194 | /* |
195 | * Common data for platforms that don't set | ||
196 | * CPUFREQ_HAVE_GOVERNOR_PER_POLICY | ||
197 | */ | ||
195 | struct dbs_data *gdbs_data; | 198 | struct dbs_data *gdbs_data; |
196 | 199 | ||
197 | struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); | 200 | struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 03078090b5f7..4dbf1db16aca 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
38 | if (!per_cpu(cpu_is_managed, policy->cpu)) | 38 | if (!per_cpu(cpu_is_managed, policy->cpu)) |
39 | goto err; | 39 | goto err; |
40 | 40 | ||
41 | /* | ||
42 | * We're safe from concurrent calls to ->target() here | ||
43 | * as we hold the userspace_mutex lock. If we were calling | ||
44 | * cpufreq_driver_target, a deadlock situation might occur: | ||
45 | * A: cpufreq_set (lock userspace_mutex) -> | ||
46 | * cpufreq_driver_target(lock policy->lock) | ||
47 | * B: cpufreq_set_policy(lock policy->lock) -> | ||
48 | * __cpufreq_governor -> | ||
49 | * cpufreq_governor_userspace (lock userspace_mutex) | ||
50 | */ | ||
51 | ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); | 41 | ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); |
52 | |||
53 | err: | 42 | err: |
54 | mutex_unlock(&userspace_mutex); | 43 | mutex_unlock(&userspace_mutex); |
55 | return ret; | 44 | return ret; |
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c index cb8276dd19ca..841857cf1562 100644 --- a/drivers/cpufreq/cris-artpec3-cpufreq.c +++ b/drivers/cpufreq/cris-artpec3-cpufreq.c | |||
@@ -27,8 +27,7 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) | |||
27 | return clk_ctrl.pll ? 200000 : 6000; | 27 | return clk_ctrl.pll ? 200000 : 6000; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | 30 | static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) |
31 | unsigned int state) | ||
32 | { | 31 | { |
33 | struct cpufreq_freqs freqs; | 32 | struct cpufreq_freqs freqs; |
34 | reg_clkgen_rw_clk_ctrl clk_ctrl; | 33 | reg_clkgen_rw_clk_ctrl clk_ctrl; |
@@ -52,66 +51,23 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
52 | local_irq_enable(); | 51 | local_irq_enable(); |
53 | 52 | ||
54 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 53 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
55 | }; | ||
56 | |||
57 | static int cris_freq_verify(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); | ||
60 | } | ||
61 | |||
62 | static int cris_freq_target(struct cpufreq_policy *policy, | ||
63 | unsigned int target_freq, | ||
64 | unsigned int relation) | ||
65 | { | ||
66 | unsigned int newstate = 0; | ||
67 | |||
68 | if (cpufreq_frequency_table_target(policy, cris_freq_table, | ||
69 | target_freq, relation, &newstate)) | ||
70 | return -EINVAL; | ||
71 | |||
72 | cris_freq_set_cpu_state(policy, newstate); | ||
73 | 54 | ||
74 | return 0; | 55 | return 0; |
75 | } | 56 | } |
76 | 57 | ||
77 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) | 58 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) |
78 | { | 59 | { |
79 | int result; | 60 | return cpufreq_generic_init(policy, cris_freq_table, 1000000); |
80 | |||
81 | /* cpuinfo and default policy values */ | ||
82 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | ||
83 | policy->cur = cris_freq_get_cpu_frequency(0); | ||
84 | |||
85 | result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); | ||
86 | if (result) | ||
87 | return (result); | ||
88 | |||
89 | cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | |||
95 | static int cris_freq_cpu_exit(struct cpufreq_policy *policy) | ||
96 | { | ||
97 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
98 | return 0; | ||
99 | } | 61 | } |
100 | 62 | ||
101 | |||
102 | static struct freq_attr *cris_freq_attr[] = { | ||
103 | &cpufreq_freq_attr_scaling_available_freqs, | ||
104 | NULL, | ||
105 | }; | ||
106 | |||
107 | static struct cpufreq_driver cris_freq_driver = { | 63 | static struct cpufreq_driver cris_freq_driver = { |
108 | .get = cris_freq_get_cpu_frequency, | 64 | .get = cris_freq_get_cpu_frequency, |
109 | .verify = cris_freq_verify, | 65 | .verify = cpufreq_generic_frequency_table_verify, |
110 | .target = cris_freq_target, | 66 | .target_index = cris_freq_target, |
111 | .init = cris_freq_cpu_init, | 67 | .init = cris_freq_cpu_init, |
112 | .exit = cris_freq_cpu_exit, | 68 | .exit = cpufreq_generic_exit, |
113 | .name = "cris_freq", | 69 | .name = "cris_freq", |
114 | .attr = cris_freq_attr, | 70 | .attr = cpufreq_generic_attr, |
115 | }; | 71 | }; |
116 | 72 | ||
117 | static int __init cris_freq_init(void) | 73 | static int __init cris_freq_init(void) |
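The cris-artpec3 conversion above is the template repeated for the drivers that follow: the private verify/exit callbacks and freq_attr arrays give way to the generic helpers, and ->target_index() receives a table index the core has already resolved, so the cpufreq_frequency_table_target() lookup disappears from the driver. A minimal sketch of the converted callback shape, assuming a hypothetical example_freq_table[]:

	static int example_target(struct cpufreq_policy *policy, unsigned int index)
	{
		struct cpufreq_freqs freqs;

		freqs.old = policy->cur;
		freqs.new = example_freq_table[index].frequency;

		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		/* program the PLL/divider for this table index here */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

		return 0;
	}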
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c index 72328f77dc53..c58811abd961 100644 --- a/drivers/cpufreq/cris-etraxfs-cpufreq.c +++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c | |||
@@ -27,8 +27,7 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) | |||
27 | return clk_ctrl.pll ? 200000 : 6000; | 27 | return clk_ctrl.pll ? 200000 : 6000; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | 30 | static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) |
31 | unsigned int state) | ||
32 | { | 31 | { |
33 | struct cpufreq_freqs freqs; | 32 | struct cpufreq_freqs freqs; |
34 | reg_config_rw_clk_ctrl clk_ctrl; | 33 | reg_config_rw_clk_ctrl clk_ctrl; |
@@ -52,63 +51,23 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
52 | local_irq_enable(); | 51 | local_irq_enable(); |
53 | 52 | ||
54 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 53 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
55 | }; | ||
56 | |||
57 | static int cris_freq_verify(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); | ||
60 | } | ||
61 | |||
62 | static int cris_freq_target(struct cpufreq_policy *policy, | ||
63 | unsigned int target_freq, unsigned int relation) | ||
64 | { | ||
65 | unsigned int newstate = 0; | ||
66 | |||
67 | if (cpufreq_frequency_table_target | ||
68 | (policy, cris_freq_table, target_freq, relation, &newstate)) | ||
69 | return -EINVAL; | ||
70 | |||
71 | cris_freq_set_cpu_state(policy, newstate); | ||
72 | 54 | ||
73 | return 0; | 55 | return 0; |
74 | } | 56 | } |
75 | 57 | ||
76 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) | 58 | static int cris_freq_cpu_init(struct cpufreq_policy *policy) |
77 | { | 59 | { |
78 | int result; | 60 | return cpufreq_generic_init(policy, cris_freq_table, 1000000); |
79 | |||
80 | /* cpuinfo and default policy values */ | ||
81 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | ||
82 | policy->cur = cris_freq_get_cpu_frequency(0); | ||
83 | |||
84 | result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); | ||
85 | if (result) | ||
86 | return (result); | ||
87 | |||
88 | cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); | ||
89 | |||
90 | return 0; | ||
91 | } | 61 | } |
92 | 62 | ||
93 | static int cris_freq_cpu_exit(struct cpufreq_policy *policy) | ||
94 | { | ||
95 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static struct freq_attr *cris_freq_attr[] = { | ||
100 | &cpufreq_freq_attr_scaling_available_freqs, | ||
101 | NULL, | ||
102 | }; | ||
103 | |||
104 | static struct cpufreq_driver cris_freq_driver = { | 63 | static struct cpufreq_driver cris_freq_driver = { |
105 | .get = cris_freq_get_cpu_frequency, | 64 | .get = cris_freq_get_cpu_frequency, |
106 | .verify = cris_freq_verify, | 65 | .verify = cpufreq_generic_frequency_table_verify, |
107 | .target = cris_freq_target, | 66 | .target_index = cris_freq_target, |
108 | .init = cris_freq_cpu_init, | 67 | .init = cris_freq_cpu_init, |
109 | .exit = cris_freq_cpu_exit, | 68 | .exit = cpufreq_generic_exit, |
110 | .name = "cris_freq", | 69 | .name = "cris_freq", |
111 | .attr = cris_freq_attr, | 70 | .attr = cpufreq_generic_attr, |
112 | }; | 71 | }; |
113 | 72 | ||
114 | static int __init cris_freq_init(void) | 73 | static int __init cris_freq_init(void) |
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index 551dd655c6f2..1f5d8a569c77 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c | |||
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy) | |||
50 | if (policy->cpu) | 50 | if (policy->cpu) |
51 | return -EINVAL; | 51 | return -EINVAL; |
52 | 52 | ||
53 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 53 | cpufreq_verify_within_cpu_limits(policy); |
54 | policy->cpuinfo.max_freq); | ||
55 | |||
56 | policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; | 54 | policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; |
57 | policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; | 55 | policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; |
58 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 56 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
@@ -68,28 +66,18 @@ static unsigned int davinci_getspeed(unsigned int cpu) | |||
68 | return clk_get_rate(cpufreq.armclk) / 1000; | 66 | return clk_get_rate(cpufreq.armclk) / 1000; |
69 | } | 67 | } |
70 | 68 | ||
71 | static int davinci_target(struct cpufreq_policy *policy, | 69 | static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) |
72 | unsigned int target_freq, unsigned int relation) | ||
73 | { | 70 | { |
74 | int ret = 0; | 71 | int ret = 0; |
75 | unsigned int idx; | ||
76 | struct cpufreq_freqs freqs; | 72 | struct cpufreq_freqs freqs; |
77 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; | 73 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; |
78 | struct clk *armclk = cpufreq.armclk; | 74 | struct clk *armclk = cpufreq.armclk; |
79 | 75 | ||
80 | freqs.old = davinci_getspeed(0); | 76 | freqs.old = davinci_getspeed(0); |
81 | freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000; | 77 | freqs.new = pdata->freq_table[idx].frequency; |
82 | |||
83 | if (freqs.old == freqs.new) | ||
84 | return ret; | ||
85 | 78 | ||
86 | dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); | 79 | dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); |
87 | 80 | ||
88 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, | ||
89 | freqs.new, relation, &idx); | ||
90 | if (ret) | ||
91 | return -EINVAL; | ||
92 | |||
93 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 81 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
94 | 82 | ||
95 | /* if moving to higher frequency, up the voltage beforehand */ | 83 | /* if moving to higher frequency, up the voltage beforehand */ |
@@ -138,47 +126,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) | |||
138 | return result; | 126 | return result; |
139 | } | 127 | } |
140 | 128 | ||
141 | policy->cur = davinci_getspeed(0); | ||
142 | |||
143 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
144 | if (result) { | ||
145 | pr_err("%s: cpufreq_frequency_table_cpuinfo() failed", | ||
146 | __func__); | ||
147 | return result; | ||
148 | } | ||
149 | |||
150 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
151 | |||
152 | /* | 129 | /* |
153 | * Time measurement across the target() function yields ~1500-1800us | 130 | * Time measurement across the target() function yields ~1500-1800us |
154 | * time taken with no drivers on notification list. | 131 | * time taken with no drivers on notification list. |
155 | * Setting the latency to 2000 us to accommodate addition of drivers | 132 | * Setting the latency to 2000 us to accommodate addition of drivers |
156 | * to pre/post change notification list. | 133 | * to pre/post change notification list. |
157 | */ | 134 | */ |
158 | policy->cpuinfo.transition_latency = 2000 * 1000; | 135 | return cpufreq_generic_init(policy, freq_table, 2000 * 1000); |
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static int davinci_cpu_exit(struct cpufreq_policy *policy) | ||
163 | { | ||
164 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
165 | return 0; | ||
166 | } | 136 | } |
167 | 137 | ||
168 | static struct freq_attr *davinci_cpufreq_attr[] = { | ||
169 | &cpufreq_freq_attr_scaling_available_freqs, | ||
170 | NULL, | ||
171 | }; | ||
172 | |||
173 | static struct cpufreq_driver davinci_driver = { | 138 | static struct cpufreq_driver davinci_driver = { |
174 | .flags = CPUFREQ_STICKY, | 139 | .flags = CPUFREQ_STICKY, |
175 | .verify = davinci_verify_speed, | 140 | .verify = davinci_verify_speed, |
176 | .target = davinci_target, | 141 | .target_index = davinci_target, |
177 | .get = davinci_getspeed, | 142 | .get = davinci_getspeed, |
178 | .init = davinci_cpu_init, | 143 | .init = davinci_cpu_init, |
179 | .exit = davinci_cpu_exit, | 144 | .exit = cpufreq_generic_exit, |
180 | .name = "davinci", | 145 | .name = "davinci", |
181 | .attr = davinci_cpufreq_attr, | 146 | .attr = cpufreq_generic_attr, |
182 | }; | 147 | }; |
183 | 148 | ||
184 | static int __init davinci_cpufreq_probe(struct platform_device *pdev) | 149 | static int __init davinci_cpufreq_probe(struct platform_device *pdev) |
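As in the davinci hunk above, cpufreq_generic_init() absorbs the per-driver setup that used to live in ->init(). A sketch of the resulting callback, with a hypothetical example_freq_table[] and davinci's 2 ms latency kept for illustration:

	static int example_cpu_init(struct cpufreq_policy *policy)
	{
		/* cpufreq_generic_init() validates the table, fills the cpuinfo
		 * limits, exports the sysfs frequency list and records the
		 * transition latency in one call. */
		return cpufreq_generic_init(policy, example_freq_table, 2000 * 1000);
	}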
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 26321cdc1946..238b16976be1 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c | |||
@@ -19,34 +19,14 @@ | |||
19 | static struct cpufreq_frequency_table *freq_table; | 19 | static struct cpufreq_frequency_table *freq_table; |
20 | static struct clk *armss_clk; | 20 | static struct clk *armss_clk; |
21 | 21 | ||
22 | static struct freq_attr *dbx500_cpufreq_attr[] = { | ||
23 | &cpufreq_freq_attr_scaling_available_freqs, | ||
24 | NULL, | ||
25 | }; | ||
26 | |||
27 | static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
28 | { | ||
29 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
30 | } | ||
31 | |||
32 | static int dbx500_cpufreq_target(struct cpufreq_policy *policy, | 22 | static int dbx500_cpufreq_target(struct cpufreq_policy *policy, |
33 | unsigned int target_freq, | 23 | unsigned int index) |
34 | unsigned int relation) | ||
35 | { | 24 | { |
36 | struct cpufreq_freqs freqs; | 25 | struct cpufreq_freqs freqs; |
37 | unsigned int idx; | ||
38 | int ret; | 26 | int ret; |
39 | 27 | ||
40 | /* Lookup the next frequency */ | ||
41 | if (cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
42 | relation, &idx)) | ||
43 | return -EINVAL; | ||
44 | |||
45 | freqs.old = policy->cur; | 28 | freqs.old = policy->cur; |
46 | freqs.new = freq_table[idx].frequency; | 29 | freqs.new = freq_table[index].frequency; |
47 | |||
48 | if (freqs.old == freqs.new) | ||
49 | return 0; | ||
50 | 30 | ||
51 | /* pre-change notification */ | 31 | /* pre-change notification */ |
52 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 32 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
@@ -84,43 +64,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) | |||
84 | 64 | ||
85 | static int dbx500_cpufreq_init(struct cpufreq_policy *policy) | 65 | static int dbx500_cpufreq_init(struct cpufreq_policy *policy) |
86 | { | 66 | { |
87 | int res; | 67 | return cpufreq_generic_init(policy, freq_table, 20 * 1000); |
88 | |||
89 | /* get policy fields based on the table */ | ||
90 | res = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
91 | if (!res) | ||
92 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
93 | else { | ||
94 | pr_err("dbx500-cpufreq: Failed to read policy table\n"); | ||
95 | return res; | ||
96 | } | ||
97 | |||
98 | policy->min = policy->cpuinfo.min_freq; | ||
99 | policy->max = policy->cpuinfo.max_freq; | ||
100 | policy->cur = dbx500_cpufreq_getspeed(policy->cpu); | ||
101 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
102 | |||
103 | /* | ||
104 | * FIXME : Need to take time measurement across the target() | ||
105 | * function with no/some/all drivers in the notification | ||
106 | * list. | ||
107 | */ | ||
108 | policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ | ||
109 | |||
110 | /* policy sharing between dual CPUs */ | ||
111 | cpumask_setall(policy->cpus); | ||
112 | |||
113 | return 0; | ||
114 | } | 68 | } |
115 | 69 | ||
116 | static struct cpufreq_driver dbx500_cpufreq_driver = { | 70 | static struct cpufreq_driver dbx500_cpufreq_driver = { |
117 | .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, | 71 | .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, |
118 | .verify = dbx500_cpufreq_verify_speed, | 72 | .verify = cpufreq_generic_frequency_table_verify, |
119 | .target = dbx500_cpufreq_target, | 73 | .target_index = dbx500_cpufreq_target, |
120 | .get = dbx500_cpufreq_getspeed, | 74 | .get = dbx500_cpufreq_getspeed, |
121 | .init = dbx500_cpufreq_init, | 75 | .init = dbx500_cpufreq_init, |
122 | .name = "DBX500", | 76 | .name = "DBX500", |
123 | .attr = dbx500_cpufreq_attr, | 77 | .attr = cpufreq_generic_attr, |
124 | }; | 78 | }; |
125 | 79 | ||
126 | static int dbx500_cpufreq_probe(struct platform_device *pdev) | 80 | static int dbx500_cpufreq_probe(struct platform_device *pdev) |
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 09f64cc83019..b39c4ef60a7a 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c | |||
@@ -168,12 +168,9 @@ postchange: | |||
168 | return err; | 168 | return err; |
169 | } | 169 | } |
170 | 170 | ||
171 | static int eps_target(struct cpufreq_policy *policy, | 171 | static int eps_target(struct cpufreq_policy *policy, unsigned int index) |
172 | unsigned int target_freq, | ||
173 | unsigned int relation) | ||
174 | { | 172 | { |
175 | struct eps_cpu_data *centaur; | 173 | struct eps_cpu_data *centaur; |
176 | unsigned int newstate = 0; | ||
177 | unsigned int cpu = policy->cpu; | 174 | unsigned int cpu = policy->cpu; |
178 | unsigned int dest_state; | 175 | unsigned int dest_state; |
179 | int ret; | 176 | int ret; |
@@ -182,28 +179,14 @@ static int eps_target(struct cpufreq_policy *policy, | |||
182 | return -ENODEV; | 179 | return -ENODEV; |
183 | centaur = eps_cpu[cpu]; | 180 | centaur = eps_cpu[cpu]; |
184 | 181 | ||
185 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
186 | &eps_cpu[cpu]->freq_table[0], | ||
187 | target_freq, | ||
188 | relation, | ||
189 | &newstate))) { | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
193 | /* Make frequency transition */ | 182 | /* Make frequency transition */ |
194 | dest_state = centaur->freq_table[newstate].driver_data & 0xffff; | 183 | dest_state = centaur->freq_table[index].driver_data & 0xffff; |
195 | ret = eps_set_state(centaur, policy, dest_state); | 184 | ret = eps_set_state(centaur, policy, dest_state); |
196 | if (ret) | 185 | if (ret) |
197 | printk(KERN_ERR "eps: Timeout!\n"); | 186 | printk(KERN_ERR "eps: Timeout!\n"); |
198 | return ret; | 187 | return ret; |
199 | } | 188 | } |
200 | 189 | ||
201 | static int eps_verify(struct cpufreq_policy *policy) | ||
202 | { | ||
203 | return cpufreq_frequency_table_verify(policy, | ||
204 | &eps_cpu[policy->cpu]->freq_table[0]); | ||
205 | } | ||
206 | |||
207 | static int eps_cpu_init(struct cpufreq_policy *policy) | 190 | static int eps_cpu_init(struct cpufreq_policy *policy) |
208 | { | 191 | { |
209 | unsigned int i; | 192 | unsigned int i; |
@@ -401,15 +384,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
401 | } | 384 | } |
402 | 385 | ||
403 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ | 386 | policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ |
404 | policy->cur = fsb * current_multiplier; | ||
405 | 387 | ||
406 | ret = cpufreq_frequency_table_cpuinfo(policy, ¢aur->freq_table[0]); | 388 | ret = cpufreq_table_validate_and_show(policy, ¢aur->freq_table[0]); |
407 | if (ret) { | 389 | if (ret) { |
408 | kfree(centaur); | 390 | kfree(centaur); |
409 | return ret; | 391 | return ret; |
410 | } | 392 | } |
411 | 393 | ||
412 | cpufreq_frequency_table_get_attr(¢aur->freq_table[0], policy->cpu); | ||
413 | return 0; | 394 | return 0; |
414 | } | 395 | } |
415 | 396 | ||
@@ -424,19 +405,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) | |||
424 | return 0; | 405 | return 0; |
425 | } | 406 | } |
426 | 407 | ||
427 | static struct freq_attr *eps_attr[] = { | ||
428 | &cpufreq_freq_attr_scaling_available_freqs, | ||
429 | NULL, | ||
430 | }; | ||
431 | |||
432 | static struct cpufreq_driver eps_driver = { | 408 | static struct cpufreq_driver eps_driver = { |
433 | .verify = eps_verify, | 409 | .verify = cpufreq_generic_frequency_table_verify, |
434 | .target = eps_target, | 410 | .target_index = eps_target, |
435 | .init = eps_cpu_init, | 411 | .init = eps_cpu_init, |
436 | .exit = eps_cpu_exit, | 412 | .exit = eps_cpu_exit, |
437 | .get = eps_get, | 413 | .get = eps_get, |
438 | .name = "e_powersaver", | 414 | .name = "e_powersaver", |
439 | .attr = eps_attr, | 415 | .attr = cpufreq_generic_attr, |
440 | }; | 416 | }; |
441 | 417 | ||
442 | 418 | ||
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 823a400d98fd..4ab41539514f 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c | |||
@@ -105,20 +105,8 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | |||
105 | } | 105 | } |
106 | 106 | ||
107 | 107 | ||
108 | /** | 108 | static int elanfreq_target(struct cpufreq_policy *policy, |
109 | * elanfreq_set_cpu_frequency: Change the CPU core frequency | 109 | unsigned int state) |
110 | * @cpu: cpu number | ||
111 | * @freq: frequency in kHz | ||
112 | * | ||
113 | * This function takes a frequency value and changes the CPU frequency | ||
114 | * according to this. Note that the frequency has to be checked by | ||
115 | * elanfreq_validatespeed() for correctness! | ||
116 | * | ||
117 | * There is no return value. | ||
118 | */ | ||
119 | |||
120 | static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, | ||
121 | unsigned int state) | ||
122 | { | 110 | { |
123 | struct cpufreq_freqs freqs; | 111 | struct cpufreq_freqs freqs; |
124 | 112 | ||
@@ -162,38 +150,9 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy, | |||
162 | local_irq_enable(); | 150 | local_irq_enable(); |
163 | 151 | ||
164 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 152 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
165 | }; | ||
166 | |||
167 | |||
168 | /** | ||
169 | * elanfreq_validatespeed: test if frequency range is valid | ||
170 | * @policy: the policy to validate | ||
171 | * | ||
172 | * This function checks if a given frequency range in kHz is valid | ||
173 | * for the hardware supported by the driver. | ||
174 | */ | ||
175 | |||
176 | static int elanfreq_verify(struct cpufreq_policy *policy) | ||
177 | { | ||
178 | return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); | ||
179 | } | ||
180 | |||
181 | static int elanfreq_target(struct cpufreq_policy *policy, | ||
182 | unsigned int target_freq, | ||
183 | unsigned int relation) | ||
184 | { | ||
185 | unsigned int newstate = 0; | ||
186 | |||
187 | if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], | ||
188 | target_freq, relation, &newstate)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | elanfreq_set_cpu_state(policy, newstate); | ||
192 | 153 | ||
193 | return 0; | 154 | return 0; |
194 | } | 155 | } |
195 | |||
196 | |||
197 | /* | 156 | /* |
198 | * Module init and exit code | 157 | * Module init and exit code |
199 | */ | 158 | */ |
@@ -202,7 +161,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
202 | { | 161 | { |
203 | struct cpuinfo_x86 *c = &cpu_data(0); | 162 | struct cpuinfo_x86 *c = &cpu_data(0); |
204 | unsigned int i; | 163 | unsigned int i; |
205 | int result; | ||
206 | 164 | ||
207 | /* capability check */ | 165 | /* capability check */ |
208 | if ((c->x86_vendor != X86_VENDOR_AMD) || | 166 | if ((c->x86_vendor != X86_VENDOR_AMD) || |
@@ -221,21 +179,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
221 | 179 | ||
222 | /* cpuinfo and default policy values */ | 180 | /* cpuinfo and default policy values */ |
223 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 181 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
224 | policy->cur = elanfreq_get_cpu_frequency(0); | ||
225 | |||
226 | result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); | ||
227 | if (result) | ||
228 | return result; | ||
229 | |||
230 | cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); | ||
231 | return 0; | ||
232 | } | ||
233 | 182 | ||
234 | 183 | return cpufreq_table_validate_and_show(policy, elanfreq_table); | |
235 | static int elanfreq_cpu_exit(struct cpufreq_policy *policy) | ||
236 | { | ||
237 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
238 | return 0; | ||
239 | } | 184 | } |
240 | 185 | ||
241 | 186 | ||
@@ -261,20 +206,14 @@ __setup("elanfreq=", elanfreq_setup); | |||
261 | #endif | 206 | #endif |
262 | 207 | ||
263 | 208 | ||
264 | static struct freq_attr *elanfreq_attr[] = { | ||
265 | &cpufreq_freq_attr_scaling_available_freqs, | ||
266 | NULL, | ||
267 | }; | ||
268 | |||
269 | |||
270 | static struct cpufreq_driver elanfreq_driver = { | 209 | static struct cpufreq_driver elanfreq_driver = { |
271 | .get = elanfreq_get_cpu_frequency, | 210 | .get = elanfreq_get_cpu_frequency, |
272 | .verify = elanfreq_verify, | 211 | .verify = cpufreq_generic_frequency_table_verify, |
273 | .target = elanfreq_target, | 212 | .target_index = elanfreq_target, |
274 | .init = elanfreq_cpu_init, | 213 | .init = elanfreq_cpu_init, |
275 | .exit = elanfreq_cpu_exit, | 214 | .exit = cpufreq_generic_exit, |
276 | .name = "elanfreq", | 215 | .name = "elanfreq", |
277 | .attr = elanfreq_attr, | 216 | .attr = cpufreq_generic_attr, |
278 | }; | 217 | }; |
279 | 218 | ||
280 | static const struct x86_cpu_id elan_id[] = { | 219 | static const struct x86_cpu_id elan_id[] = { |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 0fac34439e31..9982fcb82257 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -31,12 +31,6 @@ static unsigned int locking_frequency; | |||
31 | static bool frequency_locked; | 31 | static bool frequency_locked; |
32 | static DEFINE_MUTEX(cpufreq_lock); | 32 | static DEFINE_MUTEX(cpufreq_lock); |
33 | 33 | ||
34 | static int exynos_verify_speed(struct cpufreq_policy *policy) | ||
35 | { | ||
36 | return cpufreq_frequency_table_verify(policy, | ||
37 | exynos_info->freq_table); | ||
38 | } | ||
39 | |||
40 | static unsigned int exynos_getspeed(unsigned int cpu) | 34 | static unsigned int exynos_getspeed(unsigned int cpu) |
41 | { | 35 | { |
42 | return clk_get_rate(exynos_info->cpu_clk) / 1000; | 36 | return clk_get_rate(exynos_info->cpu_clk) / 1000; |
@@ -71,9 +65,6 @@ static int exynos_cpufreq_scale(unsigned int target_freq) | |||
71 | freqs.old = policy->cur; | 65 | freqs.old = policy->cur; |
72 | freqs.new = target_freq; | 66 | freqs.new = target_freq; |
73 | 67 | ||
74 | if (freqs.new == freqs.old) | ||
75 | goto out; | ||
76 | |||
77 | /* | 68 | /* |
78 | * The policy max have been changed so that we cannot get proper | 69 | * The policy max have been changed so that we cannot get proper |
79 | * old_index with cpufreq_frequency_table_target(). Thus, ignore | 70 | * old_index with cpufreq_frequency_table_target(). Thus, ignore |
@@ -141,7 +132,7 @@ post_notify: | |||
141 | if ((freqs.new < freqs.old) || | 132 | if ((freqs.new < freqs.old) || |
142 | ((freqs.new > freqs.old) && safe_arm_volt)) { | 133 | ((freqs.new > freqs.old) && safe_arm_volt)) { |
143 | /* down the voltage after frequency change */ | 134 | /* down the voltage after frequency change */ |
144 | regulator_set_voltage(arm_regulator, arm_volt, | 135 | ret = regulator_set_voltage(arm_regulator, arm_volt, |
145 | arm_volt); | 136 | arm_volt); |
146 | if (ret) { | 137 | if (ret) { |
147 | pr_err("%s: failed to set cpu voltage to %d\n", | 138 | pr_err("%s: failed to set cpu voltage to %d\n", |
@@ -157,13 +148,9 @@ out: | |||
157 | return ret; | 148 | return ret; |
158 | } | 149 | } |
159 | 150 | ||
160 | static int exynos_target(struct cpufreq_policy *policy, | 151 | static int exynos_target(struct cpufreq_policy *policy, unsigned int index) |
161 | unsigned int target_freq, | ||
162 | unsigned int relation) | ||
163 | { | 152 | { |
164 | struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; | 153 | struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; |
165 | unsigned int index; | ||
166 | unsigned int new_freq; | ||
167 | int ret = 0; | 154 | int ret = 0; |
168 | 155 | ||
169 | mutex_lock(&cpufreq_lock); | 156 | mutex_lock(&cpufreq_lock); |
@@ -171,15 +158,7 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
171 | if (frequency_locked) | 158 | if (frequency_locked) |
172 | goto out; | 159 | goto out; |
173 | 160 | ||
174 | if (cpufreq_frequency_table_target(policy, freq_table, | 161 | ret = exynos_cpufreq_scale(freq_table[index].frequency); |
175 | target_freq, relation, &index)) { | ||
176 | ret = -EINVAL; | ||
177 | goto out; | ||
178 | } | ||
179 | |||
180 | new_freq = freq_table[index].frequency; | ||
181 | |||
182 | ret = exynos_cpufreq_scale(new_freq); | ||
183 | 162 | ||
184 | out: | 163 | out: |
185 | mutex_unlock(&cpufreq_lock); | 164 | mutex_unlock(&cpufreq_lock); |
@@ -247,38 +226,18 @@ static struct notifier_block exynos_cpufreq_nb = { | |||
247 | 226 | ||
248 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | 227 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) |
249 | { | 228 | { |
250 | policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); | 229 | return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); |
251 | |||
252 | cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); | ||
253 | |||
254 | /* set the transition latency value */ | ||
255 | policy->cpuinfo.transition_latency = 100000; | ||
256 | |||
257 | cpumask_setall(policy->cpus); | ||
258 | |||
259 | return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); | ||
260 | } | 230 | } |
261 | 231 | ||
262 | static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
263 | { | ||
264 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static struct freq_attr *exynos_cpufreq_attr[] = { | ||
269 | &cpufreq_freq_attr_scaling_available_freqs, | ||
270 | NULL, | ||
271 | }; | ||
272 | |||
273 | static struct cpufreq_driver exynos_driver = { | 232 | static struct cpufreq_driver exynos_driver = { |
274 | .flags = CPUFREQ_STICKY, | 233 | .flags = CPUFREQ_STICKY, |
275 | .verify = exynos_verify_speed, | 234 | .verify = cpufreq_generic_frequency_table_verify, |
276 | .target = exynos_target, | 235 | .target_index = exynos_target, |
277 | .get = exynos_getspeed, | 236 | .get = exynos_getspeed, |
278 | .init = exynos_cpufreq_cpu_init, | 237 | .init = exynos_cpufreq_cpu_init, |
279 | .exit = exynos_cpufreq_cpu_exit, | 238 | .exit = cpufreq_generic_exit, |
280 | .name = "exynos_cpufreq", | 239 | .name = "exynos_cpufreq", |
281 | .attr = exynos_cpufreq_attr, | 240 | .attr = cpufreq_generic_attr, |
282 | #ifdef CONFIG_PM | 241 | #ifdef CONFIG_PM |
283 | .suspend = exynos_cpufreq_suspend, | 242 | .suspend = exynos_cpufreq_suspend, |
284 | .resume = exynos_cpufreq_resume, | 243 | .resume = exynos_cpufreq_resume, |
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index add7fbec4fc9..f2c75065ce19 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c | |||
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index) | |||
81 | 81 | ||
82 | static void exynos4210_set_apll(unsigned int index) | 82 | static void exynos4210_set_apll(unsigned int index) |
83 | { | 83 | { |
84 | unsigned int tmp; | 84 | unsigned int tmp, freq = apll_freq_4210[index].freq; |
85 | 85 | ||
86 | /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ | 86 | /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ |
87 | clk_set_parent(moutcore, mout_mpll); | 87 | clk_set_parent(moutcore, mout_mpll); |
88 | 88 | ||
89 | do { | 89 | do { |
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index) | |||
92 | tmp &= 0x7; | 92 | tmp &= 0x7; |
93 | } while (tmp != 0x2); | 93 | } while (tmp != 0x2); |
94 | 94 | ||
95 | /* 2. Set APLL Lock time */ | 95 | clk_set_rate(mout_apll, freq * 1000); |
96 | __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK); | ||
97 | |||
98 | /* 3. Change PLL PMS values */ | ||
99 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
100 | tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); | ||
101 | tmp |= apll_freq_4210[index].mps; | ||
102 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
103 | 96 | ||
104 | /* 4. wait_lock_time */ | 97 | /* MUX_CORE_SEL = APLL */ |
105 | do { | ||
106 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
107 | } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); | ||
108 | |||
109 | /* 5. MUX_CORE_SEL = APLL */ | ||
110 | clk_set_parent(moutcore, mout_apll); | 98 | clk_set_parent(moutcore, mout_apll); |
111 | 99 | ||
112 | do { | 100 | do { |
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index) | |||
115 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 103 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
116 | } | 104 | } |
117 | 105 | ||
118 | static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) | ||
119 | { | ||
120 | unsigned int old_pm = apll_freq_4210[old_index].mps >> 8; | ||
121 | unsigned int new_pm = apll_freq_4210[new_index].mps >> 8; | ||
122 | |||
123 | return (old_pm == new_pm) ? 0 : 1; | ||
124 | } | ||
125 | |||
126 | static void exynos4210_set_frequency(unsigned int old_index, | 106 | static void exynos4210_set_frequency(unsigned int old_index, |
127 | unsigned int new_index) | 107 | unsigned int new_index) |
128 | { | 108 | { |
129 | unsigned int tmp; | ||
130 | |||
131 | if (old_index > new_index) { | 109 | if (old_index > new_index) { |
132 | if (!exynos4210_pms_change(old_index, new_index)) { | 110 | exynos4210_set_clkdiv(new_index); |
133 | /* 1. Change the system clock divider values */ | 111 | exynos4210_set_apll(new_index); |
134 | exynos4210_set_clkdiv(new_index); | ||
135 | |||
136 | /* 2. Change just s value in apll m,p,s value */ | ||
137 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
138 | tmp &= ~(0x7 << 0); | ||
139 | tmp |= apll_freq_4210[new_index].mps & 0x7; | ||
140 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
141 | } else { | ||
142 | /* Clock Configuration Procedure */ | ||
143 | /* 1. Change the system clock divider values */ | ||
144 | exynos4210_set_clkdiv(new_index); | ||
145 | /* 2. Change the apll m,p,s value */ | ||
146 | exynos4210_set_apll(new_index); | ||
147 | } | ||
148 | } else if (old_index < new_index) { | 112 | } else if (old_index < new_index) { |
149 | if (!exynos4210_pms_change(old_index, new_index)) { | 113 | exynos4210_set_apll(new_index); |
150 | /* 1. Change just s value in apll m,p,s value */ | 114 | exynos4210_set_clkdiv(new_index); |
151 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
152 | tmp &= ~(0x7 << 0); | ||
153 | tmp |= apll_freq_4210[new_index].mps & 0x7; | ||
154 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
155 | |||
156 | /* 2. Change the system clock divider values */ | ||
157 | exynos4210_set_clkdiv(new_index); | ||
158 | } else { | ||
159 | /* Clock Configuration Procedure */ | ||
160 | /* 1. Change the apll m,p,s value */ | ||
161 | exynos4210_set_apll(new_index); | ||
162 | /* 2. Change the system clock divider values */ | ||
163 | exynos4210_set_clkdiv(new_index); | ||
164 | } | ||
165 | } | 115 | } |
166 | } | 116 | } |
167 | 117 | ||
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) | |||
194 | info->volt_table = exynos4210_volt_table; | 144 | info->volt_table = exynos4210_volt_table; |
195 | info->freq_table = exynos4210_freq_table; | 145 | info->freq_table = exynos4210_freq_table; |
196 | info->set_freq = exynos4210_set_frequency; | 146 | info->set_freq = exynos4210_set_frequency; |
197 | info->need_apll_change = exynos4210_pms_change; | ||
198 | 147 | ||
199 | return 0; | 148 | return 0; |
200 | 149 | ||
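In the exynos4210 hunks above, the hand-rolled APLL reprogramming (lock time, PMS fields, lock polling) and the pms_change() comparison are dropped in favour of a clk_set_rate() call on mout_apll through the common clock framework. The switch sequence reduces to the sketch below; moutcore, mout_mpll and mout_apll are the driver's existing clk pointers, freq is the target rate in kHz, and the busy-wait loops on the mux status registers are elided:

	clk_set_parent(moutcore, mout_mpll);	/* run ARMCLK from MPLL while APLL relocks */
	clk_set_rate(mout_apll, freq * 1000);	/* clock driver programs lock time and PMS values */
	clk_set_parent(moutcore, mout_apll);	/* switch ARMCLK back to APLL */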
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 08b7477b0aa2..8683304ce62c 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c | |||
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index) | |||
128 | 128 | ||
129 | static void exynos4x12_set_apll(unsigned int index) | 129 | static void exynos4x12_set_apll(unsigned int index) |
130 | { | 130 | { |
131 | unsigned int tmp, pdiv; | 131 | unsigned int tmp, freq = apll_freq_4x12[index].freq; |
132 | 132 | ||
133 | /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ | 133 | /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ |
134 | clk_set_parent(moutcore, mout_mpll); | 134 | clk_set_parent(moutcore, mout_mpll); |
135 | 135 | ||
136 | do { | 136 | do { |
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index) | |||
140 | tmp &= 0x7; | 140 | tmp &= 0x7; |
141 | } while (tmp != 0x2); | 141 | } while (tmp != 0x2); |
142 | 142 | ||
143 | /* 2. Set APLL Lock time */ | 143 | clk_set_rate(mout_apll, freq * 1000); |
144 | pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f); | ||
145 | 144 | ||
146 | __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK); | 145 | /* MUX_CORE_SEL = APLL */ |
147 | |||
148 | /* 3. Change PLL PMS values */ | ||
149 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
150 | tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); | ||
151 | tmp |= apll_freq_4x12[index].mps; | ||
152 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
153 | |||
154 | /* 4. wait_lock_time */ | ||
155 | do { | ||
156 | cpu_relax(); | ||
157 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
158 | } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT))); | ||
159 | |||
160 | /* 5. MUX_CORE_SEL = APLL */ | ||
161 | clk_set_parent(moutcore, mout_apll); | 146 | clk_set_parent(moutcore, mout_apll); |
162 | 147 | ||
163 | do { | 148 | do { |
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index) | |||
167 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 152 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
168 | } | 153 | } |
169 | 154 | ||
170 | static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index) | ||
171 | { | ||
172 | unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8; | ||
173 | unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8; | ||
174 | |||
175 | return (old_pm == new_pm) ? 0 : 1; | ||
176 | } | ||
177 | |||
178 | static void exynos4x12_set_frequency(unsigned int old_index, | 155 | static void exynos4x12_set_frequency(unsigned int old_index, |
179 | unsigned int new_index) | 156 | unsigned int new_index) |
180 | { | 157 | { |
181 | unsigned int tmp; | ||
182 | |||
183 | if (old_index > new_index) { | 158 | if (old_index > new_index) { |
184 | if (!exynos4x12_pms_change(old_index, new_index)) { | 159 | exynos4x12_set_clkdiv(new_index); |
185 | /* 1. Change the system clock divider values */ | 160 | exynos4x12_set_apll(new_index); |
186 | exynos4x12_set_clkdiv(new_index); | ||
187 | /* 2. Change just s value in apll m,p,s value */ | ||
188 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
189 | tmp &= ~(0x7 << 0); | ||
190 | tmp |= apll_freq_4x12[new_index].mps & 0x7; | ||
191 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
192 | |||
193 | } else { | ||
194 | /* Clock Configuration Procedure */ | ||
195 | /* 1. Change the system clock divider values */ | ||
196 | exynos4x12_set_clkdiv(new_index); | ||
197 | /* 2. Change the apll m,p,s value */ | ||
198 | exynos4x12_set_apll(new_index); | ||
199 | } | ||
200 | } else if (old_index < new_index) { | 161 | } else if (old_index < new_index) { |
201 | if (!exynos4x12_pms_change(old_index, new_index)) { | 162 | exynos4x12_set_apll(new_index); |
202 | /* 1. Change just s value in apll m,p,s value */ | 163 | exynos4x12_set_clkdiv(new_index); |
203 | tmp = __raw_readl(EXYNOS4_APLL_CON0); | ||
204 | tmp &= ~(0x7 << 0); | ||
205 | tmp |= apll_freq_4x12[new_index].mps & 0x7; | ||
206 | __raw_writel(tmp, EXYNOS4_APLL_CON0); | ||
207 | /* 2. Change the system clock divider values */ | ||
208 | exynos4x12_set_clkdiv(new_index); | ||
209 | } else { | ||
210 | /* Clock Configuration Procedure */ | ||
211 | /* 1. Change the apll m,p,s value */ | ||
212 | exynos4x12_set_apll(new_index); | ||
213 | /* 2. Change the system clock divider values */ | ||
214 | exynos4x12_set_clkdiv(new_index); | ||
215 | } | ||
216 | } | 164 | } |
217 | } | 165 | } |
218 | 166 | ||
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) | |||
250 | info->volt_table = exynos4x12_volt_table; | 198 | info->volt_table = exynos4x12_volt_table; |
251 | info->freq_table = exynos4x12_freq_table; | 199 | info->freq_table = exynos4x12_freq_table; |
252 | info->set_freq = exynos4x12_set_frequency; | 200 | info->set_freq = exynos4x12_set_frequency; |
253 | info->need_apll_change = exynos4x12_pms_change; | ||
254 | 201 | ||
255 | return 0; | 202 | return 0; |
256 | 203 | ||
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index be5380ecdcd4..1bf9b060d522 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
23 | #include <linux/opp.h> | 23 | #include <linux/pm_opp.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | 26 | ||
@@ -118,12 +118,12 @@ static int init_div_table(void) | |||
118 | struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; | 118 | struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; |
119 | unsigned int tmp, clk_div, ema_div, freq, volt_id; | 119 | unsigned int tmp, clk_div, ema_div, freq, volt_id; |
120 | int i = 0; | 120 | int i = 0; |
121 | struct opp *opp; | 121 | struct dev_pm_opp *opp; |
122 | 122 | ||
123 | rcu_read_lock(); | 123 | rcu_read_lock(); |
124 | for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { | 124 | for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { |
125 | 125 | ||
126 | opp = opp_find_freq_exact(dvfs_info->dev, | 126 | opp = dev_pm_opp_find_freq_exact(dvfs_info->dev, |
127 | freq_tbl[i].frequency * 1000, true); | 127 | freq_tbl[i].frequency * 1000, true); |
128 | if (IS_ERR(opp)) { | 128 | if (IS_ERR(opp)) { |
129 | rcu_read_unlock(); | 129 | rcu_read_unlock(); |
@@ -142,7 +142,7 @@ static int init_div_table(void) | |||
142 | << P0_7_CSCLKDEV_SHIFT; | 142 | << P0_7_CSCLKDEV_SHIFT; |
143 | 143 | ||
144 | /* Calculate EMA */ | 144 | /* Calculate EMA */ |
145 | volt_id = opp_get_voltage(opp); | 145 | volt_id = dev_pm_opp_get_voltage(opp); |
146 | volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; | 146 | volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP; |
147 | if (volt_id < PMIC_HIGH_VOLT) { | 147 | if (volt_id < PMIC_HIGH_VOLT) { |
148 | ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | | 148 | ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) | |
@@ -209,38 +209,22 @@ static void exynos_enable_dvfs(void) | |||
209 | dvfs_info->base + XMU_DVFS_CTRL); | 209 | dvfs_info->base + XMU_DVFS_CTRL); |
210 | } | 210 | } |
211 | 211 | ||
212 | static int exynos_verify_speed(struct cpufreq_policy *policy) | ||
213 | { | ||
214 | return cpufreq_frequency_table_verify(policy, | ||
215 | dvfs_info->freq_table); | ||
216 | } | ||
217 | |||
218 | static unsigned int exynos_getspeed(unsigned int cpu) | 212 | static unsigned int exynos_getspeed(unsigned int cpu) |
219 | { | 213 | { |
220 | return dvfs_info->cur_frequency; | 214 | return dvfs_info->cur_frequency; |
221 | } | 215 | } |
222 | 216 | ||
223 | static int exynos_target(struct cpufreq_policy *policy, | 217 | static int exynos_target(struct cpufreq_policy *policy, unsigned int index) |
224 | unsigned int target_freq, | ||
225 | unsigned int relation) | ||
226 | { | 218 | { |
227 | unsigned int index, tmp; | 219 | unsigned int tmp; |
228 | int ret = 0, i; | 220 | int i; |
229 | struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; | 221 | struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; |
230 | 222 | ||
231 | mutex_lock(&cpufreq_lock); | 223 | mutex_lock(&cpufreq_lock); |
232 | 224 | ||
233 | ret = cpufreq_frequency_table_target(policy, freq_table, | ||
234 | target_freq, relation, &index); | ||
235 | if (ret) | ||
236 | goto out; | ||
237 | |||
238 | freqs.old = dvfs_info->cur_frequency; | 225 | freqs.old = dvfs_info->cur_frequency; |
239 | freqs.new = freq_table[index].frequency; | 226 | freqs.new = freq_table[index].frequency; |
240 | 227 | ||
241 | if (freqs.old == freqs.new) | ||
242 | goto out; | ||
243 | |||
244 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 228 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
245 | 229 | ||
246 | /* Set the target frequency in all C0_3_PSTATE register */ | 230 | /* Set the target frequency in all C0_3_PSTATE register */ |
@@ -251,9 +235,8 @@ static int exynos_target(struct cpufreq_policy *policy, | |||
251 | 235 | ||
252 | __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); | 236 | __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4); |
253 | } | 237 | } |
254 | out: | ||
255 | mutex_unlock(&cpufreq_lock); | 238 | mutex_unlock(&cpufreq_lock); |
256 | return ret; | 239 | return 0; |
257 | } | 240 | } |
258 | 241 | ||
259 | static void exynos_cpufreq_work(struct work_struct *work) | 242 | static void exynos_cpufreq_work(struct work_struct *work) |
@@ -324,30 +307,19 @@ static void exynos_sort_descend_freq_table(void) | |||
324 | 307 | ||
325 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | 308 | static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) |
326 | { | 309 | { |
327 | int ret; | 310 | return cpufreq_generic_init(policy, dvfs_info->freq_table, |
328 | 311 | dvfs_info->latency); | |
329 | ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table); | ||
330 | if (ret) { | ||
331 | dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret); | ||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | policy->cur = dvfs_info->cur_frequency; | ||
336 | policy->cpuinfo.transition_latency = dvfs_info->latency; | ||
337 | cpumask_setall(policy->cpus); | ||
338 | |||
339 | cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu); | ||
340 | |||
341 | return 0; | ||
342 | } | 312 | } |
343 | 313 | ||
344 | static struct cpufreq_driver exynos_driver = { | 314 | static struct cpufreq_driver exynos_driver = { |
345 | .flags = CPUFREQ_STICKY, | 315 | .flags = CPUFREQ_STICKY, |
346 | .verify = exynos_verify_speed, | 316 | .verify = cpufreq_generic_frequency_table_verify, |
347 | .target = exynos_target, | 317 | .target_index = exynos_target, |
348 | .get = exynos_getspeed, | 318 | .get = exynos_getspeed, |
349 | .init = exynos_cpufreq_cpu_init, | 319 | .init = exynos_cpufreq_cpu_init, |
320 | .exit = cpufreq_generic_exit, | ||
350 | .name = CPUFREQ_NAME, | 321 | .name = CPUFREQ_NAME, |
322 | .attr = cpufreq_generic_attr, | ||
351 | }; | 323 | }; |
352 | 324 | ||
353 | static const struct of_device_id exynos_cpufreq_match[] = { | 325 | static const struct of_device_id exynos_cpufreq_match[] = { |
@@ -399,13 +371,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) | |||
399 | goto err_put_node; | 371 | goto err_put_node; |
400 | } | 372 | } |
401 | 373 | ||
402 | ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 374 | ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev, |
375 | &dvfs_info->freq_table); | ||
403 | if (ret) { | 376 | if (ret) { |
404 | dev_err(dvfs_info->dev, | 377 | dev_err(dvfs_info->dev, |
405 | "failed to init cpufreq table: %d\n", ret); | 378 | "failed to init cpufreq table: %d\n", ret); |
406 | goto err_put_node; | 379 | goto err_put_node; |
407 | } | 380 | } |
408 | dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev); | 381 | dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); |
409 | exynos_sort_descend_freq_table(); | 382 | exynos_sort_descend_freq_table(); |
410 | 383 | ||
411 | if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) | 384 | if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency)) |
@@ -454,7 +427,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) | |||
454 | return 0; | 427 | return 0; |
455 | 428 | ||
456 | err_free_table: | 429 | err_free_table: |
457 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 430 | dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); |
458 | err_put_node: | 431 | err_put_node: |
459 | of_node_put(np); | 432 | of_node_put(np); |
460 | dev_err(&pdev->dev, "%s: failed initialization\n", __func__); | 433 | dev_err(&pdev->dev, "%s: failed initialization\n", __func__); |
@@ -464,7 +437,7 @@ err_put_node: | |||
464 | static int exynos_cpufreq_remove(struct platform_device *pdev) | 437 | static int exynos_cpufreq_remove(struct platform_device *pdev) |
465 | { | 438 | { |
466 | cpufreq_unregister_driver(&exynos_driver); | 439 | cpufreq_unregister_driver(&exynos_driver); |
467 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 440 | dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); |
468 | return 0; | 441 | return 0; |
469 | } | 442 | } |
470 | 443 | ||
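The exynos5440 hunks also pick up the OPP rename: <linux/opp.h> becomes <linux/pm_opp.h>, struct opp becomes struct dev_pm_opp, and the opp_* helpers gain a dev_pm_ prefix. A minimal lookup under the renamed API, with hypothetical dev and freq_khz variables and the same RCU protection used in the driver:

	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, freq_khz * 1000UL, true);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);	/* microvolts for this OPP */
	rcu_read_unlock();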
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index f111454a7aea..3458d27f63b4 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); | |||
54 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | 54 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, |
55 | struct cpufreq_frequency_table *table) | 55 | struct cpufreq_frequency_table *table) |
56 | { | 56 | { |
57 | unsigned int next_larger = ~0; | 57 | unsigned int next_larger = ~0, freq, i = 0; |
58 | unsigned int i; | 58 | bool found = false; |
59 | unsigned int count = 0; | ||
60 | 59 | ||
61 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", | 60 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", |
62 | policy->min, policy->max, policy->cpu); | 61 | policy->min, policy->max, policy->cpu); |
63 | 62 | ||
64 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 63 | cpufreq_verify_within_cpu_limits(policy); |
65 | policy->cpuinfo.max_freq); | ||
66 | 64 | ||
67 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 65 | for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) { |
68 | unsigned int freq = table[i].frequency; | ||
69 | if (freq == CPUFREQ_ENTRY_INVALID) | 66 | if (freq == CPUFREQ_ENTRY_INVALID) |
70 | continue; | 67 | continue; |
71 | if ((freq >= policy->min) && (freq <= policy->max)) | 68 | if ((freq >= policy->min) && (freq <= policy->max)) { |
72 | count++; | 69 | found = true; |
73 | else if ((next_larger > freq) && (freq > policy->max)) | 70 | break; |
71 | } | ||
72 | |||
73 | if ((next_larger > freq) && (freq > policy->max)) | ||
74 | next_larger = freq; | 74 | next_larger = freq; |
75 | } | 75 | } |
76 | 76 | ||
77 | if (!count) | 77 | if (!found) { |
78 | policy->max = next_larger; | 78 | policy->max = next_larger; |
79 | 79 | cpufreq_verify_within_cpu_limits(policy); | |
80 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 80 | } |
81 | policy->cpuinfo.max_freq); | ||
82 | 81 | ||
83 | pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", | 82 | pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", |
84 | policy->min, policy->max, policy->cpu); | 83 | policy->min, policy->max, policy->cpu); |
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
87 | } | 86 | } |
88 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); | 87 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); |
89 | 88 | ||
89 | /* | ||
90 | * Generic routine to verify policy & frequency table, requires driver to call | ||
91 | * cpufreq_frequency_table_get_attr() prior to it. | ||
92 | */ | ||
93 | int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) | ||
94 | { | ||
95 | struct cpufreq_frequency_table *table = | ||
96 | cpufreq_frequency_get_table(policy->cpu); | ||
97 | if (!table) | ||
98 | return -ENODEV; | ||
99 | |||
100 | return cpufreq_frequency_table_verify(policy, table); | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); | ||
90 | 103 | ||
91 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | 104 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
92 | struct cpufreq_frequency_table *table, | 105 | struct cpufreq_frequency_table *table, |
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { | |||
200 | }; | 213 | }; |
201 | EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); | 214 | EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); |
202 | 215 | ||
216 | struct freq_attr *cpufreq_generic_attr[] = { | ||
217 | &cpufreq_freq_attr_scaling_available_freqs, | ||
218 | NULL, | ||
219 | }; | ||
220 | EXPORT_SYMBOL_GPL(cpufreq_generic_attr); | ||
221 | |||
203 | /* | 222 | /* |
204 | * if you use these, you must assure that the frequency table is valid | 223 | * if you use these, you must assure that the frequency table is valid |
205 | * all the time between get_attr and put_attr! | 224 | * all the time between get_attr and put_attr! |
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu) | |||
219 | } | 238 | } |
220 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 239 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
221 | 240 | ||
241 | int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, | ||
242 | struct cpufreq_frequency_table *table) | ||
243 | { | ||
244 | int ret = cpufreq_frequency_table_cpuinfo(policy, table); | ||
245 | |||
246 | if (!ret) | ||
247 | cpufreq_frequency_table_get_attr(table, policy->cpu); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); | ||
252 | |||
222 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) | 253 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) |
223 | { | 254 | { |
224 | pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", | 255 | pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", |
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 70442c7b5e71..d83e8266a58e 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c | |||
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy, | |||
401 | 401 | ||
402 | static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | 402 | static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) |
403 | { | 403 | { |
404 | unsigned int maxfreq, curfreq; | 404 | unsigned int maxfreq; |
405 | 405 | ||
406 | if (!policy || policy->cpu != 0) | 406 | if (!policy || policy->cpu != 0) |
407 | return -ENODEV; | 407 | return -ENODEV; |
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
415 | maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; | 415 | maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; |
416 | 416 | ||
417 | stock_freq = maxfreq; | 417 | stock_freq = maxfreq; |
418 | curfreq = gx_get_cpuspeed(0); | ||
419 | 418 | ||
420 | pr_debug("cpu max frequency is %d.\n", maxfreq); | 419 | pr_debug("cpu max frequency is %d.\n", maxfreq); |
421 | pr_debug("cpu current frequency is %dkHz.\n", curfreq); | ||
422 | 420 | ||
423 | /* setup basic struct for cpufreq API */ | 421 | /* setup basic struct for cpufreq API */ |
424 | policy->cpu = 0; | 422 | policy->cpu = 0; |
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
428 | else | 426 | else |
429 | policy->min = maxfreq / POLICY_MIN_DIV; | 427 | policy->min = maxfreq / POLICY_MIN_DIV; |
430 | policy->max = maxfreq; | 428 | policy->max = maxfreq; |
431 | policy->cur = curfreq; | ||
432 | policy->cpuinfo.min_freq = maxfreq / max_duration; | 429 | policy->cpuinfo.min_freq = maxfreq / max_duration; |
433 | policy->cpuinfo.max_freq = maxfreq; | 430 | policy->cpuinfo.max_freq = maxfreq; |
434 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 431 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index 794123fcf3e3..bf8902a0866d 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c | |||
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void) | |||
66 | struct device_node *np; | 66 | struct device_node *np; |
67 | int ret; | 67 | int ret; |
68 | 68 | ||
69 | if (!of_machine_is_compatible("calxeda,highbank")) | 69 | if ((!of_machine_is_compatible("calxeda,highbank")) && |
70 | (!of_machine_is_compatible("calxeda,ecx-2000"))) | ||
70 | return -ENODEV; | 71 | return -ENODEV; |
71 | 72 | ||
72 | cpu_dev = get_cpu_device(0); | 73 | cpu_dev = get_cpu_device(0); |
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index 3e14f0317175..4695fa22406a 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c | |||
@@ -227,42 +227,11 @@ acpi_cpufreq_get ( | |||
227 | static int | 227 | static int |
228 | acpi_cpufreq_target ( | 228 | acpi_cpufreq_target ( |
229 | struct cpufreq_policy *policy, | 229 | struct cpufreq_policy *policy, |
230 | unsigned int target_freq, | 230 | unsigned int index) |
231 | unsigned int relation) | ||
232 | { | 231 | { |
233 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 232 | return processor_set_freq(acpi_io_data[policy->cpu], policy, index); |
234 | unsigned int next_state = 0; | ||
235 | unsigned int result = 0; | ||
236 | |||
237 | pr_debug("acpi_cpufreq_setpolicy\n"); | ||
238 | |||
239 | result = cpufreq_frequency_table_target(policy, | ||
240 | data->freq_table, target_freq, relation, &next_state); | ||
241 | if (result) | ||
242 | return (result); | ||
243 | |||
244 | result = processor_set_freq(data, policy, next_state); | ||
245 | |||
246 | return (result); | ||
247 | } | 233 | } |
248 | 234 | ||
249 | |||
250 | static int | ||
251 | acpi_cpufreq_verify ( | ||
252 | struct cpufreq_policy *policy) | ||
253 | { | ||
254 | unsigned int result = 0; | ||
255 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
256 | |||
257 | pr_debug("acpi_cpufreq_verify\n"); | ||
258 | |||
259 | result = cpufreq_frequency_table_verify(policy, | ||
260 | data->freq_table); | ||
261 | |||
262 | return (result); | ||
263 | } | ||
264 | |||
265 | |||
266 | static int | 235 | static int |
267 | acpi_cpufreq_cpu_init ( | 236 | acpi_cpufreq_cpu_init ( |
268 | struct cpufreq_policy *policy) | 237 | struct cpufreq_policy *policy) |
@@ -321,7 +290,6 @@ acpi_cpufreq_cpu_init ( | |||
321 | data->acpi_data.states[i].transition_latency * 1000; | 290 | data->acpi_data.states[i].transition_latency * 1000; |
322 | } | 291 | } |
323 | } | 292 | } |
324 | policy->cur = processor_get_freq(data, policy->cpu); | ||
325 | 293 | ||
326 | /* table init */ | 294 | /* table init */ |
327 | for (i = 0; i <= data->acpi_data.state_count; i++) | 295 | for (i = 0; i <= data->acpi_data.state_count; i++) |
@@ -335,7 +303,7 @@ acpi_cpufreq_cpu_init ( | |||
335 | } | 303 | } |
336 | } | 304 | } |
337 | 305 | ||
338 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 306 | result = cpufreq_table_validate_and_show(policy, data->freq_table); |
339 | if (result) { | 307 | if (result) { |
340 | goto err_freqfree; | 308 | goto err_freqfree; |
341 | } | 309 | } |
@@ -356,8 +324,6 @@ acpi_cpufreq_cpu_init ( | |||
356 | (u32) data->acpi_data.states[i].status, | 324 | (u32) data->acpi_data.states[i].status, |
357 | (u32) data->acpi_data.states[i].control); | 325 | (u32) data->acpi_data.states[i].control); |
358 | 326 | ||
359 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
360 | |||
361 | /* the first call to ->target() should result in us actually | 327 | /* the first call to ->target() should result in us actually |
362 | * writing something to the appropriate registers. */ | 328 | * writing something to the appropriate registers. */ |
363 | data->resume = 1; | 329 | data->resume = 1; |
@@ -396,20 +362,14 @@ acpi_cpufreq_cpu_exit ( | |||
396 | } | 362 | } |
397 | 363 | ||
398 | 364 | ||
399 | static struct freq_attr* acpi_cpufreq_attr[] = { | ||
400 | &cpufreq_freq_attr_scaling_available_freqs, | ||
401 | NULL, | ||
402 | }; | ||
403 | |||
404 | |||
405 | static struct cpufreq_driver acpi_cpufreq_driver = { | 365 | static struct cpufreq_driver acpi_cpufreq_driver = { |
406 | .verify = acpi_cpufreq_verify, | 366 | .verify = cpufreq_generic_frequency_table_verify, |
407 | .target = acpi_cpufreq_target, | 367 | .target_index = acpi_cpufreq_target, |
408 | .get = acpi_cpufreq_get, | 368 | .get = acpi_cpufreq_get, |
409 | .init = acpi_cpufreq_cpu_init, | 369 | .init = acpi_cpufreq_cpu_init, |
410 | .exit = acpi_cpufreq_cpu_exit, | 370 | .exit = acpi_cpufreq_cpu_exit, |
411 | .name = "acpi-cpufreq", | 371 | .name = "acpi-cpufreq", |
412 | .attr = acpi_cpufreq_attr, | 372 | .attr = cpufreq_generic_attr, |
413 | }; | 373 | }; |
414 | 374 | ||
415 | 375 | ||
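[Annotation] The ia64-acpi-cpufreq hunks show the ->target() to ->target_index() conversion: the core now resolves target_freq/relation into a table index before calling the driver, so the open-coded cpufreq_frequency_table_target() lookup and the per-driver verify callback disappear. The before/after callback shapes, with my_set_hw_state() standing in for the driver's hardware programming (not a real function from this series):

#include <linux/cpufreq.h>
#include <linux/errno.h>

static struct cpufreq_frequency_table *my_freq_table;	/* registered in ->init() */

static int my_set_hw_state(struct cpufreq_policy *policy, unsigned int index);

/* old style: the driver resolves target_freq/relation into a table index */
static int my_target(struct cpufreq_policy *policy,
		     unsigned int target_freq, unsigned int relation)
{
	unsigned int index;

	if (cpufreq_frequency_table_target(policy, my_freq_table, target_freq,
					   relation, &index))
		return -EINVAL;

	return my_set_hw_state(policy, index);
}

/* new style: the core hands over an already validated table index */
static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return my_set_hw_state(policy, index);
}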
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index c3fd2a101ca0..07af3b0de069 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/opp.h> | 16 | #include <linux/pm_opp.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/regulator/consumer.h> | 18 | #include <linux/regulator/consumer.h> |
19 | 19 | ||
@@ -35,49 +35,31 @@ static struct device *cpu_dev; | |||
35 | static struct cpufreq_frequency_table *freq_table; | 35 | static struct cpufreq_frequency_table *freq_table; |
36 | static unsigned int transition_latency; | 36 | static unsigned int transition_latency; |
37 | 37 | ||
38 | static int imx6q_verify_speed(struct cpufreq_policy *policy) | ||
39 | { | ||
40 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
41 | } | ||
42 | |||
43 | static unsigned int imx6q_get_speed(unsigned int cpu) | 38 | static unsigned int imx6q_get_speed(unsigned int cpu) |
44 | { | 39 | { |
45 | return clk_get_rate(arm_clk) / 1000; | 40 | return clk_get_rate(arm_clk) / 1000; |
46 | } | 41 | } |
47 | 42 | ||
48 | static int imx6q_set_target(struct cpufreq_policy *policy, | 43 | static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) |
49 | unsigned int target_freq, unsigned int relation) | ||
50 | { | 44 | { |
51 | struct cpufreq_freqs freqs; | 45 | struct cpufreq_freqs freqs; |
52 | struct opp *opp; | 46 | struct dev_pm_opp *opp; |
53 | unsigned long freq_hz, volt, volt_old; | 47 | unsigned long freq_hz, volt, volt_old; |
54 | unsigned int index; | ||
55 | int ret; | 48 | int ret; |
56 | 49 | ||
57 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
58 | relation, &index); | ||
59 | if (ret) { | ||
60 | dev_err(cpu_dev, "failed to match target frequency %d: %d\n", | ||
61 | target_freq, ret); | ||
62 | return ret; | ||
63 | } | ||
64 | |||
65 | freqs.new = freq_table[index].frequency; | 50 | freqs.new = freq_table[index].frequency; |
66 | freq_hz = freqs.new * 1000; | 51 | freq_hz = freqs.new * 1000; |
67 | freqs.old = clk_get_rate(arm_clk) / 1000; | 52 | freqs.old = clk_get_rate(arm_clk) / 1000; |
68 | 53 | ||
69 | if (freqs.old == freqs.new) | ||
70 | return 0; | ||
71 | |||
72 | rcu_read_lock(); | 54 | rcu_read_lock(); |
73 | opp = opp_find_freq_ceil(cpu_dev, &freq_hz); | 55 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); |
74 | if (IS_ERR(opp)) { | 56 | if (IS_ERR(opp)) { |
75 | rcu_read_unlock(); | 57 | rcu_read_unlock(); |
76 | dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); | 58 | dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); |
77 | return PTR_ERR(opp); | 59 | return PTR_ERR(opp); |
78 | } | 60 | } |
79 | 61 | ||
80 | volt = opp_get_voltage(opp); | 62 | volt = dev_pm_opp_get_voltage(opp); |
81 | rcu_read_unlock(); | 63 | rcu_read_unlock(); |
82 | volt_old = regulator_get_voltage(arm_reg); | 64 | volt_old = regulator_get_voltage(arm_reg); |
83 | 65 | ||
@@ -159,47 +141,23 @@ post_notify: | |||
159 | 141 | ||
160 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) | 142 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) |
161 | { | 143 | { |
162 | int ret; | 144 | return cpufreq_generic_init(policy, freq_table, transition_latency); |
163 | |||
164 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
165 | if (ret) { | ||
166 | dev_err(cpu_dev, "invalid frequency table: %d\n", ret); | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | policy->cpuinfo.transition_latency = transition_latency; | ||
171 | policy->cur = clk_get_rate(arm_clk) / 1000; | ||
172 | cpumask_setall(policy->cpus); | ||
173 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) | ||
179 | { | ||
180 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
181 | return 0; | ||
182 | } | 145 | } |
183 | 146 | ||
184 | static struct freq_attr *imx6q_cpufreq_attr[] = { | ||
185 | &cpufreq_freq_attr_scaling_available_freqs, | ||
186 | NULL, | ||
187 | }; | ||
188 | |||
189 | static struct cpufreq_driver imx6q_cpufreq_driver = { | 147 | static struct cpufreq_driver imx6q_cpufreq_driver = { |
190 | .verify = imx6q_verify_speed, | 148 | .verify = cpufreq_generic_frequency_table_verify, |
191 | .target = imx6q_set_target, | 149 | .target_index = imx6q_set_target, |
192 | .get = imx6q_get_speed, | 150 | .get = imx6q_get_speed, |
193 | .init = imx6q_cpufreq_init, | 151 | .init = imx6q_cpufreq_init, |
194 | .exit = imx6q_cpufreq_exit, | 152 | .exit = cpufreq_generic_exit, |
195 | .name = "imx6q-cpufreq", | 153 | .name = "imx6q-cpufreq", |
196 | .attr = imx6q_cpufreq_attr, | 154 | .attr = cpufreq_generic_attr, |
197 | }; | 155 | }; |
198 | 156 | ||
199 | static int imx6q_cpufreq_probe(struct platform_device *pdev) | 157 | static int imx6q_cpufreq_probe(struct platform_device *pdev) |
200 | { | 158 | { |
201 | struct device_node *np; | 159 | struct device_node *np; |
202 | struct opp *opp; | 160 | struct dev_pm_opp *opp; |
203 | unsigned long min_volt, max_volt; | 161 | unsigned long min_volt, max_volt; |
204 | int num, ret; | 162 | int num, ret; |
205 | 163 | ||
@@ -237,14 +195,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
237 | } | 195 | } |
238 | 196 | ||
239 | /* We expect an OPP table supplied by platform */ | 197 | /* We expect an OPP table supplied by platform */ |
240 | num = opp_get_opp_count(cpu_dev); | 198 | num = dev_pm_opp_get_opp_count(cpu_dev); |
241 | if (num < 0) { | 199 | if (num < 0) { |
242 | ret = num; | 200 | ret = num; |
243 | dev_err(cpu_dev, "no OPP table is found: %d\n", ret); | 201 | dev_err(cpu_dev, "no OPP table is found: %d\n", ret); |
244 | goto put_node; | 202 | goto put_node; |
245 | } | 203 | } |
246 | 204 | ||
247 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table); | 205 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
248 | if (ret) { | 206 | if (ret) { |
249 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); | 207 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); |
250 | goto put_node; | 208 | goto put_node; |
@@ -259,12 +217,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
259 | * same order. | 217 | * same order. |
260 | */ | 218 | */ |
261 | rcu_read_lock(); | 219 | rcu_read_lock(); |
262 | opp = opp_find_freq_exact(cpu_dev, | 220 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
263 | freq_table[0].frequency * 1000, true); | 221 | freq_table[0].frequency * 1000, true); |
264 | min_volt = opp_get_voltage(opp); | 222 | min_volt = dev_pm_opp_get_voltage(opp); |
265 | opp = opp_find_freq_exact(cpu_dev, | 223 | opp = dev_pm_opp_find_freq_exact(cpu_dev, |
266 | freq_table[--num].frequency * 1000, true); | 224 | freq_table[--num].frequency * 1000, true); |
267 | max_volt = opp_get_voltage(opp); | 225 | max_volt = dev_pm_opp_get_voltage(opp); |
268 | rcu_read_unlock(); | 226 | rcu_read_unlock(); |
269 | ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); | 227 | ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); |
270 | if (ret > 0) | 228 | if (ret > 0) |
@@ -292,7 +250,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) | |||
292 | return 0; | 250 | return 0; |
293 | 251 | ||
294 | free_freq_table: | 252 | free_freq_table: |
295 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 253 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
296 | put_node: | 254 | put_node: |
297 | of_node_put(np); | 255 | of_node_put(np); |
298 | return ret; | 256 | return ret; |
@@ -301,7 +259,7 @@ put_node: | |||
301 | static int imx6q_cpufreq_remove(struct platform_device *pdev) | 259 | static int imx6q_cpufreq_remove(struct platform_device *pdev) |
302 | { | 260 | { |
303 | cpufreq_unregister_driver(&imx6q_cpufreq_driver); | 261 | cpufreq_unregister_driver(&imx6q_cpufreq_driver); |
304 | opp_free_cpufreq_table(cpu_dev, &freq_table); | 262 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
305 | 263 | ||
306 | return 0; | 264 | return 0; |
307 | } | 265 | } |
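[Annotation] The imx6q set_target path above keeps the RCU-protected OPP lookup, only with the dev_pm_opp_* names. That lookup in isolation, as a sketch; "my_cpu_dev", the helper name and the 0-on-failure return are placeholders chosen for the example:

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned long my_volt_for_freq(struct device *my_cpu_dev,
				      unsigned long freq_hz)
{
	struct dev_pm_opp *opp;
	unsigned long volt;

	rcu_read_lock();			/* OPP pointers are RCU-protected */
	opp = dev_pm_opp_find_freq_ceil(my_cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return 0;			/* no OPP at or above freq_hz */
	}
	volt = dev_pm_opp_get_voltage(opp);	/* read while still under RCU */
	rcu_read_unlock();

	return volt;
}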
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index f7c99df0880b..babf3e40e9fa 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c | |||
@@ -59,9 +59,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) | |||
59 | { | 59 | { |
60 | struct icst_vco vco; | 60 | struct icst_vco vco; |
61 | 61 | ||
62 | cpufreq_verify_within_limits(policy, | 62 | cpufreq_verify_within_cpu_limits(policy); |
63 | policy->cpuinfo.min_freq, | ||
64 | policy->cpuinfo.max_freq); | ||
65 | 63 | ||
66 | vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); | 64 | vco = icst_hz_to_vco(&cclk_params, policy->max * 1000); |
67 | policy->max = icst_hz(&cclk_params, vco) / 1000; | 65 | policy->max = icst_hz(&cclk_params, vco) / 1000; |
@@ -69,10 +67,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy) | |||
69 | vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); | 67 | vco = icst_hz_to_vco(&cclk_params, policy->min * 1000); |
70 | policy->min = icst_hz(&cclk_params, vco) / 1000; | 68 | policy->min = icst_hz(&cclk_params, vco) / 1000; |
71 | 69 | ||
72 | cpufreq_verify_within_limits(policy, | 70 | cpufreq_verify_within_cpu_limits(policy); |
73 | policy->cpuinfo.min_freq, | ||
74 | policy->cpuinfo.max_freq); | ||
75 | |||
76 | return 0; | 71 | return 0; |
77 | } | 72 | } |
78 | 73 | ||
@@ -186,10 +181,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy) | |||
186 | { | 181 | { |
187 | 182 | ||
188 | /* set default policy and cpuinfo */ | 183 | /* set default policy and cpuinfo */ |
189 | policy->cpuinfo.max_freq = 160000; | 184 | policy->max = policy->cpuinfo.max_freq = 160000; |
190 | policy->cpuinfo.min_freq = 12000; | 185 | policy->min = policy->cpuinfo.min_freq = 12000; |
191 | policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ | 186 | policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */ |
192 | policy->cur = policy->min = policy->max = integrator_get(policy->cpu); | ||
193 | 187 | ||
194 | return 0; | 188 | return 0; |
195 | } | 189 | } |
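[Annotation] The integrator, longrun and intel_pstate hunks (and freq_table.c above) all replace the open-coded clamp against cpuinfo limits with cpufreq_verify_within_cpu_limits(). The header change is not part of this section, but from the call sites the wrapper is presumably just the old two-argument call with the cpuinfo bounds filled in, roughly:

/* presumed shape of the helper in <linux/cpufreq.h>, reconstructed here */
static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}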
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb3fdc755000..89925513fea5 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -33,6 +33,8 @@ | |||
33 | 33 | ||
34 | #define SAMPLE_COUNT 3 | 34 | #define SAMPLE_COUNT 3 |
35 | 35 | ||
36 | #define BYT_RATIOS 0x66a | ||
37 | |||
36 | #define FRAC_BITS 8 | 38 | #define FRAC_BITS 8 |
37 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 39 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
38 | #define fp_toint(X) ((X) >> FRAC_BITS) | 40 | #define fp_toint(X) ((X) >> FRAC_BITS) |
@@ -78,7 +80,6 @@ struct cpudata { | |||
78 | 80 | ||
79 | struct timer_list timer; | 81 | struct timer_list timer; |
80 | 82 | ||
81 | struct pstate_adjust_policy *pstate_policy; | ||
82 | struct pstate_data pstate; | 83 | struct pstate_data pstate; |
83 | struct _pid pid; | 84 | struct _pid pid; |
84 | 85 | ||
@@ -100,15 +101,21 @@ struct pstate_adjust_policy { | |||
100 | int i_gain_pct; | 101 | int i_gain_pct; |
101 | }; | 102 | }; |
102 | 103 | ||
103 | static struct pstate_adjust_policy default_policy = { | 104 | struct pstate_funcs { |
104 | .sample_rate_ms = 10, | 105 | int (*get_max)(void); |
105 | .deadband = 0, | 106 | int (*get_min)(void); |
106 | .setpoint = 97, | 107 | int (*get_turbo)(void); |
107 | .p_gain_pct = 20, | 108 | void (*set)(int pstate); |
108 | .d_gain_pct = 0, | 109 | }; |
109 | .i_gain_pct = 0, | 110 | |
111 | struct cpu_defaults { | ||
112 | struct pstate_adjust_policy pid_policy; | ||
113 | struct pstate_funcs funcs; | ||
110 | }; | 114 | }; |
111 | 115 | ||
116 | static struct pstate_adjust_policy pid_params; | ||
117 | static struct pstate_funcs pstate_funcs; | ||
118 | |||
112 | struct perf_limits { | 119 | struct perf_limits { |
113 | int no_turbo; | 120 | int no_turbo; |
114 | int max_perf_pct; | 121 | int max_perf_pct; |
@@ -185,14 +192,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy) | |||
185 | 192 | ||
186 | static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) | 193 | static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) |
187 | { | 194 | { |
188 | pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); | 195 | pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct); |
189 | pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); | 196 | pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); |
190 | pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); | 197 | pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); |
191 | 198 | ||
192 | pid_reset(&cpu->pid, | 199 | pid_reset(&cpu->pid, |
193 | cpu->pstate_policy->setpoint, | 200 | pid_params.setpoint, |
194 | 100, | 201 | 100, |
195 | cpu->pstate_policy->deadband, | 202 | pid_params.deadband, |
196 | 0); | 203 | 0); |
197 | } | 204 | } |
198 | 205 | ||
@@ -226,12 +233,12 @@ struct pid_param { | |||
226 | }; | 233 | }; |
227 | 234 | ||
228 | static struct pid_param pid_files[] = { | 235 | static struct pid_param pid_files[] = { |
229 | {"sample_rate_ms", &default_policy.sample_rate_ms}, | 236 | {"sample_rate_ms", &pid_params.sample_rate_ms}, |
230 | {"d_gain_pct", &default_policy.d_gain_pct}, | 237 | {"d_gain_pct", &pid_params.d_gain_pct}, |
231 | {"i_gain_pct", &default_policy.i_gain_pct}, | 238 | {"i_gain_pct", &pid_params.i_gain_pct}, |
232 | {"deadband", &default_policy.deadband}, | 239 | {"deadband", &pid_params.deadband}, |
233 | {"setpoint", &default_policy.setpoint}, | 240 | {"setpoint", &pid_params.setpoint}, |
234 | {"p_gain_pct", &default_policy.p_gain_pct}, | 241 | {"p_gain_pct", &pid_params.p_gain_pct}, |
235 | {NULL, NULL} | 242 | {NULL, NULL} |
236 | }; | 243 | }; |
237 | 244 | ||
@@ -336,33 +343,92 @@ static void intel_pstate_sysfs_expose_params(void) | |||
336 | } | 343 | } |
337 | 344 | ||
338 | /************************** sysfs end ************************/ | 345 | /************************** sysfs end ************************/ |
346 | static int byt_get_min_pstate(void) | ||
347 | { | ||
348 | u64 value; | ||
349 | rdmsrl(BYT_RATIOS, value); | ||
350 | return value & 0xFF; | ||
351 | } | ||
352 | |||
353 | static int byt_get_max_pstate(void) | ||
354 | { | ||
355 | u64 value; | ||
356 | rdmsrl(BYT_RATIOS, value); | ||
357 | return (value >> 16) & 0xFF; | ||
358 | } | ||
339 | 359 | ||
340 | static int intel_pstate_min_pstate(void) | 360 | static int core_get_min_pstate(void) |
341 | { | 361 | { |
342 | u64 value; | 362 | u64 value; |
343 | rdmsrl(MSR_PLATFORM_INFO, value); | 363 | rdmsrl(MSR_PLATFORM_INFO, value); |
344 | return (value >> 40) & 0xFF; | 364 | return (value >> 40) & 0xFF; |
345 | } | 365 | } |
346 | 366 | ||
347 | static int intel_pstate_max_pstate(void) | 367 | static int core_get_max_pstate(void) |
348 | { | 368 | { |
349 | u64 value; | 369 | u64 value; |
350 | rdmsrl(MSR_PLATFORM_INFO, value); | 370 | rdmsrl(MSR_PLATFORM_INFO, value); |
351 | return (value >> 8) & 0xFF; | 371 | return (value >> 8) & 0xFF; |
352 | } | 372 | } |
353 | 373 | ||
354 | static int intel_pstate_turbo_pstate(void) | 374 | static int core_get_turbo_pstate(void) |
355 | { | 375 | { |
356 | u64 value; | 376 | u64 value; |
357 | int nont, ret; | 377 | int nont, ret; |
358 | rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); | 378 | rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); |
359 | nont = intel_pstate_max_pstate(); | 379 | nont = core_get_max_pstate(); |
360 | ret = ((value) & 255); | 380 | ret = ((value) & 255); |
361 | if (ret <= nont) | 381 | if (ret <= nont) |
362 | ret = nont; | 382 | ret = nont; |
363 | return ret; | 383 | return ret; |
364 | } | 384 | } |
365 | 385 | ||
386 | static void core_set_pstate(int pstate) | ||
387 | { | ||
388 | u64 val; | ||
389 | |||
390 | val = pstate << 8; | ||
391 | if (limits.no_turbo) | ||
392 | val |= (u64)1 << 32; | ||
393 | |||
394 | wrmsrl(MSR_IA32_PERF_CTL, val); | ||
395 | } | ||
396 | |||
397 | static struct cpu_defaults core_params = { | ||
398 | .pid_policy = { | ||
399 | .sample_rate_ms = 10, | ||
400 | .deadband = 0, | ||
401 | .setpoint = 97, | ||
402 | .p_gain_pct = 20, | ||
403 | .d_gain_pct = 0, | ||
404 | .i_gain_pct = 0, | ||
405 | }, | ||
406 | .funcs = { | ||
407 | .get_max = core_get_max_pstate, | ||
408 | .get_min = core_get_min_pstate, | ||
409 | .get_turbo = core_get_turbo_pstate, | ||
410 | .set = core_set_pstate, | ||
411 | }, | ||
412 | }; | ||
413 | |||
414 | static struct cpu_defaults byt_params = { | ||
415 | .pid_policy = { | ||
416 | .sample_rate_ms = 10, | ||
417 | .deadband = 0, | ||
418 | .setpoint = 97, | ||
419 | .p_gain_pct = 14, | ||
420 | .d_gain_pct = 0, | ||
421 | .i_gain_pct = 4, | ||
422 | }, | ||
423 | .funcs = { | ||
424 | .get_max = byt_get_max_pstate, | ||
425 | .get_min = byt_get_min_pstate, | ||
426 | .get_turbo = byt_get_max_pstate, | ||
427 | .set = core_set_pstate, | ||
428 | }, | ||
429 | }; | ||
430 | |||
431 | |||
366 | static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | 432 | static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) |
367 | { | 433 | { |
368 | int max_perf = cpu->pstate.turbo_pstate; | 434 | int max_perf = cpu->pstate.turbo_pstate; |
@@ -383,7 +449,6 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | |||
383 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | 449 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
384 | { | 450 | { |
385 | int max_perf, min_perf; | 451 | int max_perf, min_perf; |
386 | u64 val; | ||
387 | 452 | ||
388 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); | 453 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); |
389 | 454 | ||
@@ -395,11 +460,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | |||
395 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | 460 | trace_cpu_frequency(pstate * 100000, cpu->cpu); |
396 | 461 | ||
397 | cpu->pstate.current_pstate = pstate; | 462 | cpu->pstate.current_pstate = pstate; |
398 | val = pstate << 8; | ||
399 | if (limits.no_turbo) | ||
400 | val |= (u64)1 << 32; | ||
401 | 463 | ||
402 | wrmsrl(MSR_IA32_PERF_CTL, val); | 464 | pstate_funcs.set(pstate); |
403 | } | 465 | } |
404 | 466 | ||
405 | static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) | 467 | static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) |
@@ -421,9 +483,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) | |||
421 | { | 483 | { |
422 | sprintf(cpu->name, "Intel 2nd generation core"); | 484 | sprintf(cpu->name, "Intel 2nd generation core"); |
423 | 485 | ||
424 | cpu->pstate.min_pstate = intel_pstate_min_pstate(); | 486 | cpu->pstate.min_pstate = pstate_funcs.get_min(); |
425 | cpu->pstate.max_pstate = intel_pstate_max_pstate(); | 487 | cpu->pstate.max_pstate = pstate_funcs.get_max(); |
426 | cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); | 488 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); |
427 | 489 | ||
428 | /* | 490 | /* |
429 | * goto max pstate so we don't slow up boot if we are built-in if we are | 491 | * goto max pstate so we don't slow up boot if we are built-in if we are |
@@ -465,7 +527,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
465 | { | 527 | { |
466 | int sample_time, delay; | 528 | int sample_time, delay; |
467 | 529 | ||
468 | sample_time = cpu->pstate_policy->sample_rate_ms; | 530 | sample_time = pid_params.sample_rate_ms; |
469 | delay = msecs_to_jiffies(sample_time); | 531 | delay = msecs_to_jiffies(sample_time); |
470 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 532 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
471 | } | 533 | } |
@@ -521,14 +583,15 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
521 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } | 583 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } |
522 | 584 | ||
523 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 585 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
524 | ICPU(0x2a, default_policy), | 586 | ICPU(0x2a, core_params), |
525 | ICPU(0x2d, default_policy), | 587 | ICPU(0x2d, core_params), |
526 | ICPU(0x3a, default_policy), | 588 | ICPU(0x37, byt_params), |
527 | ICPU(0x3c, default_policy), | 589 | ICPU(0x3a, core_params), |
528 | ICPU(0x3e, default_policy), | 590 | ICPU(0x3c, core_params), |
529 | ICPU(0x3f, default_policy), | 591 | ICPU(0x3e, core_params), |
530 | ICPU(0x45, default_policy), | 592 | ICPU(0x3f, core_params), |
531 | ICPU(0x46, default_policy), | 593 | ICPU(0x45, core_params), |
594 | ICPU(0x46, core_params), | ||
532 | {} | 595 | {} |
533 | }; | 596 | }; |
534 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); | 597 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); |
@@ -552,8 +615,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
552 | intel_pstate_get_cpu_pstates(cpu); | 615 | intel_pstate_get_cpu_pstates(cpu); |
553 | 616 | ||
554 | cpu->cpu = cpunum; | 617 | cpu->cpu = cpunum; |
555 | cpu->pstate_policy = | 618 | |
556 | (struct pstate_adjust_policy *)id->driver_data; | ||
557 | init_timer_deferrable(&cpu->timer); | 619 | init_timer_deferrable(&cpu->timer); |
558 | cpu->timer.function = intel_pstate_timer_func; | 620 | cpu->timer.function = intel_pstate_timer_func; |
559 | cpu->timer.data = | 621 | cpu->timer.data = |
@@ -613,9 +675,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
613 | 675 | ||
614 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | 676 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) |
615 | { | 677 | { |
616 | cpufreq_verify_within_limits(policy, | 678 | cpufreq_verify_within_cpu_limits(policy); |
617 | policy->cpuinfo.min_freq, | ||
618 | policy->cpuinfo.max_freq); | ||
619 | 679 | ||
620 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | 680 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && |
621 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | 681 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) |
@@ -683,9 +743,9 @@ static int intel_pstate_msrs_not_valid(void) | |||
683 | rdmsrl(MSR_IA32_APERF, aperf); | 743 | rdmsrl(MSR_IA32_APERF, aperf); |
684 | rdmsrl(MSR_IA32_MPERF, mperf); | 744 | rdmsrl(MSR_IA32_MPERF, mperf); |
685 | 745 | ||
686 | if (!intel_pstate_min_pstate() || | 746 | if (!pstate_funcs.get_max() || |
687 | !intel_pstate_max_pstate() || | 747 | !pstate_funcs.get_min() || |
688 | !intel_pstate_turbo_pstate()) | 748 | !pstate_funcs.get_turbo()) |
689 | return -ENODEV; | 749 | return -ENODEV; |
690 | 750 | ||
691 | rdmsrl(MSR_IA32_APERF, tmp); | 751 | rdmsrl(MSR_IA32_APERF, tmp); |
@@ -698,10 +758,30 @@ static int intel_pstate_msrs_not_valid(void) | |||
698 | 758 | ||
699 | return 0; | 759 | return 0; |
700 | } | 760 | } |
761 | |||
762 | void copy_pid_params(struct pstate_adjust_policy *policy) | ||
763 | { | ||
764 | pid_params.sample_rate_ms = policy->sample_rate_ms; | ||
765 | pid_params.p_gain_pct = policy->p_gain_pct; | ||
766 | pid_params.i_gain_pct = policy->i_gain_pct; | ||
767 | pid_params.d_gain_pct = policy->d_gain_pct; | ||
768 | pid_params.deadband = policy->deadband; | ||
769 | pid_params.setpoint = policy->setpoint; | ||
770 | } | ||
771 | |||
772 | void copy_cpu_funcs(struct pstate_funcs *funcs) | ||
773 | { | ||
774 | pstate_funcs.get_max = funcs->get_max; | ||
775 | pstate_funcs.get_min = funcs->get_min; | ||
776 | pstate_funcs.get_turbo = funcs->get_turbo; | ||
777 | pstate_funcs.set = funcs->set; | ||
778 | } | ||
779 | |||
701 | static int __init intel_pstate_init(void) | 780 | static int __init intel_pstate_init(void) |
702 | { | 781 | { |
703 | int cpu, rc = 0; | 782 | int cpu, rc = 0; |
704 | const struct x86_cpu_id *id; | 783 | const struct x86_cpu_id *id; |
784 | struct cpu_defaults *cpu_info; | ||
705 | 785 | ||
706 | if (no_load) | 786 | if (no_load) |
707 | return -ENODEV; | 787 | return -ENODEV; |
@@ -710,6 +790,11 @@ static int __init intel_pstate_init(void) | |||
710 | if (!id) | 790 | if (!id) |
711 | return -ENODEV; | 791 | return -ENODEV; |
712 | 792 | ||
793 | cpu_info = (struct cpu_defaults *)id->driver_data; | ||
794 | |||
795 | copy_pid_params(&cpu_info->pid_policy); | ||
796 | copy_cpu_funcs(&cpu_info->funcs); | ||
797 | |||
713 | if (intel_pstate_msrs_not_valid()) | 798 | if (intel_pstate_msrs_not_valid()) |
714 | return -ENODEV; | 799 | return -ENODEV; |
715 | 800 | ||
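[Annotation] The intel_pstate refactor splits the PID tuning (pstate_adjust_policy) from the MSR accessors (pstate_funcs) so that CPUs with different MSR layouts, Baytrail's 0x66a ratio register versus the core parts' MSR_PLATFORM_INFO, can share the governor logic; the per-model defaults are carried in x86_cpu_id.driver_data and copied at init time. A sketch of that selection pattern with stand-in structs and stubbed accessors (only the x86_cpu_id/driver_data plumbing mirrors the patch):

#include <linux/kernel.h>
#include <asm/cpu_device_id.h>

struct my_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
};

static int my_core_get_max(void) { return 0; }	/* would read MSR_PLATFORM_INFO */
static int my_core_get_min(void) { return 0; }
static int my_byt_get_max(void)  { return 0; }	/* would read the 0x66a ratio MSR */
static int my_byt_get_min(void)  { return 0; }

static struct my_funcs my_core = { my_core_get_max, my_core_get_min };
static struct my_funcs my_byt  = { my_byt_get_max,  my_byt_get_min  };

#define MY_ICPU(model, defaults) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&defaults }

static const struct x86_cpu_id my_ids[] = {
	MY_ICPU(0x2a, my_core),		/* Sandy Bridge */
	MY_ICPU(0x37, my_byt),		/* Baytrail */
	{}
};

static struct my_funcs *my_pick_funcs(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(my_ids);

	return id ? (struct my_funcs *)id->driver_data : NULL;
}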
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index ba10658a9394..0ae4dd7e1f2d 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
@@ -55,8 +55,8 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) | |||
55 | return kirkwood_freq_table[0].frequency; | 55 | return kirkwood_freq_table[0].frequency; |
56 | } | 56 | } |
57 | 57 | ||
58 | static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, | 58 | static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, |
59 | unsigned int index) | 59 | unsigned int index) |
60 | { | 60 | { |
61 | struct cpufreq_freqs freqs; | 61 | struct cpufreq_freqs freqs; |
62 | unsigned int state = kirkwood_freq_table[index].driver_data; | 62 | unsigned int state = kirkwood_freq_table[index].driver_data; |
@@ -100,24 +100,6 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy, | |||
100 | local_irq_enable(); | 100 | local_irq_enable(); |
101 | } | 101 | } |
102 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 102 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
103 | }; | ||
104 | |||
105 | static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) | ||
106 | { | ||
107 | return cpufreq_frequency_table_verify(policy, kirkwood_freq_table); | ||
108 | } | ||
109 | |||
110 | static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, | ||
111 | unsigned int target_freq, | ||
112 | unsigned int relation) | ||
113 | { | ||
114 | unsigned int index = 0; | ||
115 | |||
116 | if (cpufreq_frequency_table_target(policy, kirkwood_freq_table, | ||
117 | target_freq, relation, &index)) | ||
118 | return -EINVAL; | ||
119 | |||
120 | kirkwood_cpufreq_set_cpu_state(policy, index); | ||
121 | 103 | ||
122 | return 0; | 104 | return 0; |
123 | } | 105 | } |
@@ -125,40 +107,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, | |||
125 | /* Module init and exit code */ | 107 | /* Module init and exit code */ |
126 | static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) | 108 | static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) |
127 | { | 109 | { |
128 | int result; | 110 | return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); |
129 | |||
130 | /* cpuinfo and default policy values */ | ||
131 | policy->cpuinfo.transition_latency = 5000; /* 5uS */ | ||
132 | policy->cur = kirkwood_cpufreq_get_cpu_frequency(0); | ||
133 | |||
134 | result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table); | ||
135 | if (result) | ||
136 | return result; | ||
137 | |||
138 | cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
146 | return 0; | ||
147 | } | 111 | } |
148 | 112 | ||
149 | static struct freq_attr *kirkwood_cpufreq_attr[] = { | ||
150 | &cpufreq_freq_attr_scaling_available_freqs, | ||
151 | NULL, | ||
152 | }; | ||
153 | |||
154 | static struct cpufreq_driver kirkwood_cpufreq_driver = { | 113 | static struct cpufreq_driver kirkwood_cpufreq_driver = { |
155 | .get = kirkwood_cpufreq_get_cpu_frequency, | 114 | .get = kirkwood_cpufreq_get_cpu_frequency, |
156 | .verify = kirkwood_cpufreq_verify, | 115 | .verify = cpufreq_generic_frequency_table_verify, |
157 | .target = kirkwood_cpufreq_target, | 116 | .target_index = kirkwood_cpufreq_target, |
158 | .init = kirkwood_cpufreq_cpu_init, | 117 | .init = kirkwood_cpufreq_cpu_init, |
159 | .exit = kirkwood_cpufreq_cpu_exit, | 118 | .exit = cpufreq_generic_exit, |
160 | .name = "kirkwood-cpufreq", | 119 | .name = "kirkwood-cpufreq", |
161 | .attr = kirkwood_cpufreq_attr, | 120 | .attr = cpufreq_generic_attr, |
162 | }; | 121 | }; |
163 | 122 | ||
164 | static int kirkwood_cpufreq_probe(struct platform_device *pdev) | 123 | static int kirkwood_cpufreq_probe(struct platform_device *pdev) |
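[Annotation] kirkwood (like imx6q above and loongson2, maple and omap below) collapses its ->init() into a single cpufreq_generic_init() call. The helper itself is added elsewhere in this series; judging from the call sites it presumably validates and registers the table, records the transition latency, and marks all CPUs as sharing the policy, roughly:

#include <linux/cpufreq.h>
#include <linux/cpumask.h>

/* reconstruction from the call sites, not the actual implementation */
static int my_generic_init(struct cpufreq_policy *policy,
			   struct cpufreq_frequency_table *table,
			   unsigned int transition_latency)
{
	int ret = cpufreq_table_validate_and_show(policy, table);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = transition_latency;

	/* drivers that called cpumask_setall() in ->init() now rely on this */
	cpumask_setall(policy->cpus);

	return 0;
}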
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 4ada1cccb052..45bafddfd8ea 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -625,28 +625,13 @@ static void longhaul_setup_voltagescaling(void) | |||
625 | } | 625 | } |
626 | 626 | ||
627 | 627 | ||
628 | static int longhaul_verify(struct cpufreq_policy *policy) | ||
629 | { | ||
630 | return cpufreq_frequency_table_verify(policy, longhaul_table); | ||
631 | } | ||
632 | |||
633 | |||
634 | static int longhaul_target(struct cpufreq_policy *policy, | 628 | static int longhaul_target(struct cpufreq_policy *policy, |
635 | unsigned int target_freq, unsigned int relation) | 629 | unsigned int table_index) |
636 | { | 630 | { |
637 | unsigned int table_index = 0; | ||
638 | unsigned int i; | 631 | unsigned int i; |
639 | unsigned int dir = 0; | 632 | unsigned int dir = 0; |
640 | u8 vid, current_vid; | 633 | u8 vid, current_vid; |
641 | 634 | ||
642 | if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, | ||
643 | relation, &table_index)) | ||
644 | return -EINVAL; | ||
645 | |||
646 | /* Don't set same frequency again */ | ||
647 | if (longhaul_index == table_index) | ||
648 | return 0; | ||
649 | |||
650 | if (!can_scale_voltage) | 635 | if (!can_scale_voltage) |
651 | longhaul_setstate(policy, table_index); | 636 | longhaul_setstate(policy, table_index); |
652 | else { | 637 | else { |
@@ -919,36 +904,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
919 | longhaul_setup_voltagescaling(); | 904 | longhaul_setup_voltagescaling(); |
920 | 905 | ||
921 | policy->cpuinfo.transition_latency = 200000; /* nsec */ | 906 | policy->cpuinfo.transition_latency = 200000; /* nsec */ |
922 | policy->cur = calc_speed(longhaul_get_cpu_mult()); | ||
923 | |||
924 | ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table); | ||
925 | if (ret) | ||
926 | return ret; | ||
927 | |||
928 | cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); | ||
929 | 907 | ||
930 | return 0; | 908 | return cpufreq_table_validate_and_show(policy, longhaul_table); |
931 | } | 909 | } |
932 | 910 | ||
933 | static int longhaul_cpu_exit(struct cpufreq_policy *policy) | ||
934 | { | ||
935 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | static struct freq_attr *longhaul_attr[] = { | ||
940 | &cpufreq_freq_attr_scaling_available_freqs, | ||
941 | NULL, | ||
942 | }; | ||
943 | |||
944 | static struct cpufreq_driver longhaul_driver = { | 911 | static struct cpufreq_driver longhaul_driver = { |
945 | .verify = longhaul_verify, | 912 | .verify = cpufreq_generic_frequency_table_verify, |
946 | .target = longhaul_target, | 913 | .target_index = longhaul_target, |
947 | .get = longhaul_get, | 914 | .get = longhaul_get, |
948 | .init = longhaul_cpu_init, | 915 | .init = longhaul_cpu_init, |
949 | .exit = longhaul_cpu_exit, | 916 | .exit = cpufreq_generic_exit, |
950 | .name = "longhaul", | 917 | .name = "longhaul", |
951 | .attr = longhaul_attr, | 918 | .attr = cpufreq_generic_attr, |
952 | }; | 919 | }; |
953 | 920 | ||
954 | static const struct x86_cpu_id longhaul_id[] = { | 921 | static const struct x86_cpu_id longhaul_id[] = { |
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 5aa031612d53..074971b12635 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c | |||
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) | |||
129 | return -EINVAL; | 129 | return -EINVAL; |
130 | 130 | ||
131 | policy->cpu = 0; | 131 | policy->cpu = 0; |
132 | cpufreq_verify_within_limits(policy, | 132 | cpufreq_verify_within_cpu_limits(policy); |
133 | policy->cpuinfo.min_freq, | ||
134 | policy->cpuinfo.max_freq); | ||
135 | 133 | ||
136 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | 134 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && |
137 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | 135 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) |
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 7bc3c44d34e2..41a8e2cdf940 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
@@ -53,11 +53,9 @@ static unsigned int loongson2_cpufreq_get(unsigned int cpu) | |||
53 | * Here we notify other drivers of the proposed change and the final change. | 53 | * Here we notify other drivers of the proposed change and the final change. |
54 | */ | 54 | */ |
55 | static int loongson2_cpufreq_target(struct cpufreq_policy *policy, | 55 | static int loongson2_cpufreq_target(struct cpufreq_policy *policy, |
56 | unsigned int target_freq, | 56 | unsigned int index) |
57 | unsigned int relation) | ||
58 | { | 57 | { |
59 | unsigned int cpu = policy->cpu; | 58 | unsigned int cpu = policy->cpu; |
60 | unsigned int newstate = 0; | ||
61 | cpumask_t cpus_allowed; | 59 | cpumask_t cpus_allowed; |
62 | struct cpufreq_freqs freqs; | 60 | struct cpufreq_freqs freqs; |
63 | unsigned int freq; | 61 | unsigned int freq; |
@@ -65,26 +63,17 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, | |||
65 | cpus_allowed = current->cpus_allowed; | 63 | cpus_allowed = current->cpus_allowed; |
66 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 64 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
67 | 65 | ||
68 | if (cpufreq_frequency_table_target | ||
69 | (policy, &loongson2_clockmod_table[0], target_freq, relation, | ||
70 | &newstate)) | ||
71 | return -EINVAL; | ||
72 | |||
73 | freq = | 66 | freq = |
74 | ((cpu_clock_freq / 1000) * | 67 | ((cpu_clock_freq / 1000) * |
75 | loongson2_clockmod_table[newstate].driver_data) / 8; | 68 | loongson2_clockmod_table[index].driver_data) / 8; |
76 | if (freq < policy->min || freq > policy->max) | ||
77 | return -EINVAL; | ||
78 | 69 | ||
79 | pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); | 70 | pr_debug("cpufreq: requested frequency %u Hz\n", |
71 | loongson2_clockmod_table[index].frequency * 1000); | ||
80 | 72 | ||
81 | freqs.old = loongson2_cpufreq_get(cpu); | 73 | freqs.old = loongson2_cpufreq_get(cpu); |
82 | freqs.new = freq; | 74 | freqs.new = freq; |
83 | freqs.flags = 0; | 75 | freqs.flags = 0; |
84 | 76 | ||
85 | if (freqs.new == freqs.old) | ||
86 | return 0; | ||
87 | |||
88 | /* notifiers */ | 77 | /* notifiers */ |
89 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 78 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
90 | 79 | ||
@@ -131,40 +120,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
131 | return ret; | 120 | return ret; |
132 | } | 121 | } |
133 | 122 | ||
134 | policy->cur = loongson2_cpufreq_get(policy->cpu); | 123 | return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); |
135 | |||
136 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], | ||
137 | policy->cpu); | ||
138 | |||
139 | return cpufreq_frequency_table_cpuinfo(policy, | ||
140 | &loongson2_clockmod_table[0]); | ||
141 | } | ||
142 | |||
143 | static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | return cpufreq_frequency_table_verify(policy, | ||
146 | &loongson2_clockmod_table[0]); | ||
147 | } | 124 | } |
148 | 125 | ||
149 | static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) | 126 | static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) |
150 | { | 127 | { |
128 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
151 | clk_put(cpuclk); | 129 | clk_put(cpuclk); |
152 | return 0; | 130 | return 0; |
153 | } | 131 | } |
154 | 132 | ||
155 | static struct freq_attr *loongson2_table_attr[] = { | ||
156 | &cpufreq_freq_attr_scaling_available_freqs, | ||
157 | NULL, | ||
158 | }; | ||
159 | |||
160 | static struct cpufreq_driver loongson2_cpufreq_driver = { | 133 | static struct cpufreq_driver loongson2_cpufreq_driver = { |
161 | .name = "loongson2", | 134 | .name = "loongson2", |
162 | .init = loongson2_cpufreq_cpu_init, | 135 | .init = loongson2_cpufreq_cpu_init, |
163 | .verify = loongson2_cpufreq_verify, | 136 | .verify = cpufreq_generic_frequency_table_verify, |
164 | .target = loongson2_cpufreq_target, | 137 | .target_index = loongson2_cpufreq_target, |
165 | .get = loongson2_cpufreq_get, | 138 | .get = loongson2_cpufreq_get, |
166 | .exit = loongson2_cpufreq_exit, | 139 | .exit = loongson2_cpufreq_exit, |
167 | .attr = loongson2_table_attr, | 140 | .attr = cpufreq_generic_attr, |
168 | }; | 141 | }; |
169 | 142 | ||
170 | static struct platform_device_id platform_device_ids[] = { | 143 | static struct platform_device_id platform_device_ids[] = { |
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index 6168d77b296d..4e2da0874bfb 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c | |||
@@ -64,11 +64,6 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = { | |||
64 | {0, CPUFREQ_TABLE_END}, | 64 | {0, CPUFREQ_TABLE_END}, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static struct freq_attr *maple_cpu_freqs_attr[] = { | ||
68 | &cpufreq_freq_attr_scaling_available_freqs, | ||
69 | NULL, | ||
70 | }; | ||
71 | |||
72 | /* Power mode data is an array of the 32 bits PCR values to use for | 67 | /* Power mode data is an array of the 32 bits PCR values to use for |
73 | * the various frequencies, retrieved from the device-tree | 68 | * the various frequencies, retrieved from the device-tree |
74 | */ | 69 | */ |
@@ -135,32 +130,19 @@ static int maple_scom_query_freq(void) | |||
135 | * Common interface to the cpufreq core | 130 | * Common interface to the cpufreq core |
136 | */ | 131 | */ |
137 | 132 | ||
138 | static int maple_cpufreq_verify(struct cpufreq_policy *policy) | ||
139 | { | ||
140 | return cpufreq_frequency_table_verify(policy, maple_cpu_freqs); | ||
141 | } | ||
142 | |||
143 | static int maple_cpufreq_target(struct cpufreq_policy *policy, | 133 | static int maple_cpufreq_target(struct cpufreq_policy *policy, |
144 | unsigned int target_freq, unsigned int relation) | 134 | unsigned int index) |
145 | { | 135 | { |
146 | unsigned int newstate = 0; | ||
147 | struct cpufreq_freqs freqs; | 136 | struct cpufreq_freqs freqs; |
148 | int rc; | 137 | int rc; |
149 | 138 | ||
150 | if (cpufreq_frequency_table_target(policy, maple_cpu_freqs, | ||
151 | target_freq, relation, &newstate)) | ||
152 | return -EINVAL; | ||
153 | |||
154 | if (maple_pmode_cur == newstate) | ||
155 | return 0; | ||
156 | |||
157 | mutex_lock(&maple_switch_mutex); | 139 | mutex_lock(&maple_switch_mutex); |
158 | 140 | ||
159 | freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; | 141 | freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; |
160 | freqs.new = maple_cpu_freqs[newstate].frequency; | 142 | freqs.new = maple_cpu_freqs[index].frequency; |
161 | 143 | ||
162 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 144 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
163 | rc = maple_scom_switch_freq(newstate); | 145 | rc = maple_scom_switch_freq(index); |
164 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 146 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
165 | 147 | ||
166 | mutex_unlock(&maple_switch_mutex); | 148 | mutex_unlock(&maple_switch_mutex); |
@@ -175,27 +157,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu) | |||
175 | 157 | ||
176 | static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) | 158 | static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) |
177 | { | 159 | { |
178 | policy->cpuinfo.transition_latency = 12000; | 160 | return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); |
179 | policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency; | ||
180 | /* secondary CPUs are tied to the primary one by the | ||
181 | * cpufreq core if in the secondary policy we tell it that | ||
182 | * it actually must be one policy together with all others. */ | ||
183 | cpumask_setall(policy->cpus); | ||
184 | cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); | ||
185 | |||
186 | return cpufreq_frequency_table_cpuinfo(policy, | ||
187 | maple_cpu_freqs); | ||
188 | } | 161 | } |
189 | 162 | ||
190 | |||
191 | static struct cpufreq_driver maple_cpufreq_driver = { | 163 | static struct cpufreq_driver maple_cpufreq_driver = { |
192 | .name = "maple", | 164 | .name = "maple", |
193 | .flags = CPUFREQ_CONST_LOOPS, | 165 | .flags = CPUFREQ_CONST_LOOPS, |
194 | .init = maple_cpufreq_cpu_init, | 166 | .init = maple_cpufreq_cpu_init, |
195 | .verify = maple_cpufreq_verify, | 167 | .verify = cpufreq_generic_frequency_table_verify, |
196 | .target = maple_cpufreq_target, | 168 | .target_index = maple_cpufreq_target, |
197 | .get = maple_cpufreq_get_speed, | 169 | .get = maple_cpufreq_get_speed, |
198 | .attr = maple_cpu_freqs_attr, | 170 | .attr = cpufreq_generic_attr, |
199 | }; | 171 | }; |
200 | 172 | ||
201 | static int __init maple_cpufreq_init(void) | 173 | static int __init maple_cpufreq_init(void) |
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index f31fcfcad514..b5512712298f 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/opp.h> | 25 | #include <linux/pm_opp.h> |
26 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
@@ -40,13 +40,6 @@ static struct clk *mpu_clk; | |||
40 | static struct device *mpu_dev; | 40 | static struct device *mpu_dev; |
41 | static struct regulator *mpu_reg; | 41 | static struct regulator *mpu_reg; |
42 | 42 | ||
43 | static int omap_verify_speed(struct cpufreq_policy *policy) | ||
44 | { | ||
45 | if (!freq_table) | ||
46 | return -EINVAL; | ||
47 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
48 | } | ||
49 | |||
50 | static unsigned int omap_getspeed(unsigned int cpu) | 43 | static unsigned int omap_getspeed(unsigned int cpu) |
51 | { | 44 | { |
52 | unsigned long rate; | 45 | unsigned long rate; |
@@ -58,40 +51,15 @@ static unsigned int omap_getspeed(unsigned int cpu) | |||
58 | return rate; | 51 | return rate; |
59 | } | 52 | } |
60 | 53 | ||
61 | static int omap_target(struct cpufreq_policy *policy, | 54 | static int omap_target(struct cpufreq_policy *policy, unsigned int index) |
62 | unsigned int target_freq, | ||
63 | unsigned int relation) | ||
64 | { | 55 | { |
65 | unsigned int i; | ||
66 | int r, ret = 0; | 56 | int r, ret = 0; |
67 | struct cpufreq_freqs freqs; | 57 | struct cpufreq_freqs freqs; |
68 | struct opp *opp; | 58 | struct dev_pm_opp *opp; |
69 | unsigned long freq, volt = 0, volt_old = 0, tol = 0; | 59 | unsigned long freq, volt = 0, volt_old = 0, tol = 0; |
70 | 60 | ||
71 | if (!freq_table) { | ||
72 | dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, | ||
73 | policy->cpu); | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | |||
77 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
78 | relation, &i); | ||
79 | if (ret) { | ||
80 | dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", | ||
81 | __func__, policy->cpu, target_freq, ret); | ||
82 | return ret; | ||
83 | } | ||
84 | freqs.new = freq_table[i].frequency; | ||
85 | if (!freqs.new) { | ||
86 | dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, | ||
87 | policy->cpu, target_freq); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | freqs.old = omap_getspeed(policy->cpu); | 61 | freqs.old = omap_getspeed(policy->cpu); |
92 | 62 | freqs.new = freq_table[index].frequency; | |
93 | if (freqs.old == freqs.new && policy->cur == freqs.new) | ||
94 | return ret; | ||
95 | 63 | ||
96 | freq = freqs.new * 1000; | 64 | freq = freqs.new * 1000; |
97 | ret = clk_round_rate(mpu_clk, freq); | 65 | ret = clk_round_rate(mpu_clk, freq); |
@@ -105,14 +73,14 @@ static int omap_target(struct cpufreq_policy *policy, | |||
105 | 73 | ||
106 | if (mpu_reg) { | 74 | if (mpu_reg) { |
107 | rcu_read_lock(); | 75 | rcu_read_lock(); |
108 | opp = opp_find_freq_ceil(mpu_dev, &freq); | 76 | opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq); |
109 | if (IS_ERR(opp)) { | 77 | if (IS_ERR(opp)) { |
110 | rcu_read_unlock(); | 78 | rcu_read_unlock(); |
111 | dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", | 79 | dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", |
112 | __func__, freqs.new); | 80 | __func__, freqs.new); |
113 | return -EINVAL; | 81 | return -EINVAL; |
114 | } | 82 | } |
115 | volt = opp_get_voltage(opp); | 83 | volt = dev_pm_opp_get_voltage(opp); |
116 | rcu_read_unlock(); | 84 | rcu_read_unlock(); |
117 | tol = volt * OPP_TOLERANCE / 100; | 85 | tol = volt * OPP_TOLERANCE / 100; |
118 | volt_old = regulator_get_voltage(mpu_reg); | 86 | volt_old = regulator_get_voltage(mpu_reg); |
@@ -162,86 +130,57 @@ done: | |||
162 | static inline void freq_table_free(void) | 130 | static inline void freq_table_free(void) |
163 | { | 131 | { |
164 | if (atomic_dec_and_test(&freq_table_users)) | 132 | if (atomic_dec_and_test(&freq_table_users)) |
165 | opp_free_cpufreq_table(mpu_dev, &freq_table); | 133 | dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table); |
166 | } | 134 | } |
167 | 135 | ||
168 | static int omap_cpu_init(struct cpufreq_policy *policy) | 136 | static int omap_cpu_init(struct cpufreq_policy *policy) |
169 | { | 137 | { |
170 | int result = 0; | 138 | int result; |
171 | 139 | ||
172 | mpu_clk = clk_get(NULL, "cpufreq_ck"); | 140 | mpu_clk = clk_get(NULL, "cpufreq_ck"); |
173 | if (IS_ERR(mpu_clk)) | 141 | if (IS_ERR(mpu_clk)) |
174 | return PTR_ERR(mpu_clk); | 142 | return PTR_ERR(mpu_clk); |
175 | 143 | ||
176 | if (policy->cpu >= NR_CPUS) { | 144 | if (!freq_table) { |
177 | result = -EINVAL; | 145 | result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table); |
178 | goto fail_ck; | 146 | if (result) { |
179 | } | 147 | dev_err(mpu_dev, |
180 | 148 | "%s: cpu%d: failed creating freq table[%d]\n", | |
181 | policy->cur = omap_getspeed(policy->cpu); | ||
182 | |||
183 | if (!freq_table) | ||
184 | result = opp_init_cpufreq_table(mpu_dev, &freq_table); | ||
185 | |||
186 | if (result) { | ||
187 | dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", | ||
188 | __func__, policy->cpu, result); | 149 | __func__, policy->cpu, result); |
189 | goto fail_ck; | 150 | goto fail; |
151 | } | ||
190 | } | 152 | } |
191 | 153 | ||
192 | atomic_inc_return(&freq_table_users); | 154 | atomic_inc_return(&freq_table_users); |
193 | 155 | ||
194 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
195 | if (result) | ||
196 | goto fail_table; | ||
197 | |||
198 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
199 | |||
200 | policy->cur = omap_getspeed(policy->cpu); | ||
201 | |||
202 | /* | ||
203 | * On OMAP SMP configuartion, both processors share the voltage | ||
204 | * and clock. So both CPUs needs to be scaled together and hence | ||
205 | * needs software co-ordination. Use cpufreq affected_cpus | ||
206 | * interface to handle this scenario. Additional is_smp() check | ||
207 | * is to keep SMP_ON_UP build working. | ||
208 | */ | ||
209 | if (is_smp()) | ||
210 | cpumask_setall(policy->cpus); | ||
211 | |||
212 | /* FIXME: what's the actual transition time? */ | 156 | /* FIXME: what's the actual transition time? */ |
213 | policy->cpuinfo.transition_latency = 300 * 1000; | 157 | result = cpufreq_generic_init(policy, freq_table, 300 * 1000); |
214 | 158 | if (!result) | |
215 | return 0; | 159 | return 0; |
216 | 160 | ||
217 | fail_table: | ||
218 | freq_table_free(); | 161 | freq_table_free(); |
219 | fail_ck: | 162 | fail: |
220 | clk_put(mpu_clk); | 163 | clk_put(mpu_clk); |
221 | return result; | 164 | return result; |
222 | } | 165 | } |
223 | 166 | ||
224 | static int omap_cpu_exit(struct cpufreq_policy *policy) | 167 | static int omap_cpu_exit(struct cpufreq_policy *policy) |
225 | { | 168 | { |
169 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
226 | freq_table_free(); | 170 | freq_table_free(); |
227 | clk_put(mpu_clk); | 171 | clk_put(mpu_clk); |
228 | return 0; | 172 | return 0; |
229 | } | 173 | } |
230 | 174 | ||
231 | static struct freq_attr *omap_cpufreq_attr[] = { | ||
232 | &cpufreq_freq_attr_scaling_available_freqs, | ||
233 | NULL, | ||
234 | }; | ||
235 | |||
236 | static struct cpufreq_driver omap_driver = { | 175 | static struct cpufreq_driver omap_driver = { |
237 | .flags = CPUFREQ_STICKY, | 176 | .flags = CPUFREQ_STICKY, |
238 | .verify = omap_verify_speed, | 177 | .verify = cpufreq_generic_frequency_table_verify, |
239 | .target = omap_target, | 178 | .target_index = omap_target, |
240 | .get = omap_getspeed, | 179 | .get = omap_getspeed, |
241 | .init = omap_cpu_init, | 180 | .init = omap_cpu_init, |
242 | .exit = omap_cpu_exit, | 181 | .exit = omap_cpu_exit, |
243 | .name = "omap", | 182 | .name = "omap", |
244 | .attr = omap_cpufreq_attr, | 183 | .attr = cpufreq_generic_attr, |
245 | }; | 184 | }; |
246 | 185 | ||
247 | static int omap_cpufreq_probe(struct platform_device *pdev) | 186 | static int omap_cpufreq_probe(struct platform_device *pdev) |
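
The heart of the OMAP conversion (and of most hunks below) is that the cpufreq core now resolves the requested frequency to a table index before calling ->target_index(), so the driver-side cpufreq_frequency_table_target() lookup, the missing-table check and the freqs.old == freqs.new short-circuit all move out of the driver. A minimal user-space sketch of that split; the table, relation names and callbacks here are simplified stand-ins for illustration, not the kernel API:

#include <stdio.h>

#define TABLE_END 0xFFFFFFFFu
enum relation { RELATION_L, RELATION_H };  /* lowest at/above vs. highest at/below target */

static const unsigned int freq_table[] = { 300000, 600000, 800000, 1000000, TABLE_END };

/* What the core now does once, instead of every driver doing it:
 * map a target frequency and relation onto a table index. */
static int resolve_index(unsigned int target, enum relation rel)
{
    int i, best = -1;

    for (i = 0; freq_table[i] != TABLE_END; i++) {
        unsigned int f = freq_table[i];

        if (rel == RELATION_L && f >= target && (best < 0 || f < freq_table[best]))
            best = i;
        if (rel == RELATION_H && f <= target && (best < 0 || f > freq_table[best]))
            best = i;
    }
    return best;
}

/* What a converted driver keeps: act on an index, no lookup, no relation. */
static int driver_target_index(unsigned int index)
{
    printf("switching to %u kHz (index %u)\n", freq_table[index], index);
    return 0;
}

int main(void)
{
    int idx = resolve_index(700000, RELATION_L);

    if (idx >= 0)
        driver_target_index((unsigned int)idx);
    return 0;
}

With the lookup centralized, a converted callback such as omap_target() is left with only the hardware-specific part: programming the clock and regulator for freq_table[index].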
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 2f0a2a65c37f..3c23053afdfd 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c | |||
@@ -105,23 +105,13 @@ static struct cpufreq_frequency_table p4clockmod_table[] = { | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | 107 | ||
108 | static int cpufreq_p4_target(struct cpufreq_policy *policy, | 108 | static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index) |
109 | unsigned int target_freq, | ||
110 | unsigned int relation) | ||
111 | { | 109 | { |
112 | unsigned int newstate = DC_RESV; | ||
113 | struct cpufreq_freqs freqs; | 110 | struct cpufreq_freqs freqs; |
114 | int i; | 111 | int i; |
115 | 112 | ||
116 | if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], | ||
117 | target_freq, relation, &newstate)) | ||
118 | return -EINVAL; | ||
119 | |||
120 | freqs.old = cpufreq_p4_get(policy->cpu); | 113 | freqs.old = cpufreq_p4_get(policy->cpu); |
121 | freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8; | 114 | freqs.new = stock_freq * p4clockmod_table[index].driver_data / 8; |
122 | |||
123 | if (freqs.new == freqs.old) | ||
124 | return 0; | ||
125 | 115 | ||
126 | /* notifiers */ | 116 | /* notifiers */ |
127 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 117 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
@@ -131,7 +121,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, | |||
131 | * Developer's Manual, Volume 3 | 121 | * Developer's Manual, Volume 3 |
132 | */ | 122 | */ |
133 | for_each_cpu(i, policy->cpus) | 123 | for_each_cpu(i, policy->cpus) |
134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data); | 124 | cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data); |
135 | 125 | ||
136 | /* notifiers */ | 126 | /* notifiers */ |
137 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 127 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
@@ -140,12 +130,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, | |||
140 | } | 130 | } |
141 | 131 | ||
142 | 132 | ||
143 | static int cpufreq_p4_verify(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); | ||
146 | } | ||
147 | |||
148 | |||
149 | static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | 133 | static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) |
150 | { | 134 | { |
151 | if (c->x86 == 0x06) { | 135 | if (c->x86 == 0x06) { |
@@ -230,25 +214,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
230 | else | 214 | else |
231 | p4clockmod_table[i].frequency = (stock_freq * i)/8; | 215 | p4clockmod_table[i].frequency = (stock_freq * i)/8; |
232 | } | 216 | } |
233 | cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); | ||
234 | 217 | ||
235 | /* cpuinfo and default policy values */ | 218 | /* cpuinfo and default policy values */ |
236 | 219 | ||
237 | /* the transition latency is set to be 1 higher than the maximum | 220 | /* the transition latency is set to be 1 higher than the maximum |
238 | * transition latency of the ondemand governor */ | 221 | * transition latency of the ondemand governor */ |
239 | policy->cpuinfo.transition_latency = 10000001; | 222 | policy->cpuinfo.transition_latency = 10000001; |
240 | policy->cur = stock_freq; | ||
241 | 223 | ||
242 | return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); | 224 | return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]); |
243 | } | 225 | } |
244 | 226 | ||
245 | 227 | ||
246 | static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy) | ||
247 | { | ||
248 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static unsigned int cpufreq_p4_get(unsigned int cpu) | 228 | static unsigned int cpufreq_p4_get(unsigned int cpu) |
253 | { | 229 | { |
254 | u32 l, h; | 230 | u32 l, h; |
@@ -267,19 +243,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) | |||
267 | return stock_freq; | 243 | return stock_freq; |
268 | } | 244 | } |
269 | 245 | ||
270 | static struct freq_attr *p4clockmod_attr[] = { | ||
271 | &cpufreq_freq_attr_scaling_available_freqs, | ||
272 | NULL, | ||
273 | }; | ||
274 | |||
275 | static struct cpufreq_driver p4clockmod_driver = { | 246 | static struct cpufreq_driver p4clockmod_driver = { |
276 | .verify = cpufreq_p4_verify, | 247 | .verify = cpufreq_generic_frequency_table_verify, |
277 | .target = cpufreq_p4_target, | 248 | .target_index = cpufreq_p4_target, |
278 | .init = cpufreq_p4_cpu_init, | 249 | .init = cpufreq_p4_cpu_init, |
279 | .exit = cpufreq_p4_cpu_exit, | 250 | .exit = cpufreq_generic_exit, |
280 | .get = cpufreq_p4_get, | 251 | .get = cpufreq_p4_get, |
281 | .name = "p4-clockmod", | 252 | .name = "p4-clockmod", |
282 | .attr = p4clockmod_attr, | 253 | .attr = cpufreq_generic_attr, |
283 | }; | 254 | }; |
284 | 255 | ||
285 | static const struct x86_cpu_id cpufreq_p4_id[] = { | 256 | static const struct x86_cpu_id cpufreq_p4_id[] = { |
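
p4-clockmod derives each throttled frequency directly from the unmodulated clock and the duty-cycle value stored in the table (freqs.new = stock_freq * driver_data / 8), so once the core supplies the index there is nothing left for the driver to look up. A worked example of that arithmetic as plain user-space C; the stock frequency below is made up:

#include <stdio.h>

int main(void)
{
    unsigned int stock_freq = 2800000; /* kHz; hypothetical, probed from the CPU at init */
    unsigned int step;

    /* Each step keeps that many eighths of the stock clock;
     * 8/8 is the unmodulated frequency. */
    for (step = 1; step <= 8; step++)
        printf("duty cycle %u/8 -> %u kHz\n", step, stock_freq * step / 8);
    return 0;
}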
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 534e43a60d1f..17424ddc7f67 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c | |||
@@ -69,11 +69,6 @@ static struct cpufreq_frequency_table pas_freqs[] = { | |||
69 | {0, CPUFREQ_TABLE_END}, | 69 | {0, CPUFREQ_TABLE_END}, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static struct freq_attr *pas_cpu_freqs_attr[] = { | ||
73 | &cpufreq_freq_attr_scaling_available_freqs, | ||
74 | NULL, | ||
75 | }; | ||
76 | |||
77 | /* | 72 | /* |
78 | * hardware specific functions | 73 | * hardware specific functions |
79 | */ | 74 | */ |
@@ -209,22 +204,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
209 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); | 204 | pr_debug("%d: %d\n", i, pas_freqs[i].frequency); |
210 | } | 205 | } |
211 | 206 | ||
212 | policy->cpuinfo.transition_latency = get_gizmo_latency(); | ||
213 | |||
214 | cur_astate = get_cur_astate(policy->cpu); | 207 | cur_astate = get_cur_astate(policy->cpu); |
215 | pr_debug("current astate is at %d\n",cur_astate); | 208 | pr_debug("current astate is at %d\n",cur_astate); |
216 | 209 | ||
217 | policy->cur = pas_freqs[cur_astate].frequency; | 210 | policy->cur = pas_freqs[cur_astate].frequency; |
218 | cpumask_copy(policy->cpus, cpu_online_mask); | ||
219 | |||
220 | ppc_proc_freq = policy->cur * 1000ul; | 211 | ppc_proc_freq = policy->cur * 1000ul; |
221 | 212 | ||
222 | cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu); | 213 | return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); |
223 | |||
224 | /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max | ||
225 | * are set correctly | ||
226 | */ | ||
227 | return cpufreq_frequency_table_cpuinfo(policy, pas_freqs); | ||
228 | 214 | ||
229 | out_unmap_sdcpwr: | 215 | out_unmap_sdcpwr: |
230 | iounmap(sdcpwr_mapbase); | 216 | iounmap(sdcpwr_mapbase); |
@@ -253,25 +239,12 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
253 | return 0; | 239 | return 0; |
254 | } | 240 | } |
255 | 241 | ||
256 | static int pas_cpufreq_verify(struct cpufreq_policy *policy) | ||
257 | { | ||
258 | return cpufreq_frequency_table_verify(policy, pas_freqs); | ||
259 | } | ||
260 | |||
261 | static int pas_cpufreq_target(struct cpufreq_policy *policy, | 242 | static int pas_cpufreq_target(struct cpufreq_policy *policy, |
262 | unsigned int target_freq, | 243 | unsigned int pas_astate_new) |
263 | unsigned int relation) | ||
264 | { | 244 | { |
265 | struct cpufreq_freqs freqs; | 245 | struct cpufreq_freqs freqs; |
266 | int pas_astate_new; | ||
267 | int i; | 246 | int i; |
268 | 247 | ||
269 | cpufreq_frequency_table_target(policy, | ||
270 | pas_freqs, | ||
271 | target_freq, | ||
272 | relation, | ||
273 | &pas_astate_new); | ||
274 | |||
275 | freqs.old = policy->cur; | 248 | freqs.old = policy->cur; |
276 | freqs.new = pas_freqs[pas_astate_new].frequency; | 249 | freqs.new = pas_freqs[pas_astate_new].frequency; |
277 | 250 | ||
@@ -300,9 +273,9 @@ static struct cpufreq_driver pas_cpufreq_driver = { | |||
300 | .flags = CPUFREQ_CONST_LOOPS, | 273 | .flags = CPUFREQ_CONST_LOOPS, |
301 | .init = pas_cpufreq_cpu_init, | 274 | .init = pas_cpufreq_cpu_init, |
302 | .exit = pas_cpufreq_cpu_exit, | 275 | .exit = pas_cpufreq_cpu_exit, |
303 | .verify = pas_cpufreq_verify, | 276 | .verify = cpufreq_generic_frequency_table_verify, |
304 | .target = pas_cpufreq_target, | 277 | .target_index = pas_cpufreq_target, |
305 | .attr = pas_cpu_freqs_attr, | 278 | .attr = cpufreq_generic_attr, |
306 | }; | 279 | }; |
307 | 280 | ||
308 | /* | 281 | /* |
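
pasemi now leaves latency setup and table validation to a single cpufreq_generic_init(policy, table, latency) call. A rough model of what such a helper has to fold together, under the assumption that it derives the cpuinfo limits from the table and records the transition latency (field and function names are simplified; the real helper is also expected to handle the shared-clock CPU mask, which is why the cpumask_copy() line can go):

#include <stdio.h>

#define TABLE_END 0xFFFFFFFFu

struct policy {
    unsigned int min, max;                  /* current limits, kHz */
    unsigned int cpuinfo_min, cpuinfo_max;  /* hardware limits, kHz */
    unsigned int transition_latency;        /* ns */
};

/* One call instead of per-driver boilerplate: derive hardware limits from
 * the table, seed the policy limits and record the transition latency. */
static int generic_init(struct policy *p, const unsigned int *table,
                        unsigned int latency_ns)
{
    unsigned int i, lo = TABLE_END, hi = 0;

    for (i = 0; table[i] != TABLE_END; i++) {
        if (table[i] < lo)
            lo = table[i];
        if (table[i] > hi)
            hi = table[i];
    }
    if (hi == 0)
        return -1;  /* empty table */

    p->cpuinfo_min = p->min = lo;
    p->cpuinfo_max = p->max = hi;
    p->transition_latency = latency_ns;
    return 0;
}

int main(void)
{
    static const unsigned int freqs[] = { 825000, 1100000, 1650000, TABLE_END };
    struct policy pol;

    if (!generic_init(&pol, freqs, 2000000))
        printf("policy %u..%u kHz, latency %u ns\n",
               pol.min, pol.max, pol.transition_latency);
    return 0;
}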
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 70438de5c0e4..e2b4f40ff69a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info; | |||
111 | 111 | ||
112 | static int pcc_cpufreq_verify(struct cpufreq_policy *policy) | 112 | static int pcc_cpufreq_verify(struct cpufreq_policy *policy) |
113 | { | 113 | { |
114 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 114 | cpufreq_verify_within_cpu_limits(policy); |
115 | policy->cpuinfo.max_freq); | ||
116 | return 0; | 115 | return 0; |
117 | } | 116 | } |
118 | 117 | ||
@@ -559,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
559 | ioread32(&pcch_hdr->nominal) * 1000; | 558 | ioread32(&pcch_hdr->nominal) * 1000; |
560 | policy->min = policy->cpuinfo.min_freq = | 559 | policy->min = policy->cpuinfo.min_freq = |
561 | ioread32(&pcch_hdr->minimum_frequency) * 1000; | 560 | ioread32(&pcch_hdr->minimum_frequency) * 1000; |
562 | policy->cur = pcc_get_freq(cpu); | ||
563 | |||
564 | if (!policy->cur) { | ||
565 | pr_debug("init: Unable to get current CPU frequency\n"); | ||
566 | result = -EINVAL; | ||
567 | goto out; | ||
568 | } | ||
569 | 561 | ||
570 | pr_debug("init: policy->max is %d, policy->min is %d\n", | 562 | pr_debug("init: policy->max is %d, policy->min is %d\n", |
571 | policy->max, policy->min); | 563 | policy->max, policy->min); |
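
pcc has no frequency table, so it keeps its own ->verify(), but the open-coded limit check collapses into cpufreq_verify_within_cpu_limits(), i.e. a clamp of the requested policy to the hardware range. A sketch of that clamp as a plain function; this is a model of the semantics, not the kernel helper itself:

#include <stdio.h>

struct limits { unsigned int min, max; };

/* Clamp a requested [min, max] window to what the CPU can actually do. */
static void verify_within_limits(struct limits *req, const struct limits *cpu)
{
    if (req->min < cpu->min)
        req->min = cpu->min;
    if (req->max > cpu->max)
        req->max = cpu->max;
    if (req->min > req->max)
        req->min = req->max;
}

int main(void)
{
    struct limits cpu = { 1200000, 2600000 };
    struct limits req = {  800000, 3000000 };

    verify_within_limits(&req, &cpu);
    printf("verified policy: %u..%u kHz\n", req.min, req.max);
    return 0;
}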
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index a096cd3fa23d..05f705e1b7a2 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c | |||
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = { | |||
86 | {0, CPUFREQ_TABLE_END}, | 86 | {0, CPUFREQ_TABLE_END}, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | static struct freq_attr* pmac_cpu_freqs_attr[] = { | ||
90 | &cpufreq_freq_attr_scaling_available_freqs, | ||
91 | NULL, | ||
92 | }; | ||
93 | |||
94 | static inline void local_delay(unsigned long ms) | 89 | static inline void local_delay(unsigned long ms) |
95 | { | 90 | { |
96 | if (no_schedule) | 91 | if (no_schedule) |
@@ -378,23 +373,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) | |||
378 | return cur_freq; | 373 | return cur_freq; |
379 | } | 374 | } |
380 | 375 | ||
381 | static int pmac_cpufreq_verify(struct cpufreq_policy *policy) | ||
382 | { | ||
383 | return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); | ||
384 | } | ||
385 | |||
386 | static int pmac_cpufreq_target( struct cpufreq_policy *policy, | 376 | static int pmac_cpufreq_target( struct cpufreq_policy *policy, |
387 | unsigned int target_freq, | 377 | unsigned int index) |
388 | unsigned int relation) | ||
389 | { | 378 | { |
390 | unsigned int newstate = 0; | ||
391 | int rc; | 379 | int rc; |
392 | 380 | ||
393 | if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, | 381 | rc = do_set_cpu_speed(policy, index, 1); |
394 | target_freq, relation, &newstate)) | ||
395 | return -EINVAL; | ||
396 | |||
397 | rc = do_set_cpu_speed(policy, newstate, 1); | ||
398 | 382 | ||
399 | ppc_proc_freq = cur_freq * 1000ul; | 383 | ppc_proc_freq = cur_freq * 1000ul; |
400 | return rc; | 384 | return rc; |
@@ -402,14 +386,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, | |||
402 | 386 | ||
403 | static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) | 387 | static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) |
404 | { | 388 | { |
405 | if (policy->cpu != 0) | 389 | return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); |
406 | return -ENODEV; | ||
407 | |||
408 | policy->cpuinfo.transition_latency = transition_latency; | ||
409 | policy->cur = cur_freq; | ||
410 | |||
411 | cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); | ||
412 | return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); | ||
413 | } | 390 | } |
414 | 391 | ||
415 | static u32 read_gpio(struct device_node *np) | 392 | static u32 read_gpio(struct device_node *np) |
@@ -469,14 +446,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy) | |||
469 | } | 446 | } |
470 | 447 | ||
471 | static struct cpufreq_driver pmac_cpufreq_driver = { | 448 | static struct cpufreq_driver pmac_cpufreq_driver = { |
472 | .verify = pmac_cpufreq_verify, | 449 | .verify = cpufreq_generic_frequency_table_verify, |
473 | .target = pmac_cpufreq_target, | 450 | .target_index = pmac_cpufreq_target, |
474 | .get = pmac_cpufreq_get_speed, | 451 | .get = pmac_cpufreq_get_speed, |
475 | .init = pmac_cpufreq_cpu_init, | 452 | .init = pmac_cpufreq_cpu_init, |
476 | .suspend = pmac_cpufreq_suspend, | 453 | .suspend = pmac_cpufreq_suspend, |
477 | .resume = pmac_cpufreq_resume, | 454 | .resume = pmac_cpufreq_resume, |
478 | .flags = CPUFREQ_PM_NO_WARN, | 455 | .flags = CPUFREQ_PM_NO_WARN, |
479 | .attr = pmac_cpu_freqs_attr, | 456 | .attr = cpufreq_generic_attr, |
480 | .name = "powermac", | 457 | .name = "powermac", |
481 | }; | 458 | }; |
482 | 459 | ||
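
Several of these drivers carried an identical one-entry freq_attr array whose only purpose was to export scaling_available_frequencies; pointing .attr at the shared cpufreq_generic_attr removes those copies. The shape being shared is just a NULL-terminated pointer array, roughly as below (names are placeholders, not the kernel definitions):

#include <stdio.h>

struct freq_attr { const char *name; };

static struct freq_attr scaling_available_freqs = { "scaling_available_frequencies" };

/* One shared, NULL-terminated array instead of a copy in every driver. */
static struct freq_attr *generic_attr[] = {
    &scaling_available_freqs,
    NULL,
};

int main(void)
{
    struct freq_attr **attr;

    for (attr = generic_attr; *attr; attr++)
        printf("exporting sysfs attribute: %s\n", (*attr)->name);
    return 0;
}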
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 3a51ad7e47c8..234b598ce416 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c | |||
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = { | |||
70 | {0, CPUFREQ_TABLE_END}, | 70 | {0, CPUFREQ_TABLE_END}, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static struct freq_attr* g5_cpu_freqs_attr[] = { | ||
74 | &cpufreq_freq_attr_scaling_available_freqs, | ||
75 | NULL, | ||
76 | }; | ||
77 | |||
78 | /* Power mode data is an array of the 32 bits PCR values to use for | 73 | /* Power mode data is an array of the 32 bits PCR values to use for |
79 | * the various frequencies, retrieved from the device-tree | 74 | * the various frequencies, retrieved from the device-tree |
80 | */ | 75 | */ |
@@ -142,7 +137,7 @@ static void g5_vdnap_switch_volt(int speed_mode) | |||
142 | pmf_call_one(pfunc_vdnap0_complete, &args); | 137 | pmf_call_one(pfunc_vdnap0_complete, &args); |
143 | if (done) | 138 | if (done) |
144 | break; | 139 | break; |
145 | msleep(1); | 140 | usleep_range(1000, 1000); |
146 | } | 141 | } |
147 | if (done == 0) | 142 | if (done == 0) |
148 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | 143 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); |
@@ -241,7 +236,7 @@ static void g5_pfunc_switch_volt(int speed_mode) | |||
241 | if (pfunc_cpu1_volt_low) | 236 | if (pfunc_cpu1_volt_low) |
242 | pmf_call_one(pfunc_cpu1_volt_low, NULL); | 237 | pmf_call_one(pfunc_cpu1_volt_low, NULL); |
243 | } | 238 | } |
244 | msleep(10); /* should be faster , to fix */ | 239 | usleep_range(10000, 10000); /* should be faster , to fix */ |
245 | } | 240 | } |
246 | 241 | ||
247 | /* | 242 | /* |
@@ -286,7 +281,7 @@ static int g5_pfunc_switch_freq(int speed_mode) | |||
286 | pmf_call_one(pfunc_slewing_done, &args); | 281 | pmf_call_one(pfunc_slewing_done, &args); |
287 | if (done) | 282 | if (done) |
288 | break; | 283 | break; |
289 | msleep(1); | 284 | usleep_range(500, 500); |
290 | } | 285 | } |
291 | if (done == 0) | 286 | if (done == 0) |
292 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | 287 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); |
@@ -317,32 +312,18 @@ static int g5_pfunc_query_freq(void) | |||
317 | * Common interface to the cpufreq core | 312 | * Common interface to the cpufreq core |
318 | */ | 313 | */ |
319 | 314 | ||
320 | static int g5_cpufreq_verify(struct cpufreq_policy *policy) | 315 | static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) |
321 | { | ||
322 | return cpufreq_frequency_table_verify(policy, g5_cpu_freqs); | ||
323 | } | ||
324 | |||
325 | static int g5_cpufreq_target(struct cpufreq_policy *policy, | ||
326 | unsigned int target_freq, unsigned int relation) | ||
327 | { | 316 | { |
328 | unsigned int newstate = 0; | ||
329 | struct cpufreq_freqs freqs; | 317 | struct cpufreq_freqs freqs; |
330 | int rc; | 318 | int rc; |
331 | 319 | ||
332 | if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, | ||
333 | target_freq, relation, &newstate)) | ||
334 | return -EINVAL; | ||
335 | |||
336 | if (g5_pmode_cur == newstate) | ||
337 | return 0; | ||
338 | |||
339 | mutex_lock(&g5_switch_mutex); | 320 | mutex_lock(&g5_switch_mutex); |
340 | 321 | ||
341 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; | 322 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; |
342 | freqs.new = g5_cpu_freqs[newstate].frequency; | 323 | freqs.new = g5_cpu_freqs[index].frequency; |
343 | 324 | ||
344 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 325 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
345 | rc = g5_switch_freq(newstate); | 326 | rc = g5_switch_freq(index); |
346 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 327 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
347 | 328 | ||
348 | mutex_unlock(&g5_switch_mutex); | 329 | mutex_unlock(&g5_switch_mutex); |
@@ -357,27 +338,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu) | |||
357 | 338 | ||
358 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) | 339 | static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) |
359 | { | 340 | { |
360 | policy->cpuinfo.transition_latency = transition_latency; | 341 | return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); |
361 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; | ||
362 | /* secondary CPUs are tied to the primary one by the | ||
363 | * cpufreq core if in the secondary policy we tell it that | ||
364 | * it actually must be one policy together with all others. */ | ||
365 | cpumask_copy(policy->cpus, cpu_online_mask); | ||
366 | cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); | ||
367 | |||
368 | return cpufreq_frequency_table_cpuinfo(policy, | ||
369 | g5_cpu_freqs); | ||
370 | } | 342 | } |
371 | 343 | ||
372 | |||
373 | static struct cpufreq_driver g5_cpufreq_driver = { | 344 | static struct cpufreq_driver g5_cpufreq_driver = { |
374 | .name = "powermac", | 345 | .name = "powermac", |
375 | .flags = CPUFREQ_CONST_LOOPS, | 346 | .flags = CPUFREQ_CONST_LOOPS, |
376 | .init = g5_cpufreq_cpu_init, | 347 | .init = g5_cpufreq_cpu_init, |
377 | .verify = g5_cpufreq_verify, | 348 | .verify = cpufreq_generic_frequency_table_verify, |
378 | .target = g5_cpufreq_target, | 349 | .target_index = g5_cpufreq_target, |
379 | .get = g5_cpufreq_get_speed, | 350 | .get = g5_cpufreq_get_speed, |
380 | .attr = g5_cpu_freqs_attr, | 351 | .attr = cpufreq_generic_attr, |
381 | }; | 352 | }; |
382 | 353 | ||
383 | 354 | ||
@@ -397,7 +368,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) | |||
397 | /* Check supported platforms */ | 368 | /* Check supported platforms */ |
398 | if (of_machine_is_compatible("PowerMac8,1") || | 369 | if (of_machine_is_compatible("PowerMac8,1") || |
399 | of_machine_is_compatible("PowerMac8,2") || | 370 | of_machine_is_compatible("PowerMac8,2") || |
400 | of_machine_is_compatible("PowerMac9,1")) | 371 | of_machine_is_compatible("PowerMac9,1") || |
372 | of_machine_is_compatible("PowerMac12,1")) | ||
401 | use_volts_smu = 1; | 373 | use_volts_smu = 1; |
402 | else if (of_machine_is_compatible("PowerMac11,2")) | 374 | else if (of_machine_is_compatible("PowerMac11,2")) |
403 | use_volts_vdnap = 1; | 375 | use_volts_vdnap = 1; |
@@ -647,8 +619,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) | |||
647 | g5_cpu_freqs[0].frequency = max_freq; | 619 | g5_cpu_freqs[0].frequency = max_freq; |
648 | g5_cpu_freqs[1].frequency = min_freq; | 620 | g5_cpu_freqs[1].frequency = min_freq; |
649 | 621 | ||
622 | /* Based on a measurement on Xserve G5, rounded up. */ | ||
623 | transition_latency = 10 * NSEC_PER_MSEC; | ||
624 | |||
650 | /* Set callbacks */ | 625 | /* Set callbacks */ |
651 | transition_latency = CPUFREQ_ETERNAL; | ||
652 | g5_switch_volt = g5_pfunc_switch_volt; | 626 | g5_switch_volt = g5_pfunc_switch_volt; |
653 | g5_switch_freq = g5_pfunc_switch_freq; | 627 | g5_switch_freq = g5_pfunc_switch_freq; |
654 | g5_query_freq = g5_pfunc_query_freq; | 628 | g5_query_freq = g5_pfunc_query_freq; |
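
Beyond the ->target_index() conversion, pmac64 replaces msleep() with usleep_range() in its short completion polls (msleep() can oversleep badly for 1-10 ms waits) and trades CPUFREQ_ETERNAL for a measured ~10 ms latency in the PM72 init path. The bounded polling pattern itself, modelled in user space; the done flag and timings are illustrative only:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool transition_done(int tries)
{
    return tries >= 3;  /* stand-in for polling the hardware completion flag */
}

int main(void)
{
    struct timespec slice = { 0, 1000 * 1000 };  /* ~1 ms per poll */
    int tries;
    bool done = false;

    /* Poll with a hard bound so a wedged transition cannot hang forever. */
    for (tries = 0; tries < 100; tries++) {
        done = transition_done(tries);
        if (done)
            break;
        nanosleep(&slice, NULL);
    }
    if (!done)
        fprintf(stderr, "timeout in clock slewing\n");
    else
        printf("transition completed after %d polls\n", tries + 1);
    return 0;
}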
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 85f1c8c25ddc..643e7952cad3 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c | |||
@@ -63,12 +63,12 @@ static int powernow_k6_get_cpu_multiplier(void) | |||
63 | 63 | ||
64 | 64 | ||
65 | /** | 65 | /** |
66 | * powernow_k6_set_state - set the PowerNow! multiplier | 66 | * powernow_k6_target - set the PowerNow! multiplier |
67 | * @best_i: clock_ratio[best_i] is the target multiplier | 67 | * @best_i: clock_ratio[best_i] is the target multiplier |
68 | * | 68 | * |
69 | * Tries to change the PowerNow! multiplier | 69 | * Tries to change the PowerNow! multiplier |
70 | */ | 70 | */ |
71 | static void powernow_k6_set_state(struct cpufreq_policy *policy, | 71 | static int powernow_k6_target(struct cpufreq_policy *policy, |
72 | unsigned int best_i) | 72 | unsigned int best_i) |
73 | { | 73 | { |
74 | unsigned long outvalue = 0, invalue = 0; | 74 | unsigned long outvalue = 0, invalue = 0; |
@@ -77,7 +77,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy, | |||
77 | 77 | ||
78 | if (clock_ratio[best_i].driver_data > max_multiplier) { | 78 | if (clock_ratio[best_i].driver_data > max_multiplier) { |
79 | printk(KERN_ERR PFX "invalid target frequency\n"); | 79 | printk(KERN_ERR PFX "invalid target frequency\n"); |
80 | return; | 80 | return -EINVAL; |
81 | } | 81 | } |
82 | 82 | ||
83 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); | 83 | freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); |
@@ -100,44 +100,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy, | |||
100 | 100 | ||
101 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 101 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
102 | 102 | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | |||
107 | /** | ||
108 | * powernow_k6_verify - verifies a new CPUfreq policy | ||
109 | * @policy: new policy | ||
110 | * | ||
111 | * Policy must be within lowest and highest possible CPU Frequency, | ||
112 | * and at least one possible state must be within min and max. | ||
113 | */ | ||
114 | static int powernow_k6_verify(struct cpufreq_policy *policy) | ||
115 | { | ||
116 | return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); | ||
117 | } | ||
118 | |||
119 | |||
120 | /** | ||
121 | * powernow_k6_setpolicy - sets a new CPUFreq policy | ||
122 | * @policy: new policy | ||
123 | * @target_freq: the target frequency | ||
124 | * @relation: how that frequency relates to achieved frequency | ||
125 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
126 | * | ||
127 | * sets a new CPUFreq policy | ||
128 | */ | ||
129 | static int powernow_k6_target(struct cpufreq_policy *policy, | ||
130 | unsigned int target_freq, | ||
131 | unsigned int relation) | ||
132 | { | ||
133 | unsigned int newstate = 0; | ||
134 | |||
135 | if (cpufreq_frequency_table_target(policy, &clock_ratio[0], | ||
136 | target_freq, relation, &newstate)) | ||
137 | return -EINVAL; | ||
138 | |||
139 | powernow_k6_set_state(policy, newstate); | ||
140 | |||
141 | return 0; | 103 | return 0; |
142 | } | 104 | } |
143 | 105 | ||
@@ -145,7 +107,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy, | |||
145 | static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | 107 | static int powernow_k6_cpu_init(struct cpufreq_policy *policy) |
146 | { | 108 | { |
147 | unsigned int i, f; | 109 | unsigned int i, f; |
148 | int result; | ||
149 | 110 | ||
150 | if (policy->cpu != 0) | 111 | if (policy->cpu != 0) |
151 | return -ENODEV; | 112 | return -ENODEV; |
@@ -165,15 +126,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
165 | 126 | ||
166 | /* cpuinfo and default policy values */ | 127 | /* cpuinfo and default policy values */ |
167 | policy->cpuinfo.transition_latency = 200000; | 128 | policy->cpuinfo.transition_latency = 200000; |
168 | policy->cur = busfreq * max_multiplier; | ||
169 | |||
170 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); | ||
171 | if (result) | ||
172 | return result; | ||
173 | |||
174 | cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); | ||
175 | 129 | ||
176 | return 0; | 130 | return cpufreq_table_validate_and_show(policy, clock_ratio); |
177 | } | 131 | } |
178 | 132 | ||
179 | 133 | ||
@@ -182,7 +136,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) | |||
182 | unsigned int i; | 136 | unsigned int i; |
183 | for (i = 0; i < 8; i++) { | 137 | for (i = 0; i < 8; i++) { |
184 | if (i == max_multiplier) | 138 | if (i == max_multiplier) |
185 | powernow_k6_set_state(policy, i); | 139 | powernow_k6_target(policy, i); |
186 | } | 140 | } |
187 | cpufreq_frequency_table_put_attr(policy->cpu); | 141 | cpufreq_frequency_table_put_attr(policy->cpu); |
188 | return 0; | 142 | return 0; |
@@ -195,19 +149,14 @@ static unsigned int powernow_k6_get(unsigned int cpu) | |||
195 | return ret; | 149 | return ret; |
196 | } | 150 | } |
197 | 151 | ||
198 | static struct freq_attr *powernow_k6_attr[] = { | ||
199 | &cpufreq_freq_attr_scaling_available_freqs, | ||
200 | NULL, | ||
201 | }; | ||
202 | |||
203 | static struct cpufreq_driver powernow_k6_driver = { | 152 | static struct cpufreq_driver powernow_k6_driver = { |
204 | .verify = powernow_k6_verify, | 153 | .verify = cpufreq_generic_frequency_table_verify, |
205 | .target = powernow_k6_target, | 154 | .target_index = powernow_k6_target, |
206 | .init = powernow_k6_cpu_init, | 155 | .init = powernow_k6_cpu_init, |
207 | .exit = powernow_k6_cpu_exit, | 156 | .exit = powernow_k6_cpu_exit, |
208 | .get = powernow_k6_get, | 157 | .get = powernow_k6_get, |
209 | .name = "powernow-k6", | 158 | .name = "powernow-k6", |
210 | .attr = powernow_k6_attr, | 159 | .attr = cpufreq_generic_attr, |
211 | }; | 160 | }; |
212 | 161 | ||
213 | static const struct x86_cpu_id powernow_k6_ids[] = { | 162 | static const struct x86_cpu_id powernow_k6_ids[] = { |
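
powernow-k6 folds the old powernow_k6_set_state() helper into the ->target_index() callback itself; the visible behavioural change is that an out-of-range multiplier now propagates -EINVAL instead of only being logged from a void function. A sketch of that shape; the multiplier table and bus frequency below are illustrative values, not the driver's real data:

#include <stdio.h>
#include <errno.h>

static const unsigned int clock_ratio[] = { 45, 50, 40, 55, 20, 30, 60, 35 };
static unsigned int max_multiplier = 45;   /* tenths, i.e. 4.5x */
static unsigned int busfreq = 100000;      /* kHz */

/* Former void set_state(): now the target callback, returning an error code. */
static int k6_target(unsigned int index)
{
    if (clock_ratio[index] > max_multiplier) {
        fprintf(stderr, "invalid target frequency\n");
        return -EINVAL;
    }
    printf("new frequency: %u kHz\n", busfreq * clock_ratio[index] / 10);
    return 0;
}

int main(void)
{
    k6_target(2);  /* 4.0x -> accepted */
    k6_target(6);  /* 6.0x -> above this part's ceiling, rejected with -EINVAL */
    return 0;
}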
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 14ce480be8ab..946708a1d745 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
@@ -248,7 +248,7 @@ static void change_VID(int vid) | |||
248 | } | 248 | } |
249 | 249 | ||
250 | 250 | ||
251 | static void change_speed(struct cpufreq_policy *policy, unsigned int index) | 251 | static int powernow_target(struct cpufreq_policy *policy, unsigned int index) |
252 | { | 252 | { |
253 | u8 fid, vid; | 253 | u8 fid, vid; |
254 | struct cpufreq_freqs freqs; | 254 | struct cpufreq_freqs freqs; |
@@ -291,6 +291,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index) | |||
291 | local_irq_enable(); | 291 | local_irq_enable(); |
292 | 292 | ||
293 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 293 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
294 | |||
295 | return 0; | ||
294 | } | 296 | } |
295 | 297 | ||
296 | 298 | ||
@@ -533,27 +535,6 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
533 | } | 535 | } |
534 | 536 | ||
535 | 537 | ||
536 | static int powernow_target(struct cpufreq_policy *policy, | ||
537 | unsigned int target_freq, | ||
538 | unsigned int relation) | ||
539 | { | ||
540 | unsigned int newstate; | ||
541 | |||
542 | if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, | ||
543 | relation, &newstate)) | ||
544 | return -EINVAL; | ||
545 | |||
546 | change_speed(policy, newstate); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | |||
552 | static int powernow_verify(struct cpufreq_policy *policy) | ||
553 | { | ||
554 | return cpufreq_frequency_table_verify(policy, powernow_table); | ||
555 | } | ||
556 | |||
557 | /* | 538 | /* |
558 | * We use the fact that the bus frequency is somehow | 539 | * We use the fact that the bus frequency is somehow |
559 | * a multiple of 100000/3 khz, then we compute sgtc according | 540 | * a multiple of 100000/3 khz, then we compute sgtc according |
@@ -678,11 +659,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) | |||
678 | policy->cpuinfo.transition_latency = | 659 | policy->cpuinfo.transition_latency = |
679 | cpufreq_scale(2000000UL, fsb, latency); | 660 | cpufreq_scale(2000000UL, fsb, latency); |
680 | 661 | ||
681 | policy->cur = powernow_get(0); | 662 | return cpufreq_table_validate_and_show(policy, powernow_table); |
682 | |||
683 | cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); | ||
684 | |||
685 | return cpufreq_frequency_table_cpuinfo(policy, powernow_table); | ||
686 | } | 663 | } |
687 | 664 | ||
688 | static int powernow_cpu_exit(struct cpufreq_policy *policy) | 665 | static int powernow_cpu_exit(struct cpufreq_policy *policy) |
@@ -701,14 +678,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) | |||
701 | return 0; | 678 | return 0; |
702 | } | 679 | } |
703 | 680 | ||
704 | static struct freq_attr *powernow_table_attr[] = { | ||
705 | &cpufreq_freq_attr_scaling_available_freqs, | ||
706 | NULL, | ||
707 | }; | ||
708 | |||
709 | static struct cpufreq_driver powernow_driver = { | 681 | static struct cpufreq_driver powernow_driver = { |
710 | .verify = powernow_verify, | 682 | .verify = cpufreq_generic_frequency_table_verify, |
711 | .target = powernow_target, | 683 | .target_index = powernow_target, |
712 | .get = powernow_get, | 684 | .get = powernow_get, |
713 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI | 685 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI |
714 | .bios_limit = acpi_processor_get_bios_limit, | 686 | .bios_limit = acpi_processor_get_bios_limit, |
@@ -716,7 +688,7 @@ static struct cpufreq_driver powernow_driver = { | |||
716 | .init = powernow_cpu_init, | 688 | .init = powernow_cpu_init, |
717 | .exit = powernow_cpu_exit, | 689 | .exit = powernow_cpu_exit, |
718 | .name = "powernow-k7", | 690 | .name = "powernow-k7", |
719 | .attr = powernow_table_attr, | 691 | .attr = cpufreq_generic_attr, |
720 | }; | 692 | }; |
721 | 693 | ||
722 | static int __init powernow_init(void) | 694 | static int __init powernow_init(void) |
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2344a9ed17f3..62a1ce47d3df 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -977,20 +977,17 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
977 | 977 | ||
978 | struct powernowk8_target_arg { | 978 | struct powernowk8_target_arg { |
979 | struct cpufreq_policy *pol; | 979 | struct cpufreq_policy *pol; |
980 | unsigned targfreq; | 980 | unsigned newstate; |
981 | unsigned relation; | ||
982 | }; | 981 | }; |
983 | 982 | ||
984 | static long powernowk8_target_fn(void *arg) | 983 | static long powernowk8_target_fn(void *arg) |
985 | { | 984 | { |
986 | struct powernowk8_target_arg *pta = arg; | 985 | struct powernowk8_target_arg *pta = arg; |
987 | struct cpufreq_policy *pol = pta->pol; | 986 | struct cpufreq_policy *pol = pta->pol; |
988 | unsigned targfreq = pta->targfreq; | 987 | unsigned newstate = pta->newstate; |
989 | unsigned relation = pta->relation; | ||
990 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 988 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
991 | u32 checkfid; | 989 | u32 checkfid; |
992 | u32 checkvid; | 990 | u32 checkvid; |
993 | unsigned int newstate; | ||
994 | int ret; | 991 | int ret; |
995 | 992 | ||
996 | if (!data) | 993 | if (!data) |
@@ -1004,8 +1001,9 @@ static long powernowk8_target_fn(void *arg) | |||
1004 | return -EIO; | 1001 | return -EIO; |
1005 | } | 1002 | } |
1006 | 1003 | ||
1007 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | 1004 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n", |
1008 | pol->cpu, targfreq, pol->min, pol->max, relation); | 1005 | pol->cpu, data->powernow_table[newstate].frequency, pol->min, |
1006 | pol->max); | ||
1009 | 1007 | ||
1010 | if (query_current_values_with_pending_wait(data)) | 1008 | if (query_current_values_with_pending_wait(data)) |
1011 | return -EIO; | 1009 | return -EIO; |
@@ -1021,10 +1019,6 @@ static long powernowk8_target_fn(void *arg) | |||
1021 | checkvid, data->currvid); | 1019 | checkvid, data->currvid); |
1022 | } | 1020 | } |
1023 | 1021 | ||
1024 | if (cpufreq_frequency_table_target(pol, data->powernow_table, | ||
1025 | targfreq, relation, &newstate)) | ||
1026 | return -EIO; | ||
1027 | |||
1028 | mutex_lock(&fidvid_mutex); | 1022 | mutex_lock(&fidvid_mutex); |
1029 | 1023 | ||
1030 | powernow_k8_acpi_pst_values(data, newstate); | 1024 | powernow_k8_acpi_pst_values(data, newstate); |
@@ -1044,26 +1038,13 @@ static long powernowk8_target_fn(void *arg) | |||
1044 | } | 1038 | } |
1045 | 1039 | ||
1046 | /* Driver entry point to switch to the target frequency */ | 1040 | /* Driver entry point to switch to the target frequency */ |
1047 | static int powernowk8_target(struct cpufreq_policy *pol, | 1041 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned index) |
1048 | unsigned targfreq, unsigned relation) | ||
1049 | { | 1042 | { |
1050 | struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, | 1043 | struct powernowk8_target_arg pta = { .pol = pol, .newstate = index }; |
1051 | .relation = relation }; | ||
1052 | 1044 | ||
1053 | return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); | 1045 | return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); |
1054 | } | 1046 | } |
1055 | 1047 | ||
1056 | /* Driver entry point to verify the policy and range of frequencies */ | ||
1057 | static int powernowk8_verify(struct cpufreq_policy *pol) | ||
1058 | { | ||
1059 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | ||
1060 | |||
1061 | if (!data) | ||
1062 | return -EINVAL; | ||
1063 | |||
1064 | return cpufreq_frequency_table_verify(pol, data->powernow_table); | ||
1065 | } | ||
1066 | |||
1067 | struct init_on_cpu { | 1048 | struct init_on_cpu { |
1068 | struct powernow_k8_data *data; | 1049 | struct powernow_k8_data *data; |
1069 | int rc; | 1050 | int rc; |
@@ -1152,11 +1133,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1152 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); | 1133 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); |
1153 | data->available_cores = pol->cpus; | 1134 | data->available_cores = pol->cpus; |
1154 | 1135 | ||
1155 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1156 | pr_debug("policy current frequency %d kHz\n", pol->cur); | ||
1157 | |||
1158 | /* min/max the cpu is capable of */ | 1136 | /* min/max the cpu is capable of */ |
1159 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | 1137 | if (cpufreq_table_validate_and_show(pol, data->powernow_table)) { |
1160 | printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); | 1138 | printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); |
1161 | powernow_k8_cpu_exit_acpi(data); | 1139 | powernow_k8_cpu_exit_acpi(data); |
1162 | kfree(data->powernow_table); | 1140 | kfree(data->powernow_table); |
@@ -1164,8 +1142,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1164 | return -EINVAL; | 1142 | return -EINVAL; |
1165 | } | 1143 | } |
1166 | 1144 | ||
1167 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | ||
1168 | |||
1169 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", | 1145 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", |
1170 | data->currfid, data->currvid); | 1146 | data->currfid, data->currvid); |
1171 | 1147 | ||
@@ -1227,20 +1203,15 @@ out: | |||
1227 | return khz; | 1203 | return khz; |
1228 | } | 1204 | } |
1229 | 1205 | ||
1230 | static struct freq_attr *powernow_k8_attr[] = { | ||
1231 | &cpufreq_freq_attr_scaling_available_freqs, | ||
1232 | NULL, | ||
1233 | }; | ||
1234 | |||
1235 | static struct cpufreq_driver cpufreq_amd64_driver = { | 1206 | static struct cpufreq_driver cpufreq_amd64_driver = { |
1236 | .verify = powernowk8_verify, | 1207 | .verify = cpufreq_generic_frequency_table_verify, |
1237 | .target = powernowk8_target, | 1208 | .target_index = powernowk8_target, |
1238 | .bios_limit = acpi_processor_get_bios_limit, | 1209 | .bios_limit = acpi_processor_get_bios_limit, |
1239 | .init = powernowk8_cpu_init, | 1210 | .init = powernowk8_cpu_init, |
1240 | .exit = powernowk8_cpu_exit, | 1211 | .exit = powernowk8_cpu_exit, |
1241 | .get = powernowk8_get, | 1212 | .get = powernowk8_get, |
1242 | .name = "powernow-k8", | 1213 | .name = "powernow-k8", |
1243 | .attr = powernow_k8_attr, | 1214 | .attr = cpufreq_generic_attr, |
1244 | }; | 1215 | }; |
1245 | 1216 | ||
1246 | static void __request_acpi_cpufreq(void) | 1217 | static void __request_acpi_cpufreq(void) |
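
powernow-k8 performs the switch on the target CPU via work_on_cpu(), so the conversion mainly shrinks the argument bundle: the core-resolved index replaces the target-frequency/relation pair, and the driver-private verify callback disappears along with the lookup. The bundle-and-run shape, with work_on_cpu() modelled as a direct call:

#include <stdio.h>

struct target_arg {
    int cpu;
    unsigned int index;  /* already resolved by the cpufreq core */
};

static long target_fn(void *arg)
{
    struct target_arg *pta = arg;

    printf("cpu %d: programming P-state index %u\n", pta->cpu, pta->index);
    return 0;
}

/* Stand-in for work_on_cpu(): run fn(arg) in the right context. */
static long run_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
    (void)cpu;
    return fn(arg);
}

int main(void)
{
    struct target_arg pta = { .cpu = 0, .index = 2 };

    return (int)run_on_cpu(pta.cpu, target_fn, &pta);
}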
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index 60e81d524ea8..79d8e9c46b6d 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c | |||
@@ -202,7 +202,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
202 | table[i].frequency = CPUFREQ_TABLE_END; | 202 | table[i].frequency = CPUFREQ_TABLE_END; |
203 | 203 | ||
204 | /* set the min and max frequency properly */ | 204 | /* set the min and max frequency properly */ |
205 | ret = cpufreq_frequency_table_cpuinfo(policy, table); | 205 | ret = cpufreq_table_validate_and_show(policy, table); |
206 | if (ret) { | 206 | if (ret) { |
207 | pr_err("invalid frequency table: %d\n", ret); | 207 | pr_err("invalid frequency table: %d\n", ret); |
208 | goto err_nomem1; | 208 | goto err_nomem1; |
@@ -217,9 +217,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
217 | per_cpu(cpu_data, i) = data; | 217 | per_cpu(cpu_data, i) = data; |
218 | 218 | ||
219 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 219 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
220 | policy->cur = corenet_cpufreq_get_speed(policy->cpu); | ||
221 | |||
222 | cpufreq_frequency_table_get_attr(table, cpu); | ||
223 | of_node_put(np); | 220 | of_node_put(np); |
224 | 221 | ||
225 | return 0; | 222 | return 0; |
@@ -253,36 +250,21 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
253 | return 0; | 250 | return 0; |
254 | } | 251 | } |
255 | 252 | ||
256 | static int corenet_cpufreq_verify(struct cpufreq_policy *policy) | ||
257 | { | ||
258 | struct cpufreq_frequency_table *table = | ||
259 | per_cpu(cpu_data, policy->cpu)->table; | ||
260 | |||
261 | return cpufreq_frequency_table_verify(policy, table); | ||
262 | } | ||
263 | |||
264 | static int corenet_cpufreq_target(struct cpufreq_policy *policy, | 253 | static int corenet_cpufreq_target(struct cpufreq_policy *policy, |
265 | unsigned int target_freq, unsigned int relation) | 254 | unsigned int index) |
266 | { | 255 | { |
267 | struct cpufreq_freqs freqs; | 256 | struct cpufreq_freqs freqs; |
268 | unsigned int new; | ||
269 | struct clk *parent; | 257 | struct clk *parent; |
270 | int ret; | 258 | int ret; |
271 | struct cpu_data *data = per_cpu(cpu_data, policy->cpu); | 259 | struct cpu_data *data = per_cpu(cpu_data, policy->cpu); |
272 | 260 | ||
273 | cpufreq_frequency_table_target(policy, data->table, | ||
274 | target_freq, relation, &new); | ||
275 | |||
276 | if (policy->cur == data->table[new].frequency) | ||
277 | return 0; | ||
278 | |||
279 | freqs.old = policy->cur; | 261 | freqs.old = policy->cur; |
280 | freqs.new = data->table[new].frequency; | 262 | freqs.new = data->table[index].frequency; |
281 | 263 | ||
282 | mutex_lock(&cpufreq_lock); | 264 | mutex_lock(&cpufreq_lock); |
283 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 265 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
284 | 266 | ||
285 | parent = of_clk_get(data->parent, data->table[new].driver_data); | 267 | parent = of_clk_get(data->parent, data->table[index].driver_data); |
286 | ret = clk_set_parent(data->clk, parent); | 268 | ret = clk_set_parent(data->clk, parent); |
287 | if (ret) | 269 | if (ret) |
288 | freqs.new = freqs.old; | 270 | freqs.new = freqs.old; |
@@ -293,20 +275,15 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy, | |||
293 | return ret; | 275 | return ret; |
294 | } | 276 | } |
295 | 277 | ||
296 | static struct freq_attr *corenet_cpufreq_attr[] = { | ||
297 | &cpufreq_freq_attr_scaling_available_freqs, | ||
298 | NULL, | ||
299 | }; | ||
300 | |||
301 | static struct cpufreq_driver ppc_corenet_cpufreq_driver = { | 278 | static struct cpufreq_driver ppc_corenet_cpufreq_driver = { |
302 | .name = "ppc_cpufreq", | 279 | .name = "ppc_cpufreq", |
303 | .flags = CPUFREQ_CONST_LOOPS, | 280 | .flags = CPUFREQ_CONST_LOOPS, |
304 | .init = corenet_cpufreq_cpu_init, | 281 | .init = corenet_cpufreq_cpu_init, |
305 | .exit = __exit_p(corenet_cpufreq_cpu_exit), | 282 | .exit = __exit_p(corenet_cpufreq_cpu_exit), |
306 | .verify = corenet_cpufreq_verify, | 283 | .verify = cpufreq_generic_frequency_table_verify, |
307 | .target = corenet_cpufreq_target, | 284 | .target_index = corenet_cpufreq_target, |
308 | .get = corenet_cpufreq_get_speed, | 285 | .get = corenet_cpufreq_get_speed, |
309 | .attr = corenet_cpufreq_attr, | 286 | .attr = cpufreq_generic_attr, |
310 | }; | 287 | }; |
311 | 288 | ||
312 | static const struct of_device_id node_matches[] __initdata = { | 289 | static const struct of_device_id node_matches[] __initdata = { |
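
ppc-corenet keeps the PRECHANGE/POSTCHANGE notifications around clk_set_parent() and, when the reparent fails, reports the old frequency back in POSTCHANGE so governors never record a switch that did not happen. That rollback-on-failure pattern, with the clock call stubbed out:

#include <stdio.h>

struct freqs { unsigned int old, next; };

static void notify(const char *phase, const struct freqs *f)
{
    printf("%s: %u -> %u kHz\n", phase, f->old, f->next);
}

static int set_parent_clock(unsigned int khz)
{
    (void)khz;
    return -1;  /* pretend the reparent failed */
}

int main(void)
{
    struct freqs freqs = { .old = 1200000, .next = 1500000 };
    int ret;

    notify("PRECHANGE", &freqs);
    ret = set_parent_clock(freqs.next);
    if (ret)
        freqs.next = freqs.old;  /* report "no change" on failure */
    notify("POSTCHANGE", &freqs);
    return ret ? 1 : 0;
}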
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 2e448f0bbdc5..52f707d5f458 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c | |||
@@ -123,37 +123,16 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
123 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); | 123 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
124 | #endif | 124 | #endif |
125 | 125 | ||
126 | cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); | ||
127 | |||
128 | /* this ensures that policy->cpuinfo_min | 126 | /* this ensures that policy->cpuinfo_min |
129 | * and policy->cpuinfo_max are set correctly */ | 127 | * and policy->cpuinfo_max are set correctly */ |
130 | return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); | 128 | return cpufreq_table_validate_and_show(policy, cbe_freqs); |
131 | } | ||
132 | |||
133 | static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
134 | { | ||
135 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int cbe_cpufreq_verify(struct cpufreq_policy *policy) | ||
140 | { | ||
141 | return cpufreq_frequency_table_verify(policy, cbe_freqs); | ||
142 | } | 129 | } |
143 | 130 | ||
144 | static int cbe_cpufreq_target(struct cpufreq_policy *policy, | 131 | static int cbe_cpufreq_target(struct cpufreq_policy *policy, |
145 | unsigned int target_freq, | 132 | unsigned int cbe_pmode_new) |
146 | unsigned int relation) | ||
147 | { | 133 | { |
148 | int rc; | 134 | int rc; |
149 | struct cpufreq_freqs freqs; | 135 | struct cpufreq_freqs freqs; |
150 | unsigned int cbe_pmode_new; | ||
151 | |||
152 | cpufreq_frequency_table_target(policy, | ||
153 | cbe_freqs, | ||
154 | target_freq, | ||
155 | relation, | ||
156 | &cbe_pmode_new); | ||
157 | 136 | ||
158 | freqs.old = policy->cur; | 137 | freqs.old = policy->cur; |
159 | freqs.new = cbe_freqs[cbe_pmode_new].frequency; | 138 | freqs.new = cbe_freqs[cbe_pmode_new].frequency; |
@@ -176,10 +155,10 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy, | |||
176 | } | 155 | } |
177 | 156 | ||
178 | static struct cpufreq_driver cbe_cpufreq_driver = { | 157 | static struct cpufreq_driver cbe_cpufreq_driver = { |
179 | .verify = cbe_cpufreq_verify, | 158 | .verify = cpufreq_generic_frequency_table_verify, |
180 | .target = cbe_cpufreq_target, | 159 | .target_index = cbe_cpufreq_target, |
181 | .init = cbe_cpufreq_cpu_init, | 160 | .init = cbe_cpufreq_cpu_init, |
182 | .exit = cbe_cpufreq_cpu_exit, | 161 | .exit = cpufreq_generic_exit, |
183 | .name = "cbe-cpufreq", | 162 | .name = "cbe-cpufreq", |
184 | .flags = CPUFREQ_CONST_LOOPS, | 163 | .flags = CPUFREQ_CONST_LOOPS, |
185 | }; | 164 | }; |
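
The per-driver exit callbacks removed here and in p4-clockmod did nothing but drop the table attribute, so pointing .exit at the shared cpufreq_generic_exit() assumes the common helper does the same in their place. As a trivial model of that consolidation:

#include <stdio.h>

/* Stand-in for cpufreq_frequency_table_put_attr(). */
static void table_put_attr(int cpu)
{
    printf("cpu%d: dropping scaling_available_frequencies\n", cpu);
}

/* One shared exit path instead of identical per-driver copies. */
static int generic_exit(int cpu)
{
    table_put_attr(cpu);
    return 0;
}

int main(void)
{
    return generic_exit(0);
}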
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 8749eaf18793..183bc13f13e5 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c | |||
@@ -262,36 +262,16 @@ static u32 mdrefr_dri(unsigned int freq) | |||
262 | return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; | 262 | return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; |
263 | } | 263 | } |
264 | 264 | ||
265 | /* find a valid frequency point */ | ||
266 | static int pxa_verify_policy(struct cpufreq_policy *policy) | ||
267 | { | ||
268 | struct cpufreq_frequency_table *pxa_freqs_table; | ||
269 | pxa_freqs_t *pxa_freqs; | ||
270 | int ret; | ||
271 | |||
272 | find_freq_tables(&pxa_freqs_table, &pxa_freqs); | ||
273 | ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table); | ||
274 | |||
275 | if (freq_debug) | ||
276 | pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n", | ||
277 | policy->min, policy->max); | ||
278 | |||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | static unsigned int pxa_cpufreq_get(unsigned int cpu) | 265 | static unsigned int pxa_cpufreq_get(unsigned int cpu) |
283 | { | 266 | { |
284 | return get_clk_frequency_khz(0); | 267 | return get_clk_frequency_khz(0); |
285 | } | 268 | } |
286 | 269 | ||
287 | static int pxa_set_target(struct cpufreq_policy *policy, | 270 | static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) |
288 | unsigned int target_freq, | ||
289 | unsigned int relation) | ||
290 | { | 271 | { |
291 | struct cpufreq_frequency_table *pxa_freqs_table; | 272 | struct cpufreq_frequency_table *pxa_freqs_table; |
292 | pxa_freqs_t *pxa_freq_settings; | 273 | pxa_freqs_t *pxa_freq_settings; |
293 | struct cpufreq_freqs freqs; | 274 | struct cpufreq_freqs freqs; |
294 | unsigned int idx; | ||
295 | unsigned long flags; | 275 | unsigned long flags; |
296 | unsigned int new_freq_cpu, new_freq_mem; | 276 | unsigned int new_freq_cpu, new_freq_mem; |
297 | unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; | 277 | unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; |
@@ -300,12 +280,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, | |||
300 | /* Get the current policy */ | 280 | /* Get the current policy */ |
301 | find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); | 281 | find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); |
302 | 282 | ||
303 | /* Lookup the next frequency */ | ||
304 | if (cpufreq_frequency_table_target(policy, pxa_freqs_table, | ||
305 | target_freq, relation, &idx)) { | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | |||
309 | new_freq_cpu = pxa_freq_settings[idx].khz; | 283 | new_freq_cpu = pxa_freq_settings[idx].khz; |
310 | new_freq_mem = pxa_freq_settings[idx].membus; | 284 | new_freq_mem = pxa_freq_settings[idx].membus; |
311 | freqs.old = policy->cur; | 285 | freqs.old = policy->cur; |
@@ -414,8 +388,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
414 | 388 | ||
415 | /* set default policy and cpuinfo */ | 389 | /* set default policy and cpuinfo */ |
416 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ | 390 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ |
417 | policy->cur = get_clk_frequency_khz(0); /* current freq */ | ||
418 | policy->min = policy->max = policy->cur; | ||
419 | 391 | ||
420 | /* Generate pxa25x the run cpufreq_frequency_table struct */ | 392 | /* Generate pxa25x the run cpufreq_frequency_table struct */ |
421 | for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { | 393 | for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { |
@@ -453,10 +425,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
453 | find_freq_tables(&pxa255_freq_table, &pxa255_freqs); | 425 | find_freq_tables(&pxa255_freq_table, &pxa255_freqs); |
454 | pr_info("PXA255 cpufreq using %s frequency table\n", | 426 | pr_info("PXA255 cpufreq using %s frequency table\n", |
455 | pxa255_turbo_table ? "turbo" : "run"); | 427 | pxa255_turbo_table ? "turbo" : "run"); |
456 | cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table); | 428 | |
429 | cpufreq_table_validate_and_show(policy, pxa255_freq_table); | ||
430 | } | ||
431 | else if (cpu_is_pxa27x()) { | ||
432 | cpufreq_table_validate_and_show(policy, pxa27x_freq_table); | ||
457 | } | 433 | } |
458 | else if (cpu_is_pxa27x()) | ||
459 | cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table); | ||
460 | 434 | ||
461 | printk(KERN_INFO "PXA CPU frequency change support initialized\n"); | 435 | printk(KERN_INFO "PXA CPU frequency change support initialized\n"); |
462 | 436 | ||
@@ -464,9 +438,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) | |||
464 | } | 438 | } |
465 | 439 | ||
466 | static struct cpufreq_driver pxa_cpufreq_driver = { | 440 | static struct cpufreq_driver pxa_cpufreq_driver = { |
467 | .verify = pxa_verify_policy, | 441 | .verify = cpufreq_generic_frequency_table_verify, |
468 | .target = pxa_set_target, | 442 | .target_index = pxa_set_target, |
469 | .init = pxa_cpufreq_init, | 443 | .init = pxa_cpufreq_init, |
444 | .exit = cpufreq_generic_exit, | ||
470 | .get = pxa_cpufreq_get, | 445 | .get = pxa_cpufreq_get, |
471 | .name = "PXA2xx", | 446 | .name = "PXA2xx", |
472 | }; | 447 | }; |
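
cpufreq_table_validate_and_show(), used here and in most of the other conversions, stands in for the old two-step of cpufreq_frequency_table_cpuinfo() plus cpufreq_frequency_table_get_attr(); that is why the explicit get_attr/put_attr calls vanish alongside it. A rough model of the combined helper; names are simplified, and the kernel version validates the entries and keeps the table in the policy for sysfs:

#include <stdio.h>

#define TABLE_END 0xFFFFFFFFu

struct policy {
    unsigned int cpuinfo_min, cpuinfo_max;
    const unsigned int *table;  /* remembered for sysfs / later lookups */
};

static int table_cpuinfo(struct policy *p, const unsigned int *table)
{
    unsigned int i, lo = TABLE_END, hi = 0;

    for (i = 0; table[i] != TABLE_END; i++) {
        if (table[i] < lo)
            lo = table[i];
        if (table[i] > hi)
            hi = table[i];
    }
    if (hi == 0)
        return -1;
    p->cpuinfo_min = lo;
    p->cpuinfo_max = hi;
    return 0;
}

/* validate_and_show = derive limits from the table + register it for export. */
static int table_validate_and_show(struct policy *p, const unsigned int *table)
{
    int ret = table_cpuinfo(p, table);

    if (!ret)
        p->table = table;
    return ret;
}

int main(void)
{
    static const unsigned int freqs[] = { 104000, 208000, 416000, 624000, TABLE_END };
    struct policy pol = { 0 };

    if (!table_validate_and_show(&pol, freqs))
        printf("cpuinfo %u..%u kHz, %s\n", pol.cpuinfo_min, pol.cpuinfo_max,
               pol.table ? "table registered" : "no table");
    return 0;
}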
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c index d26306fb00d2..132e37d578c2 100644 --- a/drivers/cpufreq/pxa3xx-cpufreq.c +++ b/drivers/cpufreq/pxa3xx-cpufreq.c | |||
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy, | |||
108 | pxa3xx_freqs_num = num; | 108 | pxa3xx_freqs_num = num; |
109 | pxa3xx_freqs_table = table; | 109 | pxa3xx_freqs_table = table; |
110 | 110 | ||
111 | return cpufreq_frequency_table_cpuinfo(policy, table); | 111 | return cpufreq_table_validate_and_show(policy, table); |
112 | } | 112 | } |
113 | 113 | ||
114 | static void __update_core_freq(struct pxa3xx_freq_info *info) | 114 | static void __update_core_freq(struct pxa3xx_freq_info *info) |
@@ -150,34 +150,21 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info) | |||
150 | cpu_relax(); | 150 | cpu_relax(); |
151 | } | 151 | } |
152 | 152 | ||
153 | static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy) | ||
154 | { | ||
155 | return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table); | ||
156 | } | ||
157 | |||
158 | static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) | 153 | static unsigned int pxa3xx_cpufreq_get(unsigned int cpu) |
159 | { | 154 | { |
160 | return pxa3xx_get_clk_frequency_khz(0); | 155 | return pxa3xx_get_clk_frequency_khz(0); |
161 | } | 156 | } |
162 | 157 | ||
163 | static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, | 158 | static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index) |
164 | unsigned int target_freq, | ||
165 | unsigned int relation) | ||
166 | { | 159 | { |
167 | struct pxa3xx_freq_info *next; | 160 | struct pxa3xx_freq_info *next; |
168 | struct cpufreq_freqs freqs; | 161 | struct cpufreq_freqs freqs; |
169 | unsigned long flags; | 162 | unsigned long flags; |
170 | int idx; | ||
171 | 163 | ||
172 | if (policy->cpu != 0) | 164 | if (policy->cpu != 0) |
173 | return -EINVAL; | 165 | return -EINVAL; |
174 | 166 | ||
175 | /* Lookup the next frequency */ | 167 | next = &pxa3xx_freqs[index]; |
176 | if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table, | ||
177 | target_freq, relation, &idx)) | ||
178 | return -EINVAL; | ||
179 | |||
180 | next = &pxa3xx_freqs[idx]; | ||
181 | 168 | ||
182 | freqs.old = policy->cur; | 169 | freqs.old = policy->cur; |
183 | freqs.new = next->cpufreq_mhz * 1000; | 170 | freqs.new = next->cpufreq_mhz * 1000; |
@@ -186,9 +173,6 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, | |||
186 | freqs.old / 1000, freqs.new / 1000, | 173 | freqs.old / 1000, freqs.new / 1000, |
187 | (freqs.old == freqs.new) ? " (skipped)" : ""); | 174 | (freqs.old == freqs.new) ? " (skipped)" : ""); |
188 | 175 | ||
189 | if (freqs.old == target_freq) | ||
190 | return 0; | ||
191 | |||
192 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 176 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
193 | 177 | ||
194 | local_irq_save(flags); | 178 | local_irq_save(flags); |
@@ -206,11 +190,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | |||
206 | int ret = -EINVAL; | 190 | int ret = -EINVAL; |
207 | 191 | ||
208 | /* set default policy and cpuinfo */ | 192 | /* set default policy and cpuinfo */ |
209 | policy->cpuinfo.min_freq = 104000; | 193 | policy->min = policy->cpuinfo.min_freq = 104000; |
210 | policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000; | 194 | policy->max = policy->cpuinfo.max_freq = |
195 | (cpu_is_pxa320()) ? 806000 : 624000; | ||
211 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ | 196 | policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ |
212 | policy->max = pxa3xx_get_clk_frequency_khz(0); | ||
213 | policy->cur = policy->min = policy->max; | ||
214 | 197 | ||
215 | if (cpu_is_pxa300() || cpu_is_pxa310()) | 198 | if (cpu_is_pxa300() || cpu_is_pxa310()) |
216 | ret = setup_freqs_table(policy, pxa300_freqs, | 199 | ret = setup_freqs_table(policy, pxa300_freqs, |
@@ -230,9 +213,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) | |||
230 | } | 213 | } |
231 | 214 | ||
232 | static struct cpufreq_driver pxa3xx_cpufreq_driver = { | 215 | static struct cpufreq_driver pxa3xx_cpufreq_driver = { |
233 | .verify = pxa3xx_cpufreq_verify, | 216 | .verify = cpufreq_generic_frequency_table_verify, |
234 | .target = pxa3xx_cpufreq_set, | 217 | .target_index = pxa3xx_cpufreq_set, |
235 | .init = pxa3xx_cpufreq_init, | 218 | .init = pxa3xx_cpufreq_init, |
219 | .exit = cpufreq_generic_exit, | ||
236 | .get = pxa3xx_cpufreq_get, | 220 | .get = pxa3xx_cpufreq_get, |
237 | .name = "pxa3xx-cpufreq", | 221 | .name = "pxa3xx-cpufreq", |
238 | }; | 222 | }; |
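The PXA conversions above show the pattern repeated throughout this series: the relation-based ->target() hook becomes ->target_index(), and per-driver verify/exit/attr boilerplate gives way to the generic helpers. Below is a minimal sketch of the resulting driver shape; the demo_* names and the frequencies are invented for illustration, while the callbacks and helpers (cpufreq_table_validate_and_show, cpufreq_generic_frequency_table_verify, cpufreq_generic_exit) are the ones this series uses.

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table demo_freq_table[] = {
        { .driver_data = 0, .frequency = 104000 },      /* kHz */
        { .driver_data = 1, .frequency = 208000 },
        { .frequency = CPUFREQ_TABLE_END },
};

/* The core resolves target_freq/relation to a table index before this runs. */
static int demo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
        struct cpufreq_freqs freqs = {
                .old = policy->cur,
                .new = demo_freq_table[index].frequency,
        };

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        /* program PLLs/dividers for freqs.new here */
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* validates the table, fills cpuinfo limits and hooks up sysfs */
        return cpufreq_table_validate_and_show(policy, demo_freq_table);
}

static struct cpufreq_driver demo_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = demo_target_index,
        .init           = demo_cpufreq_init,
        .exit           = cpufreq_generic_exit,
        .name           = "demo",
};

Registration via cpufreq_register_driver() is unchanged; only the callback set shrinks.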
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 22dcb81ef9d0..4188accd34ab 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c | |||
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = { | |||
87 | { 0, CPUFREQ_TABLE_END }, | 87 | { 0, CPUFREQ_TABLE_END }, |
88 | }; | 88 | }; |
89 | 89 | ||
90 | static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
91 | { | ||
92 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | ||
93 | |||
94 | if (policy->cpu != 0) | ||
95 | return -EINVAL; | ||
96 | |||
97 | return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table); | ||
98 | } | ||
99 | |||
100 | static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) | 90 | static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu) |
101 | { | 91 | { |
102 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | 92 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; |
@@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx) | |||
227 | } | 217 | } |
228 | 218 | ||
229 | static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, | 219 | static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, |
230 | unsigned int target_freq, | 220 | unsigned int index) |
231 | unsigned int relation) | ||
232 | { | 221 | { |
233 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; | 222 | struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; |
234 | struct cpufreq_freqs freqs; | 223 | struct cpufreq_freqs freqs; |
235 | int idx, ret, to_dvs = 0; | 224 | int idx, ret, to_dvs = 0; |
236 | unsigned int i; | ||
237 | 225 | ||
238 | mutex_lock(&cpufreq_lock); | 226 | mutex_lock(&cpufreq_lock); |
239 | 227 | ||
240 | pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation); | 228 | idx = s3c_freq->freq_table[index].driver_data; |
241 | |||
242 | ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table, | ||
243 | target_freq, relation, &i); | ||
244 | if (ret != 0) | ||
245 | goto out; | ||
246 | |||
247 | idx = s3c_freq->freq_table[i].driver_data; | ||
248 | 229 | ||
249 | if (idx == SOURCE_HCLK) | 230 | if (idx == SOURCE_HCLK) |
250 | to_dvs = 1; | 231 | to_dvs = 1; |
@@ -266,7 +247,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, | |||
266 | */ | 247 | */ |
267 | freqs.new = (s3c_freq->is_dvs && !to_dvs) | 248 | freqs.new = (s3c_freq->is_dvs && !to_dvs) |
268 | ? clk_get_rate(s3c_freq->hclk) / 1000 | 249 | ? clk_get_rate(s3c_freq->hclk) / 1000 |
269 | : s3c_freq->freq_table[i].frequency; | 250 | : s3c_freq->freq_table[index].frequency; |
270 | 251 | ||
271 | pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); | 252 | pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); |
272 | 253 | ||
@@ -486,20 +467,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
486 | freq++; | 467 | freq++; |
487 | } | 468 | } |
488 | 469 | ||
489 | policy->cur = clk_get_rate(s3c_freq->armclk) / 1000; | ||
490 | |||
491 | /* Datasheet says PLL stabilisation time must be at least 300us, | 470 | /* Datasheet says PLL stabilisation time must be at least 300us, |
492 | * so add some fudge. (reference in LOCKCON0 register description) | 471 | * so add some fudge. (reference in LOCKCON0 register description) |
493 | */ | 472 | */ |
494 | policy->cpuinfo.transition_latency = (500 * 1000) + | 473 | ret = cpufreq_generic_init(policy, s3c_freq->freq_table, |
495 | s3c_freq->regulator_latency; | 474 | (500 * 1000) + s3c_freq->regulator_latency); |
496 | |||
497 | ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table); | ||
498 | if (ret) | 475 | if (ret) |
499 | goto err_freq_table; | 476 | goto err_freq_table; |
500 | 477 | ||
501 | cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0); | ||
502 | |||
503 | register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); | 478 | register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); |
504 | 479 | ||
505 | return 0; | 480 | return 0; |
@@ -518,19 +493,14 @@ err_hclk: | |||
518 | return ret; | 493 | return ret; |
519 | } | 494 | } |
520 | 495 | ||
521 | static struct freq_attr *s3c2416_cpufreq_attr[] = { | ||
522 | &cpufreq_freq_attr_scaling_available_freqs, | ||
523 | NULL, | ||
524 | }; | ||
525 | |||
526 | static struct cpufreq_driver s3c2416_cpufreq_driver = { | 496 | static struct cpufreq_driver s3c2416_cpufreq_driver = { |
527 | .flags = 0, | 497 | .flags = 0, |
528 | .verify = s3c2416_cpufreq_verify_speed, | 498 | .verify = cpufreq_generic_frequency_table_verify, |
529 | .target = s3c2416_cpufreq_set_target, | 499 | .target_index = s3c2416_cpufreq_set_target, |
530 | .get = s3c2416_cpufreq_get_speed, | 500 | .get = s3c2416_cpufreq_get_speed, |
531 | .init = s3c2416_cpufreq_driver_init, | 501 | .init = s3c2416_cpufreq_driver_init, |
532 | .name = "s3c2416", | 502 | .name = "s3c2416", |
533 | .attr = s3c2416_cpufreq_attr, | 503 | .attr = cpufreq_generic_attr, |
534 | }; | 504 | }; |
535 | 505 | ||
536 | static int __init s3c2416_cpufreq_init(void) | 506 | static int __init s3c2416_cpufreq_init(void) |
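The s3c2416 init path above shrinks because cpufreq_generic_init() folds the table validation, the transition-latency setup and the policy->cpus handling into one call. Roughly — this is an approximation of the 3.13-era helper for orientation, not a verbatim copy — it behaves like:

/* approximation of cpufreq_generic_init() in the cpufreq core */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                         struct cpufreq_frequency_table *table,
                         unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret)
                return ret;

        policy->cpuinfo.transition_latency = transition_latency;

        /* drivers using this helper share one clock across all CPUs */
        cpumask_setall(policy->cpus);

        return 0;
}

The dropped policy->cur assignments follow the same logic: for ->target_index() drivers the core reads the current frequency through the driver's ->get() callback instead of trusting the init routine.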
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index b0f343fcb7ee..485088253358 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c | |||
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) | |||
373 | 373 | ||
374 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) | 374 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) |
375 | { | 375 | { |
376 | printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy); | 376 | return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); |
377 | |||
378 | if (policy->cpu != 0) | ||
379 | return -EINVAL; | ||
380 | |||
381 | policy->cur = s3c_cpufreq_get(0); | ||
382 | policy->min = policy->cpuinfo.min_freq = 0; | ||
383 | policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000; | ||
384 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
385 | |||
386 | /* feed the latency information from the cpu driver */ | ||
387 | policy->cpuinfo.transition_latency = cpu_cur.info->latency; | ||
388 | |||
389 | if (ftab) | ||
390 | cpufreq_frequency_table_cpuinfo(policy, ftab); | ||
391 | |||
392 | return 0; | ||
393 | } | 377 | } |
394 | 378 | ||
395 | static int __init s3c_cpufreq_initclks(void) | 379 | static int __init s3c_cpufreq_initclks(void) |
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void) | |||
416 | return 0; | 400 | return 0; |
417 | } | 401 | } |
418 | 402 | ||
419 | static int s3c_cpufreq_verify(struct cpufreq_policy *policy) | ||
420 | { | ||
421 | if (policy->cpu != 0) | ||
422 | return -EINVAL; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | #ifdef CONFIG_PM | 403 | #ifdef CONFIG_PM |
428 | static struct cpufreq_frequency_table suspend_pll; | 404 | static struct cpufreq_frequency_table suspend_pll; |
429 | static unsigned int suspend_freq; | 405 | static unsigned int suspend_freq; |
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy) | |||
473 | 449 | ||
474 | static struct cpufreq_driver s3c24xx_driver = { | 450 | static struct cpufreq_driver s3c24xx_driver = { |
475 | .flags = CPUFREQ_STICKY, | 451 | .flags = CPUFREQ_STICKY, |
476 | .verify = s3c_cpufreq_verify, | ||
477 | .target = s3c_cpufreq_target, | 452 | .target = s3c_cpufreq_target, |
478 | .get = s3c_cpufreq_get, | 453 | .get = s3c_cpufreq_get, |
479 | .init = s3c_cpufreq_init, | 454 | .init = s3c_cpufreq_init, |
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 15631f92ab7d..8bdcf32a4418 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c | |||
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = { | |||
54 | }; | 54 | }; |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy) | ||
58 | { | ||
59 | if (policy->cpu != 0) | ||
60 | return -EINVAL; | ||
61 | |||
62 | return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table); | ||
63 | } | ||
64 | |||
65 | static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) | 57 | static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) |
66 | { | 58 | { |
67 | if (cpu != 0) | 59 | if (cpu != 0) |
@@ -71,26 +63,16 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu) | |||
71 | } | 63 | } |
72 | 64 | ||
73 | static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, | 65 | static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, |
74 | unsigned int target_freq, | 66 | unsigned int index) |
75 | unsigned int relation) | ||
76 | { | 67 | { |
77 | int ret; | 68 | int ret; |
78 | unsigned int i; | ||
79 | struct cpufreq_freqs freqs; | 69 | struct cpufreq_freqs freqs; |
80 | struct s3c64xx_dvfs *dvfs; | 70 | struct s3c64xx_dvfs *dvfs; |
81 | 71 | ||
82 | ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table, | ||
83 | target_freq, relation, &i); | ||
84 | if (ret != 0) | ||
85 | return ret; | ||
86 | |||
87 | freqs.old = clk_get_rate(armclk) / 1000; | 72 | freqs.old = clk_get_rate(armclk) / 1000; |
88 | freqs.new = s3c64xx_freq_table[i].frequency; | 73 | freqs.new = s3c64xx_freq_table[index].frequency; |
89 | freqs.flags = 0; | 74 | freqs.flags = 0; |
90 | dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data]; | 75 | dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; |
91 | |||
92 | if (freqs.old == freqs.new) | ||
93 | return 0; | ||
94 | 76 | ||
95 | pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); | 77 | pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); |
96 | 78 | ||
@@ -243,15 +225,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
243 | freq++; | 225 | freq++; |
244 | } | 226 | } |
245 | 227 | ||
246 | policy->cur = clk_get_rate(armclk) / 1000; | ||
247 | |||
248 | /* Datasheet says PLL stabilisation time (if we were to use | 228 | /* Datasheet says PLL stabilisation time (if we were to use |
249 | * the PLLs, which we don't currently) is ~300us worst case, | 229 | * the PLLs, which we don't currently) is ~300us worst case, |
250 | * but add some fudge. | 230 | * but add some fudge. |
251 | */ | 231 | */ |
252 | policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency; | 232 | ret = cpufreq_generic_init(policy, s3c64xx_freq_table, |
253 | 233 | (500 * 1000) + regulator_latency); | |
254 | ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); | ||
255 | if (ret != 0) { | 234 | if (ret != 0) { |
256 | pr_err("Failed to configure frequency table: %d\n", | 235 | pr_err("Failed to configure frequency table: %d\n", |
257 | ret); | 236 | ret); |
@@ -264,8 +243,8 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) | |||
264 | 243 | ||
265 | static struct cpufreq_driver s3c64xx_cpufreq_driver = { | 244 | static struct cpufreq_driver s3c64xx_cpufreq_driver = { |
266 | .flags = 0, | 245 | .flags = 0, |
267 | .verify = s3c64xx_cpufreq_verify_speed, | 246 | .verify = cpufreq_generic_frequency_table_verify, |
268 | .target = s3c64xx_cpufreq_set_target, | 247 | .target_index = s3c64xx_cpufreq_set_target, |
269 | .get = s3c64xx_cpufreq_get_speed, | 248 | .get = s3c64xx_cpufreq_get_speed, |
270 | .init = s3c64xx_cpufreq_driver_init, | 249 | .init = s3c64xx_cpufreq_driver_init, |
271 | .name = "s3c", | 250 | .name = "s3c", |
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5c7757073793..5978b94e0340 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
@@ -36,16 +36,7 @@ static DEFINE_MUTEX(set_freq_lock); | |||
36 | /* Use 800MHz when entering sleep mode */ | 36 | /* Use 800MHz when entering sleep mode */ |
37 | #define SLEEP_FREQ (800 * 1000) | 37 | #define SLEEP_FREQ (800 * 1000) |
38 | 38 | ||
39 | /* | 39 | /* Tracks whether the cpu frequency can still be updated */ |
40 | * relation has an additional symantics other than the standard of cpufreq | ||
41 | * DISALBE_FURTHER_CPUFREQ: disable further access to target | ||
42 | * ENABLE_FURTUER_CPUFREQ: enable access to target | ||
43 | */ | ||
44 | enum cpufreq_access { | ||
45 | DISABLE_FURTHER_CPUFREQ = 0x10, | ||
46 | ENABLE_FURTHER_CPUFREQ = 0x20, | ||
47 | }; | ||
48 | |||
49 | static bool no_cpufreq_access; | 40 | static bool no_cpufreq_access; |
50 | 41 | ||
51 | /* | 42 | /* |
@@ -174,14 +165,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) | |||
174 | __raw_writel(tmp1, reg); | 165 | __raw_writel(tmp1, reg); |
175 | } | 166 | } |
176 | 167 | ||
177 | static int s5pv210_verify_speed(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | if (policy->cpu) | ||
180 | return -EINVAL; | ||
181 | |||
182 | return cpufreq_frequency_table_verify(policy, s5pv210_freq_table); | ||
183 | } | ||
184 | |||
185 | static unsigned int s5pv210_getspeed(unsigned int cpu) | 168 | static unsigned int s5pv210_getspeed(unsigned int cpu) |
186 | { | 169 | { |
187 | if (cpu) | 170 | if (cpu) |
@@ -190,12 +173,10 @@ static unsigned int s5pv210_getspeed(unsigned int cpu) | |||
190 | return clk_get_rate(cpu_clk) / 1000; | 173 | return clk_get_rate(cpu_clk) / 1000; |
191 | } | 174 | } |
192 | 175 | ||
193 | static int s5pv210_target(struct cpufreq_policy *policy, | 176 | static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) |
194 | unsigned int target_freq, | ||
195 | unsigned int relation) | ||
196 | { | 177 | { |
197 | unsigned long reg; | 178 | unsigned long reg; |
198 | unsigned int index, priv_index; | 179 | unsigned int priv_index; |
199 | unsigned int pll_changing = 0; | 180 | unsigned int pll_changing = 0; |
200 | unsigned int bus_speed_changing = 0; | 181 | unsigned int bus_speed_changing = 0; |
201 | int arm_volt, int_volt; | 182 | int arm_volt, int_volt; |
@@ -203,9 +184,6 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
203 | 184 | ||
204 | mutex_lock(&set_freq_lock); | 185 | mutex_lock(&set_freq_lock); |
205 | 186 | ||
206 | if (relation & ENABLE_FURTHER_CPUFREQ) | ||
207 | no_cpufreq_access = false; | ||
208 | |||
209 | if (no_cpufreq_access) { | 187 | if (no_cpufreq_access) { |
210 | #ifdef CONFIG_PM_VERBOSE | 188 | #ifdef CONFIG_PM_VERBOSE |
211 | pr_err("%s:%d denied access to %s as it is disabled" | 189 | pr_err("%s:%d denied access to %s as it is disabled" |
@@ -215,27 +193,13 @@ static int s5pv210_target(struct cpufreq_policy *policy, | |||
215 | goto exit; | 193 | goto exit; |
216 | } | 194 | } |
217 | 195 | ||
218 | if (relation & DISABLE_FURTHER_CPUFREQ) | ||
219 | no_cpufreq_access = true; | ||
220 | |||
221 | relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ); | ||
222 | |||
223 | freqs.old = s5pv210_getspeed(0); | 196 | freqs.old = s5pv210_getspeed(0); |
224 | |||
225 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, | ||
226 | target_freq, relation, &index)) { | ||
227 | ret = -EINVAL; | ||
228 | goto exit; | ||
229 | } | ||
230 | |||
231 | freqs.new = s5pv210_freq_table[index].frequency; | 197 | freqs.new = s5pv210_freq_table[index].frequency; |
232 | 198 | ||
233 | if (freqs.new == freqs.old) | ||
234 | goto exit; | ||
235 | |||
236 | /* Finding current running level index */ | 199 | /* Finding current running level index */ |
237 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, | 200 | if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, |
238 | freqs.old, relation, &priv_index)) { | 201 | freqs.old, CPUFREQ_RELATION_H, |
202 | &priv_index)) { | ||
239 | ret = -EINVAL; | 203 | ret = -EINVAL; |
240 | goto exit; | 204 | goto exit; |
241 | } | 205 | } |
@@ -551,13 +515,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) | |||
551 | s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); | 515 | s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); |
552 | s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); | 516 | s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); |
553 | 517 | ||
554 | policy->cur = policy->min = policy->max = s5pv210_getspeed(0); | 518 | return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); |
555 | |||
556 | cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu); | ||
557 | |||
558 | policy->cpuinfo.transition_latency = 40000; | ||
559 | |||
560 | return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table); | ||
561 | 519 | ||
562 | out_dmc1: | 520 | out_dmc1: |
563 | clk_put(dmc0_clk); | 521 | clk_put(dmc0_clk); |
@@ -573,16 +531,18 @@ static int s5pv210_cpufreq_notifier_event(struct notifier_block *this, | |||
573 | 531 | ||
574 | switch (event) { | 532 | switch (event) { |
575 | case PM_SUSPEND_PREPARE: | 533 | case PM_SUSPEND_PREPARE: |
576 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 534 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); |
577 | DISABLE_FURTHER_CPUFREQ); | ||
578 | if (ret < 0) | 535 | if (ret < 0) |
579 | return NOTIFY_BAD; | 536 | return NOTIFY_BAD; |
580 | 537 | ||
538 | /* Disable further cpu frequency updates */ | ||
539 | no_cpufreq_access = true; | ||
581 | return NOTIFY_OK; | 540 | return NOTIFY_OK; |
582 | case PM_POST_RESTORE: | 541 | case PM_POST_RESTORE: |
583 | case PM_POST_SUSPEND: | 542 | case PM_POST_SUSPEND: |
584 | cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 543 | /* Re-enable cpu frequency updates */ |
585 | ENABLE_FURTHER_CPUFREQ); | 544 | no_cpufreq_access = false; |
545 | cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); | ||
586 | 546 | ||
587 | return NOTIFY_OK; | 547 | return NOTIFY_OK; |
588 | } | 548 | } |
@@ -595,18 +555,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, | |||
595 | { | 555 | { |
596 | int ret; | 556 | int ret; |
597 | 557 | ||
598 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, | 558 | ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); |
599 | DISABLE_FURTHER_CPUFREQ); | ||
600 | if (ret < 0) | 559 | if (ret < 0) |
601 | return NOTIFY_BAD; | 560 | return NOTIFY_BAD; |
602 | 561 | ||
562 | no_cpufreq_access = true; | ||
603 | return NOTIFY_DONE; | 563 | return NOTIFY_DONE; |
604 | } | 564 | } |
605 | 565 | ||
606 | static struct cpufreq_driver s5pv210_driver = { | 566 | static struct cpufreq_driver s5pv210_driver = { |
607 | .flags = CPUFREQ_STICKY, | 567 | .flags = CPUFREQ_STICKY, |
608 | .verify = s5pv210_verify_speed, | 568 | .verify = cpufreq_generic_frequency_table_verify, |
609 | .target = s5pv210_target, | 569 | .target_index = s5pv210_target, |
610 | .get = s5pv210_getspeed, | 570 | .get = s5pv210_getspeed, |
611 | .init = s5pv210_cpu_init, | 571 | .init = s5pv210_cpu_init, |
612 | .name = "s5pv210", | 572 | .name = "s5pv210", |
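The s5pv210 change above drops the driver-private DISABLE/ENABLE_FURTHER_CPUFREQ bits that used to be smuggled through the 'relation' argument; the gating now lives in a plain no_cpufreq_access flag toggled from the PM and reboot notifiers. A stripped-down sketch of that pattern follows; the demo_* names and DEMO_SLEEP_FREQ are illustrative, while the notifier events and cpufreq calls are standard kernel APIs.

#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

#define DEMO_SLEEP_FREQ (800 * 1000)    /* kHz */

static bool demo_no_cpufreq_access;

static int demo_pm_notifier(struct notifier_block *nb,
                            unsigned long event, void *unused)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);
        int ret = NOTIFY_DONE;

        if (!policy)
                return NOTIFY_DONE;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                /* pin a known-safe frequency, then block further changes */
                if (cpufreq_driver_target(policy, DEMO_SLEEP_FREQ,
                                          CPUFREQ_RELATION_L) < 0) {
                        ret = NOTIFY_BAD;
                        break;
                }
                demo_no_cpufreq_access = true;
                ret = NOTIFY_OK;
                break;
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                /* allow scaling again once resume has completed */
                demo_no_cpufreq_access = false;
                cpufreq_driver_target(policy, DEMO_SLEEP_FREQ,
                                      CPUFREQ_RELATION_L);
                ret = NOTIFY_OK;
                break;
        }

        cpufreq_cpu_put(policy);
        return ret;
}

static struct notifier_block demo_pm_nb = {
        .notifier_call = demo_pm_notifier,
};
/* registered with register_pm_notifier(&demo_pm_nb) at driver init */

While the flag is set, the driver's own frequency-set callback simply refuses the transition, which is exactly what the s5pv210 hunk does with no_cpufreq_access.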
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index cff18e87ca58..b0da1fe40b1d 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c | |||
@@ -177,36 +177,20 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed) | |||
177 | } | 177 | } |
178 | } | 178 | } |
179 | 179 | ||
180 | static int sa1100_target(struct cpufreq_policy *policy, | 180 | static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) |
181 | unsigned int target_freq, | ||
182 | unsigned int relation) | ||
183 | { | 181 | { |
184 | unsigned int cur = sa11x0_getspeed(0); | 182 | unsigned int cur = sa11x0_getspeed(0); |
185 | unsigned int new_ppcr; | ||
186 | struct cpufreq_freqs freqs; | 183 | struct cpufreq_freqs freqs; |
187 | 184 | ||
188 | new_ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
189 | switch (relation) { | ||
190 | case CPUFREQ_RELATION_L: | ||
191 | if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max) | ||
192 | new_ppcr--; | ||
193 | break; | ||
194 | case CPUFREQ_RELATION_H: | ||
195 | if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) && | ||
196 | (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min)) | ||
197 | new_ppcr--; | ||
198 | break; | ||
199 | } | ||
200 | |||
201 | freqs.old = cur; | 185 | freqs.old = cur; |
202 | freqs.new = sa11x0_ppcr_to_freq(new_ppcr); | 186 | freqs.new = sa11x0_freq_table[ppcr].frequency; |
203 | 187 | ||
204 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 188 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
205 | 189 | ||
206 | if (freqs.new > cur) | 190 | if (freqs.new > cur) |
207 | sa1100_update_dram_timings(cur, freqs.new); | 191 | sa1100_update_dram_timings(cur, freqs.new); |
208 | 192 | ||
209 | PPCR = new_ppcr; | 193 | PPCR = ppcr; |
210 | 194 | ||
211 | if (freqs.new < cur) | 195 | if (freqs.new < cur) |
212 | sa1100_update_dram_timings(cur, freqs.new); | 196 | sa1100_update_dram_timings(cur, freqs.new); |
@@ -218,19 +202,13 @@ static int sa1100_target(struct cpufreq_policy *policy, | |||
218 | 202 | ||
219 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) | 203 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) |
220 | { | 204 | { |
221 | if (policy->cpu != 0) | 205 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); |
222 | return -EINVAL; | ||
223 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); | ||
224 | policy->cpuinfo.min_freq = 59000; | ||
225 | policy->cpuinfo.max_freq = 287000; | ||
226 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
227 | return 0; | ||
228 | } | 206 | } |
229 | 207 | ||
230 | static struct cpufreq_driver sa1100_driver __refdata = { | 208 | static struct cpufreq_driver sa1100_driver __refdata = { |
231 | .flags = CPUFREQ_STICKY, | 209 | .flags = CPUFREQ_STICKY, |
232 | .verify = sa11x0_verify_speed, | 210 | .verify = cpufreq_generic_frequency_table_verify, |
233 | .target = sa1100_target, | 211 | .target_index = sa1100_target, |
234 | .get = sa11x0_getspeed, | 212 | .get = sa11x0_getspeed, |
235 | .init = sa1100_cpu_init, | 213 | .init = sa1100_cpu_init, |
236 | .name = "sa1100", | 214 | .name = "sa1100", |
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 39c90b6f4286..55b1818c3e49 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c | |||
@@ -229,34 +229,16 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram) | |||
229 | /* | 229 | /* |
230 | * Ok, set the CPU frequency. | 230 | * Ok, set the CPU frequency. |
231 | */ | 231 | */ |
232 | static int sa1110_target(struct cpufreq_policy *policy, | 232 | static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) |
233 | unsigned int target_freq, | ||
234 | unsigned int relation) | ||
235 | { | 233 | { |
236 | struct sdram_params *sdram = &sdram_params; | 234 | struct sdram_params *sdram = &sdram_params; |
237 | struct cpufreq_freqs freqs; | 235 | struct cpufreq_freqs freqs; |
238 | struct sdram_info sd; | 236 | struct sdram_info sd; |
239 | unsigned long flags; | 237 | unsigned long flags; |
240 | unsigned int ppcr, unused; | 238 | unsigned int unused; |
241 | |||
242 | switch (relation) { | ||
243 | case CPUFREQ_RELATION_L: | ||
244 | ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
245 | if (sa11x0_ppcr_to_freq(ppcr) > policy->max) | ||
246 | ppcr--; | ||
247 | break; | ||
248 | case CPUFREQ_RELATION_H: | ||
249 | ppcr = sa11x0_freq_to_ppcr(target_freq); | ||
250 | if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) && | ||
251 | (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min)) | ||
252 | ppcr--; | ||
253 | break; | ||
254 | default: | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | 239 | ||
258 | freqs.old = sa11x0_getspeed(0); | 240 | freqs.old = sa11x0_getspeed(0); |
259 | freqs.new = sa11x0_ppcr_to_freq(ppcr); | 241 | freqs.new = sa11x0_freq_table[ppcr].frequency; |
260 | 242 | ||
261 | sdram_calculate_timing(&sd, freqs.new, sdram); | 243 | sdram_calculate_timing(&sd, freqs.new, sdram); |
262 | 244 | ||
@@ -332,21 +314,15 @@ static int sa1110_target(struct cpufreq_policy *policy, | |||
332 | 314 | ||
333 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) | 315 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) |
334 | { | 316 | { |
335 | if (policy->cpu != 0) | 317 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); |
336 | return -EINVAL; | ||
337 | policy->cur = policy->min = policy->max = sa11x0_getspeed(0); | ||
338 | policy->cpuinfo.min_freq = 59000; | ||
339 | policy->cpuinfo.max_freq = 287000; | ||
340 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
341 | return 0; | ||
342 | } | 318 | } |
343 | 319 | ||
344 | /* sa1110_driver needs __refdata because it must remain after init registers | 320 | /* sa1110_driver needs __refdata because it must remain after init registers |
345 | * it with cpufreq_register_driver() */ | 321 | * it with cpufreq_register_driver() */ |
346 | static struct cpufreq_driver sa1110_driver __refdata = { | 322 | static struct cpufreq_driver sa1110_driver __refdata = { |
347 | .flags = CPUFREQ_STICKY, | 323 | .flags = CPUFREQ_STICKY, |
348 | .verify = sa11x0_verify_speed, | 324 | .verify = cpufreq_generic_frequency_table_verify, |
349 | .target = sa1110_target, | 325 | .target_index = sa1110_target, |
350 | .get = sa11x0_getspeed, | 326 | .get = sa11x0_getspeed, |
351 | .init = sa1110_cpu_init, | 327 | .init = sa1110_cpu_init, |
352 | .name = "sa1110", | 328 | .name = "sa1110", |
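Both SA-11x0 drivers above lose their open-coded CPUFREQ_RELATION_L/H rounding because a ->target_index() driver never sees a raw target frequency: the core has already picked a table slot, and for these drivers the slot number is also the PPCR divider to program. When a driver still needs to map a frequency to a slot itself (as the s5pv210 hunk does to find the current level), the table helper handles the relation. A hedged fragment showing that call, with demo_ names standing in for driver specifics:

#include <linux/cpufreq.h>

/* Illustrative: resolve a frequency to a table index the way the core
 * does before invoking ->target_index(). */
static int demo_freq_to_index(struct cpufreq_policy *policy,
                              struct cpufreq_frequency_table *table,
                              unsigned int freq, unsigned int *index)
{
        /* RELATION_L: lowest frequency at or above 'freq' within policy limits */
        return cpufreq_frequency_table_target(policy, table, freq,
                                              CPUFREQ_RELATION_L, index);
}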
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index d6f6c6f4efa7..6c86452e1737 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c | |||
@@ -53,8 +53,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) | |||
53 | } | 53 | } |
54 | } | 54 | } |
55 | 55 | ||
56 | static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, | 56 | static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state) |
57 | unsigned int state) | ||
58 | { | 57 | { |
59 | 58 | ||
60 | struct cpufreq_freqs freqs; | 59 | struct cpufreq_freqs freqs; |
@@ -76,29 +75,10 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy, | |||
76 | local_irq_enable(); | 75 | local_irq_enable(); |
77 | 76 | ||
78 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 77 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
79 | }; | ||
80 | |||
81 | static int sc520_freq_verify(struct cpufreq_policy *policy) | ||
82 | { | ||
83 | return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); | ||
84 | } | ||
85 | |||
86 | static int sc520_freq_target(struct cpufreq_policy *policy, | ||
87 | unsigned int target_freq, | ||
88 | unsigned int relation) | ||
89 | { | ||
90 | unsigned int newstate = 0; | ||
91 | |||
92 | if (cpufreq_frequency_table_target(policy, sc520_freq_table, | ||
93 | target_freq, relation, &newstate)) | ||
94 | return -EINVAL; | ||
95 | |||
96 | sc520_freq_set_cpu_state(policy, newstate); | ||
97 | 78 | ||
98 | return 0; | 79 | return 0; |
99 | } | 80 | } |
100 | 81 | ||
101 | |||
102 | /* | 82 | /* |
103 | * Module init and exit code | 83 | * Module init and exit code |
104 | */ | 84 | */ |
@@ -106,7 +86,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy, | |||
106 | static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | 86 | static int sc520_freq_cpu_init(struct cpufreq_policy *policy) |
107 | { | 87 | { |
108 | struct cpuinfo_x86 *c = &cpu_data(0); | 88 | struct cpuinfo_x86 *c = &cpu_data(0); |
109 | int result; | ||
110 | 89 | ||
111 | /* capability check */ | 90 | /* capability check */ |
112 | if (c->x86_vendor != X86_VENDOR_AMD || | 91 | if (c->x86_vendor != X86_VENDOR_AMD || |
@@ -115,39 +94,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) | |||
115 | 94 | ||
116 | /* cpuinfo and default policy values */ | 95 | /* cpuinfo and default policy values */ |
117 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ | 96 | policy->cpuinfo.transition_latency = 1000000; /* 1ms */ |
118 | policy->cur = sc520_freq_get_cpu_frequency(0); | ||
119 | |||
120 | result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); | ||
121 | if (result) | ||
122 | return result; | ||
123 | 97 | ||
124 | cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); | 98 | return cpufreq_table_validate_and_show(policy, sc520_freq_table); |
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | |||
130 | static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) | ||
131 | { | ||
132 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
133 | return 0; | ||
134 | } | 99 | } |
135 | 100 | ||
136 | 101 | ||
137 | static struct freq_attr *sc520_freq_attr[] = { | ||
138 | &cpufreq_freq_attr_scaling_available_freqs, | ||
139 | NULL, | ||
140 | }; | ||
141 | |||
142 | |||
143 | static struct cpufreq_driver sc520_freq_driver = { | 102 | static struct cpufreq_driver sc520_freq_driver = { |
144 | .get = sc520_freq_get_cpu_frequency, | 103 | .get = sc520_freq_get_cpu_frequency, |
145 | .verify = sc520_freq_verify, | 104 | .verify = cpufreq_generic_frequency_table_verify, |
146 | .target = sc520_freq_target, | 105 | .target_index = sc520_freq_target, |
147 | .init = sc520_freq_cpu_init, | 106 | .init = sc520_freq_cpu_init, |
148 | .exit = sc520_freq_cpu_exit, | 107 | .exit = cpufreq_generic_exit, |
149 | .name = "sc520_freq", | 108 | .name = "sc520_freq", |
150 | .attr = sc520_freq_attr, | 109 | .attr = cpufreq_generic_attr, |
151 | }; | 110 | }; |
152 | 111 | ||
153 | static const struct x86_cpu_id sc520_ids[] = { | 112 | static const struct x86_cpu_id sc520_ids[] = { |
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index ffc6d24b0cfb..387af12503a6 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c | |||
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy) | |||
87 | if (freq_table) | 87 | if (freq_table) |
88 | return cpufreq_frequency_table_verify(policy, freq_table); | 88 | return cpufreq_frequency_table_verify(policy, freq_table); |
89 | 89 | ||
90 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 90 | cpufreq_verify_within_cpu_limits(policy); |
91 | policy->cpuinfo.max_freq); | ||
92 | 91 | ||
93 | policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; | 92 | policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000; |
94 | policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 93 | policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
95 | 94 | ||
96 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 95 | cpufreq_verify_within_cpu_limits(policy); |
97 | policy->cpuinfo.max_freq); | ||
98 | |||
99 | return 0; | 96 | return 0; |
100 | } | 97 | } |
101 | 98 | ||
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
114 | return PTR_ERR(cpuclk); | 111 | return PTR_ERR(cpuclk); |
115 | } | 112 | } |
116 | 113 | ||
117 | policy->cur = sh_cpufreq_get(cpu); | ||
118 | |||
119 | freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; | 114 | freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL; |
120 | if (freq_table) { | 115 | if (freq_table) { |
121 | int result; | 116 | int result; |
122 | 117 | ||
123 | result = cpufreq_frequency_table_cpuinfo(policy, freq_table); | 118 | result = cpufreq_table_validate_and_show(policy, freq_table); |
124 | if (!result) | 119 | if (result) |
125 | cpufreq_frequency_table_get_attr(freq_table, cpu); | 120 | return result; |
126 | } else { | 121 | } else { |
127 | dev_notice(dev, "no frequency table found, falling back " | 122 | dev_notice(dev, "no frequency table found, falling back " |
128 | "to rate rounding.\n"); | 123 | "to rate rounding.\n"); |
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
154 | return 0; | 149 | return 0; |
155 | } | 150 | } |
156 | 151 | ||
157 | static struct freq_attr *sh_freq_attr[] = { | ||
158 | &cpufreq_freq_attr_scaling_available_freqs, | ||
159 | NULL, | ||
160 | }; | ||
161 | |||
162 | static struct cpufreq_driver sh_cpufreq_driver = { | 152 | static struct cpufreq_driver sh_cpufreq_driver = { |
163 | .name = "sh", | 153 | .name = "sh", |
164 | .get = sh_cpufreq_get, | 154 | .get = sh_cpufreq_get, |
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = { | |||
166 | .verify = sh_cpufreq_verify, | 156 | .verify = sh_cpufreq_verify, |
167 | .init = sh_cpufreq_cpu_init, | 157 | .init = sh_cpufreq_cpu_init, |
168 | .exit = sh_cpufreq_cpu_exit, | 158 | .exit = sh_cpufreq_cpu_exit, |
169 | .attr = sh_freq_attr, | 159 | .attr = cpufreq_generic_attr, |
170 | }; | 160 | }; |
171 | 161 | ||
172 | static int __init sh_cpufreq_module_init(void) | 162 | static int __init sh_cpufreq_module_init(void) |
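sh-cpufreq keeps a custom ->verify() because it may run without a frequency table, but its explicit cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq) calls collapse into the new wrapper. As an approximation of that one-line helper (shown here only to make the substitution obvious):

/* approximation of the 3.13-era inline helper in <linux/cpufreq.h> */
static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
}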
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c index cf5bc2ca16fa..3bf5b8f03661 100644 --- a/drivers/cpufreq/sparc-us2e-cpufreq.c +++ b/drivers/cpufreq/sparc-us2e-cpufreq.c | |||
@@ -245,8 +245,7 @@ static unsigned int us2e_freq_get(unsigned int cpu) | |||
245 | return clock_tick / estar_to_divisor(estar); | 245 | return clock_tick / estar_to_divisor(estar); |
246 | } | 246 | } |
247 | 247 | ||
248 | static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, | 248 | static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) |
249 | unsigned int index) | ||
250 | { | 249 | { |
251 | unsigned int cpu = policy->cpu; | 250 | unsigned int cpu = policy->cpu; |
252 | unsigned long new_bits, new_freq; | 251 | unsigned long new_bits, new_freq; |
@@ -277,30 +276,10 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy, | |||
277 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 276 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
278 | 277 | ||
279 | set_cpus_allowed_ptr(current, &cpus_allowed); | 278 | set_cpus_allowed_ptr(current, &cpus_allowed); |
280 | } | ||
281 | |||
282 | static int us2e_freq_target(struct cpufreq_policy *policy, | ||
283 | unsigned int target_freq, | ||
284 | unsigned int relation) | ||
285 | { | ||
286 | unsigned int new_index = 0; | ||
287 | |||
288 | if (cpufreq_frequency_table_target(policy, | ||
289 | &us2e_freq_table[policy->cpu].table[0], | ||
290 | target_freq, relation, &new_index)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | us2e_set_cpu_divider_index(policy, new_index); | ||
294 | 279 | ||
295 | return 0; | 280 | return 0; |
296 | } | 281 | } |
297 | 282 | ||
298 | static int us2e_freq_verify(struct cpufreq_policy *policy) | ||
299 | { | ||
300 | return cpufreq_frequency_table_verify(policy, | ||
301 | &us2e_freq_table[policy->cpu].table[0]); | ||
302 | } | ||
303 | |||
304 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | 283 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) |
305 | { | 284 | { |
306 | unsigned int cpu = policy->cpu; | 285 | unsigned int cpu = policy->cpu; |
@@ -324,13 +303,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | |||
324 | policy->cpuinfo.transition_latency = 0; | 303 | policy->cpuinfo.transition_latency = 0; |
325 | policy->cur = clock_tick; | 304 | policy->cur = clock_tick; |
326 | 305 | ||
327 | return cpufreq_frequency_table_cpuinfo(policy, table); | 306 | return cpufreq_table_validate_and_show(policy, table); |
328 | } | 307 | } |
329 | 308 | ||
330 | static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) | 309 | static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) |
331 | { | 310 | { |
332 | if (cpufreq_us2e_driver) | 311 | if (cpufreq_us2e_driver) { |
333 | us2e_set_cpu_divider_index(policy, 0); | 312 | cpufreq_frequency_table_put_attr(policy->cpu); |
313 | us2e_freq_target(policy, 0); | ||
314 | } | ||
334 | 315 | ||
335 | return 0; | 316 | return 0; |
336 | } | 317 | } |
@@ -361,8 +342,8 @@ static int __init us2e_freq_init(void) | |||
361 | goto err_out; | 342 | goto err_out; |
362 | 343 | ||
363 | driver->init = us2e_freq_cpu_init; | 344 | driver->init = us2e_freq_cpu_init; |
364 | driver->verify = us2e_freq_verify; | 345 | driver->verify = cpufreq_generic_frequency_table_verify; |
365 | driver->target = us2e_freq_target; | 346 | driver->target_index = us2e_freq_target; |
366 | driver->get = us2e_freq_get; | 347 | driver->get = us2e_freq_get; |
367 | driver->exit = us2e_freq_cpu_exit; | 348 | driver->exit = us2e_freq_cpu_exit; |
368 | strcpy(driver->name, "UltraSPARC-IIe"); | 349 | strcpy(driver->name, "UltraSPARC-IIe"); |
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c index ac76b489979d..2e54d55915df 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c | |||
@@ -93,8 +93,7 @@ static unsigned int us3_freq_get(unsigned int cpu) | |||
93 | return ret; | 93 | return ret; |
94 | } | 94 | } |
95 | 95 | ||
96 | static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, | 96 | static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) |
97 | unsigned int index) | ||
98 | { | 97 | { |
99 | unsigned int cpu = policy->cpu; | 98 | unsigned int cpu = policy->cpu; |
100 | unsigned long new_bits, new_freq, reg; | 99 | unsigned long new_bits, new_freq, reg; |
@@ -136,32 +135,10 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy, | |||
136 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 135 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
137 | 136 | ||
138 | set_cpus_allowed_ptr(current, &cpus_allowed); | 137 | set_cpus_allowed_ptr(current, &cpus_allowed); |
139 | } | ||
140 | |||
141 | static int us3_freq_target(struct cpufreq_policy *policy, | ||
142 | unsigned int target_freq, | ||
143 | unsigned int relation) | ||
144 | { | ||
145 | unsigned int new_index = 0; | ||
146 | |||
147 | if (cpufreq_frequency_table_target(policy, | ||
148 | &us3_freq_table[policy->cpu].table[0], | ||
149 | target_freq, | ||
150 | relation, | ||
151 | &new_index)) | ||
152 | return -EINVAL; | ||
153 | |||
154 | us3_set_cpu_divider_index(policy, new_index); | ||
155 | 138 | ||
156 | return 0; | 139 | return 0; |
157 | } | 140 | } |
158 | 141 | ||
159 | static int us3_freq_verify(struct cpufreq_policy *policy) | ||
160 | { | ||
161 | return cpufreq_frequency_table_verify(policy, | ||
162 | &us3_freq_table[policy->cpu].table[0]); | ||
163 | } | ||
164 | |||
165 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) | 142 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) |
166 | { | 143 | { |
167 | unsigned int cpu = policy->cpu; | 144 | unsigned int cpu = policy->cpu; |
@@ -181,13 +158,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) | |||
181 | policy->cpuinfo.transition_latency = 0; | 158 | policy->cpuinfo.transition_latency = 0; |
182 | policy->cur = clock_tick; | 159 | policy->cur = clock_tick; |
183 | 160 | ||
184 | return cpufreq_frequency_table_cpuinfo(policy, table); | 161 | return cpufreq_table_validate_and_show(policy, table); |
185 | } | 162 | } |
186 | 163 | ||
187 | static int us3_freq_cpu_exit(struct cpufreq_policy *policy) | 164 | static int us3_freq_cpu_exit(struct cpufreq_policy *policy) |
188 | { | 165 | { |
189 | if (cpufreq_us3_driver) | 166 | if (cpufreq_us3_driver) { |
190 | us3_set_cpu_divider_index(policy, 0); | 167 | cpufreq_frequency_table_put_attr(policy->cpu); |
168 | us3_freq_target(policy, 0); | ||
169 | } | ||
191 | 170 | ||
192 | return 0; | 171 | return 0; |
193 | } | 172 | } |
@@ -222,8 +201,8 @@ static int __init us3_freq_init(void) | |||
222 | goto err_out; | 201 | goto err_out; |
223 | 202 | ||
224 | driver->init = us3_freq_cpu_init; | 203 | driver->init = us3_freq_cpu_init; |
225 | driver->verify = us3_freq_verify; | 204 | driver->verify = cpufreq_generic_frequency_table_verify; |
226 | driver->target = us3_freq_target; | 205 | driver->target_index = us3_freq_target; |
227 | driver->get = us3_freq_get; | 206 | driver->get = us3_freq_get; |
228 | driver->exit = us3_freq_cpu_exit; | 207 | driver->exit = us3_freq_cpu_exit; |
229 | strcpy(driver->name, "UltraSPARC-III"); | 208 | strcpy(driver->name, "UltraSPARC-III"); |
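In both UltraSPARC drivers above the old void "set divider" helper becomes the ->target_index() callback itself, so the exit path can reuse it to drop back to full speed (index 0) and must now also release the sysfs table attribute that cpufreq_table_validate_and_show() registered. A condensed sketch of that exit shape, with us_demo_* as placeholder names and the divider programming stubbed out:

#include <linux/cpufreq.h>

static int us_demo_target(struct cpufreq_policy *policy, unsigned int index)
{
        /* program the CPU clock divider for table entry 'index' here */
        return 0;
}

static int us_demo_cpu_exit(struct cpufreq_policy *policy)
{
        /* undo cpufreq_table_validate_and_show() ... */
        cpufreq_frequency_table_put_attr(policy->cpu);
        /* ... and park the CPU at the first (full-speed) table entry */
        return us_demo_target(policy, 0);
}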
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 3f418166ce02..11a65be3fd76 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -30,11 +30,6 @@ static struct { | |||
30 | u32 cnt; | 30 | u32 cnt; |
31 | } spear_cpufreq; | 31 | } spear_cpufreq; |
32 | 32 | ||
33 | static int spear_cpufreq_verify(struct cpufreq_policy *policy) | ||
34 | { | ||
35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); | ||
36 | } | ||
37 | |||
38 | static unsigned int spear_cpufreq_get(unsigned int cpu) | 33 | static unsigned int spear_cpufreq_get(unsigned int cpu) |
39 | { | 34 | { |
40 | return clk_get_rate(spear_cpufreq.clk) / 1000; | 35 | return clk_get_rate(spear_cpufreq.clk) / 1000; |
@@ -110,20 +105,16 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) | |||
110 | } | 105 | } |
111 | 106 | ||
112 | static int spear_cpufreq_target(struct cpufreq_policy *policy, | 107 | static int spear_cpufreq_target(struct cpufreq_policy *policy, |
113 | unsigned int target_freq, unsigned int relation) | 108 | unsigned int index) |
114 | { | 109 | { |
115 | struct cpufreq_freqs freqs; | 110 | struct cpufreq_freqs freqs; |
116 | long newfreq; | 111 | long newfreq; |
117 | struct clk *srcclk; | 112 | struct clk *srcclk; |
118 | int index, ret, mult = 1; | 113 | int ret, mult = 1; |
119 | |||
120 | if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, | ||
121 | target_freq, relation, &index)) | ||
122 | return -EINVAL; | ||
123 | 114 | ||
124 | freqs.old = spear_cpufreq_get(0); | 115 | freqs.old = spear_cpufreq_get(0); |
125 | |||
126 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; | 116 | newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; |
117 | |||
127 | if (of_machine_is_compatible("st,spear1340")) { | 118 | if (of_machine_is_compatible("st,spear1340")) { |
128 | /* | 119 | /* |
129 | * SPEAr1340 is special in the sense that due to the possibility | 120 | * SPEAr1340 is special in the sense that due to the possibility |
@@ -176,43 +167,19 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, | |||
176 | 167 | ||
177 | static int spear_cpufreq_init(struct cpufreq_policy *policy) | 168 | static int spear_cpufreq_init(struct cpufreq_policy *policy) |
178 | { | 169 | { |
179 | int ret; | 170 | return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, |
180 | 171 | spear_cpufreq.transition_latency); | |
181 | ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); | ||
182 | if (ret) { | ||
183 | pr_err("cpufreq_frequency_table_cpuinfo() failed"); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); | ||
188 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; | ||
189 | policy->cur = spear_cpufreq_get(0); | ||
190 | |||
191 | cpumask_setall(policy->cpus); | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static int spear_cpufreq_exit(struct cpufreq_policy *policy) | ||
197 | { | ||
198 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
199 | return 0; | ||
200 | } | 172 | } |
201 | 173 | ||
202 | static struct freq_attr *spear_cpufreq_attr[] = { | ||
203 | &cpufreq_freq_attr_scaling_available_freqs, | ||
204 | NULL, | ||
205 | }; | ||
206 | |||
207 | static struct cpufreq_driver spear_cpufreq_driver = { | 174 | static struct cpufreq_driver spear_cpufreq_driver = { |
208 | .name = "cpufreq-spear", | 175 | .name = "cpufreq-spear", |
209 | .flags = CPUFREQ_STICKY, | 176 | .flags = CPUFREQ_STICKY, |
210 | .verify = spear_cpufreq_verify, | 177 | .verify = cpufreq_generic_frequency_table_verify, |
211 | .target = spear_cpufreq_target, | 178 | .target_index = spear_cpufreq_target, |
212 | .get = spear_cpufreq_get, | 179 | .get = spear_cpufreq_get, |
213 | .init = spear_cpufreq_init, | 180 | .init = spear_cpufreq_init, |
214 | .exit = spear_cpufreq_exit, | 181 | .exit = cpufreq_generic_exit, |
215 | .attr = spear_cpufreq_attr, | 182 | .attr = cpufreq_generic_attr, |
216 | }; | 183 | }; |
217 | 184 | ||
218 | static int spear_cpufreq_driver_init(void) | 185 | static int spear_cpufreq_driver_init(void) |
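The SPEAr driver's hand-rolled exit hook and freq_attr array above are typical of what cpufreq_generic_exit() and cpufreq_generic_attr replace across this series. Roughly — these are approximations of the 3.13-era definitions in the cpufreq core, shown for orientation only — they amount to:

struct freq_attr *cpufreq_generic_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

int cpufreq_generic_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
}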
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index f897d5105842..c51ec8c0e3a8 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c | |||
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu) | |||
343 | static int centrino_cpu_init(struct cpufreq_policy *policy) | 343 | static int centrino_cpu_init(struct cpufreq_policy *policy) |
344 | { | 344 | { |
345 | struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); | 345 | struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); |
346 | unsigned freq; | ||
347 | unsigned l, h; | 346 | unsigned l, h; |
348 | int ret; | ||
349 | int i; | 347 | int i; |
350 | 348 | ||
351 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ | 349 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ |
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
373 | return -ENODEV; | 371 | return -ENODEV; |
374 | } | 372 | } |
375 | 373 | ||
376 | if (centrino_cpu_init_table(policy)) { | 374 | if (centrino_cpu_init_table(policy)) |
377 | return -ENODEV; | 375 | return -ENODEV; |
378 | } | ||
379 | 376 | ||
380 | /* Check to see if Enhanced SpeedStep is enabled, and try to | 377 | /* Check to see if Enhanced SpeedStep is enabled, and try to |
381 | enable it if not. */ | 378 | enable it if not. */ |
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
395 | } | 392 | } |
396 | } | 393 | } |
397 | 394 | ||
398 | freq = get_cur_freq(policy->cpu); | ||
399 | policy->cpuinfo.transition_latency = 10000; | 395 | policy->cpuinfo.transition_latency = 10000; |
400 | /* 10uS transition latency */ | 396 | /* 10uS transition latency */ |
401 | policy->cur = freq; | ||
402 | |||
403 | pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); | ||
404 | 397 | ||
405 | ret = cpufreq_frequency_table_cpuinfo(policy, | 398 | return cpufreq_table_validate_and_show(policy, |
406 | per_cpu(centrino_model, policy->cpu)->op_points); | 399 | per_cpu(centrino_model, policy->cpu)->op_points); |
407 | if (ret) | ||
408 | return (ret); | ||
409 | |||
410 | cpufreq_frequency_table_get_attr( | ||
411 | per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu); | ||
412 | |||
413 | return 0; | ||
414 | } | 400 | } |
415 | 401 | ||
416 | static int centrino_cpu_exit(struct cpufreq_policy *policy) | 402 | static int centrino_cpu_exit(struct cpufreq_policy *policy) |
@@ -428,36 +414,19 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) | |||
428 | } | 414 | } |
429 | 415 | ||
430 | /** | 416 | /** |
431 | * centrino_verify - verifies a new CPUFreq policy | ||
432 | * @policy: new policy | ||
433 | * | ||
434 | * Limit must be within this model's frequency range at least one | ||
435 | * border included. | ||
436 | */ | ||
437 | static int centrino_verify (struct cpufreq_policy *policy) | ||
438 | { | ||
439 | return cpufreq_frequency_table_verify(policy, | ||
440 | per_cpu(centrino_model, policy->cpu)->op_points); | ||
441 | } | ||
442 | |||
443 | /** | ||
444 | * centrino_target - set a new CPUFreq policy | 417 | * centrino_target - set a new CPUFreq policy |
445 | * @policy: new policy | 418 | * @policy: new policy |
446 | * @target_freq: the target frequency | 419 | * @index: index of target frequency |
447 | * @relation: how that frequency relates to achieved frequency | ||
448 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
449 | * | 420 | * |
450 | * Sets a new CPUFreq policy. | 421 | * Sets a new CPUFreq policy. |
451 | */ | 422 | */ |
452 | static int centrino_target (struct cpufreq_policy *policy, | 423 | static int centrino_target(struct cpufreq_policy *policy, unsigned int index) |
453 | unsigned int target_freq, | ||
454 | unsigned int relation) | ||
455 | { | 424 | { |
456 | unsigned int newstate = 0; | ||
457 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; | 425 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; |
458 | struct cpufreq_freqs freqs; | 426 | struct cpufreq_freqs freqs; |
459 | int retval = 0; | 427 | int retval = 0; |
460 | unsigned int j, first_cpu, tmp; | 428 | unsigned int j, first_cpu, tmp; |
429 | struct cpufreq_frequency_table *op_points; | ||
461 | cpumask_var_t covered_cpus; | 430 | cpumask_var_t covered_cpus; |
462 | 431 | ||
463 | if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) | 432 | if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) |
@@ -468,16 +437,8 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
468 | goto out; | 437 | goto out; |
469 | } | 438 | } |
470 | 439 | ||
471 | if (unlikely(cpufreq_frequency_table_target(policy, | ||
472 | per_cpu(centrino_model, cpu)->op_points, | ||
473 | target_freq, | ||
474 | relation, | ||
475 | &newstate))) { | ||
476 | retval = -EINVAL; | ||
477 | goto out; | ||
478 | } | ||
479 | |||
480 | first_cpu = 1; | 440 | first_cpu = 1; |
441 | op_points = &per_cpu(centrino_model, cpu)->op_points[index]; | ||
481 | for_each_cpu(j, policy->cpus) { | 442 | for_each_cpu(j, policy->cpus) { |
482 | int good_cpu; | 443 | int good_cpu; |
483 | 444 | ||
@@ -501,7 +462,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
501 | break; | 462 | break; |
502 | } | 463 | } |
503 | 464 | ||
504 | msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data; | 465 | msr = op_points->driver_data; |
505 | 466 | ||
506 | if (first_cpu) { | 467 | if (first_cpu) { |
507 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); | 468 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); |
@@ -516,7 +477,8 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
516 | freqs.new = extract_clock(msr, cpu, 0); | 477 | freqs.new = extract_clock(msr, cpu, 0); |
517 | 478 | ||
518 | pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", | 479 | pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", |
519 | target_freq, freqs.old, freqs.new, msr); | 480 | op_points->frequency, freqs.old, freqs.new, |
481 | msr); | ||
520 | 482 | ||
521 | cpufreq_notify_transition(policy, &freqs, | 483 | cpufreq_notify_transition(policy, &freqs, |
522 | CPUFREQ_PRECHANGE); | 484 | CPUFREQ_PRECHANGE); |
@@ -561,20 +523,15 @@ out: | |||
561 | return retval; | 523 | return retval; |
562 | } | 524 | } |
563 | 525 | ||
564 | static struct freq_attr* centrino_attr[] = { | ||
565 | &cpufreq_freq_attr_scaling_available_freqs, | ||
566 | NULL, | ||
567 | }; | ||
568 | |||
569 | static struct cpufreq_driver centrino_driver = { | 526 | static struct cpufreq_driver centrino_driver = { |
570 | .name = "centrino", /* should be speedstep-centrino, | 527 | .name = "centrino", /* should be speedstep-centrino, |
571 | but there's a 16 char limit */ | 528 | but there's a 16 char limit */ |
572 | .init = centrino_cpu_init, | 529 | .init = centrino_cpu_init, |
573 | .exit = centrino_cpu_exit, | 530 | .exit = centrino_cpu_exit, |
574 | .verify = centrino_verify, | 531 | .verify = cpufreq_generic_frequency_table_verify, |
575 | .target = centrino_target, | 532 | .target_index = centrino_target, |
576 | .get = get_cur_freq, | 533 | .get = get_cur_freq, |
577 | .attr = centrino_attr, | 534 | .attr = cpufreq_generic_attr, |
578 | }; | 535 | }; |
579 | 536 | ||
580 | /* | 537 | /* |
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 5355abb69afc..707721ebb853 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
@@ -251,36 +251,24 @@ static unsigned int speedstep_get(unsigned int cpu) | |||
251 | /** | 251 | /** |
252 | * speedstep_target - set a new CPUFreq policy | 252 | * speedstep_target - set a new CPUFreq policy |
253 | * @policy: new policy | 253 | * @policy: new policy |
254 | * @target_freq: the target frequency | 254 | * @index: index of target frequency |
255 | * @relation: how that frequency relates to achieved frequency | ||
256 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
257 | * | 255 | * |
258 | * Sets a new CPUFreq policy. | 256 | * Sets a new CPUFreq policy. |
259 | */ | 257 | */ |
260 | static int speedstep_target(struct cpufreq_policy *policy, | 258 | static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) |
261 | unsigned int target_freq, | ||
262 | unsigned int relation) | ||
263 | { | 259 | { |
264 | unsigned int newstate = 0, policy_cpu; | 260 | unsigned int policy_cpu; |
265 | struct cpufreq_freqs freqs; | 261 | struct cpufreq_freqs freqs; |
266 | 262 | ||
267 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], | ||
268 | target_freq, relation, &newstate)) | ||
269 | return -EINVAL; | ||
270 | |||
271 | policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); | 263 | policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); |
272 | freqs.old = speedstep_get(policy_cpu); | 264 | freqs.old = speedstep_get(policy_cpu); |
273 | freqs.new = speedstep_freqs[newstate].frequency; | 265 | freqs.new = speedstep_freqs[index].frequency; |
274 | 266 | ||
275 | pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); | 267 | pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); |
276 | 268 | ||
277 | /* no transition necessary */ | ||
278 | if (freqs.old == freqs.new) | ||
279 | return 0; | ||
280 | |||
281 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 269 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
282 | 270 | ||
283 | smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, | 271 | smp_call_function_single(policy_cpu, _speedstep_set_state, &index, |
284 | true); | 272 | true); |
285 | 273 | ||
286 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 274 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
@@ -289,18 +277,6 @@ static int speedstep_target(struct cpufreq_policy *policy, | |||
289 | } | 277 | } |
290 | 278 | ||
291 | 279 | ||
292 | /** | ||
293 | * speedstep_verify - verifies a new CPUFreq policy | ||
294 | * @policy: new policy | ||
295 | * | ||
296 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
297 | * at least one border included. | ||
298 | */ | ||
299 | static int speedstep_verify(struct cpufreq_policy *policy) | ||
300 | { | ||
301 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
302 | } | ||
303 | |||
304 | struct get_freqs { | 280 | struct get_freqs { |
305 | struct cpufreq_policy *policy; | 281 | struct cpufreq_policy *policy; |
306 | int ret; | 282 | int ret; |
@@ -320,8 +296,7 @@ static void get_freqs_on_cpu(void *_get_freqs) | |||
320 | 296 | ||
321 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | 297 | static int speedstep_cpu_init(struct cpufreq_policy *policy) |
322 | { | 298 | { |
323 | int result; | 299 | unsigned int policy_cpu; |
324 | unsigned int policy_cpu, speed; | ||
325 | struct get_freqs gf; | 300 | struct get_freqs gf; |
326 | 301 | ||
327 | /* only run on CPU to be set, or on its sibling */ | 302 | /* only run on CPU to be set, or on its sibling */ |
@@ -336,49 +311,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
336 | if (gf.ret) | 311 | if (gf.ret) |
337 | return gf.ret; | 312 | return gf.ret; |
338 | 313 | ||
339 | /* get current speed setting */ | 314 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); |
340 | speed = speedstep_get(policy_cpu); | ||
341 | if (!speed) | ||
342 | return -EIO; | ||
343 | |||
344 | pr_debug("currently at %s speed setting - %i MHz\n", | ||
345 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | ||
346 | ? "low" : "high", | ||
347 | (speed / 1000)); | ||
348 | |||
349 | /* cpuinfo and default policy values */ | ||
350 | policy->cur = speed; | ||
351 | |||
352 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
353 | if (result) | ||
354 | return result; | ||
355 | |||
356 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
357 | |||
358 | return 0; | ||
359 | } | 315 | } |
360 | 316 | ||
361 | 317 | ||
362 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
363 | { | ||
364 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static struct freq_attr *speedstep_attr[] = { | ||
369 | &cpufreq_freq_attr_scaling_available_freqs, | ||
370 | NULL, | ||
371 | }; | ||
372 | |||
373 | |||
374 | static struct cpufreq_driver speedstep_driver = { | 318 | static struct cpufreq_driver speedstep_driver = { |
375 | .name = "speedstep-ich", | 319 | .name = "speedstep-ich", |
376 | .verify = speedstep_verify, | 320 | .verify = cpufreq_generic_frequency_table_verify, |
377 | .target = speedstep_target, | 321 | .target_index = speedstep_target, |
378 | .init = speedstep_cpu_init, | 322 | .init = speedstep_cpu_init, |
379 | .exit = speedstep_cpu_exit, | 323 | .exit = cpufreq_generic_exit, |
380 | .get = speedstep_get, | 324 | .get = speedstep_get, |
381 | .attr = speedstep_attr, | 325 | .attr = cpufreq_generic_attr, |
382 | }; | 326 | }; |
383 | 327 | ||
384 | static const struct x86_cpu_id ss_smi_ids[] = { | 328 | static const struct x86_cpu_id ss_smi_ids[] = { |
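With ->target_index() the frequency-table lookup, the relation handling and the "no transition necessary" check all move into the cpufreq core, so the callback shrinks to the notification pair around the hardware switch. A rough sketch of the pattern the speedstep drivers now follow (hypothetical foo_* helpers, not tree code):

        static int foo_target(struct cpufreq_policy *policy, unsigned int index)
        {
                struct cpufreq_freqs freqs;

                freqs.old = foo_get(policy->cpu);               /* current speed in kHz */
                freqs.new = foo_freqs[index].frequency;         /* index already validated by the core */

                cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
                foo_set_state(index);                           /* program the hardware */
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

                return 0;
        }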
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index abfba4f731eb..19446e479ccc 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
@@ -235,52 +235,28 @@ static void speedstep_set_state(unsigned int state) | |||
235 | /** | 235 | /** |
236 | * speedstep_target - set a new CPUFreq policy | 236 | * speedstep_target - set a new CPUFreq policy |
237 | * @policy: new policy | 237 | * @policy: new policy |
238 | * @target_freq: new freq | 238 | * @index: index of new freq |
239 | * @relation: | ||
240 | * | 239 | * |
241 | * Sets a new CPUFreq policy/freq. | 240 | * Sets a new CPUFreq policy/freq. |
242 | */ | 241 | */ |
243 | static int speedstep_target(struct cpufreq_policy *policy, | 242 | static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) |
244 | unsigned int target_freq, unsigned int relation) | ||
245 | { | 243 | { |
246 | unsigned int newstate = 0; | ||
247 | struct cpufreq_freqs freqs; | 244 | struct cpufreq_freqs freqs; |
248 | 245 | ||
249 | if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], | ||
250 | target_freq, relation, &newstate)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | freqs.old = speedstep_freqs[speedstep_get_state()].frequency; | 246 | freqs.old = speedstep_freqs[speedstep_get_state()].frequency; |
254 | freqs.new = speedstep_freqs[newstate].frequency; | 247 | freqs.new = speedstep_freqs[index].frequency; |
255 | |||
256 | if (freqs.old == freqs.new) | ||
257 | return 0; | ||
258 | 248 | ||
259 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 249 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
260 | speedstep_set_state(newstate); | 250 | speedstep_set_state(index); |
261 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 251 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
262 | 252 | ||
263 | return 0; | 253 | return 0; |
264 | } | 254 | } |
265 | 255 | ||
266 | 256 | ||
267 | /** | ||
268 | * speedstep_verify - verifies a new CPUFreq policy | ||
269 | * @policy: new policy | ||
270 | * | ||
271 | * Limit must be within speedstep_low_freq and speedstep_high_freq, with | ||
272 | * at least one border included. | ||
273 | */ | ||
274 | static int speedstep_verify(struct cpufreq_policy *policy) | ||
275 | { | ||
276 | return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); | ||
277 | } | ||
278 | |||
279 | |||
280 | static int speedstep_cpu_init(struct cpufreq_policy *policy) | 257 | static int speedstep_cpu_init(struct cpufreq_policy *policy) |
281 | { | 258 | { |
282 | int result; | 259 | int result; |
283 | unsigned int speed, state; | ||
284 | unsigned int *low, *high; | 260 | unsigned int *low, *high; |
285 | 261 | ||
286 | /* capability check */ | 262 | /* capability check */ |
@@ -316,32 +292,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
316 | pr_debug("workaround worked.\n"); | 292 | pr_debug("workaround worked.\n"); |
317 | } | 293 | } |
318 | 294 | ||
319 | /* get current speed setting */ | ||
320 | state = speedstep_get_state(); | ||
321 | speed = speedstep_freqs[state].frequency; | ||
322 | |||
323 | pr_debug("currently at %s speed setting - %i MHz\n", | ||
324 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | ||
325 | ? "low" : "high", | ||
326 | (speed / 1000)); | ||
327 | |||
328 | /* cpuinfo and default policy values */ | ||
329 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 295 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
330 | policy->cur = speed; | 296 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); |
331 | |||
332 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | ||
333 | if (result) | ||
334 | return result; | ||
335 | |||
336 | cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | static int speedstep_cpu_exit(struct cpufreq_policy *policy) | ||
342 | { | ||
343 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
344 | return 0; | ||
345 | } | 297 | } |
346 | 298 | ||
347 | static unsigned int speedstep_get(unsigned int cpu) | 299 | static unsigned int speedstep_get(unsigned int cpu) |
@@ -362,20 +314,15 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
362 | return result; | 314 | return result; |
363 | } | 315 | } |
364 | 316 | ||
365 | static struct freq_attr *speedstep_attr[] = { | ||
366 | &cpufreq_freq_attr_scaling_available_freqs, | ||
367 | NULL, | ||
368 | }; | ||
369 | |||
370 | static struct cpufreq_driver speedstep_driver = { | 317 | static struct cpufreq_driver speedstep_driver = { |
371 | .name = "speedstep-smi", | 318 | .name = "speedstep-smi", |
372 | .verify = speedstep_verify, | 319 | .verify = cpufreq_generic_frequency_table_verify, |
373 | .target = speedstep_target, | 320 | .target_index = speedstep_target, |
374 | .init = speedstep_cpu_init, | 321 | .init = speedstep_cpu_init, |
375 | .exit = speedstep_cpu_exit, | 322 | .exit = cpufreq_generic_exit, |
376 | .get = speedstep_get, | 323 | .get = speedstep_get, |
377 | .resume = speedstep_resume, | 324 | .resume = speedstep_resume, |
378 | .attr = speedstep_attr, | 325 | .attr = cpufreq_generic_attr, |
379 | }; | 326 | }; |
380 | 327 | ||
381 | static const struct x86_cpu_id ss_smi_ids[] = { | 328 | static const struct x86_cpu_id ss_smi_ids[] = { |
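The init/exit paths shrink the same way: cpufreq_table_validate_and_show() takes over the cpuinfo setup, current-frequency bookkeeping and table attribute registration, and cpufreq_generic_exit() undoes it. A hedged sketch, again with a hypothetical foo driver:

        static int foo_cpu_init(struct cpufreq_policy *policy)
        {
                policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;  /* switch latency unknown */
                return cpufreq_table_validate_and_show(policy, foo_freqs);
        }

        /* .exit = cpufreq_generic_exit simply calls cpufreq_frequency_table_put_attr(policy->cpu) */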
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c index a7b876fdc1d8..bd7d89c013a5 100644 --- a/drivers/cpufreq/tegra-cpufreq.c +++ b/drivers/cpufreq/tegra-cpufreq.c | |||
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS]; | |||
51 | static DEFINE_MUTEX(tegra_cpu_lock); | 51 | static DEFINE_MUTEX(tegra_cpu_lock); |
52 | static bool is_suspended; | 52 | static bool is_suspended; |
53 | 53 | ||
54 | static int tegra_verify_speed(struct cpufreq_policy *policy) | ||
55 | { | ||
56 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
57 | } | ||
58 | |||
59 | static unsigned int tegra_getspeed(unsigned int cpu) | 54 | static unsigned int tegra_getspeed(unsigned int cpu) |
60 | { | 55 | { |
61 | unsigned long rate; | 56 | unsigned long rate; |
@@ -155,11 +150,8 @@ static unsigned long tegra_cpu_highest_speed(void) | |||
155 | return rate; | 150 | return rate; |
156 | } | 151 | } |
157 | 152 | ||
158 | static int tegra_target(struct cpufreq_policy *policy, | 153 | static int tegra_target(struct cpufreq_policy *policy, unsigned int index) |
159 | unsigned int target_freq, | ||
160 | unsigned int relation) | ||
161 | { | 154 | { |
162 | unsigned int idx; | ||
163 | unsigned int freq; | 155 | unsigned int freq; |
164 | int ret = 0; | 156 | int ret = 0; |
165 | 157 | ||
@@ -170,10 +162,7 @@ static int tegra_target(struct cpufreq_policy *policy, | |||
170 | goto out; | 162 | goto out; |
171 | } | 163 | } |
172 | 164 | ||
173 | cpufreq_frequency_table_target(policy, freq_table, target_freq, | 165 | freq = freq_table[index].frequency; |
174 | relation, &idx); | ||
175 | |||
176 | freq = freq_table[idx].frequency; | ||
177 | 166 | ||
178 | target_cpu_speed[policy->cpu] = freq; | 167 | target_cpu_speed[policy->cpu] = freq; |
179 | 168 | ||
@@ -209,21 +198,23 @@ static struct notifier_block tegra_cpu_pm_notifier = { | |||
209 | 198 | ||
210 | static int tegra_cpu_init(struct cpufreq_policy *policy) | 199 | static int tegra_cpu_init(struct cpufreq_policy *policy) |
211 | { | 200 | { |
201 | int ret; | ||
202 | |||
212 | if (policy->cpu >= NUM_CPUS) | 203 | if (policy->cpu >= NUM_CPUS) |
213 | return -EINVAL; | 204 | return -EINVAL; |
214 | 205 | ||
215 | clk_prepare_enable(emc_clk); | 206 | clk_prepare_enable(emc_clk); |
216 | clk_prepare_enable(cpu_clk); | 207 | clk_prepare_enable(cpu_clk); |
217 | 208 | ||
218 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 209 | target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu); |
219 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
220 | policy->cur = tegra_getspeed(policy->cpu); | ||
221 | target_cpu_speed[policy->cpu] = policy->cur; | ||
222 | 210 | ||
223 | /* FIXME: what's the actual transition time? */ | 211 | /* FIXME: what's the actual transition time? */ |
224 | policy->cpuinfo.transition_latency = 300 * 1000; | 212 | ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); |
225 | 213 | if (ret) { | |
226 | cpumask_copy(policy->cpus, cpu_possible_mask); | 214 | clk_disable_unprepare(cpu_clk); |
215 | clk_disable_unprepare(emc_clk); | ||
216 | return ret; | ||
217 | } | ||
227 | 218 | ||
228 | if (policy->cpu == 0) | 219 | if (policy->cpu == 0) |
229 | register_pm_notifier(&tegra_cpu_pm_notifier); | 220 | register_pm_notifier(&tegra_cpu_pm_notifier); |
@@ -233,24 +224,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) | |||
233 | 224 | ||
234 | static int tegra_cpu_exit(struct cpufreq_policy *policy) | 225 | static int tegra_cpu_exit(struct cpufreq_policy *policy) |
235 | { | 226 | { |
236 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 227 | cpufreq_frequency_table_put_attr(policy->cpu); |
228 | clk_disable_unprepare(cpu_clk); | ||
237 | clk_disable_unprepare(emc_clk); | 229 | clk_disable_unprepare(emc_clk); |
238 | return 0; | 230 | return 0; |
239 | } | 231 | } |
240 | 232 | ||
241 | static struct freq_attr *tegra_cpufreq_attr[] = { | ||
242 | &cpufreq_freq_attr_scaling_available_freqs, | ||
243 | NULL, | ||
244 | }; | ||
245 | |||
246 | static struct cpufreq_driver tegra_cpufreq_driver = { | 233 | static struct cpufreq_driver tegra_cpufreq_driver = { |
247 | .verify = tegra_verify_speed, | 234 | .verify = cpufreq_generic_frequency_table_verify, |
248 | .target = tegra_target, | 235 | .target_index = tegra_target, |
249 | .get = tegra_getspeed, | 236 | .get = tegra_getspeed, |
250 | .init = tegra_cpu_init, | 237 | .init = tegra_cpu_init, |
251 | .exit = tegra_cpu_exit, | 238 | .exit = tegra_cpu_exit, |
252 | .name = "tegra", | 239 | .name = "tegra", |
253 | .attr = tegra_cpufreq_attr, | 240 | .attr = cpufreq_generic_attr, |
254 | }; | 241 | }; |
255 | 242 | ||
256 | static int __init tegra_cpufreq_init(void) | 243 | static int __init tegra_cpufreq_init(void) |
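The tegra conversion additionally uses cpufreq_generic_init(), which validates the table, sets the transition latency and fills policy->cpus in one call, leaving only the driver-specific clock handling and the unwind on failure. A sketch under those assumptions (hypothetical foo_clk and foo_freq_table):

        static int foo_cpu_init(struct cpufreq_policy *policy)
        {
                int ret;

                clk_prepare_enable(foo_clk);            /* driver-specific resources first */

                /* validates the table, sets cpuinfo limits and latency, fills policy->cpus */
                ret = cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);  /* latency in ns */
                if (ret)
                        clk_disable_unprepare(foo_clk); /* unwind on failure */

                return ret;
        }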
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index b225f04d8ae5..653ae2955b55 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c | |||
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy) | |||
29 | if (policy->cpu) | 29 | if (policy->cpu) |
30 | return -EINVAL; | 30 | return -EINVAL; |
31 | 31 | ||
32 | cpufreq_verify_within_limits(policy, | 32 | cpufreq_verify_within_cpu_limits(policy); |
33 | policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); | ||
34 | |||
35 | return 0; | 33 | return 0; |
36 | } | 34 | } |
37 | 35 | ||
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy) | |||
68 | { | 66 | { |
69 | if (policy->cpu != 0) | 67 | if (policy->cpu != 0) |
70 | return -EINVAL; | 68 | return -EINVAL; |
71 | policy->cur = ucv2_getspeed(0); | ||
72 | policy->min = policy->cpuinfo.min_freq = 250000; | 69 | policy->min = policy->cpuinfo.min_freq = 250000; |
73 | policy->max = policy->cpuinfo.max_freq = 1000000; | 70 | policy->max = policy->cpuinfo.max_freq = 1000000; |
74 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 71 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
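For drivers without a frequency table, cpufreq_verify_within_cpu_limits() is shorthand for clamping the policy to cpuinfo.min_freq/max_freq, as the unicore2 hunk shows. A minimal sketch:

        static int foo_verify_speed(struct cpufreq_policy *policy)
        {
                /* equivalent to verifying against policy->cpuinfo.{min,max}_freq */
                cpufreq_verify_within_cpu_limits(policy);
                return 0;
        }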
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index c99c00d35d34..2e23b12c350b 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/stat.h> | 20 | #include <linux/stat.h> |
21 | #include <linux/opp.h> | 21 | #include <linux/pm_opp.h> |
22 | #include <linux/devfreq.h> | 22 | #include <linux/devfreq.h> |
23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d, | |||
902 | { | 902 | { |
903 | struct devfreq *df = to_devfreq(d); | 903 | struct devfreq *df = to_devfreq(d); |
904 | struct device *dev = df->dev.parent; | 904 | struct device *dev = df->dev.parent; |
905 | struct opp *opp; | 905 | struct dev_pm_opp *opp; |
906 | ssize_t count = 0; | 906 | ssize_t count = 0; |
907 | unsigned long freq = 0; | 907 | unsigned long freq = 0; |
908 | 908 | ||
909 | rcu_read_lock(); | 909 | rcu_read_lock(); |
910 | do { | 910 | do { |
911 | opp = opp_find_freq_ceil(dev, &freq); | 911 | opp = dev_pm_opp_find_freq_ceil(dev, &freq); |
912 | if (IS_ERR(opp)) | 912 | if (IS_ERR(opp)) |
913 | break; | 913 | break; |
914 | 914 | ||
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit); | |||
1029 | * under the locked area. The pointer returned must be used prior to unlocking | 1029 | * under the locked area. The pointer returned must be used prior to unlocking |
1030 | * with rcu_read_unlock() to maintain the integrity of the pointer. | 1030 | * with rcu_read_unlock() to maintain the integrity of the pointer. |
1031 | */ | 1031 | */ |
1032 | struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, | 1032 | struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, |
1033 | u32 flags) | 1033 | unsigned long *freq, |
1034 | u32 flags) | ||
1034 | { | 1035 | { |
1035 | struct opp *opp; | 1036 | struct dev_pm_opp *opp; |
1036 | 1037 | ||
1037 | if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { | 1038 | if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { |
1038 | /* The freq is an upper bound. opp should be lower */ | 1039 | /* The freq is an upper bound. opp should be lower */ |
1039 | opp = opp_find_freq_floor(dev, freq); | 1040 | opp = dev_pm_opp_find_freq_floor(dev, freq); |
1040 | 1041 | ||
1041 | /* If not available, use the closest opp */ | 1042 | /* If not available, use the closest opp */ |
1042 | if (opp == ERR_PTR(-ERANGE)) | 1043 | if (opp == ERR_PTR(-ERANGE)) |
1043 | opp = opp_find_freq_ceil(dev, freq); | 1044 | opp = dev_pm_opp_find_freq_ceil(dev, freq); |
1044 | } else { | 1045 | } else { |
1045 | /* The freq is an lower bound. opp should be higher */ | 1046 | /* The freq is an lower bound. opp should be higher */ |
1046 | opp = opp_find_freq_ceil(dev, freq); | 1047 | opp = dev_pm_opp_find_freq_ceil(dev, freq); |
1047 | 1048 | ||
1048 | /* If not available, use the closest opp */ | 1049 | /* If not available, use the closest opp */ |
1049 | if (opp == ERR_PTR(-ERANGE)) | 1050 | if (opp == ERR_PTR(-ERANGE)) |
1050 | opp = opp_find_freq_floor(dev, freq); | 1051 | opp = dev_pm_opp_find_freq_floor(dev, freq); |
1051 | } | 1052 | } |
1052 | 1053 | ||
1053 | return opp; | 1054 | return opp; |
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) | |||
1066 | int ret = 0; | 1067 | int ret = 0; |
1067 | 1068 | ||
1068 | rcu_read_lock(); | 1069 | rcu_read_lock(); |
1069 | nh = opp_get_notifier(dev); | 1070 | nh = dev_pm_opp_get_notifier(dev); |
1070 | if (IS_ERR(nh)) | 1071 | if (IS_ERR(nh)) |
1071 | ret = PTR_ERR(nh); | 1072 | ret = PTR_ERR(nh); |
1072 | rcu_read_unlock(); | 1073 | rcu_read_unlock(); |
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) | |||
1092 | int ret = 0; | 1093 | int ret = 0; |
1093 | 1094 | ||
1094 | rcu_read_lock(); | 1095 | rcu_read_lock(); |
1095 | nh = opp_get_notifier(dev); | 1096 | nh = dev_pm_opp_get_notifier(dev); |
1096 | if (IS_ERR(nh)) | 1097 | if (IS_ERR(nh)) |
1097 | ret = PTR_ERR(nh); | 1098 | ret = PTR_ERR(nh); |
1098 | rcu_read_unlock(); | 1099 | rcu_read_unlock(); |
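On the OPP side, the rename to dev_pm_opp_* leaves the calling convention untouched: lookups still run under rcu_read_lock() and the returned pointer is only valid until rcu_read_unlock(), so callers copy the frequency and voltage out first. A sketch of that caller pattern (hypothetical foo_pick_opp(), frequencies in Hz):

        static int foo_pick_opp(struct device *dev, unsigned long *rate, unsigned long *volt)
        {
                struct dev_pm_opp *opp;
                unsigned long freq = *rate;                     /* requested ceiling, in Hz */

                rcu_read_lock();
                opp = dev_pm_opp_find_freq_ceil(dev, &freq);    /* freq is adjusted to a real OPP */
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        return PTR_ERR(opp);
                }
                *rate = dev_pm_opp_get_freq(opp);               /* copy values out before unlocking */
                *volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();

                return 0;
        }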
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c index c5f86d8caca3..cede6f71cd63 100644 --- a/drivers/devfreq/exynos/exynos4_bus.c +++ b/drivers/devfreq/exynos/exynos4_bus.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
22 | #include <linux/opp.h> | 22 | #include <linux/pm_opp.h> |
23 | #include <linux/devfreq.h> | 23 | #include <linux/devfreq.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/regulator/consumer.h> | 25 | #include <linux/regulator/consumer.h> |
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq, | |||
639 | struct platform_device *pdev = container_of(dev, struct platform_device, | 639 | struct platform_device *pdev = container_of(dev, struct platform_device, |
640 | dev); | 640 | dev); |
641 | struct busfreq_data *data = platform_get_drvdata(pdev); | 641 | struct busfreq_data *data = platform_get_drvdata(pdev); |
642 | struct opp *opp; | 642 | struct dev_pm_opp *opp; |
643 | unsigned long freq; | 643 | unsigned long freq; |
644 | unsigned long old_freq = data->curr_oppinfo.rate; | 644 | unsigned long old_freq = data->curr_oppinfo.rate; |
645 | struct busfreq_opp_info new_oppinfo; | 645 | struct busfreq_opp_info new_oppinfo; |
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq, | |||
650 | rcu_read_unlock(); | 650 | rcu_read_unlock(); |
651 | return PTR_ERR(opp); | 651 | return PTR_ERR(opp); |
652 | } | 652 | } |
653 | new_oppinfo.rate = opp_get_freq(opp); | 653 | new_oppinfo.rate = dev_pm_opp_get_freq(opp); |
654 | new_oppinfo.volt = opp_get_voltage(opp); | 654 | new_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
655 | rcu_read_unlock(); | 655 | rcu_read_unlock(); |
656 | freq = new_oppinfo.rate; | 656 | freq = new_oppinfo.rate; |
657 | 657 | ||
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data) | |||
873 | exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i]; | 873 | exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i]; |
874 | 874 | ||
875 | for (i = LV_0; i < EX4210_LV_NUM; i++) { | 875 | for (i = LV_0; i < EX4210_LV_NUM; i++) { |
876 | err = opp_add(data->dev, exynos4210_busclk_table[i].clk, | 876 | err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk, |
877 | exynos4210_busclk_table[i].volt); | 877 | exynos4210_busclk_table[i].volt); |
878 | if (err) { | 878 | if (err) { |
879 | dev_err(data->dev, "Cannot add opp entries.\n"); | 879 | dev_err(data->dev, "Cannot add opp entries.\n"); |
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data) | |||
940 | } | 940 | } |
941 | 941 | ||
942 | for (i = 0; i < EX4x12_LV_NUM; i++) { | 942 | for (i = 0; i < EX4x12_LV_NUM; i++) { |
943 | ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk, | 943 | ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk, |
944 | exynos4x12_mifclk_table[i].volt); | 944 | exynos4x12_mifclk_table[i].volt); |
945 | if (ret) { | 945 | if (ret) { |
946 | dev_err(data->dev, "Fail to add opp entries.\n"); | 946 | dev_err(data->dev, "Fail to add opp entries.\n"); |
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
956 | { | 956 | { |
957 | struct busfreq_data *data = container_of(this, struct busfreq_data, | 957 | struct busfreq_data *data = container_of(this, struct busfreq_data, |
958 | pm_notifier); | 958 | pm_notifier); |
959 | struct opp *opp; | 959 | struct dev_pm_opp *opp; |
960 | struct busfreq_opp_info new_oppinfo; | 960 | struct busfreq_opp_info new_oppinfo; |
961 | unsigned long maxfreq = ULONG_MAX; | 961 | unsigned long maxfreq = ULONG_MAX; |
962 | int err = 0; | 962 | int err = 0; |
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
969 | data->disabled = true; | 969 | data->disabled = true; |
970 | 970 | ||
971 | rcu_read_lock(); | 971 | rcu_read_lock(); |
972 | opp = opp_find_freq_floor(data->dev, &maxfreq); | 972 | opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq); |
973 | if (IS_ERR(opp)) { | 973 | if (IS_ERR(opp)) { |
974 | rcu_read_unlock(); | 974 | rcu_read_unlock(); |
975 | dev_err(data->dev, "%s: unable to find a min freq\n", | 975 | dev_err(data->dev, "%s: unable to find a min freq\n", |
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this, | |||
977 | mutex_unlock(&data->lock); | 977 | mutex_unlock(&data->lock); |
978 | return PTR_ERR(opp); | 978 | return PTR_ERR(opp); |
979 | } | 979 | } |
980 | new_oppinfo.rate = opp_get_freq(opp); | 980 | new_oppinfo.rate = dev_pm_opp_get_freq(opp); |
981 | new_oppinfo.volt = opp_get_voltage(opp); | 981 | new_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
982 | rcu_read_unlock(); | 982 | rcu_read_unlock(); |
983 | 983 | ||
984 | err = exynos4_bus_setvolt(data, &new_oppinfo, | 984 | err = exynos4_bus_setvolt(data, &new_oppinfo, |
@@ -1020,7 +1020,7 @@ unlock: | |||
1020 | static int exynos4_busfreq_probe(struct platform_device *pdev) | 1020 | static int exynos4_busfreq_probe(struct platform_device *pdev) |
1021 | { | 1021 | { |
1022 | struct busfreq_data *data; | 1022 | struct busfreq_data *data; |
1023 | struct opp *opp; | 1023 | struct dev_pm_opp *opp; |
1024 | struct device *dev = &pdev->dev; | 1024 | struct device *dev = &pdev->dev; |
1025 | int err = 0; | 1025 | int err = 0; |
1026 | 1026 | ||
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev) | |||
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | rcu_read_lock(); | 1067 | rcu_read_lock(); |
1068 | opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); | 1068 | opp = dev_pm_opp_find_freq_floor(dev, |
1069 | &exynos4_devfreq_profile.initial_freq); | ||
1069 | if (IS_ERR(opp)) { | 1070 | if (IS_ERR(opp)) { |
1070 | rcu_read_unlock(); | 1071 | rcu_read_unlock(); |
1071 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", | 1072 | dev_err(dev, "Invalid initial frequency %lu kHz.\n", |
1072 | exynos4_devfreq_profile.initial_freq); | 1073 | exynos4_devfreq_profile.initial_freq); |
1073 | return PTR_ERR(opp); | 1074 | return PTR_ERR(opp); |
1074 | } | 1075 | } |
1075 | data->curr_oppinfo.rate = opp_get_freq(opp); | 1076 | data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp); |
1076 | data->curr_oppinfo.volt = opp_get_voltage(opp); | 1077 | data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp); |
1077 | rcu_read_unlock(); | 1078 | rcu_read_unlock(); |
1078 | 1079 | ||
1079 | platform_set_drvdata(pdev, data); | 1080 | platform_set_drvdata(pdev, data); |
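The exynos bus drivers also populate their OPP tables at probe time with dev_pm_opp_add(), one (Hz, µV) pair per operating point. A hedged sketch with a made-up foo_opps[] table:

        static const struct { unsigned long rate; unsigned long volt; } foo_opps[] = {
                { 133000000, 1000000 },         /* Hz, µV */
                { 266000000, 1050000 },
                { 400000000, 1100000 },
        };

        static int foo_init_opp_table(struct device *dev)
        {
                int i, err;

                for (i = 0; i < ARRAY_SIZE(foo_opps); i++) {
                        err = dev_pm_opp_add(dev, foo_opps[i].rate, foo_opps[i].volt);
                        if (err) {
                                dev_err(dev, "cannot add OPP entries\n");
                                return err;
                        }
                }
                return 0;
        }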
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c index 93c29f4085bd..0e0bfc17cd5b 100644 --- a/drivers/devfreq/exynos/exynos5_bus.c +++ b/drivers/devfreq/exynos/exynos5_bus.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/devfreq.h> | 16 | #include <linux/devfreq.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/opp.h> | 18 | #include <linux/pm_opp.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
@@ -131,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq, | |||
131 | struct platform_device *pdev = container_of(dev, struct platform_device, | 131 | struct platform_device *pdev = container_of(dev, struct platform_device, |
132 | dev); | 132 | dev); |
133 | struct busfreq_data_int *data = platform_get_drvdata(pdev); | 133 | struct busfreq_data_int *data = platform_get_drvdata(pdev); |
134 | struct opp *opp; | 134 | struct dev_pm_opp *opp; |
135 | unsigned long old_freq, freq; | 135 | unsigned long old_freq, freq; |
136 | unsigned long volt; | 136 | unsigned long volt; |
137 | 137 | ||
@@ -143,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq, | |||
143 | return PTR_ERR(opp); | 143 | return PTR_ERR(opp); |
144 | } | 144 | } |
145 | 145 | ||
146 | freq = opp_get_freq(opp); | 146 | freq = dev_pm_opp_get_freq(opp); |
147 | volt = opp_get_voltage(opp); | 147 | volt = dev_pm_opp_get_voltage(opp); |
148 | rcu_read_unlock(); | 148 | rcu_read_unlock(); |
149 | 149 | ||
150 | old_freq = data->curr_freq; | 150 | old_freq = data->curr_freq; |
@@ -245,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data) | |||
245 | int i, err = 0; | 245 | int i, err = 0; |
246 | 246 | ||
247 | for (i = LV_0; i < _LV_END; i++) { | 247 | for (i = LV_0; i < _LV_END; i++) { |
248 | err = opp_add(data->dev, exynos5_int_opp_table[i].clk, | 248 | err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk, |
249 | exynos5_int_opp_table[i].volt); | 249 | exynos5_int_opp_table[i].volt); |
250 | if (err) { | 250 | if (err) { |
251 | dev_err(data->dev, "Cannot add opp entries.\n"); | 251 | dev_err(data->dev, "Cannot add opp entries.\n"); |
@@ -261,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this, | |||
261 | { | 261 | { |
262 | struct busfreq_data_int *data = container_of(this, | 262 | struct busfreq_data_int *data = container_of(this, |
263 | struct busfreq_data_int, pm_notifier); | 263 | struct busfreq_data_int, pm_notifier); |
264 | struct opp *opp; | 264 | struct dev_pm_opp *opp; |
265 | unsigned long maxfreq = ULONG_MAX; | 265 | unsigned long maxfreq = ULONG_MAX; |
266 | unsigned long freq; | 266 | unsigned long freq; |
267 | unsigned long volt; | 267 | unsigned long volt; |
@@ -275,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this, | |||
275 | data->disabled = true; | 275 | data->disabled = true; |
276 | 276 | ||
277 | rcu_read_lock(); | 277 | rcu_read_lock(); |
278 | opp = opp_find_freq_floor(data->dev, &maxfreq); | 278 | opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq); |
279 | if (IS_ERR(opp)) { | 279 | if (IS_ERR(opp)) { |
280 | rcu_read_unlock(); | 280 | rcu_read_unlock(); |
281 | err = PTR_ERR(opp); | 281 | err = PTR_ERR(opp); |
282 | goto unlock; | 282 | goto unlock; |
283 | } | 283 | } |
284 | freq = opp_get_freq(opp); | 284 | freq = dev_pm_opp_get_freq(opp); |
285 | volt = opp_get_voltage(opp); | 285 | volt = dev_pm_opp_get_voltage(opp); |
286 | rcu_read_unlock(); | 286 | rcu_read_unlock(); |
287 | 287 | ||
288 | err = exynos5_int_setvolt(data, volt); | 288 | err = exynos5_int_setvolt(data, volt); |
@@ -315,7 +315,7 @@ unlock: | |||
315 | static int exynos5_busfreq_int_probe(struct platform_device *pdev) | 315 | static int exynos5_busfreq_int_probe(struct platform_device *pdev) |
316 | { | 316 | { |
317 | struct busfreq_data_int *data; | 317 | struct busfreq_data_int *data; |
318 | struct opp *opp; | 318 | struct dev_pm_opp *opp; |
319 | struct device *dev = &pdev->dev; | 319 | struct device *dev = &pdev->dev; |
320 | struct device_node *np; | 320 | struct device_node *np; |
321 | unsigned long initial_freq; | 321 | unsigned long initial_freq; |
@@ -367,7 +367,7 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev) | |||
367 | } | 367 | } |
368 | 368 | ||
369 | rcu_read_lock(); | 369 | rcu_read_lock(); |
370 | opp = opp_find_freq_floor(dev, | 370 | opp = dev_pm_opp_find_freq_floor(dev, |
371 | &exynos5_devfreq_int_profile.initial_freq); | 371 | &exynos5_devfreq_int_profile.initial_freq); |
372 | if (IS_ERR(opp)) { | 372 | if (IS_ERR(opp)) { |
373 | rcu_read_unlock(); | 373 | rcu_read_unlock(); |
@@ -376,8 +376,8 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev) | |||
376 | err = PTR_ERR(opp); | 376 | err = PTR_ERR(opp); |
377 | goto err_opp_add; | 377 | goto err_opp_add; |
378 | } | 378 | } |
379 | initial_freq = opp_get_freq(opp); | 379 | initial_freq = dev_pm_opp_get_freq(opp); |
380 | initial_volt = opp_get_voltage(opp); | 380 | initial_volt = dev_pm_opp_get_voltage(opp); |
381 | rcu_read_unlock(); | 381 | rcu_read_unlock(); |
382 | data->curr_freq = initial_freq; | 382 | data->curr_freq = initial_freq; |
383 | 383 | ||
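The devfreq target() callbacks in both exynos bus drivers follow the same sequence: ask devfreq_recommended_opp() for an OPP that satisfies the governor's request, copy frequency and voltage out under RCU, then program regulator and clock. A rough sketch with a hypothetical foo_set_rate_and_volt() and the error handling trimmed:

        static int foo_devfreq_target(struct device *dev, unsigned long *_freq, u32 flags)
        {
                struct dev_pm_opp *opp;
                unsigned long freq, volt;

                rcu_read_lock();
                opp = devfreq_recommended_opp(dev, _freq, flags);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        return PTR_ERR(opp);
                }
                freq = dev_pm_opp_get_freq(opp);
                volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();

                /* driver-specific: raise voltage before, or lower it after, the clock change */
                return foo_set_rate_and_volt(dev, freq, volt);
        }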
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index dbfc390330ac..5ef596765060 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -78,7 +78,6 @@ config THERMAL_GOV_USER_SPACE | |||
78 | config CPU_THERMAL | 78 | config CPU_THERMAL |
79 | bool "generic cpu cooling support" | 79 | bool "generic cpu cooling support" |
80 | depends on CPU_FREQ | 80 | depends on CPU_FREQ |
81 | select CPU_FREQ_TABLE | ||
82 | help | 81 | help |
83 | This implements the generic cpu cooling mechanism through frequency | 82 | This implements the generic cpu cooling mechanism through frequency |
84 | reduction. An ACPI version of this already exists | 83 | reduction. An ACPI version of this already exists |