path: root/drivers/cpufreq
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                  |   2
-rw-r--r--  drivers/cpufreq/Kconfig.arm              |  17
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c           |   1
-rw-r--r--  drivers/cpufreq/arm_big_little.c         |   6
-rw-r--r--  drivers/cpufreq/blackfin-cpufreq.c       |   1
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c           |   1
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c        |   4
-rw-r--r--  drivers/cpufreq/cpufreq.c                | 553
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c          |  40
-rw-r--r--  drivers/cpufreq/cris-artpec3-cpufreq.c   |   1
-rw-r--r--  drivers/cpufreq/cris-etraxfs-cpufreq.c   |   1
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c        |   1
-rw-r--r--  drivers/cpufreq/e_powersaver.c           |   1
-rw-r--r--  drivers/cpufreq/elanfreq.c               |   1
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c         |  97
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c     |   5
-rw-r--r--  drivers/cpufreq/freq_table.c             |  46
-rw-r--r--  drivers/cpufreq/gx-suspmod.c             |   4
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c      |   1
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c          |   1
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c     |   4
-rw-r--r--  drivers/cpufreq/intel_pstate.c           |  90
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c       |   1
-rw-r--r--  drivers/cpufreq/longhaul.c               |   5
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c      |   1
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c           |   1
-rw-r--r--  drivers/cpufreq/p4-clockmod.c            |   1
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c         |   1
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c            |   4
-rw-r--r--  drivers/cpufreq/powernow-k6.c            |   5
-rw-r--r--  drivers/cpufreq/powernow-k7.c            |   6
-rw-r--r--  drivers/cpufreq/powernow-k8.c            |  16
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c    |   5
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c        |   1
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c         |   1
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c         |   1
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c        |   4
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c        |  49
-rw-r--r--  drivers/cpufreq/sc520_freq.c             |   1
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c             |   5
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c     |   4
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c      |   4
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c          |  14
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c     |   2
-rw-r--r--  drivers/cpufreq/speedstep-ich.c          |   1
-rw-r--r--  drivers/cpufreq/speedstep-smi.c          |   1
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c          |  47
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c       |   4
48 files changed, 446 insertions(+), 617 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4b029c0944af..1fbe11f2a146 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -200,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
 endmenu
 
 menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
 source "drivers/cpufreq/Kconfig.arm"
 endmenu
 
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 077db3aa985b..1e2b9db563ec 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,7 @@
 # ARM CPU Frequency scaling drivers
 #
 
+# big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
 	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
@@ -16,6 +17,14 @@ config ARM_DT_BL_CPUFREQ
 	  This enables probing via DT for Generic CPUfreq driver for ARM
 	  big.LITTLE platform. This gets frequency tables from DT.
 
+config ARM_VEXPRESS_SPC_CPUFREQ
+	tristate "Versatile Express SPC based CPUfreq driver"
+	depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+	help
+	  This add the CPUfreq driver support for Versatile Express
+	  big.LITTLE platforms using SPC for power management.
+
+
 config ARM_EXYNOS_CPUFREQ
 	bool
 
@@ -241,11 +250,3 @@ config ARM_TEGRA_CPUFREQ
 	default y
 	help
 	  This adds the CPUFreq driver support for TEGRA SOCs.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
-	tristate "Versatile Express SPC based CPUfreq driver"
-	select ARM_BIG_LITTLE_CPUFREQ
-	depends on ARCH_VEXPRESS_SPC
-	help
-	  This add the CPUfreq driver support for Versatile Express
-	  big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 18448a7e9f86..822ca03a87f7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -855,7 +855,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	pr_debug("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
-		cpufreq_frequency_table_put_attr(policy->cpu);
 		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 72f87e9317e3..bad2ed317ba2 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -446,9 +446,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 	}
 
 	if (cur_cluster < MAX_CLUSTERS) {
+		int cpu;
+
 		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-		per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+		for_each_cpu(cpu, policy->cpus)
+			per_cpu(physical_cluster, cpu) = cur_cluster;
 	} else {
 		/* Assumption: during init, we are always running on A15 */
 		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
@@ -478,7 +481,6 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
 		return -ENODEV;
 	}
 
-	cpufreq_frequency_table_put_attr(policy->cpu);
 	put_cluster_clk_and_freq_table(cpu_dev);
 	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index e9e63fc9c2c9..a9f8e5bd0716 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -195,7 +195,6 @@ static struct cpufreq_driver bfin_driver = {
 	.target_index = bfin_target,
 	.get = bfin_getfreq_khz,
 	.init = __bfin_cpu_init,
-	.exit = cpufreq_generic_exit,
 	.name = "bfin cpufreq",
 	.attr = cpufreq_generic_attr,
 };
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 0c12ffc0ebcb..1bf6bbac3e03 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -109,7 +109,6 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 	.target_index = cpu0_set_target,
 	.get = cpufreq_generic_get,
 	.init = cpu0_cpufreq_init,
-	.exit = cpufreq_generic_exit,
 	.name = "generic_cpu0",
 	.attr = cpufreq_generic_attr,
 };
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a05b876f375e..bc447b9003c3 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 	pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
 		 freqs.old, freqs.new);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	/* Disable IRQs */
 	/* local_irq_save(flags); */
@@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 	/* Enable IRQs */
 	/* local_irq_restore(flags); */
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 08ca8c9f41cd..abda6609d3e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
 
 static inline bool has_target(void)
 {
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy || IS_ERR(policy->clk)) {
-		pr_err("%s: No %s associated to cpu: %d\n", __func__,
-			policy ? "clk" : "policy", cpu);
+		pr_err("%s: No %s associated to cpu: %d\n",
+		       __func__, policy ? "clk" : "policy", cpu);
 		return 0;
 	}
 
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+	return per_cpu(cpufreq_cpu_data, cpu);
+}
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	if (!l_p_j_ref_freq) {
 		l_p_j_ref = loops_per_jiffy;
 		l_p_j_ref_freq = ci->old;
-		pr_debug("saving %lu as reference value for loops_per_jiffy; "
-			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+			 l_p_j_ref, l_p_j_ref_freq);
 	}
-	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
-	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
-		pr_debug("scaling loops_per_jiffy to %lu "
-			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
+		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+			 loops_per_jiffy, ci->new);
 	}
 }
 #else
@@ -282,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
-		state, freqs->new);
+		 state, freqs->new);
 
 	switch (state) {
 
@@ -294,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 			if ((policy) && (policy->cpu == freqs->cpu) &&
 			    (policy->cur) && (policy->cur != freqs->old)) {
-				pr_debug("Warning: CPU frequency is"
-					" %u, cpufreq assumed %u kHz.\n",
-					freqs->old, policy->cur);
+				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+					 freqs->old, policy->cur);
 				freqs->old = policy->cur;
 			}
 		}
@@ -307,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
-			(unsigned long)freqs->cpu);
+		pr_debug("FREQ: %lu - CPU: %lu\n",
+			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
@@ -326,16 +331,15 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
  * function. It is called twice on all CPU frequency changes that have
  * external effects.
  */
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, unsigned int state)
 {
 	for_each_cpu(freqs->cpu, policy->cpus)
 		__cpufreq_notify_transition(policy, freqs, state);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 /* Do post notifications when there are chances that transition has failed */
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed)
 {
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
@@ -346,13 +350,47 @@ void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs)
+{
+wait:
+	wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+	spin_lock(&policy->transition_lock);
+
+	if (unlikely(policy->transition_ongoing)) {
+		spin_unlock(&policy->transition_lock);
+		goto wait;
+	}
+
+	policy->transition_ongoing = true;
+
+	spin_unlock(&policy->transition_lock);
+
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, int transition_failed)
+{
+	if (unlikely(WARN_ON(!policy->transition_ongoing)))
+		return;
+
+	cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+	policy->transition_ongoing = false;
+
+	wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
 				 struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +406,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
 		return -EINVAL;
 
 	if (cpufreq_boost_trigger_state(enable)) {
-		pr_err("%s: Cannot %s BOOST!\n", __func__,
-		       enable ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST!\n",
+		       __func__, enable ? "enable" : "disable");
 		return -EINVAL;
 	}
 
-	pr_debug("%s: cpufreq BOOST %s\n", __func__,
-		 enable ? "enabled" : "disabled");
+	pr_debug("%s: cpufreq BOOST %s\n",
+		 __func__, enable ? "enabled" : "disabled");
 
 	return count;
 }
@@ -879,18 +917,25 @@ err_out_kobj_put:
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
+	struct cpufreq_governor *gov = NULL;
 	struct cpufreq_policy new_policy;
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
 
+	/* Update governor of new_policy to the governor used before hotplug */
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+	if (gov)
+		pr_debug("Restoring governor %s for cpu %d\n",
+				policy->governor->name, policy->cpu);
+	else
+		gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+	new_policy.governor = gov;
+
 	/* Use the default policy if its valid. */
 	if (cpufreq_driver->setpolicy)
-		cpufreq_parse_governor(policy->governor->name,
-					&new_policy.policy, NULL);
-
-	/* assure that the starting sequence is run in cpufreq_set_policy */
-	policy->governor = NULL;
+		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
 	/* set default policy */
 	ret = cpufreq_set_policy(policy, &new_policy);
@@ -927,8 +972,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	up_write(&policy->rwsem);
 
 	if (has_target()) {
-		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+		if (ret) {
 			pr_err("%s: Failed to start governor\n", __func__);
 			return ret;
 		}
@@ -949,6 +997,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	policy->governor = NULL;
+
 	return policy;
 }
 
@@ -968,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
+	spin_lock_init(&policy->transition_lock);
+	init_waitqueue_head(&policy->transition_wait);
 
 	return policy;
 
@@ -1022,21 +1074,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 	up_write(&policy->rwsem);
 
-	cpufreq_frequency_table_update_policy_cpu(policy);
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-			     bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
+	bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_policy *tpolicy;
-	struct cpufreq_governor *gov;
 #endif
 
 	if (cpu_is_offline(cpu))
@@ -1075,9 +1125,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * Restore the saved policy when doing light-weight init and fall back
 	 * to the full init if that fails.
 	 */
-	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
-		frozen = false;
+		recover_policy = false;
 		policy = cpufreq_policy_alloc();
 		if (!policy)
 			goto nomem_out;
@@ -1089,12 +1139,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (frozen && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu)
 		update_policy_cpu(policy, cpu);
 	else
 		policy->cpu = cpu;
 
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	init_completion(&policy->kobj_unregister);
@@ -1109,12 +1158,27 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		goto err_set_policy_cpu;
 	}
 
+	/* related cpus should atleast have policy->cpus */
+	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+	/*
+	 * affected cpus must always be the one, which are online. We aren't
+	 * managing offline cpus here.
+	 */
+	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+	if (!recover_policy) {
+		policy->user_policy.min = policy->min;
+		policy->user_policy.max = policy->max;
+	}
+
+	down_write(&policy->rwsem);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = policy;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (cpufreq_driver->get) {
+	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
 			pr_err("%s: ->get() failed\n", __func__);
@@ -1162,33 +1226,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		}
 	}
 
-	/* related cpus should atleast have policy->cpus */
-	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-	/*
-	 * affected cpus must always be the one, which are online. We aren't
-	 * managing offline cpus here.
-	 */
-	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-	if (!frozen) {
-		policy->user_policy.min = policy->min;
-		policy->user_policy.max = policy->max;
-	}
-
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_START, policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-	if (gov) {
-		policy->governor = gov;
-		pr_debug("Restoring governor %s for cpu %d\n",
-			 policy->governor->name, cpu);
-	}
-#endif
-
-	if (!frozen) {
+	if (!recover_policy) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
@@ -1202,10 +1243,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 	cpufreq_init_policy(policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
+	up_write(&policy->rwsem);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	up_read(&cpufreq_rwsem);
@@ -1224,7 +1266,7 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (frozen) {
+	if (recover_policy) {
 		/* Do not leave stale fallback data behind. */
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
 		cpufreq_policy_put_kobj(policy);
@@ -1248,7 +1290,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	return __cpufreq_add_dev(dev, sif, false);
+	return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1263,7 +1305,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
-		pr_err("%s: Failed to move kobj: %d", __func__, ret);
+		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
 
 		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1279,8 +1321,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif,
-					bool frozen)
+					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int new_cpu, ret;
@@ -1294,7 +1335,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (frozen)
+	if (cpufreq_suspended)
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1312,37 +1353,34 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		}
 	}
 
-#ifdef CONFIG_HOTPLUG_CPU
 	if (!cpufreq_driver->setpolicy)
 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
 			policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
 
 	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
 	up_read(&policy->rwsem);
 
 	if (cpu != policy->cpu) {
-		if (!frozen)
-			sysfs_remove_link(&dev->kobj, "cpufreq");
+		sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
 		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
-			if (!frozen) {
+			if (!cpufreq_suspended)
 				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
 					 __func__, new_cpu, cpu);
-			}
 		}
+	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+		cpufreq_driver->stop_cpu(policy);
 	}
 
 	return 0;
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
+				       struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int ret;
@@ -1372,12 +1410,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 					CPUFREQ_GOV_POLICY_EXIT);
 		if (ret) {
 			pr_err("%s: Failed to exit governor\n",
-					__func__);
+			       __func__);
 			return ret;
 		}
 	}
 
-	if (!frozen)
+	if (!cpufreq_suspended)
 		cpufreq_policy_put_kobj(policy);
 
 	/*
@@ -1393,16 +1431,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		list_del(&policy->policy_list);
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_free(policy);
-	} else {
-		if (has_target()) {
-			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
-				pr_err("%s: Failed to start governor\n",
-						__func__);
-				return ret;
-			}
+	} else if (has_target()) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+		if (ret) {
+			pr_err("%s: Failed to start governor\n", __func__);
+			return ret;
 		}
 	}
 
@@ -1423,10 +1461,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+	ret = __cpufreq_remove_dev_prepare(dev, sif);
 
 	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+		ret = __cpufreq_remove_dev_finish(dev, sif);
 
 	return ret;
 }
@@ -1457,8 +1495,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	struct cpufreq_freqs freqs;
 	unsigned long flags;
 
-	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
-	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+		 old_freq, new_freq);
 
 	freqs.old = old_freq;
 	freqs.new = new_freq;
@@ -1467,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 }
 
 /**
@@ -1547,23 +1585,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	unsigned int ret_freq = 0;
 
-	if (cpufreq_disabled() || !cpufreq_driver)
-		return -ENOENT;
-
-	BUG_ON(!policy);
-
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
-
-	down_read(&policy->rwsem);
-
-	ret_freq = __cpufreq_get(cpu);
+	if (policy) {
+		down_read(&policy->rwsem);
+		ret_freq = __cpufreq_get(cpu);
+		up_read(&policy->rwsem);
 
-	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);
+		cpufreq_cpu_put(policy);
+	}
 
 	return ret_freq;
 }
@@ -1576,83 +1607,103 @@ static struct subsys_interface cpufreq_interface = {
 	.remove_dev = cpufreq_remove_dev,
 };
 
+/*
+ * In case platform wants some specific frequency to be configured
+ * during suspend..
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (!policy->suspend_freq) {
+		pr_err("%s: suspend_freq can't be zero\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+			policy->suspend_freq);
+
+	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+			CPUFREQ_RELATION_H);
+	if (ret)
+		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+				__func__, policy->suspend_freq, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor. The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * as some platforms can't change frequency after this point in suspend cycle.
+ * Because some of the devices (like: i2c, regulators, etc) they use for
+ * changing frequency are suspended quickly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
-		return 0;
+	if (!has_target())
+		return;
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", policy->cpu);
+	pr_debug("%s: Suspending Governors\n", __func__);
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
- *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- * This function is only executed for the boot CPU. The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(policy);
-}
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
+		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend	= cpufreq_bp_suspend,
-	.resume		= cpufreq_bp_resume,
-};
+		/*
+		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+		 * policy in list. It will verify that the current freq is in
+		 * sync with what we believe it to be.
+		 */
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+			schedule_work(&policy->update);
+	}
+}
 
 /**
  * cpufreq_get_current_driver - return current driver's name
@@ -1768,7 +1819,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		target_freq = policy->min;
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-			policy->cpu, target_freq, relation, old_target_freq);
+		 policy->cpu, target_freq, relation, old_target_freq);
 
 	/*
 	 * This might look like a redundant call as we are checking it again
@@ -1813,20 +1864,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			freqs.flags = 0;
 
 			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
-					__func__, policy->cpu, freqs.old,
-					freqs.new);
+				 __func__, policy->cpu, freqs.old, freqs.new);
 
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_PRECHANGE);
+			cpufreq_freq_transition_begin(policy, &freqs);
 		}
 
 		retval = cpufreq_driver->target_index(policy, index);
 		if (retval)
 			pr_err("%s: Failed to change cpu frequency: %d\n",
 				__func__, retval);
 
 		if (notify)
-			cpufreq_notify_post_transition(policy, &freqs, retval);
+			cpufreq_freq_transition_end(policy, &freqs, retval);
 	}
 
 out:
@@ -1869,17 +1918,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
 		if (!gov)
 			return -EINVAL;
 		else {
-			printk(KERN_WARNING "%s governor failed, too long"
-			       " transition latency of HW, fallback"
-			       " to %s governor\n",
-			       policy->governor->name,
-			       gov->name);
+			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+				policy->governor->name, gov->name);
 			policy->governor = gov;
 		}
 	}
@@ -1889,7 +1939,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 		return -EINVAL;
 
 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-						policy->cpu, event);
+		 policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1956,9 +2006,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	int cpu;
-#endif
 
 	if (!governor)
 		return;
@@ -1966,14 +2014,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (cpufreq_disabled())
 		return;
 
-#ifdef CONFIG_HOTPLUG_CPU
 	for_each_present_cpu(cpu) {
 		if (cpu_online(cpu))
 			continue;
 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
 	}
-#endif
 
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
@@ -2018,22 +2064,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
-	int ret = 0, failed = 1;
+	struct cpufreq_governor *old_gov;
+	int ret;
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
-		new_policy->min, new_policy->max);
+	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+		 new_policy->cpu, new_policy->min, new_policy->max);
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min) {
-		ret = -EINVAL;
-		goto error_out;
-	}
+	if (new_policy->min > policy->max || new_policy->max < policy->min)
+		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* adjust if necessary - all reasons */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2049,7 +2094,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* notification of the new policy */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2059,63 +2104,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	policy->max = new_policy->max;
 
 	pr_debug("new min and max freqs are %u - %u kHz\n",
 		 policy->min, policy->max);
 
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		ret = cpufreq_driver->setpolicy(new_policy);
-	} else {
-		if (new_policy->governor != policy->governor) {
-			/* save old, working values */
-			struct cpufreq_governor *old_gov = policy->governor;
-
-			pr_debug("governor switch\n");
-
-			/* end old governor */
-			if (policy->governor) {
-				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-				up_write(&policy->rwsem);
-				__cpufreq_governor(policy,
-						CPUFREQ_GOV_POLICY_EXIT);
-				down_write(&policy->rwsem);
-			}
+		return cpufreq_driver->setpolicy(new_policy);
+	}
 
-			/* start new governor */
-			policy->governor = new_policy->governor;
-			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-					failed = 0;
-				} else {
-					up_write(&policy->rwsem);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_EXIT);
-					down_write(&policy->rwsem);
-				}
-			}
+	if (new_policy->governor == policy->governor)
+		goto out;
 
-			if (failed) {
-				/* new governor failed, so re-start old one */
-				pr_debug("starting governor %s failed\n",
-							policy->governor->name);
-				if (old_gov) {
-					policy->governor = old_gov;
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_INIT);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_START);
-				}
-				ret = -EINVAL;
-				goto error_out;
-			}
-			/* might be a policy change, too, so fall through */
-		}
-		pr_debug("governor: change or update limits\n");
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	pr_debug("governor switch\n");
+
+	/* save old, working values */
+	old_gov = policy->governor;
+	/* end old governor */
+	if (old_gov) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
 	}
 
-error_out:
-	return ret;
+	/* start new governor */
+	policy->governor = new_policy->governor;
+	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+			goto out;
+
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
+
+	/* new governor failed, so re-start old one */
+	pr_debug("starting governor %s failed\n", policy->governor->name);
+	if (old_gov) {
+		policy->governor = old_gov;
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	}
+
+	return -EINVAL;
+
+ out:
+	pr_debug("governor: change or update limits\n");
+	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2149,10 +2184,15 @@ int cpufreq_update_policy(unsigned int cpu)
 	 * BIOS might change freq behind our back
 	 * -> ask driver for current freq and notify governors about a change
 	 */
-	if (cpufreq_driver->get) {
+	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		new_policy.cur = cpufreq_driver->get(cpu);
+		if (WARN_ON(!new_policy.cur)) {
+			ret = -EIO;
+			goto no_policy;
+		}
+
 		if (!policy->cur) {
-			pr_debug("Driver did not initialize current freq");
+			pr_debug("Driver did not initialize current freq\n");
 			policy->cur = new_policy.cur;
 		} else {
 			if (policy->cur != new_policy.cur && has_target())
@@ -2176,30 +2216,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
-	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL, frozen);
-			cpufreq_update_policy(cpu);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 
 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+			__cpufreq_remove_dev_prepare(dev, NULL);
 			break;
 
 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL, frozen);
+			__cpufreq_remove_dev_finish(dev, NULL);
 			break;
 
 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -2255,8 +2289,8 @@ int cpufreq_boost_trigger_state(int state)
 		cpufreq_driver->boost_enabled = !state;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		pr_err("%s: Cannot %s BOOST\n", __func__,
-		       state ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST\n",
+		       __func__, state ? "enable" : "disable");
 	}
 
 	return ret;
@@ -2301,7 +2335,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
 	if (!driver_data || !driver_data->verify || !driver_data->init ||
 	    !(driver_data->setpolicy || driver_data->target_index ||
-		    driver_data->target))
+		    driver_data->target) ||
+	     (driver_data->setpolicy && (driver_data->target_index ||
+		    driver_data->target)))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2328,7 +2364,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		ret = cpufreq_sysfs_create_file(&boost.attr);
 		if (ret) {
 			pr_err("%s: cannot register global BOOST sysfs file\n",
-				__func__);
+			       __func__);
 			goto err_null_driver;
 		}
 	}
@@ -2351,7 +2387,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		/* if all ->init() calls failed, unregister */
 		if (ret) {
 			pr_debug("no CPU initialized for driver %s\n",
-					driver_data->name);
+				 driver_data->name);
 			goto err_if_unreg;
 		}
 	}
@@ -2415,7 +2451,6 @@ static int __init cpufreq_core_init(void)
 
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
-	register_syscore_ops(&cpufreq_syscore_ops);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5793e1447fb1..ecaaebf969fc 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,7 @@
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
 
@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
 	cpufreq_cpu_put(policy);
 }
 
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
-		struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
 	unsigned int i, j, count = 0, ret = 0;
 	struct cpufreq_stats *stat;
-	struct cpufreq_policy *current_policy;
 	unsigned int alloc_size;
 	unsigned int cpu = policy->cpu;
+	struct cpufreq_frequency_table *table;
+
+	table = cpufreq_frequency_get_table(cpu);
+	if (unlikely(!table))
+		return 0;
+
 	if (per_cpu(cpufreq_stats_table, cpu))
 		return -EBUSY;
 	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
 	if ((stat) == NULL)
 		return -ENOMEM;
 
-	current_policy = cpufreq_cpu_get(cpu);
-	if (current_policy == NULL) {
-		ret = -EINVAL;
-		goto error_get_fail;
-	}
-
-	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+	ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
 	if (ret)
 		goto error_out;
 
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
223 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); 221 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
224 if (!stat->time_in_state) { 222 if (!stat->time_in_state) {
225 ret = -ENOMEM; 223 ret = -ENOMEM;
226 goto error_out; 224 goto error_alloc;
227 } 225 }
228 stat->freq_table = (unsigned int *)(stat->time_in_state + count); 226 stat->freq_table = (unsigned int *)(stat->time_in_state + count);
229 227
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
243 stat->last_time = get_jiffies_64(); 241 stat->last_time = get_jiffies_64();
244 stat->last_index = freq_table_get_index(stat, policy->cur); 242 stat->last_index = freq_table_get_index(stat, policy->cur);
245 spin_unlock(&cpufreq_stats_lock); 243 spin_unlock(&cpufreq_stats_lock);
246 cpufreq_cpu_put(current_policy);
247 return 0; 244 return 0;
245error_alloc:
246 sysfs_remove_group(&policy->kobj, &stats_attr_group);
248error_out: 247error_out:
249 cpufreq_cpu_put(current_policy);
250error_get_fail:
251 kfree(stat); 248 kfree(stat);
252 per_cpu(cpufreq_stats_table, cpu) = NULL; 249 per_cpu(cpufreq_stats_table, cpu) = NULL;
253 return ret; 250 return ret;
@@ -256,7 +253,6 @@ error_get_fail:
256static void cpufreq_stats_create_table(unsigned int cpu) 253static void cpufreq_stats_create_table(unsigned int cpu)
257{ 254{
258 struct cpufreq_policy *policy; 255 struct cpufreq_policy *policy;
259 struct cpufreq_frequency_table *table;
260 256
261 /* 257 /*
262 * "likely(!policy)" because normally cpufreq_stats will be registered 258 * "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
266 if (likely(!policy)) 262 if (likely(!policy))
267 return; 263 return;
268 264
269 table = cpufreq_frequency_get_table(policy->cpu); 265 __cpufreq_stats_create_table(policy);
270 if (likely(table))
271 __cpufreq_stats_create_table(policy, table);
272 266
273 cpufreq_cpu_put(policy); 267 cpufreq_cpu_put(policy);
274} 268}
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
291{ 285{
292 int ret = 0; 286 int ret = 0;
293 struct cpufreq_policy *policy = data; 287 struct cpufreq_policy *policy = data;
294 struct cpufreq_frequency_table *table;
295 unsigned int cpu = policy->cpu;
296 288
297 if (val == CPUFREQ_UPDATE_POLICY_CPU) { 289 if (val == CPUFREQ_UPDATE_POLICY_CPU) {
298 cpufreq_stats_update_policy_cpu(policy); 290 cpufreq_stats_update_policy_cpu(policy);
299 return 0; 291 return 0;
300 } 292 }
301 293
302 table = cpufreq_frequency_get_table(cpu);
303 if (!table)
304 return 0;
305
306 if (val == CPUFREQ_CREATE_POLICY) 294 if (val == CPUFREQ_CREATE_POLICY)
307 ret = __cpufreq_stats_create_table(policy, table); 295 ret = __cpufreq_stats_create_table(policy);
308 else if (val == CPUFREQ_REMOVE_POLICY) 296 else if (val == CPUFREQ_REMOVE_POLICY)
309 __cpufreq_stats_free_table(policy); 297 __cpufreq_stats_free_table(policy);
310 298
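
__cpufreq_stats_create_table() now looks the frequency table up itself via cpufreq_frequency_get_table() and works directly on the policy it was handed, so the extra cpufreq_cpu_get()/cpufreq_cpu_put() pair disappears. The error path gains an error_alloc label so the sysfs group is removed again when the time_in_state allocation fails; condensed, the unwind order is (a sketch of the hunks above, not a verbatim copy):

        ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
        if (ret)
                goto error_out;

        stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!stat->time_in_state) {
                ret = -ENOMEM;
                goto error_alloc;               /* undo the sysfs group first */
        }
        /* ... */
        error_alloc:
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
        error_out:
                kfree(stat);
                per_cpu(cpufreq_stats_table, cpu) = NULL;
                return ret;
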
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index 86559040c54c..d4573032cbbc 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
57 .verify = cpufreq_generic_frequency_table_verify, 57 .verify = cpufreq_generic_frequency_table_verify,
58 .target_index = cris_freq_target, 58 .target_index = cris_freq_target,
59 .init = cris_freq_cpu_init, 59 .init = cris_freq_cpu_init,
60 .exit = cpufreq_generic_exit,
61 .name = "cris_freq", 60 .name = "cris_freq",
62 .attr = cpufreq_generic_attr, 61 .attr = cpufreq_generic_attr,
63}; 62};
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 26d940d40b1d..13c3361437f7 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
57 .verify = cpufreq_generic_frequency_table_verify, 57 .verify = cpufreq_generic_frequency_table_verify,
58 .target_index = cris_freq_target, 58 .target_index = cris_freq_target,
59 .init = cris_freq_cpu_init, 59 .init = cris_freq_cpu_init,
60 .exit = cpufreq_generic_exit,
61 .name = "cris_freq", 60 .name = "cris_freq",
62 .attr = cpufreq_generic_attr, 61 .attr = cpufreq_generic_attr,
63}; 62};
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 2cf33848d86e..28a16dc6e02e 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -125,7 +125,6 @@ static struct cpufreq_driver davinci_driver = {
125 .target_index = davinci_target, 125 .target_index = davinci_target,
126 .get = cpufreq_generic_get, 126 .get = cpufreq_generic_get,
127 .init = davinci_cpu_init, 127 .init = davinci_cpu_init,
128 .exit = cpufreq_generic_exit,
129 .name = "davinci", 128 .name = "davinci",
130 .attr = cpufreq_generic_attr, 129 .attr = cpufreq_generic_attr,
131}; 130};
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 9012b8bb6b64..a0d2a423cea9 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -382,7 +382,6 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
382 unsigned int cpu = policy->cpu; 382 unsigned int cpu = policy->cpu;
383 383
384 /* Bye */ 384 /* Bye */
385 cpufreq_frequency_table_put_attr(policy->cpu);
386 kfree(eps_cpu[cpu]); 385 kfree(eps_cpu[cpu]);
387 eps_cpu[cpu] = NULL; 386 eps_cpu[cpu] = NULL;
388 return 0; 387 return 0;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index de08acff5101..c987e94708f5 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -198,7 +198,6 @@ static struct cpufreq_driver elanfreq_driver = {
198 .verify = cpufreq_generic_frequency_table_verify, 198 .verify = cpufreq_generic_frequency_table_verify,
199 .target_index = elanfreq_target, 199 .target_index = elanfreq_target,
200 .init = elanfreq_cpu_init, 200 .init = elanfreq_cpu_init,
201 .exit = cpufreq_generic_exit,
202 .name = "elanfreq", 201 .name = "elanfreq",
203 .attr = cpufreq_generic_attr, 202 .attr = cpufreq_generic_attr,
204}; 203};
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index fcd2914d081a..f99cfe24e7bc 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -16,7 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/regulator/consumer.h> 17#include <linux/regulator/consumer.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/suspend.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21 20
22#include <plat/cpu.h> 21#include <plat/cpu.h>
@@ -24,12 +23,8 @@
24#include "exynos-cpufreq.h" 23#include "exynos-cpufreq.h"
25 24
26static struct exynos_dvfs_info *exynos_info; 25static struct exynos_dvfs_info *exynos_info;
27
28static struct regulator *arm_regulator; 26static struct regulator *arm_regulator;
29
30static unsigned int locking_frequency; 27static unsigned int locking_frequency;
31static bool frequency_locked;
32static DEFINE_MUTEX(cpufreq_lock);
33 28
34static int exynos_cpufreq_get_index(unsigned int freq) 29static int exynos_cpufreq_get_index(unsigned int freq)
35{ 30{
@@ -134,83 +129,13 @@ out:
134 129
135static int exynos_target(struct cpufreq_policy *policy, unsigned int index) 130static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
136{ 131{
137 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; 132 return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
138 int ret = 0;
139
140 mutex_lock(&cpufreq_lock);
141
142 if (frequency_locked)
143 goto out;
144
145 ret = exynos_cpufreq_scale(freq_table[index].frequency);
146
147out:
148 mutex_unlock(&cpufreq_lock);
149
150 return ret;
151}
152
153#ifdef CONFIG_PM
154static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
155{
156 return 0;
157}
158
159static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
160{
161 return 0;
162}
163#endif
164
165/**
166 * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
167 * context
168 * @notifier
169 * @pm_event
170 * @v
171 *
172 * While frequency_locked == true, target() ignores every frequency but
173 * locking_frequency. The locking_frequency value is the initial frequency,
174 * which is set by the bootloader. In order to eliminate possible
175 * inconsistency in clock values, we save and restore frequencies during
176 * suspend and resume and block CPUFREQ activities. Note that the standard
177 * suspend/resume cannot be used as they are too deep (syscore_ops) for
178 * regulator actions.
179 */
180static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
181 unsigned long pm_event, void *v)
182{
183 int ret;
184
185 switch (pm_event) {
186 case PM_SUSPEND_PREPARE:
187 mutex_lock(&cpufreq_lock);
188 frequency_locked = true;
189 mutex_unlock(&cpufreq_lock);
190
191 ret = exynos_cpufreq_scale(locking_frequency);
192 if (ret < 0)
193 return NOTIFY_BAD;
194
195 break;
196
197 case PM_POST_SUSPEND:
198 mutex_lock(&cpufreq_lock);
199 frequency_locked = false;
200 mutex_unlock(&cpufreq_lock);
201 break;
202 }
203
204 return NOTIFY_OK;
205} 133}
206 134
207static struct notifier_block exynos_cpufreq_nb = {
208 .notifier_call = exynos_cpufreq_pm_notifier,
209};
210
211static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) 135static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
212{ 136{
213 policy->clk = exynos_info->cpu_clk; 137 policy->clk = exynos_info->cpu_clk;
138 policy->suspend_freq = locking_frequency;
214 return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); 139 return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
215} 140}
216 141
@@ -220,15 +145,13 @@ static struct cpufreq_driver exynos_driver = {
220 .target_index = exynos_target, 145 .target_index = exynos_target,
221 .get = cpufreq_generic_get, 146 .get = cpufreq_generic_get,
222 .init = exynos_cpufreq_cpu_init, 147 .init = exynos_cpufreq_cpu_init,
223 .exit = cpufreq_generic_exit,
224 .name = "exynos_cpufreq", 148 .name = "exynos_cpufreq",
225 .attr = cpufreq_generic_attr, 149 .attr = cpufreq_generic_attr,
226#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW 150#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
227 .boost_supported = true, 151 .boost_supported = true,
228#endif 152#endif
229#ifdef CONFIG_PM 153#ifdef CONFIG_PM
230 .suspend = exynos_cpufreq_suspend, 154 .suspend = cpufreq_generic_suspend,
231 .resume = exynos_cpufreq_resume,
232#endif 155#endif
233}; 156};
234 157
@@ -263,19 +186,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
263 goto err_vdd_arm; 186 goto err_vdd_arm;
264 } 187 }
265 188
189 /* Done here as we want to capture boot frequency */
266 locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; 190 locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
267 191
268 register_pm_notifier(&exynos_cpufreq_nb); 192 if (!cpufreq_register_driver(&exynos_driver))
269 193 return 0;
270 if (cpufreq_register_driver(&exynos_driver)) {
271 pr_err("%s: failed to register cpufreq driver\n", __func__);
272 goto err_cpufreq;
273 }
274
275 return 0;
276err_cpufreq:
277 unregister_pm_notifier(&exynos_cpufreq_nb);
278 194
195 pr_err("%s: failed to register cpufreq driver\n", __func__);
279 regulator_put(arm_regulator); 196 regulator_put(arm_regulator);
280err_vdd_arm: 197err_vdd_arm:
281 kfree(exynos_info); 198 kfree(exynos_info);
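
Several drivers in this series (exynos, s5pv210, tegra) drop their private PM notifiers in favour of the core's suspend handling: the driver records the frequency to use across suspend in policy->suspend_freq at init time and points ->suspend at cpufreq_generic_suspend(). A minimal sketch of the pattern, with foo_freq_table, foo_target and boot_freq standing in as hypothetical driver-private names:

        static int foo_cpufreq_init(struct cpufreq_policy *policy)
        {
                /* frequency (kHz) the core should switch to before suspend */
                policy->suspend_freq = boot_freq;
                return cpufreq_generic_init(policy, foo_freq_table, 100000);
        }

        static struct cpufreq_driver foo_driver = {
                .verify         = cpufreq_generic_frequency_table_verify,
                .target_index   = foo_target,
                .get            = cpufreq_generic_get,
                .init           = foo_cpufreq_init,
                .name           = "foo-cpufreq",
                .attr           = cpufreq_generic_attr,
        #ifdef CONFIG_PM
                .suspend        = cpufreq_generic_suspend,
        #endif
        };
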
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 49b756015316..a6b8214d7b77 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -219,7 +219,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
219 freqs.old = policy->cur; 219 freqs.old = policy->cur;
220 freqs.new = freq_table[index].frequency; 220 freqs.new = freq_table[index].frequency;
221 221
222 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 222 cpufreq_freq_transition_begin(policy, &freqs);
223 223
224 /* Set the target frequency in all C0_3_PSTATE register */ 224 /* Set the target frequency in all C0_3_PSTATE register */
225 for_each_cpu(i, policy->cpus) { 225 for_each_cpu(i, policy->cpus) {
@@ -258,7 +258,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
258 dev_crit(dvfs_info->dev, "New frequency out of range\n"); 258 dev_crit(dvfs_info->dev, "New frequency out of range\n");
259 freqs.new = freqs.old; 259 freqs.new = freqs.old;
260 } 260 }
261 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 261 cpufreq_freq_transition_end(policy, &freqs, 0);
262 262
263 cpufreq_cpu_put(policy); 263 cpufreq_cpu_put(policy);
264 mutex_unlock(&cpufreq_lock); 264 mutex_unlock(&cpufreq_lock);
@@ -312,7 +312,6 @@ static struct cpufreq_driver exynos_driver = {
312 .target_index = exynos_target, 312 .target_index = exynos_target,
313 .get = cpufreq_generic_get, 313 .get = cpufreq_generic_get,
314 .init = exynos_cpufreq_cpu_init, 314 .init = exynos_cpufreq_cpu_init,
315 .exit = cpufreq_generic_exit,
316 .name = CPUFREQ_NAME, 315 .name = CPUFREQ_NAME,
317 .attr = cpufreq_generic_attr, 316 .attr = cpufreq_generic_attr,
318}; 317};
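
Throughout the rest of this series the open-coded CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE notifier pair is replaced by cpufreq_freq_transition_begin() and cpufreq_freq_transition_end(); the end call carries a transition_failed flag (0 on success) so a botched switch is rolled back by the core in one place. A minimal sketch of the bracket around a hardware switch, with foo_write_pstate() as a hypothetical driver-specific poke:

        static int foo_set_speed(struct cpufreq_policy *policy, unsigned int new_khz)
        {
                struct cpufreq_freqs freqs;
                int ret;

                freqs.old = policy->cur;
                freqs.new = new_khz;

                cpufreq_freq_transition_begin(policy, &freqs);
                ret = foo_write_pstate(new_khz);        /* hypothetical hardware poke */
                cpufreq_freq_transition_end(policy, &freqs, ret != 0);

                return ret;
        }
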
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 8e54f97899ba..65a477075b3f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -91,8 +91,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
91EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); 91EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
92 92
93/* 93/*
94 * Generic routine to verify policy & frequency table, requires driver to call 94 * Generic routine to verify policy & frequency table, requires driver to set
95 * cpufreq_frequency_table_get_attr() prior to it. 95 * policy->freq_table prior to it.
96 */ 96 */
97int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) 97int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
98{ 98{
@@ -203,8 +203,6 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
203} 203}
204EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index); 204EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
205 205
206static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
207
208/** 206/**
209 * show_available_freqs - show available frequencies for the specified CPU 207 * show_available_freqs - show available frequencies for the specified CPU
210 */ 208 */
@@ -212,15 +210,12 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
212 bool show_boost) 210 bool show_boost)
213{ 211{
214 unsigned int i = 0; 212 unsigned int i = 0;
215 unsigned int cpu = policy->cpu;
216 ssize_t count = 0; 213 ssize_t count = 0;
217 struct cpufreq_frequency_table *table; 214 struct cpufreq_frequency_table *table = policy->freq_table;
218 215
219 if (!per_cpu(cpufreq_show_table, cpu)) 216 if (!table)
220 return -ENODEV; 217 return -ENODEV;
221 218
222 table = per_cpu(cpufreq_show_table, cpu);
223
224 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 219 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
225 if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 220 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
226 continue; 221 continue;
@@ -283,49 +278,24 @@ struct freq_attr *cpufreq_generic_attr[] = {
283}; 278};
284EXPORT_SYMBOL_GPL(cpufreq_generic_attr); 279EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
285 280
286/*
287 * if you use these, you must assure that the frequency table is valid
288 * all the time between get_attr and put_attr!
289 */
290void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
291 unsigned int cpu)
292{
293 pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
294 per_cpu(cpufreq_show_table, cpu) = table;
295}
296EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
297
298void cpufreq_frequency_table_put_attr(unsigned int cpu)
299{
300 pr_debug("clearing show_table for cpu %u\n", cpu);
301 per_cpu(cpufreq_show_table, cpu) = NULL;
302}
303EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
304
305int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 281int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
306 struct cpufreq_frequency_table *table) 282 struct cpufreq_frequency_table *table)
307{ 283{
308 int ret = cpufreq_frequency_table_cpuinfo(policy, table); 284 int ret = cpufreq_frequency_table_cpuinfo(policy, table);
309 285
310 if (!ret) 286 if (!ret)
311 cpufreq_frequency_table_get_attr(table, policy->cpu); 287 policy->freq_table = table;
312 288
313 return ret; 289 return ret;
314} 290}
315EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); 291EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
316 292
317void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) 293struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
318{
319 pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
320 policy->cpu, policy->last_cpu);
321 per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
322 policy->last_cpu);
323 per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
324}
325 294
326struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 295struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
327{ 296{
328 return per_cpu(cpufreq_show_table, cpu); 297 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
298 return policy ? policy->freq_table : NULL;
329} 299}
330EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 300EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
331 301
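
After this change the frequency table lives on the policy itself: cpufreq_table_validate_and_show() stores it in policy->freq_table, and cpufreq_frequency_get_table() resolves it through the CPU's policy (via cpufreq_cpu_get_raw()) instead of a per-CPU show_table shadow copy. That is why the cpufreq_frequency_table_put_attr() calls and the cpufreq_generic_exit callbacks are deleted from the drivers below. A minimal sketch of the driver side, with foo_freq_table and foo_dump_freqs() as hypothetical names:

        static int foo_cpufreq_init(struct cpufreq_policy *policy)
        {
                /* validates the table and stores it in policy->freq_table */
                return cpufreq_table_validate_and_show(policy, foo_freq_table);
        }

        static void foo_dump_freqs(unsigned int cpu)
        {
                /* now resolved via the CPU's policy, no per-CPU copy to maintain */
                struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
                int i;

                if (!table)
                        return;
                for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
                        if (table[i].frequency != CPUFREQ_ENTRY_INVALID)
                                pr_info("freq[%d] = %u kHz\n", i, table[i].frequency);
        }
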
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index d83e8266a58e..1d723dc8880c 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
265 265
266 freqs.new = new_khz; 266 freqs.new = new_khz;
267 267
268 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 268 cpufreq_freq_transition_begin(policy, &freqs);
269 local_irq_save(flags); 269 local_irq_save(flags);
270 270
271 if (new_khz != stock_freq) { 271 if (new_khz != stock_freq) {
@@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
314 314
315 gx_params->pci_suscfg = suscfg; 315 gx_params->pci_suscfg = suscfg;
316 316
317 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 317 cpufreq_freq_transition_end(policy, &freqs, 0);
318 318
319 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", 319 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
320 gx_params->on_duration * 32, gx_params->off_duration * 32); 320 gx_params->on_duration * 32, gx_params->off_duration * 32);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 53c6ac637e10..a22b5d182e0e 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -332,7 +332,6 @@ acpi_cpufreq_cpu_exit (
332 pr_debug("acpi_cpufreq_cpu_exit\n"); 332 pr_debug("acpi_cpufreq_cpu_exit\n");
333 333
334 if (data) { 334 if (data) {
335 cpufreq_frequency_table_put_attr(policy->cpu);
336 acpi_io_data[policy->cpu] = NULL; 335 acpi_io_data[policy->cpu] = NULL;
337 acpi_processor_unregister_performance(&data->acpi_data, 336 acpi_processor_unregister_performance(&data->acpi_data,
338 policy->cpu); 337 policy->cpu);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ce69059be1fc..e27fca86fe4f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -144,7 +144,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
144 .target_index = imx6q_set_target, 144 .target_index = imx6q_set_target,
145 .get = cpufreq_generic_get, 145 .get = cpufreq_generic_get,
146 .init = imx6q_cpufreq_init, 146 .init = imx6q_cpufreq_init,
147 .exit = cpufreq_generic_exit,
148 .name = "imx6q-cpufreq", 147 .name = "imx6q-cpufreq",
149 .attr = cpufreq_generic_attr, 148 .attr = cpufreq_generic_attr,
150}; 149};
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 0e27844e8c2d..e5122f1bfe78 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -122,7 +122,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
122 return 0; 122 return 0;
123 } 123 }
124 124
125 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 125 cpufreq_freq_transition_begin(policy, &freqs);
126 126
127 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); 127 cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
128 128
@@ -143,7 +143,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
143 */ 143 */
144 set_cpus_allowed(current, cpus_allowed); 144 set_cpus_allowed(current, cpus_allowed);
145 145
146 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 146 cpufreq_freq_transition_end(policy, &freqs, 0);
147 147
148 return 0; 148 return 0;
149} 149}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c788abf1c457..099967302bf2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,12 +34,15 @@
34 34
35#define SAMPLE_COUNT 3 35#define SAMPLE_COUNT 3
36 36
37#define BYT_RATIOS 0x66a 37#define BYT_RATIOS 0x66a
38#define BYT_VIDS 0x66b 38#define BYT_VIDS 0x66b
39#define BYT_TURBO_RATIOS 0x66c
39 40
40#define FRAC_BITS 8 41
42#define FRAC_BITS 6
41#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 43#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
42#define fp_toint(X) ((X) >> FRAC_BITS) 44#define fp_toint(X) ((X) >> FRAC_BITS)
45#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
43 46
44static inline int32_t mul_fp(int32_t x, int32_t y) 47static inline int32_t mul_fp(int32_t x, int32_t y)
45{ 48{
@@ -96,8 +99,7 @@ struct cpudata {
96 u64 prev_aperf; 99 u64 prev_aperf;
97 u64 prev_mperf; 100 u64 prev_mperf;
98 unsigned long long prev_tsc; 101 unsigned long long prev_tsc;
99 int sample_ptr; 102 struct sample sample;
100 struct sample samples[SAMPLE_COUNT];
101}; 103};
102 104
103static struct cpudata **all_cpu_data; 105static struct cpudata **all_cpu_data;
@@ -151,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
151 pid->setpoint = setpoint; 153 pid->setpoint = setpoint;
152 pid->deadband = deadband; 154 pid->deadband = deadband;
153 pid->integral = int_tofp(integral); 155 pid->integral = int_tofp(integral);
154 pid->last_err = setpoint - busy; 156 pid->last_err = int_tofp(setpoint) - int_tofp(busy);
155} 157}
156 158
157static inline void pid_p_gain_set(struct _pid *pid, int percent) 159static inline void pid_p_gain_set(struct _pid *pid, int percent)
@@ -357,7 +359,7 @@ static int byt_get_min_pstate(void)
357{ 359{
358 u64 value; 360 u64 value;
359 rdmsrl(BYT_RATIOS, value); 361 rdmsrl(BYT_RATIOS, value);
360 return value & 0xFF; 362 return (value >> 8) & 0xFF;
361} 363}
362 364
363static int byt_get_max_pstate(void) 365static int byt_get_max_pstate(void)
@@ -367,6 +369,13 @@ static int byt_get_max_pstate(void)
367 return (value >> 16) & 0xFF; 369 return (value >> 16) & 0xFF;
368} 370}
369 371
372static int byt_get_turbo_pstate(void)
373{
374 u64 value;
375 rdmsrl(BYT_TURBO_RATIOS, value);
376 return value & 0x3F;
377}
378
370static void byt_set_pstate(struct cpudata *cpudata, int pstate) 379static void byt_set_pstate(struct cpudata *cpudata, int pstate)
371{ 380{
372 u64 val; 381 u64 val;
@@ -437,7 +446,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
437 if (limits.no_turbo) 446 if (limits.no_turbo)
438 val |= (u64)1 << 32; 447 val |= (u64)1 << 32;
439 448
440 wrmsrl(MSR_IA32_PERF_CTL, val); 449 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
441} 450}
442 451
443static struct cpu_defaults core_params = { 452static struct cpu_defaults core_params = {
@@ -469,7 +478,7 @@ static struct cpu_defaults byt_params = {
469 .funcs = { 478 .funcs = {
470 .get_max = byt_get_max_pstate, 479 .get_max = byt_get_max_pstate,
471 .get_min = byt_get_min_pstate, 480 .get_min = byt_get_min_pstate,
472 .get_turbo = byt_get_max_pstate, 481 .get_turbo = byt_get_turbo_pstate,
473 .set = byt_set_pstate, 482 .set = byt_set_pstate,
474 .get_vid = byt_get_vid, 483 .get_vid = byt_get_vid,
475 }, 484 },
@@ -547,18 +556,20 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
547static inline void intel_pstate_calc_busy(struct cpudata *cpu, 556static inline void intel_pstate_calc_busy(struct cpudata *cpu,
548 struct sample *sample) 557 struct sample *sample)
549{ 558{
550 u64 core_pct; 559 int32_t core_pct;
551 u64 c0_pct; 560 int32_t c0_pct;
561
562 core_pct = div_fp(int_tofp((sample->aperf)),
563 int_tofp((sample->mperf)));
564 core_pct = mul_fp(core_pct, int_tofp(100));
565 FP_ROUNDUP(core_pct);
552 566
553 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 567 c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
554 568
555 c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
556 sample->freq = fp_toint( 569 sample->freq = fp_toint(
557 mul_fp(int_tofp(cpu->pstate.max_pstate), 570 mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
558 int_tofp(core_pct * 1000)));
559 571
560 sample->core_pct_busy = mul_fp(int_tofp(core_pct), 572 sample->core_pct_busy = mul_fp(core_pct, c0_pct);
561 div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
562} 573}
563 574
564static inline void intel_pstate_sample(struct cpudata *cpu) 575static inline void intel_pstate_sample(struct cpudata *cpu)
@@ -570,15 +581,18 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
570 rdmsrl(MSR_IA32_MPERF, mperf); 581 rdmsrl(MSR_IA32_MPERF, mperf);
571 tsc = native_read_tsc(); 582 tsc = native_read_tsc();
572 583
573 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 584 aperf = aperf >> FRAC_BITS;
574 cpu->samples[cpu->sample_ptr].aperf = aperf; 585 mperf = mperf >> FRAC_BITS;
575 cpu->samples[cpu->sample_ptr].mperf = mperf; 586 tsc = tsc >> FRAC_BITS;
576 cpu->samples[cpu->sample_ptr].tsc = tsc; 587
577 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; 588 cpu->sample.aperf = aperf;
578 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; 589 cpu->sample.mperf = mperf;
579 cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc; 590 cpu->sample.tsc = tsc;
591 cpu->sample.aperf -= cpu->prev_aperf;
592 cpu->sample.mperf -= cpu->prev_mperf;
593 cpu->sample.tsc -= cpu->prev_tsc;
580 594
581 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); 595 intel_pstate_calc_busy(cpu, &cpu->sample);
582 596
583 cpu->prev_aperf = aperf; 597 cpu->prev_aperf = aperf;
584 cpu->prev_mperf = mperf; 598 cpu->prev_mperf = mperf;
@@ -598,10 +612,11 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
598{ 612{
599 int32_t core_busy, max_pstate, current_pstate; 613 int32_t core_busy, max_pstate, current_pstate;
600 614
601 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; 615 core_busy = cpu->sample.core_pct_busy;
602 max_pstate = int_tofp(cpu->pstate.max_pstate); 616 max_pstate = int_tofp(cpu->pstate.max_pstate);
603 current_pstate = int_tofp(cpu->pstate.current_pstate); 617 current_pstate = int_tofp(cpu->pstate.current_pstate);
604 return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 618 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
619 return FP_ROUNDUP(core_busy);
605} 620}
606 621
607static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 622static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
@@ -631,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
631 646
632 intel_pstate_sample(cpu); 647 intel_pstate_sample(cpu);
633 648
634 sample = &cpu->samples[cpu->sample_ptr]; 649 sample = &cpu->sample;
635 650
636 intel_pstate_adjust_busy_pstate(cpu); 651 intel_pstate_adjust_busy_pstate(cpu);
637 652
@@ -712,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
712 cpu = all_cpu_data[cpu_num]; 727 cpu = all_cpu_data[cpu_num];
713 if (!cpu) 728 if (!cpu)
714 return 0; 729 return 0;
715 sample = &cpu->samples[cpu->sample_ptr]; 730 sample = &cpu->sample;
716 return sample->freq; 731 return sample->freq;
717} 732}
718 733
@@ -756,14 +771,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
756 return 0; 771 return 0;
757} 772}
758 773
759static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) 774static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
760{ 775{
761 int cpu = policy->cpu; 776 int cpu_num = policy->cpu;
777 struct cpudata *cpu = all_cpu_data[cpu_num];
762 778
763 del_timer(&all_cpu_data[cpu]->timer); 779 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
764 kfree(all_cpu_data[cpu]); 780
765 all_cpu_data[cpu] = NULL; 781 del_timer_sync(&all_cpu_data[cpu_num]->timer);
766 return 0; 782 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
783 kfree(all_cpu_data[cpu_num]);
784 all_cpu_data[cpu_num] = NULL;
767} 785}
768 786
769static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 787static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -801,7 +819,7 @@ static struct cpufreq_driver intel_pstate_driver = {
801 .setpolicy = intel_pstate_set_policy, 819 .setpolicy = intel_pstate_set_policy,
802 .get = intel_pstate_get, 820 .get = intel_pstate_get,
803 .init = intel_pstate_cpu_init, 821 .init = intel_pstate_cpu_init,
804 .exit = intel_pstate_cpu_exit, 822 .stop_cpu = intel_pstate_stop_cpu,
805 .name = "intel_pstate", 823 .name = "intel_pstate",
806}; 824};
807 825
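
The intel_pstate changes collapse the three-entry sample ring into a single struct sample, move the busy calculation to fixed-point math (FRAC_BITS drops from 8 to 6, and the raw APERF/MPERF/TSC deltas are pre-shifted by FRAC_BITS, presumably to keep the fixed-point intermediates in range), read the Baytrail turbo ratio from its own MSR, and replace ->exit with ->stop_cpu, which also parks the CPU at its minimum P-state on the way down. A sketch of the fixed-point helpers and the core-busy percentage, with the helpers written out long-hand for illustration:

        #define FRAC_BITS       6
        #define int_tofp(X)     ((int64_t)(X) << FRAC_BITS)
        #define fp_toint(X)     ((X) >> FRAC_BITS)

        static int32_t mul_fp(int32_t x, int32_t y)
        {
                return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
        }

        static int32_t div_fp(int32_t x, int32_t y)
        {
                return ((int64_t)x << FRAC_BITS) / y;   /* sketch; the driver uses div_s64() */
        }

        /* aperf/mperf are deltas that were already shifted right by FRAC_BITS */
        static int32_t core_pct_busy(uint64_t aperf, uint64_t mperf)
        {
                int32_t core_pct = div_fp(int_tofp(aperf), int_tofp(mperf));

                return mul_fp(core_pct, int_tofp(100)); /* percentage, fp format */
        }
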
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index eb7abe345b50..3d114bc5a97a 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -102,7 +102,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
102 .verify = cpufreq_generic_frequency_table_verify, 102 .verify = cpufreq_generic_frequency_table_verify,
103 .target_index = kirkwood_cpufreq_target, 103 .target_index = kirkwood_cpufreq_target,
104 .init = kirkwood_cpufreq_cpu_init, 104 .init = kirkwood_cpufreq_cpu_init,
105 .exit = cpufreq_generic_exit,
106 .name = "kirkwood-cpufreq", 105 .name = "kirkwood-cpufreq",
107 .attr = cpufreq_generic_attr, 106 .attr = cpufreq_generic_attr,
108}; 107};
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 45bafddfd8ea..5c440f87ba8a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -269,7 +269,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
269 freqs.old = calc_speed(longhaul_get_cpu_mult()); 269 freqs.old = calc_speed(longhaul_get_cpu_mult());
270 freqs.new = speed; 270 freqs.new = speed;
271 271
272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 272 cpufreq_freq_transition_begin(policy, &freqs);
273 273
274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
275 fsb, mult/10, mult%10, print_speed(speed/1000)); 275 fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
386 } 386 }
387 } 387 }
388 /* Report true CPU frequency */ 388 /* Report true CPU frequency */
389 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 389 cpufreq_freq_transition_end(policy, &freqs, 0);
390 390
391 if (!bm_timeout) 391 if (!bm_timeout)
392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 392 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -913,7 +913,6 @@ static struct cpufreq_driver longhaul_driver = {
913 .target_index = longhaul_target, 913 .target_index = longhaul_target,
914 .get = longhaul_get, 914 .get = longhaul_get,
915 .init = longhaul_cpu_init, 915 .init = longhaul_cpu_init,
916 .exit = cpufreq_generic_exit,
917 .name = "longhaul", 916 .name = "longhaul",
918 .attr = cpufreq_generic_attr, 917 .attr = cpufreq_generic_attr,
919}; 918};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index b6581abc9207..a3588d61d933 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -104,7 +104,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
104 104
105static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) 105static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
106{ 106{
107 cpufreq_frequency_table_put_attr(policy->cpu);
108 clk_put(policy->clk); 107 clk_put(policy->clk);
109 return 0; 108 return 0;
110} 109}
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 590f5b66d181..5f69c9aa703c 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -143,7 +143,6 @@ fail:
143 143
144static int omap_cpu_exit(struct cpufreq_policy *policy) 144static int omap_cpu_exit(struct cpufreq_policy *policy)
145{ 145{
146 cpufreq_frequency_table_put_attr(policy->cpu);
147 freq_table_free(); 146 freq_table_free();
148 clk_put(policy->clk); 147 clk_put(policy->clk);
149 return 0; 148 return 0;
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 3d1cba9fd5f9..74f593e70e19 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -237,7 +237,6 @@ static struct cpufreq_driver p4clockmod_driver = {
237 .verify = cpufreq_generic_frequency_table_verify, 237 .verify = cpufreq_generic_frequency_table_verify,
238 .target_index = cpufreq_p4_target, 238 .target_index = cpufreq_p4_target,
239 .init = cpufreq_p4_cpu_init, 239 .init = cpufreq_p4_cpu_init,
240 .exit = cpufreq_generic_exit,
241 .get = cpufreq_p4_get, 240 .get = cpufreq_p4_get,
242 .name = "p4-clockmod", 241 .name = "p4-clockmod",
243 .attr = cpufreq_generic_attr, 242 .attr = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 0426008380d8..6a2b7d3e85a7 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -234,7 +234,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
234 if (sdcpwr_mapbase) 234 if (sdcpwr_mapbase)
235 iounmap(sdcpwr_mapbase); 235 iounmap(sdcpwr_mapbase);
236 236
237 cpufreq_frequency_table_put_attr(policy->cpu);
238 return 0; 237 return 0;
239} 238}
240 239
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1c0f1067af73..728a2d879499 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,7 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
215 215
216 freqs.old = policy->cur; 216 freqs.old = policy->cur;
217 freqs.new = target_freq; 217 freqs.new = target_freq;
218 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 218 cpufreq_freq_transition_begin(policy, &freqs);
219 219
220 input_buffer = 0x1 | (((target_freq * 100) 220 input_buffer = 0x1 | (((target_freq * 100)
221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); 221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -231,7 +231,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
231 status = ioread16(&pcch_hdr->status); 231 status = ioread16(&pcch_hdr->status);
232 iowrite16(0, &pcch_hdr->status); 232 iowrite16(0, &pcch_hdr->status);
233 233
234 cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE); 234 cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
235 spin_unlock(&pcc_lock); 235 spin_unlock(&pcc_lock);
236 236
237 if (status != CMD_COMPLETE) { 237 if (status != CMD_COMPLETE) {
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index b9a444e358b5..62c6f2e5afce 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -148,11 +148,11 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
148 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 148 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
149 freqs.new = busfreq * clock_ratio[best_i].driver_data; 149 freqs.new = busfreq * clock_ratio[best_i].driver_data;
150 150
151 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 151 cpufreq_freq_transition_begin(policy, &freqs);
152 152
153 powernow_k6_set_cpu_multiplier(best_i); 153 powernow_k6_set_cpu_multiplier(best_i);
154 154
155 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 155 cpufreq_freq_transition_end(policy, &freqs, 0);
156 156
157 return 0; 157 return 0;
158} 158}
@@ -231,7 +231,6 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
231 if (i == max_multiplier) 231 if (i == max_multiplier)
232 powernow_k6_target(policy, i); 232 powernow_k6_target(policy, i);
233 } 233 }
234 cpufreq_frequency_table_put_attr(policy->cpu);
235 return 0; 234 return 0;
236} 235}
237 236
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 946708a1d745..f911645c3f6d 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,7 +269,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
269 269
270 freqs.new = powernow_table[index].frequency; 270 freqs.new = powernow_table[index].frequency;
271 271
272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 272 cpufreq_freq_transition_begin(policy, &freqs);
273 273
274 /* Now do the magic poking into the MSRs. */ 274 /* Now do the magic poking into the MSRs. */
275 275
@@ -290,7 +290,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
290 if (have_a0 == 1) 290 if (have_a0 == 1)
291 local_irq_enable(); 291 local_irq_enable();
292 292
293 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 293 cpufreq_freq_transition_end(policy, &freqs, 0);
294 294
295 return 0; 295 return 0;
296} 296}
@@ -664,8 +664,6 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
664 664
665static int powernow_cpu_exit(struct cpufreq_policy *policy) 665static int powernow_cpu_exit(struct cpufreq_policy *policy)
666{ 666{
667 cpufreq_frequency_table_put_attr(policy->cpu);
668
669#ifdef CONFIG_X86_POWERNOW_K7_ACPI 667#ifdef CONFIG_X86_POWERNOW_K7_ACPI
670 if (acpi_processor_perf) { 668 if (acpi_processor_perf) {
671 acpi_processor_unregister_performance(acpi_processor_perf, 0); 669 acpi_processor_unregister_performance(acpi_processor_perf, 0);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e10b646634d7..770a9e1b3468 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -963,9 +963,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
963 policy = cpufreq_cpu_get(smp_processor_id()); 963 policy = cpufreq_cpu_get(smp_processor_id());
964 cpufreq_cpu_put(policy); 964 cpufreq_cpu_put(policy);
965 965
966 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 966 cpufreq_freq_transition_begin(policy, &freqs);
967 res = transition_fid_vid(data, fid, vid); 967 res = transition_fid_vid(data, fid, vid);
968 cpufreq_notify_post_transition(policy, &freqs, res); 968 cpufreq_freq_transition_end(policy, &freqs, res);
969 969
970 return res; 970 return res;
971} 971}
@@ -1076,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1076{ 1076{
1077 struct powernow_k8_data *data; 1077 struct powernow_k8_data *data;
1078 struct init_on_cpu init_on_cpu; 1078 struct init_on_cpu init_on_cpu;
1079 int rc; 1079 int rc, cpu;
1080 1080
1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1082 if (rc) 1082 if (rc)
@@ -1140,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", 1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
1141 data->currfid, data->currvid); 1141 data->currfid, data->currvid);
1142 1142
1143 per_cpu(powernow_data, pol->cpu) = data; 1143 /* Point all the CPUs in this policy to the same data */
1144 for_each_cpu(cpu, pol->cpus)
1145 per_cpu(powernow_data, cpu) = data;
1144 1146
1145 return 0; 1147 return 0;
1146 1148
@@ -1155,17 +1157,17 @@ err_out:
1155static int powernowk8_cpu_exit(struct cpufreq_policy *pol) 1157static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
1156{ 1158{
1157 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1159 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1160 int cpu;
1158 1161
1159 if (!data) 1162 if (!data)
1160 return -EINVAL; 1163 return -EINVAL;
1161 1164
1162 powernow_k8_cpu_exit_acpi(data); 1165 powernow_k8_cpu_exit_acpi(data);
1163 1166
1164 cpufreq_frequency_table_put_attr(pol->cpu);
1165
1166 kfree(data->powernow_table); 1167 kfree(data->powernow_table);
1167 kfree(data); 1168 kfree(data);
1168 per_cpu(powernow_data, pol->cpu) = NULL; 1169 for_each_cpu(cpu, pol->cpus)
1170 per_cpu(powernow_data, cpu) = NULL;
1169 1171
1170 return 0; 1172 return 0;
1171} 1173}
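
powernow-k8 now points per_cpu(powernow_data, ...) at the shared data structure for every CPU in the policy, and clears all of those pointers again on exit, presumably so that lookups keyed on an arbitrary policy member (the ->get() path, for instance) no longer hit a NULL pointer. A minimal sketch of such a lookup, with foo_fid_to_khz() as a hypothetical conversion helper:

        static unsigned int powernowk8_get_sketch(unsigned int cpu)
        {
                /* cpu may be any member of the policy, not just policy->cpu */
                struct powernow_k8_data *data = per_cpu(powernow_data, cpu);

                if (!data)
                        return 0;
                return foo_fid_to_khz(data->currfid);   /* hypothetical helper */
        }
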
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 051000f44ca2..3bd9123e7026 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <sysdev/fsl_soc.h>
24 25
25/** 26/**
26 * struct cpu_data - per CPU data struct 27 * struct cpu_data - per CPU data struct
@@ -205,7 +206,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
205 for_each_cpu(i, per_cpu(cpu_mask, cpu)) 206 for_each_cpu(i, per_cpu(cpu_mask, cpu))
206 per_cpu(cpu_data, i) = data; 207 per_cpu(cpu_data, i) = data;
207 208
208 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 209 policy->cpuinfo.transition_latency =
210 (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
209 of_node_put(np); 211 of_node_put(np);
210 212
211 return 0; 213 return 0;
@@ -228,7 +230,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
228 struct cpu_data *data = per_cpu(cpu_data, policy->cpu); 230 struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
229 unsigned int cpu; 231 unsigned int cpu;
230 232
231 cpufreq_frequency_table_put_attr(policy->cpu);
232 of_node_put(data->parent); 233 of_node_put(data->parent);
233 kfree(data->table); 234 kfree(data->table);
234 kfree(data); 235 kfree(data);
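
The corenet driver stops advertising CPUFREQ_ETERNAL and instead reports twelve platform-clock cycles as its transition latency. As a worked example, assuming fsl_get_sys_freq() returns 500000000 (a 500 MHz platform clock, purely for illustration): (12 * NSEC_PER_SEC) / 500000000 = 12,000,000,000 / 500,000,000 = 24 ns.
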
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e42ca9c31cea..af7b1cabd1e7 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -141,7 +141,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
141 .verify = cpufreq_generic_frequency_table_verify, 141 .verify = cpufreq_generic_frequency_table_verify,
142 .target_index = cbe_cpufreq_target, 142 .target_index = cbe_cpufreq_target,
143 .init = cbe_cpufreq_cpu_init, 143 .init = cbe_cpufreq_cpu_init,
144 .exit = cpufreq_generic_exit,
145 .name = "cbe-cpufreq", 144 .name = "cbe-cpufreq",
146 .flags = CPUFREQ_CONST_LOOPS, 145 .flags = CPUFREQ_CONST_LOOPS,
147}; 146};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index a9195a86b069..e24269ab4e9b 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -427,7 +427,6 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
427 .verify = cpufreq_generic_frequency_table_verify, 427 .verify = cpufreq_generic_frequency_table_verify,
428 .target_index = pxa_set_target, 428 .target_index = pxa_set_target,
429 .init = pxa_cpufreq_init, 429 .init = pxa_cpufreq_init,
430 .exit = cpufreq_generic_exit,
431 .get = pxa_cpufreq_get, 430 .get = pxa_cpufreq_get,
432 .name = "PXA2xx", 431 .name = "PXA2xx",
433}; 432};
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 3785687e9d70..a01275900389 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -205,7 +205,6 @@ static struct cpufreq_driver pxa3xx_cpufreq_driver = {
205 .verify = cpufreq_generic_frequency_table_verify, 205 .verify = cpufreq_generic_frequency_table_verify,
206 .target_index = pxa3xx_cpufreq_set, 206 .target_index = pxa3xx_cpufreq_set,
207 .init = pxa3xx_cpufreq_init, 207 .init = pxa3xx_cpufreq_init,
208 .exit = cpufreq_generic_exit,
209 .get = pxa3xx_cpufreq_get, 208 .get = pxa3xx_cpufreq_get,
210 .name = "pxa3xx-cpufreq", 209 .name = "pxa3xx-cpufreq",
211}; 210};
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 25069741b507..a3dc192d21f9 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -217,7 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
217 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); 217 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
218 218
219 /* start the frequency change */ 219 /* start the frequency change */
220 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE); 220 cpufreq_freq_transition_begin(policy, &freqs.freqs);
221 221
222 /* If hclk is staying the same, then we do not need to 222 /* If hclk is staying the same, then we do not need to
223 * re-write the IO or the refresh timings whilst we are changing 223 * re-write the IO or the refresh timings whilst we are changing
@@ -261,7 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
261 local_irq_restore(flags); 261 local_irq_restore(flags);
262 262
263 /* notify everyone we've done this */ 263 /* notify everyone we've done this */
264 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE); 264 cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
265 265
266 s3c_freq_dbg("%s: finished\n", __func__); 266 s3c_freq_dbg("%s: finished\n", __func__);
267 return 0; 267 return 0;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 55a8e9fa9435..72421534fff5 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -18,7 +18,6 @@
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/regulator/consumer.h> 20#include <linux/regulator/consumer.h>
21#include <linux/suspend.h>
22 21
23#include <mach/map.h> 22#include <mach/map.h>
24#include <mach/regs-clock.h> 23#include <mach/regs-clock.h>
@@ -435,18 +434,6 @@ exit:
435 return ret; 434 return ret;
436} 435}
437 436
438#ifdef CONFIG_PM
439static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
440{
441 return 0;
442}
443
444static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
445{
446 return 0;
447}
448#endif
449
450static int check_mem_type(void __iomem *dmc_reg) 437static int check_mem_type(void __iomem *dmc_reg)
451{ 438{
452 unsigned long val; 439 unsigned long val;
@@ -502,6 +489,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
502 s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); 489 s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
503 s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); 490 s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
504 491
492 policy->suspend_freq = SLEEP_FREQ;
505 return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); 493 return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
506 494
507out_dmc1: 495out_dmc1:
@@ -511,32 +499,6 @@ out_dmc0:
511 return ret; 499 return ret;
512} 500}
513 501
514static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
515 unsigned long event, void *ptr)
516{
517 int ret;
518
519 switch (event) {
520 case PM_SUSPEND_PREPARE:
521 ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
522 if (ret < 0)
523 return NOTIFY_BAD;
524
525 /* Disable updation of cpu frequency */
526 no_cpufreq_access = true;
527 return NOTIFY_OK;
528 case PM_POST_RESTORE:
529 case PM_POST_SUSPEND:
530 /* Enable updation of cpu frequency */
531 no_cpufreq_access = false;
532 cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
533
534 return NOTIFY_OK;
535 }
536
537 return NOTIFY_DONE;
538}
539
540static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, 502static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
541 unsigned long event, void *ptr) 503 unsigned long event, void *ptr)
542{ 504{
@@ -558,15 +520,11 @@ static struct cpufreq_driver s5pv210_driver = {
558 .init = s5pv210_cpu_init, 520 .init = s5pv210_cpu_init,
559 .name = "s5pv210", 521 .name = "s5pv210",
560#ifdef CONFIG_PM 522#ifdef CONFIG_PM
561 .suspend = s5pv210_cpufreq_suspend, 523 .suspend = cpufreq_generic_suspend,
562 .resume = s5pv210_cpufreq_resume, 524 .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
563#endif 525#endif
564}; 526};
565 527
566static struct notifier_block s5pv210_cpufreq_notifier = {
567 .notifier_call = s5pv210_cpufreq_notifier_event,
568};
569
570static struct notifier_block s5pv210_cpufreq_reboot_notifier = { 528static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
571 .notifier_call = s5pv210_cpufreq_reboot_notifier_event, 529 .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
572}; 530};
@@ -586,7 +544,6 @@ static int __init s5pv210_cpufreq_init(void)
586 return PTR_ERR(int_regulator); 544 return PTR_ERR(int_regulator);
587 } 545 }
588 546
589 register_pm_notifier(&s5pv210_cpufreq_notifier);
590 register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier); 547 register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
591 548
592 return cpufreq_register_driver(&s5pv210_driver); 549 return cpufreq_register_driver(&s5pv210_driver);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 6adb354e359c..69371bf0886d 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -93,7 +93,6 @@ static struct cpufreq_driver sc520_freq_driver = {
93 .verify = cpufreq_generic_frequency_table_verify, 93 .verify = cpufreq_generic_frequency_table_verify,
94 .target_index = sc520_freq_target, 94 .target_index = sc520_freq_target,
95 .init = sc520_freq_cpu_init, 95 .init = sc520_freq_cpu_init,
96 .exit = cpufreq_generic_exit,
97 .name = "sc520_freq", 96 .name = "sc520_freq",
98 .attr = cpufreq_generic_attr, 97 .attr = cpufreq_generic_attr,
99}; 98};
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 387af12503a6..86628e22b2a3 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
68 freqs.new = (freq + 500) / 1000; 68 freqs.new = (freq + 500) / 1000;
69 freqs.flags = 0; 69 freqs.flags = 0;
70 70
71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 71 cpufreq_freq_transition_begin(policy, &freqs);
72 set_cpus_allowed_ptr(current, &cpus_allowed); 72 set_cpus_allowed_ptr(current, &cpus_allowed);
73 clk_set_rate(cpuclk, freq); 73 clk_set_rate(cpuclk, freq);
74 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 74 cpufreq_freq_transition_end(policy, &freqs, 0);
75 75
76 dev_dbg(dev, "set frequency %lu Hz\n", freq); 76 dev_dbg(dev, "set frequency %lu Hz\n", freq);
77 77
@@ -143,7 +143,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
143 unsigned int cpu = policy->cpu; 143 unsigned int cpu = policy->cpu;
144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); 144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
145 145
146 cpufreq_frequency_table_put_attr(cpu);
147 clk_put(cpuclk); 146 clk_put(cpuclk);
148 147
149 return 0; 148 return 0;
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 62aa23e219d4..b73feeb666f9 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -301,10 +301,8 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
301 301
302static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) 302static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
303{ 303{
304 if (cpufreq_us2e_driver) { 304 if (cpufreq_us2e_driver)
305 cpufreq_frequency_table_put_attr(policy->cpu);
306 us2e_freq_target(policy, 0); 305 us2e_freq_target(policy, 0);
307 }
308 306
309 return 0; 307 return 0;
310} 308}
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 724ffbd7105d..9bb42ba50efa 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -156,10 +156,8 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
156 156
157static int us3_freq_cpu_exit(struct cpufreq_policy *policy) 157static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
158{ 158{
159 if (cpufreq_us3_driver) { 159 if (cpufreq_us3_driver)
160 cpufreq_frequency_table_put_attr(policy->cpu);
161 us3_freq_target(policy, 0); 160 us3_freq_target(policy, 0);
162 }
163 161
164 return 0; 162 return 0;
165} 163}
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 5c86e3fa5593..4cfdcff8a310 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/of_device.h> 21#include <linux/of_device.h>
22#include <linux/platform_device.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/types.h> 24#include <linux/types.h>
24 25
@@ -163,11 +164,10 @@ static struct cpufreq_driver spear_cpufreq_driver = {
163 .target_index = spear_cpufreq_target, 164 .target_index = spear_cpufreq_target,
164 .get = cpufreq_generic_get, 165 .get = cpufreq_generic_get,
165 .init = spear_cpufreq_init, 166 .init = spear_cpufreq_init,
166 .exit = cpufreq_generic_exit,
167 .attr = cpufreq_generic_attr, 167 .attr = cpufreq_generic_attr,
168}; 168};
169 169
170static int spear_cpufreq_driver_init(void) 170static int spear_cpufreq_probe(struct platform_device *pdev)
171{ 171{
172 struct device_node *np; 172 struct device_node *np;
173 const struct property *prop; 173 const struct property *prop;
@@ -235,7 +235,15 @@ out_put_node:
235 of_node_put(np); 235 of_node_put(np);
236 return ret; 236 return ret;
237} 237}
238late_initcall(spear_cpufreq_driver_init); 238
239static struct platform_driver spear_cpufreq_platdrv = {
240 .driver = {
241 .name = "spear-cpufreq",
242 .owner = THIS_MODULE,
243 },
244 .probe = spear_cpufreq_probe,
245};
246module_platform_driver(spear_cpufreq_platdrv);
239 247
240MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>"); 248MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
241MODULE_DESCRIPTION("SPEAr CPUFreq driver"); 249MODULE_DESCRIPTION("SPEAr CPUFreq driver");
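
The SPEAr driver is converted from a late_initcall into a platform driver, so its probe only runs once a matching "spear-cpufreq" platform device exists. A minimal sketch of how such a device could be registered from platform setup code (illustrative; the actual registration happens outside this diff):

        static int __init spear_cpufreq_device_init(void)
        {
                struct platform_device *pdev;

                pdev = platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
                return PTR_ERR_OR_ZERO(pdev);
        }
        device_initcall(spear_cpufreq_device_init);
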
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 4e1daca5ce3b..6723f0390f20 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -406,8 +406,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
406 if (!per_cpu(centrino_model, cpu)) 406 if (!per_cpu(centrino_model, cpu))
407 return -ENODEV; 407 return -ENODEV;
408 408
409 cpufreq_frequency_table_put_attr(cpu);
410
411 per_cpu(centrino_model, cpu) = NULL; 409 per_cpu(centrino_model, cpu) = NULL;
412 410
413 return 0; 411 return 0;
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 7639b2be2a90..394ac159312a 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -311,7 +311,6 @@ static struct cpufreq_driver speedstep_driver = {
311 .verify = cpufreq_generic_frequency_table_verify, 311 .verify = cpufreq_generic_frequency_table_verify,
312 .target_index = speedstep_target, 312 .target_index = speedstep_target,
313 .init = speedstep_cpu_init, 313 .init = speedstep_cpu_init,
314 .exit = cpufreq_generic_exit,
315 .get = speedstep_get, 314 .get = speedstep_get,
316 .attr = cpufreq_generic_attr, 315 .attr = cpufreq_generic_attr,
317}; 316};
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 998c17b42200..db5d274dc13a 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -280,7 +280,6 @@ static struct cpufreq_driver speedstep_driver = {
280 .verify = cpufreq_generic_frequency_table_verify, 280 .verify = cpufreq_generic_frequency_table_verify,
281 .target_index = speedstep_target, 281 .target_index = speedstep_target,
282 .init = speedstep_cpu_init, 282 .init = speedstep_cpu_init,
283 .exit = cpufreq_generic_exit,
284 .get = speedstep_get, 283 .get = speedstep_get,
285 .resume = speedstep_resume, 284 .resume = speedstep_resume,
286 .attr = cpufreq_generic_attr, 285 .attr = cpufreq_generic_attr,
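Both speedstep hunks drop the .exit callback outright: cpufreq_generic_exit() existed only to call cpufreq_frequency_table_put_attr(), and with the core now tearing down the frequency-table sysfs attribute itself there is nothing left for these drivers to do on exit. Roughly, the helper being retired amounted to the following (a simplified sketch, not the verbatim upstream body):

	int cpufreq_generic_exit(struct cpufreq_policy *policy)
	{
		/* release the scaling_available_frequencies sysfs attribute */
		cpufreq_frequency_table_put_attr(policy->cpu);
		return 0;
	}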
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index e652c1bd8d0f..63f00598a251 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -26,7 +26,6 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/suspend.h>
30 29
31static struct cpufreq_frequency_table freq_table[] = { 30static struct cpufreq_frequency_table freq_table[] = {
32 { .frequency = 216000 }, 31 { .frequency = 216000 },
@@ -47,9 +46,6 @@ static struct clk *pll_x_clk;
47static struct clk *pll_p_clk; 46static struct clk *pll_p_clk;
48static struct clk *emc_clk; 47static struct clk *emc_clk;
49 48
50static DEFINE_MUTEX(tegra_cpu_lock);
51static bool is_suspended;
52
53static int tegra_cpu_clk_set_rate(unsigned long rate) 49static int tegra_cpu_clk_set_rate(unsigned long rate)
54{ 50{
55 int ret; 51 int ret;
@@ -112,42 +108,9 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
112 108
113static int tegra_target(struct cpufreq_policy *policy, unsigned int index) 109static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
114{ 110{
115 int ret = -EBUSY; 111 return tegra_update_cpu_speed(policy, freq_table[index].frequency);
116
117 mutex_lock(&tegra_cpu_lock);
118
119 if (!is_suspended)
120 ret = tegra_update_cpu_speed(policy,
121 freq_table[index].frequency);
122
123 mutex_unlock(&tegra_cpu_lock);
124 return ret;
125} 112}
126 113
127static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
128 void *dummy)
129{
130 mutex_lock(&tegra_cpu_lock);
131 if (event == PM_SUSPEND_PREPARE) {
132 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
133 is_suspended = true;
134 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
135 freq_table[0].frequency);
136 if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
137 tegra_update_cpu_speed(policy, freq_table[0].frequency);
138 cpufreq_cpu_put(policy);
139 } else if (event == PM_POST_SUSPEND) {
140 is_suspended = false;
141 }
142 mutex_unlock(&tegra_cpu_lock);
143
144 return NOTIFY_OK;
145}
146
147static struct notifier_block tegra_cpu_pm_notifier = {
148 .notifier_call = tegra_pm_notify,
149};
150
151static int tegra_cpu_init(struct cpufreq_policy *policy) 114static int tegra_cpu_init(struct cpufreq_policy *policy)
152{ 115{
153 int ret; 116 int ret;
@@ -166,16 +129,13 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
166 return ret; 129 return ret;
167 } 130 }
168 131
169 if (policy->cpu == 0)
170 register_pm_notifier(&tegra_cpu_pm_notifier);
171
172 policy->clk = cpu_clk; 132 policy->clk = cpu_clk;
133 policy->suspend_freq = freq_table[0].frequency;
173 return 0; 134 return 0;
174} 135}
175 136
176static int tegra_cpu_exit(struct cpufreq_policy *policy) 137static int tegra_cpu_exit(struct cpufreq_policy *policy)
177{ 138{
178 cpufreq_frequency_table_put_attr(policy->cpu);
179 clk_disable_unprepare(cpu_clk); 139 clk_disable_unprepare(cpu_clk);
180 clk_disable_unprepare(emc_clk); 140 clk_disable_unprepare(emc_clk);
181 return 0; 141 return 0;
@@ -190,6 +150,9 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
190 .exit = tegra_cpu_exit, 150 .exit = tegra_cpu_exit,
191 .name = "tegra", 151 .name = "tegra",
192 .attr = cpufreq_generic_attr, 152 .attr = cpufreq_generic_attr,
153#ifdef CONFIG_PM
154 .suspend = cpufreq_generic_suspend,
155#endif
193}; 156};
194 157
195static int __init tegra_cpufreq_init(void) 158static int __init tegra_cpufreq_init(void)
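The Tegra hunks swap the driver's private PM notifier for the core's generic suspend path: tegra_cpu_init() records freq_table[0].frequency in policy->suspend_freq, and the .suspend = cpufreq_generic_suspend hook (under CONFIG_PM) drops the CPU to that frequency before suspend, which makes the is_suspended flag and mutex unnecessary. The generic helper behaves roughly as follows (a simplified sketch of the core code, not the verbatim body):

	int cpufreq_generic_suspend(struct cpufreq_policy *policy)
	{
		int ret;

		if (!policy->suspend_freq)
			return -EINVAL;

		/* CPUFREQ_RELATION_H: highest frequency at or below the target */
		ret = __cpufreq_driver_target(policy, policy->suspend_freq,
					      CPUFREQ_RELATION_H);
		if (ret)
			pr_err("%s: unable to set suspend-freq: %u, err: %d\n",
			       __func__, policy->suspend_freq, ret);

		return ret;
	}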
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 36cc330b8747..13be802b6170 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -44,9 +44,9 @@ static int ucv2_target(struct cpufreq_policy *policy,
44 freqs.old = policy->cur; 44 freqs.old = policy->cur;
45 freqs.new = target_freq; 45 freqs.new = target_freq;
46 46
47 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 47 cpufreq_freq_transition_begin(policy, &freqs);
48 ret = clk_set_rate(policy->mclk, target_freq * 1000); 48 ret = clk_set_rate(policy->mclk, target_freq * 1000);
49 cpufreq_notify_post_transition(policy, &freqs, ret); 49 cpufreq_freq_transition_end(policy, &freqs, ret);
50 50
51 return ret; 51 return ret;
52} 52}
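The unicore2 hunk shows the transition-notification pairing that replaces the open-coded CPUFREQ_PRECHANGE/POSTCHANGE calls: cpufreq_freq_transition_begin() announces the transition and serializes against a concurrent one on the same policy, while cpufreq_freq_transition_end() sends the post-change notification and, if the switch failed, notifies the old frequency back. A hedged sketch of how a driver's target() callback typically uses the pair (the example_target name and the clock handling are illustrative, not taken from this patch):

	#include <linux/clk.h>
	#include <linux/cpufreq.h>

	static int example_target(struct cpufreq_policy *policy,
				  unsigned int target_freq)
	{
		struct cpufreq_freqs freqs;
		int ret;

		freqs.old = policy->cur;
		freqs.new = target_freq;

		cpufreq_freq_transition_begin(policy, &freqs);
		/* cpufreq works in kHz, clk_set_rate() expects Hz */
		ret = clk_set_rate(policy->clk, target_freq * 1000);
		cpufreq_freq_transition_end(policy, &freqs, ret);

		return ret;
	}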