author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-24 18:51:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-24 18:51:02 -0500
commit		09da8dfa98682d871987145ed11e3232accac860 (patch)
tree		152a9bb1e52f70db6efb66fffbdc4871f749d7df /drivers/cpufreq
parent		3aacd625f20129f5a41ea3ff3b5353b0e4dabd01 (diff)
parent		7744064731a9543105e207504e0262f883bc14c0 (diff)
Merge tag 'pm+acpi-3.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
 "As far as the number of commits goes, the top spot belongs to ACPI this
  time with cpufreq in the second position and a handful of PM core, PNP
  and cpuidle updates.  They are fixes and cleanups mostly, as usual, with
  a couple of new features in the mix.

  The most visible change is probably that we will create struct
  acpi_device objects (visible in sysfs) for all devices represented in
  the ACPI tables regardless of their status and there will be a new sysfs
  attribute under those objects allowing user space to check that status
  via _STA.  Consequently, ACPI device eject or generally hot-removal will
  not delete those objects, unless the table containing the corresponding
  namespace nodes is unloaded, which is extremely rare.  Also ACPI
  container hotplug will be handled quite a bit differently and cpufreq
  will support CPU boost ("turbo") generically and not only in the
  acpi-cpufreq driver.

  Specifics:

   - ACPI core changes to make it create a struct acpi_device object for
     every device represented in the ACPI tables during all namespace
     scans regardless of the current status of that device.  In accordance
     with this, ACPI hotplug operations will not delete those objects,
     unless the underlying ACPI tables go away.

   - On top of the above, new sysfs attribute for ACPI device objects
     allowing user space to check device status by triggering the
     execution of _STA for its ACPI object.  From Srinivas Pandruvada.

   - ACPI core hotplug changes reducing code duplication, integrating the
     PCI root hotplug with the core and reworking container hotplug.

   - ACPI core simplifications making it use ACPI_COMPANION() in the code
     "glueing" ACPI device objects to "physical" devices.

   - ACPICA update to upstream version 20131218.  This adds support for
     the DBG2 and PCCT tables to ACPICA, fixes some bugs and improves
     debug facilities.  From Bob Moore, Lv Zheng and Betty Dall.

   - Init code change to carry out the early ACPI initialization earlier.
     That should allow us to use ACPI during the timekeeping
     initialization and possibly to simplify the EFI initialization too.
     From Chun-Yi Lee.

   - Cleanups of the inclusions of ACPI headers in many places all over
     from Lv Zheng and Rashika Kheria (work in progress).

   - New helper for ACPI _DSM execution and rework of the code in drivers
     that uses _DSM to execute it via the new helper.  From Jiang Liu.

   - New Win8 OSI blacklist entries from Takashi Iwai.

   - Assorted ACPI fixes and cleanups from Al Stone, Emil Goode, Hanjun
     Guo, Lan Tianyu, Masanari Iida, Oliver Neukum, Prarit Bhargava,
     Rashika Kheria, Tang Chen, Zhang Rui.

   - intel_pstate driver updates, including proper Baytrail support, from
     Dirk Brandewie and intel_pstate documentation from Ramkumar
     Ramachandra.

   - Generic CPU boost ("turbo") support for cpufreq from Lukasz Majewski.

   - powernow-k6 cpufreq driver fixes from Mikulas Patocka.

   - cpufreq core fixes and cleanups from Viresh Kumar, Jane Li, Mark
     Brown.

   - Assorted cpufreq drivers fixes and cleanups from Anson Huang, John
     Tobias, Paul Bolle, Paul Walmsley, Sachin Kamat, Shawn Guo, Viresh
     Kumar.

   - cpuidle cleanups from Bartlomiej Zolnierkiewicz.

   - Support for hibernation APM events from Bin Shi.

   - Hibernation fix to avoid bringing up nonboot CPUs with ACPI EC
     disabled during thaw transitions from Bjørn Mork.

   - PM core fixes and cleanups from Ben Dooks, Leonardo Potenza, Ulf
     Hansson.

   - PNP subsystem fixes and cleanups from Dmitry Torokhov, Levente
     Kurusa, Rashika Kheria.

   - New tool for profiling system suspend from Todd E Brandt and a
     cpupower tool cleanup from One Thousand Gnomes"

* tag 'pm+acpi-3.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (153 commits)
  thermal: exynos: boost: Automatic enable/disable of BOOST feature (at Exynos4412)
  cpufreq: exynos4x12: Change L0 driver data to CPUFREQ_BOOST_FREQ
  Documentation: cpufreq / boost: Update BOOST documentation
  cpufreq: exynos: Extend Exynos cpufreq driver to support boost
  cpufreq / boost: Kconfig: Support for software-managed BOOST
  acpi-cpufreq: Adjust the code to use the common boost attribute
  cpufreq: Add boost frequency support in core
  intel_pstate: Add trace point to report internal state.
  cpufreq: introduce cpufreq_generic_get() routine
  ARM: SA1100: Create dummy clk_get_rate() to avoid build failures
  cpufreq: stats: create sysfs entries when cpufreq_stats is a module
  cpufreq: stats: free table and remove sysfs entry in a single routine
  cpufreq: stats: remove hotplug notifiers
  cpufreq: stats: handle cpufreq_unregister_driver() and suspend/resume properly
  cpufreq: speedstep: remove unused speedstep_get_state
  platform: introduce OF style 'modalias' support for platform bus
  PM / tools: new tool for suspend/resume performance optimization
  ACPI: fix module autoloading for ACPI enumerated devices
  ACPI: add module autoloading support for ACPI enumerated devices
  ACPI: fix create_modalias() return value handling
  ...
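Editor's note: the cpufreq side of this pull is easiest to see from a driver's point of view. Below is a minimal, illustrative sketch (not taken from the patches) of a driver using the infrastructure this merge introduces or converts drivers to: policy->clk plus cpufreq_generic_get(), the CPUFREQ_NEED_INITIAL_FREQ_CHECK flag, and the generic boost opt-in via boost_supported and CPUFREQ_BOOST_FREQ. All example_* names and frequencies are made up.

/* Illustrative sketch only -- not part of this merge. */
#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *example_clk;			/* hypothetical CPU clock */

static struct cpufreq_frequency_table example_freq_table[] = {
	{ CPUFREQ_BOOST_FREQ, 1200000 },	/* usable only while boost is enabled */
	{ 0, 1000000 },
	{ 1,  800000 },
	{ 0, CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* kHz -> Hz; the core issues the PRE/POSTCHANGE notifications around this */
	return clk_set_rate(policy->clk, example_freq_table[index].frequency * 1000);
}

static int example_init(struct cpufreq_policy *policy)
{
	policy->clk = example_clk;	/* lets cpufreq_generic_get() read the rate */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	/* ask the core to sanity-check the boot frequency against the table */
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= cpufreq_generic_get,
	.init		= example_init,
	.exit		= cpufreq_generic_exit,
	.attr		= cpufreq_generic_attr,
	/* no ->set_boost given: the core falls back to cpufreq_boost_set_sw() */
	.boost_supported = true,
};

With no ->set_boost callback the core's software boost handler simply re-evaluates the frequency table and policy limits, so the CPUFREQ_BOOST_FREQ entry only becomes reachable while the global boost knob is enabled.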
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig               |   7
-rw-r--r--  drivers/cpufreq/Kconfig.arm           |  27
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c        |  86
-rw-r--r--  drivers/cpufreq/arm_big_little.c      |   3
-rw-r--r--  drivers/cpufreq/at32ap-cpufreq.c      |  17
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c        |  10
-rw-r--r--  drivers/cpufreq/cpufreq.c             | 218
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c    |   6
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h    |   2
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c       | 109
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c     |  16
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c      |  22
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c      |  28
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c  |   2
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c  |  74
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c  |  36
-rw-r--r--  drivers/cpufreq/freq_table.c          |  78
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c       | 134
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c  |   1
-rw-r--r--  drivers/cpufreq/intel_pstate.c        |  89
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c    |   1
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c   |  15
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c        |  34
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c         |  18
-rw-r--r--  drivers/cpufreq/powernow-k6.c         | 147
-rw-r--r--  drivers/cpufreq/powernow-k8.c         |   7
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c |  17
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c      |   1
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c      |   1
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c     |   2
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c     |   6
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c     |  14
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c     |  35
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c     |  23
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c      |   2
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c      |   2
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c       |  12
-rw-r--r--  drivers/cpufreq/speedstep-smi.c       |  32
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c       |  49
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c    |  33
40 files changed, 826 insertions(+), 590 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 38093e272377..d100926aca0a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -20,6 +20,10 @@ if CPU_FREQ
 config CPU_FREQ_GOV_COMMON
 	bool
 
+config CPU_FREQ_BOOST_SW
+	bool
+	depends on THERMAL
+
 config CPU_FREQ_STAT
 	tristate "CPU frequency translation statistics"
 	default y
@@ -181,7 +185,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 config GENERIC_CPUFREQ_CPU0
 	tristate "Generic CPU0 cpufreq driver"
-	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+	depends on HAVE_CLK && REGULATOR && OF
+	select PM_OPP
 	help
 	  This adds a generic cpufreq driver for CPU0 frequency management.
 	  It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ce52ed949249..31297499a60a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,7 +4,8 @@
 
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
@@ -54,7 +55,8 @@ config ARM_EXYNOS5250_CPUFREQ
 config ARM_EXYNOS5440_CPUFREQ
 	bool "SAMSUNG EXYNOS5440"
 	depends on SOC_EXYNOS5440
-	depends on HAVE_CLK && PM_OPP && OF
+	depends on HAVE_CLK && OF
+	select PM_OPP
 	default y
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -64,6 +66,21 @@ config ARM_EXYNOS5440_CPUFREQ
 
 	  If in doubt, say N.
 
+config ARM_EXYNOS_CPU_FREQ_BOOST_SW
+	bool "EXYNOS Frequency Overclocking - Software"
+	depends on ARM_EXYNOS_CPUFREQ
+	select CPU_FREQ_BOOST_SW
+	select EXYNOS_THERMAL
+	help
+	  This driver supports software managed overclocking (BOOST).
+	  It allows usage of special frequencies for Samsung Exynos
+	  processors if thermal conditions are appropriate.
+
+	  It reguires, for safe operation, thermal framework with properly
+	  defined trip points.
+
+	  If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
 	tristate "Calxeda Highbank-based"
 	depends on ARCH_HIGHBANK
@@ -79,11 +96,11 @@ config ARM_HIGHBANK_CPUFREQ
 	  If in doubt, say N.
 
 config ARM_IMX6Q_CPUFREQ
-	tristate "Freescale i.MX6Q cpufreq support"
-	depends on SOC_IMX6Q
+	tristate "Freescale i.MX6 cpufreq support"
+	depends on ARCH_MXC
 	depends on REGULATOR_ANATOP
 	help
-	  This adds cpufreq driver support for Freescale i.MX6Q SOC.
+	  This adds cpufreq driver support for Freescale i.MX6 series SoCs.
 
 	  If in doubt, say N.
 
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index caf41ebea184..79e5608e71b5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -80,7 +80,6 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
-static bool boost_enabled, boost_supported;
 static struct msr __percpu *msrs;
 
 static bool boost_state(unsigned int cpu)
@@ -133,49 +132,16 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
 	wrmsr_on_cpus(cpumask, msr_addr, msrs);
 }
 
-static ssize_t _store_boost(const char *buf, size_t count)
+static int _store_boost(int val)
 {
-	int ret;
-	unsigned long val = 0;
-
-	if (!boost_supported)
-		return -EINVAL;
-
-	ret = kstrtoul(buf, 10, &val);
-	if (ret || (val > 1))
-		return -EINVAL;
-
-	if ((val && boost_enabled) || (!val && !boost_enabled))
-		return count;
-
 	get_online_cpus();
-
 	boost_set_msrs(val, cpu_online_mask);
-
 	put_online_cpus();
-
-	boost_enabled = val;
 	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
 
-	return count;
-}
-
-static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
-				  const char *buf, size_t count)
-{
-	return _store_boost(buf, count);
-}
-
-static ssize_t show_global_boost(struct kobject *kobj,
-				 struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", boost_enabled);
+	return 0;
 }
 
-static struct global_attr global_boost = __ATTR(boost, 0644,
-						show_global_boost,
-						store_global_boost);
-
 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
 	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
@@ -186,15 +152,32 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 cpufreq_freq_attr_ro(freqdomain_cpus);
 
 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_boost(const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val = 0;
+
+	if (!acpi_cpufreq_driver.boost_supported)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret || (val > 1))
+		return -EINVAL;
+
+	_store_boost((int) val);
+
+	return count;
+}
+
 static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
 			 size_t count)
 {
-	return _store_boost(buf, count);
+	return store_boost(buf, count);
 }
 
 static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf(buf, "%u\n", boost_enabled);
+	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
 }
 
 cpufreq_freq_attr_rw(cpb);
@@ -554,7 +537,7 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		boost_set_msrs(boost_enabled, cpumask);
+		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
 		break;
 
 	case CPU_DOWN_PREPARE:
@@ -911,6 +894,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.resume		= acpi_cpufreq_resume,
 	.name		= "acpi-cpufreq",
 	.attr		= acpi_cpufreq_attr,
+	.set_boost	= _store_boost,
 };
 
 static void __init acpi_cpufreq_boost_init(void)
@@ -921,33 +905,22 @@ static void __init acpi_cpufreq_boost_init(void)
 		if (!msrs)
 			return;
 
-		boost_supported = true;
-		boost_enabled = boost_state(0);
-
+		acpi_cpufreq_driver.boost_supported = true;
+		acpi_cpufreq_driver.boost_enabled = boost_state(0);
 		get_online_cpus();
 
 		/* Force all MSRs to the same value */
-		boost_set_msrs(boost_enabled, cpu_online_mask);
+		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
+			       cpu_online_mask);
 
 		register_cpu_notifier(&boost_nb);
 
 		put_online_cpus();
-	} else
-		global_boost.attr.mode = 0444;
-
-	/* We create the boost file in any case, though for systems without
-	 * hardware support it will be read-only and hardwired to return 0.
-	 */
-	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
-		pr_warn(PFX "could not register global boost sysfs file\n");
-	else
-		pr_debug("registered global boost sysfs file\n");
+	}
 }
 
 static void __exit acpi_cpufreq_boost_exit(void)
 {
-	cpufreq_sysfs_remove_file(&(global_boost.attr));
-
 	if (msrs) {
 		unregister_cpu_notifier(&boost_nb);
 
@@ -993,12 +966,11 @@ static int __init acpi_cpufreq_init(void)
 			*iter = &cpb;
 	}
 #endif
+	acpi_cpufreq_boost_init();
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
 		free_acpi_perf_data();
-	else
-		acpi_cpufreq_boost_init();
 
 	return ret;
 }
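Editor's note: after this change the global boost knob that acpi-cpufreq used to create itself is registered by the cpufreq core for any driver that advertises boost_supported. A small, assumed userspace sketch for toggling it follows; the sysfs path is an assumption based on where the cpufreq global attributes are normally exposed.

/* Illustrative userspace sketch, not part of this merge. */
#include <stdio.h>

int main(void)
{
	/* assumed location of the core's global boost attribute */
	FILE *f = fopen("/sys/devices/system/cpu/cpufreq/boost", "w");

	if (!f) {
		perror("boost knob not available");
		return 1;
	}
	fputs("1\n", f);	/* write "0" to disable boost again */
	fclose(f);
	return 0;
}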
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 5519933813ea..72f87e9317e3 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -488,7 +488,8 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
 static struct cpufreq_driver bL_cpufreq_driver = {
 	.name			= "arm-big-little",
 	.flags			= CPUFREQ_STICKY |
-					CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify			= cpufreq_generic_frequency_table_verify,
 	.target_index		= bL_cpufreq_set_target,
 	.get			= bL_cpufreq_get_rate,
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 7c03dd84f66a..a1c79f549edb 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -21,17 +21,8 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 
-static struct clk *cpuclk;
 static struct cpufreq_frequency_table *freq_table;
 
-static unsigned int at32_get_speed(unsigned int cpu)
-{
-	/* No SMP support */
-	if (cpu)
-		return 0;
-	return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
-}
-
 static unsigned int ref_freq;
 static unsigned long loops_per_jiffy_ref;
 
@@ -39,7 +30,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned int old_freq, new_freq;
 
-	old_freq = at32_get_speed(0);
+	old_freq = policy->cur;
 	new_freq = freq_table[index].frequency;
 
 	if (!ref_freq) {
@@ -50,7 +41,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
 	if (old_freq < new_freq)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
 				loops_per_jiffy_ref, ref_freq, new_freq);
-	clk_set_rate(cpuclk, new_freq * 1000);
+	clk_set_rate(policy->clk, new_freq * 1000);
 	if (new_freq < old_freq)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
 				loops_per_jiffy_ref, ref_freq, new_freq);
@@ -61,6 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
 static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
 	unsigned int frequency, rate, min_freq;
+	static struct clk *cpuclk;
 	int retval, steps, i;
 
 	if (policy->cpu != 0)
@@ -103,6 +95,7 @@ static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 		frequency /= 2;
 	}
 
+	policy->clk = cpuclk;
 	freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
 
 	retval = cpufreq_table_validate_and_show(policy, freq_table);
@@ -123,7 +116,7 @@ static struct cpufreq_driver at32_driver = {
 	.init		= at32_cpufreq_driver_init,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= at32_set_target,
-	.get		= at32_get_speed,
+	.get		= cpufreq_generic_get,
 	.flags		= CPUFREQ_STICKY,
 };
 
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index d4585ce2346c..bb7b3082efb3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -30,11 +30,6 @@ static struct clk *cpu_clk;
 static struct regulator *cpu_reg;
 static struct cpufreq_frequency_table *freq_table;
 
-static unsigned int cpu0_get_speed(unsigned int cpu)
-{
-	return clk_get_rate(cpu_clk) / 1000;
-}
-
 static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct dev_pm_opp *opp;
@@ -44,7 +39,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
 	int ret;
 
 	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
-	if (freq_Hz < 0)
+	if (freq_Hz <= 0)
 		freq_Hz = freq_table[index].frequency * 1000;
 
 	freq_exact = freq_Hz;
@@ -100,6 +95,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
 
 static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 {
+	policy->clk = cpu_clk;
 	return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
@@ -107,7 +103,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 	.flags = CPUFREQ_STICKY,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = cpu0_set_target,
-	.get = cpu0_get_speed,
+	.get = cpufreq_generic_get,
 	.init = cpu0_cpufreq_init,
 	.exit = cpufreq_generic_exit,
 	.name = "generic_cpu0",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d19f7c06010..08ca8c9f41cd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -39,7 +39,7 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
-static DEFINE_MUTEX(cpufreq_governor_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -176,6 +176,20 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+	if (!policy || IS_ERR(policy->clk)) {
+		pr_err("%s: No %s associated to cpu: %d\n", __func__,
+		       policy ? "clk" : "policy", cpu);
+		return 0;
+	}
+
+	return clk_get_rate(policy->clk) / 1000;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
@@ -320,10 +334,51 @@ void cpufreq_notify_transition(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
+/* Do post notifications when there are chances that transition has failed */
+void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, int transition_failed)
+{
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+	if (!transition_failed)
+		return;
+
+	swap(freqs->old, freqs->new);
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
+ssize_t show_boost(struct kobject *kobj,
+				 struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	int ret, enable;
+
+	ret = sscanf(buf, "%d", &enable);
+	if (ret != 1 || enable < 0 || enable > 1)
+		return -EINVAL;
+
+	if (cpufreq_boost_trigger_state(enable)) {
+		pr_err("%s: Cannot %s BOOST!\n", __func__,
+		       enable ? "enable" : "disable");
+		return -EINVAL;
+	}
+
+	pr_debug("%s: cpufreq BOOST %s\n", __func__,
+		 enable ? "enabled" : "disabled");
+
+	return count;
+}
+define_one_global_rw(boost);
 
 static struct cpufreq_governor *__find_governor(const char *str_governor)
 {
@@ -929,6 +984,9 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
 	struct kobject *kobj;
 	struct completion *cmp;
 
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_REMOVE_POLICY, policy);
+
 	down_read(&policy->rwsem);
 	kobj = &policy->kobj;
 	cmp = &policy->kobj_unregister;
@@ -1051,6 +1109,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		goto err_set_policy_cpu;
 	}
 
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_cpu(j, policy->cpus)
+		per_cpu(cpufreq_cpu_data, j) = policy;
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 	if (cpufreq_driver->get) {
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
@@ -1059,6 +1122,46 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 		}
 	}
 
+	/*
+	 * Sometimes boot loaders set CPU frequency to a value outside of
+	 * frequency table present with cpufreq core. In such cases CPU might be
+	 * unstable if it has to run on that frequency for long duration of time
+	 * and so its better to set it to a frequency which is specified in
+	 * freq-table. This also makes cpufreq stats inconsistent as
+	 * cpufreq-stats would fail to register because current frequency of CPU
+	 * isn't found in freq-table.
+	 *
+	 * Because we don't want this change to effect boot process badly, we go
+	 * for the next freq which is >= policy->cur ('cur' must be set by now,
+	 * otherwise we will end up setting freq to lowest of the table as 'cur'
+	 * is initialized to zero).
+	 *
+	 * We are passing target-freq as "policy->cur - 1" otherwise
+	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
+	 * equal to target-freq.
+	 */
+	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+	    && has_target()) {
+		/* Are we running at unknown frequency ? */
+		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+		if (ret == -EINVAL) {
+			/* Warn user and fix it */
+			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+				__func__, policy->cpu, policy->cur);
+			ret = __cpufreq_driver_target(policy, policy->cur - 1,
+				CPUFREQ_RELATION_L);
+
+			/*
+			 * Reaching here after boot in a few seconds may not
+			 * mean that system will remain stable at "unknown"
+			 * frequency for longer duration. Hence, a BUG_ON().
+			 */
+			BUG_ON(ret);
+			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+				__func__, policy->cpu, policy->cur);
+		}
+	}
+
 	/* related cpus should atleast have policy->cpus */
 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1085,15 +1188,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	}
 #endif
 
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
-		per_cpu(cpufreq_cpu_data, j) = policy;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	if (!frozen) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				CPUFREQ_CREATE_POLICY, policy);
 	}
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1115,12 +1215,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	return 0;
 
 err_out_unregister:
+err_get_freq:
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
@@ -1725,17 +1825,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			pr_err("%s: Failed to change cpu frequency: %d\n",
 					__func__, retval);
 
-		if (notify) {
-			/*
-			 * Notify with old freq in case we failed to change
-			 * frequency
-			 */
-			if (retval)
-				freqs.new = freqs.old;
-
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_POSTCHANGE);
-		}
+		if (notify)
+			cpufreq_notify_post_transition(policy, &freqs, retval);
 	}
 
 out:
@@ -2120,6 +2211,73 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
 };
 
 /*********************************************************************
+ *               BOOST                                               *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(int state)
+{
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_policy *policy;
+	int ret = -EINVAL;
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (freq_table) {
+			ret = cpufreq_frequency_table_cpuinfo(policy,
+							freq_table);
+			if (ret) {
+				pr_err("%s: Policy frequency update failed\n",
+				       __func__);
+				break;
+			}
+			policy->user_policy.max = policy->max;
+			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+		}
+	}
+
+	return ret;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (cpufreq_driver->boost_enabled == state)
+		return 0;
+
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+	cpufreq_driver->boost_enabled = state;
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	ret = cpufreq_driver->set_boost(state);
+	if (ret) {
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		cpufreq_driver->boost_enabled = !state;
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+		pr_err("%s: Cannot %s BOOST\n", __func__,
+		       state ? "enable" : "disable");
+	}
+
+	return ret;
+}
+
+int cpufreq_boost_supported(void)
+{
+	if (likely(cpufreq_driver))
+		return cpufreq_driver->boost_supported;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+
+int cpufreq_boost_enabled(void)
+{
+	return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
+
+/*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER               *
  *********************************************************************/
 
@@ -2159,9 +2317,25 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	if (cpufreq_boost_supported()) {
+		/*
+		 * Check if driver provides function to enable boost -
+		 * if not, use cpufreq_boost_set_sw as default
+		 */
+		if (!cpufreq_driver->set_boost)
+			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+		ret = cpufreq_sysfs_create_file(&boost.attr);
+		if (ret) {
+			pr_err("%s: cannot register global BOOST sysfs file\n",
+				__func__);
+			goto err_null_driver;
+		}
+	}
+
 	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
-		goto err_null_driver;
+		goto err_boost_unreg;
 
 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
 		int i;
@@ -2188,6 +2362,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	return 0;
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+	if (cpufreq_boost_supported())
+		cpufreq_sysfs_remove_file(&boost.attr);
 err_null_driver:
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
@@ -2214,6 +2391,9 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	pr_debug("unregistering driver %s\n", driver->name);
 
 	subsys_interface_unregister(&cpufreq_interface);
+	if (cpufreq_boost_supported())
+		cpufreq_sysfs_remove_file(&boost.attr);
+
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
 	down_write(&cpufreq_rwsem);
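Editor's note: cpufreq_notify_post_transition(), added in the core above, replaces the open-coded "send POSTCHANGE, but re-announce the old frequency if the switch failed" pattern. A hedged sketch of a driver that issues its own notifications around a hardware switch (example_ names are hypothetical; the clock call stands in for whatever the hardware poke really is):

/* Illustrative sketch only, not part of this merge. */
static int example_switch_freq(struct cpufreq_policy *policy,
			       unsigned int old_khz, unsigned int new_khz)
{
	struct cpufreq_freqs freqs = {
		.old = old_khz,
		.new = new_khz,
	};
	int ret;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = clk_set_rate(policy->clk, new_khz * 1000);	/* kHz -> Hz */

	/* On failure this swaps old/new and re-announces the old frequency. */
	cpufreq_notify_post_transition(policy, &freqs, ret);

	return ret;
}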
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e6be63561fa6..ba43991ba98a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -119,8 +119,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
+	mutex_lock(&cpufreq_governor_lock);
 	if (!policy->governor_enabled)
-		return;
+		goto out_unlock;
 
 	if (!all_cpus) {
 		/*
@@ -135,6 +136,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
 	}
+
+out_unlock:
+	mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index b5f2b8618949..bfb9ae14142c 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -257,6 +257,8 @@ static ssize_t show_sampling_rate_min_gov_pol \
 	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);		\
 }
 
+extern struct mutex cpufreq_governor_lock;
+
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
 bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 4cf0d2805cb2..5793e1447fb1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -151,44 +151,36 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 	return -1;
 }
 
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
-static void cpufreq_stats_free_table(unsigned int cpu)
+static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
 {
-	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
 
-	if (stat) {
-		pr_debug("%s: Free stat table\n", __func__);
-		kfree(stat->time_in_state);
-		kfree(stat);
-		per_cpu(cpufreq_stats_table, cpu) = NULL;
-	}
+	if (!stat)
+		return;
+
+	pr_debug("%s: Free stat table\n", __func__);
+
+	sysfs_remove_group(&policy->kobj, &stats_attr_group);
+	kfree(stat->time_in_state);
+	kfree(stat);
+	per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
 }
 
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
+static void cpufreq_stats_free_table(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy;
 
+	policy = cpufreq_cpu_get(cpu);
 	if (!policy)
 		return;
 
-	if (!cpufreq_frequency_get_table(cpu))
-		goto put_ref;
-
-	if (!policy_is_shared(policy)) {
-		pr_debug("%s: Free sysfs stat\n", __func__);
-		sysfs_remove_group(&policy->kobj, &stats_attr_group);
-	}
+	if (cpufreq_frequency_get_table(policy->cpu))
+		__cpufreq_stats_free_table(policy);
 
-put_ref:
 	cpufreq_cpu_put(policy);
 }
 
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
 		struct cpufreq_frequency_table *table)
 {
 	unsigned int i, j, count = 0, ret = 0;
@@ -261,6 +253,26 @@ error_get_fail:
 	return ret;
 }
 
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *table;
+
+	/*
+	 * "likely(!policy)" because normally cpufreq_stats will be registered
+	 * before cpufreq driver
+	 */
+	policy = cpufreq_cpu_get(cpu);
+	if (likely(!policy))
+		return;
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (likely(table))
+		__cpufreq_stats_create_table(policy, table);
+
+	cpufreq_cpu_put(policy);
+}
+
 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -277,7 +289,7 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
-	int ret;
+	int ret = 0;
 	struct cpufreq_policy *policy = data;
 	struct cpufreq_frequency_table *table;
 	unsigned int cpu = policy->cpu;
@@ -287,15 +299,16 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		return 0;
 	}
 
-	if (val != CPUFREQ_NOTIFY)
-		return 0;
 	table = cpufreq_frequency_get_table(cpu);
 	if (!table)
 		return 0;
-	ret = cpufreq_stats_create_table(policy, table);
-	if (ret)
-		return ret;
-	return 0;
+
+	if (val == CPUFREQ_CREATE_POLICY)
+		ret = __cpufreq_stats_create_table(policy, table);
+	else if (val == CPUFREQ_REMOVE_POLICY)
+		__cpufreq_stats_free_table(policy);
+
+	return ret;
 }
 
 static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
@@ -334,29 +347,6 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
 	return 0;
 }
 
-static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		cpufreq_stats_free_sysfs(cpu);
-		break;
-	case CPU_DEAD:
-		cpufreq_stats_free_table(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
-	.notifier_call = cpufreq_stat_cpu_callback,
-	.priority = 1,
-};
-
 static struct notifier_block notifier_policy_block = {
 	.notifier_call = cpufreq_stat_notifier_policy
 };
@@ -376,14 +366,14 @@ static int __init cpufreq_stats_init(void)
 	if (ret)
 		return ret;
 
-	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+	for_each_online_cpu(cpu)
+		cpufreq_stats_create_table(cpu);
 
 	ret = cpufreq_register_notifier(&notifier_trans_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
 	if (ret) {
 		cpufreq_unregister_notifier(&notifier_policy_block,
 				CPUFREQ_POLICY_NOTIFIER);
-		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 		for_each_online_cpu(cpu)
 			cpufreq_stats_free_table(cpu);
 		return ret;
@@ -399,11 +389,8 @@ static void __exit cpufreq_stats_exit(void)
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	for_each_online_cpu(cpu) {
+	for_each_online_cpu(cpu)
 		cpufreq_stats_free_table(cpu);
-		cpufreq_stats_free_sysfs(cpu);
-	}
 }
 
 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
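Editor's note: with the hunks above, cpufreq_stats learns about policies appearing and disappearing from the new CPUFREQ_CREATE_POLICY / CPUFREQ_REMOVE_POLICY events on the policy notifier chain instead of CPU hotplug callbacks. A hedged sketch of another client hooking the same events (example_ names are hypothetical):

/* Illustrative sketch only, not part of this merge. */
#include <linux/cpufreq.h>

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		pr_info("policy created for CPU%u\n", policy->cpu);
	else if (val == CPUFREQ_REMOVE_POLICY)
		pr_info("policy removed for CPU%u\n", policy->cpu);

	return 0;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notifier,
};

/* registered e.g. from module init with:
 *	cpufreq_register_notifier(&example_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 */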
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 5e8a854381b7..2cf33848d86e 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -58,14 +58,6 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static unsigned int davinci_getspeed(unsigned int cpu)
-{
-	if (cpu)
-		return 0;
-
-	return clk_get_rate(cpufreq.armclk) / 1000;
-}
-
 static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
 {
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -73,7 +65,7 @@ static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
 	unsigned int old_freq, new_freq;
 	int ret = 0;
 
-	old_freq = davinci_getspeed(0);
+	old_freq = policy->cur;
 	new_freq = pdata->freq_table[idx].frequency;
 
 	/* if moving to higher frequency, up the voltage beforehand */
@@ -116,6 +108,8 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
 		return result;
 	}
 
+	policy->clk = cpufreq.armclk;
+
 	/*
 	 * Time measurement across the target() function yields ~1500-1800us
 	 * time taken with no drivers on notification list.
@@ -126,10 +120,10 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver davinci_driver = {
-	.flags		= CPUFREQ_STICKY,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify		= davinci_verify_speed,
 	.target_index	= davinci_target,
-	.get		= davinci_getspeed,
+	.get		= cpufreq_generic_get,
 	.init		= davinci_cpu_init,
 	.exit		= cpufreq_generic_exit,
 	.name		= "davinci",
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 0e67ab96321a..412a78bb0c94 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -26,32 +26,18 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
 	return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
 }
 
-static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
-{
-	int i = 0;
-	unsigned long freq = clk_get_rate(armss_clk) / 1000;
-
-	/* The value is rounded to closest frequency in the defined table. */
-	while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
-		if (freq < freq_table[i].frequency +
-		   (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
-			return freq_table[i].frequency;
-		i++;
-	}
-
-	return freq_table[i].frequency;
-}
-
 static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
 {
+	policy->clk = armss_clk;
 	return cpufreq_generic_init(policy, freq_table, 20 * 1000);
 }
 
 static struct cpufreq_driver dbx500_cpufreq_driver = {
-	.flags  = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+	.flags  = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
+			CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = dbx500_cpufreq_target,
-	.get    = dbx500_cpufreq_getspeed,
+	.get    = cpufreq_generic_get,
 	.init   = dbx500_cpufreq_init,
 	.name   = "DBX500",
 	.attr   = cpufreq_generic_attr,
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index f3c22874da75..fcd2914d081a 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -17,6 +17,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/cpufreq.h>
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 #include <plat/cpu.h>
 
@@ -30,11 +31,6 @@ static unsigned int locking_frequency;
 static bool frequency_locked;
 static DEFINE_MUTEX(cpufreq_lock);
 
-static unsigned int exynos_getspeed(unsigned int cpu)
-{
-	return clk_get_rate(exynos_info->cpu_clk) / 1000;
-}
-
 static int exynos_cpufreq_get_index(unsigned int freq)
 {
 	struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
@@ -214,25 +210,29 @@ static struct notifier_block exynos_cpufreq_nb = {
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
+	policy->clk = exynos_info->cpu_clk;
 	return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
 static struct cpufreq_driver exynos_driver = {
-	.flags		= CPUFREQ_STICKY,
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= exynos_target,
-	.get		= exynos_getspeed,
+	.get		= cpufreq_generic_get,
 	.init		= exynos_cpufreq_cpu_init,
 	.exit		= cpufreq_generic_exit,
 	.name		= "exynos_cpufreq",
 	.attr		= cpufreq_generic_attr,
+#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
+	.boost_supported = true,
+#endif
 #ifdef CONFIG_PM
 	.suspend	= exynos_cpufreq_suspend,
 	.resume		= exynos_cpufreq_resume,
 #endif
 };
 
-static int __init exynos_cpufreq_init(void)
+static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
 	int ret = -EINVAL;
 
@@ -263,7 +263,7 @@ static int __init exynos_cpufreq_init(void)
 		goto err_vdd_arm;
 	}
 
-	locking_frequency = exynos_getspeed(0);
+	locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
 	register_pm_notifier(&exynos_cpufreq_nb);
 
@@ -281,4 +281,12 @@ err_vdd_arm:
 	kfree(exynos_info);
 	return -EINVAL;
 }
-late_initcall(exynos_cpufreq_init);
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+	.driver = {
+		.name	= "exynos-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe = exynos_cpufreq_probe,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
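Editor's note: the Exynos driver is converted from a late_initcall() into a platform driver above, so it only probes once a matching "exynos-cpufreq" platform device exists. Where that device gets registered is not part of this diff; purely as an assumed illustration, platform or SoC init code could create it like this:

/* Assumed example only -- the registration site is not shown in this diff. */
#include <linux/err.h>
#include <linux/platform_device.h>

static int __init example_register_exynos_cpufreq(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}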
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 869e48297e28..7c11ace3b3fc 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -30,7 +30,7 @@ static unsigned int exynos4x12_volt_table[] = {
 };
 
 static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
-	{L0, CPUFREQ_ENTRY_INVALID},
+	{CPUFREQ_BOOST_FREQ, 1500 * 1000},
 	{L1, 1400 * 1000},
 	{L2, 1300 * 1000},
 	{L3, 1200 * 1000},
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 5ee2ce1ad424..5f90b82a4082 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -101,12 +101,12 @@ static void set_clkdiv(unsigned int div_index)
 		cpu_relax();
 }
 
-static void set_apll(unsigned int new_index,
-		     unsigned int old_index)
+static void set_apll(unsigned int index)
 {
-	unsigned int tmp, pdiv;
+	unsigned int tmp;
+	unsigned int freq = apll_freq_5250[index].freq;
 
-	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+	/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
 	clk_set_parent(moutcore, mout_mpll);
 
 	do {
@@ -115,24 +115,9 @@ static void set_apll(unsigned int new_index,
 		tmp &= 0x7;
 	} while (tmp != 0x2);
 
-	/* 2. Set APLL Lock time */
-	pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
-
-	__raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
+	clk_set_rate(mout_apll, freq * 1000);
 
-	/* 3. Change PLL PMS values */
-	tmp = __raw_readl(EXYNOS5_APLL_CON0);
-	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-	tmp |= apll_freq_5250[new_index].mps;
-	__raw_writel(tmp, EXYNOS5_APLL_CON0);
-
-	/* 4. wait_lock_time */
-	do {
-		cpu_relax();
-		tmp = __raw_readl(EXYNOS5_APLL_CON0);
-	} while (!(tmp & (0x1 << 29)));
-
-	/* 5. MUX_CORE_SEL = APLL */
+	/* MUX_CORE_SEL = APLL */
 	clk_set_parent(moutcore, mout_apll);
 
 	do {
@@ -140,55 +125,17 @@ static void set_apll(unsigned int new_index,
 		tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
 		tmp &= (0x7 << 16);
 	} while (tmp != (0x1 << 16));
-
-}
-
-static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
-{
-	unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
-	unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
-
-	return (old_pm == new_pm) ? 0 : 1;
 }
 
 static void exynos5250_set_frequency(unsigned int old_index,
 				     unsigned int new_index)
 {
-	unsigned int tmp;
-
 	if (old_index > new_index) {
-		if (!exynos5250_pms_change(old_index, new_index)) {
-			/* 1. Change the system clock divider values */
-			set_clkdiv(new_index);
-			/* 2. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS5_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_5250[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS5_APLL_CON0);
-
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the system clock divider values */
-			set_clkdiv(new_index);
-			/* 2. Change the apll m,p,s value */
-			set_apll(new_index, old_index);
-		}
+		set_clkdiv(new_index);
+		set_apll(new_index);
 	} else if (old_index < new_index) {
-		if (!exynos5250_pms_change(old_index, new_index)) {
-			/* 1. Change just s value in apll m,p,s value */
-			tmp = __raw_readl(EXYNOS5_APLL_CON0);
-			tmp &= ~(0x7 << 0);
-			tmp |= apll_freq_5250[new_index].mps & 0x7;
-			__raw_writel(tmp, EXYNOS5_APLL_CON0);
-			/* 2. Change the system clock divider values */
-			set_clkdiv(new_index);
-		} else {
-			/* Clock Configuration Procedure */
-			/* 1. Change the apll m,p,s value */
-			set_apll(new_index, old_index);
-			/* 2. Change the system clock divider values */
-			set_clkdiv(new_index);
-		}
+		set_apll(new_index);
+		set_clkdiv(new_index);
 	}
 }
 
@@ -221,7 +168,6 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
221 info->volt_table = exynos5250_volt_table; 168 info->volt_table = exynos5250_volt_table;
222 info->freq_table = exynos5250_freq_table; 169 info->freq_table = exynos5250_freq_table;
223 info->set_freq = exynos5250_set_frequency; 170 info->set_freq = exynos5250_set_frequency;
224 info->need_apll_change = exynos5250_pms_change;
225 171
226 return 0; 172 return 0;
227 173
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 76bef8b078cb..49b756015316 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -100,7 +100,6 @@ struct exynos_dvfs_data {
100 struct resource *mem; 100 struct resource *mem;
101 int irq; 101 int irq;
102 struct clk *cpu_clk; 102 struct clk *cpu_clk;
103 unsigned int cur_frequency;
104 unsigned int latency; 103 unsigned int latency;
105 struct cpufreq_frequency_table *freq_table; 104 struct cpufreq_frequency_table *freq_table;
106 unsigned int freq_count; 105 unsigned int freq_count;
@@ -165,7 +164,7 @@ static int init_div_table(void)
165 return 0; 164 return 0;
166} 165}
167 166
168static void exynos_enable_dvfs(void) 167static void exynos_enable_dvfs(unsigned int cur_frequency)
169{ 168{
170 unsigned int tmp, i, cpu; 169 unsigned int tmp, i, cpu;
171 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; 170 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
@@ -184,18 +183,18 @@ static void exynos_enable_dvfs(void)
184 183
185 /* Set initial performance index */ 184 /* Set initial performance index */
186 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 185 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
187 if (freq_table[i].frequency == dvfs_info->cur_frequency) 186 if (freq_table[i].frequency == cur_frequency)
188 break; 187 break;
189 188
190 if (freq_table[i].frequency == CPUFREQ_TABLE_END) { 189 if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
191 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n"); 190 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
192 /* Assign the highest frequency */ 191 /* Assign the highest frequency */
193 i = 0; 192 i = 0;
194 dvfs_info->cur_frequency = freq_table[i].frequency; 193 cur_frequency = freq_table[i].frequency;
195 } 194 }
196 195
197 dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ", 196 dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
198 dvfs_info->cur_frequency); 197 cur_frequency);
199 198
200 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) { 199 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
201 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); 200 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
@@ -209,11 +208,6 @@ static void exynos_enable_dvfs(void)
209 dvfs_info->base + XMU_DVFS_CTRL); 208 dvfs_info->base + XMU_DVFS_CTRL);
210} 209}
211 210
212static unsigned int exynos_getspeed(unsigned int cpu)
213{
214 return dvfs_info->cur_frequency;
215}
216
217static int exynos_target(struct cpufreq_policy *policy, unsigned int index) 211static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
218{ 212{
219 unsigned int tmp; 213 unsigned int tmp;
@@ -222,7 +216,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
222 216
223 mutex_lock(&cpufreq_lock); 217 mutex_lock(&cpufreq_lock);
224 218
225 freqs.old = dvfs_info->cur_frequency; 219 freqs.old = policy->cur;
226 freqs.new = freq_table[index].frequency; 220 freqs.new = freq_table[index].frequency;
227 221
228 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 222 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
@@ -250,7 +244,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
250 goto skip_work; 244 goto skip_work;
251 245
252 mutex_lock(&cpufreq_lock); 246 mutex_lock(&cpufreq_lock);
253 freqs.old = dvfs_info->cur_frequency; 247 freqs.old = policy->cur;
254 248
255 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS); 249 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
256 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1) 250 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
@@ -260,10 +254,9 @@ static void exynos_cpufreq_work(struct work_struct *work)
260 254
261 if (likely(index < dvfs_info->freq_count)) { 255 if (likely(index < dvfs_info->freq_count)) {
262 freqs.new = freq_table[index].frequency; 256 freqs.new = freq_table[index].frequency;
263 dvfs_info->cur_frequency = freqs.new;
264 } else { 257 } else {
265 dev_crit(dvfs_info->dev, "New frequency out of range\n"); 258 dev_crit(dvfs_info->dev, "New frequency out of range\n");
266 freqs.new = dvfs_info->cur_frequency; 259 freqs.new = freqs.old;
267 } 260 }
268 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 261 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
269 262
@@ -307,15 +300,17 @@ static void exynos_sort_descend_freq_table(void)
307 300
308static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) 301static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
309{ 302{
303 policy->clk = dvfs_info->cpu_clk;
310 return cpufreq_generic_init(policy, dvfs_info->freq_table, 304 return cpufreq_generic_init(policy, dvfs_info->freq_table,
311 dvfs_info->latency); 305 dvfs_info->latency);
312} 306}
313 307
314static struct cpufreq_driver exynos_driver = { 308static struct cpufreq_driver exynos_driver = {
315 .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION, 309 .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
310 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
316 .verify = cpufreq_generic_frequency_table_verify, 311 .verify = cpufreq_generic_frequency_table_verify,
317 .target_index = exynos_target, 312 .target_index = exynos_target,
318 .get = exynos_getspeed, 313 .get = cpufreq_generic_get,
319 .init = exynos_cpufreq_cpu_init, 314 .init = exynos_cpufreq_cpu_init,
320 .exit = cpufreq_generic_exit, 315 .exit = cpufreq_generic_exit,
321 .name = CPUFREQ_NAME, 316 .name = CPUFREQ_NAME,
@@ -335,6 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
335 int ret = -EINVAL; 330 int ret = -EINVAL;
336 struct device_node *np; 331 struct device_node *np;
337 struct resource res; 332 struct resource res;
333 unsigned int cur_frequency;
338 334
339 np = pdev->dev.of_node; 335 np = pdev->dev.of_node;
340 if (!np) 336 if (!np)
@@ -391,13 +387,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
391 goto err_free_table; 387 goto err_free_table;
392 } 388 }
393 389
394 dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk); 390 cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
395 if (!dvfs_info->cur_frequency) { 391 if (!cur_frequency) {
396 dev_err(dvfs_info->dev, "Failed to get clock rate\n"); 392 dev_err(dvfs_info->dev, "Failed to get clock rate\n");
397 ret = -EINVAL; 393 ret = -EINVAL;
398 goto err_free_table; 394 goto err_free_table;
399 } 395 }
400 dvfs_info->cur_frequency /= 1000; 396 cur_frequency /= 1000;
401 397
402 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work); 398 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
403 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq, 399 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
@@ -414,7 +410,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
414 goto err_free_table; 410 goto err_free_table;
415 } 411 }
416 412
417 exynos_enable_dvfs(); 413 exynos_enable_dvfs(cur_frequency);
418 ret = cpufreq_register_driver(&exynos_driver); 414 ret = cpufreq_register_driver(&exynos_driver);
419 if (ret) { 415 if (ret) {
420 dev_err(dvfs_info->dev, 416 dev_err(dvfs_info->dev,
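The exynos5440 conversion above follows the pattern used by most drivers in this series: publish the CPU clock in policy->clk from ->init() and let the core's cpufreq_generic_get() report the current rate as clk_get_rate(policy->clk) / 1000. A sketch of that driver-side pattern, with example_* placeholders standing in for real driver symbols:

	static struct clk *example_cpu_clk;			/* from clk_get() at probe time */
	static struct cpufreq_frequency_table *example_table;	/* driver's frequency table */

	static int example_cpufreq_init(struct cpufreq_policy *policy)
	{
		policy->clk = example_cpu_clk;
		return cpufreq_generic_init(policy, example_table, 100000 /* ns, illustrative */);
	}

	/* In the cpufreq_driver: .get = cpufreq_generic_get, .init = example_cpufreq_init. */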
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3458d27f63b4..8e54f97899ba 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -32,6 +32,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
32 32
33 continue; 33 continue;
34 } 34 }
35 if (!cpufreq_boost_enabled()
36 && table[i].driver_data == CPUFREQ_BOOST_FREQ)
37 continue;
38
35 pr_debug("table entry %u: %u kHz, %u driver_data\n", 39 pr_debug("table entry %u: %u kHz, %u driver_data\n",
36 i, freq, table[i].driver_data); 40 i, freq, table[i].driver_data);
37 if (freq < min_freq) 41 if (freq < min_freq)
@@ -178,11 +182,34 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
178} 182}
179EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); 183EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
180 184
185int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
186 unsigned int freq)
187{
188 struct cpufreq_frequency_table *table;
189 int i;
190
191 table = cpufreq_frequency_get_table(policy->cpu);
192 if (unlikely(!table)) {
193 pr_debug("%s: Unable to find frequency table\n", __func__);
194 return -ENOENT;
195 }
196
197 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
198 if (table[i].frequency == freq)
199 return i;
200 }
201
202 return -EINVAL;
203}
204EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
205
181static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table); 206static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
207
182/** 208/**
183 * show_available_freqs - show available frequencies for the specified CPU 209 * show_available_freqs - show available frequencies for the specified CPU
184 */ 210 */
185static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) 211static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
212 bool show_boost)
186{ 213{
187 unsigned int i = 0; 214 unsigned int i = 0;
188 unsigned int cpu = policy->cpu; 215 unsigned int cpu = policy->cpu;
@@ -197,6 +224,20 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
197 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 224 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
198 if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 225 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
199 continue; 226 continue;
227 /*
228 * show_boost = true and driver_data = BOOST freq
229 * display BOOST freqs
230 *
231 * show_boost = false and driver_data = BOOST freq
232 * show_boost = true and driver_data != BOOST freq
233 * continue - do not display anything
234 *
235 * show_boost = false and driver_data != BOOST freq
236 * display NON BOOST freqs
237 */
238 if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
239 continue;
240
200 count += sprintf(&buf[count], "%d ", table[i].frequency); 241 count += sprintf(&buf[count], "%d ", table[i].frequency);
201 } 242 }
202 count += sprintf(&buf[count], "\n"); 243 count += sprintf(&buf[count], "\n");
@@ -205,16 +246,39 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
205 246
206} 247}
207 248
208struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { 249#define cpufreq_attr_available_freq(_name) \
209 .attr = { .name = "scaling_available_frequencies", 250struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
210 .mode = 0444, 251__ATTR_RO(_name##_frequencies)
211 }, 252
212 .show = show_available_freqs, 253/**
213}; 254 * show_scaling_available_frequencies - show available normal frequencies for
255 * the specified CPU
256 */
257static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
258 char *buf)
259{
260 return show_available_freqs(policy, buf, false);
261}
262cpufreq_attr_available_freq(scaling_available);
214EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); 263EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
215 264
265/**
266 * show_available_boost_freqs - show available boost frequencies for
267 * the specified CPU
268 */
269static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
270 char *buf)
271{
272 return show_available_freqs(policy, buf, true);
273}
274cpufreq_attr_available_freq(scaling_boost);
275EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
276
216struct freq_attr *cpufreq_generic_attr[] = { 277struct freq_attr *cpufreq_generic_attr[] = {
217 &cpufreq_freq_attr_scaling_available_freqs, 278 &cpufreq_freq_attr_scaling_available_freqs,
279#ifdef CONFIG_CPU_FREQ_BOOST_SW
280 &cpufreq_freq_attr_scaling_boost_freqs,
281#endif
218 NULL, 282 NULL,
219}; 283};
220EXPORT_SYMBOL_GPL(cpufreq_generic_attr); 284EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
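The comment block in show_available_freqs() boils down to one rule: keep a table entry only when its boost marking matches what the caller asked for, which the XOR test expresses in a single line. Spelled out:

	/* True means "skip this entry" (the 'continue' in the loop above). */
	static bool skip_entry(bool show_boost, bool entry_is_boost)
	{
		return show_boost ^ entry_is_boost;
	}

	/*
	 * skip_entry(false, false) == false -> listed in scaling_available_frequencies
	 * skip_entry(true,  true)  == false -> listed in scaling_boost_frequencies
	 * skip_entry(false, true)  == true  -> boost entry hidden from the normal list
	 * skip_entry(true,  false) == true  -> normal entry hidden from the boost list
	 */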
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 4b3f18e5f36b..ce69059be1fc 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -35,10 +35,8 @@ static struct device *cpu_dev;
35static struct cpufreq_frequency_table *freq_table; 35static struct cpufreq_frequency_table *freq_table;
36static unsigned int transition_latency; 36static unsigned int transition_latency;
37 37
38static unsigned int imx6q_get_speed(unsigned int cpu) 38static u32 *imx6_soc_volt;
39{ 39static u32 soc_opp_count;
40 return clk_get_rate(arm_clk) / 1000;
41}
42 40
43static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) 41static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
44{ 42{
@@ -69,23 +67,22 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
69 67
70 /* scaling up? scale voltage before frequency */ 68 /* scaling up? scale voltage before frequency */
71 if (new_freq > old_freq) { 69 if (new_freq > old_freq) {
70 ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
71 if (ret) {
72 dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
73 return ret;
74 }
75 ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
76 if (ret) {
77 dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
78 return ret;
79 }
72 ret = regulator_set_voltage_tol(arm_reg, volt, 0); 80 ret = regulator_set_voltage_tol(arm_reg, volt, 0);
73 if (ret) { 81 if (ret) {
74 dev_err(cpu_dev, 82 dev_err(cpu_dev,
75 "failed to scale vddarm up: %d\n", ret); 83 "failed to scale vddarm up: %d\n", ret);
76 return ret; 84 return ret;
77 } 85 }
78
79 /*
80 * Need to increase vddpu and vddsoc for safety
81 * if we are about to run at 1.2 GHz.
82 */
83 if (new_freq == FREQ_1P2_GHZ / 1000) {
84 regulator_set_voltage_tol(pu_reg,
85 PU_SOC_VOLTAGE_HIGH, 0);
86 regulator_set_voltage_tol(soc_reg,
87 PU_SOC_VOLTAGE_HIGH, 0);
88 }
89 } 86 }
90 87
91 /* 88 /*
@@ -120,12 +117,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
120 "failed to scale vddarm down: %d\n", ret); 117 "failed to scale vddarm down: %d\n", ret);
121 ret = 0; 118 ret = 0;
122 } 119 }
123 120 ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
124 if (old_freq == FREQ_1P2_GHZ / 1000) { 121 if (ret) {
125 regulator_set_voltage_tol(pu_reg, 122 dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
126 PU_SOC_VOLTAGE_NORMAL, 0); 123 ret = 0;
127 regulator_set_voltage_tol(soc_reg, 124 }
128 PU_SOC_VOLTAGE_NORMAL, 0); 125 ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
126 if (ret) {
127 dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
128 ret = 0;
129 } 129 }
130 } 130 }
131 131
@@ -134,13 +134,15 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
134 134
135static int imx6q_cpufreq_init(struct cpufreq_policy *policy) 135static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
136{ 136{
137 policy->clk = arm_clk;
137 return cpufreq_generic_init(policy, freq_table, transition_latency); 138 return cpufreq_generic_init(policy, freq_table, transition_latency);
138} 139}
139 140
140static struct cpufreq_driver imx6q_cpufreq_driver = { 141static struct cpufreq_driver imx6q_cpufreq_driver = {
142 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
141 .verify = cpufreq_generic_frequency_table_verify, 143 .verify = cpufreq_generic_frequency_table_verify,
142 .target_index = imx6q_set_target, 144 .target_index = imx6q_set_target,
143 .get = imx6q_get_speed, 145 .get = cpufreq_generic_get,
144 .init = imx6q_cpufreq_init, 146 .init = imx6q_cpufreq_init,
145 .exit = cpufreq_generic_exit, 147 .exit = cpufreq_generic_exit,
146 .name = "imx6q-cpufreq", 148 .name = "imx6q-cpufreq",
@@ -153,6 +155,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
153 struct dev_pm_opp *opp; 155 struct dev_pm_opp *opp;
154 unsigned long min_volt, max_volt; 156 unsigned long min_volt, max_volt;
155 int num, ret; 157 int num, ret;
158 const struct property *prop;
159 const __be32 *val;
160 u32 nr, i, j;
156 161
157 cpu_dev = get_cpu_device(0); 162 cpu_dev = get_cpu_device(0);
158 if (!cpu_dev) { 163 if (!cpu_dev) {
@@ -187,12 +192,25 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
187 goto put_node; 192 goto put_node;
188 } 193 }
189 194
190 /* We expect an OPP table supplied by platform */ 195 /*
196 * We expect an OPP table supplied by platform.
197 * Just, incase the platform did not supply the OPP
198 * table, it will try to get it.
199 */
191 num = dev_pm_opp_get_opp_count(cpu_dev); 200 num = dev_pm_opp_get_opp_count(cpu_dev);
192 if (num < 0) { 201 if (num < 0) {
193 ret = num; 202 ret = of_init_opp_table(cpu_dev);
194 dev_err(cpu_dev, "no OPP table is found: %d\n", ret); 203 if (ret < 0) {
195 goto put_node; 204 dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
205 goto put_node;
206 }
207
208 num = dev_pm_opp_get_opp_count(cpu_dev);
209 if (num < 0) {
210 ret = num;
211 dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
212 goto put_node;
213 }
196 } 214 }
197 215
198 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 216 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
@@ -201,10 +219,62 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
201 goto put_node; 219 goto put_node;
202 } 220 }
203 221
222 /* Make imx6_soc_volt array's size same as arm opp number */
223 imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
224 if (imx6_soc_volt == NULL) {
225 ret = -ENOMEM;
226 goto free_freq_table;
227 }
228
229 prop = of_find_property(np, "fsl,soc-operating-points", NULL);
230 if (!prop || !prop->value)
231 goto soc_opp_out;
232
233 /*
 234 * Each OPP is a tuple consisting of frequency and
 235 * voltage, laid out as <freq-kHz volt-uV>.
236 */
237 nr = prop->length / sizeof(u32);
238 if (nr % 2 || (nr / 2) < num)
239 goto soc_opp_out;
240
241 for (j = 0; j < num; j++) {
242 val = prop->value;
243 for (i = 0; i < nr / 2; i++) {
244 unsigned long freq = be32_to_cpup(val++);
245 unsigned long volt = be32_to_cpup(val++);
246 if (freq_table[j].frequency == freq) {
247 imx6_soc_volt[soc_opp_count++] = volt;
248 break;
249 }
250 }
251 }
252
253soc_opp_out:
254 /* use fixed soc opp volt if no valid soc opp info found in dtb */
255 if (soc_opp_count != num) {
256 dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
257 for (j = 0; j < num; j++)
258 imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
259 if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
260 imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
261 }
262
204 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 263 if (of_property_read_u32(np, "clock-latency", &transition_latency))
205 transition_latency = CPUFREQ_ETERNAL; 264 transition_latency = CPUFREQ_ETERNAL;
206 265
207 /* 266 /*
267 * Calculate the ramp time for max voltage change in the
268 * VDDSOC and VDDPU regulators.
269 */
270 ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
271 if (ret > 0)
272 transition_latency += ret * 1000;
273 ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
274 if (ret > 0)
275 transition_latency += ret * 1000;
276
277 /*
208 * OPP is maintained in order of increasing frequency, and 278 * OPP is maintained in order of increasing frequency, and
209 * freq_table initialised from OPP is therefore sorted in the 279 * freq_table initialised from OPP is therefore sorted in the
210 * same order. 280 * same order.
@@ -221,18 +291,6 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
221 if (ret > 0) 291 if (ret > 0)
222 transition_latency += ret * 1000; 292 transition_latency += ret * 1000;
223 293
224 /* Count vddpu and vddsoc latency in for 1.2 GHz support */
225 if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
226 ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
227 PU_SOC_VOLTAGE_HIGH);
228 if (ret > 0)
229 transition_latency += ret * 1000;
230 ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
231 PU_SOC_VOLTAGE_HIGH);
232 if (ret > 0)
233 transition_latency += ret * 1000;
234 }
235
236 ret = cpufreq_register_driver(&imx6q_cpufreq_driver); 294 ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
237 if (ret) { 295 if (ret) {
238 dev_err(cpu_dev, "failed register driver: %d\n", ret); 296 dev_err(cpu_dev, "failed register driver: %d\n", ret);
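The fsl,soc-operating-points property parsed above is a flat list of <frequency-kHz voltage-uV> cells; for every ARM OPP the code scans the list for a matching frequency and records that voltage in imx6_soc_volt[]. The pairing, reduced to its core (wanted_khz and soc_volt_uv are illustrative names, not symbols from the patch):

	const __be32 *val = prop->value;		/* freq0 volt0 freq1 volt1 ... */
	u32 cells = prop->length / sizeof(u32);
	u32 i;

	for (i = 0; i < cells / 2; i++) {
		unsigned long freq = be32_to_cpup(val++);	/* kHz */
		unsigned long volt = be32_to_cpup(val++);	/* uV  */
		if (freq == wanted_khz) {
			soc_volt_uv = volt;
			break;
		}
	}

The extra transition_latency terms come from regulator_set_voltage_time(), which returns the ramp time in microseconds, hence the "* 1000" when folding it into the nanosecond-based latency.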
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 7d8ab000d317..0e27844e8c2d 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -190,6 +190,7 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
190} 190}
191 191
192static struct cpufreq_driver integrator_driver = { 192static struct cpufreq_driver integrator_driver = {
193 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
193 .verify = integrator_verify_policy, 194 .verify = integrator_verify_policy,
194 .target = integrator_set_target, 195 .target = integrator_set_target,
195 .get = integrator_get, 196 .get = integrator_get,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d51f17ed691e..7e257b233602 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -35,6 +35,7 @@
35#define SAMPLE_COUNT 3 35#define SAMPLE_COUNT 3
36 36
37#define BYT_RATIOS 0x66a 37#define BYT_RATIOS 0x66a
38#define BYT_VIDS 0x66b
38 39
39#define FRAC_BITS 8 40#define FRAC_BITS 8
40#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 41#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -50,6 +51,8 @@ static inline int32_t div_fp(int32_t x, int32_t y)
50 return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); 51 return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
51} 52}
52 53
54static u64 energy_divisor;
55
53struct sample { 56struct sample {
54 int32_t core_pct_busy; 57 int32_t core_pct_busy;
55 u64 aperf; 58 u64 aperf;
@@ -64,6 +67,12 @@ struct pstate_data {
64 int turbo_pstate; 67 int turbo_pstate;
65}; 68};
66 69
70struct vid_data {
71 int32_t min;
72 int32_t max;
73 int32_t ratio;
74};
75
67struct _pid { 76struct _pid {
68 int setpoint; 77 int setpoint;
69 int32_t integral; 78 int32_t integral;
@@ -82,10 +91,9 @@ struct cpudata {
82 struct timer_list timer; 91 struct timer_list timer;
83 92
84 struct pstate_data pstate; 93 struct pstate_data pstate;
94 struct vid_data vid;
85 struct _pid pid; 95 struct _pid pid;
86 96
87 int min_pstate_count;
88
89 u64 prev_aperf; 97 u64 prev_aperf;
90 u64 prev_mperf; 98 u64 prev_mperf;
91 int sample_ptr; 99 int sample_ptr;
@@ -106,7 +114,8 @@ struct pstate_funcs {
106 int (*get_max)(void); 114 int (*get_max)(void);
107 int (*get_min)(void); 115 int (*get_min)(void);
108 int (*get_turbo)(void); 116 int (*get_turbo)(void);
109 void (*set)(int pstate); 117 void (*set)(struct cpudata*, int pstate);
118 void (*get_vid)(struct cpudata *);
110}; 119};
111 120
112struct cpu_defaults { 121struct cpu_defaults {
@@ -358,6 +367,42 @@ static int byt_get_max_pstate(void)
358 return (value >> 16) & 0xFF; 367 return (value >> 16) & 0xFF;
359} 368}
360 369
370static void byt_set_pstate(struct cpudata *cpudata, int pstate)
371{
372 u64 val;
373 int32_t vid_fp;
374 u32 vid;
375
376 val = pstate << 8;
377 if (limits.no_turbo)
378 val |= (u64)1 << 32;
379
380 vid_fp = cpudata->vid.min + mul_fp(
381 int_tofp(pstate - cpudata->pstate.min_pstate),
382 cpudata->vid.ratio);
383
384 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
385 vid = fp_toint(vid_fp);
386
387 val |= vid;
388
389 wrmsrl(MSR_IA32_PERF_CTL, val);
390}
391
392static void byt_get_vid(struct cpudata *cpudata)
393{
394 u64 value;
395
396 rdmsrl(BYT_VIDS, value);
397 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
398 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
399 cpudata->vid.ratio = div_fp(
400 cpudata->vid.max - cpudata->vid.min,
401 int_tofp(cpudata->pstate.max_pstate -
402 cpudata->pstate.min_pstate));
403}
404
405
361static int core_get_min_pstate(void) 406static int core_get_min_pstate(void)
362{ 407{
363 u64 value; 408 u64 value;
@@ -384,7 +429,7 @@ static int core_get_turbo_pstate(void)
384 return ret; 429 return ret;
385} 430}
386 431
387static void core_set_pstate(int pstate) 432static void core_set_pstate(struct cpudata *cpudata, int pstate)
388{ 433{
389 u64 val; 434 u64 val;
390 435
@@ -425,7 +470,8 @@ static struct cpu_defaults byt_params = {
425 .get_max = byt_get_max_pstate, 470 .get_max = byt_get_max_pstate,
426 .get_min = byt_get_min_pstate, 471 .get_min = byt_get_min_pstate,
427 .get_turbo = byt_get_max_pstate, 472 .get_turbo = byt_get_max_pstate,
428 .set = core_set_pstate, 473 .set = byt_set_pstate,
474 .get_vid = byt_get_vid,
429 }, 475 },
430}; 476};
431 477
@@ -462,7 +508,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
462 508
463 cpu->pstate.current_pstate = pstate; 509 cpu->pstate.current_pstate = pstate;
464 510
465 pstate_funcs.set(pstate); 511 pstate_funcs.set(cpu, pstate);
466} 512}
467 513
468static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) 514static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -488,6 +534,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
488 cpu->pstate.max_pstate = pstate_funcs.get_max(); 534 cpu->pstate.max_pstate = pstate_funcs.get_max();
489 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 535 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
490 536
537 if (pstate_funcs.get_vid)
538 pstate_funcs.get_vid(cpu);
539
491 /* 540 /*
492 * goto max pstate so we don't slow up boot if we are built-in if we are 541 * goto max pstate so we don't slow up boot if we are built-in if we are
493 * a module we will take care of it during normal operation 542 * a module we will take care of it during normal operation
@@ -512,6 +561,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
512 561
513 rdmsrl(MSR_IA32_APERF, aperf); 562 rdmsrl(MSR_IA32_APERF, aperf);
514 rdmsrl(MSR_IA32_MPERF, mperf); 563 rdmsrl(MSR_IA32_MPERF, mperf);
564
515 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 565 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
516 cpu->samples[cpu->sample_ptr].aperf = aperf; 566 cpu->samples[cpu->sample_ptr].aperf = aperf;
517 cpu->samples[cpu->sample_ptr].mperf = mperf; 567 cpu->samples[cpu->sample_ptr].mperf = mperf;
@@ -556,6 +606,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
556 ctl = pid_calc(pid, busy_scaled); 606 ctl = pid_calc(pid, busy_scaled);
557 607
558 steps = abs(ctl); 608 steps = abs(ctl);
609
559 if (ctl < 0) 610 if (ctl < 0)
560 intel_pstate_pstate_increase(cpu, steps); 611 intel_pstate_pstate_increase(cpu, steps);
561 else 612 else
@@ -565,17 +616,23 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
565static void intel_pstate_timer_func(unsigned long __data) 616static void intel_pstate_timer_func(unsigned long __data)
566{ 617{
567 struct cpudata *cpu = (struct cpudata *) __data; 618 struct cpudata *cpu = (struct cpudata *) __data;
619 struct sample *sample;
620 u64 energy;
568 621
569 intel_pstate_sample(cpu); 622 intel_pstate_sample(cpu);
623
624 sample = &cpu->samples[cpu->sample_ptr];
625 rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
626
570 intel_pstate_adjust_busy_pstate(cpu); 627 intel_pstate_adjust_busy_pstate(cpu);
571 628
572 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { 629 trace_pstate_sample(fp_toint(sample->core_pct_busy),
573 cpu->min_pstate_count++; 630 fp_toint(intel_pstate_get_scaled_busy(cpu)),
574 if (!(cpu->min_pstate_count % 5)) { 631 cpu->pstate.current_pstate,
575 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 632 sample->mperf,
576 } 633 sample->aperf,
577 } else 634 div64_u64(energy, energy_divisor),
578 cpu->min_pstate_count = 0; 635 sample->freq);
579 636
580 intel_pstate_set_sample_time(cpu); 637 intel_pstate_set_sample_time(cpu);
581} 638}
@@ -782,6 +839,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
782 pstate_funcs.get_min = funcs->get_min; 839 pstate_funcs.get_min = funcs->get_min;
783 pstate_funcs.get_turbo = funcs->get_turbo; 840 pstate_funcs.get_turbo = funcs->get_turbo;
784 pstate_funcs.set = funcs->set; 841 pstate_funcs.set = funcs->set;
842 pstate_funcs.get_vid = funcs->get_vid;
785} 843}
786 844
787#if IS_ENABLED(CONFIG_ACPI) 845#if IS_ENABLED(CONFIG_ACPI)
@@ -855,6 +913,7 @@ static int __init intel_pstate_init(void)
855 int cpu, rc = 0; 913 int cpu, rc = 0;
856 const struct x86_cpu_id *id; 914 const struct x86_cpu_id *id;
857 struct cpu_defaults *cpu_info; 915 struct cpu_defaults *cpu_info;
916 u64 units;
858 917
859 if (no_load) 918 if (no_load)
860 return -ENODEV; 919 return -ENODEV;
@@ -888,8 +947,12 @@ static int __init intel_pstate_init(void)
888 if (rc) 947 if (rc)
889 goto out; 948 goto out;
890 949
950 rdmsrl(MSR_RAPL_POWER_UNIT, units);
951 energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
952
891 intel_pstate_debug_expose_params(); 953 intel_pstate_debug_expose_params();
892 intel_pstate_sysfs_expose_params(); 954 intel_pstate_sysfs_expose_params();
955
893 return rc; 956 return rc;
894out: 957out:
895 get_online_cpus(); 958 get_online_cpus();
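Two bits of arithmetic in the intel_pstate changes are worth spelling out. The Baytrail VID written to MSR_IA32_PERF_CTL is a linear interpolation, done in 8-bit fixed point, between the minimum and maximum voltage IDs read from the BYT_VIDS MSR (0x66b), and energy_divisor is 2^N where N is bits 12:8 of MSR_RAPL_POWER_UNIT, so the traced energy value is the raw MSR_PKG_ENERGY_STATUS count scaled down to (roughly) joules. An integer-only illustration of the VID selection (not the driver's fixed-point code verbatim):

	static unsigned int pick_vid(unsigned int pstate,
				     unsigned int min_pstate, unsigned int max_pstate,
				     unsigned int vid_min, unsigned int vid_max)
	{
		unsigned int vid = vid_min +
			(pstate - min_pstate) * (vid_max - vid_min) /
			(max_pstate - min_pstate);

		return vid > vid_max ? vid_max : vid;	/* clamp, as clamp_t() does */
	}

The P-state itself goes into bits 15:8 of the PERF_CTL value (pstate << 8) and the VID into the low bits.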
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0767a4e29dfe..eb7abe345b50 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -97,6 +97,7 @@ static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
97} 97}
98 98
99static struct cpufreq_driver kirkwood_cpufreq_driver = { 99static struct cpufreq_driver kirkwood_cpufreq_driver = {
100 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
100 .get = kirkwood_cpufreq_get_cpu_frequency, 101 .get = kirkwood_cpufreq_get_cpu_frequency,
101 .verify = cpufreq_generic_frequency_table_verify, 102 .verify = cpufreq_generic_frequency_table_verify,
102 .target_index = kirkwood_cpufreq_target, 103 .target_index = kirkwood_cpufreq_target,
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index a43609218105..b6581abc9207 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -24,8 +24,6 @@
24 24
25static uint nowait; 25static uint nowait;
26 26
27static struct clk *cpuclk;
28
29static void (*saved_cpu_wait) (void); 27static void (*saved_cpu_wait) (void);
30 28
31static int loongson2_cpu_freq_notifier(struct notifier_block *nb, 29static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
@@ -44,11 +42,6 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
44 return 0; 42 return 0;
45} 43}
46 44
47static unsigned int loongson2_cpufreq_get(unsigned int cpu)
48{
49 return clk_get_rate(cpuclk);
50}
51
52/* 45/*
53 * Here we notify other drivers of the proposed change and the final change. 46 * Here we notify other drivers of the proposed change and the final change.
54 */ 47 */
@@ -69,13 +62,14 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
69 set_cpus_allowed_ptr(current, &cpus_allowed); 62 set_cpus_allowed_ptr(current, &cpus_allowed);
70 63
71 /* setting the cpu frequency */ 64 /* setting the cpu frequency */
72 clk_set_rate(cpuclk, freq); 65 clk_set_rate(policy->clk, freq);
73 66
74 return 0; 67 return 0;
75} 68}
76 69
77static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) 70static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
78{ 71{
72 static struct clk *cpuclk;
79 int i; 73 int i;
80 unsigned long rate; 74 unsigned long rate;
81 int ret; 75 int ret;
@@ -104,13 +98,14 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
104 return ret; 98 return ret;
105 } 99 }
106 100
101 policy->clk = cpuclk;
107 return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); 102 return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
108} 103}
109 104
110static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) 105static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
111{ 106{
112 cpufreq_frequency_table_put_attr(policy->cpu); 107 cpufreq_frequency_table_put_attr(policy->cpu);
113 clk_put(cpuclk); 108 clk_put(policy->clk);
114 return 0; 109 return 0;
115} 110}
116 111
@@ -119,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
119 .init = loongson2_cpufreq_cpu_init, 114 .init = loongson2_cpufreq_cpu_init,
120 .verify = cpufreq_generic_frequency_table_verify, 115 .verify = cpufreq_generic_frequency_table_verify,
121 .target_index = loongson2_cpufreq_target, 116 .target_index = loongson2_cpufreq_target,
122 .get = loongson2_cpufreq_get, 117 .get = cpufreq_generic_get,
123 .exit = loongson2_cpufreq_exit, 118 .exit = loongson2_cpufreq_exit,
124 .attr = cpufreq_generic_attr, 119 .attr = cpufreq_generic_attr,
125}; 120};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index a0acd0bfba40..590f5b66d181 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -36,21 +36,9 @@
36 36
37static struct cpufreq_frequency_table *freq_table; 37static struct cpufreq_frequency_table *freq_table;
38static atomic_t freq_table_users = ATOMIC_INIT(0); 38static atomic_t freq_table_users = ATOMIC_INIT(0);
39static struct clk *mpu_clk;
40static struct device *mpu_dev; 39static struct device *mpu_dev;
41static struct regulator *mpu_reg; 40static struct regulator *mpu_reg;
42 41
43static unsigned int omap_getspeed(unsigned int cpu)
44{
45 unsigned long rate;
46
47 if (cpu >= NR_CPUS)
48 return 0;
49
50 rate = clk_get_rate(mpu_clk) / 1000;
51 return rate;
52}
53
54static int omap_target(struct cpufreq_policy *policy, unsigned int index) 42static int omap_target(struct cpufreq_policy *policy, unsigned int index)
55{ 43{
56 int r, ret; 44 int r, ret;
@@ -58,11 +46,11 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
58 unsigned long freq, volt = 0, volt_old = 0, tol = 0; 46 unsigned long freq, volt = 0, volt_old = 0, tol = 0;
59 unsigned int old_freq, new_freq; 47 unsigned int old_freq, new_freq;
60 48
61 old_freq = omap_getspeed(policy->cpu); 49 old_freq = policy->cur;
62 new_freq = freq_table[index].frequency; 50 new_freq = freq_table[index].frequency;
63 51
64 freq = new_freq * 1000; 52 freq = new_freq * 1000;
65 ret = clk_round_rate(mpu_clk, freq); 53 ret = clk_round_rate(policy->clk, freq);
66 if (IS_ERR_VALUE(ret)) { 54 if (IS_ERR_VALUE(ret)) {
67 dev_warn(mpu_dev, 55 dev_warn(mpu_dev,
68 "CPUfreq: Cannot find matching frequency for %lu\n", 56 "CPUfreq: Cannot find matching frequency for %lu\n",
@@ -100,7 +88,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
100 } 88 }
101 } 89 }
102 90
103 ret = clk_set_rate(mpu_clk, new_freq * 1000); 91 ret = clk_set_rate(policy->clk, new_freq * 1000);
104 92
105 /* scaling down? scale voltage after frequency */ 93 /* scaling down? scale voltage after frequency */
106 if (mpu_reg && (new_freq < old_freq)) { 94 if (mpu_reg && (new_freq < old_freq)) {
@@ -108,7 +96,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
108 if (r < 0) { 96 if (r < 0) {
109 dev_warn(mpu_dev, "%s: unable to scale voltage down.\n", 97 dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
110 __func__); 98 __func__);
111 clk_set_rate(mpu_clk, old_freq * 1000); 99 clk_set_rate(policy->clk, old_freq * 1000);
112 return r; 100 return r;
113 } 101 }
114 } 102 }
@@ -126,9 +114,9 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
126{ 114{
127 int result; 115 int result;
128 116
129 mpu_clk = clk_get(NULL, "cpufreq_ck"); 117 policy->clk = clk_get(NULL, "cpufreq_ck");
130 if (IS_ERR(mpu_clk)) 118 if (IS_ERR(policy->clk))
131 return PTR_ERR(mpu_clk); 119 return PTR_ERR(policy->clk);
132 120
133 if (!freq_table) { 121 if (!freq_table) {
134 result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table); 122 result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -149,7 +137,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
149 137
150 freq_table_free(); 138 freq_table_free();
151fail: 139fail:
152 clk_put(mpu_clk); 140 clk_put(policy->clk);
153 return result; 141 return result;
154} 142}
155 143
@@ -157,15 +145,15 @@ static int omap_cpu_exit(struct cpufreq_policy *policy)
157{ 145{
158 cpufreq_frequency_table_put_attr(policy->cpu); 146 cpufreq_frequency_table_put_attr(policy->cpu);
159 freq_table_free(); 147 freq_table_free();
160 clk_put(mpu_clk); 148 clk_put(policy->clk);
161 return 0; 149 return 0;
162} 150}
163 151
164static struct cpufreq_driver omap_driver = { 152static struct cpufreq_driver omap_driver = {
165 .flags = CPUFREQ_STICKY, 153 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
166 .verify = cpufreq_generic_frequency_table_verify, 154 .verify = cpufreq_generic_frequency_table_verify,
167 .target_index = omap_target, 155 .target_index = omap_target,
168 .get = omap_getspeed, 156 .get = cpufreq_generic_get,
169 .init = omap_cpu_init, 157 .init = omap_cpu_init,
170 .exit = omap_cpu_exit, 158 .exit = omap_cpu_exit,
171 .name = "omap", 159 .name = "omap",
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index e2b4f40ff69a..1c0f1067af73 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -213,6 +213,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
213 cpu, target_freq, 213 cpu, target_freq,
214 (pcch_virt_addr + pcc_cpu_data->input_offset)); 214 (pcch_virt_addr + pcc_cpu_data->input_offset));
215 215
216 freqs.old = policy->cur;
216 freqs.new = target_freq; 217 freqs.new = target_freq;
217 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 218 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
218 219
@@ -228,25 +229,20 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
228 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); 229 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
229 230
230 status = ioread16(&pcch_hdr->status); 231 status = ioread16(&pcch_hdr->status);
232 iowrite16(0, &pcch_hdr->status);
233
234 cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE);
235 spin_unlock(&pcc_lock);
236
231 if (status != CMD_COMPLETE) { 237 if (status != CMD_COMPLETE) {
232 pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", 238 pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
233 cpu, status); 239 cpu, status);
234 goto cmd_incomplete; 240 return -EINVAL;
235 } 241 }
236 iowrite16(0, &pcch_hdr->status);
237 242
238 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
239 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); 243 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
240 spin_unlock(&pcc_lock);
241 244
242 return 0; 245 return 0;
243
244cmd_incomplete:
245 freqs.new = freqs.old;
246 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
247 iowrite16(0, &pcch_hdr->status);
248 spin_unlock(&pcc_lock);
249 return -EINVAL;
250} 246}
251 247
252static int pcc_get_offset(int cpu) 248static int pcc_get_offset(int cpu)
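The reworked error path relies on cpufreq_notify_post_transition(), also used in the powernow-k8 change below: when the hardware reports failure, governors should end up seeing the frequency unchanged rather than the requested one. A simplified sketch of that net effect (not the core implementation verbatim):

	static void example_post_transition(struct cpufreq_policy *policy,
					    struct cpufreq_freqs *freqs,
					    int transition_failed)
	{
		if (transition_failed)
			freqs->new = freqs->old;	/* report that nothing changed */
		cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	}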
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 643e7952cad3..b9a444e358b5 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -26,41 +26,108 @@
26static unsigned int busfreq; /* FSB, in 10 kHz */ 26static unsigned int busfreq; /* FSB, in 10 kHz */
27static unsigned int max_multiplier; 27static unsigned int max_multiplier;
28 28
29static unsigned int param_busfreq = 0;
30static unsigned int param_max_multiplier = 0;
31
32module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
33MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
34
35module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
36MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
29 37
30/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ 38/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
31static struct cpufreq_frequency_table clock_ratio[] = { 39static struct cpufreq_frequency_table clock_ratio[] = {
32 {45, /* 000 -> 4.5x */ 0}, 40 {60, /* 110 -> 6.0x */ 0},
41 {55, /* 011 -> 5.5x */ 0},
33 {50, /* 001 -> 5.0x */ 0}, 42 {50, /* 001 -> 5.0x */ 0},
43 {45, /* 000 -> 4.5x */ 0},
34 {40, /* 010 -> 4.0x */ 0}, 44 {40, /* 010 -> 4.0x */ 0},
35 {55, /* 011 -> 5.5x */ 0},
36 {20, /* 100 -> 2.0x */ 0},
37 {30, /* 101 -> 3.0x */ 0},
38 {60, /* 110 -> 6.0x */ 0},
39 {35, /* 111 -> 3.5x */ 0}, 45 {35, /* 111 -> 3.5x */ 0},
46 {30, /* 101 -> 3.0x */ 0},
47 {20, /* 100 -> 2.0x */ 0},
40 {0, CPUFREQ_TABLE_END} 48 {0, CPUFREQ_TABLE_END}
41}; 49};
42 50
51static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
52static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
53
54static const struct {
55 unsigned freq;
56 unsigned mult;
57} usual_frequency_table[] = {
58 { 400000, 40 }, // 100 * 4
59 { 450000, 45 }, // 100 * 4.5
60 { 475000, 50 }, // 95 * 5
61 { 500000, 50 }, // 100 * 5
62 { 506250, 45 }, // 112.5 * 4.5
63 { 533500, 55 }, // 97 * 5.5
64 { 550000, 55 }, // 100 * 5.5
65 { 562500, 50 }, // 112.5 * 5
66 { 570000, 60 }, // 95 * 6
67 { 600000, 60 }, // 100 * 6
68 { 618750, 55 }, // 112.5 * 5.5
69 { 660000, 55 }, // 120 * 5.5
70 { 675000, 60 }, // 112.5 * 6
71 { 720000, 60 }, // 120 * 6
72};
73
74#define FREQ_RANGE 3000
43 75
44/** 76/**
45 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier 77 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
46 * 78 *
47 * Returns the current setting of the frequency multiplier. Core clock 79 * Returns the current setting of the frequency multiplier. Core clock
48 * speed is frequency of the Front-Side Bus multiplied with this value. 80 * speed is frequency of the Front-Side Bus multiplied with this value.
49 */ 81 */
50static int powernow_k6_get_cpu_multiplier(void) 82static int powernow_k6_get_cpu_multiplier(void)
51{ 83{
52 u64 invalue = 0; 84 unsigned long invalue = 0;
53 u32 msrval; 85 u32 msrval;
54 86
87 local_irq_disable();
88
55 msrval = POWERNOW_IOPORT + 0x1; 89 msrval = POWERNOW_IOPORT + 0x1;
56 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ 90 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
57 invalue = inl(POWERNOW_IOPORT + 0x8); 91 invalue = inl(POWERNOW_IOPORT + 0x8);
58 msrval = POWERNOW_IOPORT + 0x0; 92 msrval = POWERNOW_IOPORT + 0x0;
59 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ 93 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
60 94
61 return clock_ratio[(invalue >> 5)&7].driver_data; 95 local_irq_enable();
96
97 return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
62} 98}
63 99
100static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
101{
102 unsigned long outvalue, invalue;
103 unsigned long msrval;
104 unsigned long cr0;
105
106 /* we now need to transform best_i to the BVC format, see AMD#23446 */
107
108 /*
109 * The processor doesn't respond to inquiry cycles while changing the
110 * frequency, so we must disable cache.
111 */
112 local_irq_disable();
113 cr0 = read_cr0();
114 write_cr0(cr0 | X86_CR0_CD);
115 wbinvd();
116
117 outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
118
119 msrval = POWERNOW_IOPORT + 0x1;
120 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
121 invalue = inl(POWERNOW_IOPORT + 0x8);
122 invalue = invalue & 0x1f;
123 outvalue = outvalue | invalue;
124 outl(outvalue, (POWERNOW_IOPORT + 0x8));
125 msrval = POWERNOW_IOPORT + 0x0;
126 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
127
128 write_cr0(cr0);
129 local_irq_enable();
130}
64 131
65/** 132/**
66 * powernow_k6_target - set the PowerNow! multiplier 133 * powernow_k6_target - set the PowerNow! multiplier
@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
71static int powernow_k6_target(struct cpufreq_policy *policy, 138static int powernow_k6_target(struct cpufreq_policy *policy,
72 unsigned int best_i) 139 unsigned int best_i)
73{ 140{
74 unsigned long outvalue = 0, invalue = 0;
75 unsigned long msrval;
76 struct cpufreq_freqs freqs; 141 struct cpufreq_freqs freqs;
77 142
78 if (clock_ratio[best_i].driver_data > max_multiplier) { 143 if (clock_ratio[best_i].driver_data > max_multiplier) {
@@ -85,35 +150,63 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
85 150
86 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 151 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
87 152
88 /* we now need to transform best_i to the BVC format, see AMD#23446 */ 153 powernow_k6_set_cpu_multiplier(best_i);
89
90 outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
91
92 msrval = POWERNOW_IOPORT + 0x1;
93 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
94 invalue = inl(POWERNOW_IOPORT + 0x8);
95 invalue = invalue & 0xf;
96 outvalue = outvalue | invalue;
97 outl(outvalue , (POWERNOW_IOPORT + 0x8));
98 msrval = POWERNOW_IOPORT + 0x0;
99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
100 154
101 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 155 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
102 156
103 return 0; 157 return 0;
104} 158}
105 159
106
107static int powernow_k6_cpu_init(struct cpufreq_policy *policy) 160static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
108{ 161{
109 unsigned int i, f; 162 unsigned int i, f;
163 unsigned khz;
110 164
111 if (policy->cpu != 0) 165 if (policy->cpu != 0)
112 return -ENODEV; 166 return -ENODEV;
113 167
114 /* get frequencies */ 168 max_multiplier = 0;
115 max_multiplier = powernow_k6_get_cpu_multiplier(); 169 khz = cpu_khz;
116 busfreq = cpu_khz / max_multiplier; 170 for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
171 if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
172 khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
173 khz = usual_frequency_table[i].freq;
174 max_multiplier = usual_frequency_table[i].mult;
175 break;
176 }
177 }
178 if (param_max_multiplier) {
179 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
180 if (clock_ratio[i].driver_data == param_max_multiplier) {
181 max_multiplier = param_max_multiplier;
182 goto have_max_multiplier;
183 }
184 }
185 printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
186 return -EINVAL;
187 }
188
189 if (!max_multiplier) {
190 printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
191 printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
192 return -EOPNOTSUPP;
193 }
194
195have_max_multiplier:
196 param_max_multiplier = max_multiplier;
197
198 if (param_busfreq) {
199 if (param_busfreq >= 50000 && param_busfreq <= 150000) {
200 busfreq = param_busfreq / 10;
201 goto have_busfreq;
202 }
203 printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
204 return -EINVAL;
205 }
206
207 busfreq = khz / max_multiplier;
208have_busfreq:
209 param_busfreq = busfreq * 10;
117 210
118 /* table init */ 211 /* table init */
119 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 212 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -125,7 +218,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
125 } 218 }
126 219
127 /* cpuinfo and default policy values */ 220 /* cpuinfo and default policy values */
128 policy->cpuinfo.transition_latency = 200000; 221 policy->cpuinfo.transition_latency = 500000;
129 222
130 return cpufreq_table_validate_and_show(policy, clock_ratio); 223 return cpufreq_table_validate_and_show(policy, clock_ratio);
131} 224}
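Instead of deriving the bus frequency from the multiplier read back from the chip, the init path now snaps the measured cpu_khz to a known frequency/multiplier pair within FREQ_RANGE (3 MHz), and table indices are translated to the hardware's BVC encoding through index_to_register[]/register_to_index[]. A worked example of the snapping (the measured value is made up):

	/* Same matching rule as the loop in powernow_k6_cpu_init(). */
	static unsigned int snap_khz(unsigned int measured_khz)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++)
			if (measured_khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
			    measured_khz <= usual_frequency_table[i].freq + FREQ_RANGE)
				return usual_frequency_table[i].freq;
		return 0;	/* no match: the driver asks for module parameters instead */
	}

	/*
	 * snap_khz(474812) == 475000 -> multiplier 50, busfreq = 475000 / 50 = 9500
	 * (10 kHz units, i.e. a 95 MHz FSB); bus_frequency is then reported as 95000.
	 */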
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0023c7d40a51..e10b646634d7 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -964,14 +964,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
964 cpufreq_cpu_put(policy); 964 cpufreq_cpu_put(policy);
965 965
966 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 966 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
967
968 res = transition_fid_vid(data, fid, vid); 967 res = transition_fid_vid(data, fid, vid);
969 if (res) 968 cpufreq_notify_post_transition(policy, &freqs, res);
970 freqs.new = freqs.old;
971 else
972 freqs.new = find_khz_freq_from_fid(data->currfid);
973 969
974 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
975 return res; 970 return res;
976} 971}
977 972
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3f7be46d2b27..051000f44ca2 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -24,12 +24,10 @@
24 24
25/** 25/**
26 * struct cpu_data - per CPU data struct 26 * struct cpu_data - per CPU data struct
27 * @clk: the clk of CPU
28 * @parent: the parent node of cpu clock 27 * @parent: the parent node of cpu clock
29 * @table: frequency table 28 * @table: frequency table
30 */ 29 */
31struct cpu_data { 30struct cpu_data {
32 struct clk *clk;
33 struct device_node *parent; 31 struct device_node *parent;
34 struct cpufreq_frequency_table *table; 32 struct cpufreq_frequency_table *table;
35}; 33};
@@ -81,13 +79,6 @@ static inline const struct cpumask *cpu_core_mask(int cpu)
81} 79}
82#endif 80#endif
83 81
84static unsigned int corenet_cpufreq_get_speed(unsigned int cpu)
85{
86 struct cpu_data *data = per_cpu(cpu_data, cpu);
87
88 return clk_get_rate(data->clk) / 1000;
89}
90
91/* reduce the duplicated frequencies in frequency table */ 82/* reduce the duplicated frequencies in frequency table */
92static void freq_table_redup(struct cpufreq_frequency_table *freq_table, 83static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
93 int count) 84 int count)
@@ -158,8 +149,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
158 goto err_np; 149 goto err_np;
159 } 150 }
160 151
161 data->clk = of_clk_get(np, 0); 152 policy->clk = of_clk_get(np, 0);
162 if (IS_ERR(data->clk)) { 153 if (IS_ERR(policy->clk)) {
163 pr_err("%s: no clock information\n", __func__); 154 pr_err("%s: no clock information\n", __func__);
164 goto err_nomem2; 155 goto err_nomem2;
165 } 156 }
@@ -255,7 +246,7 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
255 struct cpu_data *data = per_cpu(cpu_data, policy->cpu); 246 struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
256 247
257 parent = of_clk_get(data->parent, data->table[index].driver_data); 248 parent = of_clk_get(data->parent, data->table[index].driver_data);
258 return clk_set_parent(data->clk, parent); 249 return clk_set_parent(policy->clk, parent);
259} 250}
260 251
261static struct cpufreq_driver ppc_corenet_cpufreq_driver = { 252static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
@@ -265,7 +256,7 @@ static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
265 .exit = __exit_p(corenet_cpufreq_cpu_exit), 256 .exit = __exit_p(corenet_cpufreq_cpu_exit),
266 .verify = cpufreq_generic_frequency_table_verify, 257 .verify = cpufreq_generic_frequency_table_verify,
267 .target_index = corenet_cpufreq_target, 258 .target_index = corenet_cpufreq_target,
268 .get = corenet_cpufreq_get_speed, 259 .get = cpufreq_generic_get,
269 .attr = cpufreq_generic_attr, 260 .attr = cpufreq_generic_attr,
270}; 261};
271 262
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 0a0f4369636a..a9195a86b069 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -423,6 +423,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
423} 423}
424 424
425static struct cpufreq_driver pxa_cpufreq_driver = { 425static struct cpufreq_driver pxa_cpufreq_driver = {
426 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
426 .verify = cpufreq_generic_frequency_table_verify, 427 .verify = cpufreq_generic_frequency_table_verify,
427 .target_index = pxa_set_target, 428 .target_index = pxa_set_target,
428 .init = pxa_cpufreq_init, 429 .init = pxa_cpufreq_init,
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 93840048dd11..3785687e9d70 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -201,6 +201,7 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
201} 201}
202 202
203static struct cpufreq_driver pxa3xx_cpufreq_driver = { 203static struct cpufreq_driver pxa3xx_cpufreq_driver = {
204 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
204 .verify = cpufreq_generic_frequency_table_verify, 205 .verify = cpufreq_generic_frequency_table_verify,
205 .target_index = pxa3xx_cpufreq_set, 206 .target_index = pxa3xx_cpufreq_set,
206 .init = pxa3xx_cpufreq_init, 207 .init = pxa3xx_cpufreq_init,
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 8d904a00027b..826b8be23099 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -481,7 +481,7 @@ err_hclk:
481} 481}
482 482
483static struct cpufreq_driver s3c2416_cpufreq_driver = { 483static struct cpufreq_driver s3c2416_cpufreq_driver = {
484 .flags = 0, 484 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
485 .verify = cpufreq_generic_frequency_table_verify, 485 .verify = cpufreq_generic_frequency_table_verify,
486 .target_index = s3c2416_cpufreq_set_target, 486 .target_index = s3c2416_cpufreq_set_target,
487 .get = s3c2416_cpufreq_get_speed, 487 .get = s3c2416_cpufreq_get_speed,
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 72b2cc8a5a85..f84ed10755b5 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -22,8 +22,6 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#include <mach/hardware.h>
26
27#include <asm/mach/arch.h> 25#include <asm/mach/arch.h>
28#include <asm/mach/map.h> 26#include <asm/mach/map.h>
29 27
@@ -55,7 +53,7 @@ static inline int within_khz(unsigned long a, unsigned long b)
55 * specified in @cfg. The values are stored in @cfg for later use 53 * specified in @cfg. The values are stored in @cfg for later use
56 * by the relevant set routine if the request settings can be reached. 54 * by the relevant set routine if the request settings can be reached.
57 */ 55 */
58int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) 56static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
59{ 57{
60 unsigned int hdiv, pdiv; 58 unsigned int hdiv, pdiv;
61 unsigned long hclk, fclk, armclk; 59 unsigned long hclk, fclk, armclk;
@@ -242,7 +240,7 @@ static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
242 return ret; 240 return ret;
243} 241}
244 242
245struct s3c_cpufreq_info s3c2440_cpufreq_info = { 243static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
246 .max = { 244 .max = {
247 .fclk = 400000000, 245 .fclk = 400000000,
248 .hclk = 133333333, 246 .hclk = 133333333,
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 485088253358..25069741b507 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -355,11 +355,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
355 return -EINVAL; 355 return -EINVAL;
356} 356}
357 357
358static unsigned int s3c_cpufreq_get(unsigned int cpu)
359{
360 return clk_get_rate(clk_arm) / 1000;
361}
362
363struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) 358struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
364{ 359{
365 struct clk *clk; 360 struct clk *clk;
@@ -373,6 +368,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
373 368
374static int s3c_cpufreq_init(struct cpufreq_policy *policy) 369static int s3c_cpufreq_init(struct cpufreq_policy *policy)
375{ 370{
371 policy->clk = clk_arm;
376 return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); 372 return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
377} 373}
378 374
@@ -408,7 +404,7 @@ static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
408{ 404{
409 suspend_pll.frequency = clk_get_rate(_clk_mpll); 405 suspend_pll.frequency = clk_get_rate(_clk_mpll);
410 suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON); 406 suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
411 suspend_freq = s3c_cpufreq_get(0) * 1000; 407 suspend_freq = clk_get_rate(clk_arm);
412 408
413 return 0; 409 return 0;
414} 410}
@@ -448,9 +444,9 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
448#endif 444#endif
449 445
450static struct cpufreq_driver s3c24xx_driver = { 446static struct cpufreq_driver s3c24xx_driver = {
451 .flags = CPUFREQ_STICKY, 447 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
452 .target = s3c_cpufreq_target, 448 .target = s3c_cpufreq_target,
453 .get = s3c_cpufreq_get, 449 .get = cpufreq_generic_get,
454 .init = s3c_cpufreq_init, 450 .init = s3c_cpufreq_init,
455 .suspend = s3c_cpufreq_suspend, 451 .suspend = s3c_cpufreq_suspend,
456 .resume = s3c_cpufreq_resume, 452 .resume = s3c_cpufreq_resume,
@@ -509,7 +505,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
509 return 0; 505 return 0;
510} 506}
511 507
512int __init s3c_cpufreq_auto_io(void) 508static int __init s3c_cpufreq_auto_io(void)
513{ 509{
514 int ret; 510 int ret;
515 511
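
The recurring shape of these conversions (already visible in the s3c24xx hunks above and repeated in the s3c64xx, s5pv210, spear, tegra and unicore2 hunks below) is that each driver's hand-rolled ->get() callback built on clk_get_rate() is deleted, the CPU clock is stored in policy->clk during ->init(), and .get is pointed at the core helper cpufreq_generic_get(). The helper itself is not part of this diff; the following is only a minimal sketch of what a clk-backed getter of this kind amounts to, assuming the policy->clk convention shown here (example_clk_based_get and its locals are illustrative names, not the core implementation):

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

/* Sketch: report the current frequency, in kHz, of the policy owning @cpu,
 * using the clock the driver stashed in policy->clk at ->init() time. */
static unsigned int example_clk_based_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int khz = 0;

	if (!policy)
		return 0;

	if (!IS_ERR_OR_NULL(policy->clk))
		khz = clk_get_rate(policy->clk) / 1000;	/* Hz -> kHz */

	cpufreq_cpu_put(policy);
	return khz;
}

The CPUFREQ_NEED_INITIAL_FREQ_CHECK flag added alongside these conversions, as its name suggests, asks the cpufreq core to verify at initialization that the frequency the driver reports back is a valid table entry and to correct it if it is not.
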
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 67e302eeefec..c4226de079ab 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
19#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22static struct clk *armclk;
23static struct regulator *vddarm; 22static struct regulator *vddarm;
24static unsigned long regulator_latency; 23static unsigned long regulator_latency;
25 24
@@ -54,14 +53,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
54}; 53};
55#endif 54#endif
56 55
57static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
58{
59 if (cpu != 0)
60 return 0;
61
62 return clk_get_rate(armclk) / 1000;
63}
64
65static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, 56static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
66 unsigned int index) 57 unsigned int index)
67{ 58{
@@ -69,7 +60,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
69 unsigned int old_freq, new_freq; 60 unsigned int old_freq, new_freq;
70 int ret; 61 int ret;
71 62
72 old_freq = clk_get_rate(armclk) / 1000; 63 old_freq = clk_get_rate(policy->clk) / 1000;
73 new_freq = s3c64xx_freq_table[index].frequency; 64 new_freq = s3c64xx_freq_table[index].frequency;
74 dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; 65 dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
75 66
@@ -86,7 +77,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
86 } 77 }
87#endif 78#endif
88 79
89 ret = clk_set_rate(armclk, new_freq * 1000); 80 ret = clk_set_rate(policy->clk, new_freq * 1000);
90 if (ret < 0) { 81 if (ret < 0) {
91 pr_err("Failed to set rate %dkHz: %d\n", 82 pr_err("Failed to set rate %dkHz: %d\n",
92 new_freq, ret); 83 new_freq, ret);
@@ -101,7 +92,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
101 if (ret != 0) { 92 if (ret != 0) {
102 pr_err("Failed to set VDDARM for %dkHz: %d\n", 93 pr_err("Failed to set VDDARM for %dkHz: %d\n",
103 new_freq, ret); 94 new_freq, ret);
104 if (clk_set_rate(armclk, old_freq * 1000) < 0) 95 if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
105 pr_err("Failed to restore original clock rate\n"); 96 pr_err("Failed to restore original clock rate\n");
106 97
107 return ret; 98 return ret;
@@ -110,7 +101,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
110#endif 101#endif
111 102
112 pr_debug("Set actual frequency %lukHz\n", 103 pr_debug("Set actual frequency %lukHz\n",
113 clk_get_rate(armclk) / 1000); 104 clk_get_rate(policy->clk) / 1000);
114 105
115 return 0; 106 return 0;
116} 107}
@@ -169,11 +160,11 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
169 return -ENODEV; 160 return -ENODEV;
170 } 161 }
171 162
172 armclk = clk_get(NULL, "armclk"); 163 policy->clk = clk_get(NULL, "armclk");
173 if (IS_ERR(armclk)) { 164 if (IS_ERR(policy->clk)) {
174 pr_err("Unable to obtain ARMCLK: %ld\n", 165 pr_err("Unable to obtain ARMCLK: %ld\n",
175 PTR_ERR(armclk)); 166 PTR_ERR(policy->clk));
176 return PTR_ERR(armclk); 167 return PTR_ERR(policy->clk);
177 } 168 }
178 169
179#ifdef CONFIG_REGULATOR 170#ifdef CONFIG_REGULATOR
@@ -193,7 +184,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
193 unsigned long r; 184 unsigned long r;
194 185
195 /* Check for frequencies we can generate */ 186 /* Check for frequencies we can generate */
196 r = clk_round_rate(armclk, freq->frequency * 1000); 187 r = clk_round_rate(policy->clk, freq->frequency * 1000);
197 r /= 1000; 188 r /= 1000;
198 if (r != freq->frequency) { 189 if (r != freq->frequency) {
199 pr_debug("%dkHz unsupported by clock\n", 190 pr_debug("%dkHz unsupported by clock\n",
@@ -203,7 +194,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
203 194
204 /* If we have no regulator then assume startup 195 /* If we have no regulator then assume startup
205 * frequency is the maximum we can support. */ 196 * frequency is the maximum we can support. */
206 if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0)) 197 if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
207 freq->frequency = CPUFREQ_ENTRY_INVALID; 198 freq->frequency = CPUFREQ_ENTRY_INVALID;
208 199
209 freq++; 200 freq++;
@@ -219,17 +210,17 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
219 pr_err("Failed to configure frequency table: %d\n", 210 pr_err("Failed to configure frequency table: %d\n",
220 ret); 211 ret);
221 regulator_put(vddarm); 212 regulator_put(vddarm);
222 clk_put(armclk); 213 clk_put(policy->clk);
223 } 214 }
224 215
225 return ret; 216 return ret;
226} 217}
227 218
228static struct cpufreq_driver s3c64xx_cpufreq_driver = { 219static struct cpufreq_driver s3c64xx_cpufreq_driver = {
229 .flags = 0, 220 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
230 .verify = cpufreq_generic_frequency_table_verify, 221 .verify = cpufreq_generic_frequency_table_verify,
231 .target_index = s3c64xx_cpufreq_set_target, 222 .target_index = s3c64xx_cpufreq_set_target,
232 .get = s3c64xx_cpufreq_get_speed, 223 .get = cpufreq_generic_get,
233 .init = s3c64xx_cpufreq_driver_init, 224 .init = s3c64xx_cpufreq_driver_init,
234 .name = "s3c", 225 .name = "s3c",
235}; 226};
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index e3973dae28a7..55a8e9fa9435 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -23,7 +23,6 @@
23#include <mach/map.h> 23#include <mach/map.h>
24#include <mach/regs-clock.h> 24#include <mach/regs-clock.h>
25 25
26static struct clk *cpu_clk;
27static struct clk *dmc0_clk; 26static struct clk *dmc0_clk;
28static struct clk *dmc1_clk; 27static struct clk *dmc1_clk;
29static DEFINE_MUTEX(set_freq_lock); 28static DEFINE_MUTEX(set_freq_lock);
@@ -164,14 +163,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
164 __raw_writel(tmp1, reg); 163 __raw_writel(tmp1, reg);
165} 164}
166 165
167static unsigned int s5pv210_getspeed(unsigned int cpu)
168{
169 if (cpu)
170 return 0;
171
172 return clk_get_rate(cpu_clk) / 1000;
173}
174
175static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) 166static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
176{ 167{
177 unsigned long reg; 168 unsigned long reg;
@@ -193,7 +184,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
193 goto exit; 184 goto exit;
194 } 185 }
195 186
196 old_freq = s5pv210_getspeed(0); 187 old_freq = policy->cur;
197 new_freq = s5pv210_freq_table[index].frequency; 188 new_freq = s5pv210_freq_table[index].frequency;
198 189
199 /* Finding current running level index */ 190 /* Finding current running level index */
@@ -471,9 +462,9 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
471 unsigned long mem_type; 462 unsigned long mem_type;
472 int ret; 463 int ret;
473 464
474 cpu_clk = clk_get(NULL, "armclk"); 465 policy->clk = clk_get(NULL, "armclk");
475 if (IS_ERR(cpu_clk)) 466 if (IS_ERR(policy->clk))
476 return PTR_ERR(cpu_clk); 467 return PTR_ERR(policy->clk);
477 468
478 dmc0_clk = clk_get(NULL, "sclk_dmc0"); 469 dmc0_clk = clk_get(NULL, "sclk_dmc0");
479 if (IS_ERR(dmc0_clk)) { 470 if (IS_ERR(dmc0_clk)) {
@@ -516,7 +507,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
516out_dmc1: 507out_dmc1:
517 clk_put(dmc0_clk); 508 clk_put(dmc0_clk);
518out_dmc0: 509out_dmc0:
519 clk_put(cpu_clk); 510 clk_put(policy->clk);
520 return ret; 511 return ret;
521} 512}
522 513
@@ -560,10 +551,10 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
560} 551}
561 552
562static struct cpufreq_driver s5pv210_driver = { 553static struct cpufreq_driver s5pv210_driver = {
563 .flags = CPUFREQ_STICKY, 554 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
564 .verify = cpufreq_generic_frequency_table_verify, 555 .verify = cpufreq_generic_frequency_table_verify,
565 .target_index = s5pv210_target, 556 .target_index = s5pv210_target,
566 .get = s5pv210_getspeed, 557 .get = cpufreq_generic_get,
567 .init = s5pv210_cpu_init, 558 .init = s5pv210_cpu_init,
568 .name = "s5pv210", 559 .name = "s5pv210",
569#ifdef CONFIG_PM 560#ifdef CONFIG_PM
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 623da742f8e7..728eab77e8e0 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -201,7 +201,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
201} 201}
202 202
203static struct cpufreq_driver sa1100_driver __refdata = { 203static struct cpufreq_driver sa1100_driver __refdata = {
204 .flags = CPUFREQ_STICKY, 204 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
205 .verify = cpufreq_generic_frequency_table_verify, 205 .verify = cpufreq_generic_frequency_table_verify,
206 .target_index = sa1100_target, 206 .target_index = sa1100_target,
207 .get = sa11x0_getspeed, 207 .get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 2c2b2e601d13..546376719d8f 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -312,7 +312,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
312/* sa1110_driver needs __refdata because it must remain after init registers 312/* sa1110_driver needs __refdata because it must remain after init registers
313 * it with cpufreq_register_driver() */ 313 * it with cpufreq_register_driver() */
314static struct cpufreq_driver sa1110_driver __refdata = { 314static struct cpufreq_driver sa1110_driver __refdata = {
315 .flags = CPUFREQ_STICKY, 315 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
316 .verify = cpufreq_generic_frequency_table_verify, 316 .verify = cpufreq_generic_frequency_table_verify,
317 .target_index = sa1110_target, 317 .target_index = sa1110_target,
318 .get = sa11x0_getspeed, 318 .get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d02ccd19c9c4..5c86e3fa5593 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@ static struct {
30 u32 cnt; 30 u32 cnt;
31} spear_cpufreq; 31} spear_cpufreq;
32 32
33static unsigned int spear_cpufreq_get(unsigned int cpu)
34{
35 return clk_get_rate(spear_cpufreq.clk) / 1000;
36}
37
38static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) 33static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
39{ 34{
40 struct clk *sys_pclk; 35 struct clk *sys_pclk;
@@ -138,7 +133,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
138 } 133 }
139 134
140 newfreq = clk_round_rate(srcclk, newfreq * mult); 135 newfreq = clk_round_rate(srcclk, newfreq * mult);
141 if (newfreq < 0) { 136 if (newfreq <= 0) {
142 pr_err("clk_round_rate failed for cpu src clock\n"); 137 pr_err("clk_round_rate failed for cpu src clock\n");
143 return newfreq; 138 return newfreq;
144 } 139 }
@@ -156,16 +151,17 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
156 151
157static int spear_cpufreq_init(struct cpufreq_policy *policy) 152static int spear_cpufreq_init(struct cpufreq_policy *policy)
158{ 153{
154 policy->clk = spear_cpufreq.clk;
159 return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, 155 return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
160 spear_cpufreq.transition_latency); 156 spear_cpufreq.transition_latency);
161} 157}
162 158
163static struct cpufreq_driver spear_cpufreq_driver = { 159static struct cpufreq_driver spear_cpufreq_driver = {
164 .name = "cpufreq-spear", 160 .name = "cpufreq-spear",
165 .flags = CPUFREQ_STICKY, 161 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
166 .verify = cpufreq_generic_frequency_table_verify, 162 .verify = cpufreq_generic_frequency_table_verify,
167 .target_index = spear_cpufreq_target, 163 .target_index = spear_cpufreq_target,
168 .get = spear_cpufreq_get, 164 .get = cpufreq_generic_get,
169 .init = spear_cpufreq_init, 165 .init = spear_cpufreq_init,
170 .exit = cpufreq_generic_exit, 166 .exit = cpufreq_generic_exit,
171 .attr = cpufreq_generic_attr, 167 .attr = cpufreq_generic_attr,
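
The ->init() side of the same conversion is what the spear hunk just above (and the s3c24xx one earlier) shows: record the clock in policy->clk so the generic getter can use it, then hand the frequency table and transition latency to cpufreq_generic_init(). A minimal sketch of that shape, where my_cpu_clk, my_freq_table and the 100 µs latency are placeholders rather than values taken from any of these drivers:

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *my_cpu_clk;				/* placeholder, obtained via clk_get() */
static struct cpufreq_frequency_table *my_freq_table;	/* placeholder table */

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Make the clock visible to the generic ->get() helper. */
	policy->clk = my_cpu_clk;

	/* Installs the table and sets cpuinfo limits plus the
	 * transition latency (in nanoseconds). */
	return cpufreq_generic_init(policy, my_freq_table, 100 * 1000);
}
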
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0f5326d6f79f..998c17b42200 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -141,38 +141,6 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
141} 141}
142 142
143/** 143/**
144 * speedstep_get_state - set the SpeedStep state
145 * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
146 *
147 */
148static int speedstep_get_state(void)
149{
150 u32 function = GET_SPEEDSTEP_STATE;
151 u32 result, state, edi, command, dummy;
152
153 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
154
155 pr_debug("trying to determine current setting with command %x "
156 "at port %x\n", command, smi_port);
157
158 __asm__ __volatile__(
159 "push %%ebp\n"
160 "out %%al, (%%dx)\n"
161 "pop %%ebp\n"
162 : "=a" (result),
163 "=b" (state), "=D" (edi),
164 "=c" (dummy), "=d" (dummy), "=S" (dummy)
165 : "a" (command), "b" (function), "c" (0),
166 "d" (smi_port), "S" (0), "D" (0)
167 );
168
169 pr_debug("state is %x, result is %x\n", state, result);
170
171 return state & 1;
172}
173
174
175/**
176 * speedstep_set_state - set the SpeedStep state 144 * speedstep_set_state - set the SpeedStep state
177 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) 145 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
178 * 146 *
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index b7309c37033d..e652c1bd8d0f 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -47,21 +47,9 @@ static struct clk *pll_x_clk;
47static struct clk *pll_p_clk; 47static struct clk *pll_p_clk;
48static struct clk *emc_clk; 48static struct clk *emc_clk;
49 49
50static unsigned long target_cpu_speed[NUM_CPUS];
51static DEFINE_MUTEX(tegra_cpu_lock); 50static DEFINE_MUTEX(tegra_cpu_lock);
52static bool is_suspended; 51static bool is_suspended;
53 52
54static unsigned int tegra_getspeed(unsigned int cpu)
55{
56 unsigned long rate;
57
58 if (cpu >= NUM_CPUS)
59 return 0;
60
61 rate = clk_get_rate(cpu_clk) / 1000;
62 return rate;
63}
64
65static int tegra_cpu_clk_set_rate(unsigned long rate) 53static int tegra_cpu_clk_set_rate(unsigned long rate)
66{ 54{
67 int ret; 55 int ret;
@@ -103,9 +91,6 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
103{ 91{
104 int ret = 0; 92 int ret = 0;
105 93
106 if (tegra_getspeed(0) == rate)
107 return ret;
108
109 /* 94 /*
110 * Vote on memory bus frequency based on cpu frequency 95 * Vote on memory bus frequency based on cpu frequency
111 * This sets the minimum frequency, display or avp may request higher 96 * This sets the minimum frequency, display or avp may request higher
@@ -125,33 +110,16 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
125 return ret; 110 return ret;
126} 111}
127 112
128static unsigned long tegra_cpu_highest_speed(void)
129{
130 unsigned long rate = 0;
131 int i;
132
133 for_each_online_cpu(i)
134 rate = max(rate, target_cpu_speed[i]);
135 return rate;
136}
137
138static int tegra_target(struct cpufreq_policy *policy, unsigned int index) 113static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
139{ 114{
140 unsigned int freq; 115 int ret = -EBUSY;
141 int ret = 0;
142 116
143 mutex_lock(&tegra_cpu_lock); 117 mutex_lock(&tegra_cpu_lock);
144 118
145 if (is_suspended) 119 if (!is_suspended)
146 goto out; 120 ret = tegra_update_cpu_speed(policy,
147 121 freq_table[index].frequency);
148 freq = freq_table[index].frequency;
149 122
150 target_cpu_speed[policy->cpu] = freq;
151
152 ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
153
154out:
155 mutex_unlock(&tegra_cpu_lock); 123 mutex_unlock(&tegra_cpu_lock);
156 return ret; 124 return ret;
157} 125}
@@ -165,7 +133,8 @@ static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
165 is_suspended = true; 133 is_suspended = true;
166 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", 134 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
167 freq_table[0].frequency); 135 freq_table[0].frequency);
168 tegra_update_cpu_speed(policy, freq_table[0].frequency); 136 if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
137 tegra_update_cpu_speed(policy, freq_table[0].frequency);
169 cpufreq_cpu_put(policy); 138 cpufreq_cpu_put(policy);
170 } else if (event == PM_POST_SUSPEND) { 139 } else if (event == PM_POST_SUSPEND) {
171 is_suspended = false; 140 is_suspended = false;
@@ -189,8 +158,6 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
189 clk_prepare_enable(emc_clk); 158 clk_prepare_enable(emc_clk);
190 clk_prepare_enable(cpu_clk); 159 clk_prepare_enable(cpu_clk);
191 160
192 target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
193
194 /* FIXME: what's the actual transition time? */ 161 /* FIXME: what's the actual transition time? */
195 ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); 162 ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
196 if (ret) { 163 if (ret) {
@@ -202,6 +169,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
202 if (policy->cpu == 0) 169 if (policy->cpu == 0)
203 register_pm_notifier(&tegra_cpu_pm_notifier); 170 register_pm_notifier(&tegra_cpu_pm_notifier);
204 171
172 policy->clk = cpu_clk;
205 return 0; 173 return 0;
206} 174}
207 175
@@ -214,9 +182,10 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
214} 182}
215 183
216static struct cpufreq_driver tegra_cpufreq_driver = { 184static struct cpufreq_driver tegra_cpufreq_driver = {
185 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
217 .verify = cpufreq_generic_frequency_table_verify, 186 .verify = cpufreq_generic_frequency_table_verify,
218 .target_index = tegra_target, 187 .target_index = tegra_target,
219 .get = tegra_getspeed, 188 .get = cpufreq_generic_get,
220 .init = tegra_cpu_init, 189 .init = tegra_cpu_init,
221 .exit = tegra_cpu_exit, 190 .exit = tegra_cpu_exit,
222 .name = "tegra", 191 .name = "tegra",
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 653ae2955b55..36cc330b8747 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -11,6 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/err.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/init.h> 17#include <linux/init.h>
@@ -33,42 +34,34 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
33 return 0; 34 return 0;
34} 35}
35 36
36static unsigned int ucv2_getspeed(unsigned int cpu)
37{
38 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
39
40 if (cpu)
41 return 0;
42 return clk_get_rate(mclk)/1000;
43}
44
45static int ucv2_target(struct cpufreq_policy *policy, 37static int ucv2_target(struct cpufreq_policy *policy,
46 unsigned int target_freq, 38 unsigned int target_freq,
47 unsigned int relation) 39 unsigned int relation)
48{ 40{
49 unsigned int cur = ucv2_getspeed(0);
50 struct cpufreq_freqs freqs; 41 struct cpufreq_freqs freqs;
51 struct clk *mclk = clk_get(NULL, "MAIN_CLK"); 42 int ret;
52 43
53 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 44 freqs.old = policy->cur;
45 freqs.new = target_freq;
54 46
55 if (!clk_set_rate(mclk, target_freq * 1000)) { 47 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
56 freqs.old = cur; 48 ret = clk_set_rate(policy->mclk, target_freq * 1000);
57 freqs.new = target_freq; 49 cpufreq_notify_post_transition(policy, &freqs, ret);
58 }
59
60 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
61 50
62 return 0; 51 return ret;
63} 52}
64 53
65static int __init ucv2_cpu_init(struct cpufreq_policy *policy) 54static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
66{ 55{
67 if (policy->cpu != 0) 56 if (policy->cpu != 0)
68 return -EINVAL; 57 return -EINVAL;
58
69 policy->min = policy->cpuinfo.min_freq = 250000; 59 policy->min = policy->cpuinfo.min_freq = 250000;
70 policy->max = policy->cpuinfo.max_freq = 1000000; 60 policy->max = policy->cpuinfo.max_freq = 1000000;
71 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 61 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
62 policy->clk = clk_get(NULL, "MAIN_CLK");
63 if (IS_ERR(policy->clk))
64 return PTR_ERR(policy->clk);
72 return 0; 65 return 0;
73} 66}
74 67
@@ -76,7 +69,7 @@ static struct cpufreq_driver ucv2_driver = {
76 .flags = CPUFREQ_STICKY, 69 .flags = CPUFREQ_STICKY,
77 .verify = ucv2_verify_speed, 70 .verify = ucv2_verify_speed,
78 .target = ucv2_target, 71 .target = ucv2_target,
79 .get = ucv2_getspeed, 72 .get = cpufreq_generic_get,
80 .init = ucv2_cpu_init, 73 .init = ucv2_cpu_init,
81 .name = "UniCore-II", 74 .name = "UniCore-II",
82}; 75};
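
In the unicore2 hunks, besides the same policy->clk/cpufreq_generic_get() conversion (the init hunk now stores MAIN_CLK in policy->clk), the ->target() path is reworked: the removed code issued the PRECHANGE notification with an uninitialized cpufreq_freqs and ignored the clk_set_rate() result, while the new code announces the intended change first and then lets cpufreq_notify_post_transition() either complete the POSTCHANGE or re-announce the old frequency if the rate change failed. A minimal sketch of that pattern for a clk-based driver; example_set_target is an illustrative name, and policy->clk is used for the clock here (the hunk itself writes policy->mclk):

#include <linux/clk.h>
#include <linux/cpufreq.h>

static int example_set_target(struct cpufreq_policy *policy,
			      unsigned int target_freq,
			      unsigned int relation)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;	/* kHz */
	freqs.new = target_freq;	/* kHz */

	/* Tell governors and other listeners a switch is about to happen. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = clk_set_rate(policy->clk, target_freq * 1000);	/* kHz -> Hz */

	/* Completes the POSTCHANGE notification, or rolls it back to the
	 * old frequency if clk_set_rate() reported failure. */
	cpufreq_notify_post_transition(policy, &freqs, ret);

	return ret;
}
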