author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 21:53:33 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-11 21:53:33 -0500
commit    02d929502ce7b57f4835d8bb7c828d36e6d9e8ce (patch)
tree      7bb0ca7a9bfe5c336c3125f823770934a2150ae4 /drivers/cpufreq
parent    b24ca57e7625bc304e77bc429693ad32a691eb16 (diff)
parent    6c523c614c13b84a3dc64f7a56d6855b03e6b292 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq: (23 commits)
  [CPUFREQ] EXYNOS: Removed useless headers and codes
  [CPUFREQ] EXYNOS: Make EXYNOS common cpufreq driver
  [CPUFREQ] powernow-k8: Update copyright, maintainer and documentation information
  [CPUFREQ] powernow-k8: Fix indexing issue
  [CPUFREQ] powernow-k8: Avoid Pstate MSR accesses on systems supporting CPB
  [CPUFREQ] update lpj only if frequency has changed
  [CPUFREQ] cpufreq:userspace: fix cpu_cur_freq updation
  [CPUFREQ] Remove wall variable from cpufreq_gov_dbs_init()
  [CPUFREQ] EXYNOS4210: cpufreq code is changed for stable working
  [CPUFREQ] EXYNOS4210: Update frequency table for cpu divider
  [CPUFREQ] EXYNOS4210: Remove code about bus on cpufreq
  [CPUFREQ] s3c64xx: Use pr_fmt() for consistent log messages
  cpufreq: OMAP: fixup for omap_device changes, include <linux/module.h>
  cpufreq: OMAP: fix freq_table leak
  cpufreq: OMAP: put clk if cpu_init failed
  cpufreq: OMAP: only supports OPP library
  cpufreq: OMAP: dont support !freq_table
  cpufreq: OMAP: deny initialization if no mpudev
  cpufreq: OMAP: move clk name decision to init
  cpufreq: OMAP: notify even with bad boot frequency
  ...
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig.arm           |  15
-rw-r--r--  drivers/cpufreq/Makefile              |   2
-rw-r--r--  drivers/cpufreq/cpufreq.c             |   3
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c    |   3
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c   |   8
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c      | 290
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c  | 643
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c        | 274
-rw-r--r--  drivers/cpufreq/powernow-k8.c         |  47
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c     |  35
10 files changed, 765 insertions(+), 555 deletions(-)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 72a0044c1ba..e0664fed018 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ
 
           If in doubt, say N.
 
+config ARM_EXYNOS_CPUFREQ
+        bool "SAMSUNG EXYNOS SoCs"
+        depends on ARCH_EXYNOS
+        select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
+        default y
+        help
+          This adds the CPUFreq driver common part for Samsung
+          EXYNOS SoCs.
+
+          If in doubt, say N.
+
 config ARM_EXYNOS4210_CPUFREQ
         bool "Samsung EXYNOS4210"
-        depends on CPU_EXYNOS4210
-        default y
         help
           This adds the CPUFreq driver for Samsung EXYNOS4210
           SoC (S5PV310 or S5PC210).
-
-          If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index a48bc02cd76..ac000fa76bb 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
 obj-$(CONFIG_UX500_SOC_DB8500)          += db8500-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)       += s3c64xx-cpufreq.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)       += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)        += exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)    += exynos4210-cpufreq.o
+obj-$(CONFIG_ARCH_OMAP2PLUS)            += omap-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8c2df3499da..622013fb789 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
                 pr_debug("saving %lu as reference value for loops_per_jiffy; "
                         "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
         }
-        if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
-            (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
+        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
             (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                 ci->new);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3d679eee70a..c3e0652520a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -713,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-        cputime64_t wall;
         u64 idle_time;
         int cpu = get_cpu();
 
-        idle_time = get_cpu_idle_time_us(cpu, &wall);
+        idle_time = get_cpu_idle_time_us(cpu, NULL);
         put_cpu();
         if (idle_time != -1ULL) {
                 /* Idle micro accounting is supported. Use finer thresholds */
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index f231015904c..bedac1aa9be 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
         if (!per_cpu(cpu_is_managed, freq->cpu))
                 return 0;
 
-        pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
-                        freq->cpu, freq->new);
-        per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+        if (val == CPUFREQ_POSTCHANGE) {
+                pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+                                freq->cpu, freq->new);
+                per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+        }
 
         return 0;
 }
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
new file mode 100644
index 00000000000..5467879ea07
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *              http://www.samsung.com
+ *
+ * EXYNOS - CPU frequency scaling support for EXYNOS series
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <mach/cpufreq.h>
+
+#include <plat/cpu.h>
+
+static struct exynos_dvfs_info *exynos_info;
+
+static struct regulator *arm_regulator;
+static struct cpufreq_freqs freqs;
+
+static unsigned int locking_frequency;
+static bool frequency_locked;
+static DEFINE_MUTEX(cpufreq_lock);
+
+int exynos_verify_speed(struct cpufreq_policy *policy)
+{
+        return cpufreq_frequency_table_verify(policy,
+                                              exynos_info->freq_table);
+}
+
+unsigned int exynos_getspeed(unsigned int cpu)
+{
+        return clk_get_rate(exynos_info->cpu_clk) / 1000;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+                          unsigned int target_freq,
+                          unsigned int relation)
+{
+        unsigned int index, old_index;
+        unsigned int arm_volt, safe_arm_volt = 0;
+        int ret = 0;
+        struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+        unsigned int *volt_table = exynos_info->volt_table;
+        unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+
+        mutex_lock(&cpufreq_lock);
+
+        freqs.old = policy->cur;
+
+        if (frequency_locked && target_freq != locking_frequency) {
+                ret = -EAGAIN;
+                goto out;
+        }
+
+        if (cpufreq_frequency_table_target(policy, freq_table,
+                                           freqs.old, relation, &old_index)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if (cpufreq_frequency_table_target(policy, freq_table,
+                                           target_freq, relation, &index)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        freqs.new = freq_table[index].frequency;
+        freqs.cpu = policy->cpu;
+
+        /*
+         * ARM clock source will be changed APLL to MPLL temporary
+         * To support this level, need to control regulator for
+         * required voltage level
+         */
+        if (exynos_info->need_apll_change != NULL) {
+                if (exynos_info->need_apll_change(old_index, index) &&
+                   (freq_table[index].frequency < mpll_freq_khz) &&
+                   (freq_table[old_index].frequency < mpll_freq_khz))
+                        safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
+        }
+        arm_volt = volt_table[index];
+
+        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+        /* When the new frequency is higher than current frequency */
+        if ((freqs.new > freqs.old) && !safe_arm_volt) {
+                /* Firstly, voltage up to increase frequency */
+                regulator_set_voltage(arm_regulator, arm_volt,
+                                arm_volt);
+        }
+
+        if (safe_arm_volt)
+                regulator_set_voltage(arm_regulator, safe_arm_volt,
+                                      safe_arm_volt);
+        if (freqs.new != freqs.old)
+                exynos_info->set_freq(old_index, index);
+
+        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+        /* When the new frequency is lower than current frequency */
+        if ((freqs.new < freqs.old) ||
+           ((freqs.new > freqs.old) && safe_arm_volt)) {
+                /* down the voltage after frequency change */
+                regulator_set_voltage(arm_regulator, arm_volt,
+                                arm_volt);
+        }
+
+out:
+        mutex_unlock(&cpufreq_lock);
+
+        return ret;
+}
+
+#ifdef CONFIG_PM
+static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+        return 0;
+}
+
+static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
+{
+        return 0;
+}
+#endif
+
+/**
+ * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
+ *                      context
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activities. Note that the standard
+ * suspend/resume cannot be used as they are too deep (syscore_ops) for
+ * regulator actions.
+ */
+static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
+                                       unsigned long pm_event, void *v)
+{
+        struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+        static unsigned int saved_frequency;
+        unsigned int temp;
+
+        mutex_lock(&cpufreq_lock);
+        switch (pm_event) {
+        case PM_SUSPEND_PREPARE:
+                if (frequency_locked)
+                        goto out;
+
+                frequency_locked = true;
+
+                if (locking_frequency) {
+                        saved_frequency = exynos_getspeed(0);
+
+                        mutex_unlock(&cpufreq_lock);
+                        exynos_target(policy, locking_frequency,
+                                      CPUFREQ_RELATION_H);
+                        mutex_lock(&cpufreq_lock);
+                }
+                break;
+
+        case PM_POST_SUSPEND:
+                if (saved_frequency) {
+                        /*
+                         * While frequency_locked, only locking_frequency
+                         * is valid for target(). In order to use
+                         * saved_frequency while keeping frequency_locked,
+                         * we temporarly overwrite locking_frequency.
+                         */
+                        temp = locking_frequency;
+                        locking_frequency = saved_frequency;
+
+                        mutex_unlock(&cpufreq_lock);
+                        exynos_target(policy, locking_frequency,
+                                      CPUFREQ_RELATION_H);
+                        mutex_lock(&cpufreq_lock);
+
+                        locking_frequency = temp;
+                }
+                frequency_locked = false;
+                break;
+        }
+out:
+        mutex_unlock(&cpufreq_lock);
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpufreq_nb = {
+        .notifier_call = exynos_cpufreq_pm_notifier,
+};
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+        policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
+
+        cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
+
+        /* set the transition latency value */
+        policy->cpuinfo.transition_latency = 100000;
+
+        /*
+         * EXYNOS4 multi-core processors has 2 cores
+         * that the frequency cannot be set independently.
+         * Each cpu is bound to the same speed.
+         * So the affected cpu is all of the cpus.
+         */
+        if (num_online_cpus() == 1) {
+                cpumask_copy(policy->related_cpus, cpu_possible_mask);
+                cpumask_copy(policy->cpus, cpu_online_mask);
+        } else {
+                cpumask_setall(policy->cpus);
+        }
+
+        return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+}
+
+static struct cpufreq_driver exynos_driver = {
+        .flags          = CPUFREQ_STICKY,
+        .verify         = exynos_verify_speed,
+        .target         = exynos_target,
+        .get            = exynos_getspeed,
+        .init           = exynos_cpufreq_cpu_init,
+        .name           = "exynos_cpufreq",
+#ifdef CONFIG_PM
+        .suspend        = exynos_cpufreq_suspend,
+        .resume         = exynos_cpufreq_resume,
+#endif
+};
+
+static int __init exynos_cpufreq_init(void)
+{
+        int ret = -EINVAL;
+
+        exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+        if (!exynos_info)
+                return -ENOMEM;
+
+        if (soc_is_exynos4210())
+                ret = exynos4210_cpufreq_init(exynos_info);
+        else
+                pr_err("%s: CPU type not found\n", __func__);
+
+        if (ret)
+                goto err_vdd_arm;
+
+        if (exynos_info->set_freq == NULL) {
+                pr_err("%s: No set_freq function (ERR)\n", __func__);
+                goto err_vdd_arm;
+        }
+
+        arm_regulator = regulator_get(NULL, "vdd_arm");
+        if (IS_ERR(arm_regulator)) {
+                pr_err("%s: failed to get resource vdd_arm\n", __func__);
+                goto err_vdd_arm;
+        }
+
+        register_pm_notifier(&exynos_cpufreq_nb);
+
+        if (cpufreq_register_driver(&exynos_driver)) {
+                pr_err("%s: failed to register cpufreq driver\n", __func__);
+                goto err_cpufreq;
+        }
+
+        return 0;
+err_cpufreq:
+        unregister_pm_notifier(&exynos_cpufreq_nb);
+
+        if (!IS_ERR(arm_regulator))
+                regulator_put(arm_regulator);
+err_vdd_arm:
+        kfree(exynos_info);
+        pr_debug("%s: failed initialization\n", __func__);
+        return -EINVAL;
+}
+late_initcall(exynos_cpufreq_init);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index ab9741fab92..065da5b702f 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -2,61 +2,52 @@
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com 3 * http://www.samsung.com
4 * 4 *
5 * EXYNOS4 - CPU frequency scaling support 5 * EXYNOS4210 - CPU frequency scaling support
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10*/ 10*/
11 11
12#include <linux/types.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/regulator/consumer.h>
19#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
20#include <linux/notifier.h>
21#include <linux/suspend.h>
22 19
23#include <mach/map.h>
24#include <mach/regs-clock.h> 20#include <mach/regs-clock.h>
25#include <mach/regs-mem.h> 21#include <mach/cpufreq.h>
26 22
27#include <plat/clock.h> 23#define CPUFREQ_LEVEL_END L5
28#include <plat/pm.h> 24
25static int max_support_idx = L0;
26static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
29 27
30static struct clk *cpu_clk; 28static struct clk *cpu_clk;
31static struct clk *moutcore; 29static struct clk *moutcore;
32static struct clk *mout_mpll; 30static struct clk *mout_mpll;
33static struct clk *mout_apll; 31static struct clk *mout_apll;
34 32
35static struct regulator *arm_regulator; 33struct cpufreq_clkdiv {
36static struct regulator *int_regulator; 34 unsigned int index;
37 35 unsigned int clkdiv;
38static struct cpufreq_freqs freqs;
39static unsigned int memtype;
40
41static unsigned int locking_frequency;
42static bool frequency_locked;
43static DEFINE_MUTEX(cpufreq_lock);
44
45enum exynos4_memory_type {
46 DDR2 = 4,
47 LPDDR2,
48 DDR3,
49}; 36};
50 37
51enum cpufreq_level_index { 38static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
52 L0, L1, L2, L3, CPUFREQ_LEVEL_END, 39 1250000, 1150000, 1050000, 975000, 950000,
53}; 40};
54 41
55static struct cpufreq_frequency_table exynos4_freq_table[] = { 42
56 {L0, 1000*1000}, 43static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
57 {L1, 800*1000}, 44
58 {L2, 400*1000}, 45static struct cpufreq_frequency_table exynos4210_freq_table[] = {
59 {L3, 100*1000}, 46 {L0, 1200*1000},
47 {L1, 1000*1000},
48 {L2, 800*1000},
49 {L3, 500*1000},
50 {L4, 200*1000},
60 {0, CPUFREQ_TABLE_END}, 51 {0, CPUFREQ_TABLE_END},
61}; 52};
62 53
@@ -67,17 +58,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
67 * DIVATB, DIVPCLK_DBG, DIVAPLL } 58 * DIVATB, DIVPCLK_DBG, DIVAPLL }
68 */ 59 */
69 60
70 /* ARM L0: 1000MHz */ 61 /* ARM L0: 1200MHz */
71 { 0, 3, 7, 3, 3, 0, 1 }, 62 { 0, 3, 7, 3, 4, 1, 7 },
72 63
73 /* ARM L1: 800MHz */ 64 /* ARM L1: 1000MHz */
74 { 0, 3, 7, 3, 3, 0, 1 }, 65 { 0, 3, 7, 3, 4, 1, 7 },
75 66
76 /* ARM L2: 400MHz */ 67 /* ARM L2: 800MHz */
77 { 0, 1, 3, 1, 3, 0, 1 }, 68 { 0, 3, 7, 3, 3, 1, 7 },
78 69
79 /* ARM L3: 100MHz */ 70 /* ARM L3: 500MHz */
80 { 0, 0, 1, 0, 3, 1, 1 }, 71 { 0, 3, 7, 3, 3, 1, 7 },
72
73 /* ARM L4: 200MHz */
74 { 0, 1, 3, 1, 3, 1, 0 },
81}; 75};
82 76
83static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { 77static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
@@ -86,147 +80,46 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
86 * { DIVCOPY, DIVHPM } 80 * { DIVCOPY, DIVHPM }
87 */ 81 */
88 82
89 /* ARM L0: 1000MHz */ 83 /* ARM L0: 1200MHz */
90 { 3, 0 }, 84 { 5, 0 },
91 85
92 /* ARM L1: 800MHz */ 86 /* ARM L1: 1000MHz */
93 { 3, 0 }, 87 { 4, 0 },
94 88
95 /* ARM L2: 400MHz */ 89 /* ARM L2: 800MHz */
96 { 3, 0 }, 90 { 3, 0 },
97 91
98 /* ARM L3: 100MHz */ 92 /* ARM L3: 500MHz */
99 { 3, 0 }, 93 { 3, 0 },
100};
101
102static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
103 /*
104 * Clock divider value for following
105 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
106 * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
107 */
108
109 /* DMC L0: 400MHz */
110 { 3, 1, 1, 1, 1, 1, 3, 1 },
111
112 /* DMC L1: 400MHz */
113 { 3, 1, 1, 1, 1, 1, 3, 1 },
114
115 /* DMC L2: 266.7MHz */
116 { 7, 1, 1, 2, 1, 1, 3, 1 },
117
118 /* DMC L3: 200MHz */
119 { 7, 1, 1, 3, 1, 1, 3, 1 },
120};
121
122static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
123 /*
124 * Clock divider value for following
125 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
126 */
127 94
128 /* ACLK200 L0: 200MHz */ 95 /* ARM L4: 200MHz */
129 { 3, 7, 4, 5, 1 }, 96 { 3, 0 },
130
131 /* ACLK200 L1: 200MHz */
132 { 3, 7, 4, 5, 1 },
133
134 /* ACLK200 L2: 160MHz */
135 { 4, 7, 5, 7, 1 },
136
137 /* ACLK200 L3: 133.3MHz */
138 { 5, 7, 7, 7, 1 },
139};
140
141static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
142 /*
143 * Clock divider value for following
144 * { DIVGDL/R, DIVGPL/R }
145 */
146
147 /* ACLK_GDL/R L0: 200MHz */
148 { 3, 1 },
149
150 /* ACLK_GDL/R L1: 200MHz */
151 { 3, 1 },
152
153 /* ACLK_GDL/R L2: 160MHz */
154 { 4, 1 },
155
156 /* ACLK_GDL/R L3: 133.3MHz */
157 { 5, 1 },
158};
159
160struct cpufreq_voltage_table {
161 unsigned int index; /* any */
162 unsigned int arm_volt; /* uV */
163 unsigned int int_volt;
164}; 97};
165 98
166static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { 99static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
167 { 100 /* APLL FOUT L0: 1200MHz */
168 .index = L0, 101 ((150 << 16) | (3 << 8) | 1),
169 .arm_volt = 1200000,
170 .int_volt = 1100000,
171 }, {
172 .index = L1,
173 .arm_volt = 1100000,
174 .int_volt = 1100000,
175 }, {
176 .index = L2,
177 .arm_volt = 1000000,
178 .int_volt = 1000000,
179 }, {
180 .index = L3,
181 .arm_volt = 900000,
182 .int_volt = 1000000,
183 },
184};
185 102
186static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { 103 /* APLL FOUT L1: 1000MHz */
187 /* APLL FOUT L0: 1000MHz */
188 ((250 << 16) | (6 << 8) | 1), 104 ((250 << 16) | (6 << 8) | 1),
189 105
190 /* APLL FOUT L1: 800MHz */ 106 /* APLL FOUT L2: 800MHz */
191 ((200 << 16) | (6 << 8) | 1), 107 ((200 << 16) | (6 << 8) | 1),
192 108
193 /* APLL FOUT L2 : 400MHz */ 109 /* APLL FOUT L3: 500MHz */
194 ((200 << 16) | (6 << 8) | 2), 110 ((250 << 16) | (6 << 8) | 2),
195 111
196 /* APLL FOUT L3: 100MHz */ 112 /* APLL FOUT L4: 200MHz */
197 ((200 << 16) | (6 << 8) | 4), 113 ((200 << 16) | (6 << 8) | 3),
198}; 114};
199 115
200static int exynos4_verify_speed(struct cpufreq_policy *policy) 116static void exynos4210_set_clkdiv(unsigned int div_index)
201{
202 return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
203}
204
205static unsigned int exynos4_getspeed(unsigned int cpu)
206{
207 return clk_get_rate(cpu_clk) / 1000;
208}
209
210static void exynos4_set_clkdiv(unsigned int div_index)
211{ 117{
212 unsigned int tmp; 118 unsigned int tmp;
213 119
214 /* Change Divider - CPU0 */ 120 /* Change Divider - CPU0 */
215 121
216 tmp = __raw_readl(S5P_CLKDIV_CPU); 122 tmp = exynos4210_clkdiv_table[div_index].clkdiv;
217
218 tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
219 S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
220 S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
221 S5P_CLKDIV_CPU0_APLL_MASK);
222
223 tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
224 (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
225 (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
226 (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
227 (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
228 (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
229 (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
230 123
231 __raw_writel(tmp, S5P_CLKDIV_CPU); 124 __raw_writel(tmp, S5P_CLKDIV_CPU);
232 125
@@ -248,83 +141,9 @@ static void exynos4_set_clkdiv(unsigned int div_index)
248 do { 141 do {
249 tmp = __raw_readl(S5P_CLKDIV_STATCPU1); 142 tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
250 } while (tmp & 0x11); 143 } while (tmp & 0x11);
251
252 /* Change Divider - DMC0 */
253
254 tmp = __raw_readl(S5P_CLKDIV_DMC0);
255
256 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
257 S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
258 S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
259 S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
260
261 tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
262 (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
263 (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
264 (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
265 (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
266 (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
267 (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
268 (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
269
270 __raw_writel(tmp, S5P_CLKDIV_DMC0);
271
272 do {
273 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
274 } while (tmp & 0x11111111);
275
276 /* Change Divider - TOP */
277
278 tmp = __raw_readl(S5P_CLKDIV_TOP);
279
280 tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
281 S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
282 S5P_CLKDIV_TOP_ONENAND_MASK);
283
284 tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
285 (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
286 (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
287 (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
288 (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
289
290 __raw_writel(tmp, S5P_CLKDIV_TOP);
291
292 do {
293 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
294 } while (tmp & 0x11111);
295
296 /* Change Divider - LEFTBUS */
297
298 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
299
300 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
301
302 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
303 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
304
305 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
306
307 do {
308 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
309 } while (tmp & 0x11);
310
311 /* Change Divider - RIGHTBUS */
312
313 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
314
315 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
316
317 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
318 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
319
320 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
321
322 do {
323 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
324 } while (tmp & 0x11);
325} 144}
326 145
327static void exynos4_set_apll(unsigned int index) 146static void exynos4210_set_apll(unsigned int index)
328{ 147{
329 unsigned int tmp; 148 unsigned int tmp;
330 149
@@ -343,7 +162,7 @@ static void exynos4_set_apll(unsigned int index)
343 /* 3. Change PLL PMS values */ 162 /* 3. Change PLL PMS values */
344 tmp = __raw_readl(S5P_APLL_CON0); 163 tmp = __raw_readl(S5P_APLL_CON0);
345 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); 164 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
346 tmp |= exynos4_apll_pms_table[index]; 165 tmp |= exynos4210_apll_pms_table[index];
347 __raw_writel(tmp, S5P_APLL_CON0); 166 __raw_writel(tmp, S5P_APLL_CON0);
348 167
349 /* 4. wait_lock_time */ 168 /* 4. wait_lock_time */
@@ -360,328 +179,126 @@ static void exynos4_set_apll(unsigned int index)
360 } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)); 179 } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
361} 180}
362 181
363static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index) 182bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
183{
184 unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
185 unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
186
187 return (old_pm == new_pm) ? 0 : 1;
188}
189
190static void exynos4210_set_frequency(unsigned int old_index,
191 unsigned int new_index)
364{ 192{
365 unsigned int tmp; 193 unsigned int tmp;
366 194
367 if (old_index > new_index) { 195 if (old_index > new_index) {
368 /* The frequency changing to L0 needs to change apll */ 196 if (!exynos4210_pms_change(old_index, new_index)) {
369 if (freqs.new == exynos4_freq_table[L0].frequency) {
370 /* 1. Change the system clock divider values */
371 exynos4_set_clkdiv(new_index);
372
373 /* 2. Change the apll m,p,s value */
374 exynos4_set_apll(new_index);
375 } else {
376 /* 1. Change the system clock divider values */ 197 /* 1. Change the system clock divider values */
377 exynos4_set_clkdiv(new_index); 198 exynos4210_set_clkdiv(new_index);
378 199
379 /* 2. Change just s value in apll m,p,s value */ 200 /* 2. Change just s value in apll m,p,s value */
380 tmp = __raw_readl(S5P_APLL_CON0); 201 tmp = __raw_readl(S5P_APLL_CON0);
381 tmp &= ~(0x7 << 0); 202 tmp &= ~(0x7 << 0);
382 tmp |= (exynos4_apll_pms_table[new_index] & 0x7); 203 tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
383 __raw_writel(tmp, S5P_APLL_CON0); 204 __raw_writel(tmp, S5P_APLL_CON0);
384 }
385 }
386
387 else if (old_index < new_index) {
388 /* The frequency changing from L0 needs to change apll */
389 if (freqs.old == exynos4_freq_table[L0].frequency) {
390 /* 1. Change the apll m,p,s value */
391 exynos4_set_apll(new_index);
392
393 /* 2. Change the system clock divider values */
394 exynos4_set_clkdiv(new_index);
395 } else { 205 } else {
206 /* Clock Configuration Procedure */
207 /* 1. Change the system clock divider values */
208 exynos4210_set_clkdiv(new_index);
209 /* 2. Change the apll m,p,s value */
210 exynos4210_set_apll(new_index);
211 }
212 } else if (old_index < new_index) {
213 if (!exynos4210_pms_change(old_index, new_index)) {
396 /* 1. Change just s value in apll m,p,s value */ 214 /* 1. Change just s value in apll m,p,s value */
397 tmp = __raw_readl(S5P_APLL_CON0); 215 tmp = __raw_readl(S5P_APLL_CON0);
398 tmp &= ~(0x7 << 0); 216 tmp &= ~(0x7 << 0);
399 tmp |= (exynos4_apll_pms_table[new_index] & 0x7); 217 tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
400 __raw_writel(tmp, S5P_APLL_CON0); 218 __raw_writel(tmp, S5P_APLL_CON0);
401 219
402 /* 2. Change the system clock divider values */ 220 /* 2. Change the system clock divider values */
403 exynos4_set_clkdiv(new_index); 221 exynos4210_set_clkdiv(new_index);
222 } else {
223 /* Clock Configuration Procedure */
224 /* 1. Change the apll m,p,s value */
225 exynos4210_set_apll(new_index);
226 /* 2. Change the system clock divider values */
227 exynos4210_set_clkdiv(new_index);
404 } 228 }
405 } 229 }
406} 230}
407 231
408static int exynos4_target(struct cpufreq_policy *policy, 232int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
409 unsigned int target_freq,
410 unsigned int relation)
411{
412 unsigned int index, old_index;
413 unsigned int arm_volt, int_volt;
414 int err = -EINVAL;
415
416 freqs.old = exynos4_getspeed(policy->cpu);
417
418 mutex_lock(&cpufreq_lock);
419
420 if (frequency_locked && target_freq != locking_frequency) {
421 err = -EAGAIN;
422 goto out;
423 }
424
425 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
426 freqs.old, relation, &old_index))
427 goto out;
428
429 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
430 target_freq, relation, &index))
431 goto out;
432
433 err = 0;
434
435 freqs.new = exynos4_freq_table[index].frequency;
436 freqs.cpu = policy->cpu;
437
438 if (freqs.new == freqs.old)
439 goto out;
440
441 /* get the voltage value */
442 arm_volt = exynos4_volt_table[index].arm_volt;
443 int_volt = exynos4_volt_table[index].int_volt;
444
445 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
446
447 /* control regulator */
448 if (freqs.new > freqs.old) {
449 /* Voltage up */
450 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
451 regulator_set_voltage(int_regulator, int_volt, int_volt);
452 }
453
454 /* Clock Configuration Procedure */
455 exynos4_set_frequency(old_index, index);
456
457 /* control regulator */
458 if (freqs.new < freqs.old) {
459 /* Voltage down */
460 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
461 regulator_set_voltage(int_regulator, int_volt, int_volt);
462 }
463
464 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
465
466out:
467 mutex_unlock(&cpufreq_lock);
468 return err;
469}
470
471#ifdef CONFIG_PM
472/*
473 * These suspend/resume are used as syscore_ops, it is already too
474 * late to set regulator voltages at this stage.
475 */
476static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
477{
478 return 0;
479}
480
481static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
482{ 233{
483 return 0; 234 int i;
484} 235 unsigned int tmp;
485#endif 236 unsigned long rate;
486
487/**
488 * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
489 * context
490 * @notifier
491 * @pm_event
492 * @v
493 *
494 * While frequency_locked == true, target() ignores every frequency but
495 * locking_frequency. The locking_frequency value is the initial frequency,
496 * which is set by the bootloader. In order to eliminate possible
497 * inconsistency in clock values, we save and restore frequencies during
498 * suspend and resume and block CPUFREQ activities. Note that the standard
499 * suspend/resume cannot be used as they are too deep (syscore_ops) for
500 * regulator actions.
501 */
502static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
503 unsigned long pm_event, void *v)
504{
505 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
506 static unsigned int saved_frequency;
507 unsigned int temp;
508
509 mutex_lock(&cpufreq_lock);
510 switch (pm_event) {
511 case PM_SUSPEND_PREPARE:
512 if (frequency_locked)
513 goto out;
514 frequency_locked = true;
515
516 if (locking_frequency) {
517 saved_frequency = exynos4_getspeed(0);
518
519 mutex_unlock(&cpufreq_lock);
520 exynos4_target(policy, locking_frequency,
521 CPUFREQ_RELATION_H);
522 mutex_lock(&cpufreq_lock);
523 }
524
525 break;
526 case PM_POST_SUSPEND:
527
528 if (saved_frequency) {
529 /*
530 * While frequency_locked, only locking_frequency
531 * is valid for target(). In order to use
532 * saved_frequency while keeping frequency_locked,
533 * we temporarly overwrite locking_frequency.
534 */
535 temp = locking_frequency;
536 locking_frequency = saved_frequency;
537
538 mutex_unlock(&cpufreq_lock);
539 exynos4_target(policy, locking_frequency,
540 CPUFREQ_RELATION_H);
541 mutex_lock(&cpufreq_lock);
542
543 locking_frequency = temp;
544 }
545
546 frequency_locked = false;
547 break;
548 }
549out:
550 mutex_unlock(&cpufreq_lock);
551
552 return NOTIFY_OK;
553}
554
555static struct notifier_block exynos4_cpufreq_nb = {
556 .notifier_call = exynos4_cpufreq_pm_notifier,
557};
558
559static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
560{
561 int ret;
562
563 policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
564
565 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
566
567 /* set the transition latency value */
568 policy->cpuinfo.transition_latency = 100000;
569
570 /*
571 * EXYNOS4 multi-core processors has 2 cores
572 * that the frequency cannot be set independently.
573 * Each cpu is bound to the same speed.
574 * So the affected cpu is all of the cpus.
575 */
576 cpumask_setall(policy->cpus);
577
578 ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
579 if (ret)
580 return ret;
581
582 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
583
584 return 0;
585}
586
587static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
588{
589 cpufreq_frequency_table_put_attr(policy->cpu);
590 return 0;
591}
592
593static struct freq_attr *exynos4_cpufreq_attr[] = {
594 &cpufreq_freq_attr_scaling_available_freqs,
595 NULL,
596};
597
598static struct cpufreq_driver exynos4_driver = {
599 .flags = CPUFREQ_STICKY,
600 .verify = exynos4_verify_speed,
601 .target = exynos4_target,
602 .get = exynos4_getspeed,
603 .init = exynos4_cpufreq_cpu_init,
604 .exit = exynos4_cpufreq_cpu_exit,
605 .name = "exynos4_cpufreq",
606 .attr = exynos4_cpufreq_attr,
607#ifdef CONFIG_PM
608 .suspend = exynos4_cpufreq_suspend,
609 .resume = exynos4_cpufreq_resume,
610#endif
611};
612 237
613static int __init exynos4_cpufreq_init(void)
614{
615 cpu_clk = clk_get(NULL, "armclk"); 238 cpu_clk = clk_get(NULL, "armclk");
616 if (IS_ERR(cpu_clk)) 239 if (IS_ERR(cpu_clk))
617 return PTR_ERR(cpu_clk); 240 return PTR_ERR(cpu_clk);
618 241
619 locking_frequency = exynos4_getspeed(0);
620
621 moutcore = clk_get(NULL, "moutcore"); 242 moutcore = clk_get(NULL, "moutcore");
622 if (IS_ERR(moutcore)) 243 if (IS_ERR(moutcore))
623 goto out; 244 goto err_moutcore;
624 245
625 mout_mpll = clk_get(NULL, "mout_mpll"); 246 mout_mpll = clk_get(NULL, "mout_mpll");
626 if (IS_ERR(mout_mpll)) 247 if (IS_ERR(mout_mpll))
627 goto out; 248 goto err_mout_mpll;
249
250 rate = clk_get_rate(mout_mpll) / 1000;
628 251
629 mout_apll = clk_get(NULL, "mout_apll"); 252 mout_apll = clk_get(NULL, "mout_apll");
630 if (IS_ERR(mout_apll)) 253 if (IS_ERR(mout_apll))
631 goto out; 254 goto err_mout_apll;
632 255
633 arm_regulator = regulator_get(NULL, "vdd_arm"); 256 tmp = __raw_readl(S5P_CLKDIV_CPU);
634 if (IS_ERR(arm_regulator)) {
635 printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
636 goto out;
637 }
638 257
639 int_regulator = regulator_get(NULL, "vdd_int"); 258 for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
640 if (IS_ERR(int_regulator)) { 259 tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK |
641 printk(KERN_ERR "failed to get resource %s\n", "vdd_int"); 260 S5P_CLKDIV_CPU0_COREM0_MASK |
642 goto out; 261 S5P_CLKDIV_CPU0_COREM1_MASK |
262 S5P_CLKDIV_CPU0_PERIPH_MASK |
263 S5P_CLKDIV_CPU0_ATB_MASK |
264 S5P_CLKDIV_CPU0_PCLKDBG_MASK |
265 S5P_CLKDIV_CPU0_APLL_MASK);
266
267 tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
268 (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
269 (clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
270 (clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
271 (clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
272 (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
273 (clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
274
275 exynos4210_clkdiv_table[i].clkdiv = tmp;
643 } 276 }
644 277
645 /* 278 info->mpll_freq_khz = rate;
646 * Check DRAM type. 279 info->pm_lock_idx = L2;
647 * Because DVFS level is different according to DRAM type. 280 info->pll_safe_idx = L2;
648 */ 281 info->max_support_idx = max_support_idx;
649 memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET); 282 info->min_support_idx = min_support_idx;
650 memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT); 283 info->cpu_clk = cpu_clk;
651 memtype &= S5P_DMC0_MEMTYPE_MASK; 284 info->volt_table = exynos4210_volt_table;
652 285 info->freq_table = exynos4210_freq_table;
653 if ((memtype < DDR2) && (memtype > DDR3)) { 286 info->set_freq = exynos4210_set_frequency;
654 printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype); 287 info->need_apll_change = exynos4210_pms_change;
655 goto out;
656 } else {
657 printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
658 }
659
660 register_pm_notifier(&exynos4_cpufreq_nb);
661
662 return cpufreq_register_driver(&exynos4_driver);
663
664out:
665 if (!IS_ERR(cpu_clk))
666 clk_put(cpu_clk);
667 288
668 if (!IS_ERR(moutcore)) 289 return 0;
669 clk_put(moutcore);
670 290
291err_mout_apll:
671 if (!IS_ERR(mout_mpll)) 292 if (!IS_ERR(mout_mpll))
672 clk_put(mout_mpll); 293 clk_put(mout_mpll);
294err_mout_mpll:
295 if (!IS_ERR(moutcore))
296 clk_put(moutcore);
297err_moutcore:
298 if (!IS_ERR(cpu_clk))
299 clk_put(cpu_clk);
673 300
674 if (!IS_ERR(mout_apll)) 301 pr_debug("%s: failed initialization\n", __func__);
675 clk_put(mout_apll);
676
677 if (!IS_ERR(arm_regulator))
678 regulator_put(arm_regulator);
679
680 if (!IS_ERR(int_regulator))
681 regulator_put(int_regulator);
682
683 printk(KERN_ERR "%s: failed initialization\n", __func__);
684
685 return -EINVAL; 302 return -EINVAL;
686} 303}
687late_initcall(exynos4_cpufreq_init); 304EXPORT_SYMBOL(exynos4210_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
new file mode 100644
index 00000000000..5d04c57aae3
--- /dev/null
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -0,0 +1,274 @@
+/*
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
+
+#include <mach/hardware.h>
+
+#ifdef CONFIG_SMP
+struct lpj_info {
+        unsigned long   ref;
+        unsigned int    freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
+#endif
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+        if (!freq_table)
+                return -EINVAL;
+        return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+        unsigned long rate;
+
+        if (cpu >= NR_CPUS)
+                return 0;
+
+        rate = clk_get_rate(mpu_clk) / 1000;
+        return rate;
+}
+
+static int omap_target(struct cpufreq_policy *policy,
+                       unsigned int target_freq,
+                       unsigned int relation)
+{
+        unsigned int i;
+        int ret = 0;
+        struct cpufreq_freqs freqs;
+
+        if (!freq_table) {
+                dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+                                policy->cpu);
+                return -EINVAL;
+        }
+
+        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+                        relation, &i);
+        if (ret) {
+                dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+                        __func__, policy->cpu, target_freq, ret);
+                return ret;
+        }
+        freqs.new = freq_table[i].frequency;
+        if (!freqs.new) {
+                dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
+                        policy->cpu, target_freq);
+                return -EINVAL;
+        }
+
+        freqs.old = omap_getspeed(policy->cpu);
+        freqs.cpu = policy->cpu;
+
+        if (freqs.old == freqs.new && policy->cur == freqs.new)
+                return ret;
+
+        /* notifiers */
+        for_each_cpu(i, policy->cpus) {
+                freqs.cpu = i;
+                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+        }
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+        pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
+#endif
+
+        ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+        freqs.new = omap_getspeed(policy->cpu);
+
+#ifdef CONFIG_SMP
+        /*
+         * Note that loops_per_jiffy is not updated on SMP systems in
+         * cpufreq driver. So, update the per-CPU loops_per_jiffy value
+         * on frequency transition. We need to update all dependent CPUs.
+         */
+        for_each_cpu(i, policy->cpus) {
+                struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+                if (!lpj->freq) {
+                        lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+                        lpj->freq = freqs.old;
+                }
+
+                per_cpu(cpu_data, i).loops_per_jiffy =
+                        cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+        }
+
+        /* And don't forget to adjust the global one */
+        if (!global_lpj_ref.freq) {
+                global_lpj_ref.ref = loops_per_jiffy;
+                global_lpj_ref.freq = freqs.old;
+        }
+        loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+                                        freqs.new);
+#endif
+
+        /* notifiers */
+        for_each_cpu(i, policy->cpus) {
+                freqs.cpu = i;
+                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+        }
+
+        return ret;
+}
+
+static inline void freq_table_free(void)
+{
+        if (atomic_dec_and_test(&freq_table_users))
+                opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+{
+        int result = 0;
+
+        mpu_clk = clk_get(NULL, mpu_clk_name);
+        if (IS_ERR(mpu_clk))
+                return PTR_ERR(mpu_clk);
+
+        if (policy->cpu >= NR_CPUS) {
+                result = -EINVAL;
+                goto fail_ck;
+        }
+
+        policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+        if (atomic_inc_return(&freq_table_users) == 1)
+                result = opp_init_cpufreq_table(mpu_dev, &freq_table);
+
+        if (result) {
+                dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+                                __func__, policy->cpu, result);
+                goto fail_ck;
+        }
+
+        result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+        if (result)
+                goto fail_table;
+
+        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+        policy->min = policy->cpuinfo.min_freq;
+        policy->max = policy->cpuinfo.max_freq;
+        policy->cur = omap_getspeed(policy->cpu);
+
+        /*
+         * On OMAP SMP configuartion, both processors share the voltage
+         * and clock. So both CPUs needs to be scaled together and hence
+         * needs software co-ordination. Use cpufreq affected_cpus
+         * interface to handle this scenario. Additional is_smp() check
+         * is to keep SMP_ON_UP build working.
+         */
+        if (is_smp()) {
+                policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+                cpumask_setall(policy->cpus);
+        }
+
+        /* FIXME: what's the actual transition time? */
+        policy->cpuinfo.transition_latency = 300 * 1000;
+
+        return 0;
+
+fail_table:
+        freq_table_free();
+fail_ck:
+        clk_put(mpu_clk);
+        return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+        freq_table_free();
+        clk_put(mpu_clk);
+        return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+        &cpufreq_freq_attr_scaling_available_freqs,
+        NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+        .flags          = CPUFREQ_STICKY,
+        .verify         = omap_verify_speed,
+        .target         = omap_target,
+        .get            = omap_getspeed,
+        .init           = omap_cpu_init,
+        .exit           = omap_cpu_exit,
+        .name           = "omap",
+        .attr           = omap_cpufreq_attr,
+};
+
+static int __init omap_cpufreq_init(void)
+{
+        if (cpu_is_omap24xx())
+                mpu_clk_name = "virt_prcm_set";
+        else if (cpu_is_omap34xx())
+                mpu_clk_name = "dpll1_ck";
+        else if (cpu_is_omap44xx())
+                mpu_clk_name = "dpll_mpu_ck";
+
+        if (!mpu_clk_name) {
+                pr_err("%s: unsupported Silicon?\n", __func__);
+                return -EINVAL;
+        }
+
+        mpu_dev = omap_device_get_by_hwmod_name("mpu");
+        if (!mpu_dev) {
+                pr_warning("%s: unable to get the mpu device\n", __func__);
+                return -EINVAL;
+        }
+
+        return cpufreq_register_driver(&omap_driver);
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+        cpufreq_unregister_driver(&omap_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index bce576d7478..8f9b2ceeec8 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1,10 +1,11 @@
 /*
- *   (c) 2003-2010 Advanced Micro Devices, Inc.
+ *   (c) 2003-2012 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
  *
- *  Support : mark.langsdorf@amd.com
+ *  Maintainer:
+ *  Andreas Herrmann <andreas.herrmann3@amd.com>
  *
  *  Based on the powernow-k7.c module written by Dave Jones.
  *  (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -16,12 +17,14 @@
  *  Valuable input gratefully received from Dave Jones, Pavel Machek,
  *  Dominik Brodowski, Jacob Shin, and others.
  *  Originally developed by Paul Devriendt.
- *  Processor information obtained from Chapter 9 (Power and Thermal Management)
- *  of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- *  Opteron Processors" available for download from www.amd.com
  *
- *  Tables for specific CPUs can be inferred from
- *  http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ *  Processor information obtained from Chapter 9 (Power and Thermal
+ *  Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ *  the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ *  Power Management" in BKDGs for newer AMD CPU families.
+ *
+ *  Tables for specific CPUs can be inferred from AMD's processor
+ *  power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
  */
 
 #include <linux/kernel.h>
@@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static int cpu_family = CPU_OPTERON;
 
+/* array to map SW pstate number to acpi state */
+static u32 ps_to_as[8];
+
 /* core performance boost */
 static bool cpb_capable, cpb_enabled;
 static struct msr __percpu *msrs;
@@ -80,9 +86,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
 }
 
 static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
                 u32 pstate)
 {
-        return data[pstate].frequency;
+        return data[ps_to_as[pstate]].frequency;
 }
 
 /* Return the vco fid for an input fid
@@ -926,23 +932,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
                         invalidate_entry(powernow_table, i);
                         continue;
                 }
-                rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-                if (!(hi & HW_PSTATE_VALID_MASK)) {
-                        pr_debug("invalid pstate %d, ignoring\n", index);
-                        invalidate_entry(powernow_table, i);
-                        continue;
-                }
 
-                powernow_table[i].index = index;
+                ps_to_as[index] = i;
 
                 /* Frequency may be rounded for these */
                 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
                                 || boot_cpu_data.x86 == 0x11) {
+
+                        rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+                        if (!(hi & HW_PSTATE_VALID_MASK)) {
+                                pr_debug("invalid pstate %d, ignoring\n", index);
+                                invalidate_entry(powernow_table, i);
+                                continue;
+                        }
+
                         powernow_table[i].frequency =
                                 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
                 } else
                         powernow_table[i].frequency =
                                 data->acpi_data.states[i].core_frequency * 1000;
+
+                powernow_table[i].index = index;
         }
         return 0;
 }
@@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
         powernow_k8_acpi_pst_values(data, newstate);
 
         if (cpu_family == CPU_HW_PSTATE)
-                ret = transition_frequency_pstate(data, newstate);
+                ret = transition_frequency_pstate(data,
+                        data->powernow_table[newstate].index);
         else
                 ret = transition_frequency_fidvid(data, newstate);
         if (ret) {
@@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
         if (cpu_family == CPU_HW_PSTATE)
                 pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                                newstate);
+                                data->powernow_table[newstate].index);
         else
                 pol->cur = find_khz_freq_from_fid(data->currfid);
         ret = 0;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 3475f65aeec..a5e72cb5f53 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
         if (freqs.old == freqs.new)
                 return 0;
 
-        pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+        pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
 
         cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 
@@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
                                             dvfs->vddarm_min,
                                             dvfs->vddarm_max);
                 if (ret != 0) {
-                        pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+                        pr_err("Failed to set VDDARM for %dkHz: %d\n",
                                freqs.new, ret);
                         goto err;
                 }
@@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 
         ret = clk_set_rate(armclk, freqs.new * 1000);
         if (ret < 0) {
-                pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+                pr_err("Failed to set rate %dkHz: %d\n",
                        freqs.new, ret);
                 goto err;
         }
@@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
                                             dvfs->vddarm_min,
                                             dvfs->vddarm_max);
                 if (ret != 0) {
-                        pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+                        pr_err("Failed to set VDDARM for %dkHz: %d\n",
                                freqs.new, ret);
                         goto err_clk;
                 }
         }
 #endif
 
-        pr_debug("cpufreq: Set actual frequency %lukHz\n",
+        pr_debug("Set actual frequency %lukHz\n",
                  clk_get_rate(armclk) / 1000);
 
         return 0;
@@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
 
         count = regulator_count_voltages(vddarm);
         if (count < 0) {
-                pr_err("cpufreq: Unable to check supported voltages\n");
+                pr_err("Unable to check supported voltages\n");
         }
 
         freq = s3c64xx_freq_table;
@@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
                 }
 
                 if (!found) {
-                        pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+                        pr_debug("%dkHz unsupported by regulator\n",
                                  freq->frequency);
                         freq->frequency = CPUFREQ_ENTRY_INVALID;
                 }
@@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
                 return -EINVAL;
 
         if (s3c64xx_freq_table == NULL) {
-                pr_err("cpufreq: No frequency information for this CPU\n");
+                pr_err("No frequency information for this CPU\n");
                 return -ENODEV;
         }
 
         armclk = clk_get(NULL, "armclk");
         if (IS_ERR(armclk)) {
-                pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+                pr_err("Unable to obtain ARMCLK: %ld\n",
                        PTR_ERR(armclk));
                 return PTR_ERR(armclk);
         }
@@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
         vddarm = regulator_get(NULL, "vddarm");
         if (IS_ERR(vddarm)) {
                 ret = PTR_ERR(vddarm);
-                pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
-                pr_err("cpufreq: Only frequency scaling available\n");
+                pr_err("Failed to obtain VDDARM: %d\n", ret);
+                pr_err("Only frequency scaling available\n");
                 vddarm = NULL;
         } else {
                 s3c64xx_cpufreq_config_regulator();
         }
+
+        vddint = regulator_get(NULL, "vddint");
+        if (IS_ERR(vddint)) {
+                ret = PTR_ERR(vddint);
+                pr_err("Failed to obtain VDDINT: %d\n", ret);
+                vddint = NULL;
+        }
 #endif
 
         freq = s3c64xx_freq_table;
@@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
                 r = clk_round_rate(armclk, freq->frequency * 1000);
                 r /= 1000;
                 if (r != freq->frequency) {
-                        pr_debug("cpufreq: %dkHz unsupported by clock\n",
+                        pr_debug("%dkHz unsupported by clock\n",
                                  freq->frequency);
                         freq->frequency = CPUFREQ_ENTRY_INVALID;
                 }
@@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 
         ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
         if (ret != 0) {
-                pr_err("cpufreq: Failed to configure frequency table: %d\n",
+                pr_err("Failed to configure frequency table: %d\n",
                        ret);
                 regulator_put(vddarm);
                 clk_put(armclk);