Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig               |  10
-rw-r--r--  drivers/cpufreq/Kconfig.arm           |  32
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc       |   7
-rw-r--r--  drivers/cpufreq/Makefile              |  12
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c        |   2
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c  | 568
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c       | 309
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c     | 273
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c     | 649
9 files changed, 1858 insertions(+), 4 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 9fb84853d8e3..e24a2a1b6666 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -184,5 +184,15 @@ depends on X86
 source "drivers/cpufreq/Kconfig.x86"
 endmenu
 
+menu "ARM CPU frequency scaling drivers"
+depends on ARM
+source "drivers/cpufreq/Kconfig.arm"
+endmenu
+
+menu "PowerPC CPU frequency scaling drivers"
+depends on PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endmenu
+
 endif
 endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
new file mode 100644
index 000000000000..72a0044c1baa
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.arm
@@ -0,0 +1,32 @@
1#
2# ARM CPU Frequency scaling drivers
3#
4
5config ARM_S3C64XX_CPUFREQ
6 bool "Samsung S3C64XX"
7 depends on CPU_S3C6410
8 default y
9 help
10 This adds the CPUFreq driver for Samsung S3C6410 SoC.
11
12 If in doubt, say N.
13
14config ARM_S5PV210_CPUFREQ
15 bool "Samsung S5PV210 and S5PC110"
16 depends on CPU_S5PV210
17 default y
18 help
19 This adds the CPUFreq driver for Samsung S5PV210 and
20 S5PC110 SoCs.
21
22 If in doubt, say N.
23
24config ARM_EXYNOS4210_CPUFREQ
25 bool "Samsung EXYNOS4210"
26 depends on CPU_EXYNOS4210
27 default y
28 help
29 This adds the CPUFreq driver for Samsung EXYNOS4210
30 SoC (S5PV310 or S5PC210).
31
32 If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
new file mode 100644
index 000000000000..e76992f79683
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -0,0 +1,7 @@
1config CPU_FREQ_MAPLE
2 bool "Support for Maple 970FX Evaluation Board"
3 depends on PPC_MAPLE
4 select CPU_FREQ_TABLE
5 help
6 This adds support for frequency switching on Maple 970FX
7 Evaluation Board and compatible boards (IBM JS2x blades).
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e2fc2d21fa61..a48bc02cd765 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
 
-##################################################################################d
+##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
@@ -37,7 +37,13 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
 obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
 obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
 
-##################################################################################d
-
+##################################################################################
 # ARM SoC drivers
 obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
+
+##################################################################################
+# PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 596d5dd32f41..56c6c6b4eb4d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -655,7 +655,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	acpi_processor_notify_smm(THIS_MODULE);
 
 	/* Check for APERF/MPERF support in hardware */
-	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
 		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
 
 	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
new file mode 100644
index 000000000000..b7c3a84c4cfa
--- /dev/null
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * EXYNOS4 - CPU frequency scaling support
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/err.h>
15#include <linux/clk.h>
16#include <linux/io.h>
17#include <linux/slab.h>
18#include <linux/regulator/consumer.h>
19#include <linux/cpufreq.h>
20
21#include <mach/map.h>
22#include <mach/regs-clock.h>
23#include <mach/regs-mem.h>
24
25#include <plat/clock.h>
26#include <plat/pm.h>
27
28static struct clk *cpu_clk;
29static struct clk *moutcore;
30static struct clk *mout_mpll;
31static struct clk *mout_apll;
32
33static struct regulator *arm_regulator;
34static struct regulator *int_regulator;
35
36static struct cpufreq_freqs freqs;
37static unsigned int memtype;
38
39enum exynos4_memory_type {
40 DDR2 = 4,
41 LPDDR2,
42 DDR3,
43};
44
45enum cpufreq_level_index {
46 L0, L1, L2, L3, CPUFREQ_LEVEL_END,
47};
48
49static struct cpufreq_frequency_table exynos4_freq_table[] = {
50 {L0, 1000*1000},
51 {L1, 800*1000},
52 {L2, 400*1000},
53 {L3, 100*1000},
54 {0, CPUFREQ_TABLE_END},
55};
56
57static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
58 /*
59 * Clock divider value for following
60 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
61 * DIVATB, DIVPCLK_DBG, DIVAPLL }
62 */
63
64 /* ARM L0: 1000MHz */
65 { 0, 3, 7, 3, 3, 0, 1 },
66
67 /* ARM L1: 800MHz */
68 { 0, 3, 7, 3, 3, 0, 1 },
69
70 /* ARM L2: 400MHz */
71 { 0, 1, 3, 1, 3, 0, 1 },
72
73 /* ARM L3: 100MHz */
74 { 0, 0, 1, 0, 3, 1, 1 },
75};
76
77static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
78 /*
79 * Clock divider value for following
80 * { DIVCOPY, DIVHPM }
81 */
82
83 /* ARM L0: 1000MHz */
84 { 3, 0 },
85
86 /* ARM L1: 800MHz */
87 { 3, 0 },
88
89 /* ARM L2: 400MHz */
90 { 3, 0 },
91
92 /* ARM L3: 100MHz */
93 { 3, 0 },
94};
95
96static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
97 /*
98 * Clock divider value for following
99 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
100 * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
101 */
102
103 /* DMC L0: 400MHz */
104 { 3, 1, 1, 1, 1, 1, 3, 1 },
105
106 /* DMC L1: 400MHz */
107 { 3, 1, 1, 1, 1, 1, 3, 1 },
108
109 /* DMC L2: 266.7MHz */
110 { 7, 1, 1, 2, 1, 1, 3, 1 },
111
112 /* DMC L3: 200MHz */
113 { 7, 1, 1, 3, 1, 1, 3, 1 },
114};
115
116static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
117 /*
118 * Clock divider value for following
119 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
120 */
121
122 /* ACLK200 L0: 200MHz */
123 { 3, 7, 4, 5, 1 },
124
125 /* ACLK200 L1: 200MHz */
126 { 3, 7, 4, 5, 1 },
127
128 /* ACLK200 L2: 160MHz */
129 { 4, 7, 5, 7, 1 },
130
131 /* ACLK200 L3: 133.3MHz */
132 { 5, 7, 7, 7, 1 },
133};
134
135static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
136 /*
137 * Clock divider value for following
138 * { DIVGDL/R, DIVGPL/R }
139 */
140
141 /* ACLK_GDL/R L0: 200MHz */
142 { 3, 1 },
143
144 /* ACLK_GDL/R L1: 200MHz */
145 { 3, 1 },
146
147 /* ACLK_GDL/R L2: 160MHz */
148 { 4, 1 },
149
150 /* ACLK_GDL/R L3: 133.3MHz */
151 { 5, 1 },
152};
153
154struct cpufreq_voltage_table {
155 unsigned int index; /* any */
156 unsigned int arm_volt; /* uV */
157 unsigned int int_volt;
158};
159
160static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
161 {
162 .index = L0,
163 .arm_volt = 1200000,
164 .int_volt = 1100000,
165 }, {
166 .index = L1,
167 .arm_volt = 1100000,
168 .int_volt = 1100000,
169 }, {
170 .index = L2,
171 .arm_volt = 1000000,
172 .int_volt = 1000000,
173 }, {
174 .index = L3,
175 .arm_volt = 900000,
176 .int_volt = 1000000,
177 },
178};
179
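/*
 * arm_volt is applied to the "vdd_arm" regulator and int_volt to "vdd_int";
 * exynos4_target() raises them before speeding up and lowers them after
 * slowing down.
 */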
180static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
181 /* APLL FOUT L0: 1000MHz */
182 ((250 << 16) | (6 << 8) | 1),
183
184 /* APLL FOUT L1: 800MHz */
185 ((200 << 16) | (6 << 8) | 1),
186
187 /* APLL FOUT L2 : 400MHz */
188 ((200 << 16) | (6 << 8) | 2),
189
190 /* APLL FOUT L3: 100MHz */
191 ((200 << 16) | (6 << 8) | 4),
192};
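/*
 * Assuming the usual 24 MHz FIN and FOUT = (MDIV * FIN) / (PDIV * 2^(SDIV - 1)),
 * the (MDIV << 16) | (PDIV << 8) | SDIV values above work out to
 * 250*24/(6*1) = 1000 MHz, 200*24/(6*1) = 800 MHz, 200*24/(6*2) = 400 MHz and
 * 200*24/(6*8) = 100 MHz, matching the level comments.
 */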
193
194static int exynos4_verify_speed(struct cpufreq_policy *policy)
195{
196 return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
197}
198
199static unsigned int exynos4_getspeed(unsigned int cpu)
200{
201 return clk_get_rate(cpu_clk) / 1000;
202}
203
204static void exynos4_set_clkdiv(unsigned int div_index)
205{
206 unsigned int tmp;
207
208 /* Change Divider - CPU0 */
209
210 tmp = __raw_readl(S5P_CLKDIV_CPU);
211
212 tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
213 S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
214 S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
215 S5P_CLKDIV_CPU0_APLL_MASK);
216
217 tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
218 (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
219 (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
220 (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
221 (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
222 (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
223 (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
224
225 __raw_writel(tmp, S5P_CLKDIV_CPU);
226
227 do {
228 tmp = __raw_readl(S5P_CLKDIV_STATCPU);
229 } while (tmp & 0x1111111);
230
231 /* Change Divider - CPU1 */
232
233 tmp = __raw_readl(S5P_CLKDIV_CPU1);
234
235 tmp &= ~((0x7 << 4) | 0x7);
236
237 tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
238 (clkdiv_cpu1[div_index][1] << 0));
239
240 __raw_writel(tmp, S5P_CLKDIV_CPU1);
241
242 do {
243 tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
244 } while (tmp & 0x11);
245
246 /* Change Divider - DMC0 */
247
248 tmp = __raw_readl(S5P_CLKDIV_DMC0);
249
250 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
251 S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
252 S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
253 S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
254
255 tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
256 (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
257 (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
258 (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
259 (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
260 (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
261 (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
262 (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
263
264 __raw_writel(tmp, S5P_CLKDIV_DMC0);
265
266 do {
267 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
268 } while (tmp & 0x11111111);
269
270 /* Change Divider - TOP */
271
272 tmp = __raw_readl(S5P_CLKDIV_TOP);
273
274 tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
275 S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
276 S5P_CLKDIV_TOP_ONENAND_MASK);
277
278 tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
279 (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
280 (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
281 (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
282 (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
283
284 __raw_writel(tmp, S5P_CLKDIV_TOP);
285
286 do {
287 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
288 } while (tmp & 0x11111);
289
290 /* Change Divider - LEFTBUS */
291
292 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
293
294 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
295
296 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
297 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
298
299 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
300
301 do {
302 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
303 } while (tmp & 0x11);
304
305 /* Change Divider - RIGHTBUS */
306
307 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
308
309 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
310
311 tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
312 (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
313
314 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
315
316 do {
317 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
318 } while (tmp & 0x11);
319}
320
321static void exynos4_set_apll(unsigned int index)
322{
323 unsigned int tmp;
324
325 /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
326 clk_set_parent(moutcore, mout_mpll);
327
328 do {
329 tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
330 >> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
331 tmp &= 0x7;
332 } while (tmp != 0x2);
333
334 /* 2. Set APLL Lock time */
335 __raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
336
337 /* 3. Change PLL PMS values */
338 tmp = __raw_readl(S5P_APLL_CON0);
339 tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
340 tmp |= exynos4_apll_pms_table[index];
341 __raw_writel(tmp, S5P_APLL_CON0);
342
343 /* 4. wait_lock_time */
344 do {
345 tmp = __raw_readl(S5P_APLL_CON0);
346 } while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
347
348 /* 5. MUX_CORE_SEL = APLL */
349 clk_set_parent(moutcore, mout_apll);
350
351 do {
352 tmp = __raw_readl(S5P_CLKMUX_STATCPU);
353 tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
354 } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
355}
356
357static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
358{
359 unsigned int tmp;
360
361 if (old_index > new_index) {
362 /* Changing the frequency to L0 requires reprogramming the APLL */
363 if (freqs.new == exynos4_freq_table[L0].frequency) {
364 /* 1. Change the system clock divider values */
365 exynos4_set_clkdiv(new_index);
366
367 /* 2. Change the apll m,p,s value */
368 exynos4_set_apll(new_index);
369 } else {
370 /* 1. Change the system clock divider values */
371 exynos4_set_clkdiv(new_index);
372
373 /* 2. Change just s value in apll m,p,s value */
374 tmp = __raw_readl(S5P_APLL_CON0);
375 tmp &= ~(0x7 << 0);
376 tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
377 __raw_writel(tmp, S5P_APLL_CON0);
378 }
379 }
380
381 else if (old_index < new_index) {
382 /* Changing the frequency from L0 requires reprogramming the APLL */
383 if (freqs.old == exynos4_freq_table[L0].frequency) {
384 /* 1. Change the apll m,p,s value */
385 exynos4_set_apll(new_index);
386
387 /* 2. Change the system clock divider values */
388 exynos4_set_clkdiv(new_index);
389 } else {
390 /* 1. Change just s value in apll m,p,s value */
391 tmp = __raw_readl(S5P_APLL_CON0);
392 tmp &= ~(0x7 << 0);
393 tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
394 __raw_writel(tmp, S5P_APLL_CON0);
395
396 /* 2. Change the system clock divider values */
397 exynos4_set_clkdiv(new_index);
398 }
399 }
400}
401
402static int exynos4_target(struct cpufreq_policy *policy,
403 unsigned int target_freq,
404 unsigned int relation)
405{
406 unsigned int index, old_index;
407 unsigned int arm_volt, int_volt;
408
409 freqs.old = exynos4_getspeed(policy->cpu);
410
411 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
412 freqs.old, relation, &old_index))
413 return -EINVAL;
414
415 if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
416 target_freq, relation, &index))
417 return -EINVAL;
418
419 freqs.new = exynos4_freq_table[index].frequency;
420 freqs.cpu = policy->cpu;
421
422 if (freqs.new == freqs.old)
423 return 0;
424
425 /* get the voltage value */
426 arm_volt = exynos4_volt_table[index].arm_volt;
427 int_volt = exynos4_volt_table[index].int_volt;
428
429 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
430
431 /* control regulator */
432 if (freqs.new > freqs.old) {
433 /* Voltage up */
434 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
435 regulator_set_voltage(int_regulator, int_volt, int_volt);
436 }
437
438 /* Clock Configuration Procedure */
439 exynos4_set_frequency(old_index, index);
440
441 /* control regulator */
442 if (freqs.new < freqs.old) {
443 /* Voltage down */
444 regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
445 regulator_set_voltage(int_regulator, int_volt, int_volt);
446 }
447
448 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
449
450 return 0;
451}
452
453#ifdef CONFIG_PM
454static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
455{
456 return 0;
457}
458
459static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
460{
461 return 0;
462}
463#endif
464
465static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
466{
467 policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
468
469 cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
470
471 /* set the transition latency value */
472 policy->cpuinfo.transition_latency = 100000;
473
474 /*
475 * EXYNOS4 multi-core processors have two cores whose
476 * frequencies cannot be set independently. Each CPU
477 * is bound to the same speed, so the set of affected
478 * CPUs is all of the CPUs.
479 */
480 cpumask_setall(policy->cpus);
481
482 return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
483}
484
485static struct cpufreq_driver exynos4_driver = {
486 .flags = CPUFREQ_STICKY,
487 .verify = exynos4_verify_speed,
488 .target = exynos4_target,
489 .get = exynos4_getspeed,
490 .init = exynos4_cpufreq_cpu_init,
491 .name = "exynos4_cpufreq",
492#ifdef CONFIG_PM
493 .suspend = exynos4_cpufreq_suspend,
494 .resume = exynos4_cpufreq_resume,
495#endif
496};
497
498static int __init exynos4_cpufreq_init(void)
499{
500 cpu_clk = clk_get(NULL, "armclk");
501 if (IS_ERR(cpu_clk))
502 return PTR_ERR(cpu_clk);
503
504 moutcore = clk_get(NULL, "moutcore");
505 if (IS_ERR(moutcore))
506 goto out;
507
508 mout_mpll = clk_get(NULL, "mout_mpll");
509 if (IS_ERR(mout_mpll))
510 goto out;
511
512 mout_apll = clk_get(NULL, "mout_apll");
513 if (IS_ERR(mout_apll))
514 goto out;
515
516 arm_regulator = regulator_get(NULL, "vdd_arm");
517 if (IS_ERR(arm_regulator)) {
518 printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
519 goto out;
520 }
521
522 int_regulator = regulator_get(NULL, "vdd_int");
523 if (IS_ERR(int_regulator)) {
524 printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
525 goto out;
526 }
527
528 /*
529 * Check the DRAM type, because the supported DVFS levels
530 * differ according to the DRAM type.
531 */
532 memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
533 memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
534 memtype &= S5P_DMC0_MEMTYPE_MASK;
535
536 if ((memtype < DDR2) || (memtype > DDR3)) {
537 printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
538 goto out;
539 } else {
540 printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
541 }
542
543 return cpufreq_register_driver(&exynos4_driver);
544
545out:
546 if (!IS_ERR(cpu_clk))
547 clk_put(cpu_clk);
548
549 if (!IS_ERR(moutcore))
550 clk_put(moutcore);
551
552 if (!IS_ERR(mout_mpll))
553 clk_put(mout_mpll);
554
555 if (!IS_ERR(mout_apll))
556 clk_put(mout_apll);
557
558 if (!IS_ERR(arm_regulator))
559 regulator_put(arm_regulator);
560
561 if (!IS_ERR(int_regulator))
562 regulator_put(int_regulator);
563
564 printk(KERN_ERR "%s: failed initialization\n", __func__);
565
566 return -EINVAL;
567}
568late_initcall(exynos4_cpufreq_init);
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
new file mode 100644
index 000000000000..89b178a3f849
--- /dev/null
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright (C) 2011 Dmitry Eremin-Solenikov
3 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
4 * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
11 * that is iMac G5 and latest single CPU desktop.
12 */
13
14#undef DEBUG
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/sched.h>
22#include <linux/cpufreq.h>
23#include <linux/init.h>
24#include <linux/completion.h>
25#include <linux/mutex.h>
26#include <linux/time.h>
27#include <linux/of.h>
28
29#define DBG(fmt...) pr_debug(fmt)
30
31/* see 970FX user manual */
32
33#define SCOM_PCR 0x0aa001 /* PCR scom addr */
34
35#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
36#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
37#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
38#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
39#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
40#define PCR_SPEED_SHIFT 17
41#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
42#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
43#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
44#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
45#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
46#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
47
48#define SCOM_PSR 0x408001 /* PSR scom addr */
49/* warning: PSR is a 64-bit register */
50#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
51#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
52#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
53#define PSR_CUR_SPEED_SHIFT (56)
54
55/*
56 * The G5 only supports two frequencies (Quarter speed is not supported)
57 */
58#define CPUFREQ_HIGH 0
59#define CPUFREQ_LOW 1
60
61static struct cpufreq_frequency_table maple_cpu_freqs[] = {
62 {CPUFREQ_HIGH, 0},
63 {CPUFREQ_LOW, 0},
64 {0, CPUFREQ_TABLE_END},
65};
66
67static struct freq_attr *maple_cpu_freqs_attr[] = {
68 &cpufreq_freq_attr_scaling_available_freqs,
69 NULL,
70};
71
72/* Power mode data is an array of the 32-bit PCR values to use for
73 * the various frequencies, retrieved from the device tree.
74 */
75static int maple_pmode_cur;
76
77static DEFINE_MUTEX(maple_switch_mutex);
78
79static const u32 *maple_pmode_data;
80static int maple_pmode_max;
81
82/*
83 * SCOM based frequency switching for 970FX rev3
84 */
85static int maple_scom_switch_freq(int speed_mode)
86{
87 unsigned long flags;
88 int to;
89
90 local_irq_save(flags);
91
92 /* Clear PCR high */
93 scom970_write(SCOM_PCR, 0);
94 /* Clear PCR low */
95 scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
96 /* Set PCR low */
97 scom970_write(SCOM_PCR, PCR_HILO_SELECT |
98 maple_pmode_data[speed_mode]);
99
100 /* Wait for completion */
101 for (to = 0; to < 10; to++) {
102 unsigned long psr = scom970_read(SCOM_PSR);
103
104 if ((psr & PSR_CMD_RECEIVED) == 0 &&
105 (((psr >> PSR_CUR_SPEED_SHIFT) ^
106 (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
107 == 0)
108 break;
109 if (psr & PSR_CMD_COMPLETED)
110 break;
111 udelay(100);
112 }
113
114 local_irq_restore(flags);
115
116 maple_pmode_cur = speed_mode;
117 ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
118
119 return 0;
120}
121
122static int maple_scom_query_freq(void)
123{
124 unsigned long psr = scom970_read(SCOM_PSR);
125 int i;
126
127 for (i = 0; i <= maple_pmode_max; i++)
128 if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
129 (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
130 break;
131 return i;
132}
133
134/*
135 * Common interface to the cpufreq core
136 */
137
138static int maple_cpufreq_verify(struct cpufreq_policy *policy)
139{
140 return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
141}
142
143static int maple_cpufreq_target(struct cpufreq_policy *policy,
144 unsigned int target_freq, unsigned int relation)
145{
146 unsigned int newstate = 0;
147 struct cpufreq_freqs freqs;
148 int rc;
149
150 if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
151 target_freq, relation, &newstate))
152 return -EINVAL;
153
154 if (maple_pmode_cur == newstate)
155 return 0;
156
157 mutex_lock(&maple_switch_mutex);
158
159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
160 freqs.new = maple_cpu_freqs[newstate].frequency;
161 freqs.cpu = 0;
162
163 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
164 rc = maple_scom_switch_freq(newstate);
165 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
166
167 mutex_unlock(&maple_switch_mutex);
168
169 return rc;
170}
171
172static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
173{
174 return maple_cpu_freqs[maple_pmode_cur].frequency;
175}
176
177static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
178{
179 policy->cpuinfo.transition_latency = 12000;
180 policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
181 /* Secondary CPUs are tied to the primary one by the cpufreq
182 * core: in each secondary policy we report that all CPUs must
183 * actually share one policy together. */
184 cpumask_copy(policy->cpus, cpu_online_mask);
185 cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
186
187 return cpufreq_frequency_table_cpuinfo(policy,
188 maple_cpu_freqs);
189}
190
191
192static struct cpufreq_driver maple_cpufreq_driver = {
193 .name = "maple",
194 .owner = THIS_MODULE,
195 .flags = CPUFREQ_CONST_LOOPS,
196 .init = maple_cpufreq_cpu_init,
197 .verify = maple_cpufreq_verify,
198 .target = maple_cpufreq_target,
199 .get = maple_cpufreq_get_speed,
200 .attr = maple_cpu_freqs_attr,
201};
202
203static int __init maple_cpufreq_init(void)
204{
205 struct device_node *cpus;
206 struct device_node *cpunode;
207 unsigned int psize;
208 unsigned long max_freq;
209 const u32 *valp;
210 u32 pvr_hi;
211 int rc = -ENODEV;
212
213 /*
214 * Behave here like powermac driver which checks machine compatibility
215 * to ease merging of two drivers in future.
216 */
217 if (!of_machine_is_compatible("Momentum,Maple") &&
218 !of_machine_is_compatible("Momentum,Apache"))
219 return 0;
220
221 cpus = of_find_node_by_path("/cpus");
222 if (cpus == NULL) {
223 DBG("No /cpus node !\n");
224 return -ENODEV;
225 }
226
227 /* Get first CPU node */
228 for (cpunode = NULL;
229 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
230 const u32 *reg = of_get_property(cpunode, "reg", NULL);
231 if (reg == NULL || (*reg) != 0)
232 continue;
233 if (!strcmp(cpunode->type, "cpu"))
234 break;
235 }
236 if (cpunode == NULL) {
237 printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
238 goto bail_cpus;
239 }
240
241 /* Check 970FX for now */
242 /* we actually don't care on which CPU to access PVR */
243 pvr_hi = PVR_VER(mfspr(SPRN_PVR));
244 if (pvr_hi != 0x3c && pvr_hi != 0x44) {
245 printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
246 pvr_hi);
247 goto bail_noprops;
248 }
249
250 /* Look for the powertune data in the device-tree */
251 /*
252 * On Maple this property is provided by PIBS in dual-processor config,
253 * not provided by PIBS in CPU0 config and also not provided by SLOF,
254 * so YMMV
255 */
256 maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
257 if (!maple_pmode_data) {
258 DBG("No power-mode-data !\n");
259 goto bail_noprops;
260 }
261 maple_pmode_max = psize / sizeof(u32) - 1;
262
263 /*
264 * From what I see, clock-frequency is always the maximal frequency.
265 * The current driver can not slew sysclk yet, so we really only deal
266 * with powertune steps for now. We also only implement full freq and
267 * half freq in this version. So far, I haven't yet seen a machine
268 * supporting anything else.
269 */
270 valp = of_get_property(cpunode, "clock-frequency", NULL);
271 if (!valp)
272 return -ENODEV;
273 max_freq = (*valp)/1000;
274 maple_cpu_freqs[0].frequency = max_freq;
275 maple_cpu_freqs[1].frequency = max_freq/2;
276
277 /* Force apply current frequency to make sure everything is in
278 * sync (voltage is right for example). Firmware may leave us with
279 * a strange setting ...
280 */
281 msleep(10);
282 maple_pmode_cur = -1;
283 maple_scom_switch_freq(maple_scom_query_freq());
284
285 printk(KERN_INFO "Registering Maple CPU frequency driver\n");
286 printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
287 maple_cpu_freqs[1].frequency/1000,
288 maple_cpu_freqs[0].frequency/1000,
289 maple_cpu_freqs[maple_pmode_cur].frequency/1000);
290
291 rc = cpufreq_register_driver(&maple_cpufreq_driver);
292
293 of_node_put(cpunode);
294 of_node_put(cpus);
295
296 return rc;
297
298bail_noprops:
299 of_node_put(cpunode);
300bail_cpus:
301 of_node_put(cpus);
302
303 return rc;
304}
305
306module_init(maple_cpufreq_init);
307
308
309MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
new file mode 100644
index 000000000000..b8d1d205e1ef
--- /dev/null
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright 2009 Wolfson Microelectronics plc
3 *
4 * S3C64xx CPUfreq Support
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/regulator/consumer.h>
18
19static struct clk *armclk;
20static struct regulator *vddarm;
21static unsigned long regulator_latency;
22
23#ifdef CONFIG_CPU_S3C6410
24struct s3c64xx_dvfs {
25 unsigned int vddarm_min;
26 unsigned int vddarm_max;
27};
28
29static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
30 [0] = { 1000000, 1150000 },
31 [1] = { 1050000, 1150000 },
32 [2] = { 1100000, 1150000 },
33 [3] = { 1200000, 1350000 },
34 [4] = { 1300000, 1350000 },
35};
36
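/*
 * The first field of each entry below is the index into s3c64xx_dvfs_table
 * above, i.e. the VDDARM voltage range required for that frequency.
 */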
37static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
38 { 0, 66000 },
39 { 0, 100000 },
40 { 0, 133000 },
41 { 1, 200000 },
42 { 1, 222000 },
43 { 1, 266000 },
44 { 2, 333000 },
45 { 2, 400000 },
46 { 2, 532000 },
47 { 2, 533000 },
48 { 3, 667000 },
49 { 4, 800000 },
50 { 0, CPUFREQ_TABLE_END },
51};
52#endif
53
54static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
55{
56 if (policy->cpu != 0)
57 return -EINVAL;
58
59 return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
60}
61
62static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
63{
64 if (cpu != 0)
65 return 0;
66
67 return clk_get_rate(armclk) / 1000;
68}
69
70static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
71 unsigned int target_freq,
72 unsigned int relation)
73{
74 int ret;
75 unsigned int i;
76 struct cpufreq_freqs freqs;
77 struct s3c64xx_dvfs *dvfs;
78
79 ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
80 target_freq, relation, &i);
81 if (ret != 0)
82 return ret;
83
84 freqs.cpu = 0;
85 freqs.old = clk_get_rate(armclk) / 1000;
86 freqs.new = s3c64xx_freq_table[i].frequency;
87 freqs.flags = 0;
88 dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
89
90 if (freqs.old == freqs.new)
91 return 0;
92
93 pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
94
95 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
96
97#ifdef CONFIG_REGULATOR
98 if (vddarm && freqs.new > freqs.old) {
99 ret = regulator_set_voltage(vddarm,
100 dvfs->vddarm_min,
101 dvfs->vddarm_max);
102 if (ret != 0) {
103 pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
104 freqs.new, ret);
105 goto err;
106 }
107 }
108#endif
109
110 ret = clk_set_rate(armclk, freqs.new * 1000);
111 if (ret < 0) {
112 pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
113 freqs.new, ret);
114 goto err;
115 }
116
117 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
118
119#ifdef CONFIG_REGULATOR
120 if (vddarm && freqs.new < freqs.old) {
121 ret = regulator_set_voltage(vddarm,
122 dvfs->vddarm_min,
123 dvfs->vddarm_max);
124 if (ret != 0) {
125 pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
126 freqs.new, ret);
127 goto err_clk;
128 }
129 }
130#endif
131
132 pr_debug("cpufreq: Set actual frequency %lukHz\n",
133 clk_get_rate(armclk) / 1000);
134
135 return 0;
136
137err_clk:
138 if (clk_set_rate(armclk, freqs.old * 1000) < 0)
139 pr_err("Failed to restore original clock rate\n");
140err:
141 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
142
143 return ret;
144}
145
146#ifdef CONFIG_REGULATOR
147static void __init s3c64xx_cpufreq_config_regulator(void)
148{
149 int count, v, i, found;
150 struct cpufreq_frequency_table *freq;
151 struct s3c64xx_dvfs *dvfs;
152
153 count = regulator_count_voltages(vddarm);
154 if (count < 0) {
155 pr_err("cpufreq: Unable to check supported voltages\n");
156 }
157
158 freq = s3c64xx_freq_table;
159 while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
160 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
161 continue;
162
163 dvfs = &s3c64xx_dvfs_table[freq->index];
164 found = 0;
165
166 for (i = 0; i < count; i++) {
167 v = regulator_list_voltage(vddarm, i);
168 if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
169 found = 1;
170 }
171
172 if (!found) {
173 pr_debug("cpufreq: %dkHz unsupported by regulator\n",
174 freq->frequency);
175 freq->frequency = CPUFREQ_ENTRY_INVALID;
176 }
177
178 freq++;
179 }
180
181 /* Guess based on having to do an I2C/SPI write; in future we
182 * will be able to query the regulator performance here. */
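	/* 1 * 1000 * 1000 ns is roughly 1 ms; it is added to transition_latency below. */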
183 regulator_latency = 1 * 1000 * 1000;
184}
185#endif
186
187static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
188{
189 int ret;
190 struct cpufreq_frequency_table *freq;
191
192 if (policy->cpu != 0)
193 return -EINVAL;
194
195 if (s3c64xx_freq_table == NULL) {
196 pr_err("cpufreq: No frequency information for this CPU\n");
197 return -ENODEV;
198 }
199
200 armclk = clk_get(NULL, "armclk");
201 if (IS_ERR(armclk)) {
202 pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
203 PTR_ERR(armclk));
204 return PTR_ERR(armclk);
205 }
206
207#ifdef CONFIG_REGULATOR
208 vddarm = regulator_get(NULL, "vddarm");
209 if (IS_ERR(vddarm)) {
210 ret = PTR_ERR(vddarm);
211 pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
212 pr_err("cpufreq: Only frequency scaling available\n");
213 vddarm = NULL;
214 } else {
215 s3c64xx_cpufreq_config_regulator();
216 }
217#endif
218
219 freq = s3c64xx_freq_table;
220 while (freq->frequency != CPUFREQ_TABLE_END) {
221 unsigned long r;
222
223 /* Check for frequencies we can generate */
224 r = clk_round_rate(armclk, freq->frequency * 1000);
225 r /= 1000;
226 if (r != freq->frequency) {
227 pr_debug("cpufreq: %dkHz unsupported by clock\n",
228 freq->frequency);
229 freq->frequency = CPUFREQ_ENTRY_INVALID;
230 }
231
232 /* If we have no regulator then assume startup
233 * frequency is the maximum we can support. */
234 if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
235 freq->frequency = CPUFREQ_ENTRY_INVALID;
236
237 freq++;
238 }
239
240 policy->cur = clk_get_rate(armclk) / 1000;
241
242 /* Datasheet says PLL stabilisation time (if we were to use
243 * the PLLs, which we don't currently) is ~300us worst case,
244 * but add some fudge.
245 */
246 policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
247
248 ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
249 if (ret != 0) {
250 pr_err("cpufreq: Failed to configure frequency table: %d\n",
251 ret);
252 regulator_put(vddarm);
253 clk_put(armclk);
254 }
255
256 return ret;
257}
258
259static struct cpufreq_driver s3c64xx_cpufreq_driver = {
260 .owner = THIS_MODULE,
261 .flags = 0,
262 .verify = s3c64xx_cpufreq_verify_speed,
263 .target = s3c64xx_cpufreq_set_target,
264 .get = s3c64xx_cpufreq_get_speed,
265 .init = s3c64xx_cpufreq_driver_init,
266 .name = "s3c",
267};
268
269static int __init s3c64xx_cpufreq_init(void)
270{
271 return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
272}
273module_init(s3c64xx_cpufreq_init);
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
new file mode 100644
index 000000000000..a484aaea9809
--- /dev/null
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -0,0 +1,649 @@
1/*
2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * CPU frequency scaling for S5PC110/S5PV210
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/err.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18#include <linux/cpufreq.h>
19#include <linux/reboot.h>
20#include <linux/regulator/consumer.h>
21#include <linux/suspend.h>
22
23#include <mach/map.h>
24#include <mach/regs-clock.h>
25
26static struct clk *cpu_clk;
27static struct clk *dmc0_clk;
28static struct clk *dmc1_clk;
29static struct cpufreq_freqs freqs;
30static DEFINE_MUTEX(set_freq_lock);
31
32/* APLL M,P,S values for 1G/800Mhz */
33#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
34#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
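/*
 * Both values set bit 31 (PLL enable) plus MDIV, PDIV and SDIV; assuming a
 * 24 MHz FIN, 125*24/3 = 1000 MHz and 100*24/3 = 800 MHz with SDIV = 1.
 */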
35
36/* Use 800MHz when entering sleep mode */
37#define SLEEP_FREQ (800 * 1000)
38
39/*
40 * 'relation' carries additional semantics beyond the standard cpufreq ones:
41 * DISABLE_FURTHER_CPUFREQ: disable further access to target
42 * ENABLE_FURTHER_CPUFREQ: enable access to target
43 */
44enum cpufreq_access {
45 DISABLE_FURTHER_CPUFREQ = 0x10,
46 ENABLE_FURTHER_CPUFREQ = 0x20,
47};
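/*
 * These values are passed in 'relation' by the PM and reboot notifiers below
 * to block frequency changes around suspend and reboot (and to re-enable
 * them after resume).
 */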
48
49static bool no_cpufreq_access;
50
51/*
52 * DRAM configurations to calculate refresh counter for changing
53 * frequency of memory.
54 */
55struct dram_conf {
56 unsigned long freq; /* HZ */
57 unsigned long refresh; /* DRAM refresh counter * 1000 */
58};
59
60/* DRAM configuration (DMC0 and DMC1) */
61static struct dram_conf s5pv210_dram_conf[2];
62
63enum perf_level {
64 L0, L1, L2, L3, L4,
65};
66
67enum s5pv210_mem_type {
68 LPDDR = 0x1,
69 LPDDR2 = 0x2,
70 DDR2 = 0x4,
71};
72
73enum s5pv210_dmc_port {
74 DMC0 = 0,
75 DMC1,
76};
77
78static struct cpufreq_frequency_table s5pv210_freq_table[] = {
79 {L0, 1000*1000},
80 {L1, 800*1000},
81 {L2, 400*1000},
82 {L3, 200*1000},
83 {L4, 100*1000},
84 {0, CPUFREQ_TABLE_END},
85};
86
87static struct regulator *arm_regulator;
88static struct regulator *int_regulator;
89
90struct s5pv210_dvs_conf {
91 int arm_volt; /* uV */
92 int int_volt; /* uV */
93};
94
95static const int arm_volt_max = 1350000;
96static const int int_volt_max = 1250000;
97
98static struct s5pv210_dvs_conf dvs_conf[] = {
99 [L0] = {
100 .arm_volt = 1250000,
101 .int_volt = 1100000,
102 },
103 [L1] = {
104 .arm_volt = 1200000,
105 .int_volt = 1100000,
106 },
107 [L2] = {
108 .arm_volt = 1050000,
109 .int_volt = 1100000,
110 },
111 [L3] = {
112 .arm_volt = 950000,
113 .int_volt = 1100000,
114 },
115 [L4] = {
116 .arm_volt = 950000,
117 .int_volt = 1000000,
118 },
119};
120
121static u32 clkdiv_val[5][11] = {
122 /*
123 * Clock divider value for following
124 * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
125 * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
126 * ONEDRAM, MFC, G3D }
127 */
128
129 /* L0 : [1000/200/100][166/83][133/66][200/200] */
130 {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
131
132 /* L1 : [800/200/100][166/83][133/66][200/200] */
133 {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
134
135 /* L2 : [400/200/100][166/83][133/66][200/200] */
136 {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
137
138 /* L3 : [200/200/100][166/83][133/66][200/200] */
139 {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
140
141 /* L4 : [100/100/100][83/83][66/66][100/100] */
142 {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
143};
144
145/*
146 * This function sets the DRAM refresh counter
147 * according to the operating frequency of the DRAM.
148 * ch: DMC port number 0 or 1
149 * freq: Operating frequency of DRAM(KHz)
150 */
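/*
 * In effect the counter is scaled in proportion to the DRAM clock:
 * new_count = boot_count * freq / boot_freq, so the refresh period
 * programmed at boot is preserved at the new frequency.
 */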
151static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
152{
153 unsigned long tmp, tmp1;
154 void __iomem *reg = NULL;
155
156 if (ch == DMC0) {
157 reg = (S5P_VA_DMC0 + 0x30);
158 } else if (ch == DMC1) {
159 reg = (S5P_VA_DMC1 + 0x30);
160 } else {
161 printk(KERN_ERR "Cannot find DMC port\n");
162 return;
163 }
164
165 /* Find current DRAM frequency */
166 tmp = s5pv210_dram_conf[ch].freq;
167
168 do_div(tmp, freq);
169
170 tmp1 = s5pv210_dram_conf[ch].refresh;
171
172 do_div(tmp1, tmp);
173
174 __raw_writel(tmp1, reg);
175}
176
177static int s5pv210_verify_speed(struct cpufreq_policy *policy)
178{
179 if (policy->cpu)
180 return -EINVAL;
181
182 return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
183}
184
185static unsigned int s5pv210_getspeed(unsigned int cpu)
186{
187 if (cpu)
188 return 0;
189
190 return clk_get_rate(cpu_clk) / 1000;
191}
192
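/*
 * DVFS sequence used below: raise VDDARM/VDDINT before going faster, switch
 * MSYS to the MPLL while the APLL is reprogrammed (transitions to or from L0),
 * retune the DRAM refresh counters around the PLL and bus-speed changes, and
 * lower the voltages after going slower.
 */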
193static int s5pv210_target(struct cpufreq_policy *policy,
194 unsigned int target_freq,
195 unsigned int relation)
196{
197 unsigned long reg;
198 unsigned int index, priv_index;
199 unsigned int pll_changing = 0;
200 unsigned int bus_speed_changing = 0;
201 int arm_volt, int_volt;
202 int ret = 0;
203
204 mutex_lock(&set_freq_lock);
205
206 if (relation & ENABLE_FURTHER_CPUFREQ)
207 no_cpufreq_access = false;
208
209 if (no_cpufreq_access) {
210#ifdef CONFIG_PM_VERBOSE
211 pr_err("%s:%d denied access to %s as it is disabled "
212 "temporarily\n", __FILE__, __LINE__, __func__);
213#endif
214 ret = -EINVAL;
215 goto exit;
216 }
217
218 if (relation & DISABLE_FURTHER_CPUFREQ)
219 no_cpufreq_access = true;
220
221 relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
222
223 freqs.old = s5pv210_getspeed(0);
224
225 if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
226 target_freq, relation, &index)) {
227 ret = -EINVAL;
228 goto exit;
229 }
230
231 freqs.new = s5pv210_freq_table[index].frequency;
232 freqs.cpu = 0;
233
234 if (freqs.new == freqs.old)
235 goto exit;
236
237 /* Finding current running level index */
238 if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
239 freqs.old, relation, &priv_index)) {
240 ret = -EINVAL;
241 goto exit;
242 }
243
244 arm_volt = dvs_conf[index].arm_volt;
245 int_volt = dvs_conf[index].int_volt;
246
247 if (freqs.new > freqs.old) {
248 ret = regulator_set_voltage(arm_regulator,
249 arm_volt, arm_volt_max);
250 if (ret)
251 goto exit;
252
253 ret = regulator_set_voltage(int_regulator,
254 int_volt, int_volt_max);
255 if (ret)
256 goto exit;
257 }
258
259 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
260
261 /* Check whether the PLL needs to be changed */
262 if ((index == L0) || (priv_index == L0))
263 pll_changing = 1;
264
265 /* Check whether the system bus clock needs to be changed */
266 if ((index == L4) || (priv_index == L4))
267 bus_speed_changing = 1;
268
269 if (bus_speed_changing) {
270 /*
271 * Reconfigure DRAM refresh counter value for minimum
272 * temporary clock while changing divider.
273 * expected clock is 83Mhz : 7.8usec/(1/83Mhz) = 0x287
274 */
275 if (pll_changing)
276 s5pv210_set_refresh(DMC1, 83000);
277 else
278 s5pv210_set_refresh(DMC1, 100000);
279
280 s5pv210_set_refresh(DMC0, 83000);
281 }
282
283 /*
284 * The APLL must be changed at this level:
285 * APLL -> MPLL (for a stable transition) -> APLL.
286 * Some clock sources' clock APIs are not ready yet,
287 * so do not use the clock API in the code below.
288 */
289 if (pll_changing) {
290 /*
291 * 1. Temporary Change divider for MFC and G3D
292 * SCLKA2M(200/1=200)->(200/4=50)Mhz
293 */
294 reg = __raw_readl(S5P_CLK_DIV2);
295 reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
296 reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
297 (3 << S5P_CLKDIV2_MFC_SHIFT);
298 __raw_writel(reg, S5P_CLK_DIV2);
299
300 /* For MFC, G3D dividing */
301 do {
302 reg = __raw_readl(S5P_CLKDIV_STAT0);
303 } while (reg & ((1 << 16) | (1 << 17)));
304
305 /*
306 * 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX
307 * (200/4=50)->(667/4=166)Mhz
308 */
309 reg = __raw_readl(S5P_CLK_SRC2);
310 reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
311 reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
312 (1 << S5P_CLKSRC2_MFC_SHIFT);
313 __raw_writel(reg, S5P_CLK_SRC2);
314
315 do {
316 reg = __raw_readl(S5P_CLKMUX_STAT1);
317 } while (reg & ((1 << 7) | (1 << 3)));
318
319 /*
320 * 3. DMC1 refresh count for 133Mhz; if (index == L4) the
321 * refresh counter has already been programmed above
322 * (0x287 @ 83Mhz).
323 */
324 if (!bus_speed_changing)
325 s5pv210_set_refresh(DMC1, 133000);
326
327 /* 4. SCLKAPLL -> SCLKMPLL */
328 reg = __raw_readl(S5P_CLK_SRC0);
329 reg &= ~(S5P_CLKSRC0_MUX200_MASK);
330 reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
331 __raw_writel(reg, S5P_CLK_SRC0);
332
333 do {
334 reg = __raw_readl(S5P_CLKMUX_STAT0);
335 } while (reg & (0x1 << 18));
336
337 }
338
339 /* Change divider */
340 reg = __raw_readl(S5P_CLK_DIV0);
341
342 reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
343 S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
344 S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
345 S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
346
347 reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
348 (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
349 (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
350 (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
351 (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
352 (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
353 (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
354 (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
355
356 __raw_writel(reg, S5P_CLK_DIV0);
357
358 do {
359 reg = __raw_readl(S5P_CLKDIV_STAT0);
360 } while (reg & 0xff);
361
362 /* ARM MCS value changed */
363 reg = __raw_readl(S5P_ARM_MCS_CON);
364 reg &= ~0x3;
365 if (index >= L3)
366 reg |= 0x3;
367 else
368 reg |= 0x1;
369
370 __raw_writel(reg, S5P_ARM_MCS_CON);
371
372 if (pll_changing) {
373 /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
374 __raw_writel(0x2cf, S5P_APLL_LOCK);
375
376 /*
377 * 6. Turn on APLL
378 * 6-1. Set PMS values
379 * 6-2. Wait until the PLL is locked
380 */
381 if (index == L0)
382 __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
383 else
384 __raw_writel(APLL_VAL_800, S5P_APLL_CON);
385
386 do {
387 reg = __raw_readl(S5P_APLL_CON);
388 } while (!(reg & (0x1 << 29)));
389
390 /*
391 * 7. Change source clock from SCLKMPLL(667Mhz)
392 * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
393 * (667/4=166)->(200/4=50)Mhz
394 */
395 reg = __raw_readl(S5P_CLK_SRC2);
396 reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
397 reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
398 (0 << S5P_CLKSRC2_MFC_SHIFT);
399 __raw_writel(reg, S5P_CLK_SRC2);
400
401 do {
402 reg = __raw_readl(S5P_CLKMUX_STAT1);
403 } while (reg & ((1 << 7) | (1 << 3)));
404
405 /*
406 * 8. Change divider for MFC and G3D
407 * (200/4=50)->(200/1=200)Mhz
408 */
409 reg = __raw_readl(S5P_CLK_DIV2);
410 reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
411 reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
412 (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
413 __raw_writel(reg, S5P_CLK_DIV2);
414
415 /* For MFC, G3D dividing */
416 do {
417 reg = __raw_readl(S5P_CLKDIV_STAT0);
418 } while (reg & ((1 << 16) | (1 << 17)));
419
420 /* 9. Change MPLL to APLL in MSYS_MUX */
421 reg = __raw_readl(S5P_CLK_SRC0);
422 reg &= ~(S5P_CLKSRC0_MUX200_MASK);
423 reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
424 __raw_writel(reg, S5P_CLK_SRC0);
425
426 do {
427 reg = __raw_readl(S5P_CLKMUX_STAT0);
428 } while (reg & (0x1 << 18));
429
430 /*
431 * 10. DMC1 refresh counter
432 * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
433 * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
434 */
435 if (!bus_speed_changing)
436 s5pv210_set_refresh(DMC1, 200000);
437 }
438
439 /*
440 * The L4 level needs to change the memory bus speed, hence the onedram
441 * clock divider and the memory refresh parameters must be changed.
442 */
443 if (bus_speed_changing) {
444 reg = __raw_readl(S5P_CLK_DIV6);
445 reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
446 reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
447 __raw_writel(reg, S5P_CLK_DIV6);
448
449 do {
450 reg = __raw_readl(S5P_CLKDIV_STAT1);
451 } while (reg & (1 << 15));
452
453 /* Reconfigure DRAM refresh counter value */
454 if (index != L4) {
455 /*
456 * DMC0 : 166Mhz
457 * DMC1 : 200Mhz
458 */
459 s5pv210_set_refresh(DMC0, 166000);
460 s5pv210_set_refresh(DMC1, 200000);
461 } else {
462 /*
463 * DMC0 : 83Mhz
464 * DMC1 : 100Mhz
465 */
466 s5pv210_set_refresh(DMC0, 83000);
467 s5pv210_set_refresh(DMC1, 100000);
468 }
469 }
470
471 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
472
473 if (freqs.new < freqs.old) {
474 regulator_set_voltage(int_regulator,
475 int_volt, int_volt_max);
476
477 regulator_set_voltage(arm_regulator,
478 arm_volt, arm_volt_max);
479 }
480
481 printk(KERN_DEBUG "Perf changed[L%d]\n", index);
482
483exit:
484 mutex_unlock(&set_freq_lock);
485 return ret;
486}
487
488#ifdef CONFIG_PM
489static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
490{
491 return 0;
492}
493
494static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
495{
496 return 0;
497}
498#endif
499
500static int check_mem_type(void __iomem *dmc_reg)
501{
502 unsigned long val;
503
504 val = __raw_readl(dmc_reg + 0x4);
505 val = (val & (0xf << 8));
506
507 return val >> 8;
508}
509
510static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
511{
512 unsigned long mem_type;
513 int ret;
514
515 cpu_clk = clk_get(NULL, "armclk");
516 if (IS_ERR(cpu_clk))
517 return PTR_ERR(cpu_clk);
518
519 dmc0_clk = clk_get(NULL, "sclk_dmc0");
520 if (IS_ERR(dmc0_clk)) {
521 ret = PTR_ERR(dmc0_clk);
522 goto out_dmc0;
523 }
524
525 dmc1_clk = clk_get(NULL, "hclk_msys");
526 if (IS_ERR(dmc1_clk)) {
527 ret = PTR_ERR(dmc1_clk);
528 goto out_dmc1;
529 }
530
531 if (policy->cpu != 0) {
532 ret = -EINVAL;
533 goto out_dmc1;
534 }
535
536 /*
537 * check_mem_type: this driver only supports LPDDR & LPDDR2;
538 * other memory types are not supported.
539 */
540 mem_type = check_mem_type(S5P_VA_DMC0);
541
542 if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
543 printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
544 ret = -EINVAL;
545 goto out_dmc1;
546 }
547
548 /* Find the current refresh counter and frequency for each DMC */
549 s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
550 s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
551
552 s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
553 s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
554
555 policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
556
557 cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
558
559 policy->cpuinfo.transition_latency = 40000;
560
561 return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
562
563out_dmc1:
564 clk_put(dmc0_clk);
565out_dmc0:
566 clk_put(cpu_clk);
567 return ret;
568}
569
570static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
571 unsigned long event, void *ptr)
572{
573 int ret;
574
575 switch (event) {
576 case PM_SUSPEND_PREPARE:
577 ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
578 DISABLE_FURTHER_CPUFREQ);
579 if (ret < 0)
580 return NOTIFY_BAD;
581
582 return NOTIFY_OK;
583 case PM_POST_RESTORE:
584 case PM_POST_SUSPEND:
585 cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
586 ENABLE_FURTHER_CPUFREQ);
587
588 return NOTIFY_OK;
589 }
590
591 return NOTIFY_DONE;
592}
593
594static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
595 unsigned long event, void *ptr)
596{
597 int ret;
598
599 ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
600 DISABLE_FURTHER_CPUFREQ);
601 if (ret < 0)
602 return NOTIFY_BAD;
603
604 return NOTIFY_DONE;
605}
606
607static struct cpufreq_driver s5pv210_driver = {
608 .flags = CPUFREQ_STICKY,
609 .verify = s5pv210_verify_speed,
610 .target = s5pv210_target,
611 .get = s5pv210_getspeed,
612 .init = s5pv210_cpu_init,
613 .name = "s5pv210",
614#ifdef CONFIG_PM
615 .suspend = s5pv210_cpufreq_suspend,
616 .resume = s5pv210_cpufreq_resume,
617#endif
618};
619
620static struct notifier_block s5pv210_cpufreq_notifier = {
621 .notifier_call = s5pv210_cpufreq_notifier_event,
622};
623
624static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
625 .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
626};
627
628static int __init s5pv210_cpufreq_init(void)
629{
630 arm_regulator = regulator_get(NULL, "vddarm");
631 if (IS_ERR(arm_regulator)) {
632 pr_err("failed to get regulator vddarm");
633 return PTR_ERR(arm_regulator);
634 }
635
636 int_regulator = regulator_get(NULL, "vddint");
637 if (IS_ERR(int_regulator)) {
638 pr_err("failed to get regulator vddint");
639 regulator_put(arm_regulator);
640 return PTR_ERR(int_regulator);
641 }
642
643 register_pm_notifier(&s5pv210_cpufreq_notifier);
644 register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
645
646 return cpufreq_register_driver(&s5pv210_driver);
647}
648
649late_initcall(s5pv210_cpufreq_init);