aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2013-04-27 20:10:46 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2013-04-27 20:10:46 -0400
commit885f925eef411f549f17bc64dd054a3269cf66cd (patch)
tree6bac783d573a51e497ad28c19b5a71defac85f39 /drivers/cpufreq
parente4f5a3adc454745fea35f1c312e14cbeba6e0ea4 (diff)
parent45c009a9a447655aecbdb06c86126f05d0272171 (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq: (57 commits) cpufreq: MAINTAINERS: Add co-maintainer cpufreq: pxa2xx: initialize variables ARM: S5pv210: compiling issue, ARM_S5PV210_CPUFREQ needs CONFIG_CPU_FREQ_TABLE=y cpufreq: cpu0: Put cpu parent node after using it cpufreq: ARM big LITTLE: Adapt to latest cpufreq updates cpufreq: ARM big LITTLE: put DT nodes after using them cpufreq: Don't call __cpufreq_governor() for drivers without target() cpufreq: exynos5440: Protect OPP search calls with RCU lock cpufreq: dbx500: Round to closest available freq cpufreq: Call __cpufreq_governor() with correct policy->cpus mask cpufreq / intel_pstate: Optimize intel_pstate_set_policy cpufreq: OMAP: instantiate omap-cpufreq as a platform_driver arm: exynos: Enable OPP library support for exynos5440 cpufreq: exynos: Remove error return even if no soc is found cpufreq: exynos: Add cpufreq driver for exynos5440 cpufreq: AMD "frequency sensitivity feedback" powersave bias for ondemand governor cpufreq: ondemand: allow custom powersave_bias_target handler to be registered cpufreq: convert cpufreq_driver to using RCU cpufreq: powerpc/platforms/cell: move cpufreq driver to drivers/cpufreq cpufreq: sparc: move cpufreq driver to drivers/cpufreq ... Conflicts: MAINTAINERS (with commit a8e39c3 from pm-cpuidle) drivers/cpufreq/cpufreq_governor.h (with commit beb0ff3)
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--drivers/cpufreq/Kconfig89
-rw-r--r--drivers/cpufreq/Kconfig.arm148
-rw-r--r--drivers/cpufreq/Kconfig.powerpc18
-rw-r--r--drivers/cpufreq/Kconfig.x8617
-rw-r--r--drivers/cpufreq/Makefile41
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c11
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c148
-rw-r--r--drivers/cpufreq/arm_big_little.c278
-rw-r--r--drivers/cpufreq/arm_big_little.h40
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c107
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c123
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c247
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c32
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c11
-rw-r--r--drivers/cpufreq/cpufreq.c418
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c244
-rw-r--r--drivers/cpufreq/cpufreq_governor.c291
-rw-r--r--drivers/cpufreq/cpufreq_governor.h128
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c363
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c146
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c142
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c231
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c22
-rw-r--r--drivers/cpufreq/e_powersaver.c11
-rw-r--r--drivers/cpufreq/elanfreq.c10
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c9
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c481
-rw-r--r--drivers/cpufreq/gx-suspmod.c11
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c438
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c12
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c220
-rw-r--r--drivers/cpufreq/intel_pstate.c21
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c18
-rw-r--r--drivers/cpufreq/longhaul.c18
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c248
-rw-r--r--drivers/cpufreq/maple-cpufreq.c5
-rw-r--r--drivers/cpufreq/omap-cpufreq.c34
-rw-r--r--drivers/cpufreq/p4-clockmod.c13
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c5
-rw-r--r--drivers/cpufreq/powernow-k6.c12
-rw-r--r--drivers/cpufreq/powernow-k7.c10
-rw-r--r--drivers/cpufreq/powernow-k8.c19
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c209
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h24
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c115
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c156
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c492
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c254
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c5
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c7
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c5
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c247
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c406
-rw-r--r--drivers/cpufreq/sc520_freq.c10
-rw-r--r--drivers/cpufreq/sh-cpufreq.c189
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c408
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c269
-rw-r--r--drivers/cpufreq/spear-cpufreq.c7
-rw-r--r--drivers/cpufreq/speedstep-centrino.c28
-rw-r--r--drivers/cpufreq/speedstep-ich.c12
-rw-r--r--drivers/cpufreq/speedstep-smi.c5
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c292
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c92
63 files changed, 7366 insertions, 756 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index cbcb21e32771..a1488f58f6ca 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -205,10 +205,99 @@ depends on ARM
205source "drivers/cpufreq/Kconfig.arm" 205source "drivers/cpufreq/Kconfig.arm"
206endmenu 206endmenu
207 207
208menu "AVR32 CPU frequency scaling drivers"
209depends on AVR32
210
211config AVR32_AT32AP_CPUFREQ
212 bool "CPU frequency driver for AT32AP"
213 depends on PLATFORM_AT32AP
214 default n
215 help
216 This enables the CPU frequency driver for AT32AP processors.
217 If in doubt, say N.
218
219endmenu
220
221menu "CPUFreq processor drivers"
222depends on IA64
223
224config IA64_ACPI_CPUFREQ
225 tristate "ACPI Processor P-States driver"
226 select CPU_FREQ_TABLE
227 depends on ACPI_PROCESSOR
228 help
229 This driver adds a CPUFreq driver which utilizes the ACPI
230 Processor Performance States.
231
232 For details, take a look at <file:Documentation/cpu-freq/>.
233
234 If in doubt, say N.
235
236endmenu
237
238menu "MIPS CPUFreq processor drivers"
239depends on MIPS
240
241config LOONGSON2_CPUFREQ
242 tristate "Loongson2 CPUFreq Driver"
243 select CPU_FREQ_TABLE
244 help
245 This option adds a CPUFreq driver for loongson processors which
246 support software configurable cpu frequency.
247
248 Loongson2F and it's successors support this feature.
249
250 For details, take a look at <file:Documentation/cpu-freq/>.
251
252 If in doubt, say N.
253
254endmenu
255
208menu "PowerPC CPU frequency scaling drivers" 256menu "PowerPC CPU frequency scaling drivers"
209depends on PPC32 || PPC64 257depends on PPC32 || PPC64
210source "drivers/cpufreq/Kconfig.powerpc" 258source "drivers/cpufreq/Kconfig.powerpc"
211endmenu 259endmenu
212 260
261menu "SPARC CPU frequency scaling drivers"
262depends on SPARC64
263config SPARC_US3_CPUFREQ
264 tristate "UltraSPARC-III CPU Frequency driver"
265 select CPU_FREQ_TABLE
266 help
267 This adds the CPUFreq driver for UltraSPARC-III processors.
268
269 For details, take a look at <file:Documentation/cpu-freq>.
270
271 If in doubt, say N.
272
273config SPARC_US2E_CPUFREQ
274 tristate "UltraSPARC-IIe CPU Frequency driver"
275 select CPU_FREQ_TABLE
276 help
277 This adds the CPUFreq driver for UltraSPARC-IIe processors.
278
279 For details, take a look at <file:Documentation/cpu-freq>.
280
281 If in doubt, say N.
282endmenu
283
284menu "SH CPU Frequency scaling"
285depends on SUPERH
286config SH_CPU_FREQ
287 tristate "SuperH CPU Frequency driver"
288 select CPU_FREQ_TABLE
289 help
290 This adds the cpufreq driver for SuperH. Any CPU that supports
291 clock rate rounding through the clock framework can use this
292 driver. While it will make the kernel slightly larger, this is
293 harmless for CPUs that don't support rate rounding. The driver
294 will also generate a notice in the boot log before disabling
295 itself if the CPU in question is not capable of rate rounding.
296
297 For details, take a look at <file:Documentation/cpu-freq>.
298
299 If unsure, say N.
300endmenu
301
213endif 302endif
214endmenu 303endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 030ddf6dd3f1..f3af18b9acc5 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,93 @@
2# ARM CPU Frequency scaling drivers 2# ARM CPU Frequency scaling drivers
3# 3#
4 4
5config ARM_BIG_LITTLE_CPUFREQ
6 tristate
7 depends on ARM_CPU_TOPOLOGY
8
9config ARM_DT_BL_CPUFREQ
10 tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
11 select ARM_BIG_LITTLE_CPUFREQ
12 depends on OF && HAVE_CLK
13 help
14 This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
15 This gets frequency tables from DT.
16
17config ARM_EXYNOS_CPUFREQ
18 bool "SAMSUNG EXYNOS SoCs"
19 depends on ARCH_EXYNOS
20 default y
21 help
22 This adds the CPUFreq driver common part for Samsung
23 EXYNOS SoCs.
24
25 If in doubt, say N.
26
27config ARM_EXYNOS4210_CPUFREQ
28 def_bool CPU_EXYNOS4210
29 help
30 This adds the CPUFreq driver for Samsung EXYNOS4210
31 SoC (S5PV310 or S5PC210).
32
33config ARM_EXYNOS4X12_CPUFREQ
34 def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
35 help
36 This adds the CPUFreq driver for Samsung EXYNOS4X12
37 SoC (EXYNOS4212 or EXYNOS4412).
38
39config ARM_EXYNOS5250_CPUFREQ
40 def_bool SOC_EXYNOS5250
41 help
42 This adds the CPUFreq driver for Samsung EXYNOS5250
43 SoC.
44
45config ARM_EXYNOS5440_CPUFREQ
46 def_bool SOC_EXYNOS5440
47 depends on HAVE_CLK && PM_OPP && OF
48 help
49 This adds the CPUFreq driver for Samsung EXYNOS5440
50 SoC. The nature of exynos5440 clock controller is
51 different than previous exynos controllers so not using
52 the common exynos framework.
53
54config ARM_HIGHBANK_CPUFREQ
55 tristate "Calxeda Highbank-based"
56 depends on ARCH_HIGHBANK
57 select CPU_FREQ_TABLE
58 select GENERIC_CPUFREQ_CPU0
59 select PM_OPP
60 select REGULATOR
61
62 default m
63 help
64 This adds the CPUFreq driver for Calxeda Highbank SoC
65 based boards.
66
67 If in doubt, say N.
68
69config ARM_IMX6Q_CPUFREQ
70 tristate "Freescale i.MX6Q cpufreq support"
71 depends on SOC_IMX6Q
72 depends on REGULATOR_ANATOP
73 help
74 This adds cpufreq driver support for Freescale i.MX6Q SOC.
75
76 If in doubt, say N.
77
78config ARM_INTEGRATOR
79 tristate "CPUfreq driver for ARM Integrator CPUs"
80 depends on ARCH_INTEGRATOR
81 default y
82 help
83 This enables the CPUfreq driver for ARM Integrator CPUs.
84 If in doubt, say Y.
85
86config ARM_KIRKWOOD_CPUFREQ
87 def_bool ARCH_KIRKWOOD && OF
88 help
89 This adds the CPUFreq driver for Marvell Kirkwood
90 SoCs.
91
5config ARM_OMAP2PLUS_CPUFREQ 92config ARM_OMAP2PLUS_CPUFREQ
6 bool "TI OMAP2+" 93 bool "TI OMAP2+"
7 depends on ARCH_OMAP2PLUS 94 depends on ARCH_OMAP2PLUS
@@ -42,6 +129,7 @@ config ARM_S3C64XX_CPUFREQ
42config ARM_S5PV210_CPUFREQ 129config ARM_S5PV210_CPUFREQ
43 bool "Samsung S5PV210 and S5PC110" 130 bool "Samsung S5PV210 and S5PC110"
44 depends on CPU_S5PV210 131 depends on CPU_S5PV210
132 select CPU_FREQ_TABLE
45 default y 133 default y
46 help 134 help
47 This adds the CPUFreq driver for Samsung S5PV210 and 135 This adds the CPUFreq driver for Samsung S5PV210 and
@@ -49,48 +137,11 @@ config ARM_S5PV210_CPUFREQ
49 137
50 If in doubt, say N. 138 If in doubt, say N.
51 139
52config ARM_EXYNOS_CPUFREQ 140config ARM_SA1100_CPUFREQ
53 bool "SAMSUNG EXYNOS SoCs" 141 bool
54 depends on ARCH_EXYNOS
55 default y
56 help
57 This adds the CPUFreq driver common part for Samsung
58 EXYNOS SoCs.
59
60 If in doubt, say N.
61 142
62config ARM_EXYNOS4210_CPUFREQ 143config ARM_SA1110_CPUFREQ
63 def_bool CPU_EXYNOS4210 144 bool
64 help
65 This adds the CPUFreq driver for Samsung EXYNOS4210
66 SoC (S5PV310 or S5PC210).
67
68config ARM_EXYNOS4X12_CPUFREQ
69 def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
70 help
71 This adds the CPUFreq driver for Samsung EXYNOS4X12
72 SoC (EXYNOS4212 or EXYNOS4412).
73
74config ARM_EXYNOS5250_CPUFREQ
75 def_bool SOC_EXYNOS5250
76 help
77 This adds the CPUFreq driver for Samsung EXYNOS5250
78 SoC.
79
80config ARM_KIRKWOOD_CPUFREQ
81 def_bool ARCH_KIRKWOOD && OF
82 help
83 This adds the CPUFreq driver for Marvell Kirkwood
84 SoCs.
85
86config ARM_IMX6Q_CPUFREQ
87 tristate "Freescale i.MX6Q cpufreq support"
88 depends on SOC_IMX6Q
89 depends on REGULATOR_ANATOP
90 help
91 This adds cpufreq driver support for Freescale i.MX6Q SOC.
92
93 If in doubt, say N.
94 145
95config ARM_SPEAR_CPUFREQ 146config ARM_SPEAR_CPUFREQ
96 bool "SPEAr CPUFreq support" 147 bool "SPEAr CPUFreq support"
@@ -98,18 +149,3 @@ config ARM_SPEAR_CPUFREQ
98 default y 149 default y
99 help 150 help
100 This adds the CPUFreq driver support for SPEAr SOCs. 151 This adds the CPUFreq driver support for SPEAr SOCs.
101
102config ARM_HIGHBANK_CPUFREQ
103 tristate "Calxeda Highbank-based"
104 depends on ARCH_HIGHBANK
105 select CPU_FREQ_TABLE
106 select GENERIC_CPUFREQ_CPU0
107 select PM_OPP
108 select REGULATOR
109
110 default m
111 help
112 This adds the CPUFreq driver for Calxeda Highbank SoC
113 based boards.
114
115 If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index e76992f79683..9c926ca0d718 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,3 +1,21 @@
1config CPU_FREQ_CBE
2 tristate "CBE frequency scaling"
3 depends on CBE_RAS && PPC_CELL
4 default m
5 help
6 This adds the cpufreq driver for Cell BE processors.
7 For details, take a look at <file:Documentation/cpu-freq/>.
8 If you don't have such processor, say N
9
10config CPU_FREQ_CBE_PMI
11 bool "CBE frequency scaling using PMI interface"
12 depends on CPU_FREQ_CBE
13 default n
14 help
15 Select this, if you want to use the PMI interface to switch
16 frequencies. Using PMI, the processor will not only be able to run at
17 lower speed, but also at lower core voltage.
18
1config CPU_FREQ_MAPLE 19config CPU_FREQ_MAPLE
2 bool "Support for Maple 970FX Evaluation Board" 20 bool "Support for Maple 970FX Evaluation Board"
3 depends on PPC_MAPLE 21 depends on PPC_MAPLE
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index d7dc0ed6adb0..2b8a8c374548 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -129,6 +129,23 @@ config X86_POWERNOW_K8
129 129
130 For details, take a look at <file:Documentation/cpu-freq/>. 130 For details, take a look at <file:Documentation/cpu-freq/>.
131 131
132config X86_AMD_FREQ_SENSITIVITY
133 tristate "AMD frequency sensitivity feedback powersave bias"
134 depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
135 help
136 This adds AMD-specific powersave bias function to the ondemand
137 governor, which allows it to make more power-conscious frequency
138 change decisions based on feedback from hardware (availble on AMD
139 Family 16h and above).
140
141 Hardware feedback tells software how "sensitive" to frequency changes
142 the CPUs' workloads are. CPU-bound workloads will be more sensitive
143 -- they will perform better as frequency increases. Memory/IO-bound
144 workloads will be less sensitive -- they will not necessarily perform
145 better as frequency increases.
146
147 If in doubt, say N.
148
132config X86_GX_SUSPMOD 149config X86_GX_SUSPMOD
133 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" 150 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
134 depends on X86_32 && PCI 151 depends on X86_32 && PCI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 863fd1865d45..315b9231feb1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,23 +41,54 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
41obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 41obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
42obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 42obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
43obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o 43obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
44obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
44 45
45################################################################################## 46##################################################################################
46# ARM SoC drivers 47# ARM SoC drivers
48obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
49# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
50# LITTLE drivers, so that it is probed last.
51obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
52
53obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
47obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 54obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
48obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
49obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
50obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
51obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o 55obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
52obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 56obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
53obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o 57obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
54obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o 58obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
59obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
60obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
61obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
62obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
55obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o 63obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
56obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 64obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
65obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
66obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
67obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
68obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
69obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
70obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
71obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
72obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
57obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o 73obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 74obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o
59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
60 75
61################################################################################## 76##################################################################################
62# PowerPC platform drivers 77# PowerPC platform drivers
78obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
79ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
80obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
63obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o 81obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
82
83##################################################################################
84# Other platform drivers
85obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
86obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o
87obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
88obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
89obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
90obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
91obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
92obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
93obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
94obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 57a8774f0b4e..11b8b4b54ceb 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -423,7 +423,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
423 struct drv_cmd cmd; 423 struct drv_cmd cmd;
424 unsigned int next_state = 0; /* Index into freq_table */ 424 unsigned int next_state = 0; /* Index into freq_table */
425 unsigned int next_perf_state = 0; /* Index into perf table */ 425 unsigned int next_perf_state = 0; /* Index into perf table */
426 unsigned int i;
427 int result = 0; 426 int result = 0;
428 427
429 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); 428 pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
@@ -486,10 +485,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
486 485
487 freqs.old = perf->states[perf->state].core_frequency * 1000; 486 freqs.old = perf->states[perf->state].core_frequency * 1000;
488 freqs.new = data->freq_table[next_state].frequency; 487 freqs.new = data->freq_table[next_state].frequency;
489 for_each_cpu(i, policy->cpus) { 488 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
490 freqs.cpu = i;
491 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
492 }
493 489
494 drv_write(&cmd); 490 drv_write(&cmd);
495 491
@@ -502,10 +498,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
502 } 498 }
503 } 499 }
504 500
505 for_each_cpu(i, policy->cpus) { 501 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
506 freqs.cpu = i;
507 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
508 }
509 perf->state = next_perf_state; 502 perf->state = next_perf_state;
510 503
511out: 504out:
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 000000000000..f6b79ab0070b
--- /dev/null
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,148 @@
1/*
2 * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
3 * for the ondemand governor.
4 *
5 * Copyright (C) 2013 Advanced Micro Devices, Inc.
6 *
7 * Author: Jacob Shin <jacob.shin@amd.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/percpu-defs.h>
18#include <linux/init.h>
19#include <linux/mod_devicetable.h>
20
21#include <asm/msr.h>
22#include <asm/cpufeature.h>
23
24#include "cpufreq_governor.h"
25
26#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
27#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
28#define CLASS_CODE_SHIFT 56
29#define POWERSAVE_BIAS_MAX 1000
30#define POWERSAVE_BIAS_DEF 400
31
32struct cpu_data_t {
33 u64 actual;
34 u64 reference;
35 unsigned int freq_prev;
36};
37
38static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
39
40static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
41 unsigned int freq_next,
42 unsigned int relation)
43{
44 int sensitivity;
45 long d_actual, d_reference;
46 struct msr actual, reference;
47 struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
48 struct dbs_data *od_data = policy->governor_data;
49 struct od_dbs_tuners *od_tuners = od_data->tuners;
50 struct od_cpu_dbs_info_s *od_info =
51 od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
52
53 if (!od_info->freq_table)
54 return freq_next;
55
56 rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
57 &actual.l, &actual.h);
58 rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
59 &reference.l, &reference.h);
60 actual.h &= 0x00ffffff;
61 reference.h &= 0x00ffffff;
62
63 /* counter wrapped around, so stay on current frequency */
64 if (actual.q < data->actual || reference.q < data->reference) {
65 freq_next = policy->cur;
66 goto out;
67 }
68
69 d_actual = actual.q - data->actual;
70 d_reference = reference.q - data->reference;
71
72 /* divide by 0, so stay on current frequency as well */
73 if (d_reference == 0) {
74 freq_next = policy->cur;
75 goto out;
76 }
77
78 sensitivity = POWERSAVE_BIAS_MAX -
79 (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
80
81 clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
82
83 /* this workload is not CPU bound, so choose a lower freq */
84 if (sensitivity < od_tuners->powersave_bias) {
85 if (data->freq_prev == policy->cur)
86 freq_next = policy->cur;
87
88 if (freq_next > policy->cur)
89 freq_next = policy->cur;
90 else if (freq_next < policy->cur)
91 freq_next = policy->min;
92 else {
93 unsigned int index;
94
95 cpufreq_frequency_table_target(policy,
96 od_info->freq_table, policy->cur - 1,
97 CPUFREQ_RELATION_H, &index);
98 freq_next = od_info->freq_table[index].frequency;
99 }
100
101 data->freq_prev = freq_next;
102 } else
103 data->freq_prev = 0;
104
105out:
106 data->actual = actual.q;
107 data->reference = reference.q;
108 return freq_next;
109}
110
111static int __init amd_freq_sensitivity_init(void)
112{
113 u64 val;
114
115 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
116 return -ENODEV;
117
118 if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
119 return -ENODEV;
120
121 if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
122 return -ENODEV;
123
124 if (!(val >> CLASS_CODE_SHIFT))
125 return -ENODEV;
126
127 od_register_powersave_bias_handler(amd_powersave_bias_target,
128 POWERSAVE_BIAS_DEF);
129 return 0;
130}
131late_initcall(amd_freq_sensitivity_init);
132
133static void __exit amd_freq_sensitivity_exit(void)
134{
135 od_unregister_powersave_bias_handler();
136}
137module_exit(amd_freq_sensitivity_exit);
138
139static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
140 X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
141 {}
142};
143MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
144
145MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
146MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
147 "the ondemand governor.");
148MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 000000000000..dbdf677d2f36
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,278 @@
1/*
2 * ARM big.LITTLE Platforms CPUFreq support
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/clk.h>
23#include <linux/cpu.h>
24#include <linux/cpufreq.h>
25#include <linux/cpumask.h>
26#include <linux/export.h>
27#include <linux/of_platform.h>
28#include <linux/opp.h>
29#include <linux/slab.h>
30#include <linux/topology.h>
31#include <linux/types.h>
32
33#include "arm_big_little.h"
34
35/* Currently we support only two clusters */
36#define MAX_CLUSTERS 2
37
38static struct cpufreq_arm_bL_ops *arm_bL_ops;
39static struct clk *clk[MAX_CLUSTERS];
40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
42
43static int cpu_to_cluster(int cpu)
44{
45 return topology_physical_package_id(cpu);
46}
47
48static unsigned int bL_cpufreq_get(unsigned int cpu)
49{
50 u32 cur_cluster = cpu_to_cluster(cpu);
51
52 return clk_get_rate(clk[cur_cluster]) / 1000;
53}
54
55/* Validate policy frequency range */
56static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
57{
58 u32 cur_cluster = cpu_to_cluster(policy->cpu);
59
60 return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
61}
62
63/* Set clock frequency */
64static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
65 unsigned int target_freq, unsigned int relation)
66{
67 struct cpufreq_freqs freqs;
68 u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
69 int ret = 0;
70
71 cur_cluster = cpu_to_cluster(policy->cpu);
72
73 freqs.old = bL_cpufreq_get(policy->cpu);
74
75 /* Determine valid target frequency using freq_table */
76 cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
77 target_freq, relation, &freq_tab_idx);
78 freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
79
80 pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
81 __func__, cpu, cur_cluster, freqs.old, target_freq,
82 freqs.new);
83
84 if (freqs.old == freqs.new)
85 return 0;
86
87 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
88
89 ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
90 if (ret) {
91 pr_err("clk_set_rate failed: %d\n", ret);
92 return ret;
93 }
94
95 policy->cur = freqs.new;
96
97 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
98
99 return ret;
100}
101
102static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
103{
104 u32 cluster = cpu_to_cluster(cpu_dev->id);
105
106 if (!atomic_dec_return(&cluster_usage[cluster])) {
107 clk_put(clk[cluster]);
108 opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
109 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
110 }
111}
112
113static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
114{
115 u32 cluster = cpu_to_cluster(cpu_dev->id);
116 char name[14] = "cpu-cluster.";
117 int ret;
118
119 if (atomic_inc_return(&cluster_usage[cluster]) != 1)
120 return 0;
121
122 ret = arm_bL_ops->init_opp_table(cpu_dev);
123 if (ret) {
124 dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
125 __func__, cpu_dev->id, ret);
126 goto atomic_dec;
127 }
128
129 ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
130 if (ret) {
131 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
132 __func__, cpu_dev->id, ret);
133 goto atomic_dec;
134 }
135
136 name[12] = cluster + '0';
137 clk[cluster] = clk_get_sys(name, NULL);
138 if (!IS_ERR(clk[cluster])) {
139 dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
140 __func__, clk[cluster], freq_table[cluster],
141 cluster);
142 return 0;
143 }
144
145 dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
146 __func__, cpu_dev->id, cluster);
147 ret = PTR_ERR(clk[cluster]);
148 opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
149
150atomic_dec:
151 atomic_dec(&cluster_usage[cluster]);
152 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
153 cluster);
154 return ret;
155}
156
157/* Per-CPU initialization */
158static int bL_cpufreq_init(struct cpufreq_policy *policy)
159{
160 u32 cur_cluster = cpu_to_cluster(policy->cpu);
161 struct device *cpu_dev;
162 int ret;
163
164 cpu_dev = get_cpu_device(policy->cpu);
165 if (!cpu_dev) {
166 pr_err("%s: failed to get cpu%d device\n", __func__,
167 policy->cpu);
168 return -ENODEV;
169 }
170
171 ret = get_cluster_clk_and_freq_table(cpu_dev);
172 if (ret)
173 return ret;
174
175 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
176 if (ret) {
177 dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
178 policy->cpu, cur_cluster);
179 put_cluster_clk_and_freq_table(cpu_dev);
180 return ret;
181 }
182
183 cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
184
185 if (arm_bL_ops->get_transition_latency)
186 policy->cpuinfo.transition_latency =
187 arm_bL_ops->get_transition_latency(cpu_dev);
188 else
189 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
190
191 policy->cur = bL_cpufreq_get(policy->cpu);
192
193 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
194
195 dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
196 return 0;
197}
198
199static int bL_cpufreq_exit(struct cpufreq_policy *policy)
200{
201 struct device *cpu_dev;
202
203 cpu_dev = get_cpu_device(policy->cpu);
204 if (!cpu_dev) {
205 pr_err("%s: failed to get cpu%d device\n", __func__,
206 policy->cpu);
207 return -ENODEV;
208 }
209
210 put_cluster_clk_and_freq_table(cpu_dev);
211 dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
212
213 return 0;
214}
215
216/* Export freq_table to sysfs */
217static struct freq_attr *bL_cpufreq_attr[] = {
218 &cpufreq_freq_attr_scaling_available_freqs,
219 NULL,
220};
221
222static struct cpufreq_driver bL_cpufreq_driver = {
223 .name = "arm-big-little",
224 .flags = CPUFREQ_STICKY,
225 .verify = bL_cpufreq_verify_policy,
226 .target = bL_cpufreq_set_target,
227 .get = bL_cpufreq_get,
228 .init = bL_cpufreq_init,
229 .exit = bL_cpufreq_exit,
230 .have_governor_per_policy = true,
231 .attr = bL_cpufreq_attr,
232};
233
234int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
235{
236 int ret;
237
238 if (arm_bL_ops) {
239 pr_debug("%s: Already registered: %s, exiting\n", __func__,
240 arm_bL_ops->name);
241 return -EBUSY;
242 }
243
244 if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
245 pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
246 return -ENODEV;
247 }
248
249 arm_bL_ops = ops;
250
251 ret = cpufreq_register_driver(&bL_cpufreq_driver);
252 if (ret) {
253 pr_info("%s: Failed registering platform driver: %s, err: %d\n",
254 __func__, ops->name, ret);
255 arm_bL_ops = NULL;
256 } else {
257 pr_info("%s: Registered platform driver: %s\n", __func__,
258 ops->name);
259 }
260
261 return ret;
262}
263EXPORT_SYMBOL_GPL(bL_cpufreq_register);
264
265void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
266{
267 if (arm_bL_ops != ops) {
268 pr_err("%s: Registered with: %s, can't unregister, exiting\n",
269 __func__, arm_bL_ops->name);
270 return;
271 }
272
273 cpufreq_unregister_driver(&bL_cpufreq_driver);
274 pr_info("%s: Un-registered platform driver: %s\n", __func__,
275 arm_bL_ops->name);
276 arm_bL_ops = NULL;
277}
278EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 000000000000..70f18fc12d4a
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,40 @@
1/*
2 * ARM big.LITTLE platform's CPUFreq header file
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
#ifndef CPUFREQ_ARM_BIG_LITTLE_H
#define CPUFREQ_ARM_BIG_LITTLE_H

#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/types.h>

/*
 * Platform glue for the generic ARM big.LITTLE cpufreq driver.
 * Exactly one instance may be registered at a time; registering a
 * second set of ops fails with -EBUSY.
 */
struct cpufreq_arm_bL_ops {
	char name[CPUFREQ_NAME_LEN];	/* used in log messages; must be non-empty */
	int (*get_transition_latency)(struct device *cpu_dev);	/* optional */

	/*
	 * This must set opp table for cpu_dev in a similar way as done by
	 * of_init_opp_table().
	 */
	int (*init_opp_table)(struct device *cpu_dev);	/* mandatory */
};

/* Register/unregister a platform's ops with the generic driver. */
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);

#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
new file mode 100644
index 000000000000..44be3115375c
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -0,0 +1,107 @@
1/*
2 * Generic big.LITTLE CPUFreq Interface driver
3 *
4 * It provides necessary ops to arm_big_little cpufreq driver and gets
5 * Frequency information from Device Tree. Freq table in DT must be in KHz.
6 *
7 * Copyright (C) 2013 Linaro.
8 * Viresh Kumar <viresh.kumar@linaro.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/cpufreq.h>
23#include <linux/device.h>
24#include <linux/export.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/opp.h>
28#include <linux/slab.h>
29#include <linux/types.h>
30#include "arm_big_little.h"
31
32static int dt_init_opp_table(struct device *cpu_dev)
33{
34 struct device_node *np, *parent;
35 int count = 0, ret;
36
37 parent = of_find_node_by_path("/cpus");
38 if (!parent) {
39 pr_err("failed to find OF /cpus\n");
40 return -ENOENT;
41 }
42
43 for_each_child_of_node(parent, np) {
44 if (count++ != cpu_dev->id)
45 continue;
46 if (!of_get_property(np, "operating-points", NULL)) {
47 ret = -ENODATA;
48 } else {
49 cpu_dev->of_node = np;
50 ret = of_init_opp_table(cpu_dev);
51 }
52 of_node_put(np);
53 of_node_put(parent);
54
55 return ret;
56 }
57
58 return -ENODEV;
59}
60
61static int dt_get_transition_latency(struct device *cpu_dev)
62{
63 struct device_node *np, *parent;
64 u32 transition_latency = CPUFREQ_ETERNAL;
65 int count = 0;
66
67 parent = of_find_node_by_path("/cpus");
68 if (!parent) {
69 pr_err("failed to find OF /cpus\n");
70 return -ENOENT;
71 }
72
73 for_each_child_of_node(parent, np) {
74 if (count++ != cpu_dev->id)
75 continue;
76
77 of_property_read_u32(np, "clock-latency", &transition_latency);
78 of_node_put(np);
79 of_node_put(parent);
80
81 return 0;
82 }
83
84 return -ENODEV;
85}
86
/* DT-backed implementation of the big.LITTLE platform ops. */
static struct cpufreq_arm_bL_ops dt_bL_ops = {
	.name	= "dt-bl",
	.get_transition_latency = dt_get_transition_latency,
	.init_opp_table = dt_init_opp_table,
};

/* Module entry point: hand the DT-based ops to the generic driver. */
static int generic_bL_init(void)
{
	return bL_cpufreq_register(&dt_bL_ops);
}
module_init(generic_bL_init);
98
99static void generic_bL_exit(void)
100{
101 return bL_cpufreq_unregister(&dt_bL_ops);
102}
103module_exit(generic_bL_exit);
104
105MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
106MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
107MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
new file mode 100644
index 000000000000..654488723cb5
--- /dev/null
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2004-2007 Atmel Corporation
3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12/*#define DEBUG*/
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/cpufreq.h>
18#include <linux/io.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/export.h>
22
23static struct clk *cpuclk;
24
25static int at32_verify_speed(struct cpufreq_policy *policy)
26{
27 if (policy->cpu != 0)
28 return -EINVAL;
29
30 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
31 policy->cpuinfo.max_freq);
32 return 0;
33}
34
35static unsigned int at32_get_speed(unsigned int cpu)
36{
37 /* No SMP support */
38 if (cpu)
39 return 0;
40 return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
41}
42
/* Reference freq (kHz) and loops_per_jiffy captured at the first transition */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;

/*
 * Switch the CPU clock to the closest rate the clk framework grants for
 * @target_freq (kHz).  Fires PRE/POSTCHANGE notifiers around the rate
 * change and rescales loops_per_jiffy so delay loops stay calibrated.
 */
static int at32_set_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	struct cpufreq_freqs freqs;
	long freq;

	/* Convert target_freq from kHz to Hz */
	freq = clk_round_rate(cpuclk, target_freq * 1000);

	/* Check if policy->min <= new_freq <= policy->max */
	if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
		return -EINVAL;

	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);

	freqs.old = at32_get_speed(0);
	freqs.new = (freq + 500) / 1000;	/* back to kHz, rounded */
	freqs.flags = 0;

	/* Remember the boot-time lpj/freq pair for cpufreq_scale() */
	if (!ref_freq) {
		ref_freq = freqs.old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* Scale lpj before raising the clock, after lowering it */
	if (freqs.old < freqs.new)
		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
				loops_per_jiffy_ref, ref_freq, freqs.new);
	clk_set_rate(cpuclk, freq);
	if (freqs.new < freqs.old)
		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
				loops_per_jiffy_ref, ref_freq, freqs.new);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	pr_debug("cpufreq: set frequency %lu Hz\n", freq);

	return 0;
}
85
/*
 * Per-policy init (CPU 0 only): grab the "cpu" clk and derive the
 * cpuinfo limits from the lowest/highest rates clk_round_rate() offers.
 */
static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
	if (policy->cpu != 0)
		return -EINVAL;

	cpuclk = clk_get(NULL, "cpu");
	if (IS_ERR(cpuclk)) {
		pr_debug("cpufreq: could not get CPU clk\n");
		return PTR_ERR(cpuclk);
	}

	/* clk rates are Hz; +500 rounds to the nearest kHz */
	policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
	policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
	policy->cpuinfo.transition_latency = 0;
	policy->cur = at32_get_speed(0);
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	/* NOTE(review): printk without a KERN_* level; pr_info would fit */
	printk("cpufreq: AT32AP CPU frequency driver\n");

	return 0;
}
108
/* cpufreq driver ops for the single-core AT32AP */
static struct cpufreq_driver at32_driver = {
	.name		= "at32ap",
	.owner		= THIS_MODULE,
	.init		= at32_cpufreq_driver_init,
	.verify		= at32_verify_speed,
	.target		= at32_set_target,
	.get		= at32_get_speed,
	.flags		= CPUFREQ_STICKY,
};

/* Registered at late_initcall time, after the earlier initcall levels */
static int __init at32_cpufreq_init(void)
{
	return cpufreq_register_driver(&at32_driver);
}
late_initcall(at32_cpufreq_init);
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
new file mode 100644
index 000000000000..995511e80bef
--- /dev/null
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -0,0 +1,247 @@
1/*
2 * Blackfin core clock scaling
3 *
4 * Copyright 2008-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
15#include <linux/fs.h>
16#include <linux/delay.h>
17#include <asm/blackfin.h>
18#include <asm/time.h>
19#include <asm/dpmc.h>
20
21
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
/*
 * All slots start as CPUFREQ_TABLE_END; bfin_init_tables() fills in up
 * to three real operating points, the last slot stays the terminator.
 * NOTE(review): __bfin_cpu_init() passes cclk as get_cclk()/1000, so the
 * stored values appear to be kHz despite the "in Hz" comment — confirm.
 */
static struct cpufreq_frequency_table bfin_freq_table[] = {
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 1,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 2,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
};

/* Per-operating-point divider settings, filled by bfin_init_tables() */
static struct bfin_dpm_state {
	unsigned int csel; /* system clock divider */
	unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
 * normalized to maximum frequency offset for CYCLES,
 * used in time-ts cycles clock source, but could be used
 * somewhere also.
 */
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
#endif
57
58/**************************************************************************/
/**************************************************************************/
/*
 * Populate bfin_freq_table/dpm_state_table with up to three operating
 * points (cclk, cclk/2, cclk/4), stopping early once a point would fall
 * below the minimum cclk allowed relative to sclk.
 */
static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
{

	unsigned long csel, min_cclk;
	int index;

	/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
	(!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
	&& defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
	min_cclk = sclk * 2;
#else
	min_cclk = sclk;
#endif

	/* Start from the core-clock divider currently programmed in hardware */
#ifndef CONFIG_BF60x
	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
#else
	csel = bfin_read32(CGU0_DIV) & 0x1F;
#endif

	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) {
		bfin_freq_table[index].frequency = cclk >> index;
#ifndef CONFIG_BF60x
		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
#else
		dpm_state_table[index].csel = csel;
#endif
		dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1;

		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
			bfin_freq_table[index].frequency,
			dpm_state_table[index].csel,
			dpm_state_table[index].tscale);
	}
	return;
}
96
97static void bfin_adjust_core_timer(void *info)
98{
99 unsigned int tscale;
100 unsigned int index = *(unsigned int *)info;
101
102 /* we have to adjust the core timer, because it is using cclk */
103 tscale = dpm_state_table[index].tscale;
104 bfin_write_TSCALE(tscale);
105 return;
106}
107
/* Current core clock in kHz; CoreA/B share one cclk so @cpu is unused. */
static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
	/* Both CoreA/B have the same core clock */
	return get_cclk() / 1000;
}
113
#ifdef CONFIG_BF60x
/*
 * Set the core clock to @new Hz via the "CCLK" clk.  Returns 0 on
 * success.  Note the unsigned long return type: -ENODEV and any
 * clk_set_rate() error come back as large positive values — the caller
 * (bfin_target()) only tests for non-zero, so this still works.
 */
unsigned long cpu_set_cclk(int cpu, unsigned long new)
{
	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "CCLK");
	if (IS_ERR(clk))
		return -ENODEV;

	ret = clk_set_rate(clk, new);
	clk_put(clk);
	return ret;
}
#endif
129
130static int bfin_target(struct cpufreq_policy *policy,
131 unsigned int target_freq, unsigned int relation)
132{
133#ifndef CONFIG_BF60x
134 unsigned int plldiv;
135#endif
136 unsigned int index;
137 unsigned long cclk_hz;
138 struct cpufreq_freqs freqs;
139 static unsigned long lpj_ref;
140 static unsigned int lpj_ref_freq;
141 int ret = 0;
142
143#if defined(CONFIG_CYCLES_CLOCKSOURCE)
144 cycles_t cycles;
145#endif
146
147 if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
148 relation, &index))
149 return -EINVAL;
150
151 cclk_hz = bfin_freq_table[index].frequency;
152
153 freqs.old = bfin_getfreq_khz(0);
154 freqs.new = cclk_hz;
155
156 pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
157 cclk_hz, target_freq, freqs.old);
158
159 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
160#ifndef CONFIG_BF60x
161 plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
162 bfin_write_PLL_DIV(plldiv);
163#else
164 ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
165 if (ret != 0) {
166 WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
167 return ret;
168 }
169#endif
170 on_each_cpu(bfin_adjust_core_timer, &index, 1);
171#if defined(CONFIG_CYCLES_CLOCKSOURCE)
172 cycles = get_cycles();
173 SSYNC();
174 cycles += 10; /* ~10 cycles we lose after get_cycles() */
175 __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
176 __bfin_cycles_mod = index;
177#endif
178 if (!lpj_ref_freq) {
179 lpj_ref = loops_per_jiffy;
180 lpj_ref_freq = freqs.old;
181 }
182 if (freqs.new != freqs.old) {
183 loops_per_jiffy = cpufreq_scale(lpj_ref,
184 lpj_ref_freq, freqs.new);
185 }
186
187 /* TODO: just test case for cycles clock source, remove later */
188 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
189
190 pr_debug("cpufreq: done\n");
191 return ret;
192}
193
/* Validate the policy limits against the generated frequency table. */
static int bfin_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}
198
/*
 * Per-policy init: build the frequency tables once (when called for
 * CPUFREQ_CPU), publish them in sysfs and report an assumed latency.
 */
static int __bfin_cpu_init(struct cpufreq_policy *policy)
{

	unsigned long cclk, sclk;

	cclk = get_cclk() / 1000;	/* kHz */
	sclk = get_sclk() / 1000;	/* kHz */

	if (policy->cpu == CPUFREQ_CPU)
		bfin_init_tables(cclk, sclk);

	policy->cpuinfo.transition_latency = 50000; /* 50us assumed */

	policy->cur = cclk;
	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}
216
/* Export the available frequencies to sysfs */
static struct freq_attr *bfin_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* NULL terminator for the sysfs attribute walker */
};

/* cpufreq driver ops for Blackfin core-clock scaling */
static struct cpufreq_driver bfin_driver = {
	.verify = bfin_verify_speed,
	.target = bfin_target,
	.get = bfin_getfreq_khz,
	.init = __bfin_cpu_init,
	.name = "bfin cpufreq",
	.owner = THIS_MODULE,
	.attr = bfin_freq_attr,
};

/* Module entry: register with the cpufreq core */
static int __init bfin_cpu_init(void)
{
	return cpufreq_register_driver(&bfin_driver);
}

/* Module exit: unregister from the cpufreq core */
static void __exit bfin_cpu_exit(void)
{
	cpufreq_unregister_driver(&bfin_driver);
}

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");

module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 37d23a0f8c56..3ab8294eab04 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -44,8 +44,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
44{ 44{
45 struct cpufreq_freqs freqs; 45 struct cpufreq_freqs freqs;
46 struct opp *opp; 46 struct opp *opp;
47 unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0; 47 unsigned long volt = 0, volt_old = 0, tol = 0;
48 unsigned int index, cpu; 48 long freq_Hz;
49 unsigned int index;
49 int ret; 50 int ret;
50 51
51 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, 52 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -65,10 +66,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
65 if (freqs.old == freqs.new) 66 if (freqs.old == freqs.new)
66 return 0; 67 return 0;
67 68
68 for_each_online_cpu(cpu) { 69 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
69 freqs.cpu = cpu;
70 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
71 }
72 70
73 if (cpu_reg) { 71 if (cpu_reg) {
74 rcu_read_lock(); 72 rcu_read_lock();
@@ -76,7 +74,9 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
76 if (IS_ERR(opp)) { 74 if (IS_ERR(opp)) {
77 rcu_read_unlock(); 75 rcu_read_unlock();
78 pr_err("failed to find OPP for %ld\n", freq_Hz); 76 pr_err("failed to find OPP for %ld\n", freq_Hz);
79 return PTR_ERR(opp); 77 freqs.new = freqs.old;
78 ret = PTR_ERR(opp);
79 goto post_notify;
80 } 80 }
81 volt = opp_get_voltage(opp); 81 volt = opp_get_voltage(opp);
82 rcu_read_unlock(); 82 rcu_read_unlock();
@@ -94,7 +94,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
94 if (ret) { 94 if (ret) {
95 pr_err("failed to scale voltage up: %d\n", ret); 95 pr_err("failed to scale voltage up: %d\n", ret);
96 freqs.new = freqs.old; 96 freqs.new = freqs.old;
97 return ret; 97 goto post_notify;
98 } 98 }
99 } 99 }
100 100
@@ -103,7 +103,8 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
103 pr_err("failed to set clock rate: %d\n", ret); 103 pr_err("failed to set clock rate: %d\n", ret);
104 if (cpu_reg) 104 if (cpu_reg)
105 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 105 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
106 return ret; 106 freqs.new = freqs.old;
107 goto post_notify;
107 } 108 }
108 109
109 /* scaling down? scale voltage after frequency */ 110 /* scaling down? scale voltage after frequency */
@@ -113,25 +114,19 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
113 pr_err("failed to scale voltage down: %d\n", ret); 114 pr_err("failed to scale voltage down: %d\n", ret);
114 clk_set_rate(cpu_clk, freqs.old * 1000); 115 clk_set_rate(cpu_clk, freqs.old * 1000);
115 freqs.new = freqs.old; 116 freqs.new = freqs.old;
116 return ret;
117 } 117 }
118 } 118 }
119 119
120 for_each_online_cpu(cpu) { 120post_notify:
121 freqs.cpu = cpu; 121 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
122 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
123 }
124 122
125 return 0; 123 return ret;
126} 124}
127 125
128static int cpu0_cpufreq_init(struct cpufreq_policy *policy) 126static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
129{ 127{
130 int ret; 128 int ret;
131 129
132 if (policy->cpu != 0)
133 return -EINVAL;
134
135 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); 130 ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
136 if (ret) { 131 if (ret) {
137 pr_err("invalid frequency table: %d\n", ret); 132 pr_err("invalid frequency table: %d\n", ret);
@@ -262,6 +257,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
262 } 257 }
263 258
264 of_node_put(np); 259 of_node_put(np);
260 of_node_put(parent);
265 return 0; 261 return 0;
266 262
267out_free_table: 263out_free_table:
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 13d311ee08b3..af1542d41440 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -263,7 +263,6 @@ static int nforce2_target(struct cpufreq_policy *policy,
263 263
264 freqs.old = nforce2_get(policy->cpu); 264 freqs.old = nforce2_get(policy->cpu);
265 freqs.new = target_fsb * fid * 100; 265 freqs.new = target_fsb * fid * 100;
266 freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
267 266
268 if (freqs.old == freqs.new) 267 if (freqs.old == freqs.new)
269 return 0; 268 return 0;
@@ -271,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
271 pr_debug("Old CPU frequency %d kHz, new %d kHz\n", 270 pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
272 freqs.old, freqs.new); 271 freqs.old, freqs.new);
273 272
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 273 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
275 274
276 /* Disable IRQs */ 275 /* Disable IRQs */
277 /* local_irq_save(flags); */ 276 /* local_irq_save(flags); */
@@ -286,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
286 /* Enable IRQs */ 285 /* Enable IRQs */
287 /* local_irq_restore(flags); */ 286 /* local_irq_restore(flags); */
288 287
289 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 288 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
290 289
291 return 0; 290 return 0;
292} 291}
@@ -360,12 +359,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
360 min_fsb = NFORCE2_MIN_FSB; 359 min_fsb = NFORCE2_MIN_FSB;
361 360
362 /* cpuinfo and default policy values */ 361 /* cpuinfo and default policy values */
363 policy->cpuinfo.min_freq = min_fsb * fid * 100; 362 policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
364 policy->cpuinfo.max_freq = max_fsb * fid * 100; 363 policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
365 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 364 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
366 policy->cur = nforce2_get(policy->cpu); 365 policy->cur = nforce2_get(policy->cpu);
367 policy->min = policy->cpuinfo.min_freq;
368 policy->max = policy->cpuinfo.max_freq;
369 366
370 return 0; 367 return 0;
371} 368}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b02824d092e7..a6f65954b0ab 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -39,13 +39,13 @@
39 * level driver of CPUFreq support, and its spinlock. This lock 39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array. 40 * also protects the cpufreq_cpu_data array.
41 */ 41 */
42static struct cpufreq_driver *cpufreq_driver; 42static struct cpufreq_driver __rcu *cpufreq_driver;
43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
44#ifdef CONFIG_HOTPLUG_CPU 44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */ 45/* This one keeps track of the previously set governor of a removed CPU */
46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
47#endif 47#endif
48static DEFINE_SPINLOCK(cpufreq_driver_lock); 48static DEFINE_RWLOCK(cpufreq_driver_lock);
49 49
50/* 50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure 51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -128,23 +128,36 @@ void disable_cpufreq(void)
128static LIST_HEAD(cpufreq_governor_list); 128static LIST_HEAD(cpufreq_governor_list);
129static DEFINE_MUTEX(cpufreq_governor_mutex); 129static DEFINE_MUTEX(cpufreq_governor_mutex);
130 130
131bool have_governor_per_policy(void)
132{
133 bool have_governor_per_policy;
134 rcu_read_lock();
135 have_governor_per_policy =
136 rcu_dereference(cpufreq_driver)->have_governor_per_policy;
137 rcu_read_unlock();
138 return have_governor_per_policy;
139}
140
131static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) 141static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
132{ 142{
133 struct cpufreq_policy *data; 143 struct cpufreq_policy *data;
144 struct cpufreq_driver *driver;
134 unsigned long flags; 145 unsigned long flags;
135 146
136 if (cpu >= nr_cpu_ids) 147 if (cpu >= nr_cpu_ids)
137 goto err_out; 148 goto err_out;
138 149
139 /* get the cpufreq driver */ 150 /* get the cpufreq driver */
140 spin_lock_irqsave(&cpufreq_driver_lock, flags); 151 rcu_read_lock();
152 driver = rcu_dereference(cpufreq_driver);
141 153
142 if (!cpufreq_driver) 154 if (!driver)
143 goto err_out_unlock; 155 goto err_out_unlock;
144 156
145 if (!try_module_get(cpufreq_driver->owner)) 157 if (!try_module_get(driver->owner))
146 goto err_out_unlock; 158 goto err_out_unlock;
147 159
160 read_lock_irqsave(&cpufreq_driver_lock, flags);
148 161
149 /* get the CPU */ 162 /* get the CPU */
150 data = per_cpu(cpufreq_cpu_data, cpu); 163 data = per_cpu(cpufreq_cpu_data, cpu);
@@ -155,13 +168,15 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
155 if (!sysfs && !kobject_get(&data->kobj)) 168 if (!sysfs && !kobject_get(&data->kobj))
156 goto err_out_put_module; 169 goto err_out_put_module;
157 170
158 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 171 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
172 rcu_read_unlock();
159 return data; 173 return data;
160 174
161err_out_put_module: 175err_out_put_module:
162 module_put(cpufreq_driver->owner); 176 module_put(driver->owner);
177 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
163err_out_unlock: 178err_out_unlock:
164 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 179 rcu_read_unlock();
165err_out: 180err_out:
166 return NULL; 181 return NULL;
167} 182}
@@ -184,7 +199,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
184{ 199{
185 if (!sysfs) 200 if (!sysfs)
186 kobject_put(&data->kobj); 201 kobject_put(&data->kobj);
187 module_put(cpufreq_driver->owner); 202 rcu_read_lock();
203 module_put(rcu_dereference(cpufreq_driver)->owner);
204 rcu_read_unlock();
188} 205}
189 206
190void cpufreq_cpu_put(struct cpufreq_policy *data) 207void cpufreq_cpu_put(struct cpufreq_policy *data)
@@ -244,32 +261,20 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
244#endif 261#endif
245 262
246 263
247/** 264void __cpufreq_notify_transition(struct cpufreq_policy *policy,
248 * cpufreq_notify_transition - call notifier chain and adjust_jiffies 265 struct cpufreq_freqs *freqs, unsigned int state)
249 * on frequency transition.
250 *
251 * This function calls the transition notifiers and the "adjust_jiffies"
252 * function. It is called twice on all CPU frequency changes that have
253 * external effects.
254 */
255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
256{ 266{
257 struct cpufreq_policy *policy;
258 unsigned long flags;
259
260 BUG_ON(irqs_disabled()); 267 BUG_ON(irqs_disabled());
261 268
262 if (cpufreq_disabled()) 269 if (cpufreq_disabled())
263 return; 270 return;
264 271
265 freqs->flags = cpufreq_driver->flags; 272 rcu_read_lock();
273 freqs->flags = rcu_dereference(cpufreq_driver)->flags;
274 rcu_read_unlock();
266 pr_debug("notification %u of frequency transition to %u kHz\n", 275 pr_debug("notification %u of frequency transition to %u kHz\n",
267 state, freqs->new); 276 state, freqs->new);
268 277
269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
272
273 switch (state) { 278 switch (state) {
274 279
275 case CPUFREQ_PRECHANGE: 280 case CPUFREQ_PRECHANGE:
@@ -277,7 +282,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
277 * which is not equal to what the cpufreq core thinks is 282 * which is not equal to what the cpufreq core thinks is
278 * "old frequency". 283 * "old frequency".
279 */ 284 */
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 285 if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) {
281 if ((policy) && (policy->cpu == freqs->cpu) && 286 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) { 287 (policy->cur) && (policy->cur != freqs->old)) {
283 pr_debug("Warning: CPU frequency is" 288 pr_debug("Warning: CPU frequency is"
@@ -303,6 +308,20 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
303 break; 308 break;
304 } 309 }
305} 310}
311/**
312 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
313 * on frequency transition.
314 *
315 * This function calls the transition notifiers and the "adjust_jiffies"
316 * function. It is called twice on all CPU frequency changes that have
317 * external effects.
318 */
319void cpufreq_notify_transition(struct cpufreq_policy *policy,
320 struct cpufreq_freqs *freqs, unsigned int state)
321{
322 for_each_cpu(freqs->cpu, policy->cpus)
323 __cpufreq_notify_transition(policy, freqs, state);
324}
306EXPORT_SYMBOL_GPL(cpufreq_notify_transition); 325EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
307 326
308 327
@@ -329,11 +348,21 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
329 struct cpufreq_governor **governor) 348 struct cpufreq_governor **governor)
330{ 349{
331 int err = -EINVAL; 350 int err = -EINVAL;
332 351 struct cpufreq_driver *driver;
333 if (!cpufreq_driver) 352 bool has_setpolicy;
353 bool has_target;
354
355 rcu_read_lock();
356 driver = rcu_dereference(cpufreq_driver);
357 if (!driver) {
358 rcu_read_unlock();
334 goto out; 359 goto out;
360 }
361 has_setpolicy = driver->setpolicy ? true : false;
362 has_target = driver->target ? true : false;
363 rcu_read_unlock();
335 364
336 if (cpufreq_driver->setpolicy) { 365 if (has_setpolicy) {
337 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 366 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
338 *policy = CPUFREQ_POLICY_PERFORMANCE; 367 *policy = CPUFREQ_POLICY_PERFORMANCE;
339 err = 0; 368 err = 0;
@@ -342,7 +371,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
342 *policy = CPUFREQ_POLICY_POWERSAVE; 371 *policy = CPUFREQ_POLICY_POWERSAVE;
343 err = 0; 372 err = 0;
344 } 373 }
345 } else if (cpufreq_driver->target) { 374 } else if (has_target) {
346 struct cpufreq_governor *t; 375 struct cpufreq_governor *t;
347 376
348 mutex_lock(&cpufreq_governor_mutex); 377 mutex_lock(&cpufreq_governor_mutex);
@@ -493,7 +522,12 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
493 */ 522 */
494static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 523static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
495{ 524{
496 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 525 ssize_t size;
526 rcu_read_lock();
527 size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
528 rcu_dereference(cpufreq_driver)->name);
529 rcu_read_unlock();
530 return size;
497} 531}
498 532
499/** 533/**
@@ -505,10 +539,13 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
505 ssize_t i = 0; 539 ssize_t i = 0;
506 struct cpufreq_governor *t; 540 struct cpufreq_governor *t;
507 541
508 if (!cpufreq_driver->target) { 542 rcu_read_lock();
543 if (!rcu_dereference(cpufreq_driver)->target) {
544 rcu_read_unlock();
509 i += sprintf(buf, "performance powersave"); 545 i += sprintf(buf, "performance powersave");
510 goto out; 546 goto out;
511 } 547 }
548 rcu_read_unlock();
512 549
513 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 550 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
514 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 551 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
@@ -586,9 +623,15 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
586static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 623static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
587{ 624{
588 unsigned int limit; 625 unsigned int limit;
626 int (*bios_limit)(int cpu, unsigned int *limit);
589 int ret; 627 int ret;
590 if (cpufreq_driver->bios_limit) { 628
591 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 629 rcu_read_lock();
630 bios_limit = rcu_dereference(cpufreq_driver)->bios_limit;
631 rcu_read_unlock();
632
633 if (bios_limit) {
634 ret = bios_limit(policy->cpu, &limit);
592 if (!ret) 635 if (!ret)
593 return sprintf(buf, "%u\n", limit); 636 return sprintf(buf, "%u\n", limit);
594 } 637 }
@@ -731,6 +774,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
731{ 774{
732 struct cpufreq_policy new_policy; 775 struct cpufreq_policy new_policy;
733 struct freq_attr **drv_attr; 776 struct freq_attr **drv_attr;
777 struct cpufreq_driver *driver;
734 unsigned long flags; 778 unsigned long flags;
735 int ret = 0; 779 int ret = 0;
736 unsigned int j; 780 unsigned int j;
@@ -742,35 +786,38 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
742 return ret; 786 return ret;
743 787
744 /* set up files for this cpu device */ 788 /* set up files for this cpu device */
745 drv_attr = cpufreq_driver->attr; 789 rcu_read_lock();
790 driver = rcu_dereference(cpufreq_driver);
791 drv_attr = driver->attr;
746 while ((drv_attr) && (*drv_attr)) { 792 while ((drv_attr) && (*drv_attr)) {
747 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 793 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
748 if (ret) 794 if (ret)
749 goto err_out_kobj_put; 795 goto err_out_unlock;
750 drv_attr++; 796 drv_attr++;
751 } 797 }
752 if (cpufreq_driver->get) { 798 if (driver->get) {
753 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 799 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
754 if (ret) 800 if (ret)
755 goto err_out_kobj_put; 801 goto err_out_unlock;
756 } 802 }
757 if (cpufreq_driver->target) { 803 if (driver->target) {
758 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 804 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
759 if (ret) 805 if (ret)
760 goto err_out_kobj_put; 806 goto err_out_unlock;
761 } 807 }
762 if (cpufreq_driver->bios_limit) { 808 if (driver->bios_limit) {
763 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 809 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
764 if (ret) 810 if (ret)
765 goto err_out_kobj_put; 811 goto err_out_unlock;
766 } 812 }
813 rcu_read_unlock();
767 814
768 spin_lock_irqsave(&cpufreq_driver_lock, flags); 815 write_lock_irqsave(&cpufreq_driver_lock, flags);
769 for_each_cpu(j, policy->cpus) { 816 for_each_cpu(j, policy->cpus) {
770 per_cpu(cpufreq_cpu_data, j) = policy; 817 per_cpu(cpufreq_cpu_data, j) = policy;
771 per_cpu(cpufreq_policy_cpu, j) = policy->cpu; 818 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
772 } 819 }
773 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 820 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
774 821
775 ret = cpufreq_add_dev_symlink(cpu, policy); 822 ret = cpufreq_add_dev_symlink(cpu, policy);
776 if (ret) 823 if (ret)
@@ -786,12 +833,20 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
786 policy->user_policy.governor = policy->governor; 833 policy->user_policy.governor = policy->governor;
787 834
788 if (ret) { 835 if (ret) {
836 int (*exit)(struct cpufreq_policy *policy);
837
789 pr_debug("setting policy failed\n"); 838 pr_debug("setting policy failed\n");
790 if (cpufreq_driver->exit) 839 rcu_read_lock();
791 cpufreq_driver->exit(policy); 840 exit = rcu_dereference(cpufreq_driver)->exit;
841 rcu_read_unlock();
842 if (exit)
843 exit(policy);
844
792 } 845 }
793 return ret; 846 return ret;
794 847
848err_out_unlock:
849 rcu_read_unlock();
795err_out_kobj_put: 850err_out_kobj_put:
796 kobject_put(&policy->kobj); 851 kobject_put(&policy->kobj);
797 wait_for_completion(&policy->kobj_unregister); 852 wait_for_completion(&policy->kobj_unregister);
@@ -803,27 +858,34 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
803 struct device *dev) 858 struct device *dev)
804{ 859{
805 struct cpufreq_policy *policy; 860 struct cpufreq_policy *policy;
806 int ret = 0; 861 int ret = 0, has_target = 0;
807 unsigned long flags; 862 unsigned long flags;
808 863
809 policy = cpufreq_cpu_get(sibling); 864 policy = cpufreq_cpu_get(sibling);
810 WARN_ON(!policy); 865 WARN_ON(!policy);
811 866
812 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 867 rcu_read_lock();
868 has_target = !!rcu_dereference(cpufreq_driver)->target;
869 rcu_read_unlock();
870
871 if (has_target)
872 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
813 873
814 lock_policy_rwsem_write(sibling); 874 lock_policy_rwsem_write(sibling);
815 875
816 spin_lock_irqsave(&cpufreq_driver_lock, flags); 876 write_lock_irqsave(&cpufreq_driver_lock, flags);
817 877
818 cpumask_set_cpu(cpu, policy->cpus); 878 cpumask_set_cpu(cpu, policy->cpus);
819 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; 879 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
820 per_cpu(cpufreq_cpu_data, cpu) = policy; 880 per_cpu(cpufreq_cpu_data, cpu) = policy;
821 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 881 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
822 882
823 unlock_policy_rwsem_write(sibling); 883 unlock_policy_rwsem_write(sibling);
824 884
825 __cpufreq_governor(policy, CPUFREQ_GOV_START); 885 if (has_target) {
826 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 886 __cpufreq_governor(policy, CPUFREQ_GOV_START);
887 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
888 }
827 889
828 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 890 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
829 if (ret) { 891 if (ret) {
@@ -849,6 +911,8 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
849 unsigned int j, cpu = dev->id; 911 unsigned int j, cpu = dev->id;
850 int ret = -ENOMEM; 912 int ret = -ENOMEM;
851 struct cpufreq_policy *policy; 913 struct cpufreq_policy *policy;
914 struct cpufreq_driver *driver;
915 int (*init)(struct cpufreq_policy *policy);
852 unsigned long flags; 916 unsigned long flags;
853#ifdef CONFIG_HOTPLUG_CPU 917#ifdef CONFIG_HOTPLUG_CPU
854 struct cpufreq_governor *gov; 918 struct cpufreq_governor *gov;
@@ -871,22 +935,27 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
871 935
872#ifdef CONFIG_HOTPLUG_CPU 936#ifdef CONFIG_HOTPLUG_CPU
873 /* Check if this cpu was hot-unplugged earlier and has siblings */ 937 /* Check if this cpu was hot-unplugged earlier and has siblings */
874 spin_lock_irqsave(&cpufreq_driver_lock, flags); 938 read_lock_irqsave(&cpufreq_driver_lock, flags);
875 for_each_online_cpu(sibling) { 939 for_each_online_cpu(sibling) {
876 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); 940 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
877 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { 941 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
878 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 942 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
879 return cpufreq_add_policy_cpu(cpu, sibling, dev); 943 return cpufreq_add_policy_cpu(cpu, sibling, dev);
880 } 944 }
881 } 945 }
882 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 946 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
883#endif 947#endif
884#endif 948#endif
885 949
886 if (!try_module_get(cpufreq_driver->owner)) { 950 rcu_read_lock();
951 driver = rcu_dereference(cpufreq_driver);
952 if (!try_module_get(driver->owner)) {
953 rcu_read_unlock();
887 ret = -EINVAL; 954 ret = -EINVAL;
888 goto module_out; 955 goto module_out;
889 } 956 }
957 init = driver->init;
958 rcu_read_unlock();
890 959
891 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); 960 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
892 if (!policy) 961 if (!policy)
@@ -911,7 +980,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
911 /* call driver. From then on the cpufreq must be able 980 /* call driver. From then on the cpufreq must be able
912 * to accept all calls to ->verify and ->setpolicy for this CPU 981 * to accept all calls to ->verify and ->setpolicy for this CPU
913 */ 982 */
914 ret = cpufreq_driver->init(policy); 983 ret = init(policy);
915 if (ret) { 984 if (ret) {
916 pr_debug("initialization failed\n"); 985 pr_debug("initialization failed\n");
917 goto err_set_policy_cpu; 986 goto err_set_policy_cpu;
@@ -946,16 +1015,18 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
946 goto err_out_unregister; 1015 goto err_out_unregister;
947 1016
948 kobject_uevent(&policy->kobj, KOBJ_ADD); 1017 kobject_uevent(&policy->kobj, KOBJ_ADD);
949 module_put(cpufreq_driver->owner); 1018 rcu_read_lock();
1019 module_put(rcu_dereference(cpufreq_driver)->owner);
1020 rcu_read_unlock();
950 pr_debug("initialization complete\n"); 1021 pr_debug("initialization complete\n");
951 1022
952 return 0; 1023 return 0;
953 1024
954err_out_unregister: 1025err_out_unregister:
955 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1026 write_lock_irqsave(&cpufreq_driver_lock, flags);
956 for_each_cpu(j, policy->cpus) 1027 for_each_cpu(j, policy->cpus)
957 per_cpu(cpufreq_cpu_data, j) = NULL; 1028 per_cpu(cpufreq_cpu_data, j) = NULL;
958 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1029 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
959 1030
960 kobject_put(&policy->kobj); 1031 kobject_put(&policy->kobj);
961 wait_for_completion(&policy->kobj_unregister); 1032 wait_for_completion(&policy->kobj_unregister);
@@ -968,7 +1039,9 @@ err_free_cpumask:
968err_free_policy: 1039err_free_policy:
969 kfree(policy); 1040 kfree(policy);
970nomem_out: 1041nomem_out:
971 module_put(cpufreq_driver->owner); 1042 rcu_read_lock();
1043 module_put(rcu_dereference(cpufreq_driver)->owner);
1044 rcu_read_unlock();
972module_out: 1045module_out:
973 return ret; 1046 return ret;
974} 1047}
@@ -1002,36 +1075,46 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1002 unsigned int cpu = dev->id, ret, cpus; 1075 unsigned int cpu = dev->id, ret, cpus;
1003 unsigned long flags; 1076 unsigned long flags;
1004 struct cpufreq_policy *data; 1077 struct cpufreq_policy *data;
1078 struct cpufreq_driver *driver;
1005 struct kobject *kobj; 1079 struct kobject *kobj;
1006 struct completion *cmp; 1080 struct completion *cmp;
1007 struct device *cpu_dev; 1081 struct device *cpu_dev;
1082 bool has_target;
1083 int (*exit)(struct cpufreq_policy *policy);
1008 1084
1009 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1085 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1010 1086
1011 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1087 write_lock_irqsave(&cpufreq_driver_lock, flags);
1012 1088
1013 data = per_cpu(cpufreq_cpu_data, cpu); 1089 data = per_cpu(cpufreq_cpu_data, cpu);
1014 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1090 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1015 1091
1016 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1092 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1017 1093
1018 if (!data) { 1094 if (!data) {
1019 pr_debug("%s: No cpu_data found\n", __func__); 1095 pr_debug("%s: No cpu_data found\n", __func__);
1020 return -EINVAL; 1096 return -EINVAL;
1021 } 1097 }
1022 1098
1023 if (cpufreq_driver->target) 1099 rcu_read_lock();
1100 driver = rcu_dereference(cpufreq_driver);
1101 has_target = driver->target ? true : false;
1102 exit = driver->exit;
1103 if (has_target)
1024 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1104 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1025 1105
1026#ifdef CONFIG_HOTPLUG_CPU 1106#ifdef CONFIG_HOTPLUG_CPU
1027 if (!cpufreq_driver->setpolicy) 1107 if (!driver->setpolicy)
1028 strncpy(per_cpu(cpufreq_cpu_governor, cpu), 1108 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1029 data->governor->name, CPUFREQ_NAME_LEN); 1109 data->governor->name, CPUFREQ_NAME_LEN);
1030#endif 1110#endif
1111 rcu_read_unlock();
1031 1112
1032 WARN_ON(lock_policy_rwsem_write(cpu)); 1113 WARN_ON(lock_policy_rwsem_write(cpu));
1033 cpus = cpumask_weight(data->cpus); 1114 cpus = cpumask_weight(data->cpus);
1034 cpumask_clear_cpu(cpu, data->cpus); 1115
1116 if (cpus > 1)
1117 cpumask_clear_cpu(cpu, data->cpus);
1035 unlock_policy_rwsem_write(cpu); 1118 unlock_policy_rwsem_write(cpu);
1036 1119
1037 if (cpu != data->cpu) { 1120 if (cpu != data->cpu) {
@@ -1047,9 +1130,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1047 WARN_ON(lock_policy_rwsem_write(cpu)); 1130 WARN_ON(lock_policy_rwsem_write(cpu));
1048 cpumask_set_cpu(cpu, data->cpus); 1131 cpumask_set_cpu(cpu, data->cpus);
1049 1132
1050 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1133 write_lock_irqsave(&cpufreq_driver_lock, flags);
1051 per_cpu(cpufreq_cpu_data, cpu) = data; 1134 per_cpu(cpufreq_cpu_data, cpu) = data;
1052 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1135 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1053 1136
1054 unlock_policy_rwsem_write(cpu); 1137 unlock_policy_rwsem_write(cpu);
1055 1138
@@ -1070,6 +1153,9 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1070 1153
1071 /* If cpu is last user of policy, free policy */ 1154 /* If cpu is last user of policy, free policy */
1072 if (cpus == 1) { 1155 if (cpus == 1) {
1156 if (has_target)
1157 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1158
1073 lock_policy_rwsem_read(cpu); 1159 lock_policy_rwsem_read(cpu);
1074 kobj = &data->kobj; 1160 kobj = &data->kobj;
1075 cmp = &data->kobj_unregister; 1161 cmp = &data->kobj_unregister;
@@ -1084,13 +1170,13 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1084 wait_for_completion(cmp); 1170 wait_for_completion(cmp);
1085 pr_debug("wait complete\n"); 1171 pr_debug("wait complete\n");
1086 1172
1087 if (cpufreq_driver->exit) 1173 if (exit)
1088 cpufreq_driver->exit(data); 1174 exit(data);
1089 1175
1090 free_cpumask_var(data->related_cpus); 1176 free_cpumask_var(data->related_cpus);
1091 free_cpumask_var(data->cpus); 1177 free_cpumask_var(data->cpus);
1092 kfree(data); 1178 kfree(data);
1093 } else if (cpufreq_driver->target) { 1179 } else if (has_target) {
1094 __cpufreq_governor(data, CPUFREQ_GOV_START); 1180 __cpufreq_governor(data, CPUFREQ_GOV_START);
1095 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1181 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1096 } 1182 }
@@ -1134,16 +1220,23 @@ static void handle_update(struct work_struct *work)
1134static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1220static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1135 unsigned int new_freq) 1221 unsigned int new_freq)
1136{ 1222{
1223 struct cpufreq_policy *policy;
1137 struct cpufreq_freqs freqs; 1224 struct cpufreq_freqs freqs;
1225 unsigned long flags;
1226
1138 1227
1139 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " 1228 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1140 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 1229 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1141 1230
1142 freqs.cpu = cpu;
1143 freqs.old = old_freq; 1231 freqs.old = old_freq;
1144 freqs.new = new_freq; 1232 freqs.new = new_freq;
1145 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1233
1146 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1234 read_lock_irqsave(&cpufreq_driver_lock, flags);
1235 policy = per_cpu(cpufreq_cpu_data, cpu);
1236 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1237
1238 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1239 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1147} 1240}
1148 1241
1149 1242
@@ -1157,10 +1250,18 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1157unsigned int cpufreq_quick_get(unsigned int cpu) 1250unsigned int cpufreq_quick_get(unsigned int cpu)
1158{ 1251{
1159 struct cpufreq_policy *policy; 1252 struct cpufreq_policy *policy;
1253 struct cpufreq_driver *driver;
1254 unsigned int (*get)(unsigned int cpu);
1160 unsigned int ret_freq = 0; 1255 unsigned int ret_freq = 0;
1161 1256
1162 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 1257 rcu_read_lock();
1163 return cpufreq_driver->get(cpu); 1258 driver = rcu_dereference(cpufreq_driver);
1259 if (driver && driver->setpolicy && driver->get) {
1260 get = driver->get;
1261 rcu_read_unlock();
1262 return get(cpu);
1263 }
1264 rcu_read_unlock();
1164 1265
1165 policy = cpufreq_cpu_get(cpu); 1266 policy = cpufreq_cpu_get(cpu);
1166 if (policy) { 1267 if (policy) {
@@ -1196,15 +1297,26 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
1196static unsigned int __cpufreq_get(unsigned int cpu) 1297static unsigned int __cpufreq_get(unsigned int cpu)
1197{ 1298{
1198 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1299 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1300 struct cpufreq_driver *driver;
1301 unsigned int (*get)(unsigned int cpu);
1199 unsigned int ret_freq = 0; 1302 unsigned int ret_freq = 0;
1303 u8 flags;
1304
1200 1305
1201 if (!cpufreq_driver->get) 1306 rcu_read_lock();
1307 driver = rcu_dereference(cpufreq_driver);
1308 if (!driver->get) {
1309 rcu_read_unlock();
1202 return ret_freq; 1310 return ret_freq;
1311 }
1312 flags = driver->flags;
1313 get = driver->get;
1314 rcu_read_unlock();
1203 1315
1204 ret_freq = cpufreq_driver->get(cpu); 1316 ret_freq = get(cpu);
1205 1317
1206 if (ret_freq && policy->cur && 1318 if (ret_freq && policy->cur &&
1207 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1319 !(flags & CPUFREQ_CONST_LOOPS)) {
1208 /* verify no discrepancy between actual and 1320 /* verify no discrepancy between actual and
1209 saved value exists */ 1321 saved value exists */
1210 if (unlikely(ret_freq != policy->cur)) { 1322 if (unlikely(ret_freq != policy->cur)) {
@@ -1260,6 +1372,7 @@ static struct subsys_interface cpufreq_interface = {
1260 */ 1372 */
1261static int cpufreq_bp_suspend(void) 1373static int cpufreq_bp_suspend(void)
1262{ 1374{
1375 int (*suspend)(struct cpufreq_policy *policy);
1263 int ret = 0; 1376 int ret = 0;
1264 1377
1265 int cpu = smp_processor_id(); 1378 int cpu = smp_processor_id();
@@ -1272,8 +1385,11 @@ static int cpufreq_bp_suspend(void)
1272 if (!cpu_policy) 1385 if (!cpu_policy)
1273 return 0; 1386 return 0;
1274 1387
1275 if (cpufreq_driver->suspend) { 1388 rcu_read_lock();
1276 ret = cpufreq_driver->suspend(cpu_policy); 1389 suspend = rcu_dereference(cpufreq_driver)->suspend;
1390 rcu_read_unlock();
1391 if (suspend) {
1392 ret = suspend(cpu_policy);
1277 if (ret) 1393 if (ret)
1278 printk(KERN_ERR "cpufreq: suspend failed in ->suspend " 1394 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1279 "step on CPU %u\n", cpu_policy->cpu); 1395 "step on CPU %u\n", cpu_policy->cpu);
@@ -1299,6 +1415,7 @@ static int cpufreq_bp_suspend(void)
1299static void cpufreq_bp_resume(void) 1415static void cpufreq_bp_resume(void)
1300{ 1416{
1301 int ret = 0; 1417 int ret = 0;
1418 int (*resume)(struct cpufreq_policy *policy);
1302 1419
1303 int cpu = smp_processor_id(); 1420 int cpu = smp_processor_id();
1304 struct cpufreq_policy *cpu_policy; 1421 struct cpufreq_policy *cpu_policy;
@@ -1310,8 +1427,12 @@ static void cpufreq_bp_resume(void)
1310 if (!cpu_policy) 1427 if (!cpu_policy)
1311 return; 1428 return;
1312 1429
1313 if (cpufreq_driver->resume) { 1430 rcu_read_lock();
1314 ret = cpufreq_driver->resume(cpu_policy); 1431 resume = rcu_dereference(cpufreq_driver)->resume;
1432 rcu_read_unlock();
1433
1434 if (resume) {
1435 ret = resume(cpu_policy);
1315 if (ret) { 1436 if (ret) {
1316 printk(KERN_ERR "cpufreq: resume failed in ->resume " 1437 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1317 "step on CPU %u\n", cpu_policy->cpu); 1438 "step on CPU %u\n", cpu_policy->cpu);
@@ -1338,10 +1459,14 @@ static struct syscore_ops cpufreq_syscore_ops = {
1338 */ 1459 */
1339const char *cpufreq_get_current_driver(void) 1460const char *cpufreq_get_current_driver(void)
1340{ 1461{
1341 if (cpufreq_driver) 1462 struct cpufreq_driver *driver;
1342 return cpufreq_driver->name; 1463 const char *name = NULL;
1343 1464 rcu_read_lock();
1344 return NULL; 1465 driver = rcu_dereference(cpufreq_driver);
1466 if (driver)
1467 name = driver->name;
1468 rcu_read_unlock();
1469 return name;
1345} 1470}
1346EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 1471EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1347 1472
@@ -1435,6 +1560,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1435{ 1560{
1436 int retval = -EINVAL; 1561 int retval = -EINVAL;
1437 unsigned int old_target_freq = target_freq; 1562 unsigned int old_target_freq = target_freq;
1563 int (*target)(struct cpufreq_policy *policy,
1564 unsigned int target_freq,
1565 unsigned int relation);
1438 1566
1439 if (cpufreq_disabled()) 1567 if (cpufreq_disabled())
1440 return -ENODEV; 1568 return -ENODEV;
@@ -1451,8 +1579,11 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1451 if (target_freq == policy->cur) 1579 if (target_freq == policy->cur)
1452 return 0; 1580 return 0;
1453 1581
1454 if (cpufreq_driver->target) 1582 rcu_read_lock();
1455 retval = cpufreq_driver->target(policy, target_freq, relation); 1583 target = rcu_dereference(cpufreq_driver)->target;
1584 rcu_read_unlock();
1585 if (target)
1586 retval = target(policy, target_freq, relation);
1456 1587
1457 return retval; 1588 return retval;
1458} 1589}
@@ -1485,18 +1616,24 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1485int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) 1616int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1486{ 1617{
1487 int ret = 0; 1618 int ret = 0;
1619 unsigned int (*getavg)(struct cpufreq_policy *policy,
1620 unsigned int cpu);
1488 1621
1489 if (cpufreq_disabled()) 1622 if (cpufreq_disabled())
1490 return ret; 1623 return ret;
1491 1624
1492 if (!cpufreq_driver->getavg) 1625 rcu_read_lock();
1626 getavg = rcu_dereference(cpufreq_driver)->getavg;
1627 rcu_read_unlock();
1628
1629 if (!getavg)
1493 return 0; 1630 return 0;
1494 1631
1495 policy = cpufreq_cpu_get(policy->cpu); 1632 policy = cpufreq_cpu_get(policy->cpu);
1496 if (!policy) 1633 if (!policy)
1497 return -EINVAL; 1634 return -EINVAL;
1498 1635
1499 ret = cpufreq_driver->getavg(policy, cpu); 1636 ret = getavg(policy, cpu);
1500 1637
1501 cpufreq_cpu_put(policy); 1638 cpufreq_cpu_put(policy);
1502 return ret; 1639 return ret;
@@ -1544,10 +1681,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
1544 policy->cpu, event); 1681 policy->cpu, event);
1545 ret = policy->governor->governor(policy, event); 1682 ret = policy->governor->governor(policy, event);
1546 1683
1547 if (event == CPUFREQ_GOV_START) 1684 if (!ret) {
1548 policy->governor->initialized++; 1685 if (event == CPUFREQ_GOV_POLICY_INIT)
1549 else if (event == CPUFREQ_GOV_STOP) 1686 policy->governor->initialized++;
1550 policy->governor->initialized--; 1687 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1688 policy->governor->initialized--;
1689 }
1551 1690
1552 /* we keep one module reference alive for 1691 /* we keep one module reference alive for
1553 each CPU governed by this CPU */ 1692 each CPU governed by this CPU */
@@ -1651,7 +1790,10 @@ EXPORT_SYMBOL(cpufreq_get_policy);
1651static int __cpufreq_set_policy(struct cpufreq_policy *data, 1790static int __cpufreq_set_policy(struct cpufreq_policy *data,
1652 struct cpufreq_policy *policy) 1791 struct cpufreq_policy *policy)
1653{ 1792{
1654 int ret = 0; 1793 int ret = 0, failed = 1;
1794 struct cpufreq_driver *driver;
1795 int (*verify)(struct cpufreq_policy *policy);
1796 int (*setpolicy)(struct cpufreq_policy *policy);
1655 1797
1656 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1798 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1657 policy->min, policy->max); 1799 policy->min, policy->max);
@@ -1665,7 +1807,13 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1665 } 1807 }
1666 1808
1667 /* verify the cpu speed can be set within this limit */ 1809 /* verify the cpu speed can be set within this limit */
1668 ret = cpufreq_driver->verify(policy); 1810 rcu_read_lock();
1811 driver = rcu_dereference(cpufreq_driver);
1812 verify = driver->verify;
1813 setpolicy = driver->setpolicy;
1814 rcu_read_unlock();
1815
1816 ret = verify(policy);
1669 if (ret) 1817 if (ret)
1670 goto error_out; 1818 goto error_out;
1671 1819
@@ -1679,7 +1827,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1679 1827
1680 /* verify the cpu speed can be set within this limit, 1828 /* verify the cpu speed can be set within this limit,
1681 which might be different to the first one */ 1829 which might be different to the first one */
1682 ret = cpufreq_driver->verify(policy); 1830 ret = verify(policy);
1683 if (ret) 1831 if (ret)
1684 goto error_out; 1832 goto error_out;
1685 1833
@@ -1693,10 +1841,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1693 pr_debug("new min and max freqs are %u - %u kHz\n", 1841 pr_debug("new min and max freqs are %u - %u kHz\n",
1694 data->min, data->max); 1842 data->min, data->max);
1695 1843
1696 if (cpufreq_driver->setpolicy) { 1844 if (setpolicy) {
1697 data->policy = policy->policy; 1845 data->policy = policy->policy;
1698 pr_debug("setting range\n"); 1846 pr_debug("setting range\n");
1699 ret = cpufreq_driver->setpolicy(policy); 1847 ret = setpolicy(policy);
1700 } else { 1848 } else {
1701 if (policy->governor != data->governor) { 1849 if (policy->governor != data->governor) {
1702 /* save old, working values */ 1850 /* save old, working values */
@@ -1705,18 +1853,31 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1705 pr_debug("governor switch\n"); 1853 pr_debug("governor switch\n");
1706 1854
1707 /* end old governor */ 1855 /* end old governor */
1708 if (data->governor) 1856 if (data->governor) {
1709 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1857 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1858 __cpufreq_governor(data,
1859 CPUFREQ_GOV_POLICY_EXIT);
1860 }
1710 1861
1711 /* start new governor */ 1862 /* start new governor */
1712 data->governor = policy->governor; 1863 data->governor = policy->governor;
1713 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1864 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1865 if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
1866 failed = 0;
1867 else
1868 __cpufreq_governor(data,
1869 CPUFREQ_GOV_POLICY_EXIT);
1870 }
1871
1872 if (failed) {
1714 /* new governor failed, so re-start old one */ 1873 /* new governor failed, so re-start old one */
1715 pr_debug("starting governor %s failed\n", 1874 pr_debug("starting governor %s failed\n",
1716 data->governor->name); 1875 data->governor->name);
1717 if (old_gov) { 1876 if (old_gov) {
1718 data->governor = old_gov; 1877 data->governor = old_gov;
1719 __cpufreq_governor(data, 1878 __cpufreq_governor(data,
1879 CPUFREQ_GOV_POLICY_INIT);
1880 __cpufreq_governor(data,
1720 CPUFREQ_GOV_START); 1881 CPUFREQ_GOV_START);
1721 } 1882 }
1722 ret = -EINVAL; 1883 ret = -EINVAL;
@@ -1743,6 +1904,11 @@ int cpufreq_update_policy(unsigned int cpu)
1743{ 1904{
1744 struct cpufreq_policy *data = cpufreq_cpu_get(cpu); 1905 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1745 struct cpufreq_policy policy; 1906 struct cpufreq_policy policy;
1907 struct cpufreq_driver *driver;
1908 unsigned int (*get)(unsigned int cpu);
1909 int (*target)(struct cpufreq_policy *policy,
1910 unsigned int target_freq,
1911 unsigned int relation);
1746 int ret; 1912 int ret;
1747 1913
1748 if (!data) { 1914 if (!data) {
@@ -1764,13 +1930,18 @@ int cpufreq_update_policy(unsigned int cpu)
1764 1930
1765 /* BIOS might change freq behind our back 1931 /* BIOS might change freq behind our back
1766 -> ask driver for current freq and notify governors about a change */ 1932 -> ask driver for current freq and notify governors about a change */
1767 if (cpufreq_driver->get) { 1933 rcu_read_lock();
1768 policy.cur = cpufreq_driver->get(cpu); 1934 driver = rcu_access_pointer(cpufreq_driver);
1935 get = driver->get;
1936 target = driver->target;
1937 rcu_read_unlock();
1938 if (get) {
1939 policy.cur = get(cpu);
1769 if (!data->cur) { 1940 if (!data->cur) {
1770 pr_debug("Driver did not initialize current freq"); 1941 pr_debug("Driver did not initialize current freq");
1771 data->cur = policy.cur; 1942 data->cur = policy.cur;
1772 } else { 1943 } else {
1773 if (data->cur != policy.cur && cpufreq_driver->target) 1944 if (data->cur != policy.cur && target)
1774 cpufreq_out_of_sync(cpu, data->cur, 1945 cpufreq_out_of_sync(cpu, data->cur,
1775 policy.cur); 1946 policy.cur);
1776 } 1947 }
@@ -1848,19 +2019,20 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1848 if (driver_data->setpolicy) 2019 if (driver_data->setpolicy)
1849 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2020 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1850 2021
1851 spin_lock_irqsave(&cpufreq_driver_lock, flags); 2022 write_lock_irqsave(&cpufreq_driver_lock, flags);
1852 if (cpufreq_driver) { 2023 if (rcu_access_pointer(cpufreq_driver)) {
1853 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 2024 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1854 return -EBUSY; 2025 return -EBUSY;
1855 } 2026 }
1856 cpufreq_driver = driver_data; 2027 rcu_assign_pointer(cpufreq_driver, driver_data);
1857 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 2028 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2029 synchronize_rcu();
1858 2030
1859 ret = subsys_interface_register(&cpufreq_interface); 2031 ret = subsys_interface_register(&cpufreq_interface);
1860 if (ret) 2032 if (ret)
1861 goto err_null_driver; 2033 goto err_null_driver;
1862 2034
1863 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 2035 if (!(driver_data->flags & CPUFREQ_STICKY)) {
1864 int i; 2036 int i;
1865 ret = -ENODEV; 2037 ret = -ENODEV;
1866 2038
@@ -1886,9 +2058,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1886err_if_unreg: 2058err_if_unreg:
1887 subsys_interface_unregister(&cpufreq_interface); 2059 subsys_interface_unregister(&cpufreq_interface);
1888err_null_driver: 2060err_null_driver:
1889 spin_lock_irqsave(&cpufreq_driver_lock, flags); 2061 write_lock_irqsave(&cpufreq_driver_lock, flags);
1890 cpufreq_driver = NULL; 2062 rcu_assign_pointer(cpufreq_driver, NULL);
1891 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 2063 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2064 synchronize_rcu();
1892 return ret; 2065 return ret;
1893} 2066}
1894EXPORT_SYMBOL_GPL(cpufreq_register_driver); 2067EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -1905,18 +2078,25 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1905int cpufreq_unregister_driver(struct cpufreq_driver *driver) 2078int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1906{ 2079{
1907 unsigned long flags; 2080 unsigned long flags;
2081 struct cpufreq_driver *old_driver;
1908 2082
1909 if (!cpufreq_driver || (driver != cpufreq_driver)) 2083 rcu_read_lock();
2084 old_driver = rcu_access_pointer(cpufreq_driver);
2085 if (!old_driver || (driver != old_driver)) {
2086 rcu_read_unlock();
1910 return -EINVAL; 2087 return -EINVAL;
2088 }
2089 rcu_read_unlock();
1911 2090
1912 pr_debug("unregistering driver %s\n", driver->name); 2091 pr_debug("unregistering driver %s\n", driver->name);
1913 2092
1914 subsys_interface_unregister(&cpufreq_interface); 2093 subsys_interface_unregister(&cpufreq_interface);
1915 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 2094 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1916 2095
1917 spin_lock_irqsave(&cpufreq_driver_lock, flags); 2096 write_lock_irqsave(&cpufreq_driver_lock, flags);
1918 cpufreq_driver = NULL; 2097 rcu_assign_pointer(cpufreq_driver, NULL);
1919 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 2098 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2099 synchronize_rcu();
1920 2100
1921 return 0; 2101 return 0;
1922} 2102}
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4fd0006b1291..0ceb2eff5a7e 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -20,6 +20,7 @@
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/notifier.h> 21#include <linux/notifier.h>
22#include <linux/percpu-defs.h> 22#include <linux/percpu-defs.h>
23#include <linux/slab.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
24#include <linux/types.h> 25#include <linux/types.h>
25 26
@@ -28,25 +29,29 @@
28/* Conservative governor macros */ 29/* Conservative governor macros */
29#define DEF_FREQUENCY_UP_THRESHOLD (80) 30#define DEF_FREQUENCY_UP_THRESHOLD (80)
30#define DEF_FREQUENCY_DOWN_THRESHOLD (20) 31#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
32#define DEF_FREQUENCY_STEP (5)
31#define DEF_SAMPLING_DOWN_FACTOR (1) 33#define DEF_SAMPLING_DOWN_FACTOR (1)
32#define MAX_SAMPLING_DOWN_FACTOR (10) 34#define MAX_SAMPLING_DOWN_FACTOR (10)
33 35
34static struct dbs_data cs_dbs_data;
35static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); 36static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
36 37
37static struct cs_dbs_tuners cs_tuners = { 38static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
38 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 39 struct cpufreq_policy *policy)
39 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, 40{
40 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 41 unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
41 .ignore_nice = 0, 42
42 .freq_step = 5, 43 /* max freq cannot be less than 100. But who knows... */
43}; 44 if (unlikely(freq_target == 0))
45 freq_target = DEF_FREQUENCY_STEP;
46
47 return freq_target;
48}
44 49
45/* 50/*
46 * Every sampling_rate, we check, if current idle time is less than 20% 51 * Every sampling_rate, we check, if current idle time is less than 20%
47 * (default), then we try to increase frequency Every sampling_rate * 52 * (default), then we try to increase frequency. Every sampling_rate *
48 * sampling_down_factor, we check, if current idle time is more than 80%, then 53 * sampling_down_factor, we check, if current idle time is more than 80%
49 * we try to decrease frequency 54 * (default), then we try to decrease frequency
50 * 55 *
51 * Any frequency increase takes it to the maximum frequency. Frequency reduction 56 * Any frequency increase takes it to the maximum frequency. Frequency reduction
52 * happens at minimum steps of 5% (default) of maximum frequency 57 * happens at minimum steps of 5% (default) of maximum frequency
@@ -55,30 +60,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
55{ 60{
56 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); 61 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
57 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 62 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
58 unsigned int freq_target; 63 struct dbs_data *dbs_data = policy->governor_data;
64 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
59 65
60 /* 66 /*
61 * break out if we 'cannot' reduce the speed as the user might 67 * break out if we 'cannot' reduce the speed as the user might
62 * want freq_step to be zero 68 * want freq_step to be zero
63 */ 69 */
64 if (cs_tuners.freq_step == 0) 70 if (cs_tuners->freq_step == 0)
65 return; 71 return;
66 72
67 /* Check for frequency increase */ 73 /* Check for frequency increase */
68 if (load > cs_tuners.up_threshold) { 74 if (load > cs_tuners->up_threshold) {
69 dbs_info->down_skip = 0; 75 dbs_info->down_skip = 0;
70 76
71 /* if we are already at full speed then break out early */ 77 /* if we are already at full speed then break out early */
72 if (dbs_info->requested_freq == policy->max) 78 if (dbs_info->requested_freq == policy->max)
73 return; 79 return;
74 80
75 freq_target = (cs_tuners.freq_step * policy->max) / 100; 81 dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
76
77 /* max freq cannot be less than 100. But who knows.... */
78 if (unlikely(freq_target == 0))
79 freq_target = 5;
80
81 dbs_info->requested_freq += freq_target;
82 if (dbs_info->requested_freq > policy->max) 82 if (dbs_info->requested_freq > policy->max)
83 dbs_info->requested_freq = policy->max; 83 dbs_info->requested_freq = policy->max;
84 84
@@ -87,45 +87,48 @@ static void cs_check_cpu(int cpu, unsigned int load)
87 return; 87 return;
88 } 88 }
89 89
90 /* 90 /* if sampling_down_factor is active break out early */
91 * The optimal frequency is the frequency that is the lowest that can 91 if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
92 * support the current CPU usage without triggering the up policy. To be 92 return;
93 * safe, we focus 10 points under the threshold. 93 dbs_info->down_skip = 0;
94 */
95 if (load < (cs_tuners.down_threshold - 10)) {
96 freq_target = (cs_tuners.freq_step * policy->max) / 100;
97
98 dbs_info->requested_freq -= freq_target;
99 if (dbs_info->requested_freq < policy->min)
100 dbs_info->requested_freq = policy->min;
101 94
95 /* Check for frequency decrease */
96 if (load < cs_tuners->down_threshold) {
102 /* 97 /*
103 * if we cannot reduce the frequency anymore, break out early 98 * if we cannot reduce the frequency anymore, break out early
104 */ 99 */
105 if (policy->cur == policy->min) 100 if (policy->cur == policy->min)
106 return; 101 return;
107 102
103 dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
104 if (dbs_info->requested_freq < policy->min)
105 dbs_info->requested_freq = policy->min;
106
108 __cpufreq_driver_target(policy, dbs_info->requested_freq, 107 __cpufreq_driver_target(policy, dbs_info->requested_freq,
109 CPUFREQ_RELATION_H); 108 CPUFREQ_RELATION_L);
110 return; 109 return;
111 } 110 }
112} 111}
113 112
114static void cs_dbs_timer(struct work_struct *work) 113static void cs_dbs_timer(struct work_struct *work)
115{ 114{
116 struct delayed_work *dw = to_delayed_work(work);
117 struct cs_cpu_dbs_info_s *dbs_info = container_of(work, 115 struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
118 struct cs_cpu_dbs_info_s, cdbs.work.work); 116 struct cs_cpu_dbs_info_s, cdbs.work.work);
119 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 117 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
120 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info, 118 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
121 cpu); 119 cpu);
122 int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); 120 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
121 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
122 int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
123 bool modify_all = true;
123 124
124 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 125 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
125 if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate)) 126 if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
126 dbs_check_cpu(&cs_dbs_data, cpu); 127 modify_all = false;
128 else
129 dbs_check_cpu(dbs_data, cpu);
127 130
128 schedule_delayed_work_on(smp_processor_id(), dw, delay); 131 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
129 mutex_unlock(&core_dbs_info->cdbs.timer_mutex); 132 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
130} 133}
131 134
@@ -154,16 +157,12 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
154} 157}
155 158
156/************************** sysfs interface ************************/ 159/************************** sysfs interface ************************/
157static ssize_t show_sampling_rate_min(struct kobject *kobj, 160static struct common_dbs_data cs_dbs_cdata;
158 struct attribute *attr, char *buf)
159{
160 return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
161}
162 161
163static ssize_t store_sampling_down_factor(struct kobject *a, 162static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
164 struct attribute *b, 163 const char *buf, size_t count)
165 const char *buf, size_t count)
166{ 164{
165 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
167 unsigned int input; 166 unsigned int input;
168 int ret; 167 int ret;
169 ret = sscanf(buf, "%u", &input); 168 ret = sscanf(buf, "%u", &input);
@@ -171,13 +170,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
171 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) 170 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
172 return -EINVAL; 171 return -EINVAL;
173 172
174 cs_tuners.sampling_down_factor = input; 173 cs_tuners->sampling_down_factor = input;
175 return count; 174 return count;
176} 175}
177 176
178static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, 177static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
179 const char *buf, size_t count) 178 size_t count)
180{ 179{
180 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
181 unsigned int input; 181 unsigned int input;
182 int ret; 182 int ret;
183 ret = sscanf(buf, "%u", &input); 183 ret = sscanf(buf, "%u", &input);
@@ -185,43 +185,46 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
185 if (ret != 1) 185 if (ret != 1)
186 return -EINVAL; 186 return -EINVAL;
187 187
188 cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); 188 cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
189 return count; 189 return count;
190} 190}
191 191
192static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 192static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
193 const char *buf, size_t count) 193 size_t count)
194{ 194{
195 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
195 unsigned int input; 196 unsigned int input;
196 int ret; 197 int ret;
197 ret = sscanf(buf, "%u", &input); 198 ret = sscanf(buf, "%u", &input);
198 199
199 if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) 200 if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
200 return -EINVAL; 201 return -EINVAL;
201 202
202 cs_tuners.up_threshold = input; 203 cs_tuners->up_threshold = input;
203 return count; 204 return count;
204} 205}
205 206
206static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, 207static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
207 const char *buf, size_t count) 208 size_t count)
208{ 209{
210 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
209 unsigned int input; 211 unsigned int input;
210 int ret; 212 int ret;
211 ret = sscanf(buf, "%u", &input); 213 ret = sscanf(buf, "%u", &input);
212 214
213 /* cannot be lower than 11 otherwise freq will not fall */ 215 /* cannot be lower than 11 otherwise freq will not fall */
214 if (ret != 1 || input < 11 || input > 100 || 216 if (ret != 1 || input < 11 || input > 100 ||
215 input >= cs_tuners.up_threshold) 217 input >= cs_tuners->up_threshold)
216 return -EINVAL; 218 return -EINVAL;
217 219
218 cs_tuners.down_threshold = input; 220 cs_tuners->down_threshold = input;
219 return count; 221 return count;
220} 222}
221 223
222static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, 224static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
223 const char *buf, size_t count) 225 size_t count)
224{ 226{
227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
225 unsigned int input, j; 228 unsigned int input, j;
226 int ret; 229 int ret;
227 230
@@ -232,27 +235,28 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
232 if (input > 1) 235 if (input > 1)
233 input = 1; 236 input = 1;
234 237
235 if (input == cs_tuners.ignore_nice) /* nothing to do */ 238 if (input == cs_tuners->ignore_nice) /* nothing to do */
236 return count; 239 return count;
237 240
238 cs_tuners.ignore_nice = input; 241 cs_tuners->ignore_nice = input;
239 242
240 /* we need to re-evaluate prev_cpu_idle */ 243 /* we need to re-evaluate prev_cpu_idle */
241 for_each_online_cpu(j) { 244 for_each_online_cpu(j) {
242 struct cs_cpu_dbs_info_s *dbs_info; 245 struct cs_cpu_dbs_info_s *dbs_info;
243 dbs_info = &per_cpu(cs_cpu_dbs_info, j); 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
244 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
245 &dbs_info->cdbs.prev_cpu_wall); 248 &dbs_info->cdbs.prev_cpu_wall, 0);
246 if (cs_tuners.ignore_nice) 249 if (cs_tuners->ignore_nice)
247 dbs_info->cdbs.prev_cpu_nice = 250 dbs_info->cdbs.prev_cpu_nice =
248 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 251 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
249 } 252 }
250 return count; 253 return count;
251} 254}
252 255
253static ssize_t store_freq_step(struct kobject *a, struct attribute *b, 256static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
254 const char *buf, size_t count) 257 size_t count)
255{ 258{
259 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
256 unsigned int input; 260 unsigned int input;
257 int ret; 261 int ret;
258 ret = sscanf(buf, "%u", &input); 262 ret = sscanf(buf, "%u", &input);
@@ -267,43 +271,88 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
267 * no need to test here if freq_step is zero as the user might actually 271 * no need to test here if freq_step is zero as the user might actually
268 * want this, they would be crazy though :) 272 * want this, they would be crazy though :)
269 */ 273 */
270 cs_tuners.freq_step = input; 274 cs_tuners->freq_step = input;
271 return count; 275 return count;
272} 276}
273 277
274show_one(cs, sampling_rate, sampling_rate); 278show_store_one(cs, sampling_rate);
275show_one(cs, sampling_down_factor, sampling_down_factor); 279show_store_one(cs, sampling_down_factor);
276show_one(cs, up_threshold, up_threshold); 280show_store_one(cs, up_threshold);
277show_one(cs, down_threshold, down_threshold); 281show_store_one(cs, down_threshold);
278show_one(cs, ignore_nice_load, ignore_nice); 282show_store_one(cs, ignore_nice);
279show_one(cs, freq_step, freq_step); 283show_store_one(cs, freq_step);
280 284declare_show_sampling_rate_min(cs);
281define_one_global_rw(sampling_rate); 285
282define_one_global_rw(sampling_down_factor); 286gov_sys_pol_attr_rw(sampling_rate);
283define_one_global_rw(up_threshold); 287gov_sys_pol_attr_rw(sampling_down_factor);
284define_one_global_rw(down_threshold); 288gov_sys_pol_attr_rw(up_threshold);
285define_one_global_rw(ignore_nice_load); 289gov_sys_pol_attr_rw(down_threshold);
286define_one_global_rw(freq_step); 290gov_sys_pol_attr_rw(ignore_nice);
287define_one_global_ro(sampling_rate_min); 291gov_sys_pol_attr_rw(freq_step);
288 292gov_sys_pol_attr_ro(sampling_rate_min);
289static struct attribute *dbs_attributes[] = { 293
290 &sampling_rate_min.attr, 294static struct attribute *dbs_attributes_gov_sys[] = {
291 &sampling_rate.attr, 295 &sampling_rate_min_gov_sys.attr,
292 &sampling_down_factor.attr, 296 &sampling_rate_gov_sys.attr,
293 &up_threshold.attr, 297 &sampling_down_factor_gov_sys.attr,
294 &down_threshold.attr, 298 &up_threshold_gov_sys.attr,
295 &ignore_nice_load.attr, 299 &down_threshold_gov_sys.attr,
296 &freq_step.attr, 300 &ignore_nice_gov_sys.attr,
301 &freq_step_gov_sys.attr,
297 NULL 302 NULL
298}; 303};
299 304
300static struct attribute_group cs_attr_group = { 305static struct attribute_group cs_attr_group_gov_sys = {
301 .attrs = dbs_attributes, 306 .attrs = dbs_attributes_gov_sys,
307 .name = "conservative",
308};
309
310static struct attribute *dbs_attributes_gov_pol[] = {
311 &sampling_rate_min_gov_pol.attr,
312 &sampling_rate_gov_pol.attr,
313 &sampling_down_factor_gov_pol.attr,
314 &up_threshold_gov_pol.attr,
315 &down_threshold_gov_pol.attr,
316 &ignore_nice_gov_pol.attr,
317 &freq_step_gov_pol.attr,
318 NULL
319};
320
321static struct attribute_group cs_attr_group_gov_pol = {
322 .attrs = dbs_attributes_gov_pol,
302 .name = "conservative", 323 .name = "conservative",
303}; 324};
304 325
305/************************** sysfs end ************************/ 326/************************** sysfs end ************************/
306 327
328static int cs_init(struct dbs_data *dbs_data)
329{
330 struct cs_dbs_tuners *tuners;
331
332 tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
333 if (!tuners) {
334 pr_err("%s: kzalloc failed\n", __func__);
335 return -ENOMEM;
336 }
337
338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
341 tuners->ignore_nice = 0;
342 tuners->freq_step = DEF_FREQUENCY_STEP;
343
344 dbs_data->tuners = tuners;
345 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
346 jiffies_to_usecs(10);
347 mutex_init(&dbs_data->mutex);
348 return 0;
349}
350
351static void cs_exit(struct dbs_data *dbs_data)
352{
353 kfree(dbs_data->tuners);
354}
355
307define_get_cpu_dbs_routines(cs_cpu_dbs_info); 356define_get_cpu_dbs_routines(cs_cpu_dbs_info);
308 357
309static struct notifier_block cs_cpufreq_notifier_block = { 358static struct notifier_block cs_cpufreq_notifier_block = {
@@ -314,21 +363,23 @@ static struct cs_ops cs_ops = {
314 .notifier_block = &cs_cpufreq_notifier_block, 363 .notifier_block = &cs_cpufreq_notifier_block,
315}; 364};
316 365
317static struct dbs_data cs_dbs_data = { 366static struct common_dbs_data cs_dbs_cdata = {
318 .governor = GOV_CONSERVATIVE, 367 .governor = GOV_CONSERVATIVE,
319 .attr_group = &cs_attr_group, 368 .attr_group_gov_sys = &cs_attr_group_gov_sys,
320 .tuners = &cs_tuners, 369 .attr_group_gov_pol = &cs_attr_group_gov_pol,
321 .get_cpu_cdbs = get_cpu_cdbs, 370 .get_cpu_cdbs = get_cpu_cdbs,
322 .get_cpu_dbs_info_s = get_cpu_dbs_info_s, 371 .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
323 .gov_dbs_timer = cs_dbs_timer, 372 .gov_dbs_timer = cs_dbs_timer,
324 .gov_check_cpu = cs_check_cpu, 373 .gov_check_cpu = cs_check_cpu,
325 .gov_ops = &cs_ops, 374 .gov_ops = &cs_ops,
375 .init = cs_init,
376 .exit = cs_exit,
326}; 377};
327 378
328static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, 379static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
329 unsigned int event) 380 unsigned int event)
330{ 381{
331 return cpufreq_governor_dbs(&cs_dbs_data, policy, event); 382 return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
332} 383}
333 384
334#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE 385#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -343,7 +394,6 @@ struct cpufreq_governor cpufreq_gov_conservative = {
343 394
344static int __init cpufreq_gov_dbs_init(void) 395static int __init cpufreq_gov_dbs_init(void)
345{ 396{
346 mutex_init(&cs_dbs_data.mutex);
347 return cpufreq_register_governor(&cpufreq_gov_conservative); 397 return cpufreq_register_governor(&cpufreq_gov_conservative);
348} 398}
349 399
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5a76086ff09b..443442df113b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,12 +22,29 @@
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/kernel_stat.h> 23#include <linux/kernel_stat.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h>
25#include <linux/tick.h> 26#include <linux/tick.h>
26#include <linux/types.h> 27#include <linux/types.h>
27#include <linux/workqueue.h> 28#include <linux/workqueue.h>
28 29
29#include "cpufreq_governor.h" 30#include "cpufreq_governor.h"
30 31
32static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
33{
34 if (have_governor_per_policy())
35 return &policy->kobj;
36 else
37 return cpufreq_global_kobject;
38}
39
40static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
41{
42 if (have_governor_per_policy())
43 return dbs_data->cdata->attr_group_gov_pol;
44 else
45 return dbs_data->cdata->attr_group_gov_sys;
46}
47
31static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 48static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
32{ 49{
33 u64 idle_time; 50 u64 idle_time;
@@ -50,13 +67,13 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
50 return cputime_to_usecs(idle_time); 67 return cputime_to_usecs(idle_time);
51} 68}
52 69
53u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) 70u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
54{ 71{
55 u64 idle_time = get_cpu_idle_time_us(cpu, NULL); 72 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
56 73
57 if (idle_time == -1ULL) 74 if (idle_time == -1ULL)
58 return get_cpu_idle_time_jiffy(cpu, wall); 75 return get_cpu_idle_time_jiffy(cpu, wall);
59 else 76 else if (!io_busy)
60 idle_time += get_cpu_iowait_time_us(cpu, wall); 77 idle_time += get_cpu_iowait_time_us(cpu, wall);
61 78
62 return idle_time; 79 return idle_time;
@@ -65,7 +82,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time);
65 82
66void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) 83void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
67{ 84{
68 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); 85 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
69 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 86 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
70 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 87 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
71 struct cpufreq_policy *policy; 88 struct cpufreq_policy *policy;
@@ -73,7 +90,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
73 unsigned int ignore_nice; 90 unsigned int ignore_nice;
74 unsigned int j; 91 unsigned int j;
75 92
76 if (dbs_data->governor == GOV_ONDEMAND) 93 if (dbs_data->cdata->governor == GOV_ONDEMAND)
77 ignore_nice = od_tuners->ignore_nice; 94 ignore_nice = od_tuners->ignore_nice;
78 else 95 else
79 ignore_nice = cs_tuners->ignore_nice; 96 ignore_nice = cs_tuners->ignore_nice;
@@ -83,13 +100,22 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
83 /* Get Absolute Load (in terms of freq for ondemand gov) */ 100 /* Get Absolute Load (in terms of freq for ondemand gov) */
84 for_each_cpu(j, policy->cpus) { 101 for_each_cpu(j, policy->cpus) {
85 struct cpu_dbs_common_info *j_cdbs; 102 struct cpu_dbs_common_info *j_cdbs;
86 u64 cur_wall_time, cur_idle_time, cur_iowait_time; 103 u64 cur_wall_time, cur_idle_time;
87 unsigned int idle_time, wall_time, iowait_time; 104 unsigned int idle_time, wall_time;
88 unsigned int load; 105 unsigned int load;
106 int io_busy = 0;
89 107
90 j_cdbs = dbs_data->get_cpu_cdbs(j); 108 j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
91 109
92 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); 110 /*
111 * For the purpose of ondemand, waiting for disk IO is
112 * an indication that you're performance critical, and
113 * not that the system is actually idle. So do not add
114 * the iowait time to the cpu idle time.
115 */
116 if (dbs_data->cdata->governor == GOV_ONDEMAND)
117 io_busy = od_tuners->io_is_busy;
118 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
93 119
94 wall_time = (unsigned int) 120 wall_time = (unsigned int)
95 (cur_wall_time - j_cdbs->prev_cpu_wall); 121 (cur_wall_time - j_cdbs->prev_cpu_wall);
@@ -117,35 +143,12 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
117 idle_time += jiffies_to_usecs(cur_nice_jiffies); 143 idle_time += jiffies_to_usecs(cur_nice_jiffies);
118 } 144 }
119 145
120 if (dbs_data->governor == GOV_ONDEMAND) {
121 struct od_cpu_dbs_info_s *od_j_dbs_info =
122 dbs_data->get_cpu_dbs_info_s(cpu);
123
124 cur_iowait_time = get_cpu_iowait_time_us(j,
125 &cur_wall_time);
126 if (cur_iowait_time == -1ULL)
127 cur_iowait_time = 0;
128
129 iowait_time = (unsigned int) (cur_iowait_time -
130 od_j_dbs_info->prev_cpu_iowait);
131 od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
132
133 /*
134 * For the purpose of ondemand, waiting for disk IO is
135 * an indication that you're performance critical, and
136 * not that the system is actually idle. So subtract the
137 * iowait time from the cpu idle time.
138 */
139 if (od_tuners->io_is_busy && idle_time >= iowait_time)
140 idle_time -= iowait_time;
141 }
142
143 if (unlikely(!wall_time || wall_time < idle_time)) 146 if (unlikely(!wall_time || wall_time < idle_time))
144 continue; 147 continue;
145 148
146 load = 100 * (wall_time - idle_time) / wall_time; 149 load = 100 * (wall_time - idle_time) / wall_time;
147 150
148 if (dbs_data->governor == GOV_ONDEMAND) { 151 if (dbs_data->cdata->governor == GOV_ONDEMAND) {
149 int freq_avg = __cpufreq_driver_getavg(policy, j); 152 int freq_avg = __cpufreq_driver_getavg(policy, j);
150 if (freq_avg <= 0) 153 if (freq_avg <= 0)
151 freq_avg = policy->cur; 154 freq_avg = policy->cur;
@@ -157,24 +160,42 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
157 max_load = load; 160 max_load = load;
158 } 161 }
159 162
160 dbs_data->gov_check_cpu(cpu, max_load); 163 dbs_data->cdata->gov_check_cpu(cpu, max_load);
161} 164}
162EXPORT_SYMBOL_GPL(dbs_check_cpu); 165EXPORT_SYMBOL_GPL(dbs_check_cpu);
163 166
164static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu, 167static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
165 unsigned int sampling_rate) 168 unsigned int delay)
166{ 169{
167 int delay = delay_for_sampling_rate(sampling_rate); 170 struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
168 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
169 171
170 schedule_delayed_work_on(cpu, &cdbs->work, delay); 172 mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
171} 173}
172 174
173static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu) 175void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
176 unsigned int delay, bool all_cpus)
174{ 177{
175 struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); 178 int i;
176 179
177 cancel_delayed_work_sync(&cdbs->work); 180 if (!all_cpus) {
181 __gov_queue_work(smp_processor_id(), dbs_data, delay);
182 } else {
183 for_each_cpu(i, policy->cpus)
184 __gov_queue_work(i, dbs_data, delay);
185 }
186}
187EXPORT_SYMBOL_GPL(gov_queue_work);
188
189static inline void gov_cancel_work(struct dbs_data *dbs_data,
190 struct cpufreq_policy *policy)
191{
192 struct cpu_dbs_common_info *cdbs;
193 int i;
194
195 for_each_cpu(i, policy->cpus) {
196 cdbs = dbs_data->cdata->get_cpu_cdbs(i);
197 cancel_delayed_work_sync(&cdbs->work);
198 }
178} 199}
179 200
180/* Will return if we need to evaluate cpu load again or not */ 201/* Will return if we need to evaluate cpu load again or not */
@@ -196,31 +217,130 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
196} 217}
197EXPORT_SYMBOL_GPL(need_load_eval); 218EXPORT_SYMBOL_GPL(need_load_eval);
198 219
199int cpufreq_governor_dbs(struct dbs_data *dbs_data, 220static void set_sampling_rate(struct dbs_data *dbs_data,
200 struct cpufreq_policy *policy, unsigned int event) 221 unsigned int sampling_rate)
201{ 222{
223 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
224 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
225 cs_tuners->sampling_rate = sampling_rate;
226 } else {
227 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
228 od_tuners->sampling_rate = sampling_rate;
229 }
230}
231
232int cpufreq_governor_dbs(struct cpufreq_policy *policy,
233 struct common_dbs_data *cdata, unsigned int event)
234{
235 struct dbs_data *dbs_data;
202 struct od_cpu_dbs_info_s *od_dbs_info = NULL; 236 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
203 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; 237 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
204 struct cs_ops *cs_ops = NULL;
205 struct od_ops *od_ops = NULL; 238 struct od_ops *od_ops = NULL;
206 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 239 struct od_dbs_tuners *od_tuners = NULL;
207 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; 240 struct cs_dbs_tuners *cs_tuners = NULL;
208 struct cpu_dbs_common_info *cpu_cdbs; 241 struct cpu_dbs_common_info *cpu_cdbs;
209 unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; 242 unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
243 int io_busy = 0;
210 int rc; 244 int rc;
211 245
212 cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); 246 if (have_governor_per_policy())
247 dbs_data = policy->governor_data;
248 else
249 dbs_data = cdata->gdbs_data;
250
251 WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
252
253 switch (event) {
254 case CPUFREQ_GOV_POLICY_INIT:
255 if (have_governor_per_policy()) {
256 WARN_ON(dbs_data);
257 } else if (dbs_data) {
258 policy->governor_data = dbs_data;
259 return 0;
260 }
261
262 dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
263 if (!dbs_data) {
264 pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
265 return -ENOMEM;
266 }
267
268 dbs_data->cdata = cdata;
269 rc = cdata->init(dbs_data);
270 if (rc) {
271 pr_err("%s: POLICY_INIT: init() failed\n", __func__);
272 kfree(dbs_data);
273 return rc;
274 }
275
276 rc = sysfs_create_group(get_governor_parent_kobj(policy),
277 get_sysfs_attr(dbs_data));
278 if (rc) {
279 cdata->exit(dbs_data);
280 kfree(dbs_data);
281 return rc;
282 }
283
284 policy->governor_data = dbs_data;
213 285
214 if (dbs_data->governor == GOV_CONSERVATIVE) { 286 /* policy latency is in nS. Convert it to uS first */
215 cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 287 latency = policy->cpuinfo.transition_latency / 1000;
216 sampling_rate = &cs_tuners->sampling_rate; 288 if (latency == 0)
289 latency = 1;
290
291 /* Bring kernel and HW constraints together */
292 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
293 MIN_LATENCY_MULTIPLIER * latency);
294 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
295 latency * LATENCY_MULTIPLIER));
296
297 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
298 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
299
300 cpufreq_register_notifier(cs_ops->notifier_block,
301 CPUFREQ_TRANSITION_NOTIFIER);
302 }
303
304 if (!have_governor_per_policy())
305 cdata->gdbs_data = dbs_data;
306
307 return 0;
308 case CPUFREQ_GOV_POLICY_EXIT:
309 if ((policy->governor->initialized == 1) ||
310 have_governor_per_policy()) {
311 sysfs_remove_group(get_governor_parent_kobj(policy),
312 get_sysfs_attr(dbs_data));
313
314 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
315 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
316
317 cpufreq_unregister_notifier(cs_ops->notifier_block,
318 CPUFREQ_TRANSITION_NOTIFIER);
319 }
320
321 cdata->exit(dbs_data);
322 kfree(dbs_data);
323 cdata->gdbs_data = NULL;
324 }
325
326 policy->governor_data = NULL;
327 return 0;
328 }
329
330 cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
331
332 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
333 cs_tuners = dbs_data->tuners;
334 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
335 sampling_rate = cs_tuners->sampling_rate;
217 ignore_nice = cs_tuners->ignore_nice; 336 ignore_nice = cs_tuners->ignore_nice;
218 cs_ops = dbs_data->gov_ops;
219 } else { 337 } else {
220 od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); 338 od_tuners = dbs_data->tuners;
221 sampling_rate = &od_tuners->sampling_rate; 339 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
340 sampling_rate = od_tuners->sampling_rate;
222 ignore_nice = od_tuners->ignore_nice; 341 ignore_nice = od_tuners->ignore_nice;
223 od_ops = dbs_data->gov_ops; 342 od_ops = dbs_data->cdata->gov_ops;
343 io_busy = od_tuners->io_is_busy;
224 } 344 }
225 345
226 switch (event) { 346 switch (event) {
@@ -232,96 +352,53 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
232 352
233 for_each_cpu(j, policy->cpus) { 353 for_each_cpu(j, policy->cpus) {
234 struct cpu_dbs_common_info *j_cdbs = 354 struct cpu_dbs_common_info *j_cdbs =
235 dbs_data->get_cpu_cdbs(j); 355 dbs_data->cdata->get_cpu_cdbs(j);
236 356
237 j_cdbs->cpu = j; 357 j_cdbs->cpu = j;
238 j_cdbs->cur_policy = policy; 358 j_cdbs->cur_policy = policy;
239 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, 359 j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
240 &j_cdbs->prev_cpu_wall); 360 &j_cdbs->prev_cpu_wall, io_busy);
241 if (ignore_nice) 361 if (ignore_nice)
242 j_cdbs->prev_cpu_nice = 362 j_cdbs->prev_cpu_nice =
243 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 363 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
244 364
245 mutex_init(&j_cdbs->timer_mutex); 365 mutex_init(&j_cdbs->timer_mutex);
246 INIT_DEFERRABLE_WORK(&j_cdbs->work, 366 INIT_DEFERRABLE_WORK(&j_cdbs->work,
247 dbs_data->gov_dbs_timer); 367 dbs_data->cdata->gov_dbs_timer);
248 }
249
250 if (!policy->governor->initialized) {
251 rc = sysfs_create_group(cpufreq_global_kobject,
252 dbs_data->attr_group);
253 if (rc) {
254 mutex_unlock(&dbs_data->mutex);
255 return rc;
256 }
257 } 368 }
258 369
259 /* 370 /*
260 * conservative does not implement micro like ondemand 371 * conservative does not implement micro like ondemand
261 * governor, thus we are bound to jiffes/HZ 372 * governor, thus we are bound to jiffes/HZ
262 */ 373 */
263 if (dbs_data->governor == GOV_CONSERVATIVE) { 374 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
264 cs_dbs_info->down_skip = 0; 375 cs_dbs_info->down_skip = 0;
265 cs_dbs_info->enable = 1; 376 cs_dbs_info->enable = 1;
266 cs_dbs_info->requested_freq = policy->cur; 377 cs_dbs_info->requested_freq = policy->cur;
267
268 if (!policy->governor->initialized) {
269 cpufreq_register_notifier(cs_ops->notifier_block,
270 CPUFREQ_TRANSITION_NOTIFIER);
271
272 dbs_data->min_sampling_rate =
273 MIN_SAMPLING_RATE_RATIO *
274 jiffies_to_usecs(10);
275 }
276 } else { 378 } else {
277 od_dbs_info->rate_mult = 1; 379 od_dbs_info->rate_mult = 1;
278 od_dbs_info->sample_type = OD_NORMAL_SAMPLE; 380 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
279 od_ops->powersave_bias_init_cpu(cpu); 381 od_ops->powersave_bias_init_cpu(cpu);
280
281 if (!policy->governor->initialized)
282 od_tuners->io_is_busy = od_ops->io_busy();
283 } 382 }
284 383
285 if (policy->governor->initialized)
286 goto unlock;
287
288 /* policy latency is in nS. Convert it to uS first */
289 latency = policy->cpuinfo.transition_latency / 1000;
290 if (latency == 0)
291 latency = 1;
292
293 /* Bring kernel and HW constraints together */
294 dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
295 MIN_LATENCY_MULTIPLIER * latency);
296 *sampling_rate = max(dbs_data->min_sampling_rate, latency *
297 LATENCY_MULTIPLIER);
298unlock:
299 mutex_unlock(&dbs_data->mutex); 384 mutex_unlock(&dbs_data->mutex);
300 385
301 /* Initiate timer time stamp */ 386 /* Initiate timer time stamp */
302 cpu_cdbs->time_stamp = ktime_get(); 387 cpu_cdbs->time_stamp = ktime_get();
303 388
304 for_each_cpu(j, policy->cpus) 389 gov_queue_work(dbs_data, policy,
305 dbs_timer_init(dbs_data, j, *sampling_rate); 390 delay_for_sampling_rate(sampling_rate), true);
306 break; 391 break;
307 392
308 case CPUFREQ_GOV_STOP: 393 case CPUFREQ_GOV_STOP:
309 if (dbs_data->governor == GOV_CONSERVATIVE) 394 if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
310 cs_dbs_info->enable = 0; 395 cs_dbs_info->enable = 0;
311 396
312 for_each_cpu(j, policy->cpus) 397 gov_cancel_work(dbs_data, policy);
313 dbs_timer_exit(dbs_data, j);
314 398
315 mutex_lock(&dbs_data->mutex); 399 mutex_lock(&dbs_data->mutex);
316 mutex_destroy(&cpu_cdbs->timer_mutex); 400 mutex_destroy(&cpu_cdbs->timer_mutex);
317 401
318 if (policy->governor->initialized == 1) {
319 sysfs_remove_group(cpufreq_global_kobject,
320 dbs_data->attr_group);
321 if (dbs_data->governor == GOV_CONSERVATIVE)
322 cpufreq_unregister_notifier(cs_ops->notifier_block,
323 CPUFREQ_TRANSITION_NOTIFIER);
324 }
325 mutex_unlock(&dbs_data->mutex); 402 mutex_unlock(&dbs_data->mutex);
326 403
327 break; 404 break;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc4bd2f6838a..8ac33538d0bd 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -34,20 +34,81 @@
34 */ 34 */
35#define MIN_SAMPLING_RATE_RATIO (2) 35#define MIN_SAMPLING_RATE_RATIO (2)
36#define LATENCY_MULTIPLIER (1000) 36#define LATENCY_MULTIPLIER (1000)
37#define MIN_LATENCY_MULTIPLIER (100) 37#define MIN_LATENCY_MULTIPLIER (20)
38#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 38#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
39 39
40/* Ondemand Sampling types */ 40/* Ondemand Sampling types */
41enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; 41enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
42 42
43/* Macro creating sysfs show routines */ 43/*
44#define show_one(_gov, file_name, object) \ 44 * Macro for creating governors sysfs routines
45static ssize_t show_##file_name \ 45 *
46 * - gov_sys: One governor instance per whole system
47 * - gov_pol: One governor instance per policy
48 */
49
50/* Create attributes */
51#define gov_sys_attr_ro(_name) \
52static struct global_attr _name##_gov_sys = \
53__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
54
55#define gov_sys_attr_rw(_name) \
56static struct global_attr _name##_gov_sys = \
57__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
58
59#define gov_pol_attr_ro(_name) \
60static struct freq_attr _name##_gov_pol = \
61__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
62
63#define gov_pol_attr_rw(_name) \
64static struct freq_attr _name##_gov_pol = \
65__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
66
67#define gov_sys_pol_attr_rw(_name) \
68 gov_sys_attr_rw(_name); \
69 gov_pol_attr_rw(_name)
70
71#define gov_sys_pol_attr_ro(_name) \
72 gov_sys_attr_ro(_name); \
73 gov_pol_attr_ro(_name)
74
75/* Create show/store routines */
76#define show_one(_gov, file_name) \
77static ssize_t show_##file_name##_gov_sys \
46(struct kobject *kobj, struct attribute *attr, char *buf) \ 78(struct kobject *kobj, struct attribute *attr, char *buf) \
47{ \ 79{ \
48 return sprintf(buf, "%u\n", _gov##_tuners.object); \ 80 struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
81 return sprintf(buf, "%u\n", tuners->file_name); \
82} \
83 \
84static ssize_t show_##file_name##_gov_pol \
85(struct cpufreq_policy *policy, char *buf) \
86{ \
87 struct dbs_data *dbs_data = policy->governor_data; \
88 struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
89 return sprintf(buf, "%u\n", tuners->file_name); \
90}
91
92#define store_one(_gov, file_name) \
93static ssize_t store_##file_name##_gov_sys \
94(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
95{ \
96 struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
97 return store_##file_name(dbs_data, buf, count); \
98} \
99 \
100static ssize_t store_##file_name##_gov_pol \
101(struct cpufreq_policy *policy, const char *buf, size_t count) \
102{ \
103 struct dbs_data *dbs_data = policy->governor_data; \
104 return store_##file_name(dbs_data, buf, count); \
49} 105}
50 106
107#define show_store_one(_gov, file_name) \
108show_one(_gov, file_name); \
109store_one(_gov, file_name)
110
111/* create helper routines */
51#define define_get_cpu_dbs_routines(_dbs_info) \ 112#define define_get_cpu_dbs_routines(_dbs_info) \
52static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ 113static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
53{ \ 114{ \
@@ -87,7 +148,6 @@ struct cpu_dbs_common_info {
87 148
88struct od_cpu_dbs_info_s { 149struct od_cpu_dbs_info_s {
89 struct cpu_dbs_common_info cdbs; 150 struct cpu_dbs_common_info cdbs;
90 u64 prev_cpu_iowait;
91 struct cpufreq_frequency_table *freq_table; 151 struct cpufreq_frequency_table *freq_table;
92 unsigned int freq_lo; 152 unsigned int freq_lo;
93 unsigned int freq_lo_jiffies; 153 unsigned int freq_lo_jiffies;
@@ -103,7 +163,7 @@ struct cs_cpu_dbs_info_s {
103 unsigned int enable:1; 163 unsigned int enable:1;
104}; 164};
105 165
106/* Governers sysfs tunables */ 166/* Per policy Governers sysfs tunables */
107struct od_dbs_tuners { 167struct od_dbs_tuners {
108 unsigned int ignore_nice; 168 unsigned int ignore_nice;
109 unsigned int sampling_rate; 169 unsigned int sampling_rate;
@@ -123,31 +183,42 @@ struct cs_dbs_tuners {
123 unsigned int freq_step; 183 unsigned int freq_step;
124}; 184};
125 185
126/* Per Governer data */ 186/* Common Governer data across policies */
127struct dbs_data { 187struct dbs_data;
188struct common_dbs_data {
128 /* Common across governors */ 189 /* Common across governors */
129 #define GOV_ONDEMAND 0 190 #define GOV_ONDEMAND 0
130 #define GOV_CONSERVATIVE 1 191 #define GOV_CONSERVATIVE 1
131 int governor; 192 int governor;
132 unsigned int min_sampling_rate; 193 struct attribute_group *attr_group_gov_sys; /* one governor - system */
133 struct attribute_group *attr_group; 194 struct attribute_group *attr_group_gov_pol; /* one governor - policy */
134 void *tuners;
135 195
136 /* dbs_mutex protects dbs_enable in governor start/stop */ 196 /* Common data for platforms that don't set have_governor_per_policy */
137 struct mutex mutex; 197 struct dbs_data *gdbs_data;
138 198
139 struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); 199 struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
140 void *(*get_cpu_dbs_info_s)(int cpu); 200 void *(*get_cpu_dbs_info_s)(int cpu);
141 void (*gov_dbs_timer)(struct work_struct *work); 201 void (*gov_dbs_timer)(struct work_struct *work);
142 void (*gov_check_cpu)(int cpu, unsigned int load); 202 void (*gov_check_cpu)(int cpu, unsigned int load);
203 int (*init)(struct dbs_data *dbs_data);
204 void (*exit)(struct dbs_data *dbs_data);
143 205
144 /* Governor specific ops, see below */ 206 /* Governor specific ops, see below */
145 void *gov_ops; 207 void *gov_ops;
146}; 208};
147 209
210/* Governer Per policy data */
211struct dbs_data {
212 struct common_dbs_data *cdata;
213 unsigned int min_sampling_rate;
214 void *tuners;
215
216 /* dbs_mutex protects dbs_enable in governor start/stop */
217 struct mutex mutex;
218};
219
148/* Governor specific ops, will be passed to dbs_data->gov_ops */ 220/* Governor specific ops, will be passed to dbs_data->gov_ops */
149struct od_ops { 221struct od_ops {
150 int (*io_busy)(void);
151 void (*powersave_bias_init_cpu)(int cpu); 222 void (*powersave_bias_init_cpu)(int cpu);
152 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, 223 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
153 unsigned int freq_next, unsigned int relation); 224 unsigned int freq_next, unsigned int relation);
@@ -169,10 +240,31 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
169 return delay; 240 return delay;
170} 241}
171 242
172u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); 243#define declare_show_sampling_rate_min(_gov) \
244static ssize_t show_sampling_rate_min_gov_sys \
245(struct kobject *kobj, struct attribute *attr, char *buf) \
246{ \
247 struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
248 return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
249} \
250 \
251static ssize_t show_sampling_rate_min_gov_pol \
252(struct cpufreq_policy *policy, char *buf) \
253{ \
254 struct dbs_data *dbs_data = policy->governor_data; \
255 return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
256}
257
258u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
173void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); 259void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
174bool need_load_eval(struct cpu_dbs_common_info *cdbs, 260bool need_load_eval(struct cpu_dbs_common_info *cdbs,
175 unsigned int sampling_rate); 261 unsigned int sampling_rate);
176int cpufreq_governor_dbs(struct dbs_data *dbs_data, 262int cpufreq_governor_dbs(struct cpufreq_policy *policy,
177 struct cpufreq_policy *policy, unsigned int event); 263 struct common_dbs_data *cdata, unsigned int event);
264void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
265 unsigned int delay, bool all_cpus);
266void od_register_powersave_bias_handler(unsigned int (*f)
267 (struct cpufreq_policy *, unsigned int, unsigned int),
268 unsigned int powersave_bias);
269void od_unregister_powersave_bias_handler(void);
178#endif /* _CPUFREQ_GOVERNOR_H */ 270#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f3eb26cd848f..b0ffef96bf77 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -20,9 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/percpu-defs.h> 22#include <linux/percpu-defs.h>
23#include <linux/slab.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
24#include <linux/tick.h> 25#include <linux/tick.h>
25#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/cpu.h>
26 28
27#include "cpufreq_governor.h" 29#include "cpufreq_governor.h"
28 30
@@ -37,22 +39,14 @@
37#define MIN_FREQUENCY_UP_THRESHOLD (11) 39#define MIN_FREQUENCY_UP_THRESHOLD (11)
38#define MAX_FREQUENCY_UP_THRESHOLD (100) 40#define MAX_FREQUENCY_UP_THRESHOLD (100)
39 41
40static struct dbs_data od_dbs_data;
41static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); 42static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
42 43
44static struct od_ops od_ops;
45
43#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND 46#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
44static struct cpufreq_governor cpufreq_gov_ondemand; 47static struct cpufreq_governor cpufreq_gov_ondemand;
45#endif 48#endif
46 49
47static struct od_dbs_tuners od_tuners = {
48 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
49 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
50 .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
51 DEF_FREQUENCY_DOWN_DIFFERENTIAL,
52 .ignore_nice = 0,
53 .powersave_bias = 0,
54};
55
56static void ondemand_powersave_bias_init_cpu(int cpu) 50static void ondemand_powersave_bias_init_cpu(int cpu)
57{ 51{
58 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 52 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -89,7 +83,7 @@ static int should_io_be_busy(void)
89 * Returns the freq_hi to be used right now and will set freq_hi_jiffies, 83 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
90 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 84 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
91 */ 85 */
92static unsigned int powersave_bias_target(struct cpufreq_policy *policy, 86static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
93 unsigned int freq_next, unsigned int relation) 87 unsigned int freq_next, unsigned int relation)
94{ 88{
95 unsigned int freq_req, freq_reduc, freq_avg; 89 unsigned int freq_req, freq_reduc, freq_avg;
@@ -98,6 +92,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
98 unsigned int jiffies_total, jiffies_hi, jiffies_lo; 92 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
99 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 93 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
100 policy->cpu); 94 policy->cpu);
95 struct dbs_data *dbs_data = policy->governor_data;
96 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
101 97
102 if (!dbs_info->freq_table) { 98 if (!dbs_info->freq_table) {
103 dbs_info->freq_lo = 0; 99 dbs_info->freq_lo = 0;
@@ -108,7 +104,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
108 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, 104 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
109 relation, &index); 105 relation, &index);
110 freq_req = dbs_info->freq_table[index].frequency; 106 freq_req = dbs_info->freq_table[index].frequency;
111 freq_reduc = freq_req * od_tuners.powersave_bias / 1000; 107 freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
112 freq_avg = freq_req - freq_reduc; 108 freq_avg = freq_req - freq_reduc;
113 109
114 /* Find freq bounds for freq_avg in freq_table */ 110 /* Find freq bounds for freq_avg in freq_table */
@@ -127,7 +123,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
127 dbs_info->freq_lo_jiffies = 0; 123 dbs_info->freq_lo_jiffies = 0;
128 return freq_lo; 124 return freq_lo;
129 } 125 }
130 jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); 126 jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
131 jiffies_hi = (freq_avg - freq_lo) * jiffies_total; 127 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
132 jiffies_hi += ((freq_hi - freq_lo) / 2); 128 jiffies_hi += ((freq_hi - freq_lo) / 2);
133 jiffies_hi /= (freq_hi - freq_lo); 129 jiffies_hi /= (freq_hi - freq_lo);
@@ -148,12 +144,16 @@ static void ondemand_powersave_bias_init(void)
148 144
149static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) 145static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
150{ 146{
151 if (od_tuners.powersave_bias) 147 struct dbs_data *dbs_data = p->governor_data;
152 freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); 148 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
149
150 if (od_tuners->powersave_bias)
151 freq = od_ops.powersave_bias_target(p, freq,
152 CPUFREQ_RELATION_H);
153 else if (p->cur == p->max) 153 else if (p->cur == p->max)
154 return; 154 return;
155 155
156 __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? 156 __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
157 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); 157 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
158} 158}
159 159
@@ -170,15 +170,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
170{ 170{
171 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); 171 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
172 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; 172 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
173 struct dbs_data *dbs_data = policy->governor_data;
174 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
173 175
174 dbs_info->freq_lo = 0; 176 dbs_info->freq_lo = 0;
175 177
176 /* Check for frequency increase */ 178 /* Check for frequency increase */
177 if (load_freq > od_tuners.up_threshold * policy->cur) { 179 if (load_freq > od_tuners->up_threshold * policy->cur) {
178 /* If switching to max speed, apply sampling_down_factor */ 180 /* If switching to max speed, apply sampling_down_factor */
179 if (policy->cur < policy->max) 181 if (policy->cur < policy->max)
180 dbs_info->rate_mult = 182 dbs_info->rate_mult =
181 od_tuners.sampling_down_factor; 183 od_tuners->sampling_down_factor;
182 dbs_freq_increase(policy, policy->max); 184 dbs_freq_increase(policy, policy->max);
183 return; 185 return;
184 } 186 }
@@ -193,9 +195,10 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
193 * support the current CPU usage without triggering the up policy. To be 195 * support the current CPU usage without triggering the up policy. To be
194 * safe, we focus 10 points under the threshold. 196 * safe, we focus 10 points under the threshold.
195 */ 197 */
196 if (load_freq < od_tuners.adj_up_threshold * policy->cur) { 198 if (load_freq < od_tuners->adj_up_threshold
199 * policy->cur) {
197 unsigned int freq_next; 200 unsigned int freq_next;
198 freq_next = load_freq / od_tuners.adj_up_threshold; 201 freq_next = load_freq / od_tuners->adj_up_threshold;
199 202
200 /* No longer fully busy, reset rate_mult */ 203 /* No longer fully busy, reset rate_mult */
201 dbs_info->rate_mult = 1; 204 dbs_info->rate_mult = 1;
@@ -203,65 +206,62 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
203 if (freq_next < policy->min) 206 if (freq_next < policy->min)
204 freq_next = policy->min; 207 freq_next = policy->min;
205 208
206 if (!od_tuners.powersave_bias) { 209 if (!od_tuners->powersave_bias) {
207 __cpufreq_driver_target(policy, freq_next, 210 __cpufreq_driver_target(policy, freq_next,
208 CPUFREQ_RELATION_L); 211 CPUFREQ_RELATION_L);
209 } else { 212 return;
210 int freq = powersave_bias_target(policy, freq_next,
211 CPUFREQ_RELATION_L);
212 __cpufreq_driver_target(policy, freq,
213 CPUFREQ_RELATION_L);
214 } 213 }
214
215 freq_next = od_ops.powersave_bias_target(policy, freq_next,
216 CPUFREQ_RELATION_L);
217 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
215 } 218 }
216} 219}
217 220
218static void od_dbs_timer(struct work_struct *work) 221static void od_dbs_timer(struct work_struct *work)
219{ 222{
220 struct delayed_work *dw = to_delayed_work(work);
221 struct od_cpu_dbs_info_s *dbs_info = 223 struct od_cpu_dbs_info_s *dbs_info =
222 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); 224 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
223 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; 225 unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
224 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, 226 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
225 cpu); 227 cpu);
226 int delay, sample_type = core_dbs_info->sample_type; 228 struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
227 bool eval_load; 229 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
230 int delay = 0, sample_type = core_dbs_info->sample_type;
231 bool modify_all = true;
228 232
229 mutex_lock(&core_dbs_info->cdbs.timer_mutex); 233 mutex_lock(&core_dbs_info->cdbs.timer_mutex);
230 eval_load = need_load_eval(&core_dbs_info->cdbs, 234 if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
231 od_tuners.sampling_rate); 235 modify_all = false;
236 goto max_delay;
237 }
232 238
233 /* Common NORMAL_SAMPLE setup */ 239 /* Common NORMAL_SAMPLE setup */
234 core_dbs_info->sample_type = OD_NORMAL_SAMPLE; 240 core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
235 if (sample_type == OD_SUB_SAMPLE) { 241 if (sample_type == OD_SUB_SAMPLE) {
236 delay = core_dbs_info->freq_lo_jiffies; 242 delay = core_dbs_info->freq_lo_jiffies;
237 if (eval_load) 243 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
238 __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, 244 core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
239 core_dbs_info->freq_lo,
240 CPUFREQ_RELATION_H);
241 } else { 245 } else {
242 if (eval_load) 246 dbs_check_cpu(dbs_data, cpu);
243 dbs_check_cpu(&od_dbs_data, cpu);
244 if (core_dbs_info->freq_lo) { 247 if (core_dbs_info->freq_lo) {
245 /* Setup timer for SUB_SAMPLE */ 248 /* Setup timer for SUB_SAMPLE */
246 core_dbs_info->sample_type = OD_SUB_SAMPLE; 249 core_dbs_info->sample_type = OD_SUB_SAMPLE;
247 delay = core_dbs_info->freq_hi_jiffies; 250 delay = core_dbs_info->freq_hi_jiffies;
248 } else {
249 delay = delay_for_sampling_rate(od_tuners.sampling_rate
250 * core_dbs_info->rate_mult);
251 } 251 }
252 } 252 }
253 253
254 schedule_delayed_work_on(smp_processor_id(), dw, delay); 254max_delay:
255 if (!delay)
256 delay = delay_for_sampling_rate(od_tuners->sampling_rate
257 * core_dbs_info->rate_mult);
258
259 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
255 mutex_unlock(&core_dbs_info->cdbs.timer_mutex); 260 mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
256} 261}
257 262
258/************************** sysfs interface ************************/ 263/************************** sysfs interface ************************/
259 264static struct common_dbs_data od_dbs_cdata;
260static ssize_t show_sampling_rate_min(struct kobject *kobj,
261 struct attribute *attr, char *buf)
262{
263 return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
264}
265 265
266/** 266/**
267 * update_sampling_rate - update sampling rate effective immediately if needed. 267 * update_sampling_rate - update sampling rate effective immediately if needed.
@@ -276,12 +276,14 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
276 * reducing the sampling rate, we need to make the new value effective 276 * reducing the sampling rate, we need to make the new value effective
277 * immediately. 277 * immediately.
278 */ 278 */
279static void update_sampling_rate(unsigned int new_rate) 279static void update_sampling_rate(struct dbs_data *dbs_data,
280 unsigned int new_rate)
280{ 281{
282 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
281 int cpu; 283 int cpu;
282 284
283 od_tuners.sampling_rate = new_rate = max(new_rate, 285 od_tuners->sampling_rate = new_rate = max(new_rate,
284 od_dbs_data.min_sampling_rate); 286 dbs_data->min_sampling_rate);
285 287
286 for_each_online_cpu(cpu) { 288 for_each_online_cpu(cpu) {
287 struct cpufreq_policy *policy; 289 struct cpufreq_policy *policy;
@@ -314,42 +316,54 @@ static void update_sampling_rate(unsigned int new_rate)
314 cancel_delayed_work_sync(&dbs_info->cdbs.work); 316 cancel_delayed_work_sync(&dbs_info->cdbs.work);
315 mutex_lock(&dbs_info->cdbs.timer_mutex); 317 mutex_lock(&dbs_info->cdbs.timer_mutex);
316 318
317 schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, 319 gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
318 usecs_to_jiffies(new_rate)); 320 usecs_to_jiffies(new_rate), true);
319 321
320 } 322 }
321 mutex_unlock(&dbs_info->cdbs.timer_mutex); 323 mutex_unlock(&dbs_info->cdbs.timer_mutex);
322 } 324 }
323} 325}
324 326
325static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, 327static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
326 const char *buf, size_t count) 328 size_t count)
327{ 329{
328 unsigned int input; 330 unsigned int input;
329 int ret; 331 int ret;
330 ret = sscanf(buf, "%u", &input); 332 ret = sscanf(buf, "%u", &input);
331 if (ret != 1) 333 if (ret != 1)
332 return -EINVAL; 334 return -EINVAL;
333 update_sampling_rate(input); 335
336 update_sampling_rate(dbs_data, input);
334 return count; 337 return count;
335} 338}
336 339
337static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, 340static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
338 const char *buf, size_t count) 341 size_t count)
339{ 342{
343 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
340 unsigned int input; 344 unsigned int input;
341 int ret; 345 int ret;
346 unsigned int j;
342 347
343 ret = sscanf(buf, "%u", &input); 348 ret = sscanf(buf, "%u", &input);
344 if (ret != 1) 349 if (ret != 1)
345 return -EINVAL; 350 return -EINVAL;
346 od_tuners.io_is_busy = !!input; 351 od_tuners->io_is_busy = !!input;
352
353 /* we need to re-evaluate prev_cpu_idle */
354 for_each_online_cpu(j) {
355 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
356 j);
357 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
358 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
359 }
347 return count; 360 return count;
348} 361}
349 362
350static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, 363static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
351 const char *buf, size_t count) 364 size_t count)
352{ 365{
366 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
353 unsigned int input; 367 unsigned int input;
354 int ret; 368 int ret;
355 ret = sscanf(buf, "%u", &input); 369 ret = sscanf(buf, "%u", &input);
@@ -359,23 +373,24 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
359 return -EINVAL; 373 return -EINVAL;
360 } 374 }
361 /* Calculate the new adj_up_threshold */ 375 /* Calculate the new adj_up_threshold */
362 od_tuners.adj_up_threshold += input; 376 od_tuners->adj_up_threshold += input;
363 od_tuners.adj_up_threshold -= od_tuners.up_threshold; 377 od_tuners->adj_up_threshold -= od_tuners->up_threshold;
364 378
365 od_tuners.up_threshold = input; 379 od_tuners->up_threshold = input;
366 return count; 380 return count;
367} 381}
368 382
369static ssize_t store_sampling_down_factor(struct kobject *a, 383static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
370 struct attribute *b, const char *buf, size_t count) 384 const char *buf, size_t count)
371{ 385{
386 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
372 unsigned int input, j; 387 unsigned int input, j;
373 int ret; 388 int ret;
374 ret = sscanf(buf, "%u", &input); 389 ret = sscanf(buf, "%u", &input);
375 390
376 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) 391 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
377 return -EINVAL; 392 return -EINVAL;
378 od_tuners.sampling_down_factor = input; 393 od_tuners->sampling_down_factor = input;
379 394
380 /* Reset down sampling multiplier in case it was active */ 395 /* Reset down sampling multiplier in case it was active */
381 for_each_online_cpu(j) { 396 for_each_online_cpu(j) {
@@ -386,9 +401,10 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
386 return count; 401 return count;
387} 402}
388 403
389static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, 404static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
390 const char *buf, size_t count) 405 size_t count)
391{ 406{
407 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
392 unsigned int input; 408 unsigned int input;
393 int ret; 409 int ret;
394 410
@@ -401,18 +417,18 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
401 if (input > 1) 417 if (input > 1)
402 input = 1; 418 input = 1;
403 419
404 if (input == od_tuners.ignore_nice) { /* nothing to do */ 420 if (input == od_tuners->ignore_nice) { /* nothing to do */
405 return count; 421 return count;
406 } 422 }
407 od_tuners.ignore_nice = input; 423 od_tuners->ignore_nice = input;
408 424
409 /* we need to re-evaluate prev_cpu_idle */ 425 /* we need to re-evaluate prev_cpu_idle */
410 for_each_online_cpu(j) { 426 for_each_online_cpu(j) {
411 struct od_cpu_dbs_info_s *dbs_info; 427 struct od_cpu_dbs_info_s *dbs_info;
412 dbs_info = &per_cpu(od_cpu_dbs_info, j); 428 dbs_info = &per_cpu(od_cpu_dbs_info, j);
413 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, 429 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
414 &dbs_info->cdbs.prev_cpu_wall); 430 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
415 if (od_tuners.ignore_nice) 431 if (od_tuners->ignore_nice)
416 dbs_info->cdbs.prev_cpu_nice = 432 dbs_info->cdbs.prev_cpu_nice =
417 kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 433 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
418 434
@@ -420,9 +436,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
420 return count; 436 return count;
421} 437}
422 438
423static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, 439static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
424 const char *buf, size_t count) 440 size_t count)
425{ 441{
442 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
426 unsigned int input; 443 unsigned int input;
427 int ret; 444 int ret;
428 ret = sscanf(buf, "%u", &input); 445 ret = sscanf(buf, "%u", &input);
@@ -433,68 +450,179 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
433 if (input > 1000) 450 if (input > 1000)
434 input = 1000; 451 input = 1000;
435 452
436 od_tuners.powersave_bias = input; 453 od_tuners->powersave_bias = input;
437 ondemand_powersave_bias_init(); 454 ondemand_powersave_bias_init();
438 return count; 455 return count;
439} 456}
440 457
441show_one(od, sampling_rate, sampling_rate); 458show_store_one(od, sampling_rate);
442show_one(od, io_is_busy, io_is_busy); 459show_store_one(od, io_is_busy);
443show_one(od, up_threshold, up_threshold); 460show_store_one(od, up_threshold);
444show_one(od, sampling_down_factor, sampling_down_factor); 461show_store_one(od, sampling_down_factor);
445show_one(od, ignore_nice_load, ignore_nice); 462show_store_one(od, ignore_nice);
446show_one(od, powersave_bias, powersave_bias); 463show_store_one(od, powersave_bias);
447 464declare_show_sampling_rate_min(od);
448define_one_global_rw(sampling_rate); 465
449define_one_global_rw(io_is_busy); 466gov_sys_pol_attr_rw(sampling_rate);
450define_one_global_rw(up_threshold); 467gov_sys_pol_attr_rw(io_is_busy);
451define_one_global_rw(sampling_down_factor); 468gov_sys_pol_attr_rw(up_threshold);
452define_one_global_rw(ignore_nice_load); 469gov_sys_pol_attr_rw(sampling_down_factor);
453define_one_global_rw(powersave_bias); 470gov_sys_pol_attr_rw(ignore_nice);
454define_one_global_ro(sampling_rate_min); 471gov_sys_pol_attr_rw(powersave_bias);
455 472gov_sys_pol_attr_ro(sampling_rate_min);
456static struct attribute *dbs_attributes[] = { 473
457 &sampling_rate_min.attr, 474static struct attribute *dbs_attributes_gov_sys[] = {
458 &sampling_rate.attr, 475 &sampling_rate_min_gov_sys.attr,
459 &up_threshold.attr, 476 &sampling_rate_gov_sys.attr,
460 &sampling_down_factor.attr, 477 &up_threshold_gov_sys.attr,
461 &ignore_nice_load.attr, 478 &sampling_down_factor_gov_sys.attr,
462 &powersave_bias.attr, 479 &ignore_nice_gov_sys.attr,
463 &io_is_busy.attr, 480 &powersave_bias_gov_sys.attr,
481 &io_is_busy_gov_sys.attr,
482 NULL
483};
484
485static struct attribute_group od_attr_group_gov_sys = {
486 .attrs = dbs_attributes_gov_sys,
487 .name = "ondemand",
488};
489
490static struct attribute *dbs_attributes_gov_pol[] = {
491 &sampling_rate_min_gov_pol.attr,
492 &sampling_rate_gov_pol.attr,
493 &up_threshold_gov_pol.attr,
494 &sampling_down_factor_gov_pol.attr,
495 &ignore_nice_gov_pol.attr,
496 &powersave_bias_gov_pol.attr,
497 &io_is_busy_gov_pol.attr,
464 NULL 498 NULL
465}; 499};
466 500
467static struct attribute_group od_attr_group = { 501static struct attribute_group od_attr_group_gov_pol = {
468 .attrs = dbs_attributes, 502 .attrs = dbs_attributes_gov_pol,
469 .name = "ondemand", 503 .name = "ondemand",
470}; 504};
471 505
472/************************** sysfs end ************************/ 506/************************** sysfs end ************************/
473 507
508static int od_init(struct dbs_data *dbs_data)
509{
510 struct od_dbs_tuners *tuners;
511 u64 idle_time;
512 int cpu;
513
514 tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
515 if (!tuners) {
516 pr_err("%s: kzalloc failed\n", __func__);
517 return -ENOMEM;
518 }
519
520 cpu = get_cpu();
521 idle_time = get_cpu_idle_time_us(cpu, NULL);
522 put_cpu();
523 if (idle_time != -1ULL) {
524 /* Idle micro accounting is supported. Use finer thresholds */
525 tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
526 tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
527 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
528 /*
529 * In nohz/micro accounting case we set the minimum frequency
530 * not depending on HZ, but fixed (very low). The deferred
531 * timer might skip some samples if idle/sleeping as needed.
532 */
533 dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
534 } else {
535 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
536 tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
537 DEF_FREQUENCY_DOWN_DIFFERENTIAL;
538
539 /* For correct statistics, we need 10 ticks for each measure */
540 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
541 jiffies_to_usecs(10);
542 }
543
544 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
545 tuners->ignore_nice = 0;
546 tuners->powersave_bias = 0;
547 tuners->io_is_busy = should_io_be_busy();
548
549 dbs_data->tuners = tuners;
550 pr_info("%s: tuners %p\n", __func__, tuners);
551 mutex_init(&dbs_data->mutex);
552 return 0;
553}
554
555static void od_exit(struct dbs_data *dbs_data)
556{
557 kfree(dbs_data->tuners);
558}
559
474define_get_cpu_dbs_routines(od_cpu_dbs_info); 560define_get_cpu_dbs_routines(od_cpu_dbs_info);
475 561
476static struct od_ops od_ops = { 562static struct od_ops od_ops = {
477 .io_busy = should_io_be_busy,
478 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, 563 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
479 .powersave_bias_target = powersave_bias_target, 564 .powersave_bias_target = generic_powersave_bias_target,
480 .freq_increase = dbs_freq_increase, 565 .freq_increase = dbs_freq_increase,
481}; 566};
482 567
483static struct dbs_data od_dbs_data = { 568static struct common_dbs_data od_dbs_cdata = {
484 .governor = GOV_ONDEMAND, 569 .governor = GOV_ONDEMAND,
485 .attr_group = &od_attr_group, 570 .attr_group_gov_sys = &od_attr_group_gov_sys,
486 .tuners = &od_tuners, 571 .attr_group_gov_pol = &od_attr_group_gov_pol,
487 .get_cpu_cdbs = get_cpu_cdbs, 572 .get_cpu_cdbs = get_cpu_cdbs,
488 .get_cpu_dbs_info_s = get_cpu_dbs_info_s, 573 .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
489 .gov_dbs_timer = od_dbs_timer, 574 .gov_dbs_timer = od_dbs_timer,
490 .gov_check_cpu = od_check_cpu, 575 .gov_check_cpu = od_check_cpu,
491 .gov_ops = &od_ops, 576 .gov_ops = &od_ops,
577 .init = od_init,
578 .exit = od_exit,
492}; 579};
493 580
581static void od_set_powersave_bias(unsigned int powersave_bias)
582{
583 struct cpufreq_policy *policy;
584 struct dbs_data *dbs_data;
585 struct od_dbs_tuners *od_tuners;
586 unsigned int cpu;
587 cpumask_t done;
588
589 cpumask_clear(&done);
590
591 get_online_cpus();
592 for_each_online_cpu(cpu) {
593 if (cpumask_test_cpu(cpu, &done))
594 continue;
595
596 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
597 dbs_data = policy->governor_data;
598 od_tuners = dbs_data->tuners;
599 od_tuners->powersave_bias = powersave_bias;
600
601 cpumask_or(&done, &done, policy->cpus);
602 }
603 put_online_cpus();
604}
605
606void od_register_powersave_bias_handler(unsigned int (*f)
607 (struct cpufreq_policy *, unsigned int, unsigned int),
608 unsigned int powersave_bias)
609{
610 od_ops.powersave_bias_target = f;
611 od_set_powersave_bias(powersave_bias);
612}
613EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
614
615void od_unregister_powersave_bias_handler(void)
616{
617 od_ops.powersave_bias_target = generic_powersave_bias_target;
618 od_set_powersave_bias(0);
619}
620EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
621
494static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, 622static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
495 unsigned int event) 623 unsigned int event)
496{ 624{
497 return cpufreq_governor_dbs(&od_dbs_data, policy, event); 625 return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
498} 626}
499 627
500#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND 628#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
@@ -509,29 +637,6 @@ struct cpufreq_governor cpufreq_gov_ondemand = {
509 637
510static int __init cpufreq_gov_dbs_init(void) 638static int __init cpufreq_gov_dbs_init(void)
511{ 639{
512 u64 idle_time;
513 int cpu = get_cpu();
514
515 mutex_init(&od_dbs_data.mutex);
516 idle_time = get_cpu_idle_time_us(cpu, NULL);
517 put_cpu();
518 if (idle_time != -1ULL) {
519 /* Idle micro accounting is supported. Use finer thresholds */
520 od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
521 od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
522 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
523 /*
524 * In nohz/micro accounting case we set the minimum frequency
525 * not depending on HZ, but fixed (very low). The deferred
526 * timer might skip some samples if idle/sleeping as needed.
527 */
528 od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
529 } else {
530 /* For correct statistics, we need 10 ticks for each measure */
531 od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
532 jiffies_to_usecs(10);
533 }
534
535 return cpufreq_register_governor(&cpufreq_gov_ondemand); 640 return cpufreq_register_governor(&cpufreq_gov_ondemand);
536} 641}
537 642
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
new file mode 100644
index 000000000000..ee142c490575
--- /dev/null
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -0,0 +1,146 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <hwregs/reg_rdwr.h>
6#include <hwregs/clkgen_defs.h>
7#include <hwregs/ddr2_defs.h>
8
/* Forward declaration: the SDRAM-refresh notifier is defined below,
 * after the driver callbacks it is registered alongside. */
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
			 void *data);

static struct notifier_block cris_sdram_freq_notifier_block = {
	.notifier_call = cris_sdram_freq_notifier
};

/* Two operating points: 6 MHz (PLL bypassed) and 200 MHz (PLL on).
 * First field is the raw selector index, second the frequency in kHz. */
static struct cpufreq_frequency_table cris_freq_table[] = {
	{0x01, 6000},
	{0x02, 200000},
	{0, CPUFREQ_TABLE_END},
};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_clkgen_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
31 unsigned int state)
32{
33 struct cpufreq_freqs freqs;
34 reg_clkgen_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
36
37 freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
38 freqs.new = cris_freq_table[state].frequency;
39
40 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
41
42 local_irq_disable();
43
44 /* Even though we may be SMP they will share the same clock
45 * so all settings are made on CPU0. */
46 if (cris_freq_table[state].frequency == 200000)
47 clk_ctrl.pll = 1;
48 else
49 clk_ctrl.pll = 0;
50 REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
51
52 local_irq_enable();
53
54 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
55};
56
57static int cris_freq_verify(struct cpufreq_policy *policy)
58{
59 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
60}
61
62static int cris_freq_target(struct cpufreq_policy *policy,
63 unsigned int target_freq,
64 unsigned int relation)
65{
66 unsigned int newstate = 0;
67
68 if (cpufreq_frequency_table_target(policy, cris_freq_table,
69 target_freq, relation, &newstate))
70 return -EINVAL;
71
72 cris_freq_set_cpu_state(policy, newstate);
73
74 return 0;
75}
76
77static int cris_freq_cpu_init(struct cpufreq_policy *policy)
78{
79 int result;
80
81 /* cpuinfo and default policy values */
82 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
83 policy->cur = cris_freq_get_cpu_frequency(0);
84
85 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
86 if (result)
87 return (result);
88
89 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
90
91 return 0;
92}
93
94
95static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
96{
97 cpufreq_frequency_table_put_attr(policy->cpu);
98 return 0;
99}
100
101
/* sysfs attributes: export scaling_available_frequencies. */
static struct freq_attr *cris_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver operations for the ARTPEC-3 clkgen PLL switch. */
static struct cpufreq_driver cris_freq_driver = {
	.get = cris_freq_get_cpu_frequency,
	.verify = cris_freq_verify,
	.target = cris_freq_target,
	.init = cris_freq_cpu_init,
	.exit = cris_freq_cpu_exit,
	.name = "cris_freq",
	.owner = THIS_MODULE,
	.attr = cris_freq_attr,
};
117
118static int __init cris_freq_init(void)
119{
120 int ret;
121 ret = cpufreq_register_driver(&cris_freq_driver);
122 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
123 CPUFREQ_TRANSITION_NOTIFIER);
124 return ret;
125}
126
127static int
128cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
129 void *data)
130{
131 int i;
132 struct cpufreq_freqs *freqs = data;
133 if (val == CPUFREQ_PRECHANGE) {
134 reg_ddr2_rw_cfg cfg =
135 REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
136 cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
137
138 if (freqs->new == 200000)
139 for (i = 0; i < 50000; i++);
140 REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
141 }
142 return 0;
143}
144
145
146module_init(cris_freq_init);
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
new file mode 100644
index 000000000000..12952235d5db
--- /dev/null
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -0,0 +1,142 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <arch/hwregs/reg_rdwr.h>
6#include <arch/hwregs/config_defs.h>
7#include <arch/hwregs/bif_core_defs.h>
8
/* Forward declaration: the SDRAM-timing notifier is defined below,
 * after the driver callbacks it is registered alongside. */
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
			 void *data);

static struct notifier_block cris_sdram_freq_notifier_block = {
	.notifier_call = cris_sdram_freq_notifier
};

/* Two operating points: 6 MHz (PLL bypassed) and 200 MHz (PLL on).
 * First field is the raw selector index, second the frequency in kHz. */
static struct cpufreq_frequency_table cris_freq_table[] = {
	{0x01, 6000},
	{0x02, 200000},
	{0, CPUFREQ_TABLE_END},
};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_config_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
31 unsigned int state)
32{
33 struct cpufreq_freqs freqs;
34 reg_config_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
36
37 freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
38 freqs.new = cris_freq_table[state].frequency;
39
40 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
41
42 local_irq_disable();
43
44 /* Even though we may be SMP they will share the same clock
45 * so all settings are made on CPU0. */
46 if (cris_freq_table[state].frequency == 200000)
47 clk_ctrl.pll = 1;
48 else
49 clk_ctrl.pll = 0;
50 REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
51
52 local_irq_enable();
53
54 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
55};
56
57static int cris_freq_verify(struct cpufreq_policy *policy)
58{
59 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
60}
61
62static int cris_freq_target(struct cpufreq_policy *policy,
63 unsigned int target_freq, unsigned int relation)
64{
65 unsigned int newstate = 0;
66
67 if (cpufreq_frequency_table_target
68 (policy, cris_freq_table, target_freq, relation, &newstate))
69 return -EINVAL;
70
71 cris_freq_set_cpu_state(policy, newstate);
72
73 return 0;
74}
75
76static int cris_freq_cpu_init(struct cpufreq_policy *policy)
77{
78 int result;
79
80 /* cpuinfo and default policy values */
81 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
82 policy->cur = cris_freq_get_cpu_frequency(0);
83
84 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
85 if (result)
86 return (result);
87
88 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
89
90 return 0;
91}
92
93static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
94{
95 cpufreq_frequency_table_put_attr(policy->cpu);
96 return 0;
97}
98
/* sysfs attributes: export scaling_available_frequencies. */
static struct freq_attr *cris_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver operations for the EtraxFS config-register PLL switch. */
static struct cpufreq_driver cris_freq_driver = {
	.get = cris_freq_get_cpu_frequency,
	.verify = cris_freq_verify,
	.target = cris_freq_target,
	.init = cris_freq_cpu_init,
	.exit = cris_freq_cpu_exit,
	.name = "cris_freq",
	.owner = THIS_MODULE,
	.attr = cris_freq_attr,
};
114
115static int __init cris_freq_init(void)
116{
117 int ret;
118 ret = cpufreq_register_driver(&cris_freq_driver);
119 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
120 CPUFREQ_TRANSITION_NOTIFIER);
121 return ret;
122}
123
/*
 * CPUFREQ_TRANSITION_NOTIFIER callback: before the CPU clock changes,
 * adjust the bif_core SDRAM timing for the new rate (cpd cleared at
 * 200 MHz, set at 6 MHz).
 */
static int
cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	int i;
	struct cpufreq_freqs *freqs = data;
	if (val == CPUFREQ_PRECHANGE) {
		reg_bif_core_rw_sdram_timing timing =
			REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
		timing.cpd = (freqs->new == 200000 ? 0 : 1);

		if (freqs->new == 200000)
			/* Crude settle delay.  NOTE(review): 'i' is not
			 * volatile, so the compiler may elide this empty
			 * loop entirely — confirm whether a real delay
			 * primitive (udelay) is required here. */
			for (i = 0; i < 50000; i++) ;
		REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
	}
	return 0;
}
141
142module_init(cris_freq_init);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
new file mode 100644
index 000000000000..c33c76c360fa
--- /dev/null
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -0,0 +1,231 @@
1/*
2 * CPU frequency scaling for DaVinci
3 *
4 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
7 *
8 * Copyright (C) 2005 Nokia Corporation
9 * Written by Tony Lindgren <tony@atomide.com>
10 *
11 * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
12 *
13 * Copyright (C) 2007-2008 Texas Instruments, Inc.
14 * Updated to support OMAP3
15 * Rajendra Nayak <rnayak@ti.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 */
21#include <linux/types.h>
22#include <linux/cpufreq.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27#include <linux/export.h>
28
29#include <mach/hardware.h>
30#include <mach/cpufreq.h>
31#include <mach/common.h>
32
/* Driver-wide state: platform device handle, the ARM core clock, and
 * the optional async (peripheral) clock whose rate is restored after
 * each CPU frequency change. */
struct davinci_cpufreq {
	struct device *dev;
	struct clk *armclk;
	struct clk *asyncclk;	/* NULL when the "async" clock is absent */
	unsigned long asyncrate;	/* cached async clock rate (Hz) */
};
static struct davinci_cpufreq cpufreq;
40
/*
 * cpufreq ->verify hook.  With a platform frequency table we delegate to
 * the table helper.  Otherwise: clamp policy->min/max to the cpuinfo
 * limits, snap both bounds to rates the ARM clock can actually produce,
 * then clamp again because clk_round_rate() may have pushed a bound
 * outside the cpuinfo range.  The call order matters.
 */
static int davinci_verify_speed(struct cpufreq_policy *policy)
{
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct cpufreq_frequency_table *freq_table = pdata->freq_table;
	struct clk *armclk = cpufreq.armclk;

	if (freq_table)
		return cpufreq_frequency_table_verify(policy, freq_table);

	/* Only CPU0 is managed on DaVinci. */
	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	/* Policy limits are kHz; clk API works in Hz. */
	policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
	policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}
62
63static unsigned int davinci_getspeed(unsigned int cpu)
64{
65 if (cpu)
66 return 0;
67
68 return clk_get_rate(cpufreq.armclk) / 1000;
69}
70
/*
 * cpufreq ->target hook: move the ARM clock to the table entry matching
 * @target_freq under @relation, raising the core voltage before an
 * upward step and lowering it after a downward step.
 */
static int davinci_target(struct cpufreq_policy *policy,
				unsigned int target_freq, unsigned int relation)
{
	int ret = 0;
	unsigned int idx;
	struct cpufreq_freqs freqs;
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct clk *armclk = cpufreq.armclk;

	freqs.old = davinci_getspeed(0);
	freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;

	/* Nothing to do if we are already at the rounded target rate. */
	if (freqs.old == freqs.new)
		return ret;

	dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);

	ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
					     freqs.new, relation, &idx);
	if (ret)
		return -EINVAL;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	/* if moving to higher frequency, up the voltage beforehand */
	if (pdata->set_voltage && freqs.new > freqs.old) {
		ret = pdata->set_voltage(idx);
		if (ret)
			goto out;
	}

	/* NOTE(review): the table *index* (not a rate in Hz) is passed to
	 * clk_set_rate() here — presumably the DaVinci clock framework
	 * special-cases the ARM clock to accept an index; confirm against
	 * mach-davinci clock code before changing. */
	ret = clk_set_rate(armclk, idx);
	if (ret)
		goto out;

	/* Keep the async domain at its original rate. */
	if (cpufreq.asyncclk) {
		ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
		if (ret)
			goto out;
	}

	/* if moving to lower freq, lower the voltage after lowering freq */
	if (pdata->set_voltage && freqs.new < freqs.old)
		pdata->set_voltage(idx);

out:
	/* POSTCHANGE is issued even on failure, pairing the PRECHANGE. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}
121
/*
 * cpufreq ->init hook (CPU0 only): run optional platform init, record
 * the current rate, populate cpuinfo limits from the platform frequency
 * table, and expose the table in sysfs.
 */
static int davinci_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct cpufreq_frequency_table *freq_table = pdata->freq_table;

	if (policy->cpu != 0)
		return -EINVAL;

	/* Finish platform specific initialization */
	if (pdata->init) {
		result = pdata->init();
		if (result)
			return result;
	}

	policy->cur = davinci_getspeed(0);

	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (result) {
		pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
				__func__);
		return result;
	}

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	/*
	 * Time measurement across the target() function yields ~1500-1800us
	 * time taken with no drivers on notification list.
	 * Setting the latency to 2000 us to accommodate addition of drivers
	 * to pre/post change notification list.
	 */
	policy->cpuinfo.transition_latency = 2000 * 1000;
	return 0;
}
158
159static int davinci_cpu_exit(struct cpufreq_policy *policy)
160{
161 cpufreq_frequency_table_put_attr(policy->cpu);
162 return 0;
163}
164
/* sysfs attributes: export scaling_available_frequencies. */
static struct freq_attr *davinci_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver ops; CPUFREQ_STICKY keeps the driver registered even
 * when no CPU comes up with it at registration time. */
static struct cpufreq_driver davinci_driver = {
	.flags = CPUFREQ_STICKY,
	.verify = davinci_verify_speed,
	.target = davinci_target,
	.get = davinci_getspeed,
	.init = davinci_cpu_init,
	.exit = davinci_cpu_exit,
	.name = "davinci",
	.attr = davinci_cpufreq_attr,
};
180
181static int __init davinci_cpufreq_probe(struct platform_device *pdev)
182{
183 struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
184 struct clk *asyncclk;
185
186 if (!pdata)
187 return -EINVAL;
188 if (!pdata->freq_table)
189 return -EINVAL;
190
191 cpufreq.dev = &pdev->dev;
192
193 cpufreq.armclk = clk_get(NULL, "arm");
194 if (IS_ERR(cpufreq.armclk)) {
195 dev_err(cpufreq.dev, "Unable to get ARM clock\n");
196 return PTR_ERR(cpufreq.armclk);
197 }
198
199 asyncclk = clk_get(cpufreq.dev, "async");
200 if (!IS_ERR(asyncclk)) {
201 cpufreq.asyncclk = asyncclk;
202 cpufreq.asyncrate = clk_get_rate(asyncclk);
203 }
204
205 return cpufreq_register_driver(&davinci_driver);
206}
207
208static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
209{
210 clk_put(cpufreq.armclk);
211
212 if (cpufreq.asyncclk)
213 clk_put(cpufreq.asyncclk);
214
215 return cpufreq_unregister_driver(&davinci_driver);
216}
217
/* No .probe here: the __init probe routine is bound once via
 * platform_driver_probe() at init time. */
static struct platform_driver davinci_cpufreq_driver = {
	.driver = {
		.name	 = "cpufreq-davinci",
		.owner	 = THIS_MODULE,
	},
	.remove = __exit_p(davinci_cpufreq_remove),
};
225
226int __init davinci_cpufreq_init(void)
227{
228 return platform_driver_probe(&davinci_cpufreq_driver,
229 davinci_cpufreq_probe);
230}
231
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 72f0c3efa76e..6ec6539ae041 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -37,12 +37,6 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
37 unsigned int idx; 37 unsigned int idx;
38 int ret; 38 int ret;
39 39
40 /* scale the target frequency to one of the extremes supported */
41 if (target_freq < policy->cpuinfo.min_freq)
42 target_freq = policy->cpuinfo.min_freq;
43 if (target_freq > policy->cpuinfo.max_freq)
44 target_freq = policy->cpuinfo.max_freq;
45
46 /* Lookup the next frequency */ 40 /* Lookup the next frequency */
47 if (cpufreq_frequency_table_target(policy, freq_table, target_freq, 41 if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
48 relation, &idx)) 42 relation, &idx))
@@ -55,8 +49,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
55 return 0; 49 return 0;
56 50
57 /* pre-change notification */ 51 /* pre-change notification */
58 for_each_cpu(freqs.cpu, policy->cpus) 52 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
59 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
60 53
61 /* update armss clk frequency */ 54 /* update armss clk frequency */
62 ret = clk_set_rate(armss_clk, freqs.new * 1000); 55 ret = clk_set_rate(armss_clk, freqs.new * 1000);
@@ -68,8 +61,7 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
68 } 61 }
69 62
70 /* post change notification */ 63 /* post change notification */
71 for_each_cpu(freqs.cpu, policy->cpus) 64 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
72 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
73 65
74 return 0; 66 return 0;
75} 67}
@@ -79,15 +71,15 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
79 int i = 0; 71 int i = 0;
80 unsigned long freq = clk_get_rate(armss_clk) / 1000; 72 unsigned long freq = clk_get_rate(armss_clk) / 1000;
81 73
82 while (freq_table[i].frequency != CPUFREQ_TABLE_END) { 74 /* The value is rounded to closest frequency in the defined table. */
83 if (freq <= freq_table[i].frequency) 75 while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
76 if (freq < freq_table[i].frequency +
77 (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
84 return freq_table[i].frequency; 78 return freq_table[i].frequency;
85 i++; 79 i++;
86 } 80 }
87 81
88 /* We could not find a corresponding frequency. */ 82 return freq_table[i].frequency;
89 pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
90 return 0;
91} 83}
92 84
93static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) 85static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 3fffbe6025cd..37380fb92621 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -104,7 +104,7 @@ static unsigned int eps_get(unsigned int cpu)
104} 104}
105 105
106static int eps_set_state(struct eps_cpu_data *centaur, 106static int eps_set_state(struct eps_cpu_data *centaur,
107 unsigned int cpu, 107 struct cpufreq_policy *policy,
108 u32 dest_state) 108 u32 dest_state)
109{ 109{
110 struct cpufreq_freqs freqs; 110 struct cpufreq_freqs freqs;
@@ -112,10 +112,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
112 int err = 0; 112 int err = 0;
113 int i; 113 int i;
114 114
115 freqs.old = eps_get(cpu); 115 freqs.old = eps_get(policy->cpu);
116 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); 116 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
117 freqs.cpu = cpu; 117 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
118 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
119 118
120 /* Wait while CPU is busy */ 119 /* Wait while CPU is busy */
121 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 120 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
@@ -162,7 +161,7 @@ postchange:
162 current_multiplier); 161 current_multiplier);
163 } 162 }
164#endif 163#endif
165 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
166 return err; 165 return err;
167} 166}
168 167
@@ -190,7 +189,7 @@ static int eps_target(struct cpufreq_policy *policy,
190 189
191 /* Make frequency transition */ 190 /* Make frequency transition */
192 dest_state = centaur->freq_table[newstate].index & 0xffff; 191 dest_state = centaur->freq_table[newstate].index & 0xffff;
193 ret = eps_set_state(centaur, cpu, dest_state); 192 ret = eps_set_state(centaur, policy, dest_state);
194 if (ret) 193 if (ret)
195 printk(KERN_ERR "eps: Timeout!\n"); 194 printk(KERN_ERR "eps: Timeout!\n");
196 return ret; 195 return ret;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 960671fd3d7e..658d860344b0 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -117,15 +117,15 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
117 * There is no return value. 117 * There is no return value.
118 */ 118 */
119 119
120static void elanfreq_set_cpu_state(unsigned int state) 120static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
121 unsigned int state)
121{ 122{
122 struct cpufreq_freqs freqs; 123 struct cpufreq_freqs freqs;
123 124
124 freqs.old = elanfreq_get_cpu_frequency(0); 125 freqs.old = elanfreq_get_cpu_frequency(0);
125 freqs.new = elan_multiplier[state].clock; 126 freqs.new = elan_multiplier[state].clock;
126 freqs.cpu = 0; /* elanfreq.c is UP only driver */
127 127
128 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 128 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
129 129
130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n", 130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
131 elan_multiplier[state].clock); 131 elan_multiplier[state].clock);
@@ -161,7 +161,7 @@ static void elanfreq_set_cpu_state(unsigned int state)
161 udelay(10000); 161 udelay(10000);
162 local_irq_enable(); 162 local_irq_enable();
163 163
164 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
165}; 165};
166 166
167 167
@@ -188,7 +188,7 @@ static int elanfreq_target(struct cpufreq_policy *policy,
188 target_freq, relation, &newstate)) 188 target_freq, relation, &newstate))
189 return -EINVAL; 189 return -EINVAL;
190 190
191 elanfreq_set_cpu_state(newstate); 191 elanfreq_set_cpu_state(policy, newstate);
192 192
193 return 0; 193 return 0;
194} 194}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 78057a357ddb..475b4f607f0d 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -70,7 +70,6 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
70 70
71 freqs.old = policy->cur; 71 freqs.old = policy->cur;
72 freqs.new = target_freq; 72 freqs.new = target_freq;
73 freqs.cpu = policy->cpu;
74 73
75 if (freqs.new == freqs.old) 74 if (freqs.new == freqs.old)
76 goto out; 75 goto out;
@@ -105,8 +104,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
105 } 104 }
106 arm_volt = volt_table[index]; 105 arm_volt = volt_table[index];
107 106
108 for_each_cpu(freqs.cpu, policy->cpus) 107 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
109 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
110 108
111 /* When the new frequency is higher than current frequency */ 109 /* When the new frequency is higher than current frequency */
112 if ((freqs.new > freqs.old) && !safe_arm_volt) { 110 if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -131,8 +129,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
131 129
132 exynos_info->set_freq(old_index, index); 130 exynos_info->set_freq(old_index, index);
133 131
134 for_each_cpu(freqs.cpu, policy->cpus) 132 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
135 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
136 133
137 /* When the new frequency is lower than current frequency */ 134 /* When the new frequency is lower than current frequency */
138 if ((freqs.new < freqs.old) || 135 if ((freqs.new < freqs.old) ||
@@ -297,7 +294,7 @@ static int __init exynos_cpufreq_init(void)
297 else if (soc_is_exynos5250()) 294 else if (soc_is_exynos5250())
298 ret = exynos5250_cpufreq_init(exynos_info); 295 ret = exynos5250_cpufreq_init(exynos_info);
299 else 296 else
300 pr_err("%s: CPU type not found\n", __func__); 297 return 0;
301 298
302 if (ret) 299 if (ret)
303 goto err_vdd_arm; 300 goto err_vdd_arm;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
new file mode 100644
index 000000000000..0c74018eda47
--- /dev/null
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Amit Daniel Kachhap <amit.daniel@samsung.com>
6 *
7 * EXYNOS5440 - CPU frequency scaling support
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpufreq.h>
17#include <linux/err.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/opp.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
/* Register definitions: offsets into the XMU power-management register block */
#define XMU_DVFS_CTRL		0x0060	/* global DVFS enable/disable */
#define XMU_PMU_P0_7		0x0064	/* per-P-state divider/voltage words */
#define XMU_C0_3_PSTATE		0x0090	/* per-CPU current/requested P-state */
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4	/* status of the latest P-state change */
#define XMU_PMUEVTEN		0x00d0	/* PMU event enable */
#define XMU_PMUIRQEN		0x00d4	/* PMU interrupt enable */
#define XMU_PMUIRQ		0x00d8	/* PMU interrupt status (write to ack) */

/* PMU mask and shift definitions */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

/* Fields of the XMU_PMU_P0_7 per-P-state words */
#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

/* Fields of the XMU_C0_3_PSTATE per-CPU registers */
#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT	0

/* some constant values for clock divider calculation (in MHz) */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

/* PMIC voltage step indices; smaller index means higher voltage (see init_div_table) */
#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28

/* EMA (margin adjustment) codes per voltage band */
#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

#define DIV_TAB_MAX		2
/* frequency unit is 20MHZ */
#define FREQ_UNIT		20
#define MAX_VOLTAGE		1550000		/* In microvolt */
#define VOLTAGE_STEP		12500		/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000		/* default transition latency, ns */

enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
/*
 * NOTE(review): the enum declares up to L9 but LEVEL_END is (L7 + 1);
 * looks intentional (only 8 usable levels) — confirm against HW docs.
 */
#define CPUFREQ_LEVEL_END	(L7 + 1)

/* Driver-wide state; this driver supports exactly one DVFS instance. */
struct exynos_dvfs_data {
	void __iomem *base;		/* mapped XMU register block */
	struct resource *mem;
	int irq;			/* PSTATE-changed interrupt */
	struct clk *cpu_clk;		/* "armclk", used to read boot frequency */
	unsigned int cur_frequency;	/* current frequency, kHz */
	unsigned int latency;		/* transition latency, ns */
	struct cpufreq_frequency_table *freq_table;	/* descending order */
	unsigned int freq_count;	/* number of entries in freq_table */
	struct device *dev;
	bool dvfs_enabled;		/* set once probe completes successfully */
	struct work_struct irq_work;	/* bottom half of the PSTATE irq */
};

static struct exynos_dvfs_data *dvfs_info;
/* Serializes frequency transitions and access to the shared freqs struct. */
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;
115
/*
 * Program one XMU_PMU_P0_7 word per frequency-table entry: clock
 * dividers, EMA codes and the PMIC voltage step for that P-state.
 * Returns 0 on success or a negative errno if an OPP lookup fails.
 */
static int init_div_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	int i = 0;
	struct opp *opp;

	/* OPP lookups must run under RCU; opp pointers are only valid inside. */
	rcu_read_lock();
	for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {

		opp = opp_find_freq_exact(dvfs_info->dev,
					freq_tbl[i].frequency * 1000, true);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u KHZ\n",
				freq_tbl[i].frequency);
			return PTR_ERR(opp);
		}

		freq = freq_tbl[i].frequency / 1000; /* In MHZ */
		/* Each divider field holds freq / max-frequency-of-that-clock. */
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* Calculate EMA */
		/*
		 * volt_id counts VOLTAGE_STEPs down from MAX_VOLTAGE, so a
		 * *smaller* volt_id means a *higher* voltage; pick the EMA
		 * codes for the matching voltage band.
		 */
		volt_id = opp_get_voltage(opp);
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		/* One 32-bit P-state word per table index. */
		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
	}

	rcu_read_unlock();
	return 0;
}
167
/*
 * Enable hardware DVFS: arm the PSTATE-changed event/interrupt, seed
 * every CPU's requested P-state with the current boot frequency, then
 * flip the global enable bit. The register write order matters.
 */
static void exynos_enable_dvfs(void)
{
	unsigned int tmp, i, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	/* Disable DVFS */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Set initial performance index */
	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (freq_table[i].frequency == dvfs_info->cur_frequency)
			break;

	if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Assign the highest frequency (index 0 after the descending sort) */
		i = 0;
		dvfs_info->cur_frequency = freq_table[i].frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
						dvfs_info->cur_frequency);

	/* Request P-state @i on every CPU slot's PSTATE register. */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
				dvfs_info->base + XMU_DVFS_CTRL);
}
211
212static int exynos_verify_speed(struct cpufreq_policy *policy)
213{
214 return cpufreq_frequency_table_verify(policy,
215 dvfs_info->freq_table);
216}
217
218static unsigned int exynos_getspeed(unsigned int cpu)
219{
220 return dvfs_info->cur_frequency;
221}
222
223static int exynos_target(struct cpufreq_policy *policy,
224 unsigned int target_freq,
225 unsigned int relation)
226{
227 unsigned int index, tmp;
228 int ret = 0, i;
229 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
230
231 mutex_lock(&cpufreq_lock);
232
233 ret = cpufreq_frequency_table_target(policy, freq_table,
234 target_freq, relation, &index);
235 if (ret)
236 goto out;
237
238 freqs.old = dvfs_info->cur_frequency;
239 freqs.new = freq_table[index].frequency;
240
241 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
242
243 /* Set the target frequency in all C0_3_PSTATE register */
244 for_each_cpu(i, policy->cpus) {
245 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
246 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
247 tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
248
249 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
250 }
251out:
252 mutex_unlock(&cpufreq_lock);
253 return ret;
254}
255
256static void exynos_cpufreq_work(struct work_struct *work)
257{
258 unsigned int cur_pstate, index;
259 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
260 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
261
262 /* Ensure we can access cpufreq structures */
263 if (unlikely(dvfs_info->dvfs_enabled == false))
264 goto skip_work;
265
266 mutex_lock(&cpufreq_lock);
267 freqs.old = dvfs_info->cur_frequency;
268
269 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
270 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
271 index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
272 else
273 index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
274
275 if (likely(index < dvfs_info->freq_count)) {
276 freqs.new = freq_table[index].frequency;
277 dvfs_info->cur_frequency = freqs.new;
278 } else {
279 dev_crit(dvfs_info->dev, "New frequency out of range\n");
280 freqs.new = dvfs_info->cur_frequency;
281 }
282 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
283
284 cpufreq_cpu_put(policy);
285 mutex_unlock(&cpufreq_lock);
286skip_work:
287 enable_irq(dvfs_info->irq);
288}
289
290static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
291{
292 unsigned int tmp;
293
294 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
295 if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
296 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
297 disable_irq_nosync(irq);
298 schedule_work(&dvfs_info->irq_work);
299 }
300 return IRQ_HANDLED;
301}
302
303static void exynos_sort_descend_freq_table(void)
304{
305 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
306 int i = 0, index;
307 unsigned int tmp_freq;
308 /*
309 * Exynos5440 clock controller state logic expects the cpufreq table to
310 * be in descending order. But the OPP library constructs the table in
311 * ascending order. So to make the table descending we just need to
312 * swap the i element with the N - i element.
313 */
314 for (i = 0; i < dvfs_info->freq_count / 2; i++) {
315 index = dvfs_info->freq_count - i - 1;
316 tmp_freq = freq_tbl[i].frequency;
317 freq_tbl[i].frequency = freq_tbl[index].frequency;
318 freq_tbl[index].frequency = tmp_freq;
319 }
320}
321
322static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
323{
324 int ret;
325
326 ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
327 if (ret) {
328 dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
329 return ret;
330 }
331
332 policy->cur = dvfs_info->cur_frequency;
333 policy->cpuinfo.transition_latency = dvfs_info->latency;
334 cpumask_setall(policy->cpus);
335
336 cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
337
338 return 0;
339}
340
static struct cpufreq_driver exynos_driver = {
	/* CPUFREQ_STICKY: stay registered even if ->init fails on some CPUs. */
	.flags		= CPUFREQ_STICKY,
	.verify		= exynos_verify_speed,
	.target		= exynos_target,
	.get		= exynos_getspeed,
	.init		= exynos_cpufreq_cpu_init,
	.name		= CPUFREQ_NAME,
};

/* Device-tree binding for the platform driver below. */
static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
357
358static int exynos_cpufreq_probe(struct platform_device *pdev)
359{
360 int ret = -EINVAL;
361 struct device_node *np;
362 struct resource res;
363
364 np = pdev->dev.of_node;
365 if (!np)
366 return -ENODEV;
367
368 dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
369 if (!dvfs_info) {
370 ret = -ENOMEM;
371 goto err_put_node;
372 }
373
374 dvfs_info->dev = &pdev->dev;
375
376 ret = of_address_to_resource(np, 0, &res);
377 if (ret)
378 goto err_put_node;
379
380 dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
381 if (IS_ERR(dvfs_info->base)) {
382 ret = PTR_ERR(dvfs_info->base);
383 goto err_put_node;
384 }
385
386 dvfs_info->irq = irq_of_parse_and_map(np, 0);
387 if (!dvfs_info->irq) {
388 dev_err(dvfs_info->dev, "No cpufreq irq found\n");
389 ret = -ENODEV;
390 goto err_put_node;
391 }
392
393 ret = of_init_opp_table(dvfs_info->dev);
394 if (ret) {
395 dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
396 goto err_put_node;
397 }
398
399 ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
400 if (ret) {
401 dev_err(dvfs_info->dev,
402 "failed to init cpufreq table: %d\n", ret);
403 goto err_put_node;
404 }
405 dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
406 exynos_sort_descend_freq_table();
407
408 if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
409 dvfs_info->latency = DEF_TRANS_LATENCY;
410
411 dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
412 if (IS_ERR(dvfs_info->cpu_clk)) {
413 dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
414 ret = PTR_ERR(dvfs_info->cpu_clk);
415 goto err_free_table;
416 }
417
418 dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
419 if (!dvfs_info->cur_frequency) {
420 dev_err(dvfs_info->dev, "Failed to get clock rate\n");
421 ret = -EINVAL;
422 goto err_free_table;
423 }
424 dvfs_info->cur_frequency /= 1000;
425
426 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
427 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
428 exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
429 CPUFREQ_NAME, dvfs_info);
430 if (ret) {
431 dev_err(dvfs_info->dev, "Failed to register IRQ\n");
432 goto err_free_table;
433 }
434
435 ret = init_div_table();
436 if (ret) {
437 dev_err(dvfs_info->dev, "Failed to initialise div table\n");
438 goto err_free_table;
439 }
440
441 exynos_enable_dvfs();
442 ret = cpufreq_register_driver(&exynos_driver);
443 if (ret) {
444 dev_err(dvfs_info->dev,
445 "%s: failed to register cpufreq driver\n", __func__);
446 goto err_free_table;
447 }
448
449 of_node_put(np);
450 dvfs_info->dvfs_enabled = true;
451 return 0;
452
453err_free_table:
454 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
455err_put_node:
456 of_node_put(np);
457 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
458 return ret;
459}
460
461static int exynos_cpufreq_remove(struct platform_device *pdev)
462{
463 cpufreq_unregister_driver(&exynos_driver);
464 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
465 return 0;
466}
467
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.owner	= THIS_MODULE,
		.of_match_table = exynos_cpufreq_match,
	},
	.probe		= exynos_cpufreq_probe,
	.remove		= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 456bee058fe6..3dfc99b9ca86 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -251,14 +251,13 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
251 * set cpu speed in khz. 251 * set cpu speed in khz.
252 **/ 252 **/
253 253
254static void gx_set_cpuspeed(unsigned int khz) 254static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
255{ 255{
256 u8 suscfg, pmer1; 256 u8 suscfg, pmer1;
257 unsigned int new_khz; 257 unsigned int new_khz;
258 unsigned long flags; 258 unsigned long flags;
259 struct cpufreq_freqs freqs; 259 struct cpufreq_freqs freqs;
260 260
261 freqs.cpu = 0;
262 freqs.old = gx_get_cpuspeed(0); 261 freqs.old = gx_get_cpuspeed(0);
263 262
264 new_khz = gx_validate_speed(khz, &gx_params->on_duration, 263 new_khz = gx_validate_speed(khz, &gx_params->on_duration,
@@ -266,11 +265,9 @@ static void gx_set_cpuspeed(unsigned int khz)
266 265
267 freqs.new = new_khz; 266 freqs.new = new_khz;
268 267
269 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 268 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
270 local_irq_save(flags); 269 local_irq_save(flags);
271 270
272
273
274 if (new_khz != stock_freq) { 271 if (new_khz != stock_freq) {
275 /* if new khz == 100% of CPU speed, it is special case */ 272 /* if new khz == 100% of CPU speed, it is special case */
276 switch (gx_params->cs55x0->device) { 273 switch (gx_params->cs55x0->device) {
@@ -317,7 +314,7 @@ static void gx_set_cpuspeed(unsigned int khz)
317 314
318 gx_params->pci_suscfg = suscfg; 315 gx_params->pci_suscfg = suscfg;
319 316
320 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 317 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
321 318
322 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", 319 pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
323 gx_params->on_duration * 32, gx_params->off_duration * 32); 320 gx_params->on_duration * 32, gx_params->off_duration * 32);
@@ -397,7 +394,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
397 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); 394 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
398 } 395 }
399 396
400 gx_set_cpuspeed(tmp_freq); 397 gx_set_cpuspeed(policy, tmp_freq);
401 398
402 return 0; 399 return 0;
403} 400}
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
new file mode 100644
index 000000000000..c0075dbaa633
--- /dev/null
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -0,0 +1,438 @@
1/*
2 * This file provides the ACPI based P-state support. This
3 * module works with generic cpufreq infrastructure. Most of
4 * the code is based on i386 version
5 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
6 *
7 * Copyright (C) 2005 Intel Corp
8 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <asm/io.h>
19#include <asm/uaccess.h>
20#include <asm/pal.h>
21
22#include <linux/acpi.h>
23#include <acpi/processor.h>
24
25MODULE_AUTHOR("Venkatesh Pallipadi");
26MODULE_DESCRIPTION("ACPI Processor P-States Driver");
27MODULE_LICENSE("GPL");
28
29
/*
 * Per-CPU driver state: the ACPI _PSS performance data for the CPU and
 * the cpufreq frequency table derived from it.
 */
struct cpufreq_acpi_io {
	struct acpi_processor_performance	acpi_data;
	struct cpufreq_frequency_table		*freq_table;
	unsigned int				resume;	/* force one transition right after resume */
};

static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];

/* Forward declaration; defined after the callbacks below. */
static struct cpufreq_driver acpi_cpufreq_driver;
39
40
41static int
42processor_set_pstate (
43 u32 value)
44{
45 s64 retval;
46
47 pr_debug("processor_set_pstate\n");
48
49 retval = ia64_pal_set_pstate((u64)value);
50
51 if (retval) {
52 pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
53 value, retval);
54 return -ENODEV;
55 }
56 return (int)retval;
57}
58
59
60static int
61processor_get_pstate (
62 u32 *value)
63{
64 u64 pstate_index = 0;
65 s64 retval;
66
67 pr_debug("processor_get_pstate\n");
68
69 retval = ia64_pal_get_pstate(&pstate_index,
70 PAL_GET_PSTATE_TYPE_INSTANT);
71 *value = (u32) pstate_index;
72
73 if (retval)
74 pr_debug("Failed to get current freq with "
75 "error 0x%lx, idx 0x%x\n", retval, *value);
76
77 return (int)retval;
78}
79
80
81/* To be used only after data->acpi_data is initialized */
82static unsigned
83extract_clock (
84 struct cpufreq_acpi_io *data,
85 unsigned value,
86 unsigned int cpu)
87{
88 unsigned long i;
89
90 pr_debug("extract_clock\n");
91
92 for (i = 0; i < data->acpi_data.state_count; i++) {
93 if (value == data->acpi_data.states[i].status)
94 return data->acpi_data.states[i].core_frequency;
95 }
96 return data->acpi_data.states[i-1].core_frequency;
97}
98
99
100static unsigned int
101processor_get_freq (
102 struct cpufreq_acpi_io *data,
103 unsigned int cpu)
104{
105 int ret = 0;
106 u32 value = 0;
107 cpumask_t saved_mask;
108 unsigned long clock_freq;
109
110 pr_debug("processor_get_freq\n");
111
112 saved_mask = current->cpus_allowed;
113 set_cpus_allowed_ptr(current, cpumask_of(cpu));
114 if (smp_processor_id() != cpu)
115 goto migrate_end;
116
117 /* processor_get_pstate gets the instantaneous frequency */
118 ret = processor_get_pstate(&value);
119
120 if (ret) {
121 set_cpus_allowed_ptr(current, &saved_mask);
122 printk(KERN_WARNING "get performance failed with error %d\n",
123 ret);
124 ret = 0;
125 goto migrate_end;
126 }
127 clock_freq = extract_clock(data, value, cpu);
128 ret = (clock_freq*1000);
129
130migrate_end:
131 set_cpus_allowed_ptr(current, &saved_mask);
132 return ret;
133}
134
135
/*
 * Transition @policy's CPU to P-state @state. PAL must be invoked on
 * the target CPU, so migrate there first; the original affinity mask
 * is restored at migrate_end on every path. The PRE/POSTCHANGE
 * notifier sequence (including the rollback dance on failure) is
 * order-critical — do not reorder.
 */
static int
processor_set_freq (
	struct cpufreq_acpi_io	*data,
	struct cpufreq_policy   *policy,
	int			state)
{
	int			ret = 0;
	u32			value = 0;
	struct cpufreq_freqs    cpufreq_freqs;
	cpumask_t		saved_mask;
	int			retval;

	pr_debug("processor_set_freq\n");

	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
	if (smp_processor_id() != policy->cpu) {
		retval = -EAGAIN;
		goto migrate_end;
	}

	/* Already there? Only proceed anyway on the first call after resume. */
	if (state == data->acpi_data.state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n", state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n", state);
			retval = 0;
			goto migrate_end;
		}
	}

	pr_debug("Transitioning from P%d to P%d\n",
		data->acpi_data.state, state);

	/* cpufreq frequency struct */
	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
	cpufreq_freqs.new = data->freq_table[state].frequency;

	/* notify cpufreq */
	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);

	/*
	 * First we write the target state's 'control' value to the
	 * control_register.
	 */

	value = (u32) data->acpi_data.states[state].control;

	pr_debug("Transitioning to state: 0x%08x\n", value);

	ret = processor_set_pstate(value);
	if (ret) {
		/*
		 * Roll the notifiers back: complete the failed transition,
		 * then announce and complete a reverse transition back to
		 * the old frequency so listeners see a consistent state.
		 */
		unsigned int tmp = cpufreq_freqs.new;
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_POSTCHANGE);
		cpufreq_freqs.new = cpufreq_freqs.old;
		cpufreq_freqs.old = tmp;
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_PRECHANGE);
		cpufreq_notify_transition(policy, &cpufreq_freqs,
				CPUFREQ_POSTCHANGE);
		printk(KERN_WARNING "Transition failed with error %d\n", ret);
		retval = -ENODEV;
		goto migrate_end;
	}

	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);

	data->acpi_data.state = state;

	retval = 0;

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return (retval);
}
213
214
215static unsigned int
216acpi_cpufreq_get (
217 unsigned int cpu)
218{
219 struct cpufreq_acpi_io *data = acpi_io_data[cpu];
220
221 pr_debug("acpi_cpufreq_get\n");
222
223 return processor_get_freq(data, cpu);
224}
225
226
227static int
228acpi_cpufreq_target (
229 struct cpufreq_policy *policy,
230 unsigned int target_freq,
231 unsigned int relation)
232{
233 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
234 unsigned int next_state = 0;
235 unsigned int result = 0;
236
237 pr_debug("acpi_cpufreq_setpolicy\n");
238
239 result = cpufreq_frequency_table_target(policy,
240 data->freq_table, target_freq, relation, &next_state);
241 if (result)
242 return (result);
243
244 result = processor_set_freq(data, policy, next_state);
245
246 return (result);
247}
248
249
250static int
251acpi_cpufreq_verify (
252 struct cpufreq_policy *policy)
253{
254 unsigned int result = 0;
255 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
256
257 pr_debug("acpi_cpufreq_verify\n");
258
259 result = cpufreq_frequency_table_verify(policy,
260 data->freq_table);
261
262 return (result);
263}
264
265
/*
 * cpufreq ->init hook: fetch the CPU's ACPI _PSS data, validate that
 * P-states are controlled via the fixed-hardware (PAL) interface,
 * build the frequency table and publish policy limits. Error paths
 * unwind in reverse acquisition order (table -> registration -> data).
 */
static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy   *policy)
{
	unsigned int		i;
	unsigned int		cpu = policy->cpu;
	struct cpufreq_acpi_io	*data;
	unsigned int		result = 0;

	pr_debug("acpi_cpufreq_cpu_init\n");

	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
	if (!data)
		return (-ENOMEM);

	acpi_io_data[cpu] = data;

	result = acpi_processor_register_performance(&data->acpi_data, cpu);

	if (result)
		goto err_free;

	/* capability check */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	/* Both control and status must use the fixed-hardware address space. */
	if ((data->acpi_data.control_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (data->acpi_data.status_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Unsupported address space [%d, %d]\n",
			(u32) (data->acpi_data.control_register.space_id),
			(u32) (data->acpi_data.status_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	/* alloc freq_table: state_count entries plus the END sentinel */
	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
	                           (data->acpi_data.state_count + 1),
	                           GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency: worst case over all states (us -> ns) */
	policy->cpuinfo.transition_latency = 0;
	for (i=0; i<data->acpi_data.state_count; i++) {
		if ((data->acpi_data.states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency) {
			policy->cpuinfo.transition_latency =
			    data->acpi_data.states[i].transition_latency * 1000;
		}
	}
	policy->cur = processor_get_freq(data, policy->cpu);

	/* table init: core_frequency is MHz, cpufreq wants kHz */
	for (i = 0; i <= data->acpi_data.state_count; i++)
	{
		data->freq_table[i].index = i;
		if (i < data->acpi_data.state_count) {
			data->freq_table[i].frequency =
			      data->acpi_data.states[i].core_frequency * 1000;
		} else {
			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
		}
	}

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result) {
		goto err_freqfree;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
	       "activated.\n", cpu);

	for (i = 0; i < data->acpi_data.state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
			(i == data->acpi_data.state?'*':' '), i,
			(u32) data->acpi_data.states[i].core_frequency,
			(u32) data->acpi_data.states[i].power,
			(u32) data->acpi_data.states[i].transition_latency,
			(u32) data->acpi_data.states[i].bus_master_latency,
			(u32) data->acpi_data.states[i].status,
			(u32) data->acpi_data.states[i].control);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/* the first call to ->target() should result in us actually
	 * writing something to the appropriate registers. */
	data->resume = 1;

	return (result);

 err_freqfree:
	kfree(data->freq_table);
 err_unreg:
	acpi_processor_unregister_performance(&data->acpi_data, cpu);
 err_free:
	kfree(data);
	acpi_io_data[cpu] = NULL;

	return (result);
}
377
378
379static int
380acpi_cpufreq_cpu_exit (
381 struct cpufreq_policy *policy)
382{
383 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
384
385 pr_debug("acpi_cpufreq_cpu_exit\n");
386
387 if (data) {
388 cpufreq_frequency_table_put_attr(policy->cpu);
389 acpi_io_data[policy->cpu] = NULL;
390 acpi_processor_unregister_performance(&data->acpi_data,
391 policy->cpu);
392 kfree(data);
393 }
394
395 return (0);
396}
397
398
/* sysfs attributes exported per policy; NULL-terminated. */
static struct freq_attr* acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};


static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify 	= acpi_cpufreq_verify,
	.target 	= acpi_cpufreq_target,
	.get 		= acpi_cpufreq_get,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr           = acpi_cpufreq_attr,
};
415
416
/* Module init: register with the cpufreq core. */
static int __init
acpi_cpufreq_init (void)
{
	pr_debug("acpi_cpufreq_init\n");

	return cpufreq_register_driver(&acpi_cpufreq_driver);
}


static void __exit
acpi_cpufreq_exit (void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);
	return;
}


/*
 * late_initcall rather than module_init — presumably so the ACPI
 * processor driver has populated _PSS data first; confirm before
 * changing (NOTE: review).
 */
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
438
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 54e336de373b..b78bc35973ba 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -50,7 +50,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
50 struct cpufreq_freqs freqs; 50 struct cpufreq_freqs freqs;
51 struct opp *opp; 51 struct opp *opp;
52 unsigned long freq_hz, volt, volt_old; 52 unsigned long freq_hz, volt, volt_old;
53 unsigned int index, cpu; 53 unsigned int index;
54 int ret; 54 int ret;
55 55
56 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, 56 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -68,10 +68,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
68 if (freqs.old == freqs.new) 68 if (freqs.old == freqs.new)
69 return 0; 69 return 0;
70 70
71 for_each_online_cpu(cpu) { 71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
72 freqs.cpu = cpu;
73 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
74 }
75 72
76 rcu_read_lock(); 73 rcu_read_lock();
77 opp = opp_find_freq_ceil(cpu_dev, &freq_hz); 74 opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
@@ -166,10 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
166 } 163 }
167 } 164 }
168 165
169 for_each_online_cpu(cpu) { 166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
170 freqs.cpu = cpu;
171 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
172 }
173 167
174 return 0; 168 return 0;
175} 169}
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
new file mode 100644
index 000000000000..f7c99df0880b
--- /dev/null
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * CPU support functions
9 */
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/cpufreq.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/init.h>
17#include <linux/io.h>
18
19#include <mach/hardware.h>
20#include <mach/platform.h>
21#include <asm/mach-types.h>
22#include <asm/hardware/icst.h>
23
24static struct cpufreq_driver integrator_driver;
25
26#define CM_ID __io_address(INTEGRATOR_HDR_ID)
27#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
28#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
29#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
30
31static const struct icst_params lclk_params = {
32 .ref = 24000000,
33 .vco_max = ICST525_VCO_MAX_5V,
34 .vco_min = ICST525_VCO_MIN,
35 .vd_min = 8,
36 .vd_max = 132,
37 .rd_min = 24,
38 .rd_max = 24,
39 .s2div = icst525_s2div,
40 .idx2s = icst525_idx2s,
41};
42
43static const struct icst_params cclk_params = {
44 .ref = 24000000,
45 .vco_max = ICST525_VCO_MAX_5V,
46 .vco_min = ICST525_VCO_MIN,
47 .vd_min = 12,
48 .vd_max = 160,
49 .rd_min = 24,
50 .rd_max = 24,
51 .s2div = icst525_s2div,
52 .idx2s = icst525_idx2s,
53};
54
55/*
56 * Validate the speed policy.
57 */
58static int integrator_verify_policy(struct cpufreq_policy *policy)
59{
60 struct icst_vco vco;
61
62 cpufreq_verify_within_limits(policy,
63 policy->cpuinfo.min_freq,
64 policy->cpuinfo.max_freq);
65
66 vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
67 policy->max = icst_hz(&cclk_params, vco) / 1000;
68
69 vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
70 policy->min = icst_hz(&cclk_params, vco) / 1000;
71
72 cpufreq_verify_within_limits(policy,
73 policy->cpuinfo.min_freq,
74 policy->cpuinfo.max_freq);
75
76 return 0;
77}
78
79
80static int integrator_set_target(struct cpufreq_policy *policy,
81 unsigned int target_freq,
82 unsigned int relation)
83{
84 cpumask_t cpus_allowed;
85 int cpu = policy->cpu;
86 struct icst_vco vco;
87 struct cpufreq_freqs freqs;
88 u_int cm_osc;
89
90 /*
91 * Save this threads cpus_allowed mask.
92 */
93 cpus_allowed = current->cpus_allowed;
94
95 /*
96 * Bind to the specified CPU. When this call returns,
97 * we should be running on the right CPU.
98 */
99 set_cpus_allowed(current, cpumask_of_cpu(cpu));
100 BUG_ON(cpu != smp_processor_id());
101
102 /* get current setting */
103 cm_osc = __raw_readl(CM_OSC);
104
105 if (machine_is_integrator()) {
106 vco.s = (cm_osc >> 8) & 7;
107 } else if (machine_is_cintegrator()) {
108 vco.s = 1;
109 }
110 vco.v = cm_osc & 255;
111 vco.r = 22;
112 freqs.old = icst_hz(&cclk_params, vco) / 1000;
113
114 /* icst_hz_to_vco rounds down -- so we need the next
115 * larger freq in case of CPUFREQ_RELATION_L.
116 */
117 if (relation == CPUFREQ_RELATION_L)
118 target_freq += 999;
119 if (target_freq > policy->max)
120 target_freq = policy->max;
121 vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
122 freqs.new = icst_hz(&cclk_params, vco) / 1000;
123
124 if (freqs.old == freqs.new) {
125 set_cpus_allowed(current, cpus_allowed);
126 return 0;
127 }
128
129 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
130
131 cm_osc = __raw_readl(CM_OSC);
132
133 if (machine_is_integrator()) {
134 cm_osc &= 0xfffff800;
135 cm_osc |= vco.s << 8;
136 } else if (machine_is_cintegrator()) {
137 cm_osc &= 0xffffff00;
138 }
139 cm_osc |= vco.v;
140
141 __raw_writel(0xa05f, CM_LOCK);
142 __raw_writel(cm_osc, CM_OSC);
143 __raw_writel(0, CM_LOCK);
144
145 /*
146 * Restore the CPUs allowed mask.
147 */
148 set_cpus_allowed(current, cpus_allowed);
149
150 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
151
152 return 0;
153}
154
155static unsigned int integrator_get(unsigned int cpu)
156{
157 cpumask_t cpus_allowed;
158 unsigned int current_freq;
159 u_int cm_osc;
160 struct icst_vco vco;
161
162 cpus_allowed = current->cpus_allowed;
163
164 set_cpus_allowed(current, cpumask_of_cpu(cpu));
165 BUG_ON(cpu != smp_processor_id());
166
167 /* detect memory etc. */
168 cm_osc = __raw_readl(CM_OSC);
169
170 if (machine_is_integrator()) {
171 vco.s = (cm_osc >> 8) & 7;
172 } else {
173 vco.s = 1;
174 }
175 vco.v = cm_osc & 255;
176 vco.r = 22;
177
178 current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
179
180 set_cpus_allowed(current, cpus_allowed);
181
182 return current_freq;
183}
184
185static int integrator_cpufreq_init(struct cpufreq_policy *policy)
186{
187
188 /* set default policy and cpuinfo */
189 policy->cpuinfo.max_freq = 160000;
190 policy->cpuinfo.min_freq = 12000;
191 policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
192 policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
193
194 return 0;
195}
196
197static struct cpufreq_driver integrator_driver = {
198 .verify = integrator_verify_policy,
199 .target = integrator_set_target,
200 .get = integrator_get,
201 .init = integrator_cpufreq_init,
202 .name = "integrator",
203};
204
205static int __init integrator_cpu_init(void)
206{
207 return cpufreq_register_driver(&integrator_driver);
208}
209
210static void __exit integrator_cpu_exit(void)
211{
212 cpufreq_unregister_driver(&integrator_driver);
213}
214
215MODULE_AUTHOR ("Russell M. King");
216MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
217MODULE_LICENSE ("GPL");
218
219module_init(integrator_cpu_init);
220module_exit(integrator_cpu_exit);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..cc3a8e6c92be 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * cpufreq_snb.c: Native P state management for Intel processors 2 * intel_pstate.c: Native P state management for Intel processors
3 * 3 *
4 * (C) Copyright 2012 Intel Corporation 4 * (C) Copyright 2012 Intel Corporation
5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> 5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
@@ -657,30 +657,27 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
657static int intel_pstate_set_policy(struct cpufreq_policy *policy) 657static int intel_pstate_set_policy(struct cpufreq_policy *policy)
658{ 658{
659 struct cpudata *cpu; 659 struct cpudata *cpu;
660 int min, max;
661 660
662 cpu = all_cpu_data[policy->cpu]; 661 cpu = all_cpu_data[policy->cpu];
663 662
664 if (!policy->cpuinfo.max_freq) 663 if (!policy->cpuinfo.max_freq)
665 return -ENODEV; 664 return -ENODEV;
666 665
667 intel_pstate_get_min_max(cpu, &min, &max);
668
669 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
670 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
671 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
672
673 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
674 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
675 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
676
677 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 666 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
678 limits.min_perf_pct = 100; 667 limits.min_perf_pct = 100;
679 limits.min_perf = int_tofp(1); 668 limits.min_perf = int_tofp(1);
680 limits.max_perf_pct = 100; 669 limits.max_perf_pct = 100;
681 limits.max_perf = int_tofp(1); 670 limits.max_perf = int_tofp(1);
682 limits.no_turbo = 0; 671 limits.no_turbo = 0;
672 return 0;
683 } 673 }
674 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
675 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
676 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
677
678 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
679 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
680 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
684 681
685 return 0; 682 return 0;
686} 683}
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0e83e3c24f5b..d36ea8dc96eb 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,7 +55,8 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
55 return kirkwood_freq_table[0].frequency; 55 return kirkwood_freq_table[0].frequency;
56} 56}
57 57
58static void kirkwood_cpufreq_set_cpu_state(unsigned int index) 58static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
59 unsigned int index)
59{ 60{
60 struct cpufreq_freqs freqs; 61 struct cpufreq_freqs freqs;
61 unsigned int state = kirkwood_freq_table[index].index; 62 unsigned int state = kirkwood_freq_table[index].index;
@@ -63,9 +64,8 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
63 64
64 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); 65 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
65 freqs.new = kirkwood_freq_table[index].frequency; 66 freqs.new = kirkwood_freq_table[index].frequency;
66 freqs.cpu = 0; /* Kirkwood is UP */
67 67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 68 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
69 69
70 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n", 70 dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
71 kirkwood_freq_table[index].frequency); 71 kirkwood_freq_table[index].frequency);
@@ -99,7 +99,7 @@ static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
99 99
100 local_irq_enable(); 100 local_irq_enable();
101 } 101 }
102 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 102 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
103}; 103};
104 104
105static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) 105static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
@@ -117,7 +117,7 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
117 target_freq, relation, &index)) 117 target_freq, relation, &index))
118 return -EINVAL; 118 return -EINVAL;
119 119
120 kirkwood_cpufreq_set_cpu_state(index); 120 kirkwood_cpufreq_set_cpu_state(policy, index);
121 121
122 return 0; 122 return 0;
123} 123}
@@ -175,11 +175,9 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
175 dev_err(&pdev->dev, "Cannot get memory resource\n"); 175 dev_err(&pdev->dev, "Cannot get memory resource\n");
176 return -ENODEV; 176 return -ENODEV;
177 } 177 }
178 priv.base = devm_request_and_ioremap(&pdev->dev, res); 178 priv.base = devm_ioremap_resource(&pdev->dev, res);
179 if (!priv.base) { 179 if (IS_ERR(priv.base))
180 dev_err(&pdev->dev, "Cannot ioremap\n"); 180 return PTR_ERR(priv.base);
181 return -EADDRNOTAVAIL;
182 }
183 181
184 np = of_find_node_by_path("/cpus/cpu@0"); 182 np = of_find_node_by_path("/cpus/cpu@0");
185 if (!np) 183 if (!np)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 1180d536d1eb..b448638e34de 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,8 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
242 * Sets a new clock ratio. 242 * Sets a new clock ratio.
243 */ 243 */
244 244
245static void longhaul_setstate(unsigned int table_index) 245static void longhaul_setstate(struct cpufreq_policy *policy,
246 unsigned int table_index)
246{ 247{
247 unsigned int mults_index; 248 unsigned int mults_index;
248 int speed, mult; 249 int speed, mult;
@@ -267,9 +268,8 @@ static void longhaul_setstate(unsigned int table_index)
267 268
268 freqs.old = calc_speed(longhaul_get_cpu_mult()); 269 freqs.old = calc_speed(longhaul_get_cpu_mult());
269 freqs.new = speed; 270 freqs.new = speed;
270 freqs.cpu = 0; /* longhaul.c is UP only driver */
271 271
272 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
273 273
274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
275 fsb, mult/10, mult%10, print_speed(speed/1000)); 275 fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
386 } 386 }
387 } 387 }
388 /* Report true CPU frequency */ 388 /* Report true CPU frequency */
389 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 389 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
390 390
391 if (!bm_timeout) 391 if (!bm_timeout)
392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 392 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -648,7 +648,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
648 return 0; 648 return 0;
649 649
650 if (!can_scale_voltage) 650 if (!can_scale_voltage)
651 longhaul_setstate(table_index); 651 longhaul_setstate(policy, table_index);
652 else { 652 else {
653 /* On test system voltage transitions exceeding single 653 /* On test system voltage transitions exceeding single
654 * step up or down were turning motherboard off. Both 654 * step up or down were turning motherboard off. Both
@@ -663,7 +663,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
663 while (i != table_index) { 663 while (i != table_index) {
664 vid = (longhaul_table[i].index >> 8) & 0x1f; 664 vid = (longhaul_table[i].index >> 8) & 0x1f;
665 if (vid != current_vid) { 665 if (vid != current_vid) {
666 longhaul_setstate(i); 666 longhaul_setstate(policy, i);
667 current_vid = vid; 667 current_vid = vid;
668 msleep(200); 668 msleep(200);
669 } 669 }
@@ -672,7 +672,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
672 else 672 else
673 i--; 673 i--;
674 } 674 }
675 longhaul_setstate(table_index); 675 longhaul_setstate(policy, table_index);
676 } 676 }
677 longhaul_index = table_index; 677 longhaul_index = table_index;
678 return 0; 678 return 0;
@@ -998,15 +998,17 @@ static int __init longhaul_init(void)
998 998
999static void __exit longhaul_exit(void) 999static void __exit longhaul_exit(void)
1000{ 1000{
1001 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
1001 int i; 1002 int i;
1002 1003
1003 for (i = 0; i < numscales; i++) { 1004 for (i = 0; i < numscales; i++) {
1004 if (mults[i] == maxmult) { 1005 if (mults[i] == maxmult) {
1005 longhaul_setstate(i); 1006 longhaul_setstate(policy, i);
1006 break; 1007 break;
1007 } 1008 }
1008 } 1009 }
1009 1010
1011 cpufreq_cpu_put(policy);
1010 cpufreq_unregister_driver(&longhaul_driver); 1012 cpufreq_unregister_driver(&longhaul_driver);
1011 kfree(longhaul_table); 1013 kfree(longhaul_table);
1012} 1014}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 000000000000..84889573b566
--- /dev/null
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,248 @@
1/*
2 * Cpufreq driver for the loongson-2 processors
3 *
4 * The 2E revision of loongson processor not support this feature.
5 *
6 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/cpufreq.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/sched.h> /* set_cpus_allowed() */
17#include <linux/delay.h>
18#include <linux/platform_device.h>
19
20#include <asm/clock.h>
21
22#include <asm/mach-loongson/loongson.h>
23
24static uint nowait;
25
26static struct clk *cpuclk;
27
28static void (*saved_cpu_wait) (void);
29
30static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
31 unsigned long val, void *data);
32
33static struct notifier_block loongson2_cpufreq_notifier_block = {
34 .notifier_call = loongson2_cpu_freq_notifier
35};
36
37static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
38 unsigned long val, void *data)
39{
40 if (val == CPUFREQ_POSTCHANGE)
41 current_cpu_data.udelay_val = loops_per_jiffy;
42
43 return 0;
44}
45
46static unsigned int loongson2_cpufreq_get(unsigned int cpu)
47{
48 return clk_get_rate(cpuclk);
49}
50
51/*
52 * Here we notify other drivers of the proposed change and the final change.
53 */
54static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 unsigned int cpu = policy->cpu;
59 unsigned int newstate = 0;
60 cpumask_t cpus_allowed;
61 struct cpufreq_freqs freqs;
62 unsigned int freq;
63
64 cpus_allowed = current->cpus_allowed;
65 set_cpus_allowed_ptr(current, cpumask_of(cpu));
66
67 if (cpufreq_frequency_table_target
68 (policy, &loongson2_clockmod_table[0], target_freq, relation,
69 &newstate))
70 return -EINVAL;
71
72 freq =
73 ((cpu_clock_freq / 1000) *
74 loongson2_clockmod_table[newstate].index) / 8;
75 if (freq < policy->min || freq > policy->max)
76 return -EINVAL;
77
78 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
79
80 freqs.old = loongson2_cpufreq_get(cpu);
81 freqs.new = freq;
82 freqs.flags = 0;
83
84 if (freqs.new == freqs.old)
85 return 0;
86
87 /* notifiers */
88 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
89
90 set_cpus_allowed_ptr(current, &cpus_allowed);
91
92 /* setting the cpu frequency */
93 clk_set_rate(cpuclk, freq);
94
95 /* notifiers */
96 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
97
98 pr_debug("cpufreq: set frequency %u kHz\n", freq);
99
100 return 0;
101}
102
103static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
104{
105 int i;
106 unsigned long rate;
107 int ret;
108
109 cpuclk = clk_get(NULL, "cpu_clk");
110 if (IS_ERR(cpuclk)) {
111 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
112 return PTR_ERR(cpuclk);
113 }
114
115 rate = cpu_clock_freq / 1000;
116 if (!rate) {
117 clk_put(cpuclk);
118 return -EINVAL;
119 }
120 ret = clk_set_rate(cpuclk, rate);
121 if (ret) {
122 clk_put(cpuclk);
123 return ret;
124 }
125
126 /* clock table init */
127 for (i = 2;
128 (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
129 i++)
130 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
131
132 policy->cur = loongson2_cpufreq_get(policy->cpu);
133
134 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
135 policy->cpu);
136
137 return cpufreq_frequency_table_cpuinfo(policy,
138 &loongson2_clockmod_table[0]);
139}
140
141static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
142{
143 return cpufreq_frequency_table_verify(policy,
144 &loongson2_clockmod_table[0]);
145}
146
147static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
148{
149 clk_put(cpuclk);
150 return 0;
151}
152
153static struct freq_attr *loongson2_table_attr[] = {
154 &cpufreq_freq_attr_scaling_available_freqs,
155 NULL,
156};
157
158static struct cpufreq_driver loongson2_cpufreq_driver = {
159 .owner = THIS_MODULE,
160 .name = "loongson2",
161 .init = loongson2_cpufreq_cpu_init,
162 .verify = loongson2_cpufreq_verify,
163 .target = loongson2_cpufreq_target,
164 .get = loongson2_cpufreq_get,
165 .exit = loongson2_cpufreq_exit,
166 .attr = loongson2_table_attr,
167};
168
169static struct platform_device_id platform_device_ids[] = {
170 {
171 .name = "loongson2_cpufreq",
172 },
173 {}
174};
175
176MODULE_DEVICE_TABLE(platform, platform_device_ids);
177
178static struct platform_driver platform_driver = {
179 .driver = {
180 .name = "loongson2_cpufreq",
181 .owner = THIS_MODULE,
182 },
183 .id_table = platform_device_ids,
184};
185
186/*
187 * This is the simple version of Loongson-2 wait, Maybe we need do this in
188 * interrupt disabled context.
189 */
190
191static DEFINE_SPINLOCK(loongson2_wait_lock);
192
193static void loongson2_cpu_wait(void)
194{
195 unsigned long flags;
196 u32 cpu_freq;
197
198 spin_lock_irqsave(&loongson2_wait_lock, flags);
199 cpu_freq = LOONGSON_CHIPCFG0;
200 LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
201 LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
202 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
203}
204
205static int __init cpufreq_init(void)
206{
207 int ret;
208
209 /* Register platform stuff */
210 ret = platform_driver_register(&platform_driver);
211 if (ret)
212 return ret;
213
214 pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
215
216 cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
217 CPUFREQ_TRANSITION_NOTIFIER);
218
219 ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
220
221 if (!ret && !nowait) {
222 saved_cpu_wait = cpu_wait;
223 cpu_wait = loongson2_cpu_wait;
224 }
225
226 return ret;
227}
228
229static void __exit cpufreq_exit(void)
230{
231 if (!nowait && saved_cpu_wait)
232 cpu_wait = saved_cpu_wait;
233 cpufreq_unregister_driver(&loongson2_cpufreq_driver);
234 cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
235 CPUFREQ_TRANSITION_NOTIFIER);
236
237 platform_driver_unregister(&platform_driver);
238}
239
240module_init(cpufreq_init);
241module_exit(cpufreq_exit);
242
243module_param(nowait, uint, 0644);
244MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
245
246MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
247MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
248MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d4c4989823dc..cdd62915efaf 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -158,11 +158,10 @@ static int maple_cpufreq_target(struct cpufreq_policy *policy,
158 158
159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency; 159 freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
160 freqs.new = maple_cpu_freqs[newstate].frequency; 160 freqs.new = maple_cpu_freqs[newstate].frequency;
161 freqs.cpu = 0;
162 161
163 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 162 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
164 rc = maple_scom_switch_freq(newstate); 163 rc = maple_scom_switch_freq(newstate);
165 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 164 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
166 165
167 mutex_unlock(&maple_switch_mutex); 166 mutex_unlock(&maple_switch_mutex);
168 167
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 9128c07bafba..0279d18a57f9 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -25,6 +25,7 @@
25#include <linux/opp.h> 25#include <linux/opp.h>
26#include <linux/cpu.h> 26#include <linux/cpu.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h> 29#include <linux/regulator/consumer.h>
29 30
30#include <asm/smp_plat.h> 31#include <asm/smp_plat.h>
@@ -88,16 +89,12 @@ static int omap_target(struct cpufreq_policy *policy,
88 } 89 }
89 90
90 freqs.old = omap_getspeed(policy->cpu); 91 freqs.old = omap_getspeed(policy->cpu);
91 freqs.cpu = policy->cpu;
92 92
93 if (freqs.old == freqs.new && policy->cur == freqs.new) 93 if (freqs.old == freqs.new && policy->cur == freqs.new)
94 return ret; 94 return ret;
95 95
96 /* notifiers */ 96 /* notifiers */
97 for_each_cpu(i, policy->cpus) { 97 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
98 freqs.cpu = i;
99 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
100 }
101 98
102 freq = freqs.new * 1000; 99 freq = freqs.new * 1000;
103 ret = clk_round_rate(mpu_clk, freq); 100 ret = clk_round_rate(mpu_clk, freq);
@@ -157,10 +154,7 @@ static int omap_target(struct cpufreq_policy *policy,
157 154
158done: 155done:
159 /* notifiers */ 156 /* notifiers */
160 for_each_cpu(i, policy->cpus) { 157 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
161 freqs.cpu = i;
162 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
163 }
164 158
165 return ret; 159 return ret;
166} 160}
@@ -184,7 +178,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
184 goto fail_ck; 178 goto fail_ck;
185 } 179 }
186 180
187 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); 181 policy->cur = omap_getspeed(policy->cpu);
188 182
189 if (!freq_table) 183 if (!freq_table)
190 result = opp_init_cpufreq_table(mpu_dev, &freq_table); 184 result = opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -203,8 +197,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
203 197
204 cpufreq_frequency_table_get_attr(freq_table, policy->cpu); 198 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
205 199
206 policy->min = policy->cpuinfo.min_freq;
207 policy->max = policy->cpuinfo.max_freq;
208 policy->cur = omap_getspeed(policy->cpu); 200 policy->cur = omap_getspeed(policy->cpu);
209 201
210 /* 202 /*
@@ -252,7 +244,7 @@ static struct cpufreq_driver omap_driver = {
252 .attr = omap_cpufreq_attr, 244 .attr = omap_cpufreq_attr,
253}; 245};
254 246
255static int __init omap_cpufreq_init(void) 247static int omap_cpufreq_probe(struct platform_device *pdev)
256{ 248{
257 mpu_dev = get_cpu_device(0); 249 mpu_dev = get_cpu_device(0);
258 if (!mpu_dev) { 250 if (!mpu_dev) {
@@ -280,12 +272,20 @@ static int __init omap_cpufreq_init(void)
280 return cpufreq_register_driver(&omap_driver); 272 return cpufreq_register_driver(&omap_driver);
281} 273}
282 274
283static void __exit omap_cpufreq_exit(void) 275static int omap_cpufreq_remove(struct platform_device *pdev)
284{ 276{
285 cpufreq_unregister_driver(&omap_driver); 277 return cpufreq_unregister_driver(&omap_driver);
286} 278}
287 279
280static struct platform_driver omap_cpufreq_platdrv = {
281 .driver = {
282 .name = "omap-cpufreq",
283 .owner = THIS_MODULE,
284 },
285 .probe = omap_cpufreq_probe,
286 .remove = omap_cpufreq_remove,
287};
288module_platform_driver(omap_cpufreq_platdrv);
289
288MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs"); 290MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
289MODULE_LICENSE("GPL"); 291MODULE_LICENSE("GPL");
290module_init(omap_cpufreq_init);
291module_exit(omap_cpufreq_exit);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 827629c9aad7..421ef37d0bb3 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -58,8 +58,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
58{ 58{
59 u32 l, h; 59 u32 l, h;
60 60
61 if (!cpu_online(cpu) || 61 if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
62 (newstate > DC_DISABLE) || (newstate == DC_RESV))
63 return -EINVAL; 62 return -EINVAL;
64 63
65 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); 64 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
@@ -125,10 +124,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
125 return 0; 124 return 0;
126 125
127 /* notifiers */ 126 /* notifiers */
128 for_each_cpu(i, policy->cpus) { 127 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
129 freqs.cpu = i;
130 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
131 }
132 128
133 /* run on each logical CPU, 129 /* run on each logical CPU,
134 * see section 13.15.3 of IA32 Intel Architecture Software 130 * see section 13.15.3 of IA32 Intel Architecture Software
@@ -138,10 +134,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
138 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
139 135
140 /* notifiers */ 136 /* notifiers */
141 for_each_cpu(i, policy->cpus) { 137 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
142 freqs.cpu = i;
143 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
144 }
145 138
146 return 0; 139 return 0;
147} 140}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 503996a94a6a..0de00081a81e 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,8 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
215 (pcch_virt_addr + pcc_cpu_data->input_offset)); 215 (pcch_virt_addr + pcc_cpu_data->input_offset));
216 216
217 freqs.new = target_freq; 217 freqs.new = target_freq;
218 freqs.cpu = cpu; 218 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
219 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
220 219
221 input_buffer = 0x1 | (((target_freq * 100) 220 input_buffer = 0x1 | (((target_freq * 100)
222 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); 221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -237,7 +236,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
237 } 236 }
238 iowrite16(0, &pcch_hdr->status); 237 iowrite16(0, &pcch_hdr->status);
239 238
240 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 239 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
241 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); 240 pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
242 spin_unlock(&pcc_lock); 241 spin_unlock(&pcc_lock);
243 242
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index af23e0b9ec92..ea0222a45b7b 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -68,7 +68,8 @@ static int powernow_k6_get_cpu_multiplier(void)
68 * 68 *
69 * Tries to change the PowerNow! multiplier 69 * Tries to change the PowerNow! multiplier
70 */ 70 */
71static void powernow_k6_set_state(unsigned int best_i) 71static void powernow_k6_set_state(struct cpufreq_policy *policy,
72 unsigned int best_i)
72{ 73{
73 unsigned long outvalue = 0, invalue = 0; 74 unsigned long outvalue = 0, invalue = 0;
74 unsigned long msrval; 75 unsigned long msrval;
@@ -81,9 +82,8 @@ static void powernow_k6_set_state(unsigned int best_i)
81 82
82 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 83 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
83 freqs.new = busfreq * clock_ratio[best_i].index; 84 freqs.new = busfreq * clock_ratio[best_i].index;
84 freqs.cpu = 0; /* powernow-k6.c is UP only driver */
85 85
86 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 86 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
87 87
88 /* we now need to transform best_i to the BVC format, see AMD#23446 */ 88 /* we now need to transform best_i to the BVC format, see AMD#23446 */
89 89
@@ -98,7 +98,7 @@ static void powernow_k6_set_state(unsigned int best_i)
98 msrval = POWERNOW_IOPORT + 0x0; 98 msrval = POWERNOW_IOPORT + 0x0;
99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ 99 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
100 100
101 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 101 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
102 102
103 return; 103 return;
104} 104}
@@ -136,7 +136,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
136 target_freq, relation, &newstate)) 136 target_freq, relation, &newstate))
137 return -EINVAL; 137 return -EINVAL;
138 138
139 powernow_k6_set_state(newstate); 139 powernow_k6_set_state(policy, newstate);
140 140
141 return 0; 141 return 0;
142} 142}
@@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
182 unsigned int i; 182 unsigned int i;
183 for (i = 0; i < 8; i++) { 183 for (i = 0; i < 8; i++) {
184 if (i == max_multiplier) 184 if (i == max_multiplier)
185 powernow_k6_set_state(i); 185 powernow_k6_set_state(policy, i);
186 } 186 }
187 cpufreq_frequency_table_put_attr(policy->cpu); 187 cpufreq_frequency_table_put_attr(policy->cpu);
188 return 0; 188 return 0;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 334cc2f1e9f1..53888dacbe58 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
248} 248}
249 249
250 250
251static void change_speed(unsigned int index) 251static void change_speed(struct cpufreq_policy *policy, unsigned int index)
252{ 252{
253 u8 fid, vid; 253 u8 fid, vid;
254 struct cpufreq_freqs freqs; 254 struct cpufreq_freqs freqs;
@@ -263,15 +263,13 @@ static void change_speed(unsigned int index)
263 fid = powernow_table[index].index & 0xFF; 263 fid = powernow_table[index].index & 0xFF;
264 vid = (powernow_table[index].index & 0xFF00) >> 8; 264 vid = (powernow_table[index].index & 0xFF00) >> 8;
265 265
266 freqs.cpu = 0;
267
268 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 266 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
269 cfid = fidvidstatus.bits.CFID; 267 cfid = fidvidstatus.bits.CFID;
270 freqs.old = fsb * fid_codes[cfid] / 10; 268 freqs.old = fsb * fid_codes[cfid] / 10;
271 269
272 freqs.new = powernow_table[index].frequency; 270 freqs.new = powernow_table[index].frequency;
273 271
274 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 272 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
275 273
276 /* Now do the magic poking into the MSRs. */ 274 /* Now do the magic poking into the MSRs. */
277 275
@@ -292,7 +290,7 @@ static void change_speed(unsigned int index)
292 if (have_a0 == 1) 290 if (have_a0 == 1)
293 local_irq_enable(); 291 local_irq_enable();
294 292
295 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 293 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
296} 294}
297 295
298 296
@@ -546,7 +544,7 @@ static int powernow_target(struct cpufreq_policy *policy,
546 relation, &newstate)) 544 relation, &newstate))
547 return -EINVAL; 545 return -EINVAL;
548 546
549 change_speed(newstate); 547 change_speed(policy, newstate);
550 548
551 return 0; 549 return 0;
552} 550}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d13a13678b5f..b828efe4b2f8 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -928,9 +928,10 @@ static int get_transition_latency(struct powernow_k8_data *data)
928static int transition_frequency_fidvid(struct powernow_k8_data *data, 928static int transition_frequency_fidvid(struct powernow_k8_data *data,
929 unsigned int index) 929 unsigned int index)
930{ 930{
931 struct cpufreq_policy *policy;
931 u32 fid = 0; 932 u32 fid = 0;
932 u32 vid = 0; 933 u32 vid = 0;
933 int res, i; 934 int res;
934 struct cpufreq_freqs freqs; 935 struct cpufreq_freqs freqs;
935 936
936 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); 937 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
@@ -959,10 +960,10 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
959 freqs.old = find_khz_freq_from_fid(data->currfid); 960 freqs.old = find_khz_freq_from_fid(data->currfid);
960 freqs.new = find_khz_freq_from_fid(fid); 961 freqs.new = find_khz_freq_from_fid(fid);
961 962
962 for_each_cpu(i, data->available_cores) { 963 policy = cpufreq_cpu_get(smp_processor_id());
963 freqs.cpu = i; 964 cpufreq_cpu_put(policy);
964 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 965
965 } 966 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
966 967
967 res = transition_fid_vid(data, fid, vid); 968 res = transition_fid_vid(data, fid, vid);
968 if (res) 969 if (res)
@@ -970,10 +971,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
970 971
971 freqs.new = find_khz_freq_from_fid(data->currfid); 972 freqs.new = find_khz_freq_from_fid(data->currfid);
972 973
973 for_each_cpu(i, data->available_cores) { 974 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
974 freqs.cpu = i;
975 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
976 }
977 return res; 975 return res;
978} 976}
979 977
@@ -1104,9 +1102,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1104 struct init_on_cpu init_on_cpu; 1102 struct init_on_cpu init_on_cpu;
1105 int rc; 1103 int rc;
1106 1104
1107 if (!cpu_online(pol->cpu))
1108 return -ENODEV;
1109
1110 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1105 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1111 if (rc) 1106 if (rc)
1112 return -ENODEV; 1107 return -ENODEV;
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
new file mode 100644
index 000000000000..e577a1dbbfcd
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -0,0 +1,209 @@
1/*
2 * cpufreq driver for the cell processor
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/of_platform.h>
26
27#include <asm/machdep.h>
28#include <asm/prom.h>
29#include <asm/cell-regs.h>
30
31#include "ppc_cbe_cpufreq.h"
32
33static DEFINE_MUTEX(cbe_switch_mutex);
34
35
36/* the CBE supports an 8 step frequency scaling */
/* .index holds the clock divider (1/1 .. 1/10 of the max clock);
 * .frequency starts at 0 and is filled in by cbe_cpufreq_cpu_init()
 * from the CPU node's "clock-frequency" property. */
static struct cpufreq_frequency_table cbe_freqs[] = {
	{1,	0},
	{2,	0},
	{3,	0},
	{4,	0},
	{5,	0},
	{6,	0},
	{8,	0},
	{10,	0},
	{0,	CPUFREQ_TABLE_END},
};
48
49/*
50 * hardware specific functions
51 */
52
53static int set_pmode(unsigned int cpu, unsigned int slow_mode)
54{
55 int rc;
56
57 if (cbe_cpufreq_has_pmi)
58 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
59 else
60 rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
61
62 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
63
64 return rc;
65}
66
67/*
68 * cpufreq functions
69 */
70
71static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
72{
73 const u32 *max_freqp;
74 u32 max_freq;
75 int i, cur_pmode;
76 struct device_node *cpu;
77
78 cpu = of_get_cpu_node(policy->cpu, NULL);
79
80 if (!cpu)
81 return -ENODEV;
82
83 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
84
85 /*
86 * Let's check we can actually get to the CELL regs
87 */
88 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
89 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
90 pr_info("invalid CBE regs pointers for cpufreq\n");
91 return -EINVAL;
92 }
93
94 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
95
96 of_node_put(cpu);
97
98 if (!max_freqp)
99 return -EINVAL;
100
101 /* we need the freq in kHz */
102 max_freq = *max_freqp / 1000;
103
104 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
105 pr_debug("initializing frequency table\n");
106
107 /* initialize frequency table */
108 for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
109 cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
110 pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
111 }
112
113 /* if DEBUG is enabled set_pmode() measures the latency
114 * of a transition */
115 policy->cpuinfo.transition_latency = 25000;
116
117 cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
118 pr_debug("current pmode is at %d\n",cur_pmode);
119
120 policy->cur = cbe_freqs[cur_pmode].frequency;
121
122#ifdef CONFIG_SMP
123 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
124#endif
125
126 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
127
128 /* this ensures that policy->cpuinfo_min
129 * and policy->cpuinfo_max are set correctly */
130 return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
131}
132
133static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
134{
135 cpufreq_frequency_table_put_attr(policy->cpu);
136 return 0;
137}
138
139static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
140{
141 return cpufreq_frequency_table_verify(policy, cbe_freqs);
142}
143
144static int cbe_cpufreq_target(struct cpufreq_policy *policy,
145 unsigned int target_freq,
146 unsigned int relation)
147{
148 int rc;
149 struct cpufreq_freqs freqs;
150 unsigned int cbe_pmode_new;
151
152 cpufreq_frequency_table_target(policy,
153 cbe_freqs,
154 target_freq,
155 relation,
156 &cbe_pmode_new);
157
158 freqs.old = policy->cur;
159 freqs.new = cbe_freqs[cbe_pmode_new].frequency;
160
161 mutex_lock(&cbe_switch_mutex);
162 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
163
164 pr_debug("setting frequency for cpu %d to %d kHz, " \
165 "1/%d of max frequency\n",
166 policy->cpu,
167 cbe_freqs[cbe_pmode_new].frequency,
168 cbe_freqs[cbe_pmode_new].index);
169
170 rc = set_pmode(policy->cpu, cbe_pmode_new);
171
172 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
173 mutex_unlock(&cbe_switch_mutex);
174
175 return rc;
176}
177
/* cpufreq driver ops for the Cell BE.  CPUFREQ_CONST_LOOPS — presumably
 * because delay loops are timebase-derived and unaffected by pmode
 * scaling; TODO confirm against the platform's timebase source. */
static struct cpufreq_driver cbe_cpufreq_driver = {
	.verify = cbe_cpufreq_verify,
	.target = cbe_cpufreq_target,
	.init = cbe_cpufreq_cpu_init,
	.exit = cbe_cpufreq_cpu_exit,
	.name = "cbe-cpufreq",
	.owner = THIS_MODULE,
	.flags = CPUFREQ_CONST_LOOPS,
};
187
188/*
189 * module init and destoy
190 */
191
192static int __init cbe_cpufreq_init(void)
193{
194 if (!machine_is(cell))
195 return -ENODEV;
196
197 return cpufreq_register_driver(&cbe_cpufreq_driver);
198}
199
200static void __exit cbe_cpufreq_exit(void)
201{
202 cpufreq_unregister_driver(&cbe_cpufreq_driver);
203}
204
205module_init(cbe_cpufreq_init);
206module_exit(cbe_cpufreq_exit);
207
208MODULE_LICENSE("GPL");
209MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
new file mode 100644
index 000000000000..b4c00a5a6a59
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -0,0 +1,24 @@
1/*
2 * ppc_cbe_cpufreq.h
3 *
4 * This file contains the definitions used by the cbe_cpufreq driver.
5 *
6 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
7 *
8 * Author: Christian Krafft <krafft@de.ibm.com>
9 *
10 */
11
12#include <linux/cpufreq.h>
13#include <linux/types.h>
14
15int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
16int cbe_cpufreq_get_pmode(int cpu);
17
18int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
19
20#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
21extern bool cbe_cpufreq_has_pmi;
22#else
23#define cbe_cpufreq_has_pmi (0)
24#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000000..84d2f2cf5ba7
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
1/*
2 * pervasive backend for the cbe_cpufreq driver
3 *
4 * This driver makes use of the pervasive unit to
5 * engage the desired frequency.
6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
8 *
9 * Author: Christian Krafft <krafft@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/time.h>
29#include <asm/machdep.h>
30#include <asm/hw_irq.h>
31#include <asm/cell-regs.h>
32
33#include "ppc_cbe_cpufreq.h"
34
/* to write to MIC register */
/* One entry per pmode (0..7); all eight use the same value. */
static u64 MIC_Slow_Fast_Timer_table[] = {
	[0 ... 7] = 0x007fc00000000000ull,
};
39
/* more values for the MIC */
/* Indexed by pmode (0..7); written to slow_next_timer_0/1 in
 * cbe_cpufreq_set_pmode(). */
static u64 MIC_Slow_Next_Timer_table[] = {
	0x0000240000000000ull,
	0x0000268000000000ull,
	0x000029C000000000ull,
	0x00002D0000000000ull,
	0x0000300000000000ull,
	0x0000334000000000ull,
	0x000039C000000000ull,
	0x00003FC000000000ull,
};
51
52
/*
 * Engage @pmode on @cpu by direct MMIO: program the MIC slow/fast and
 * slow-next timers for the new mode, then write the pmode into the low
 * three bits of the PMD power-management control register.  Runs with
 * local interrupts disabled for the whole sequence.  Always returns 0.
 */
int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	unsigned long flags;
	u64 value;
#ifdef DEBUG
	long time;
#endif

	local_irq_save(flags);

	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
	pmd_regs = cbe_get_cpu_pmd_regs(cpu);

#ifdef DEBUG
	time = jiffies;
#endif

	/* memory-interface timers must be set up before the pmode switch */
	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);

	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);

	/* read-modify-write only the low 3 pmode bits of PMCR */
	value = in_be64(&pmd_regs->pmcr);
	/* set bits to zero */
	value &= 0xFFFFFFFFFFFFFFF8ull;
	/* set bits to next pmode */
	value |= pmode;

	out_be64(&pmd_regs->pmcr, value);

#ifdef DEBUG
	/* wait until new pmode appears in status register */
	value = in_be64(&pmd_regs->pmsr) & 0x07;
	while (value != pmode) {
		cpu_relax();
		value = in_be64(&pmd_regs->pmsr) & 0x07;
	}

	time = jiffies - time;
	time = jiffies_to_msecs(time);
	pr_debug("had to wait %lu ms for a transition using " \
		 "pervasive unit\n", time);
#endif
	local_irq_restore(flags);

	return 0;
}
103
104
105int cbe_cpufreq_get_pmode(int cpu)
106{
107 int ret;
108 struct cbe_pmd_regs __iomem *pmd_regs;
109
110 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
111 ret = in_be64(&pmd_regs->pmsr) & 0x07;
112
113 return ret;
114}
115
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
new file mode 100644
index 000000000000..d29e8da396a0
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -0,0 +1,156 @@
1/*
2 * pmi backend for the cbe_cpufreq driver
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/timer.h>
26#include <linux/module.h>
27#include <linux/of_platform.h>
28
29#include <asm/processor.h>
30#include <asm/prom.h>
31#include <asm/pmi.h>
32#include <asm/cell-regs.h>
33
34#ifdef DEBUG
35#include <asm/time.h>
36#endif
37
38#include "ppc_cbe_cpufreq.h"
39
/* Per-node slow-mode limit last announced by the PMI firmware
 * (0 = no limit); written by cbe_cpufreq_handle_pmi(), read by
 * pmi_notifier(). */
static u8 pmi_slow_mode_limit[MAX_CBE];

/* True once the PMI handler registered; tested by set_pmode() in
 * ppc_cbe_cpufreq.c to pick the backend. */
bool cbe_cpufreq_has_pmi = false;
EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
44
45/*
46 * hardware specific functions
47 */
48
49int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
50{
51 int ret;
52 pmi_message_t pmi_msg;
53#ifdef DEBUG
54 long time;
55#endif
56 pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
57 pmi_msg.data1 = cbe_cpu_to_node(cpu);
58 pmi_msg.data2 = pmode;
59
60#ifdef DEBUG
61 time = jiffies;
62#endif
63 pmi_send_message(pmi_msg);
64
65#ifdef DEBUG
66 time = jiffies - time;
67 time = jiffies_to_msecs(time);
68 pr_debug("had to wait %lu ms for a transition using " \
69 "PMI\n", time);
70#endif
71 ret = pmi_msg.data2;
72 pr_debug("PMI returned slow mode %d\n", ret);
73
74 return ret;
75}
76EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
77
78
79static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
80{
81 u8 node, slow_mode;
82
83 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
84
85 node = pmi_msg.data1;
86 slow_mode = pmi_msg.data2;
87
88 pmi_slow_mode_limit[node] = slow_mode;
89
90 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
91}
92
93static int pmi_notifier(struct notifier_block *nb,
94 unsigned long event, void *data)
95{
96 struct cpufreq_policy *policy = data;
97 struct cpufreq_frequency_table *cbe_freqs;
98 u8 node;
99
100 /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
101 * and CPUFREQ_NOTIFY policy events?)
102 */
103 if (event == CPUFREQ_START)
104 return 0;
105
106 cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
107 node = cbe_cpu_to_node(policy->cpu);
108
109 pr_debug("got notified, event=%lu, node=%u\n", event, node);
110
111 if (pmi_slow_mode_limit[node] != 0) {
112 pr_debug("limiting node %d to slow mode %d\n",
113 node, pmi_slow_mode_limit[node]);
114
115 cpufreq_verify_within_limits(policy, 0,
116
117 cbe_freqs[pmi_slow_mode_limit[node]].frequency);
118 }
119
120 return 0;
121}
122
/* Hooked into the cpufreq policy-notifier chain by cbe_cpufreq_pmi_init(). */
static struct notifier_block pmi_notifier_block = {
	.notifier_call = pmi_notifier,
};
126
/* Routes PMI_TYPE_FREQ_CHANGE firmware messages to our handler. */
static struct pmi_handler cbe_pmi_handler = {
	.type = PMI_TYPE_FREQ_CHANGE,
	.handle_pmi_message = cbe_cpufreq_handle_pmi,
};
131
132
133
134static int __init cbe_cpufreq_pmi_init(void)
135{
136 cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
137
138 if (!cbe_cpufreq_has_pmi)
139 return -ENODEV;
140
141 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
142
143 return 0;
144}
145
146static void __exit cbe_cpufreq_pmi_exit(void)
147{
148 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
149 pmi_unregister_handler(&cbe_pmi_handler);
150}
151
152module_init(cbe_cpufreq_pmi_init);
153module_exit(cbe_cpufreq_pmi_exit);
154
155MODULE_LICENSE("GPL");
156MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
new file mode 100644
index 000000000000..9e5bc8e388a0
--- /dev/null
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -0,0 +1,492 @@
1/*
2 * Copyright (C) 2002,2003 Intrinsyc Software
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * History:
19 * 31-Jul-2002 : Initial version [FB]
20 * 29-Jan-2003 : added PXA255 support [FB]
21 * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
22 *
23 * Note:
24 * This driver may change the memory bus clock rate, but will not do any
25 * platform specific access timing changes... for example if you have flash
26 * memory connected to CS0, you will need to register a platform specific
27 * notifier which will adjust the memory access strobes to maintain a
28 * minimum strobe width.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/sched.h>
35#include <linux/init.h>
36#include <linux/cpufreq.h>
37#include <linux/err.h>
38#include <linux/regulator/consumer.h>
39#include <linux/io.h>
40
41#include <mach/pxa2xx-regs.h>
42#include <mach/smemc.h>
43
44#ifdef DEBUG
45static unsigned int freq_debug;
46module_param(freq_debug, uint, 0);
47MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
48#else
49#define freq_debug 0
50#endif
51
52static struct regulator *vcc_core;
53
54static unsigned int pxa27x_maxfreq;
55module_param(pxa27x_maxfreq, uint, 0);
56MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
57 "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
58
/* One operating point for the PXA core. */
typedef struct {
	unsigned int khz;	/* core clock, kHz */
	unsigned int membus;	/* memory bus clock, kHz */
	unsigned int cccr;	/* Core Clock Configuration Register value */
	unsigned int div2;	/* SDRAM clock is membus/2 when set */
	unsigned int cclkcfg;	/* CCLKCFG coprocessor register value */
	int vmin;	/* lower vcc_core bound, -1 = leave regulator alone;
			 * pr_err text says mV but values look like uV —
			 * TODO confirm units against regulator_set_voltage() */
	int vmax;	/* upper vcc_core bound, -1 = leave regulator alone */
} pxa_freqs_t;
68
69/* Define the refresh period in mSec for the SDRAM and the number of rows */
70#define SDRAM_TREF 64 /* standard 64ms SDRAM */
71static unsigned int sdram_rows;
72
73#define CCLKCFG_TURBO 0x1
74#define CCLKCFG_FCS 0x2
75#define CCLKCFG_HALFTURBO 0x4
76#define CCLKCFG_FASTBUS 0x8
77#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
78#define MDREFR_DRI_MASK 0xFFF
79
80#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
81#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
82
83/*
84 * PXA255 definitions
85 */
86/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
87#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
88
89static pxa_freqs_t pxa255_run_freqs[] =
90{
91 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
92 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
93 {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
94 {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
95 {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
96 {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
97 {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
98};
99
100/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
101static pxa_freqs_t pxa255_turbo_freqs[] =
102{
103 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
104 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
105 {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
106 {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
107 {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
108 {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
109};
110
111#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
112#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
113
114static struct cpufreq_frequency_table
115 pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
116static struct cpufreq_frequency_table
117 pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
118
119static unsigned int pxa255_turbo_table;
120module_param(pxa255_turbo_table, uint, 0);
121MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
122
123/*
124 * PXA270 definitions
125 *
126 * For the PXA27x:
127 * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
128 *
129 * A = 0 => memory controller clock from table 3-7,
130 * A = 1 => memory controller clock = system bus clock
131 * Run mode frequency = 13 MHz * L
132 * Turbo mode frequency = 13 MHz * L * N
133 * System bus frequency = 13 MHz * L / (B + 1)
134 *
135 * In CCCR:
136 * A = 1
137 * L = 16 oscillator to run mode ratio
138 * 2N = 6 2 * (turbo mode to run mode ratio)
139 *
140 * In CCLKCFG:
141 * B = 1 Fast bus mode
142 * HT = 0 Half-Turbo mode
143 * T = 1 Turbo mode
144 *
145 * For now, just support some of the combinations in table 3-7 of
146 * PXA27x Processor Family Developer's Manual to simplify frequency
147 * change sequences.
148 */
149#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
150#define CCLKCFG2(B, HT, T) \
151 (CCLKCFG_FCS | \
152 ((B) ? CCLKCFG_FASTBUS : 0) | \
153 ((HT) ? CCLKCFG_HALFTURBO : 0) | \
154 ((T) ? CCLKCFG_TURBO : 0))
155
156static pxa_freqs_t pxa27x_freqs[] = {
157 {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
158 {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
159 {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
160 {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
161 {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
162 {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
163 {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
164};
165
166#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
167static struct cpufreq_frequency_table
168 pxa27x_freq_table[NUM_PXA27x_FREQS+1];
169
170extern unsigned get_clk_frequency_khz(int info);
171
172#ifdef CONFIG_REGULATOR
173
174static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
175{
176 int ret = 0;
177 int vmin, vmax;
178
179 if (!cpu_is_pxa27x())
180 return 0;
181
182 vmin = pxa_freq->vmin;
183 vmax = pxa_freq->vmax;
184 if ((vmin == -1) || (vmax == -1))
185 return 0;
186
187 ret = regulator_set_voltage(vcc_core, vmin, vmax);
188 if (ret)
189 pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
190 vmin, vmax);
191 return ret;
192}
193
194static __init void pxa_cpufreq_init_voltages(void)
195{
196 vcc_core = regulator_get(NULL, "vcc_core");
197 if (IS_ERR(vcc_core)) {
198 pr_info("cpufreq: Didn't find vcc_core regulator\n");
199 vcc_core = NULL;
200 } else {
201 pr_info("cpufreq: Found vcc_core regulator\n");
202 }
203}
204#else
205static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
206{
207 return 0;
208}
209
210static __init void pxa_cpufreq_init_voltages(void) { }
211#endif
212
213static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
214 pxa_freqs_t **pxa_freqs)
215{
216 if (cpu_is_pxa25x()) {
217 if (!pxa255_turbo_table) {
218 *pxa_freqs = pxa255_run_freqs;
219 *freq_table = pxa255_run_freq_table;
220 } else {
221 *pxa_freqs = pxa255_turbo_freqs;
222 *freq_table = pxa255_turbo_freq_table;
223 }
224 } else if (cpu_is_pxa27x()) {
225 *pxa_freqs = pxa27x_freqs;
226 *freq_table = pxa27x_freq_table;
227 } else {
228 BUG();
229 }
230}
231
232static void pxa27x_guess_max_freq(void)
233{
234 if (!pxa27x_maxfreq) {
235 pxa27x_maxfreq = 416000;
236 printk(KERN_INFO "PXA CPU 27x max frequency not defined "
237 "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
238 pxa27x_maxfreq);
239 } else {
240 pxa27x_maxfreq *= 1000;
241 }
242}
243
244static void init_sdram_rows(void)
245{
246 uint32_t mdcnfg = __raw_readl(MDCNFG);
247 unsigned int drac2 = 0, drac0 = 0;
248
249 if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
250 drac2 = MDCNFG_DRAC2(mdcnfg);
251
252 if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
253 drac0 = MDCNFG_DRAC0(mdcnfg);
254
255 sdram_rows = 1 << (11 + max(drac0, drac2));
256}
257
258static u32 mdrefr_dri(unsigned int freq)
259{
260 u32 interval = freq * SDRAM_TREF / sdram_rows;
261
262 return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
263}
264
265/* find a valid frequency point */
266static int pxa_verify_policy(struct cpufreq_policy *policy)
267{
268 struct cpufreq_frequency_table *pxa_freqs_table;
269 pxa_freqs_t *pxa_freqs;
270 int ret;
271
272 find_freq_tables(&pxa_freqs_table, &pxa_freqs);
273 ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
274
275 if (freq_debug)
276 pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
277 policy->min, policy->max);
278
279 return ret;
280}
281
/* Report the current core clock in kHz (single-core: cpu is ignored). */
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
	return get_clk_frequency_khz(0);
}
286
/*
 * Transition the core (and memory bus) clock to the table entry best
 * matching @target_freq.  The SDRAM refresh interval (MDREFR DRI) and
 * divide-by-2 bits are adjusted around the switch, and — when a
 * vcc_core regulator exists — the core voltage is raised before
 * speeding up / lowered after slowing down.  Returns 0 on success,
 * -EINVAL if no table entry matches, or a voltage-change error when
 * speeding up.
 */
static int pxa_set_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	struct cpufreq_frequency_table *pxa_freqs_table;
	pxa_freqs_t *pxa_freq_settings;
	struct cpufreq_freqs freqs;
	unsigned int idx;
	unsigned long flags;
	unsigned int new_freq_cpu, new_freq_mem;
	unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
	int ret = 0;

	/* Get the current policy */
	find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);

	/* Lookup the next frequency */
	if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
					   target_freq, relation, &idx)) {
		return -EINVAL;
	}

	new_freq_cpu = pxa_freq_settings[idx].khz;
	new_freq_mem = pxa_freq_settings[idx].membus;
	freqs.old = policy->cur;
	freqs.new = new_freq_cpu;

	if (freq_debug)
		pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
			 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
			 (new_freq_mem / 2000) : (new_freq_mem / 1000));

	/* raise the core voltage BEFORE speeding up */
	if (vcc_core && freqs.new > freqs.old)
		ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
	if (ret)
		return ret;
	/*
	 * Tell everyone what we're about to do...
	 * you should add a notify client with any platform specific
	 * Vcc changing capability
	 */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change.	 If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(new_freq_mem);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change.  Clearing the divide must be done
	 * after the change.
	 */
	if (pxa_freq_settings[idx].div2) {
		preset_mdrefr |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	local_irq_save(flags);

	/* Set new the CCCR and prepare CCLKCFG */
	CCCR = pxa_freq_settings[idx].cccr;
	cclkcfg = pxa_freq_settings[idx].cclkcfg;

	/* NOTE(review): the .align 5 / branch dance appears intended to keep
	 * the MDREFR-preset / CCLKCFG-write / MDREFR-postset sequence inside
	 * one cache line so no memory fetch happens mid-switch — confirm
	 * against the PXA developer's manual before touching. */
	asm volatile("							\n\
		ldr	r4, [%1]		/* load MDREFR */	\n\
		b	2f						\n\
		.align	5						\n\
1:									\n\
		str	%3, [%1]		/* preset the MDREFR */	\n\
		mcr	p14, 0, %2, c6, c0, 0	/* set CCLKCFG[FCS] */	\n\
		str	%4, [%1]		/* postset the MDREFR */ \n\
									\n\
		b	3f						\n\
2:		b	1b						\n\
3:		nop							\n\
	  "
		     : "=&r" (unused)
		     : "r" (MDREFR), "r" (cclkcfg),
		       "r" (preset_mdrefr), "r" (postset_mdrefr)
		     : "r4", "r5");
	local_irq_restore(flags);

	/*
	 * Tell everyone what we've just done...
	 * you should add a notify client with any platform specific
	 * SDRAM refresh timer adjustments
	 */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	/*
	 * Even if voltage setting fails, we don't report it, as the frequency
	 * change succeeded. The voltage reduction is not a critical failure,
	 * only power savings will suffer from this.
	 *
	 * Note: if the voltage change fails, and a return value is returned, a
	 * bug is triggered (seems a deadlock). Should anybody find out where,
	 * the "return 0" should become a "return ret".
	 */
	if (vcc_core && freqs.new < freqs.old)
		ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);

	return 0;
}
399
/*
 * One-time policy init: guess the PXA27x max frequency, grab the
 * optional vcc_core regulator, size the SDRAM, build the frequency
 * tables for the running SoC, and seed the policy from the current
 * clock.  Always returns 0.
 */
static int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
	int i;
	unsigned int freq;
	struct cpufreq_frequency_table *pxa255_freq_table;
	pxa_freqs_t *pxa255_freqs;

	/* try to guess pxa27x cpu */
	if (cpu_is_pxa27x())
		pxa27x_guess_max_freq();

	pxa_cpufreq_init_voltages();

	init_sdram_rows();

	/* set default policy and cpuinfo */
	policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
	policy->cur = get_clk_frequency_khz(0);	   /* current freq */
	policy->min = policy->max = policy->cur;

	/* Generate pxa25x the run cpufreq_frequency_table struct */
	for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
		pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
		pxa255_run_freq_table[i].index = i;
	}
	pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;

	/* Generate pxa25x the turbo cpufreq_frequency_table struct */
	for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
		pxa255_turbo_freq_table[i].frequency =
			pxa255_turbo_freqs[i].khz;
		pxa255_turbo_freq_table[i].index = i;
	}
	pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;

	/* normalize the module parameter to a strict 0/1 */
	pxa255_turbo_table = !!pxa255_turbo_table;

	/* Generate the pxa27x cpufreq_frequency_table struct;
	 * entries above pxa27x_maxfreq are dropped */
	for (i = 0; i < NUM_PXA27x_FREQS; i++) {
		freq = pxa27x_freqs[i].khz;
		if (freq > pxa27x_maxfreq)
			break;
		pxa27x_freq_table[i].frequency = freq;
		pxa27x_freq_table[i].index = i;
	}
	pxa27x_freq_table[i].index = i;
	pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;

	/*
	 * Set the policy's minimum and maximum frequencies from the tables
	 * just constructed. This sets cpuinfo.mxx_freq, min and max.
	 */
	if (cpu_is_pxa25x()) {
		find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
		pr_info("PXA255 cpufreq using %s frequency table\n",
			pxa255_turbo_table ? "turbo" : "run");
		cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
	}
	else if (cpu_is_pxa27x())
		cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);

	printk(KERN_INFO "PXA CPU frequency change support initialized\n");

	return 0;
}
465
/* cpufreq driver ops for PXA25x/PXA27x; no .exit needed — init
 * allocates nothing per-policy. */
static struct cpufreq_driver pxa_cpufreq_driver = {
	.verify	= pxa_verify_policy,
	.target	= pxa_set_target,
	.init	= pxa_cpufreq_init,
	.get	= pxa_cpufreq_get,
	.name	= "PXA2xx",
};
473
474static int __init pxa_cpu_init(void)
475{
476 int ret = -ENODEV;
477 if (cpu_is_pxa25x() || cpu_is_pxa27x())
478 ret = cpufreq_register_driver(&pxa_cpufreq_driver);
479 return ret;
480}
481
482static void __exit pxa_cpu_exit(void)
483{
484 cpufreq_unregister_driver(&pxa_cpufreq_driver);
485}
486
487
488MODULE_AUTHOR("Intrinsyc Software Inc.");
489MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
490MODULE_LICENSE("GPL");
491module_init(pxa_cpu_init);
492module_exit(pxa_cpu_exit);
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
new file mode 100644
index 000000000000..15d60f857ad5
--- /dev/null
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (C) 2008 Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17
18#include <mach/generic.h>
19#include <mach/pxa3xx-regs.h>
20
21#define HSS_104M (0)
22#define HSS_156M (1)
23#define HSS_208M (2)
24#define HSS_312M (3)
25
26#define SMCFS_78M (0)
27#define SMCFS_104M (2)
28#define SMCFS_208M (5)
29
30#define SFLFS_104M (0)
31#define SFLFS_156M (1)
32#define SFLFS_208M (2)
33#define SFLFS_312M (3)
34
35#define XSPCLK_156M (0)
36#define XSPCLK_NONE (3)
37
38#define DMCFS_26M (0)
39#define DMCFS_260M (3)
40
41struct pxa3xx_freq_info {
42 unsigned int cpufreq_mhz;
43 unsigned int core_xl : 5;
44 unsigned int core_xn : 3;
45 unsigned int hss : 2;
46 unsigned int dmcfs : 2;
47 unsigned int smcfs : 3;
48 unsigned int sflfs : 2;
49 unsigned int df_clkdiv : 3;
50
51 int vcc_core; /* in mV */
52 int vcc_sram; /* in mV */
53};
54
55#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
56{ \
57 .cpufreq_mhz = cpufreq, \
58 .core_xl = _xl, \
59 .core_xn = _xn, \
60 .hss = HSS_##_hss##M, \
61 .dmcfs = DMCFS_##_dmc##M, \
62 .smcfs = SMCFS_##_smc##M, \
63 .sflfs = SFLFS_##_sfl##M, \
64 .df_clkdiv = _dfi, \
65 .vcc_core = vcore, \
66 .vcc_sram = vsram, \
67}
68
69static struct pxa3xx_freq_info pxa300_freqs[] = {
70 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
71 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
72 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
73 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
74 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
75};
76
77static struct pxa3xx_freq_info pxa320_freqs[] = {
78 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
79 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
80 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
81 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
82 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
83 OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
84};
85
86static unsigned int pxa3xx_freqs_num;
87static struct pxa3xx_freq_info *pxa3xx_freqs;
88static struct cpufreq_frequency_table *pxa3xx_freqs_table;
89
90static int setup_freqs_table(struct cpufreq_policy *policy,
91 struct pxa3xx_freq_info *freqs, int num)
92{
93 struct cpufreq_frequency_table *table;
94 int i;
95
96 table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
97 if (table == NULL)
98 return -ENOMEM;
99
100 for (i = 0; i < num; i++) {
101 table[i].index = i;
102 table[i].frequency = freqs[i].cpufreq_mhz * 1000;
103 }
104 table[num].index = i;
105 table[num].frequency = CPUFREQ_TABLE_END;
106
107 pxa3xx_freqs = freqs;
108 pxa3xx_freqs_num = num;
109 pxa3xx_freqs_table = table;
110
111 return cpufreq_frequency_table_cpuinfo(policy, table);
112}
113
114static void __update_core_freq(struct pxa3xx_freq_info *info)
115{
116 uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
117 uint32_t accr = ACCR;
118 uint32_t xclkcfg;
119
120 accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
121 accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
122
123 /* No clock until core PLL is re-locked */
124 accr |= ACCR_XSPCLK(XSPCLK_NONE);
125
126 xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
127
128 ACCR = accr;
129 __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
130
131 while ((ACSR & mask) != (accr & mask))
132 cpu_relax();
133}
134
135static void __update_bus_freq(struct pxa3xx_freq_info *info)
136{
137 uint32_t mask;
138 uint32_t accr = ACCR;
139
140 mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
141 ACCR_DMCFS_MASK;
142
143 accr &= ~mask;
144 accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
145 ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
146
147 ACCR = accr;
148
149 while ((ACSR & mask) != (accr & mask))
150 cpu_relax();
151}
152
153static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
154{
155 return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
156}
157
158static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
159{
160 return pxa3xx_get_clk_frequency_khz(0);
161}
162
163static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
164 unsigned int target_freq,
165 unsigned int relation)
166{
167 struct pxa3xx_freq_info *next;
168 struct cpufreq_freqs freqs;
169 unsigned long flags;
170 int idx;
171
172 if (policy->cpu != 0)
173 return -EINVAL;
174
175 /* Lookup the next frequency */
176 if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
177 target_freq, relation, &idx))
178 return -EINVAL;
179
180 next = &pxa3xx_freqs[idx];
181
182 freqs.old = policy->cur;
183 freqs.new = next->cpufreq_mhz * 1000;
184
185 pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
186 freqs.old / 1000, freqs.new / 1000,
187 (freqs.old == freqs.new) ? " (skipped)" : "");
188
189 if (freqs.old == target_freq)
190 return 0;
191
192 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
193
194 local_irq_save(flags);
195 __update_core_freq(next);
196 __update_bus_freq(next);
197 local_irq_restore(flags);
198
199 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
200
201 return 0;
202}
203
204static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
205{
206 int ret = -EINVAL;
207
208 /* set default policy and cpuinfo */
209 policy->cpuinfo.min_freq = 104000;
210 policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
211 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
212 policy->max = pxa3xx_get_clk_frequency_khz(0);
213 policy->cur = policy->min = policy->max;
214
215 if (cpu_is_pxa300() || cpu_is_pxa310())
216 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
217
218 if (cpu_is_pxa320())
219 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
220
221 if (ret) {
222 pr_err("failed to setup frequency table\n");
223 return ret;
224 }
225
226 pr_info("CPUFREQ support for PXA3xx initialized\n");
227 return 0;
228}
229
230static struct cpufreq_driver pxa3xx_cpufreq_driver = {
231 .verify = pxa3xx_cpufreq_verify,
232 .target = pxa3xx_cpufreq_set,
233 .init = pxa3xx_cpufreq_init,
234 .get = pxa3xx_cpufreq_get,
235 .name = "pxa3xx-cpufreq",
236};
237
238static int __init cpufreq_init(void)
239{
240 if (cpu_is_pxa3xx())
241 return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
242
243 return 0;
244}
245module_init(cpufreq_init);
246
247static void __exit cpufreq_exit(void)
248{
249 cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
250}
251module_exit(cpufreq_exit);
252
253MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
254MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index bcc053bc02c4..4f1881eee3f1 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -256,7 +256,6 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
256 goto out; 256 goto out;
257 } 257 }
258 258
259 freqs.cpu = 0;
260 freqs.flags = 0; 259 freqs.flags = 0;
261 freqs.old = s3c_freq->is_dvs ? FREQ_DVS 260 freqs.old = s3c_freq->is_dvs ? FREQ_DVS
262 : clk_get_rate(s3c_freq->armclk) / 1000; 261 : clk_get_rate(s3c_freq->armclk) / 1000;
@@ -274,7 +273,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
274 if (!to_dvs && freqs.old == freqs.new) 273 if (!to_dvs && freqs.old == freqs.new)
275 goto out; 274 goto out;
276 275
277 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 276 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
278 277
279 if (to_dvs) { 278 if (to_dvs) {
280 pr_debug("cpufreq: enter dvs\n"); 279 pr_debug("cpufreq: enter dvs\n");
@@ -287,7 +286,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
287 ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new); 286 ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
288 } 287 }
289 288
290 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 289 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
291 290
292out: 291out:
293 mutex_unlock(&cpufreq_lock); 292 mutex_unlock(&cpufreq_lock);
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 6f9490b3c356..27cacb524796 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -84,7 +84,6 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
84 if (ret != 0) 84 if (ret != 0)
85 return ret; 85 return ret;
86 86
87 freqs.cpu = 0;
88 freqs.old = clk_get_rate(armclk) / 1000; 87 freqs.old = clk_get_rate(armclk) / 1000;
89 freqs.new = s3c64xx_freq_table[i].frequency; 88 freqs.new = s3c64xx_freq_table[i].frequency;
90 freqs.flags = 0; 89 freqs.flags = 0;
@@ -95,7 +94,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
95 94
96 pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); 95 pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
97 96
98 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 97 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
99 98
100#ifdef CONFIG_REGULATOR 99#ifdef CONFIG_REGULATOR
101 if (vddarm && freqs.new > freqs.old) { 100 if (vddarm && freqs.new > freqs.old) {
@@ -117,7 +116,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
117 goto err; 116 goto err;
118 } 117 }
119 118
120 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 119 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
121 120
122#ifdef CONFIG_REGULATOR 121#ifdef CONFIG_REGULATOR
123 if (vddarm && freqs.new < freqs.old) { 122 if (vddarm && freqs.new < freqs.old) {
@@ -141,7 +140,7 @@ err_clk:
141 if (clk_set_rate(armclk, freqs.old * 1000) < 0) 140 if (clk_set_rate(armclk, freqs.old * 1000) < 0)
142 pr_err("Failed to restore original clock rate\n"); 141 pr_err("Failed to restore original clock rate\n");
143err: 142err:
144 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 143 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
145 144
146 return ret; 145 return ret;
147} 146}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a484aaea9809..5c7757073793 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -229,7 +229,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
229 } 229 }
230 230
231 freqs.new = s5pv210_freq_table[index].frequency; 231 freqs.new = s5pv210_freq_table[index].frequency;
232 freqs.cpu = 0;
233 232
234 if (freqs.new == freqs.old) 233 if (freqs.new == freqs.old)
235 goto exit; 234 goto exit;
@@ -256,7 +255,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
256 goto exit; 255 goto exit;
257 } 256 }
258 257
259 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 258 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
260 259
261 /* Check if there need to change PLL */ 260 /* Check if there need to change PLL */
262 if ((index == L0) || (priv_index == L0)) 261 if ((index == L0) || (priv_index == L0))
@@ -468,7 +467,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
468 } 467 }
469 } 468 }
470 469
471 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 470 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
472 471
473 if (freqs.new < freqs.old) { 472 if (freqs.new < freqs.old) {
474 regulator_set_voltage(int_regulator, 473 regulator_set_voltage(int_regulator,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
new file mode 100644
index 000000000000..cff18e87ca58
--- /dev/null
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -0,0 +1,247 @@
1/*
2 * cpu-sa1100.c: clock scaling for the SA1100
3 *
4 * Copyright (C) 2000 2001, The Delft University of Technology
5 *
6 * Authors:
7 * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
8 * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
9 * - major rewrite for linux-2.3.99
10 * - rewritten for the more generic power management scheme in
11 * linux-2.4.5-rmk1
12 *
13 * This software has been developed while working on the LART
14 * computing board (http://www.lartmaker.nl/), which is
15 * sponsored by the Mobile Multi-media Communications
16 * (http://www.mobimedia.org/) and Ubiquitous Communications
17 * (http://www.ubicom.tudelft.nl/) projects.
18 *
19 * The authors can be reached at:
20 *
21 * Erik Mouw
22 * Information and Communication Theory Group
23 * Faculty of Information Technology and Systems
24 * Delft University of Technology
25 * P.O. Box 5031
26 * 2600 GA Delft
27 * The Netherlands
28 *
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License as published by
32 * the Free Software Foundation; either version 2 of the License, or
33 * (at your option) any later version.
34 *
35 * This program is distributed in the hope that it will be useful,
36 * but WITHOUT ANY WARRANTY; without even the implied warranty of
37 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
38 * GNU General Public License for more details.
39 *
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 *
44 *
45 * Theory of operations
46 * ====================
47 *
48 * Clock scaling can be used to lower the power consumption of the CPU
49 * core. This will give you a somewhat longer running time.
50 *
51 * The SA-1100 has a single register to change the core clock speed:
52 *
53 * PPCR 0x90020014 PLL config
54 *
55 * However, the DRAM timings are closely related to the core clock
56 * speed, so we need to change these, too. The used registers are:
57 *
58 * MDCNFG 0xA0000000 DRAM config
59 * MDCAS0 0xA0000004 Access waveform
60 * MDCAS1 0xA0000008 Access waveform
61 * MDCAS2 0xA000000C Access waveform
62 *
63 * Care must be taken to change the DRAM parameters the correct way,
64 * because otherwise the DRAM becomes unusable and the kernel will
65 * crash.
66 *
67 * The simple solution to avoid a kernel crash is to put the actual
68 * clock change in ROM and jump to that code from the kernel. The main
69 * disadvantage is that the ROM has to be modified, which is not
70 * possible on all SA-1100 platforms. Another disadvantage is that
 71 * jumping to ROM makes clock switching unnecessarily complicated.
72 *
73 * The idea behind this driver is that the memory configuration can be
74 * changed while running from DRAM (even with interrupts turned on!)
75 * as long as all re-configuration steps yield a valid DRAM
76 * configuration. The advantages are clear: it will run on all SA-1100
77 * platforms, and the code is very simple.
78 *
79 * If you really want to understand what is going on in
80 * sa1100_update_dram_timings(), you'll have to read sections 8.2,
81 * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
82 * Developers Manual" (available for free from Intel).
83 *
84 */
85
86#include <linux/kernel.h>
87#include <linux/types.h>
88#include <linux/init.h>
89#include <linux/cpufreq.h>
90#include <linux/io.h>
91
92#include <asm/cputype.h>
93
94#include <mach/generic.h>
95#include <mach/hardware.h>
96
97struct sa1100_dram_regs {
98 int speed;
99 u32 mdcnfg;
100 u32 mdcas0;
101 u32 mdcas1;
102 u32 mdcas2;
103};
104
105
106static struct cpufreq_driver sa1100_driver;
107
108static struct sa1100_dram_regs sa1100_dram_settings[] = {
109 /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
110 { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
111 { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
112 { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
113 {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
114 {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
115 {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
116 {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
117 {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
118 {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
119 {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
120 {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
121 {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
122 {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
123 {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
124 {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
125 {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
126 { 0, 0, 0, 0, 0 } /* last entry */
127};
128
129static void sa1100_update_dram_timings(int current_speed, int new_speed)
130{
131 struct sa1100_dram_regs *settings = sa1100_dram_settings;
132
133 /* find speed */
134 while (settings->speed != 0) {
135 if (new_speed == settings->speed)
136 break;
137
138 settings++;
139 }
140
141 if (settings->speed == 0) {
142 panic("%s: couldn't find dram setting for speed %d\n",
143 __func__, new_speed);
144 }
145
146 /* No risk, no fun: run with interrupts on! */
147 if (new_speed > current_speed) {
148 /* We're going FASTER, so first relax the memory
149 * timings before changing the core frequency
150 */
151
152 /* Half the memory access clock */
153 MDCNFG |= MDCNFG_CDB2;
154
155 /* The order of these statements IS important, keep 8
156 * pulses!!
157 */
158 MDCAS2 = settings->mdcas2;
159 MDCAS1 = settings->mdcas1;
160 MDCAS0 = settings->mdcas0;
161 MDCNFG = settings->mdcnfg;
162 } else {
163 /* We're going SLOWER: first decrease the core
164 * frequency and then tighten the memory settings.
165 */
166
167 /* Half the memory access clock */
168 MDCNFG |= MDCNFG_CDB2;
169
170 /* The order of these statements IS important, keep 8
171 * pulses!!
172 */
173 MDCAS0 = settings->mdcas0;
174 MDCAS1 = settings->mdcas1;
175 MDCAS2 = settings->mdcas2;
176 MDCNFG = settings->mdcnfg;
177 }
178}
179
180static int sa1100_target(struct cpufreq_policy *policy,
181 unsigned int target_freq,
182 unsigned int relation)
183{
184 unsigned int cur = sa11x0_getspeed(0);
185 unsigned int new_ppcr;
186 struct cpufreq_freqs freqs;
187
188 new_ppcr = sa11x0_freq_to_ppcr(target_freq);
189 switch (relation) {
190 case CPUFREQ_RELATION_L:
191 if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
192 new_ppcr--;
193 break;
194 case CPUFREQ_RELATION_H:
195 if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
196 (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
197 new_ppcr--;
198 break;
199 }
200
201 freqs.old = cur;
202 freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
203
204 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
205
206 if (freqs.new > cur)
207 sa1100_update_dram_timings(cur, freqs.new);
208
209 PPCR = new_ppcr;
210
211 if (freqs.new < cur)
212 sa1100_update_dram_timings(cur, freqs.new);
213
214 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
215
216 return 0;
217}
218
219static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
220{
221 if (policy->cpu != 0)
222 return -EINVAL;
223 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
224 policy->cpuinfo.min_freq = 59000;
225 policy->cpuinfo.max_freq = 287000;
226 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
227 return 0;
228}
229
230static struct cpufreq_driver sa1100_driver __refdata = {
231 .flags = CPUFREQ_STICKY,
232 .verify = sa11x0_verify_speed,
233 .target = sa1100_target,
234 .get = sa11x0_getspeed,
235 .init = sa1100_cpu_init,
236 .name = "sa1100",
237};
238
239static int __init sa1100_dram_init(void)
240{
241 if (cpu_is_sa1100())
242 return cpufreq_register_driver(&sa1100_driver);
243 else
244 return -ENODEV;
245}
246
247arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
new file mode 100644
index 000000000000..39c90b6f4286
--- /dev/null
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -0,0 +1,406 @@
1/*
2 * linux/arch/arm/mach-sa1100/cpu-sa1110.c
3 *
4 * Copyright (C) 2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
 10 * Note: there are two errata that apply to the SA1110 here:
11 * 7 - SDRAM auto-power-up failure (rev A0)
12 * 13 - Corruption of internal register reads/writes following
13 * SDRAM reads (rev A0, B0, B1)
14 *
15 * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
16 *
17 * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
18 */
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/kernel.h>
24#include <linux/moduleparam.h>
25#include <linux/types.h>
26
27#include <asm/cputype.h>
28#include <asm/mach-types.h>
29
30#include <mach/generic.h>
31#include <mach/hardware.h>
32
33#undef DEBUG
34
35struct sdram_params {
36 const char name[20];
37 u_char rows; /* bits */
38 u_char cas_latency; /* cycles */
39 u_char tck; /* clock cycle time (ns) */
40 u_char trcd; /* activate to r/w (ns) */
41 u_char trp; /* precharge to activate (ns) */
42 u_char twr; /* write recovery time (ns) */
43 u_short refresh; /* refresh time for array (us) */
44};
45
46struct sdram_info {
47 u_int mdcnfg;
48 u_int mdrefr;
49 u_int mdcas[3];
50};
51
52static struct sdram_params sdram_tbl[] __initdata = {
53 { /* Toshiba TC59SM716 CL2 */
54 .name = "TC59SM716-CL2",
55 .rows = 12,
56 .tck = 10,
57 .trcd = 20,
58 .trp = 20,
59 .twr = 10,
60 .refresh = 64000,
61 .cas_latency = 2,
62 }, { /* Toshiba TC59SM716 CL3 */
63 .name = "TC59SM716-CL3",
64 .rows = 12,
65 .tck = 8,
66 .trcd = 20,
67 .trp = 20,
68 .twr = 8,
69 .refresh = 64000,
70 .cas_latency = 3,
71 }, { /* Samsung K4S641632D TC75 */
72 .name = "K4S641632D",
73 .rows = 14,
74 .tck = 9,
75 .trcd = 27,
76 .trp = 20,
77 .twr = 9,
78 .refresh = 64000,
79 .cas_latency = 3,
80 }, { /* Samsung K4S281632B-1H */
81 .name = "K4S281632B-1H",
82 .rows = 12,
83 .tck = 10,
84 .trp = 20,
85 .twr = 10,
86 .refresh = 64000,
87 .cas_latency = 3,
88 }, { /* Samsung KM416S4030CT */
89 .name = "KM416S4030CT",
90 .rows = 13,
91 .tck = 8,
92 .trcd = 24, /* 3 CLKs */
93 .trp = 24, /* 3 CLKs */
94 .twr = 16, /* Trdl: 2 CLKs */
95 .refresh = 64000,
96 .cas_latency = 3,
97 }, { /* Winbond W982516AH75L CL3 */
98 .name = "W982516AH75L",
99 .rows = 16,
100 .tck = 8,
101 .trcd = 20,
102 .trp = 20,
103 .twr = 8,
104 .refresh = 64000,
105 .cas_latency = 3,
106 }, { /* Micron MT48LC8M16A2TG-75 */
107 .name = "MT48LC8M16A2TG-75",
108 .rows = 12,
109 .tck = 8,
110 .trcd = 20,
111 .trp = 20,
112 .twr = 8,
113 .refresh = 64000,
114 .cas_latency = 3,
115 },
116};
117
118static struct sdram_params sdram_params;
119
120/*
121 * Given a period in ns and frequency in khz, calculate the number of
122 * cycles of frequency in period. Note that we round up to the next
123 * cycle, even if we are only slightly over.
124 */
125static inline u_int ns_to_cycles(u_int ns, u_int khz)
126{
127 return (ns * khz + 999999) / 1000000;
128}
129
130/*
131 * Create the MDCAS register bit pattern.
132 */
133static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
134{
135 u_int shift;
136
137 rcd = 2 * rcd - 1;
138 shift = delayed + 1 + rcd;
139
140 mdcas[0] = (1 << rcd) - 1;
141 mdcas[0] |= 0x55555555 << shift;
142 mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
143}
144
145static void
146sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
147 struct sdram_params *sdram)
148{
149 u_int mem_khz, sd_khz, trp, twr;
150
151 mem_khz = cpu_khz / 2;
152 sd_khz = mem_khz;
153
154 /*
155 * If SDCLK would invalidate the SDRAM timings,
156 * run SDCLK at half speed.
157 *
158 * CPU steppings prior to B2 must either run the memory at
159 * half speed or use delayed read latching (errata 13).
160 */
161 if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
162 (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
163 sd_khz /= 2;
164
165 sd->mdcnfg = MDCNFG & 0x007f007f;
166
167 twr = ns_to_cycles(sdram->twr, mem_khz);
168
169 /* trp should always be >1 */
170 trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
171 if (trp < 1)
172 trp = 1;
173
174 sd->mdcnfg |= trp << 8;
175 sd->mdcnfg |= trp << 24;
176 sd->mdcnfg |= sdram->cas_latency << 12;
177 sd->mdcnfg |= sdram->cas_latency << 28;
178 sd->mdcnfg |= twr << 14;
179 sd->mdcnfg |= twr << 30;
180
181 sd->mdrefr = MDREFR & 0xffbffff0;
182 sd->mdrefr |= 7;
183
184 if (sd_khz != mem_khz)
185 sd->mdrefr |= MDREFR_K1DB2;
186
187 /* initial number of '1's in MDCAS + 1 */
188 set_mdcas(sd->mdcas, sd_khz >= 62000,
189 ns_to_cycles(sdram->trcd, mem_khz));
190
191#ifdef DEBUG
192 printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
193 sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
194 sd->mdcas[2]);
195#endif
196}
197
198/*
199 * Set the SDRAM refresh rate.
200 */
201static inline void sdram_set_refresh(u_int dri)
202{
203 MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
204 (void) MDREFR;
205}
206
207/*
208 * Update the refresh period. We do this such that we always refresh
209 * the SDRAMs within their permissible period. The refresh period is
210 * always a multiple of the memory clock (fixed at cpu_clock / 2).
211 *
212 * FIXME: we don't currently take account of burst accesses here,
 213 * but neither do Intel's DM nor Angel.
214 */
215static void
216sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
217{
218 u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
219 u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
220
221#ifdef DEBUG
222 mdelay(250);
223 printk(KERN_DEBUG "new dri value = %d\n", dri);
224#endif
225
226 sdram_set_refresh(dri);
227}
228
229/*
230 * Ok, set the CPU frequency.
231 */
232static int sa1110_target(struct cpufreq_policy *policy,
233 unsigned int target_freq,
234 unsigned int relation)
235{
236 struct sdram_params *sdram = &sdram_params;
237 struct cpufreq_freqs freqs;
238 struct sdram_info sd;
239 unsigned long flags;
240 unsigned int ppcr, unused;
241
242 switch (relation) {
243 case CPUFREQ_RELATION_L:
244 ppcr = sa11x0_freq_to_ppcr(target_freq);
245 if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
246 ppcr--;
247 break;
248 case CPUFREQ_RELATION_H:
249 ppcr = sa11x0_freq_to_ppcr(target_freq);
250 if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
251 (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
252 ppcr--;
253 break;
254 default:
255 return -EINVAL;
256 }
257
258 freqs.old = sa11x0_getspeed(0);
259 freqs.new = sa11x0_ppcr_to_freq(ppcr);
260
261 sdram_calculate_timing(&sd, freqs.new, sdram);
262
263#if 0
264 /*
265 * These values are wrong according to the SA1110 documentation
266 * and errata, but they seem to work. Need to get a storage
267 * scope on to the SDRAM signals to work out why.
268 */
269 if (policy->max < 147500) {
270 sd.mdrefr |= MDREFR_K1DB2;
271 sd.mdcas[0] = 0xaaaaaa7f;
272 } else {
273 sd.mdrefr &= ~MDREFR_K1DB2;
274 sd.mdcas[0] = 0xaaaaaa9f;
275 }
276 sd.mdcas[1] = 0xaaaaaaaa;
277 sd.mdcas[2] = 0xaaaaaaaa;
278#endif
279
280 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
281
282 /*
283 * The clock could be going away for some time. Set the SDRAMs
284 * to refresh rapidly (every 64 memory clock cycles). To get
285 * through the whole array, we need to wait 262144 mclk cycles.
286 * We wait 20ms to be safe.
287 */
288 sdram_set_refresh(2);
289 if (!irqs_disabled())
290 msleep(20);
291 else
292 mdelay(20);
293
294 /*
295 * Reprogram the DRAM timings with interrupts disabled, and
296 * ensure that we are doing this within a complete cache line.
297 * This means that we won't access SDRAM for the duration of
298 * the programming.
299 */
300 local_irq_save(flags);
301 asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
302 udelay(10);
303 __asm__ __volatile__("\n\
304 b 2f \n\
305 .align 5 \n\
3061: str %3, [%1, #0] @ MDCNFG \n\
307 str %4, [%1, #28] @ MDREFR \n\
308 str %5, [%1, #4] @ MDCAS0 \n\
309 str %6, [%1, #8] @ MDCAS1 \n\
310 str %7, [%1, #12] @ MDCAS2 \n\
311 str %8, [%2, #0] @ PPCR \n\
312 ldr %0, [%1, #0] \n\
313 b 3f \n\
3142: b 1b \n\
3153: nop \n\
316 nop"
317 : "=&r" (unused)
318 : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
319 "r" (sd.mdrefr), "r" (sd.mdcas[0]),
320 "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
321 local_irq_restore(flags);
322
323 /*
324 * Now, return the SDRAM refresh back to normal.
325 */
326 sdram_update_refresh(freqs.new, sdram);
327
328 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
329
330 return 0;
331}
332
333static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
334{
335 if (policy->cpu != 0)
336 return -EINVAL;
337 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
338 policy->cpuinfo.min_freq = 59000;
339 policy->cpuinfo.max_freq = 287000;
340 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
341 return 0;
342}
343
344/* sa1110_driver needs __refdata because it must remain after init registers
345 * it with cpufreq_register_driver() */
346static struct cpufreq_driver sa1110_driver __refdata = {
347 .flags = CPUFREQ_STICKY,
348 .verify = sa11x0_verify_speed,
349 .target = sa1110_target,
350 .get = sa11x0_getspeed,
351 .init = sa1110_cpu_init,
352 .name = "sa1110",
353};
354
355static struct sdram_params *sa1110_find_sdram(const char *name)
356{
357 struct sdram_params *sdram;
358
359 for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
360 sdram++)
361 if (strcmp(name, sdram->name) == 0)
362 return sdram;
363
364 return NULL;
365}
366
367static char sdram_name[16];
368
369static int __init sa1110_clk_init(void)
370{
371 struct sdram_params *sdram;
372 const char *name = sdram_name;
373
374 if (!cpu_is_sa1110())
375 return -ENODEV;
376
377 if (!name[0]) {
378 if (machine_is_assabet())
379 name = "TC59SM716-CL3";
380 if (machine_is_pt_system3())
381 name = "K4S641632D";
382 if (machine_is_h3100())
383 name = "KM416S4030CT";
384 if (machine_is_jornada720())
385 name = "K4S281632B-1H";
386 if (machine_is_nanoengine())
387 name = "MT48LC8M16A2TG-75";
388 }
389
390 sdram = sa1110_find_sdram(name);
391 if (sdram) {
392 printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
393 " twr: %d refresh: %d cas_latency: %d\n",
394 sdram->tck, sdram->trcd, sdram->trp,
395 sdram->twr, sdram->refresh, sdram->cas_latency);
396
397 memcpy(&sdram_params, sdram, sizeof(sdram_params));
398
399 return cpufreq_register_driver(&sa1110_driver);
400 }
401
402 return 0;
403}
404
405module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
406arch_initcall(sa1110_clk_init);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index e42e073cd9b8..f740b134d27b 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,7 +53,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
53 } 53 }
54} 54}
55 55
56static void sc520_freq_set_cpu_state(unsigned int state) 56static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
57 unsigned int state)
57{ 58{
58 59
59 struct cpufreq_freqs freqs; 60 struct cpufreq_freqs freqs;
@@ -61,9 +62,8 @@ static void sc520_freq_set_cpu_state(unsigned int state)
61 62
62 freqs.old = sc520_freq_get_cpu_frequency(0); 63 freqs.old = sc520_freq_get_cpu_frequency(0);
63 freqs.new = sc520_freq_table[state].frequency; 64 freqs.new = sc520_freq_table[state].frequency;
64 freqs.cpu = 0; /* AMD Elan is UP */
65 65
66 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 66 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
67 67
68 pr_debug("attempting to set frequency to %i kHz\n", 68 pr_debug("attempting to set frequency to %i kHz\n",
69 sc520_freq_table[state].frequency); 69 sc520_freq_table[state].frequency);
@@ -75,7 +75,7 @@ static void sc520_freq_set_cpu_state(unsigned int state)
75 75
76 local_irq_enable(); 76 local_irq_enable();
77 77
78 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 78 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
79}; 79};
80 80
81static int sc520_freq_verify(struct cpufreq_policy *policy) 81static int sc520_freq_verify(struct cpufreq_policy *policy)
@@ -93,7 +93,7 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
93 target_freq, relation, &newstate)) 93 target_freq, relation, &newstate))
94 return -EINVAL; 94 return -EINVAL;
95 95
96 sc520_freq_set_cpu_state(newstate); 96 sc520_freq_set_cpu_state(policy, newstate);
97 97
98 return 0; 98 return 0;
99} 99}
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
new file mode 100644
index 000000000000..73adb64651e8
--- /dev/null
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -0,0 +1,189 @@
1/*
2 * cpufreq driver for the SuperH processors.
3 *
4 * Copyright (C) 2002 - 2012 Paul Mundt
5 * Copyright (C) 2002 M. R. Brown
6 *
7 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
8 *
9 * Copyright (C) 2004-2007 Atmel Corporation
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 */
15#define pr_fmt(fmt) "cpufreq: " fmt
16
17#include <linux/types.h>
18#include <linux/cpufreq.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/err.h>
23#include <linux/cpumask.h>
24#include <linux/cpu.h>
25#include <linux/smp.h>
26#include <linux/sched.h> /* set_cpus_allowed() */
27#include <linux/clk.h>
28#include <linux/percpu.h>
29#include <linux/sh_clk.h>
30
31static DEFINE_PER_CPU(struct clk, sh_cpuclk);
32
33static unsigned int sh_cpufreq_get(unsigned int cpu)
34{
35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
36}
37
38/*
39 * Here we notify other drivers of the proposed change and the final change.
40 */
41static int sh_cpufreq_target(struct cpufreq_policy *policy,
42 unsigned int target_freq,
43 unsigned int relation)
44{
45 unsigned int cpu = policy->cpu;
46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
47 cpumask_t cpus_allowed;
48 struct cpufreq_freqs freqs;
49 struct device *dev;
50 long freq;
51
52 cpus_allowed = current->cpus_allowed;
53 set_cpus_allowed_ptr(current, cpumask_of(cpu));
54
55 BUG_ON(smp_processor_id() != cpu);
56
57 dev = get_cpu_device(cpu);
58
59 /* Convert target_freq from kHz to Hz */
60 freq = clk_round_rate(cpuclk, target_freq * 1000);
61
62 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
63 return -EINVAL;
64
65 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
66
67 freqs.old = sh_cpufreq_get(cpu);
68 freqs.new = (freq + 500) / 1000;
69 freqs.flags = 0;
70
71 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
72 set_cpus_allowed_ptr(current, &cpus_allowed);
73 clk_set_rate(cpuclk, freq);
74 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
75
76 dev_dbg(dev, "set frequency %lu Hz\n", freq);
77
78 return 0;
79}
80
81static int sh_cpufreq_verify(struct cpufreq_policy *policy)
82{
83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
84 struct cpufreq_frequency_table *freq_table;
85
86 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
87 if (freq_table)
88 return cpufreq_frequency_table_verify(policy, freq_table);
89
90 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
91 policy->cpuinfo.max_freq);
92
93 policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
94 policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
95
96 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
97 policy->cpuinfo.max_freq);
98
99 return 0;
100}
101
102static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
103{
104 unsigned int cpu = policy->cpu;
105 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
106 struct cpufreq_frequency_table *freq_table;
107 struct device *dev;
108
109 dev = get_cpu_device(cpu);
110
111 cpuclk = clk_get(dev, "cpu_clk");
112 if (IS_ERR(cpuclk)) {
113 dev_err(dev, "couldn't get CPU clk\n");
114 return PTR_ERR(cpuclk);
115 }
116
117 policy->cur = sh_cpufreq_get(cpu);
118
119 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
120 if (freq_table) {
121 int result;
122
123 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
124 if (!result)
125 cpufreq_frequency_table_get_attr(freq_table, cpu);
126 } else {
127 dev_notice(dev, "no frequency table found, falling back "
128 "to rate rounding.\n");
129
130 policy->min = policy->cpuinfo.min_freq =
131 (clk_round_rate(cpuclk, 1) + 500) / 1000;
132 policy->max = policy->cpuinfo.max_freq =
133 (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
134 }
135
136 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
137
138 dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
139 "Maximum %u.%03u MHz.\n",
140 policy->min / 1000, policy->min % 1000,
141 policy->max / 1000, policy->max % 1000);
142
143 return 0;
144}
145
146static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
147{
148 unsigned int cpu = policy->cpu;
149 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
150
151 cpufreq_frequency_table_put_attr(cpu);
152 clk_put(cpuclk);
153
154 return 0;
155}
156
157static struct freq_attr *sh_freq_attr[] = {
158 &cpufreq_freq_attr_scaling_available_freqs,
159 NULL,
160};
161
162static struct cpufreq_driver sh_cpufreq_driver = {
163 .owner = THIS_MODULE,
164 .name = "sh",
165 .get = sh_cpufreq_get,
166 .target = sh_cpufreq_target,
167 .verify = sh_cpufreq_verify,
168 .init = sh_cpufreq_cpu_init,
169 .exit = sh_cpufreq_cpu_exit,
170 .attr = sh_freq_attr,
171};
172
173static int __init sh_cpufreq_module_init(void)
174{
175 pr_notice("SuperH CPU frequency driver.\n");
176 return cpufreq_register_driver(&sh_cpufreq_driver);
177}
178
179static void __exit sh_cpufreq_module_exit(void)
180{
181 cpufreq_unregister_driver(&sh_cpufreq_driver);
182}
183
184module_init(sh_cpufreq_module_init);
185module_exit(sh_cpufreq_module_exit);
186
187MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
188MODULE_DESCRIPTION("cpufreq driver for SuperH");
189MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
new file mode 100644
index 000000000000..306ae462bba6
--- /dev/null
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -0,0 +1,408 @@
1/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18
19#include <asm/asi.h>
20#include <asm/timer.h>
21
22static struct cpufreq_driver *cpufreq_us2e_driver;
23
24struct us2e_freq_percpu_info {
25 struct cpufreq_frequency_table table[6];
26};
27
28/* Indexed by cpu number. */
29static struct us2e_freq_percpu_info *us2e_freq_table;
30
31#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
32#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
33
34/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
35 * in the ESTAR mode control register.
36 */
37#define ESTAR_MODE_DIV_1 0x0000000000000000UL
38#define ESTAR_MODE_DIV_2 0x0000000000000001UL
39#define ESTAR_MODE_DIV_4 0x0000000000000003UL
40#define ESTAR_MODE_DIV_6 0x0000000000000002UL
41#define ESTAR_MODE_DIV_8 0x0000000000000004UL
42#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
43
44#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
45#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
46#define MCTRL0_REFR_COUNT_SHIFT 8
47#define MCTRL0_REFR_INTERVAL 7800
48#define MCTRL0_REFR_CLKS_P_CNT 64
49
50static unsigned long read_hbreg(unsigned long addr)
51{
52 unsigned long ret;
53
54 __asm__ __volatile__("ldxa [%1] %2, %0"
55 : "=&r" (ret)
56 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
57 return ret;
58}
59
60static void write_hbreg(unsigned long addr, unsigned long val)
61{
62 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
63 "membar #Sync"
64 : /* no outputs */
65 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
66 : "memory");
67 if (addr == HBIRD_ESTAR_MODE_ADDR) {
68 /* Need to wait 16 clock cycles for the PLL to lock. */
69 udelay(1);
70 }
71}
72
73static void self_refresh_ctl(int enable)
74{
75 unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
76
77 if (enable)
78 mctrl |= MCTRL0_SREFRESH_ENAB;
79 else
80 mctrl &= ~MCTRL0_SREFRESH_ENAB;
81 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
82 (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
83}
84
85static void frob_mem_refresh(int cpu_slowing_down,
86 unsigned long clock_tick,
87 unsigned long old_divisor, unsigned long divisor)
88{
89 unsigned long old_refr_count, refr_count, mctrl;
90
91 refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
92 refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
93
94 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
95 old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
96 >> MCTRL0_REFR_COUNT_SHIFT;
97
98 mctrl &= ~MCTRL0_REFR_COUNT_MASK;
99 mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
100 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
101 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
102
103 if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
104 unsigned long usecs;
105
106 /* We have to wait for both refresh counts (old
107 * and new) to go to zero.
108 */
109 usecs = (MCTRL0_REFR_CLKS_P_CNT *
110 (refr_count + old_refr_count) *
111 1000000UL *
112 old_divisor) / clock_tick;
113 udelay(usecs + 1UL);
114 }
115}
116
117static void us2e_transition(unsigned long estar, unsigned long new_bits,
118 unsigned long clock_tick,
119 unsigned long old_divisor, unsigned long divisor)
120{
121 unsigned long flags;
122
123 local_irq_save(flags);
124
125 estar &= ~ESTAR_MODE_DIV_MASK;
126
127 /* This is based upon the state transition diagram in the IIe manual. */
128 if (old_divisor == 2 && divisor == 1) {
129 self_refresh_ctl(0);
130 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
131 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
132 } else if (old_divisor == 1 && divisor == 2) {
133 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
134 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
135 self_refresh_ctl(1);
136 } else if (old_divisor == 1 && divisor > 2) {
137 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
138 1, 2);
139 us2e_transition(estar, new_bits, clock_tick,
140 2, divisor);
141 } else if (old_divisor > 2 && divisor == 1) {
142 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
143 old_divisor, 2);
144 us2e_transition(estar, new_bits, clock_tick,
145 2, divisor);
146 } else if (old_divisor < divisor) {
147 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
148 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
149 } else if (old_divisor > divisor) {
150 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
151 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
152 } else {
153 BUG();
154 }
155
156 local_irq_restore(flags);
157}
158
159static unsigned long index_to_estar_mode(unsigned int index)
160{
161 switch (index) {
162 case 0:
163 return ESTAR_MODE_DIV_1;
164
165 case 1:
166 return ESTAR_MODE_DIV_2;
167
168 case 2:
169 return ESTAR_MODE_DIV_4;
170
171 case 3:
172 return ESTAR_MODE_DIV_6;
173
174 case 4:
175 return ESTAR_MODE_DIV_8;
176
177 default:
178 BUG();
179 }
180}
181
182static unsigned long index_to_divisor(unsigned int index)
183{
184 switch (index) {
185 case 0:
186 return 1;
187
188 case 1:
189 return 2;
190
191 case 2:
192 return 4;
193
194 case 3:
195 return 6;
196
197 case 4:
198 return 8;
199
200 default:
201 BUG();
202 }
203}
204
205static unsigned long estar_to_divisor(unsigned long estar)
206{
207 unsigned long ret;
208
209 switch (estar & ESTAR_MODE_DIV_MASK) {
210 case ESTAR_MODE_DIV_1:
211 ret = 1;
212 break;
213 case ESTAR_MODE_DIV_2:
214 ret = 2;
215 break;
216 case ESTAR_MODE_DIV_4:
217 ret = 4;
218 break;
219 case ESTAR_MODE_DIV_6:
220 ret = 6;
221 break;
222 case ESTAR_MODE_DIV_8:
223 ret = 8;
224 break;
225 default:
226 BUG();
227 }
228
229 return ret;
230}
231
232static unsigned int us2e_freq_get(unsigned int cpu)
233{
234 cpumask_t cpus_allowed;
235 unsigned long clock_tick, estar;
236
237 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
238 set_cpus_allowed_ptr(current, cpumask_of(cpu));
239
240 clock_tick = sparc64_get_clock_tick(cpu) / 1000;
241 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
242
243 set_cpus_allowed_ptr(current, &cpus_allowed);
244
245 return clock_tick / estar_to_divisor(estar);
246}
247
248static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
249 unsigned int index)
250{
251 unsigned int cpu = policy->cpu;
252 unsigned long new_bits, new_freq;
253 unsigned long clock_tick, divisor, old_divisor, estar;
254 cpumask_t cpus_allowed;
255 struct cpufreq_freqs freqs;
256
257 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
258 set_cpus_allowed_ptr(current, cpumask_of(cpu));
259
260 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
261 new_bits = index_to_estar_mode(index);
262 divisor = index_to_divisor(index);
263 new_freq /= divisor;
264
265 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
266
267 old_divisor = estar_to_divisor(estar);
268
269 freqs.old = clock_tick / old_divisor;
270 freqs.new = new_freq;
271 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
272
273 if (old_divisor != divisor)
274 us2e_transition(estar, new_bits, clock_tick * 1000,
275 old_divisor, divisor);
276
277 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
278
279 set_cpus_allowed_ptr(current, &cpus_allowed);
280}
281
282static int us2e_freq_target(struct cpufreq_policy *policy,
283 unsigned int target_freq,
284 unsigned int relation)
285{
286 unsigned int new_index = 0;
287
288 if (cpufreq_frequency_table_target(policy,
289 &us2e_freq_table[policy->cpu].table[0],
290 target_freq, relation, &new_index))
291 return -EINVAL;
292
293 us2e_set_cpu_divider_index(policy, new_index);
294
295 return 0;
296}
297
298static int us2e_freq_verify(struct cpufreq_policy *policy)
299{
300 return cpufreq_frequency_table_verify(policy,
301 &us2e_freq_table[policy->cpu].table[0]);
302}
303
304static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
305{
306 unsigned int cpu = policy->cpu;
307 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
308 struct cpufreq_frequency_table *table =
309 &us2e_freq_table[cpu].table[0];
310
311 table[0].index = 0;
312 table[0].frequency = clock_tick / 1;
313 table[1].index = 1;
314 table[1].frequency = clock_tick / 2;
315 table[2].index = 2;
316 table[2].frequency = clock_tick / 4;
317 table[2].index = 3;
318 table[2].frequency = clock_tick / 6;
319 table[2].index = 4;
320 table[2].frequency = clock_tick / 8;
321 table[2].index = 5;
322 table[3].frequency = CPUFREQ_TABLE_END;
323
324 policy->cpuinfo.transition_latency = 0;
325 policy->cur = clock_tick;
326
327 return cpufreq_frequency_table_cpuinfo(policy, table);
328}
329
330static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
331{
332 if (cpufreq_us2e_driver)
333 us2e_set_cpu_divider_index(policy, 0);
334
335 return 0;
336}
337
338static int __init us2e_freq_init(void)
339{
340 unsigned long manuf, impl, ver;
341 int ret;
342
343 if (tlb_type != spitfire)
344 return -ENODEV;
345
346 __asm__("rdpr %%ver, %0" : "=r" (ver));
347 manuf = ((ver >> 48) & 0xffff);
348 impl = ((ver >> 32) & 0xffff);
349
350 if (manuf == 0x17 && impl == 0x13) {
351 struct cpufreq_driver *driver;
352
353 ret = -ENOMEM;
354 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
355 if (!driver)
356 goto err_out;
357
358 us2e_freq_table = kzalloc(
359 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
360 GFP_KERNEL);
361 if (!us2e_freq_table)
362 goto err_out;
363
364 driver->init = us2e_freq_cpu_init;
365 driver->verify = us2e_freq_verify;
366 driver->target = us2e_freq_target;
367 driver->get = us2e_freq_get;
368 driver->exit = us2e_freq_cpu_exit;
369 driver->owner = THIS_MODULE,
370 strcpy(driver->name, "UltraSPARC-IIe");
371
372 cpufreq_us2e_driver = driver;
373 ret = cpufreq_register_driver(driver);
374 if (ret)
375 goto err_out;
376
377 return 0;
378
379err_out:
380 if (driver) {
381 kfree(driver);
382 cpufreq_us2e_driver = NULL;
383 }
384 kfree(us2e_freq_table);
385 us2e_freq_table = NULL;
386 return ret;
387 }
388
389 return -ENODEV;
390}
391
392static void __exit us2e_freq_exit(void)
393{
394 if (cpufreq_us2e_driver) {
395 cpufreq_unregister_driver(cpufreq_us2e_driver);
396 kfree(cpufreq_us2e_driver);
397 cpufreq_us2e_driver = NULL;
398 kfree(us2e_freq_table);
399 us2e_freq_table = NULL;
400 }
401}
402
403MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
404MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
405MODULE_LICENSE("GPL");
406
407module_init(us2e_freq_init);
408module_exit(us2e_freq_exit);
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
new file mode 100644
index 000000000000..c71ee142347a
--- /dev/null
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -0,0 +1,269 @@
1/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17
18#include <asm/head.h>
19#include <asm/timer.h>
20
21static struct cpufreq_driver *cpufreq_us3_driver;
22
23struct us3_freq_percpu_info {
24 struct cpufreq_frequency_table table[4];
25};
26
27/* Indexed by cpu number. */
28static struct us3_freq_percpu_info *us3_freq_table;
29
30/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
31 * in the Safari config register.
32 */
33#define SAFARI_CFG_DIV_1 0x0000000000000000UL
34#define SAFARI_CFG_DIV_2 0x0000000040000000UL
35#define SAFARI_CFG_DIV_32 0x0000000080000000UL
36#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
37
38static unsigned long read_safari_cfg(void)
39{
40 unsigned long ret;
41
42 __asm__ __volatile__("ldxa [%%g0] %1, %0"
43 : "=&r" (ret)
44 : "i" (ASI_SAFARI_CONFIG));
45 return ret;
46}
47
48static void write_safari_cfg(unsigned long val)
49{
50 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
51 "membar #Sync"
52 : /* no outputs */
53 : "r" (val), "i" (ASI_SAFARI_CONFIG)
54 : "memory");
55}
56
57static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
58{
59 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
60 unsigned long ret;
61
62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
63 case SAFARI_CFG_DIV_1:
64 ret = clock_tick / 1;
65 break;
66 case SAFARI_CFG_DIV_2:
67 ret = clock_tick / 2;
68 break;
69 case SAFARI_CFG_DIV_32:
70 ret = clock_tick / 32;
71 break;
72 default:
73 BUG();
74 }
75
76 return ret;
77}
78
79static unsigned int us3_freq_get(unsigned int cpu)
80{
81 cpumask_t cpus_allowed;
82 unsigned long reg;
83 unsigned int ret;
84
85 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
86 set_cpus_allowed_ptr(current, cpumask_of(cpu));
87
88 reg = read_safari_cfg();
89 ret = get_current_freq(cpu, reg);
90
91 set_cpus_allowed_ptr(current, &cpus_allowed);
92
93 return ret;
94}
95
96static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
97 unsigned int index)
98{
99 unsigned int cpu = policy->cpu;
100 unsigned long new_bits, new_freq, reg;
101 cpumask_t cpus_allowed;
102 struct cpufreq_freqs freqs;
103
104 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
105 set_cpus_allowed_ptr(current, cpumask_of(cpu));
106
107 new_freq = sparc64_get_clock_tick(cpu) / 1000;
108 switch (index) {
109 case 0:
110 new_bits = SAFARI_CFG_DIV_1;
111 new_freq /= 1;
112 break;
113 case 1:
114 new_bits = SAFARI_CFG_DIV_2;
115 new_freq /= 2;
116 break;
117 case 2:
118 new_bits = SAFARI_CFG_DIV_32;
119 new_freq /= 32;
120 break;
121
122 default:
123 BUG();
124 }
125
126 reg = read_safari_cfg();
127
128 freqs.old = get_current_freq(cpu, reg);
129 freqs.new = new_freq;
130 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
131
132 reg &= ~SAFARI_CFG_DIV_MASK;
133 reg |= new_bits;
134 write_safari_cfg(reg);
135
136 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
137
138 set_cpus_allowed_ptr(current, &cpus_allowed);
139}
140
141static int us3_freq_target(struct cpufreq_policy *policy,
142 unsigned int target_freq,
143 unsigned int relation)
144{
145 unsigned int new_index = 0;
146
147 if (cpufreq_frequency_table_target(policy,
148 &us3_freq_table[policy->cpu].table[0],
149 target_freq,
150 relation,
151 &new_index))
152 return -EINVAL;
153
154 us3_set_cpu_divider_index(policy, new_index);
155
156 return 0;
157}
158
159static int us3_freq_verify(struct cpufreq_policy *policy)
160{
161 return cpufreq_frequency_table_verify(policy,
162 &us3_freq_table[policy->cpu].table[0]);
163}
164
165static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
166{
167 unsigned int cpu = policy->cpu;
168 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
169 struct cpufreq_frequency_table *table =
170 &us3_freq_table[cpu].table[0];
171
172 table[0].index = 0;
173 table[0].frequency = clock_tick / 1;
174 table[1].index = 1;
175 table[1].frequency = clock_tick / 2;
176 table[2].index = 2;
177 table[2].frequency = clock_tick / 32;
178 table[3].index = 0;
179 table[3].frequency = CPUFREQ_TABLE_END;
180
181 policy->cpuinfo.transition_latency = 0;
182 policy->cur = clock_tick;
183
184 return cpufreq_frequency_table_cpuinfo(policy, table);
185}
186
187static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
188{
189 if (cpufreq_us3_driver)
190 us3_set_cpu_divider_index(policy, 0);
191
192 return 0;
193}
194
195static int __init us3_freq_init(void)
196{
197 unsigned long manuf, impl, ver;
198 int ret;
199
200 if (tlb_type != cheetah && tlb_type != cheetah_plus)
201 return -ENODEV;
202
203 __asm__("rdpr %%ver, %0" : "=r" (ver));
204 manuf = ((ver >> 48) & 0xffff);
205 impl = ((ver >> 32) & 0xffff);
206
207 if (manuf == CHEETAH_MANUF &&
208 (impl == CHEETAH_IMPL ||
209 impl == CHEETAH_PLUS_IMPL ||
210 impl == JAGUAR_IMPL ||
211 impl == PANTHER_IMPL)) {
212 struct cpufreq_driver *driver;
213
214 ret = -ENOMEM;
215 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
216 if (!driver)
217 goto err_out;
218
219 us3_freq_table = kzalloc(
220 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
221 GFP_KERNEL);
222 if (!us3_freq_table)
223 goto err_out;
224
225 driver->init = us3_freq_cpu_init;
226 driver->verify = us3_freq_verify;
227 driver->target = us3_freq_target;
228 driver->get = us3_freq_get;
229 driver->exit = us3_freq_cpu_exit;
230 driver->owner = THIS_MODULE,
231 strcpy(driver->name, "UltraSPARC-III");
232
233 cpufreq_us3_driver = driver;
234 ret = cpufreq_register_driver(driver);
235 if (ret)
236 goto err_out;
237
238 return 0;
239
240err_out:
241 if (driver) {
242 kfree(driver);
243 cpufreq_us3_driver = NULL;
244 }
245 kfree(us3_freq_table);
246 us3_freq_table = NULL;
247 return ret;
248 }
249
250 return -ENODEV;
251}
252
253static void __exit us3_freq_exit(void)
254{
255 if (cpufreq_us3_driver) {
256 cpufreq_unregister_driver(cpufreq_us3_driver);
257 kfree(cpufreq_us3_driver);
258 cpufreq_us3_driver = NULL;
259 kfree(us3_freq_table);
260 us3_freq_table = NULL;
261 }
262}
263
264MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
265MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
266MODULE_LICENSE("GPL");
267
268module_init(us3_freq_init);
269module_exit(us3_freq_exit);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7e4d77327957..156829f4576d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -121,7 +121,6 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
121 target_freq, relation, &index)) 121 target_freq, relation, &index))
122 return -EINVAL; 122 return -EINVAL;
123 123
124 freqs.cpu = policy->cpu;
125 freqs.old = spear_cpufreq_get(0); 124 freqs.old = spear_cpufreq_get(0);
126 125
127 newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; 126 newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
@@ -158,8 +157,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
158 freqs.new = newfreq / 1000; 157 freqs.new = newfreq / 1000;
159 freqs.new /= mult; 158 freqs.new /= mult;
160 159
161 for_each_cpu(freqs.cpu, policy->cpus) 160 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
162 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
163 161
164 if (mult == 2) 162 if (mult == 2)
165 ret = spear1340_set_cpu_rate(srcclk, newfreq); 163 ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -172,8 +170,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
172 freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; 170 freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
173 } 171 }
174 172
175 for_each_cpu(freqs.cpu, policy->cpus) 173 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
176 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
177 return ret; 174 return ret;
178} 175}
179 176
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3a953d519f46..618e6f417b1c 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -457,7 +457,7 @@ static int centrino_target (struct cpufreq_policy *policy,
457 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; 457 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
458 struct cpufreq_freqs freqs; 458 struct cpufreq_freqs freqs;
459 int retval = 0; 459 int retval = 0;
460 unsigned int j, k, first_cpu, tmp; 460 unsigned int j, first_cpu, tmp;
461 cpumask_var_t covered_cpus; 461 cpumask_var_t covered_cpus;
462 462
463 if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) 463 if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -481,10 +481,6 @@ static int centrino_target (struct cpufreq_policy *policy,
481 for_each_cpu(j, policy->cpus) { 481 for_each_cpu(j, policy->cpus) {
482 int good_cpu; 482 int good_cpu;
483 483
484 /* cpufreq holds the hotplug lock, so we are safe here */
485 if (!cpu_online(j))
486 continue;
487
488 /* 484 /*
489 * Support for SMP systems. 485 * Support for SMP systems.
490 * Make sure we are running on CPU that wants to change freq 486 * Make sure we are running on CPU that wants to change freq
@@ -522,13 +518,8 @@ static int centrino_target (struct cpufreq_policy *policy,
522 pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", 518 pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
523 target_freq, freqs.old, freqs.new, msr); 519 target_freq, freqs.old, freqs.new, msr);
524 520
525 for_each_cpu(k, policy->cpus) { 521 cpufreq_notify_transition(policy, &freqs,
526 if (!cpu_online(k))
527 continue;
528 freqs.cpu = k;
529 cpufreq_notify_transition(&freqs,
530 CPUFREQ_PRECHANGE); 522 CPUFREQ_PRECHANGE);
531 }
532 523
533 first_cpu = 0; 524 first_cpu = 0;
534 /* all but 16 LSB are reserved, treat them with care */ 525 /* all but 16 LSB are reserved, treat them with care */
@@ -544,12 +535,7 @@ static int centrino_target (struct cpufreq_policy *policy,
544 cpumask_set_cpu(j, covered_cpus); 535 cpumask_set_cpu(j, covered_cpus);
545 } 536 }
546 537
547 for_each_cpu(k, policy->cpus) { 538 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
548 if (!cpu_online(k))
549 continue;
550 freqs.cpu = k;
551 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
552 }
553 539
554 if (unlikely(retval)) { 540 if (unlikely(retval)) {
555 /* 541 /*
@@ -565,12 +551,8 @@ static int centrino_target (struct cpufreq_policy *policy,
565 tmp = freqs.new; 551 tmp = freqs.new;
566 freqs.new = freqs.old; 552 freqs.new = freqs.old;
567 freqs.old = tmp; 553 freqs.old = tmp;
568 for_each_cpu(j, policy->cpus) { 554 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
569 if (!cpu_online(j)) 555 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
570 continue;
571 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
572 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
573 }
574 } 556 }
575 retval = 0; 557 retval = 0;
576 558
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e29b59aa68a8..e2e5aa971452 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -263,7 +263,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
263{ 263{
264 unsigned int newstate = 0, policy_cpu; 264 unsigned int newstate = 0, policy_cpu;
265 struct cpufreq_freqs freqs; 265 struct cpufreq_freqs freqs;
266 int i;
267 266
268 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], 267 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
269 target_freq, relation, &newstate)) 268 target_freq, relation, &newstate))
@@ -272,7 +271,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
272 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); 271 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
273 freqs.old = speedstep_get(policy_cpu); 272 freqs.old = speedstep_get(policy_cpu);
274 freqs.new = speedstep_freqs[newstate].frequency; 273 freqs.new = speedstep_freqs[newstate].frequency;
275 freqs.cpu = policy->cpu;
276 274
277 pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); 275 pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
278 276
@@ -280,18 +278,12 @@ static int speedstep_target(struct cpufreq_policy *policy,
280 if (freqs.old == freqs.new) 278 if (freqs.old == freqs.new)
281 return 0; 279 return 0;
282 280
283 for_each_cpu(i, policy->cpus) { 281 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
284 freqs.cpu = i;
285 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
286 }
287 282
288 smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, 283 smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
289 true); 284 true);
290 285
291 for_each_cpu(i, policy->cpus) { 286 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
292 freqs.cpu = i;
293 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
294 }
295 287
296 return 0; 288 return 0;
297} 289}
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 6a457fcaaad5..f5a6b70ee6c0 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -252,14 +252,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
252 252
253 freqs.old = speedstep_freqs[speedstep_get_state()].frequency; 253 freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
254 freqs.new = speedstep_freqs[newstate].frequency; 254 freqs.new = speedstep_freqs[newstate].frequency;
255 freqs.cpu = 0; /* speedstep.c is UP only driver */
256 255
257 if (freqs.old == freqs.new) 256 if (freqs.old == freqs.new)
258 return 0; 257 return 0;
259 258
260 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 259 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
261 speedstep_set_state(newstate); 260 speedstep_set_state(newstate);
262 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 261 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
263 262
264 return 0; 263 return 0;
265} 264}
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
new file mode 100644
index 000000000000..c74c0e130ef4
--- /dev/null
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -0,0 +1,292 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Colin Cross <ccross@google.com>
6 * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/cpufreq.h>
24#include <linux/delay.h>
25#include <linux/init.h>
26#include <linux/err.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/suspend.h>
30
31/* Frequency table index must be sequential starting at 0 */
32static struct cpufreq_frequency_table freq_table[] = {
33 { 0, 216000 },
34 { 1, 312000 },
35 { 2, 456000 },
36 { 3, 608000 },
37 { 4, 760000 },
38 { 5, 816000 },
39 { 6, 912000 },
40 { 7, 1000000 },
41 { 8, CPUFREQ_TABLE_END },
42};
43
44#define NUM_CPUS 2
45
46static struct clk *cpu_clk;
47static struct clk *pll_x_clk;
48static struct clk *pll_p_clk;
49static struct clk *emc_clk;
50
51static unsigned long target_cpu_speed[NUM_CPUS];
52static DEFINE_MUTEX(tegra_cpu_lock);
53static bool is_suspended;
54
55static int tegra_verify_speed(struct cpufreq_policy *policy)
56{
57 return cpufreq_frequency_table_verify(policy, freq_table);
58}
59
60static unsigned int tegra_getspeed(unsigned int cpu)
61{
62 unsigned long rate;
63
64 if (cpu >= NUM_CPUS)
65 return 0;
66
67 rate = clk_get_rate(cpu_clk) / 1000;
68 return rate;
69}
70
71static int tegra_cpu_clk_set_rate(unsigned long rate)
72{
73 int ret;
74
75 /*
76 * Take an extra reference to the main pll so it doesn't turn
77 * off when we move the cpu off of it
78 */
79 clk_prepare_enable(pll_x_clk);
80
81 ret = clk_set_parent(cpu_clk, pll_p_clk);
82 if (ret) {
83 pr_err("Failed to switch cpu to clock pll_p\n");
84 goto out;
85 }
86
87 if (rate == clk_get_rate(pll_p_clk))
88 goto out;
89
90 ret = clk_set_rate(pll_x_clk, rate);
91 if (ret) {
92 pr_err("Failed to change pll_x to %lu\n", rate);
93 goto out;
94 }
95
96 ret = clk_set_parent(cpu_clk, pll_x_clk);
97 if (ret) {
98 pr_err("Failed to switch cpu to clock pll_x\n");
99 goto out;
100 }
101
102out:
103 clk_disable_unprepare(pll_x_clk);
104 return ret;
105}
106
107static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
108 unsigned long rate)
109{
110 int ret = 0;
111 struct cpufreq_freqs freqs;
112
113 freqs.old = tegra_getspeed(0);
114 freqs.new = rate;
115
116 if (freqs.old == freqs.new)
117 return ret;
118
119 /*
120 * Vote on memory bus frequency based on cpu frequency
121 * This sets the minimum frequency, display or avp may request higher
122 */
123 if (rate >= 816000)
124 clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
125 else if (rate >= 456000)
126 clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */
127 else
128 clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
129
130 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
131
132#ifdef CONFIG_CPU_FREQ_DEBUG
133 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
134 freqs.old, freqs.new);
135#endif
136
137 ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
138 if (ret) {
139 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
140 freqs.new);
141 return ret;
142 }
143
144 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
145
146 return 0;
147}
148
149static unsigned long tegra_cpu_highest_speed(void)
150{
151 unsigned long rate = 0;
152 int i;
153
154 for_each_online_cpu(i)
155 rate = max(rate, target_cpu_speed[i]);
156 return rate;
157}
158
159static int tegra_target(struct cpufreq_policy *policy,
160 unsigned int target_freq,
161 unsigned int relation)
162{
163 unsigned int idx;
164 unsigned int freq;
165 int ret = 0;
166
167 mutex_lock(&tegra_cpu_lock);
168
169 if (is_suspended) {
170 ret = -EBUSY;
171 goto out;
172 }
173
174 cpufreq_frequency_table_target(policy, freq_table, target_freq,
175 relation, &idx);
176
177 freq = freq_table[idx].frequency;
178
179 target_cpu_speed[policy->cpu] = freq;
180
181 ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
182
183out:
184 mutex_unlock(&tegra_cpu_lock);
185 return ret;
186}
187
188static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
189 void *dummy)
190{
191 mutex_lock(&tegra_cpu_lock);
192 if (event == PM_SUSPEND_PREPARE) {
193 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
194 is_suspended = true;
195 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
196 freq_table[0].frequency);
197 tegra_update_cpu_speed(policy, freq_table[0].frequency);
198 cpufreq_cpu_put(policy);
199 } else if (event == PM_POST_SUSPEND) {
200 is_suspended = false;
201 }
202 mutex_unlock(&tegra_cpu_lock);
203
204 return NOTIFY_OK;
205}
206
207static struct notifier_block tegra_cpu_pm_notifier = {
208 .notifier_call = tegra_pm_notify,
209};
210
211static int tegra_cpu_init(struct cpufreq_policy *policy)
212{
213 if (policy->cpu >= NUM_CPUS)
214 return -EINVAL;
215
216 clk_prepare_enable(emc_clk);
217 clk_prepare_enable(cpu_clk);
218
219 cpufreq_frequency_table_cpuinfo(policy, freq_table);
220 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
221 policy->cur = tegra_getspeed(policy->cpu);
222 target_cpu_speed[policy->cpu] = policy->cur;
223
224 /* FIXME: what's the actual transition time? */
225 policy->cpuinfo.transition_latency = 300 * 1000;
226
227 cpumask_copy(policy->cpus, cpu_possible_mask);
228
229 if (policy->cpu == 0)
230 register_pm_notifier(&tegra_cpu_pm_notifier);
231
232 return 0;
233}
234
235static int tegra_cpu_exit(struct cpufreq_policy *policy)
236{
237 cpufreq_frequency_table_cpuinfo(policy, freq_table);
238 clk_disable_unprepare(emc_clk);
239 return 0;
240}
241
242static struct freq_attr *tegra_cpufreq_attr[] = {
243 &cpufreq_freq_attr_scaling_available_freqs,
244 NULL,
245};
246
247static struct cpufreq_driver tegra_cpufreq_driver = {
248 .verify = tegra_verify_speed,
249 .target = tegra_target,
250 .get = tegra_getspeed,
251 .init = tegra_cpu_init,
252 .exit = tegra_cpu_exit,
253 .name = "tegra",
254 .attr = tegra_cpufreq_attr,
255};
256
257static int __init tegra_cpufreq_init(void)
258{
259 cpu_clk = clk_get_sys(NULL, "cpu");
260 if (IS_ERR(cpu_clk))
261 return PTR_ERR(cpu_clk);
262
263 pll_x_clk = clk_get_sys(NULL, "pll_x");
264 if (IS_ERR(pll_x_clk))
265 return PTR_ERR(pll_x_clk);
266
267 pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
268 if (IS_ERR(pll_p_clk))
269 return PTR_ERR(pll_p_clk);
270
271 emc_clk = clk_get_sys("cpu", "emc");
272 if (IS_ERR(emc_clk)) {
273 clk_put(cpu_clk);
274 return PTR_ERR(emc_clk);
275 }
276
277 return cpufreq_register_driver(&tegra_cpufreq_driver);
278}
279
280static void __exit tegra_cpufreq_exit(void)
281{
282 cpufreq_unregister_driver(&tegra_cpufreq_driver);
283 clk_put(emc_clk);
284 clk_put(cpu_clk);
285}
286
287
288MODULE_AUTHOR("Colin Cross <ccross@android.com>");
289MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
290MODULE_LICENSE("GPL");
291module_init(tegra_cpufreq_init);
292module_exit(tegra_cpufreq_exit);
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
new file mode 100644
index 000000000000..12fc904d7dab
--- /dev/null
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -0,0 +1,92 @@
1/*
2 * clock scaling for the UniCore-II
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/clk.h>
18#include <linux/cpufreq.h>
19
20#include <mach/hardware.h>
21
22static struct cpufreq_driver ucv2_driver;
23
24/* make sure that only the "userspace" governor is run
25 * -- anything else wouldn't make sense on this platform, anyway.
26 */
27int ucv2_verify_speed(struct cpufreq_policy *policy)
28{
29 if (policy->cpu)
30 return -EINVAL;
31
32 cpufreq_verify_within_limits(policy,
33 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
34
35 return 0;
36}
37
38static unsigned int ucv2_getspeed(unsigned int cpu)
39{
40 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
41
42 if (cpu)
43 return 0;
44 return clk_get_rate(mclk)/1000;
45}
46
47static int ucv2_target(struct cpufreq_policy *policy,
48 unsigned int target_freq,
49 unsigned int relation)
50{
51 unsigned int cur = ucv2_getspeed(0);
52 struct cpufreq_freqs freqs;
53 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
54
55 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
56
57 if (!clk_set_rate(mclk, target_freq * 1000)) {
58 freqs.old = cur;
59 freqs.new = target_freq;
60 }
61
62 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
63
64 return 0;
65}
66
67static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
68{
69 if (policy->cpu != 0)
70 return -EINVAL;
71 policy->cur = ucv2_getspeed(0);
72 policy->min = policy->cpuinfo.min_freq = 250000;
73 policy->max = policy->cpuinfo.max_freq = 1000000;
74 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
75 return 0;
76}
77
78static struct cpufreq_driver ucv2_driver = {
79 .flags = CPUFREQ_STICKY,
80 .verify = ucv2_verify_speed,
81 .target = ucv2_target,
82 .get = ucv2_getspeed,
83 .init = ucv2_cpu_init,
84 .name = "UniCore-II",
85};
86
87static int __init ucv2_cpufreq_init(void)
88{
89 return cpufreq_register_driver(&ucv2_driver);
90}
91
92arch_initcall(ucv2_cpufreq_init);