path: root/drivers/cpufreq
author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-06-27 15:46:45 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-06-27 15:46:45 -0400
commit		39a95f4861381a87167729be8f71c59ed4efc27d (patch)
tree		5dc01d8d229d9f553a7d3502902b4acb2191a19f /drivers/cpufreq
parent		7ae9b27b2af4bb79a105bcdea08400fd00f6de87 (diff)
parent		7f77a563f0c110a633b4ab0fec9f49d41630039a (diff)
Merge branch 'pm-cpufreq-assorted' into pm-cpufreq
* pm-cpufreq-assorted: (21 commits)
  cpufreq: powernow-k8: call CPUFREQ_POSTCHANGE notfier in error cases
  cpufreq: pcc: call CPUFREQ_POSTCHANGE notfier in error cases
  cpufreq: e_powersaver: call CPUFREQ_POSTCHANGE notfier in error cases
  cpufreq: ACPI: call CPUFREQ_POSTCHANGE notfier in error cases
  cpufreq: make __cpufreq_notify_transition() static
  cpufreq: Fix minor formatting issues
  cpufreq: Fix governor start/stop race condition
  cpufreq: Simplify userspace governor
  cpufreq: powerpc: move cpufreq driver to drivers/cpufreq
  cpufreq: kirkwood: Select CPU_FREQ_TABLE option
  cpufreq: big.LITTLE needs cpufreq table
  cpufreq: SPEAr needs cpufreq table
  cpufreq: powerpc: Add cpufreq driver for Freescale e500mc SoCs
  cpufreq: remove unnecessary cpufreq_cpu_{get|put}() calls
  cpufreq: MAINTAINERS: Add git tree path for ARM specific updates
  cpufreq: rename index as driver_data in cpufreq_frequency_table
  cpufreq: Don't create empty /sys/devices/system/cpu/cpufreq directory
  cpufreq: Move get_cpu_idle_time() to cpufreq.c
  cpufreq: governors: Move get_governor_parent_kobj() to cpufreq.c
  cpufreq: Add EXPORT_SYMBOL_GPL for have_governor_per_policy
  ...
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/Kconfig.arm		|    3
-rw-r--r--	drivers/cpufreq/Kconfig.powerpc		|   36
-rw-r--r--	drivers/cpufreq/Makefile		|    4
-rw-r--r--	drivers/cpufreq/acpi-cpufreq.c		|   16
-rw-r--r--	drivers/cpufreq/blackfin-cpufreq.c	|   10
-rw-r--r--	drivers/cpufreq/cpufreq.c		|  202
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	|   50
-rw-r--r--	drivers/cpufreq/cpufreq_governor.h	|    5
-rw-r--r--	drivers/cpufreq/cpufreq_performance.c	|    4
-rw-r--r--	drivers/cpufreq/cpufreq_powersave.c	|    6
-rw-r--r--	drivers/cpufreq/cpufreq_stats.c		|    4
-rw-r--r--	drivers/cpufreq/cpufreq_userspace.c	|  112
-rw-r--r--	drivers/cpufreq/e_powersaver.c		|   11
-rw-r--r--	drivers/cpufreq/freq_table.c		|   26
-rw-r--r--	drivers/cpufreq/ia64-acpi-cpufreq.c	|    2
-rw-r--r--	drivers/cpufreq/kirkwood-cpufreq.c	|    2
-rw-r--r--	drivers/cpufreq/longhaul.c		|   16
-rw-r--r--	drivers/cpufreq/loongson2_cpufreq.c	|    2
-rw-r--r--	drivers/cpufreq/p4-clockmod.c		|    4
-rw-r--r--	drivers/cpufreq/pasemi-cpufreq.c	|  331
-rw-r--r--	drivers/cpufreq/pcc-cpufreq.c		|    2
-rw-r--r--	drivers/cpufreq/pmac32-cpufreq.c	|  721
-rw-r--r--	drivers/cpufreq/pmac64-cpufreq.c	|  746
-rw-r--r--	drivers/cpufreq/powernow-k6.c		|    8
-rw-r--r--	drivers/cpufreq/powernow-k7.c		|   16
-rw-r--r--	drivers/cpufreq/powernow-k8.c		|   24
-rw-r--r--	drivers/cpufreq/ppc-corenet-cpufreq.c	|  380
-rw-r--r--	drivers/cpufreq/ppc_cbe_cpufreq.c	|    4
-rw-r--r--	drivers/cpufreq/pxa2xx-cpufreq.c	|    8
-rw-r--r--	drivers/cpufreq/pxa3xx-cpufreq.c	|    4
-rw-r--r--	drivers/cpufreq/s3c2416-cpufreq.c	|    2
-rw-r--r--	drivers/cpufreq/s3c64xx-cpufreq.c	|    2
-rw-r--r--	drivers/cpufreq/sc520_freq.c		|    2
-rw-r--r--	drivers/cpufreq/sparc-us2e-cpufreq.c	|   12
-rw-r--r--	drivers/cpufreq/sparc-us3-cpufreq.c	|    8
-rw-r--r--	drivers/cpufreq/spear-cpufreq.c		|    4
-rw-r--r--	drivers/cpufreq/speedstep-centrino.c	|    8
-rw-r--r--	drivers/cpufreq/tegra-cpufreq.c		|   19
38 files changed, 2499 insertions, 317 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5085427eb29d..fee9f489269a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,6 +5,7 @@
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
 	depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+	select CPU_FREQ_TABLE
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
@@ -88,6 +89,7 @@ config ARM_INTEGRATOR
 
 config ARM_KIRKWOOD_CPUFREQ
 	def_bool ARCH_KIRKWOOD && OF
+	select CPU_FREQ_TABLE
 	help
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.
@@ -151,6 +153,7 @@ config ARM_SA1110_CPUFREQ
 config ARM_SPEAR_CPUFREQ
 	bool "SPEAr CPUFreq support"
 	depends on PLAT_SPEAR
+	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver support for SPEAr SOCs.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 68c1abc401f6..25ca9db62e09 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -24,3 +24,39 @@ config CPU_FREQ_MAPLE
 	help
 	  This adds support for frequency switching on Maple 970FX
 	  Evaluation Board and compatible boards (IBM JS2x blades).
+
+config PPC_CORENET_CPUFREQ
+	tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
+	depends on PPC_E500MC && OF && COMMON_CLK
+	select CPU_FREQ_TABLE
+	select CLK_PPC_CORENET
+	help
+	  This adds the CPUFreq driver support for Freescale e500mc,
+	  e5500 and e6500 series SoCs which are capable of changing
+	  the CPU's frequency dynamically.
+
+config CPU_FREQ_PMAC
+	bool "Support for Apple PowerBooks"
+	depends on ADB_PMU && PPC32
+	select CPU_FREQ_TABLE
+	help
+	  This adds support for frequency switching on Apple PowerBooks,
+	  this currently includes some models of iBook & Titanium
+	  PowerBook.
+
+config CPU_FREQ_PMAC64
+	bool "Support for some Apple G5s"
+	depends on PPC_PMAC && PPC64
+	select CPU_FREQ_TABLE
+	help
+	  This adds support for frequency switching on Apple iMac G5,
+	  and some of the more recent desktop G5 machines as well.
+
+config PPC_PASEMI_CPUFREQ
+	bool "Support for PA Semi PWRficient"
+	depends on PPC_PASEMI
+	select CPU_FREQ_TABLE
+	default y
+	help
+	  This adds the support for frequency switching on PA Semi
+	  PWRficient processors.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9c873e778ee0..0c8d0c69306d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -79,6 +79,10 @@ obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
 ppc-cbe-cpufreq-y			+= ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
 obj-$(CONFIG_CPU_FREQ_CBE_PMI)		+= ppc_cbe_cpufreq_pmi.o
 obj-$(CONFIG_CPU_FREQ_MAPLE)		+= maple-cpufreq.o
+obj-$(CONFIG_PPC_CORENET_CPUFREQ)	+= ppc-corenet-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC)		+= pmac32-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC64)		+= pmac64-cpufreq.o
+obj-$(CONFIG_PPC_PASEMI_CPUFREQ)	+= pasemi-cpufreq.o
 
 ##################################################################################
 # Other platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index edc089e9d0c4..2c5906d71397 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -232,7 +232,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	perf = data->acpi_data;
 
 	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		if (msr == perf->states[data->freq_table[i].index].status)
+		if (msr == perf->states[data->freq_table[i].driver_data].status)
 			return data->freq_table[i].frequency;
 	}
 	return data->freq_table[0].frequency;
@@ -442,7 +442,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		goto out;
 	}
 
-	next_perf_state = data->freq_table[next_state].index;
+	next_perf_state = data->freq_table[next_state].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
 			pr_debug("Called after resume, resetting to P%d\n",
@@ -494,12 +494,14 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			pr_debug("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
 			result = -EAGAIN;
-			goto out;
+			freqs.new = freqs.old;
 		}
 	}
 
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-	perf->state = next_perf_state;
+
+	if (!result)
+		perf->state = next_perf_state;
 
 out:
 	return result;
@@ -811,7 +813,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		    data->freq_table[valid_states-1].frequency / 1000)
 			continue;
 
-		data->freq_table[valid_states].index = i;
+		data->freq_table[valid_states].driver_data = i;
 		data->freq_table[valid_states].frequency =
 		    perf->states[i].core_frequency * 1000;
 		valid_states++;
@@ -947,7 +949,7 @@ static void __init acpi_cpufreq_boost_init(void)
 	/* We create the boost file in any case, though for systems without
 	 * hardware support it will be read-only and hardwired to return 0.
 	 */
-	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
 		pr_warn(PFX "could not register global boost sysfs file\n");
 	else
 		pr_debug("registered global boost sysfs file\n");
@@ -955,7 +957,7 @@ static void __init acpi_cpufreq_boost_init(void)
 
 static void __exit acpi_cpufreq_boost_exit(void)
 {
-	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+	cpufreq_sysfs_remove_file(&(global_boost.attr));
 
 	if (msrs) {
 		unregister_cpu_notifier(&boost_nb);
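The "call CPUFREQ_POSTCHANGE notfier in error cases" commits in this series (ACPI above, e_powersaver, pcc and powernow-k8 below or elsewhere in the branch) all apply the same pattern: a PRECHANGE notification must always be balanced by a POSTCHANGE one, even when the hardware transition fails. A minimal sketch of that pattern, not code from this patch; my_hw_set_freq() is a hypothetical hardware callback:

#include <linux/cpufreq.h>

static int example_target(struct cpufreq_policy *policy, unsigned int new_freq)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = new_freq,
	};
	int ret;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = my_hw_set_freq(new_freq);		/* hypothetical hardware write */
	if (ret)
		freqs.new = freqs.old;		/* transition failed: report "no change" */

	/* always balances the PRECHANGE above, success or not */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	return ret;
}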
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index 995511e80bef..9cdbbd278a80 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -20,23 +20,23 @@
 
 
 /* this is the table of CCLK frequencies, in Hz */
-/* .index is the entry in the auxiliary dpm_state_table[] */
+/* .driver_data is the entry in the auxiliary dpm_state_table[] */
 static struct cpufreq_frequency_table bfin_freq_table[] = {
 	{
 		.frequency = CPUFREQ_TABLE_END,
-		.index = 0,
+		.driver_data = 0,
 	},
 	{
 		.frequency = CPUFREQ_TABLE_END,
-		.index = 1,
+		.driver_data = 1,
 	},
 	{
 		.frequency = CPUFREQ_TABLE_END,
-		.index = 2,
+		.driver_data = 2,
 	},
 	{
 		.frequency = CPUFREQ_TABLE_END,
-		.index = 0,
+		.driver_data = 0,
 	},
 };
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d53f47d1747..d976e222f10f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2001 Russell King
  *  (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *  (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
  *
  * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
  *	Added handling for CPU hotplug
@@ -12,12 +13,13 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/cputime.h>
 #include <linux/kernel.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/notifier.h>
@@ -25,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/tick.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
@@ -41,11 +44,13 @@
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
+static DEFINE_MUTEX(cpufreq_governor_lock);
+
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
-static DEFINE_RWLOCK(cpufreq_driver_lock);
 
 /*
  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -132,6 +137,51 @@ bool have_governor_per_policy(void)
 {
 	return cpufreq_driver->have_governor_per_policy;
 }
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+	if (have_governor_per_policy())
+		return &policy->kobj;
+	else
+		return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+	idle_time = cur_wall_time - busy_time;
+	if (wall)
+		*wall = cputime_to_usecs(cur_wall_time);
+
+	return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+	if (idle_time == -1ULL)
+		return get_cpu_idle_time_jiffy(cpu, wall);
+	else if (!io_busy)
+		idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+	return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 {
@@ -150,7 +200,6 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 	if (!try_module_get(cpufreq_driver->owner))
 		goto err_out_unlock;
 
-
 	/* get the CPU */
 	data = per_cpu(cpufreq_cpu_data, cpu);
 
@@ -220,7 +269,7 @@ static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
  */
 #ifndef CONFIG_SMP
 static unsigned long l_p_j_ref;
 static unsigned int l_p_j_ref_freq;
 
 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 {
@@ -233,7 +282,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 		pr_debug("saving %lu as reference value for loops_per_jiffy; "
 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 	}
 	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
@@ -248,8 +297,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 }
 #endif
 
-
-void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, unsigned int state)
 {
 	BUG_ON(irqs_disabled());
@@ -294,6 +342,7 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		break;
 	}
 }
+
 /**
  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
  * on frequency transition.
@@ -311,7 +360,6 @@ void cpufreq_notify_transition(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 
-
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
@@ -376,7 +424,6 @@ out:
 	return err;
 }
 
-
 /**
  * cpufreq_per_cpu_attr_read() / show_##file_name() -
  * print out cpufreq information
@@ -441,7 +488,6 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 	return sprintf(buf, "%u\n", cur_freq);
 }
 
-
 /**
  * show_scaling_governor - show the current policy for the specified CPU
  */
@@ -457,7 +503,6 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 	return -EINVAL;
 }
 
-
 /**
  * store_scaling_governor - store policy for the specified CPU
  */
@@ -480,8 +525,10 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 						&new_policy.governor))
 		return -EINVAL;
 
-	/* Do not use cpufreq_set_policy here or the user_policy.max
-	   will be wrongly overridden */
+	/*
+	 * Do not use cpufreq_set_policy here or the user_policy.max
+	 * will be wrongly overridden
+	 */
 	ret = __cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
@@ -630,9 +677,6 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
 
@@ -703,6 +747,49 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
+static int cpufreq_global_kobject_usage;
+
+int cpufreq_get_global_kobject(void)
+{
+	if (!cpufreq_global_kobject_usage++)
+		return kobject_add(cpufreq_global_kobject,
+				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
+
+	return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_global_kobject);
+
+void cpufreq_put_global_kobject(void)
+{
+	if (!--cpufreq_global_kobject_usage)
+		kobject_del(cpufreq_global_kobject);
+}
+EXPORT_SYMBOL(cpufreq_put_global_kobject);
+
+int cpufreq_sysfs_create_file(const struct attribute *attr)
+{
+	int ret = cpufreq_get_global_kobject();
+
+	if (!ret) {
+		ret = sysfs_create_file(cpufreq_global_kobject, attr);
+		if (ret)
+			cpufreq_put_global_kobject();
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cpufreq_sysfs_create_file);
+
+void cpufreq_sysfs_remove_file(const struct attribute *attr)
+{
+	sysfs_remove_file(cpufreq_global_kobject, attr);
+	cpufreq_put_global_kobject();
+}
+EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
+
 /* symlink affected CPUs */
 static int cpufreq_add_dev_symlink(unsigned int cpu,
 				   struct cpufreq_policy *policy)
@@ -1005,7 +1092,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
  * Caller should already have policy_rwsem in write mode for this CPU.
  * This routine frees the rwsem before returning.
  */
-static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+static int __cpufreq_remove_dev(struct device *dev,
+				struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, ret, cpus;
 	unsigned long flags;
@@ -1112,7 +1200,6 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 	return 0;
 }
 
-
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
@@ -1125,7 +1212,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	return retval;
 }
 
-
 static void handle_update(struct work_struct *work)
 {
 	struct cpufreq_policy *policy =
@@ -1136,7 +1222,8 @@ static void handle_update(struct work_struct *work)
 }
 
 /**
- * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
+ * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
+ *	in deep trouble.
  * @cpu: cpu number
  * @old_freq: CPU frequency the kernel thinks the CPU runs at
  * @new_freq: CPU frequency the CPU actually runs at
@@ -1151,7 +1238,6 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	struct cpufreq_freqs freqs;
 	unsigned long flags;
 
-
 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
 	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
 
@@ -1166,7 +1252,6 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 }
 
-
 /**
  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
  * @cpu: CPU number
@@ -1212,7 +1297,6 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_quick_get_max);
 
-
 static unsigned int __cpufreq_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1271,7 +1355,6 @@ static struct subsys_interface cpufreq_interface = {
 	.remove_dev	= cpufreq_remove_dev,
 };
 
-
 /**
  * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
  *
@@ -1408,11 +1491,10 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 }
 EXPORT_SYMBOL(cpufreq_register_notifier);
 
-
 /**
  * cpufreq_unregister_notifier - unregister a driver with cpufreq
  * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
  *
  * Remove a driver from the CPU frequency notifier list.
  *
@@ -1448,7 +1530,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
-
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -1484,10 +1565,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int ret = -EINVAL;
 
-	policy = cpufreq_cpu_get(policy->cpu);
-	if (!policy)
-		goto no_policy;
-
 	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
 		goto fail;
 
@@ -1496,30 +1573,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	unlock_policy_rwsem_write(policy->cpu);
 
 fail:
-	cpufreq_cpu_put(policy);
-no_policy:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
-	int ret = 0;
-
 	if (cpufreq_disabled())
-		return ret;
+		return 0;
 
 	if (!cpufreq_driver->getavg)
 		return 0;
 
-	policy = cpufreq_cpu_get(policy->cpu);
-	if (!policy)
-		return -EINVAL;
-
-	ret = cpufreq_driver->getavg(policy, cpu);
-
-	cpufreq_cpu_put(policy);
-	return ret;
+	return cpufreq_driver->getavg(policy, cpu);
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
 
@@ -1562,6 +1628,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 
 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
 		policy->cpu, event);
+
+	mutex_lock(&cpufreq_governor_lock);
+	if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
+	    (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
+		mutex_unlock(&cpufreq_governor_lock);
+		return -EBUSY;
+	}
+
+	if (event == CPUFREQ_GOV_STOP)
+		policy->governor_enabled = false;
+	else if (event == CPUFREQ_GOV_START)
+		policy->governor_enabled = true;
+
+	mutex_unlock(&cpufreq_governor_lock);
+
 	ret = policy->governor->governor(policy, event);
 
 	if (!ret) {
@@ -1569,6 +1650,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 			policy->governor->initialized++;
 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
 			policy->governor->initialized--;
+	} else {
+		/* Restore original values */
+		mutex_lock(&cpufreq_governor_lock);
+		if (event == CPUFREQ_GOV_STOP)
+			policy->governor_enabled = true;
+		else if (event == CPUFREQ_GOV_START)
+			policy->governor_enabled = false;
+		mutex_unlock(&cpufreq_governor_lock);
 	}
 
 	/* we keep one module reference alive for
@@ -1581,7 +1670,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	return ret;
 }
 
-
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	int err;
@@ -1606,7 +1694,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
-
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1636,7 +1723,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
 
 
-
 /*********************************************************************
  *                          POLICY INTERFACE                         *
  *********************************************************************/
@@ -1665,7 +1751,6 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_get_policy);
 
-
 /*
  * data   : current policy.
  * policy : policy to be set.
@@ -1699,8 +1784,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_INCOMPATIBLE, policy);
 
-	/* verify the cpu speed can be set within this limit,
-	   which might be different to the first one */
+	/*
+	 * verify the cpu speed can be set within this limit, which might be
+	 * different to the first one
+	 */
 	ret = cpufreq_driver->verify(policy);
 	if (ret)
 		goto error_out;
@@ -1802,8 +1889,10 @@ int cpufreq_update_policy(unsigned int cpu)
 	policy.policy = data->user_policy.policy;
 	policy.governor = data->user_policy.governor;
 
-	/* BIOS might change freq behind our back
-	  -> ask driver for current freq and notify governors about a change */
+	/*
+	 * BIOS might change freq behind our back
+	 * -> ask driver for current freq and notify governors about a change
+	 */
 	if (cpufreq_driver->get) {
 		policy.cur = cpufreq_driver->get(cpu);
 		if (!data->cur) {
@@ -1852,7 +1941,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 }
 
 static struct notifier_block __refdata cpufreq_cpu_notifier = {
 	.notifier_call = cpufreq_cpu_callback,
 };
 
 /*********************************************************************
@@ -1864,7 +1953,7 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
  * @driver_data: A struct cpufreq_driver containing the values#
  * submitted by the CPU Frequency driver.
  *
  * Registers a CPU Frequency driver to this core code. This code
  * returns zero on success, -EBUSY when another driver got here first
  * (and isn't unregistered in the meantime).
  *
@@ -1931,11 +2020,10 @@ err_null_driver:
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 
-
 /**
  * cpufreq_unregister_driver - unregister the current CPUFreq driver
  *
  * Unregister the current CPUFreq driver. Only call this if you have
  * the right to do so, i.e. if you have succeeded in initialising before!
  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
  * currently not initialised.
@@ -1972,7 +2060,7 @@ static int __init cpufreq_core_init(void)
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}
 
-	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
 
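With the reference-counted global kobject introduced above, the /sys/devices/system/cpu/cpufreq directory now only exists while something is actually published under it. A hedged sketch of how a driver would expose a global attribute through the new helpers; the attribute name, value and show routine are made up for illustration and are not part of this patch:

#include <linux/cpufreq.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static struct kobj_attribute example_attr = __ATTR(example, 0444, example_show, NULL);

static int __init example_init(void)
{
	/* adds the global "cpufreq" directory on first use */
	return cpufreq_sysfs_create_file(&example_attr.attr);
}

static void __exit example_exit(void)
{
	/* removes the file and drops the directory when the last user goes away */
	cpufreq_sysfs_remove_file(&example_attr.attr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");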
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index dc9b72e25c1a..a849b2d499fa 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -23,21 +23,12 @@
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/tick.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
 
 #include "cpufreq_governor.h"
 
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
-	if (have_governor_per_policy())
-		return &policy->kobj;
-	else
-		return cpufreq_global_kobject;
-}
-
 static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 {
 	if (have_governor_per_policy())
@@ -46,41 +37,6 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 	return dbs_data->cdata->attr_group_gov_sys;
 }
 
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-	u64 idle_time;
-	u64 cur_wall_time;
-	u64 busy_time;
-
-	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-	idle_time = cur_wall_time - busy_time;
-	if (wall)
-		*wall = cputime_to_usecs(cur_wall_time);
-
-	return cputime_to_usecs(idle_time);
-}
-
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
-{
-	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
-
-	if (idle_time == -1ULL)
-		return get_cpu_idle_time_jiffy(cpu, wall);
-	else if (!io_busy)
-		idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-	return idle_time;
-}
-EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
 	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
@@ -278,6 +234,9 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
+		if (!have_governor_per_policy())
+			WARN_ON(cpufreq_get_global_kobject());
+
 		rc = sysfs_create_group(get_governor_parent_kobj(policy),
 				get_sysfs_attr(dbs_data));
 		if (rc) {
@@ -316,6 +275,9 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		sysfs_remove_group(get_governor_parent_kobj(policy),
 				get_sysfs_attr(dbs_data));
 
+		if (!have_governor_per_policy())
+			cpufreq_put_global_kobject();
+
 		if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
 		    (policy->governor->initialized == 1)) {
 			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
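get_cpu_idle_time() now lives in cpufreq.c (see the cpufreq.c hunks above) and remains exported, so governor code outside this file can keep deriving load from idle/wall deltas. A rough sketch of that usual pattern; the function and variable names are illustrative and not taken from this patch:

#include <linux/cpufreq.h>

/* returns load in percent for one CPU, updating the caller's previous samples */
static unsigned int example_cpu_load(unsigned int cpu, u64 *prev_idle, u64 *prev_wall)
{
	u64 cur_idle, cur_wall;
	unsigned int idle_delta, wall_delta;

	cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);	/* io wait counted as idle */

	idle_delta = (unsigned int)(cur_idle - *prev_idle);
	wall_delta = (unsigned int)(cur_wall - *prev_wall);
	*prev_idle = cur_idle;
	*prev_wall = cur_wall;

	if (!wall_delta || wall_delta < idle_delta)
		return 0;

	return 100 * (wall_delta - idle_delta) / wall_delta;
}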
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index e16a96130cb3..6663ec3b3056 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -81,7 +81,7 @@ static ssize_t show_##file_name##_gov_sys \
 	return sprintf(buf, "%u\n", tuners->file_name);			\
 }									\
 									\
 static ssize_t show_##file_name##_gov_pol				\
 (struct cpufreq_policy *policy, char *buf)				\
 {									\
 	struct dbs_data *dbs_data = policy->governor_data;		\
@@ -91,7 +91,7 @@ static ssize_t show_##file_name##_gov_pol \
 
 #define store_one(_gov, file_name)					\
 static ssize_t store_##file_name##_gov_sys				\
 (struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)	\
 {									\
 	struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data;	\
 	return store_##file_name(dbs_data, buf, count);			\
@@ -256,7 +256,6 @@ static ssize_t show_sampling_rate_min_gov_pol \
 	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
 }
 
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
 bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		    unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index ceee06849b91..9fef7d6e4e6a 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -17,7 +17,6 @@
 #include <linux/cpufreq.h>
 #include <linux/init.h>
 
-
 static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 					unsigned int event)
 {
@@ -44,19 +43,16 @@ struct cpufreq_governor cpufreq_gov_performance = {
 	.owner		= THIS_MODULE,
 };
 
-
 static int __init cpufreq_gov_performance_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_performance);
 }
 
-
 static void __exit cpufreq_gov_performance_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_performance);
 }
 
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 2d948a171155..32109a14f5dc 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -1,7 +1,7 @@
 /*
  *  linux/drivers/cpufreq/cpufreq_powersave.c
  *
  *  Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -48,13 +48,11 @@ static int __init cpufreq_gov_powersave_init(void)
 	return cpufreq_register_governor(&cpufreq_gov_powersave);
 }
 
-
 static void __exit cpufreq_gov_powersave_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_powersave);
 }
 
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index fb65decffa28..6d35caa91167 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ static spinlock_t cpufreq_stats_lock;
 struct cpufreq_stats {
 	unsigned int cpu;
 	unsigned int total_trans;
 	unsigned long long last_time;
 	unsigned int max_state;
 	unsigned int state_num;
 	unsigned int last_index;
@@ -116,7 +116,7 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
 				stat->freq_table[i]);
 
 		for (j = 0; j < stat->state_num; j++) {
 			if (len >= PAGE_SIZE)
 				break;
 			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index bbeb9c0720a6..03078090b5f7 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -13,55 +13,13 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu.h>
+#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/mutex.h>
 
-/**
- * A few values needed by the userspace governor
- */
-static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
-static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
-static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
-static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
-							userspace */
 static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
-
 static DEFINE_MUTEX(userspace_mutex);
-static int cpus_using_userspace_governor;
-
-/* keep track of frequency transitions */
-static int
-userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-	void *data)
-{
-	struct cpufreq_freqs *freq = data;
-
-	if (!per_cpu(cpu_is_managed, freq->cpu))
-		return 0;
-
-	if (val == CPUFREQ_POSTCHANGE) {
-		pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
-			freq->cpu, freq->new);
-		per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
-	}
-
-	return 0;
-}
-
-static struct notifier_block userspace_cpufreq_notifier_block = {
-	.notifier_call = userspace_cpufreq_notifier
-};
-
 
 /**
  * cpufreq_set - set the CPU frequency
@@ -80,13 +38,6 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 	if (!per_cpu(cpu_is_managed, policy->cpu))
 		goto err;
 
-	per_cpu(cpu_set_freq, policy->cpu) = freq;
-
-	if (freq < per_cpu(cpu_min_freq, policy->cpu))
-		freq = per_cpu(cpu_min_freq, policy->cpu);
-	if (freq > per_cpu(cpu_max_freq, policy->cpu))
-		freq = per_cpu(cpu_max_freq, policy->cpu);
-
 	/*
 	 * We're safe from concurrent calls to ->target() here
 	 * as we hold the userspace_mutex lock. If we were calling
@@ -104,10 +55,9 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 	return ret;
 }
 
-
 static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
+	return sprintf(buf, "%u\n", policy->cur);
 }
 
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -119,73 +69,37 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 	switch (event) {
 	case CPUFREQ_GOV_START:
 		BUG_ON(!policy->cur);
-		mutex_lock(&userspace_mutex);
+		pr_debug("started managing cpu %u\n", cpu);
-
-		if (cpus_using_userspace_governor == 0) {
-			cpufreq_register_notifier(
-				&userspace_cpufreq_notifier_block,
-				CPUFREQ_TRANSITION_NOTIFIER);
-		}
-		cpus_using_userspace_governor++;
 
+		mutex_lock(&userspace_mutex);
 		per_cpu(cpu_is_managed, cpu) = 1;
-		per_cpu(cpu_min_freq, cpu) = policy->min;
-		per_cpu(cpu_max_freq, cpu) = policy->max;
-		per_cpu(cpu_cur_freq, cpu) = policy->cur;
-		per_cpu(cpu_set_freq, cpu) = policy->cur;
-		pr_debug("managing cpu %u started "
-			"(%u - %u kHz, currently %u kHz)\n",
-				cpu,
-				per_cpu(cpu_min_freq, cpu),
-				per_cpu(cpu_max_freq, cpu),
-				per_cpu(cpu_cur_freq, cpu));
-
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&userspace_mutex);
+		pr_debug("managing cpu %u stopped\n", cpu);
-		cpus_using_userspace_governor--;
-		if (cpus_using_userspace_governor == 0) {
-			cpufreq_unregister_notifier(
-				&userspace_cpufreq_notifier_block,
-				CPUFREQ_TRANSITION_NOTIFIER);
-		}
 
+		mutex_lock(&userspace_mutex);
 		per_cpu(cpu_is_managed, cpu) = 0;
-		per_cpu(cpu_min_freq, cpu) = 0;
-		per_cpu(cpu_max_freq, cpu) = 0;
-		per_cpu(cpu_set_freq, cpu) = 0;
-		pr_debug("managing cpu %u stopped\n", cpu);
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
 		mutex_lock(&userspace_mutex);
-		pr_debug("limit event for cpu %u: %u - %u kHz, "
+		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
-			"currently %u kHz, last set to %u kHz\n",
 			cpu, policy->min, policy->max,
-			per_cpu(cpu_cur_freq, cpu),
+			policy->cur);
-			per_cpu(cpu_set_freq, cpu));
+
-		if (policy->max < per_cpu(cpu_set_freq, cpu)) {
+		if (policy->max < policy->cur)
 			__cpufreq_driver_target(policy, policy->max,
 					CPUFREQ_RELATION_H);
-		} else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
+		else if (policy->min > policy->cur)
 			__cpufreq_driver_target(policy, policy->min,
 					CPUFREQ_RELATION_L);
-		} else {
-			__cpufreq_driver_target(policy,
-				per_cpu(cpu_set_freq, cpu),
-				CPUFREQ_RELATION_L);
-		}
-		per_cpu(cpu_min_freq, cpu) = policy->min;
-		per_cpu(cpu_max_freq, cpu) = policy->max;
-		per_cpu(cpu_cur_freq, cpu) = policy->cur;
 		mutex_unlock(&userspace_mutex);
 		break;
 	}
 	return rc;
 }
 
-
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
 static
 #endif
@@ -202,13 +116,11 @@ static int __init cpufreq_gov_userspace_init(void)
 	return cpufreq_register_governor(&cpufreq_gov_userspace);
 }
 
-
 static void __exit cpufreq_gov_userspace_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_userspace);
 }
 
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
 		"Russell King <rmk@arm.linux.org.uk>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
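After this simplification the userspace governor no longer caches min/max/cur per CPU or registers a transition notifier; it trusts policy->cur and relies on __cpufreq_driver_target() to clamp requests to policy->min/max. A hedged reconstruction of roughly what the reduced cpufreq_set() looks like; the removed lines are in the hunks above, the surviving body is inferred rather than quoted:

static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
	int ret = -EINVAL;

	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);

	mutex_lock(&userspace_mutex);
	if (!per_cpu(cpu_is_managed, policy->cpu))
		goto err;

	/* __cpufreq_driver_target() already clamps freq to policy->min/max */
	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 err:
	mutex_unlock(&userspace_mutex);
	return ret;
}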
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 37380fb92621..a60efaeb4cf8 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -161,6 +161,9 @@ postchange:
 			current_multiplier);
 	}
 #endif
+	if (err)
+		freqs.new = freqs.old;
+
 	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return err;
 }
@@ -188,7 +191,7 @@ static int eps_target(struct cpufreq_policy *policy,
 	}
 
 	/* Make frequency transition */
-	dest_state = centaur->freq_table[newstate].index & 0xffff;
+	dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
 	ret = eps_set_state(centaur, policy, dest_state);
 	if (ret)
 		printk(KERN_ERR "eps: Timeout!\n");
@@ -380,9 +383,9 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 	f_table = &centaur->freq_table[0];
 	if (brand != EPS_BRAND_C7M) {
 		f_table[0].frequency = fsb * min_multiplier;
-		f_table[0].index = (min_multiplier << 8) | min_voltage;
+		f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
 		f_table[1].frequency = fsb * max_multiplier;
-		f_table[1].index = (max_multiplier << 8) | max_voltage;
+		f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
 		f_table[2].frequency = CPUFREQ_TABLE_END;
 	} else {
 		k = 0;
@@ -391,7 +394,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 		for (i = min_multiplier; i <= max_multiplier; i++) {
 			voltage = (k * step) / 256 + min_voltage;
 			f_table[k].frequency = fsb * i;
-			f_table[k].index = (i << 8) | voltage;
+			f_table[k].driver_data = (i << 8) | voltage;
 			k++;
 		}
 		f_table[k].frequency = CPUFREQ_TABLE_END;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index d7a79662e24c..f0d87412cc91 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -34,8 +34,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
34 34
35 continue; 35 continue;
36 } 36 }
37 pr_debug("table entry %u: %u kHz, %u index\n", 37 pr_debug("table entry %u: %u kHz, %u driver_data\n",
38 i, freq, table[i].index); 38 i, freq, table[i].driver_data);
39 if (freq < min_freq) 39 if (freq < min_freq)
40 min_freq = freq; 40 min_freq = freq;
41 if (freq > max_freq) 41 if (freq > max_freq)
@@ -97,11 +97,11 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
97 unsigned int *index) 97 unsigned int *index)
98{ 98{
99 struct cpufreq_frequency_table optimal = { 99 struct cpufreq_frequency_table optimal = {
100 .index = ~0, 100 .driver_data = ~0,
101 .frequency = 0, 101 .frequency = 0,
102 }; 102 };
103 struct cpufreq_frequency_table suboptimal = { 103 struct cpufreq_frequency_table suboptimal = {
104 .index = ~0, 104 .driver_data = ~0,
105 .frequency = 0, 105 .frequency = 0,
106 }; 106 };
107 unsigned int i; 107 unsigned int i;
@@ -129,12 +129,12 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
129 if (freq <= target_freq) { 129 if (freq <= target_freq) {
130 if (freq >= optimal.frequency) { 130 if (freq >= optimal.frequency) {
131 optimal.frequency = freq; 131 optimal.frequency = freq;
132 optimal.index = i; 132 optimal.driver_data = i;
133 } 133 }
134 } else { 134 } else {
135 if (freq <= suboptimal.frequency) { 135 if (freq <= suboptimal.frequency) {
136 suboptimal.frequency = freq; 136 suboptimal.frequency = freq;
137 suboptimal.index = i; 137 suboptimal.driver_data = i;
138 } 138 }
139 } 139 }
140 break; 140 break;
@@ -142,26 +142,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
142 if (freq >= target_freq) { 142 if (freq >= target_freq) {
143 if (freq <= optimal.frequency) { 143 if (freq <= optimal.frequency) {
144 optimal.frequency = freq; 144 optimal.frequency = freq;
145 optimal.index = i; 145 optimal.driver_data = i;
146 } 146 }
147 } else { 147 } else {
148 if (freq >= suboptimal.frequency) { 148 if (freq >= suboptimal.frequency) {
149 suboptimal.frequency = freq; 149 suboptimal.frequency = freq;
150 suboptimal.index = i; 150 suboptimal.driver_data = i;
151 } 151 }
152 } 152 }
153 break; 153 break;
154 } 154 }
155 } 155 }
156 if (optimal.index > i) { 156 if (optimal.driver_data > i) {
157 if (suboptimal.index > i) 157 if (suboptimal.driver_data > i)
158 return -EINVAL; 158 return -EINVAL;
159 *index = suboptimal.index; 159 *index = suboptimal.driver_data;
160 } else 160 } else
161 *index = optimal.index; 161 *index = optimal.driver_data;
162 162
163 pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, 163 pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
164 table[*index].index); 164 table[*index].driver_data);
165 165
166 return 0; 166 return 0;
167} 167}
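Note: the freq_table.c hunks above belong to the rename of the cpufreq_frequency_table "index" field to "driver_data" carried by this series; the cpufreq core does not interpret the field, it is opaque per-entry data owned by the driver. A minimal sketch of a table using the renamed field, with made-up divider codes purely for illustration:

#include <linux/cpufreq.h>

/* .driver_data is whatever the driver wants, e.g. a divider or register code */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0x01, .frequency = 200000 },	/* 200 MHz */
	{ .driver_data = 0x02, .frequency = 400000 },	/* 400 MHz */
	{ .driver_data = 0x04, .frequency = 800000 },	/* 800 MHz */
	{ .frequency = CPUFREQ_TABLE_END },
};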
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index c0075dbaa633..573c14ea802d 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -326,7 +326,7 @@ acpi_cpufreq_cpu_init (
326 /* table init */ 326 /* table init */
327 for (i = 0; i <= data->acpi_data.state_count; i++) 327 for (i = 0; i <= data->acpi_data.state_count; i++)
328 { 328 {
329 data->freq_table[i].index = i; 329 data->freq_table[i].driver_data = i;
330 if (i < data->acpi_data.state_count) { 330 if (i < data->acpi_data.state_count) {
331 data->freq_table[i].frequency = 331 data->freq_table[i].frequency =
332 data->acpi_data.states[i].core_frequency * 1000; 332 data->acpi_data.states[i].core_frequency * 1000;
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index b2644af985ec..c233ea617366 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -59,7 +59,7 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
59 unsigned int index) 59 unsigned int index)
60{ 60{
61 struct cpufreq_freqs freqs; 61 struct cpufreq_freqs freqs;
62 unsigned int state = kirkwood_freq_table[index].index; 62 unsigned int state = kirkwood_freq_table[index].driver_data;
63 unsigned long reg; 63 unsigned long reg;
64 64
65 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); 65 freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index b448638e34de..b6a0a7a406b0 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -254,7 +254,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
254 u32 bm_timeout = 1000; 254 u32 bm_timeout = 1000;
255 unsigned int dir = 0; 255 unsigned int dir = 0;
256 256
257 mults_index = longhaul_table[table_index].index; 257 mults_index = longhaul_table[table_index].driver_data;
258 /* Safety precautions */ 258 /* Safety precautions */
259 mult = mults[mults_index & 0x1f]; 259 mult = mults[mults_index & 0x1f];
260 if (mult == -1) 260 if (mult == -1)
@@ -487,7 +487,7 @@ static int __cpuinit longhaul_get_ranges(void)
487 if (ratio > maxmult || ratio < minmult) 487 if (ratio > maxmult || ratio < minmult)
488 continue; 488 continue;
489 longhaul_table[k].frequency = calc_speed(ratio); 489 longhaul_table[k].frequency = calc_speed(ratio);
490 longhaul_table[k].index = j; 490 longhaul_table[k].driver_data = j;
491 k++; 491 k++;
492 } 492 }
493 if (k <= 1) { 493 if (k <= 1) {
@@ -508,8 +508,8 @@ static int __cpuinit longhaul_get_ranges(void)
508 if (min_i != j) { 508 if (min_i != j) {
509 swap(longhaul_table[j].frequency, 509 swap(longhaul_table[j].frequency,
510 longhaul_table[min_i].frequency); 510 longhaul_table[min_i].frequency);
511 swap(longhaul_table[j].index, 511 swap(longhaul_table[j].driver_data,
512 longhaul_table[min_i].index); 512 longhaul_table[min_i].driver_data);
513 } 513 }
514 } 514 }
515 515
@@ -517,7 +517,7 @@ static int __cpuinit longhaul_get_ranges(void)
517 517
518 /* Find index we are running on */ 518 /* Find index we are running on */
519 for (j = 0; j < k; j++) { 519 for (j = 0; j < k; j++) {
520 if (mults[longhaul_table[j].index & 0x1f] == mult) { 520 if (mults[longhaul_table[j].driver_data & 0x1f] == mult) {
521 longhaul_index = j; 521 longhaul_index = j;
522 break; 522 break;
523 } 523 }
@@ -613,7 +613,7 @@ static void __cpuinit longhaul_setup_voltagescaling(void)
613 pos = (speed - min_vid_speed) / kHz_step + minvid.pos; 613 pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
614 else 614 else
615 pos = minvid.pos; 615 pos = minvid.pos;
616 longhaul_table[j].index |= mV_vrm_table[pos] << 8; 616 longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8;
617 vid = vrm_mV_table[mV_vrm_table[pos]]; 617 vid = vrm_mV_table[mV_vrm_table[pos]];
618 printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", 618 printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
619 speed, j, vid.mV); 619 speed, j, vid.mV);
@@ -656,12 +656,12 @@ static int longhaul_target(struct cpufreq_policy *policy,
656 * this in hardware, C3 is old and we need to do this 656 * this in hardware, C3 is old and we need to do this
657 * in software. */ 657 * in software. */
658 i = longhaul_index; 658 i = longhaul_index;
659 current_vid = (longhaul_table[longhaul_index].index >> 8); 659 current_vid = (longhaul_table[longhaul_index].driver_data >> 8);
660 current_vid &= 0x1f; 660 current_vid &= 0x1f;
661 if (table_index > longhaul_index) 661 if (table_index > longhaul_index)
662 dir = 1; 662 dir = 1;
663 while (i != table_index) { 663 while (i != table_index) {
664 vid = (longhaul_table[i].index >> 8) & 0x1f; 664 vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
665 if (vid != current_vid) { 665 if (vid != current_vid) {
666 longhaul_setstate(policy, i); 666 longhaul_setstate(policy, i);
667 current_vid = vid; 667 current_vid = vid;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d53912768946..bb838b985077 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -72,7 +72,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
72 72
73 freq = 73 freq =
74 ((cpu_clock_freq / 1000) * 74 ((cpu_clock_freq / 1000) *
75 loongson2_clockmod_table[newstate].index) / 8; 75 loongson2_clockmod_table[newstate].driver_data) / 8;
76 if (freq < policy->min || freq > policy->max) 76 if (freq < policy->min || freq > policy->max)
77 return -EINVAL; 77 return -EINVAL;
78 78
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 421ef37d0bb3..9ee78170ff86 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -118,7 +118,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
118 return -EINVAL; 118 return -EINVAL;
119 119
120 freqs.old = cpufreq_p4_get(policy->cpu); 120 freqs.old = cpufreq_p4_get(policy->cpu);
121 freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; 121 freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
122 122
123 if (freqs.new == freqs.old) 123 if (freqs.new == freqs.old)
124 return 0; 124 return 0;
@@ -131,7 +131,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
131 * Developer's Manual, Volume 3 131 * Developer's Manual, Volume 3
132 */ 132 */
133 for_each_cpu(i, policy->cpus) 133 for_each_cpu(i, policy->cpus)
134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); 134 cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
135 135
136 /* notifiers */ 136 /* notifiers */
137 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 137 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
new file mode 100644
index 000000000000..b704da404067
--- /dev/null
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -0,0 +1,331 @@
1/*
2 * Copyright (C) 2007 PA Semi, Inc
3 *
4 * Authors: Egor Martovetsky <egor@pasemi.com>
5 * Olof Johansson <olof@lixom.net>
6 *
7 * Maintained by: Olof Johansson <olof@lixom.net>
8 *
9 * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
10 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 */
27
28#include <linux/cpufreq.h>
29#include <linux/timer.h>
30#include <linux/module.h>
31
32#include <asm/hw_irq.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/time.h>
36#include <asm/smp.h>
37
38#define SDCASR_REG 0x0100
39#define SDCASR_REG_STRIDE 0x1000
40#define SDCPWR_CFGA0_REG 0x0100
41#define SDCPWR_PWST0_REG 0x0000
42#define SDCPWR_GIZTIME_REG 0x0440
43
44/* SDCPWR_GIZTIME_REG fields */
45#define SDCPWR_GIZTIME_GR 0x80000000
46#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
47
48/* Offset of ASR registers from SDC base */
49#define SDCASR_OFFSET 0x120000
50
51static void __iomem *sdcpwr_mapbase;
52static void __iomem *sdcasr_mapbase;
53
54static DEFINE_MUTEX(pas_switch_mutex);
55
56/* Current astate; used when waking up from power savings on
57 * one core, in case the other core has switched states during
58 * the idle time.
59 */
60static int current_astate;
61
62/* We support 5 (A0-A4) power states, excluding turbo (A5-A6) modes */
63static struct cpufreq_frequency_table pas_freqs[] = {
64 {0, 0},
65 {1, 0},
66 {2, 0},
67 {3, 0},
68 {4, 0},
69 {0, CPUFREQ_TABLE_END},
70};
71
72static struct freq_attr *pas_cpu_freqs_attr[] = {
73 &cpufreq_freq_attr_scaling_available_freqs,
74 NULL,
75};
76
77/*
78 * hardware specific functions
79 */
80
81static int get_astate_freq(int astate)
82{
83 u32 ret;
84 ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
85
86 return ret & 0x3f;
87}
88
89static int get_cur_astate(int cpu)
90{
91 u32 ret;
92
93 ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
94 ret = (ret >> (cpu * 4)) & 0x7;
95
96 return ret;
97}
98
99static int get_gizmo_latency(void)
100{
101 u32 giztime, ret;
102
103 giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
104
105 /* just provide the upper bound */
106 if (giztime & SDCPWR_GIZTIME_GR)
107 ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
108 else
109 ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
110
111 return ret;
112}
113
114static void set_astate(int cpu, unsigned int astate)
115{
116 unsigned long flags;
117
118 /* Return if called before init has run */
119 if (unlikely(!sdcasr_mapbase))
120 return;
121
122 local_irq_save(flags);
123
124 out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
125
126 local_irq_restore(flags);
127}
128
129int check_astate(void)
130{
131 return get_cur_astate(hard_smp_processor_id());
132}
133
134void restore_astate(int cpu)
135{
136 set_astate(cpu, current_astate);
137}
138
139/*
140 * cpufreq functions
141 */
142
143static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
144{
145 const u32 *max_freqp;
146 u32 max_freq;
147 int i, cur_astate;
148 struct resource res;
149 struct device_node *cpu, *dn;
150 int err = -ENODEV;
151
152 cpu = of_get_cpu_node(policy->cpu, NULL);
153
154 if (!cpu)
155 goto out;
156
157 dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
158 if (!dn)
159 dn = of_find_compatible_node(NULL, NULL,
160 "pasemi,pwrficient-sdc");
161 if (!dn)
162 goto out;
163 err = of_address_to_resource(dn, 0, &res);
164 of_node_put(dn);
165 if (err)
166 goto out;
167 sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
168 if (!sdcasr_mapbase) {
169 err = -EINVAL;
170 goto out;
171 }
172
173 dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
174 if (!dn)
175 dn = of_find_compatible_node(NULL, NULL,
176 "pasemi,pwrficient-gizmo");
177 if (!dn) {
178 err = -ENODEV;
179 goto out_unmap_sdcasr;
180 }
181 err = of_address_to_resource(dn, 0, &res);
182 of_node_put(dn);
183 if (err)
184 goto out_unmap_sdcasr;
185 sdcpwr_mapbase = ioremap(res.start, 0x1000);
186 if (!sdcpwr_mapbase) {
187 err = -EINVAL;
188 goto out_unmap_sdcasr;
189 }
190
191 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
192
193 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
194 if (!max_freqp) {
195 err = -EINVAL;
196 goto out_unmap_sdcpwr;
197 }
198
199 /* we need the freq in kHz */
200 max_freq = *max_freqp / 1000;
201
202 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
203 pr_debug("initializing frequency table\n");
204
205 /* initialize frequency table */
206 for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
207 pas_freqs[i].frequency =
208 get_astate_freq(pas_freqs[i].driver_data) * 100000;
209 pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
210 }
211
212 policy->cpuinfo.transition_latency = get_gizmo_latency();
213
214 cur_astate = get_cur_astate(policy->cpu);
215 pr_debug("current astate is at %d\n",cur_astate);
216
217 policy->cur = pas_freqs[cur_astate].frequency;
218 cpumask_copy(policy->cpus, cpu_online_mask);
219
220 ppc_proc_freq = policy->cur * 1000ul;
221
222 cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
223
224	/* this ensures that policy->cpuinfo.min_freq and policy->cpuinfo.max_freq
225 * are set correctly
226 */
227 return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
228
229out_unmap_sdcpwr:
230 iounmap(sdcpwr_mapbase);
231
232out_unmap_sdcasr:
233 iounmap(sdcasr_mapbase);
234out:
235 return err;
236}
237
238static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
239{
240 /*
241 * We don't support CPU hotplug. Don't unmap after the system
242 * has already made it to a running state.
243 */
244 if (system_state != SYSTEM_BOOTING)
245 return 0;
246
247 if (sdcasr_mapbase)
248 iounmap(sdcasr_mapbase);
249 if (sdcpwr_mapbase)
250 iounmap(sdcpwr_mapbase);
251
252 cpufreq_frequency_table_put_attr(policy->cpu);
253 return 0;
254}
255
256static int pas_cpufreq_verify(struct cpufreq_policy *policy)
257{
258 return cpufreq_frequency_table_verify(policy, pas_freqs);
259}
260
261static int pas_cpufreq_target(struct cpufreq_policy *policy,
262 unsigned int target_freq,
263 unsigned int relation)
264{
265 struct cpufreq_freqs freqs;
266 int pas_astate_new;
267 int i;
268
269 cpufreq_frequency_table_target(policy,
270 pas_freqs,
271 target_freq,
272 relation,
273 &pas_astate_new);
274
275 freqs.old = policy->cur;
276 freqs.new = pas_freqs[pas_astate_new].frequency;
277
278 mutex_lock(&pas_switch_mutex);
279 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
280
281 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
282 policy->cpu,
283 pas_freqs[pas_astate_new].frequency,
284 pas_freqs[pas_astate_new].driver_data);
285
286 current_astate = pas_astate_new;
287
288 for_each_online_cpu(i)
289 set_astate(i, pas_astate_new);
290
291 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
292 mutex_unlock(&pas_switch_mutex);
293
294 ppc_proc_freq = freqs.new * 1000ul;
295 return 0;
296}
297
298static struct cpufreq_driver pas_cpufreq_driver = {
299 .name = "pas-cpufreq",
300 .owner = THIS_MODULE,
301 .flags = CPUFREQ_CONST_LOOPS,
302 .init = pas_cpufreq_cpu_init,
303 .exit = pas_cpufreq_cpu_exit,
304 .verify = pas_cpufreq_verify,
305 .target = pas_cpufreq_target,
306 .attr = pas_cpu_freqs_attr,
307};
308
309/*
310 * module init and destroy
311 */
312
313static int __init pas_cpufreq_init(void)
314{
315 if (!of_machine_is_compatible("PA6T-1682M") &&
316 !of_machine_is_compatible("pasemi,pwrficient"))
317 return -ENODEV;
318
319 return cpufreq_register_driver(&pas_cpufreq_driver);
320}
321
322static void __exit pas_cpufreq_exit(void)
323{
324 cpufreq_unregister_driver(&pas_cpufreq_driver);
325}
326
327module_init(pas_cpufreq_init);
328module_exit(pas_cpufreq_exit);
329
330MODULE_LICENSE("GPL");
331MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 0de00081a81e..1581fcc4cf4a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -243,6 +243,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
243 return 0; 243 return 0;
244 244
245cmd_incomplete: 245cmd_incomplete:
246 freqs.new = freqs.old;
247 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
246 iowrite16(0, &pcch_hdr->status); 248 iowrite16(0, &pcch_hdr->status);
247 spin_unlock(&pcc_lock); 249 spin_unlock(&pcc_lock);
248 return -EINVAL; 250 return -EINVAL;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
new file mode 100644
index 000000000000..3104fad82480
--- /dev/null
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -0,0 +1,721 @@
1/*
2 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
3 * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * TODO: Need a big cleanup here. Basically, we need to have different
10 * cpufreq_driver structures for the different type of HW instead of the
11 * current mess. We also need to better deal with the detection of the
12 * type of machine.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/sched.h>
22#include <linux/adb.h>
23#include <linux/pmu.h>
24#include <linux/cpufreq.h>
25#include <linux/init.h>
26#include <linux/device.h>
27#include <linux/hardirq.h>
28#include <asm/prom.h>
29#include <asm/machdep.h>
30#include <asm/irq.h>
31#include <asm/pmac_feature.h>
32#include <asm/mmu_context.h>
33#include <asm/sections.h>
34#include <asm/cputable.h>
35#include <asm/time.h>
36#include <asm/mpic.h>
37#include <asm/keylargo.h>
38#include <asm/switch_to.h>
39
40/* WARNING !!! This will cause calibrate_delay() to be called,
41 * but this is an __init function ! So you MUST go edit
42 * init/main.c to make it non-init before enabling DEBUG_FREQ
43 */
44#undef DEBUG_FREQ
45
46extern void low_choose_7447a_dfs(int dfs);
47extern void low_choose_750fx_pll(int pll);
48extern void low_sleep_handler(void);
49
50/*
51 * Currently, PowerMac cpufreq supports only high & low frequencies
52 * that are set by the firmware
53 */
54static unsigned int low_freq;
55static unsigned int hi_freq;
56static unsigned int cur_freq;
57static unsigned int sleep_freq;
58static unsigned long transition_latency;
59
60/*
61 * Different models use different mechanisms to switch the frequency
62 */
63static int (*set_speed_proc)(int low_speed);
64static unsigned int (*get_speed_proc)(void);
65
66/*
67 * Some definitions used by the various speedprocs
68 */
69static u32 voltage_gpio;
70static u32 frequency_gpio;
71static u32 slew_done_gpio;
72static int no_schedule;
73static int has_cpu_l2lve;
74static int is_pmu_based;
75
76/* There are only two frequency states for each processor. Values
77 * are in kHz for the time being.
78 */
79#define CPUFREQ_HIGH 0
80#define CPUFREQ_LOW 1
81
82static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
83 {CPUFREQ_HIGH, 0},
84 {CPUFREQ_LOW, 0},
85 {0, CPUFREQ_TABLE_END},
86};
87
88static struct freq_attr* pmac_cpu_freqs_attr[] = {
89 &cpufreq_freq_attr_scaling_available_freqs,
90 NULL,
91};
92
93static inline void local_delay(unsigned long ms)
94{
95 if (no_schedule)
96 mdelay(ms);
97 else
98 msleep(ms);
99}
100
101#ifdef DEBUG_FREQ
102static inline void debug_calc_bogomips(void)
103{
104 /* This will cause a recalc of bogomips and display the
105 * result. We backup/restore the value to avoid affecting the
106 * core cpufreq framework's own calculation.
107 */
108 unsigned long save_lpj = loops_per_jiffy;
109 calibrate_delay();
110 loops_per_jiffy = save_lpj;
111}
112#endif /* DEBUG_FREQ */
113
114/* Switch CPU speed under 750FX CPU control
115 */
116static int cpu_750fx_cpu_speed(int low_speed)
117{
118 u32 hid2;
119
120 if (low_speed == 0) {
121 /* ramping up, set voltage first */
122 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
123 /* Make sure we sleep for at least 1ms */
124 local_delay(10);
125
126 /* tweak L2 for high voltage */
127 if (has_cpu_l2lve) {
128 hid2 = mfspr(SPRN_HID2);
129 hid2 &= ~0x2000;
130 mtspr(SPRN_HID2, hid2);
131 }
132 }
133#ifdef CONFIG_6xx
134 low_choose_750fx_pll(low_speed);
135#endif
136 if (low_speed == 1) {
137 /* tweak L2 for low voltage */
138 if (has_cpu_l2lve) {
139 hid2 = mfspr(SPRN_HID2);
140 hid2 |= 0x2000;
141 mtspr(SPRN_HID2, hid2);
142 }
143
144 /* ramping down, set voltage last */
145 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
146 local_delay(10);
147 }
148
149 return 0;
150}
151
152static unsigned int cpu_750fx_get_cpu_speed(void)
153{
154 if (mfspr(SPRN_HID1) & HID1_PS)
155 return low_freq;
156 else
157 return hi_freq;
158}
159
160/* Switch CPU speed using DFS */
161static int dfs_set_cpu_speed(int low_speed)
162{
163 if (low_speed == 0) {
164 /* ramping up, set voltage first */
165 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
166 /* Make sure we sleep for at least 1ms */
167 local_delay(1);
168 }
169
170 /* set frequency */
171#ifdef CONFIG_6xx
172 low_choose_7447a_dfs(low_speed);
173#endif
174 udelay(100);
175
176 if (low_speed == 1) {
177 /* ramping down, set voltage last */
178 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
179 local_delay(1);
180 }
181
182 return 0;
183}
184
185static unsigned int dfs_get_cpu_speed(void)
186{
187 if (mfspr(SPRN_HID1) & HID1_DFS)
188 return low_freq;
189 else
190 return hi_freq;
191}
192
193
194/* Switch CPU speed using slewing GPIOs
195 */
196static int gpios_set_cpu_speed(int low_speed)
197{
198 int gpio, timeout = 0;
199
200 /* If ramping up, set voltage first */
201 if (low_speed == 0) {
202 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
203 /* Delay is way too big but it's ok, we schedule */
204 local_delay(10);
205 }
206
207 /* Set frequency */
208 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
209 if (low_speed == ((gpio & 0x01) == 0))
210 goto skip;
211
212 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
213 low_speed ? 0x04 : 0x05);
214 udelay(200);
215 do {
216 if (++timeout > 100)
217 break;
218 local_delay(1);
219 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
220 } while((gpio & 0x02) == 0);
221 skip:
222 /* If ramping down, set voltage last */
223 if (low_speed == 1) {
224 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
225 /* Delay is way too big but it's ok, we schedule */
226 local_delay(10);
227 }
228
229#ifdef DEBUG_FREQ
230 debug_calc_bogomips();
231#endif
232
233 return 0;
234}
235
236/* Switch CPU speed under PMU control
237 */
238static int pmu_set_cpu_speed(int low_speed)
239{
240 struct adb_request req;
241 unsigned long save_l2cr;
242 unsigned long save_l3cr;
243 unsigned int pic_prio;
244 unsigned long flags;
245
246 preempt_disable();
247
248#ifdef DEBUG_FREQ
249 printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
250#endif
251 pmu_suspend();
252
253 /* Disable all interrupt sources on openpic */
254 pic_prio = mpic_cpu_get_priority();
255 mpic_cpu_set_priority(0xf);
256
257 /* Make sure the decrementer won't interrupt us */
258 asm volatile("mtdec %0" : : "r" (0x7fffffff));
259 /* Make sure any pending DEC interrupt occurring while we did
260 * the above didn't re-enable the DEC */
261 mb();
262 asm volatile("mtdec %0" : : "r" (0x7fffffff));
263
264 /* We can now disable MSR_EE */
265 local_irq_save(flags);
266
267 /* Giveup the FPU & vec */
268 enable_kernel_fp();
269
270#ifdef CONFIG_ALTIVEC
271 if (cpu_has_feature(CPU_FTR_ALTIVEC))
272 enable_kernel_altivec();
273#endif /* CONFIG_ALTIVEC */
274
275 /* Save & disable L2 and L3 caches */
276 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
277 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
278
279 /* Send the new speed command. My assumption is that this command
280 * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
281 */
282 pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
283 while (!req.complete)
284 pmu_poll();
285
286 /* Prepare the northbridge for the speed transition */
287 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
288
289 /* Call low level code to backup CPU state and recover from
290 * hardware reset
291 */
292 low_sleep_handler();
293
294 /* Restore the northbridge */
295 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
296
297 /* Restore L2 cache */
298 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
299 _set_L2CR(save_l2cr);
300 /* Restore L3 cache */
301 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
302 _set_L3CR(save_l3cr);
303
304 /* Restore userland MMU context */
305 switch_mmu_context(NULL, current->active_mm);
306
307#ifdef DEBUG_FREQ
308 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
309#endif
310
311 /* Restore low level PMU operations */
312 pmu_unlock();
313
314 /*
315 * Restore decrementer; we'll take a decrementer interrupt
316 * as soon as interrupts are re-enabled and the generic
317 * clockevents code will reprogram it with the right value.
318 */
319 set_dec(1);
320
321 /* Restore interrupts */
322 mpic_cpu_set_priority(pic_prio);
323
324 /* Let interrupts flow again ... */
325 local_irq_restore(flags);
326
327#ifdef DEBUG_FREQ
328 debug_calc_bogomips();
329#endif
330
331 pmu_resume();
332
333 preempt_enable();
334
335 return 0;
336}
337
338static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
339 int notify)
340{
341 struct cpufreq_freqs freqs;
342 unsigned long l3cr;
343 static unsigned long prev_l3cr;
344
345 freqs.old = cur_freq;
346 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
347
348 if (freqs.old == freqs.new)
349 return 0;
350
351 if (notify)
352 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
353 if (speed_mode == CPUFREQ_LOW &&
354 cpu_has_feature(CPU_FTR_L3CR)) {
355 l3cr = _get_L3CR();
356 if (l3cr & L3CR_L3E) {
357 prev_l3cr = l3cr;
358 _set_L3CR(0);
359 }
360 }
361 set_speed_proc(speed_mode == CPUFREQ_LOW);
362 if (speed_mode == CPUFREQ_HIGH &&
363 cpu_has_feature(CPU_FTR_L3CR)) {
364 l3cr = _get_L3CR();
365 if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
366 _set_L3CR(prev_l3cr);
367 }
368 if (notify)
369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
370 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
371
372 return 0;
373}
374
375static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
376{
377 return cur_freq;
378}
379
380static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
381{
382 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
383}
384
385static int pmac_cpufreq_target( struct cpufreq_policy *policy,
386 unsigned int target_freq,
387 unsigned int relation)
388{
389 unsigned int newstate = 0;
390 int rc;
391
392 if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
393 target_freq, relation, &newstate))
394 return -EINVAL;
395
396 rc = do_set_cpu_speed(policy, newstate, 1);
397
398 ppc_proc_freq = cur_freq * 1000ul;
399 return rc;
400}
401
402static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
403{
404 if (policy->cpu != 0)
405 return -ENODEV;
406
407 policy->cpuinfo.transition_latency = transition_latency;
408 policy->cur = cur_freq;
409
410 cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
411 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
412}
413
414static u32 read_gpio(struct device_node *np)
415{
416 const u32 *reg = of_get_property(np, "reg", NULL);
417 u32 offset;
418
419 if (reg == NULL)
420 return 0;
421 /* That works for all keylargos but shall be fixed properly
422 * some day... The problem is that it seems we can't rely
423 * on the "reg" property of the GPIO nodes, they are either
424 * relative to the base of KeyLargo or to the base of the
425 * GPIO space, and the device-tree doesn't help.
426 */
427 offset = *reg;
428 if (offset < KEYLARGO_GPIO_LEVELS0)
429 offset += KEYLARGO_GPIO_LEVELS0;
430 return offset;
431}
432
433static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
434{
435 /* Ok, this could be made a bit smarter, but let's be robust for now. We
436 * always force a speed change to high speed before sleep, to make sure
437 * we have appropriate voltage and/or bus speed for the wakeup process,
438 * and to make sure our loops_per_jiffies are "good enough", that is, will
439 * not cause too short delays if we sleep in low speed and wake in high
440 * speed.
441 */
442 no_schedule = 1;
443 sleep_freq = cur_freq;
444 if (cur_freq == low_freq && !is_pmu_based)
445 do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
446 return 0;
447}
448
449static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
450{
451 /* If we resume, first check if we have a get() function */
452 if (get_speed_proc)
453 cur_freq = get_speed_proc();
454 else
455 cur_freq = 0;
456
457 /* We don't, hrm... we don't really know our speed here, best
458 * is that we force a switch to whatever it was, which is
459 * probably high speed due to our suspend() routine
460 */
461 do_set_cpu_speed(policy, sleep_freq == low_freq ?
462 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
463
464 ppc_proc_freq = cur_freq * 1000ul;
465
466 no_schedule = 0;
467 return 0;
468}
469
470static struct cpufreq_driver pmac_cpufreq_driver = {
471 .verify = pmac_cpufreq_verify,
472 .target = pmac_cpufreq_target,
473 .get = pmac_cpufreq_get_speed,
474 .init = pmac_cpufreq_cpu_init,
475 .suspend = pmac_cpufreq_suspend,
476 .resume = pmac_cpufreq_resume,
477 .flags = CPUFREQ_PM_NO_WARN,
478 .attr = pmac_cpu_freqs_attr,
479 .name = "powermac",
480 .owner = THIS_MODULE,
481};
482
483
484static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
485{
486 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
487 "voltage-gpio");
488 struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
489 "frequency-gpio");
490 struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
491 "slewing-done");
492 const u32 *value;
493
494 /*
495 * Check to see if it's GPIO driven or PMU only
496 *
497 * The way we extract the GPIO address is slightly hackish, but it
498 * works well enough for now. We need to abstract the whole GPIO
499 * stuff sooner or later anyway
500 */
501
502 if (volt_gpio_np)
503 voltage_gpio = read_gpio(volt_gpio_np);
504 if (freq_gpio_np)
505 frequency_gpio = read_gpio(freq_gpio_np);
506 if (slew_done_gpio_np)
507 slew_done_gpio = read_gpio(slew_done_gpio_np);
508
509 /* If we use the frequency GPIOs, calculate the min/max speeds based
510 * on the bus frequencies
511 */
512 if (frequency_gpio && slew_done_gpio) {
513 int lenp, rc;
514 const u32 *freqs, *ratio;
515
516 freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
517 lenp /= sizeof(u32);
518 if (freqs == NULL || lenp != 2) {
519 printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
520 return 1;
521 }
522 ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
523 NULL);
524 if (ratio == NULL) {
525 printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
526 return 1;
527 }
528
529 /* Get the min/max bus frequencies */
530 low_freq = min(freqs[0], freqs[1]);
531 hi_freq = max(freqs[0], freqs[1]);
532
533		/* Grrrr.. It _seems_ that the device-tree is lying about the low bus
534 * frequency, it claims it to be around 84Mhz on some models while
535 * it appears to be approx. 101Mhz on all. Let's hack around here...
536 * fortunately, we don't need to be too precise
537 */
538 if (low_freq < 98000000)
539 low_freq = 101000000;
540
541 /* Convert those to CPU core clocks */
542 low_freq = (low_freq * (*ratio)) / 2000;
543 hi_freq = (hi_freq * (*ratio)) / 2000;
544
545		/* Now that we have the frequencies, read the GPIO to see what our current
546		 * speed is
547 */
548 rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
549 cur_freq = (rc & 0x01) ? hi_freq : low_freq;
550
551 set_speed_proc = gpios_set_cpu_speed;
552 return 1;
553 }
554
555 /* If we use the PMU, look for the min & max frequencies in the
556 * device-tree
557 */
558 value = of_get_property(cpunode, "min-clock-frequency", NULL);
559 if (!value)
560 return 1;
561 low_freq = (*value) / 1000;
562 /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
563 * here */
564 if (low_freq < 100000)
565 low_freq *= 10;
566
567 value = of_get_property(cpunode, "max-clock-frequency", NULL);
568 if (!value)
569 return 1;
570 hi_freq = (*value) / 1000;
571 set_speed_proc = pmu_set_cpu_speed;
572 is_pmu_based = 1;
573
574 return 0;
575}
576
577static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
578{
579 struct device_node *volt_gpio_np;
580
581 if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
582 return 1;
583
584 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
585 if (volt_gpio_np)
586 voltage_gpio = read_gpio(volt_gpio_np);
587 if (!voltage_gpio){
588 printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
589 return 1;
590 }
591
592 /* OF only reports the high frequency */
593 hi_freq = cur_freq;
594 low_freq = cur_freq/2;
595
596 /* Read actual frequency from CPU */
597 cur_freq = dfs_get_cpu_speed();
598 set_speed_proc = dfs_set_cpu_speed;
599 get_speed_proc = dfs_get_cpu_speed;
600
601 return 0;
602}
603
604static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
605{
606 struct device_node *volt_gpio_np;
607 u32 pvr;
608 const u32 *value;
609
610 if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
611 return 1;
612
613 hi_freq = cur_freq;
614 value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
615 if (!value)
616 return 1;
617 low_freq = (*value) / 1000;
618
619 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
620 if (volt_gpio_np)
621 voltage_gpio = read_gpio(volt_gpio_np);
622
623 pvr = mfspr(SPRN_PVR);
624 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
625
626 set_speed_proc = cpu_750fx_cpu_speed;
627 get_speed_proc = cpu_750fx_get_cpu_speed;
628 cur_freq = cpu_750fx_get_cpu_speed();
629
630 return 0;
631}
632
633/* Currently, we support the following machines:
634 *
635 * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
636 * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
637 * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
638 * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
639 * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
640 * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
641 * - Recent MacRISC3 laptops
642 * - All new machines with 7447A CPUs
643 */
644static int __init pmac_cpufreq_setup(void)
645{
646 struct device_node *cpunode;
647 const u32 *value;
648
649 if (strstr(cmd_line, "nocpufreq"))
650 return 0;
651
652 /* Assume only one CPU */
653 cpunode = of_find_node_by_type(NULL, "cpu");
654 if (!cpunode)
655 goto out;
656
657 /* Get current cpu clock freq */
658 value = of_get_property(cpunode, "clock-frequency", NULL);
659 if (!value)
660 goto out;
661 cur_freq = (*value) / 1000;
662 transition_latency = CPUFREQ_ETERNAL;
663
664 /* Check for 7447A based MacRISC3 */
665 if (of_machine_is_compatible("MacRISC3") &&
666 of_get_property(cpunode, "dynamic-power-step", NULL) &&
667 PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
668 pmac_cpufreq_init_7447A(cpunode);
669 transition_latency = 8000000;
670 /* Check for other MacRISC3 machines */
671 } else if (of_machine_is_compatible("PowerBook3,4") ||
672 of_machine_is_compatible("PowerBook3,5") ||
673 of_machine_is_compatible("MacRISC3")) {
674 pmac_cpufreq_init_MacRISC3(cpunode);
675 /* Else check for iBook2 500/600 */
676 } else if (of_machine_is_compatible("PowerBook4,1")) {
677 hi_freq = cur_freq;
678 low_freq = 400000;
679 set_speed_proc = pmu_set_cpu_speed;
680 is_pmu_based = 1;
681 }
682 /* Else check for TiPb 550 */
683 else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
684 hi_freq = cur_freq;
685 low_freq = 500000;
686 set_speed_proc = pmu_set_cpu_speed;
687 is_pmu_based = 1;
688 }
689 /* Else check for TiPb 400 & 500 */
690 else if (of_machine_is_compatible("PowerBook3,2")) {
691		/* We only know about the 400 MHz and the 500 MHz models;
692		 * they both have 300 MHz as the low frequency
693 */
694 if (cur_freq < 350000 || cur_freq > 550000)
695 goto out;
696 hi_freq = cur_freq;
697 low_freq = 300000;
698 set_speed_proc = pmu_set_cpu_speed;
699 is_pmu_based = 1;
700 }
701 /* Else check for 750FX */
702 else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
703 pmac_cpufreq_init_750FX(cpunode);
704out:
705 of_node_put(cpunode);
706 if (set_speed_proc == NULL)
707 return -ENODEV;
708
709 pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
710 pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
711 ppc_proc_freq = cur_freq * 1000ul;
712
713 printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
714 printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
715 low_freq/1000, hi_freq/1000, cur_freq/1000);
716
717 return cpufreq_register_driver(&pmac_cpufreq_driver);
718}
719
720module_init(pmac_cpufreq_setup);
721
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
new file mode 100644
index 000000000000..7ba423431cfe
--- /dev/null
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -0,0 +1,746 @@
1/*
2 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
3 * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
10 * that is iMac G5 and latest single CPU desktop.
11 */
12
13#undef DEBUG
14
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/cpufreq.h>
22#include <linux/init.h>
23#include <linux/completion.h>
24#include <linux/mutex.h>
25#include <asm/prom.h>
26#include <asm/machdep.h>
27#include <asm/irq.h>
28#include <asm/sections.h>
29#include <asm/cputable.h>
30#include <asm/time.h>
31#include <asm/smu.h>
32#include <asm/pmac_pfunc.h>
33
34#define DBG(fmt...) pr_debug(fmt)
35
36/* see 970FX user manual */
37
38#define SCOM_PCR 0x0aa001 /* PCR scom addr */
39
40#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
41#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
42#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
43#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
44#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
45#define PCR_SPEED_SHIFT 17
46#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
47#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
48#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
49#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
50#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
51#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
52
53#define SCOM_PSR 0x408001 /* PSR scom addr */
54/* warning: PSR is a 64-bit register */
55#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
56#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
57#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
58#define PSR_CUR_SPEED_SHIFT (56)
59
60/*
61 * The G5 only supports two frequencies (Quarter speed is not supported)
62 */
63#define CPUFREQ_HIGH 0
64#define CPUFREQ_LOW 1
65
66static struct cpufreq_frequency_table g5_cpu_freqs[] = {
67 {CPUFREQ_HIGH, 0},
68 {CPUFREQ_LOW, 0},
69 {0, CPUFREQ_TABLE_END},
70};
71
72static struct freq_attr* g5_cpu_freqs_attr[] = {
73 &cpufreq_freq_attr_scaling_available_freqs,
74 NULL,
75};
76
77/* Power mode data is an array of the 32-bit PCR values to use for
78 * the various frequencies, retrieved from the device-tree
79 */
80static int g5_pmode_cur;
81
82static void (*g5_switch_volt)(int speed_mode);
83static int (*g5_switch_freq)(int speed_mode);
84static int (*g5_query_freq)(void);
85
86static DEFINE_MUTEX(g5_switch_mutex);
87
88static unsigned long transition_latency;
89
90#ifdef CONFIG_PMAC_SMU
91
92static const u32 *g5_pmode_data;
93static int g5_pmode_max;
94
95static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
96static int g5_fvt_count; /* number of op. points */
97static int g5_fvt_cur; /* current op. point */
98
99/*
100 * SMU based voltage switching for Neo2 platforms
101 */
102
103static void g5_smu_switch_volt(int speed_mode)
104{
105 struct smu_simple_cmd cmd;
106
107 DECLARE_COMPLETION_ONSTACK(comp);
108 smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
109 &comp, 'V', 'S', 'L', 'E', 'W',
110 0xff, g5_fvt_cur+1, speed_mode);
111 wait_for_completion(&comp);
112}
113
114/*
115 * Platform function based voltage/vdnap switching for Neo2
116 */
117
118static struct pmf_function *pfunc_set_vdnap0;
119static struct pmf_function *pfunc_vdnap0_complete;
120
121static void g5_vdnap_switch_volt(int speed_mode)
122{
123 struct pmf_args args;
124 u32 slew, done = 0;
125 unsigned long timeout;
126
127 slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
128 args.count = 1;
129 args.u[0].p = &slew;
130
131 pmf_call_one(pfunc_set_vdnap0, &args);
132
133 /* It's an irq GPIO so we should be able to just block here,
134 * I'll do that later after I've properly tested the IRQ code for
135 * platform functions
136 */
137 timeout = jiffies + HZ/10;
138 while(!time_after(jiffies, timeout)) {
139 args.count = 1;
140 args.u[0].p = &done;
141 pmf_call_one(pfunc_vdnap0_complete, &args);
142 if (done)
143 break;
144 msleep(1);
145 }
146 if (done == 0)
147 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
148}
149
150
151/*
152 * SCOM based frequency switching for 970FX rev3
153 */
154static int g5_scom_switch_freq(int speed_mode)
155{
156 unsigned long flags;
157 int to;
158
159 /* If frequency is going up, first ramp up the voltage */
160 if (speed_mode < g5_pmode_cur)
161 g5_switch_volt(speed_mode);
162
163 local_irq_save(flags);
164
165 /* Clear PCR high */
166 scom970_write(SCOM_PCR, 0);
167 /* Clear PCR low */
168 scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
169 /* Set PCR low */
170 scom970_write(SCOM_PCR, PCR_HILO_SELECT |
171 g5_pmode_data[speed_mode]);
172
173 /* Wait for completion */
174 for (to = 0; to < 10; to++) {
175 unsigned long psr = scom970_read(SCOM_PSR);
176
177 if ((psr & PSR_CMD_RECEIVED) == 0 &&
178 (((psr >> PSR_CUR_SPEED_SHIFT) ^
179 (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
180 == 0)
181 break;
182 if (psr & PSR_CMD_COMPLETED)
183 break;
184 udelay(100);
185 }
186
187 local_irq_restore(flags);
188
189 /* If frequency is going down, last ramp the voltage */
190 if (speed_mode > g5_pmode_cur)
191 g5_switch_volt(speed_mode);
192
193 g5_pmode_cur = speed_mode;
194 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
195
196 return 0;
197}
198
199static int g5_scom_query_freq(void)
200{
201 unsigned long psr = scom970_read(SCOM_PSR);
202 int i;
203
204 for (i = 0; i <= g5_pmode_max; i++)
205 if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
206 (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
207 break;
208 return i;
209}
210
211/*
212 * Fake voltage switching for platforms with missing support
213 */
214
215static void g5_dummy_switch_volt(int speed_mode)
216{
217}
218
219#endif /* CONFIG_PMAC_SMU */
220
221/*
222 * Platform function based voltage switching for PowerMac7,2 & 7,3
223 */
224
225static struct pmf_function *pfunc_cpu0_volt_high;
226static struct pmf_function *pfunc_cpu0_volt_low;
227static struct pmf_function *pfunc_cpu1_volt_high;
228static struct pmf_function *pfunc_cpu1_volt_low;
229
230static void g5_pfunc_switch_volt(int speed_mode)
231{
232 if (speed_mode == CPUFREQ_HIGH) {
233 if (pfunc_cpu0_volt_high)
234 pmf_call_one(pfunc_cpu0_volt_high, NULL);
235 if (pfunc_cpu1_volt_high)
236 pmf_call_one(pfunc_cpu1_volt_high, NULL);
237 } else {
238 if (pfunc_cpu0_volt_low)
239 pmf_call_one(pfunc_cpu0_volt_low, NULL);
240 if (pfunc_cpu1_volt_low)
241 pmf_call_one(pfunc_cpu1_volt_low, NULL);
242 }
243	msleep(10); /* should be faster, to fix */
244}
245
246/*
247 * Platform function based frequency switching for PowerMac7,2 & 7,3
248 */
249
250static struct pmf_function *pfunc_cpu_setfreq_high;
251static struct pmf_function *pfunc_cpu_setfreq_low;
252static struct pmf_function *pfunc_cpu_getfreq;
253static struct pmf_function *pfunc_slewing_done;
254
255static int g5_pfunc_switch_freq(int speed_mode)
256{
257 struct pmf_args args;
258 u32 done = 0;
259 unsigned long timeout;
260 int rc;
261
262 DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
263
264 /* If frequency is going up, first ramp up the voltage */
265 if (speed_mode < g5_pmode_cur)
266 g5_switch_volt(speed_mode);
267
268 /* Do it */
269 if (speed_mode == CPUFREQ_HIGH)
270 rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
271 else
272 rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
273
274 if (rc)
275 printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
276
277 /* It's an irq GPIO so we should be able to just block here,
278 * I'll do that later after I've properly tested the IRQ code for
279 * platform functions
280 */
281 timeout = jiffies + HZ/10;
282 while(!time_after(jiffies, timeout)) {
283 args.count = 1;
284 args.u[0].p = &done;
285 pmf_call_one(pfunc_slewing_done, &args);
286 if (done)
287 break;
288 msleep(1);
289 }
290 if (done == 0)
291 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
292
293 /* If frequency is going down, last ramp the voltage */
294 if (speed_mode > g5_pmode_cur)
295 g5_switch_volt(speed_mode);
296
297 g5_pmode_cur = speed_mode;
298 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
299
300 return 0;
301}
302
303static int g5_pfunc_query_freq(void)
304{
305 struct pmf_args args;
306 u32 val = 0;
307
308 args.count = 1;
309 args.u[0].p = &val;
310 pmf_call_one(pfunc_cpu_getfreq, &args);
311 return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
312}
313
314
315/*
316 * Common interface to the cpufreq core
317 */
318
319static int g5_cpufreq_verify(struct cpufreq_policy *policy)
320{
321 return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
322}
323
324static int g5_cpufreq_target(struct cpufreq_policy *policy,
325 unsigned int target_freq, unsigned int relation)
326{
327 unsigned int newstate = 0;
328 struct cpufreq_freqs freqs;
329 int rc;
330
331 if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
332 target_freq, relation, &newstate))
333 return -EINVAL;
334
335 if (g5_pmode_cur == newstate)
336 return 0;
337
338 mutex_lock(&g5_switch_mutex);
339
340 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
341 freqs.new = g5_cpu_freqs[newstate].frequency;
342
343 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
344 rc = g5_switch_freq(newstate);
345 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
346
347 mutex_unlock(&g5_switch_mutex);
348
349 return rc;
350}
351
352static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
353{
354 return g5_cpu_freqs[g5_pmode_cur].frequency;
355}
356
357static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
358{
359 policy->cpuinfo.transition_latency = transition_latency;
360 policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
361	/* secondary CPUs are tied to the primary one by the
362	 * cpufreq core; in the secondary policy we tell it that
363	 * it actually must be one policy together with all others. */
364 cpumask_copy(policy->cpus, cpu_online_mask);
365 cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
366
367 return cpufreq_frequency_table_cpuinfo(policy,
368 g5_cpu_freqs);
369}
370
371
372static struct cpufreq_driver g5_cpufreq_driver = {
373 .name = "powermac",
374 .owner = THIS_MODULE,
375 .flags = CPUFREQ_CONST_LOOPS,
376 .init = g5_cpufreq_cpu_init,
377 .verify = g5_cpufreq_verify,
378 .target = g5_cpufreq_target,
379 .get = g5_cpufreq_get_speed,
380 .attr = g5_cpu_freqs_attr,
381};
382
383
384#ifdef CONFIG_PMAC_SMU
385
386static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
387{
388 struct device_node *cpunode;
389 unsigned int psize, ssize;
390 unsigned long max_freq;
391 char *freq_method, *volt_method;
392 const u32 *valp;
393 u32 pvr_hi;
394 int use_volts_vdnap = 0;
395 int use_volts_smu = 0;
396 int rc = -ENODEV;
397
398 /* Check supported platforms */
399 if (of_machine_is_compatible("PowerMac8,1") ||
400 of_machine_is_compatible("PowerMac8,2") ||
401 of_machine_is_compatible("PowerMac9,1"))
402 use_volts_smu = 1;
403 else if (of_machine_is_compatible("PowerMac11,2"))
404 use_volts_vdnap = 1;
405 else
406 return -ENODEV;
407
408 /* Get first CPU node */
409 for (cpunode = NULL;
410 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
411 const u32 *reg = of_get_property(cpunode, "reg", NULL);
412 if (reg == NULL || (*reg) != 0)
413 continue;
414 if (!strcmp(cpunode->type, "cpu"))
415 break;
416 }
417 if (cpunode == NULL) {
418 printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
419 return -ENODEV;
420 }
421
422 /* Check 970FX for now */
423 valp = of_get_property(cpunode, "cpu-version", NULL);
424 if (!valp) {
425 DBG("No cpu-version property !\n");
426 goto bail_noprops;
427 }
428 pvr_hi = (*valp) >> 16;
429 if (pvr_hi != 0x3c && pvr_hi != 0x44) {
430 printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
431 goto bail_noprops;
432 }
433
434 /* Look for the powertune data in the device-tree */
435 g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize);
436 if (!g5_pmode_data) {
437 DBG("No power-mode-data !\n");
438 goto bail_noprops;
439 }
440 g5_pmode_max = psize / sizeof(u32) - 1;
441
442 if (use_volts_smu) {
443 const struct smu_sdbp_header *shdr;
444
445 /* Look for the FVT table */
446 shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
447 if (!shdr)
448 goto bail_noprops;
449 g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
450 ssize = (shdr->len * sizeof(u32)) -
451 sizeof(struct smu_sdbp_header);
452 g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
453 g5_fvt_cur = 0;
454
455 /* Sanity checking */
456 if (g5_fvt_count < 1 || g5_pmode_max < 1)
457 goto bail_noprops;
458
459 g5_switch_volt = g5_smu_switch_volt;
460 volt_method = "SMU";
461 } else if (use_volts_vdnap) {
462 struct device_node *root;
463
464 root = of_find_node_by_path("/");
465 if (root == NULL) {
466 printk(KERN_ERR "cpufreq: Can't find root of "
467 "device tree\n");
468 goto bail_noprops;
469 }
470 pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
471 pfunc_vdnap0_complete =
472 pmf_find_function(root, "slewing-done");
473 if (pfunc_set_vdnap0 == NULL ||
474 pfunc_vdnap0_complete == NULL) {
475 printk(KERN_ERR "cpufreq: Can't find required "
476 "platform function\n");
477 goto bail_noprops;
478 }
479
480 g5_switch_volt = g5_vdnap_switch_volt;
481 volt_method = "GPIO";
482 } else {
483 g5_switch_volt = g5_dummy_switch_volt;
484 volt_method = "none";
485 }
486
487 /*
488 * From what I see, clock-frequency is always the maximal frequency.
489 * The current driver can not slew sysclk yet, so we really only deal
490 * with powertune steps for now. We also only implement full freq and
491 * half freq in this version. So far, I haven't yet seen a machine
492 * supporting anything else.
493 */
494 valp = of_get_property(cpunode, "clock-frequency", NULL);
495 if (!valp)
496 return -ENODEV;
497 max_freq = (*valp)/1000;
498 g5_cpu_freqs[0].frequency = max_freq;
499 g5_cpu_freqs[1].frequency = max_freq/2;
500
501 /* Set callbacks */
502 transition_latency = 12000;
503 g5_switch_freq = g5_scom_switch_freq;
504 g5_query_freq = g5_scom_query_freq;
505 freq_method = "SCOM";
506
507 /* Force apply current frequency to make sure everything is in
508 * sync (voltage is right for example). Firmware may leave us with
509 * a strange setting ...
510 */
511 g5_switch_volt(CPUFREQ_HIGH);
512 msleep(10);
513 g5_pmode_cur = -1;
514 g5_switch_freq(g5_query_freq());
515
516 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
517 printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
518 freq_method, volt_method);
519 printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
520 g5_cpu_freqs[1].frequency/1000,
521 g5_cpu_freqs[0].frequency/1000,
522 g5_cpu_freqs[g5_pmode_cur].frequency/1000);
523
524 rc = cpufreq_register_driver(&g5_cpufreq_driver);
525
526	/* We keep the CPU node on hold... hopefully, Apple G5s don't have
527	 * hotplug CPUs with a dynamic device-tree ...
528 */
529 return rc;
530
531 bail_noprops:
532 of_node_put(cpunode);
533
534 return rc;
535}
536
537#endif /* CONFIG_PMAC_SMU */
538
539
540static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
541{
542 struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
543 const u8 *eeprom = NULL;
544 const u32 *valp;
545 u64 max_freq, min_freq, ih, il;
546 int has_volt = 1, rc = 0;
547
548 DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
549 " RackMac3,1...\n");
550
551 /* Get first CPU node */
552 for (cpunode = NULL;
553 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
554 if (!strcmp(cpunode->type, "cpu"))
555 break;
556 }
557 if (cpunode == NULL) {
558 printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
559 return -ENODEV;
560 }
561
562 /* Lookup the cpuid eeprom node */
563 cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
564 if (cpuid != NULL)
565 eeprom = of_get_property(cpuid, "cpuid", NULL);
566 if (eeprom == NULL) {
567 printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
568 rc = -ENODEV;
569 goto bail;
570 }
571
572 /* Lookup the i2c hwclock */
573 for (hwclock = NULL;
574 (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
575 const char *loc = of_get_property(hwclock,
576 "hwctrl-location", NULL);
577 if (loc == NULL)
578 continue;
579 if (strcmp(loc, "CPU CLOCK"))
580 continue;
581 if (!of_get_property(hwclock, "platform-get-frequency", NULL))
582 continue;
583 break;
584 }
585 if (hwclock == NULL) {
586 printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
587 rc = -ENODEV;
588 goto bail;
589 }
590
591 DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
592
593 /* Now get all the platform functions */
594 pfunc_cpu_getfreq =
595 pmf_find_function(hwclock, "get-frequency");
596 pfunc_cpu_setfreq_high =
597 pmf_find_function(hwclock, "set-frequency-high");
598 pfunc_cpu_setfreq_low =
599 pmf_find_function(hwclock, "set-frequency-low");
600 pfunc_slewing_done =
601 pmf_find_function(hwclock, "slewing-done");
602 pfunc_cpu0_volt_high =
603 pmf_find_function(hwclock, "set-voltage-high-0");
604 pfunc_cpu0_volt_low =
605 pmf_find_function(hwclock, "set-voltage-low-0");
606 pfunc_cpu1_volt_high =
607 pmf_find_function(hwclock, "set-voltage-high-1");
608 pfunc_cpu1_volt_low =
609 pmf_find_function(hwclock, "set-voltage-low-1");
610
611 /* Check we have minimum requirements */
612 if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
613 pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
614 printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
615 rc = -ENODEV;
616 goto bail;
617 }
618
619 /* Check that we have complete sets */
620 if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
621 pmf_put_function(pfunc_cpu0_volt_high);
622 pmf_put_function(pfunc_cpu0_volt_low);
623 pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
624 has_volt = 0;
625 }
626 if (!has_volt ||
627 pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
628 pmf_put_function(pfunc_cpu1_volt_high);
629 pmf_put_function(pfunc_cpu1_volt_low);
630 pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
631 }
632
633 /* Note: The device tree also contains a "platform-set-values"
634 * function for which I haven't quite figured out the usage. It
635 * might have to be called on init and/or wakeup, I'm not too sure
636 * but things seem to work fine without it so far ...
637 */
638
639 /* Get max frequency from device-tree */
640 valp = of_get_property(cpunode, "clock-frequency", NULL);
641 if (!valp) {
642 printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
643 rc = -ENODEV;
644 goto bail;
645 }
646
647 max_freq = (*valp)/1000;
648
649	/* Now calculate the reduced frequency by using the cpuid input freq
650	 * ratio. This requires 64-bit math unless we are willing to lose
651	 * some precision.
652	 */
653 ih = *((u32 *)(eeprom + 0x10));
654 il = *((u32 *)(eeprom + 0x20));
655
656 /* Check for machines with no useful settings */
657 if (il == ih) {
658 printk(KERN_WARNING "cpufreq: No low frequency mode available"
659 " on this model !\n");
660 rc = -ENODEV;
661 goto bail;
662 }
663
664 min_freq = 0;
665 if (ih != 0 && il != 0)
666 min_freq = (max_freq * il) / ih;
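	/*
	 * Worked example (hypothetical EEPROM words, purely illustrative):
	 * with max_freq = 2000000 kHz, il = 20 and ih = 30, the line above
	 * gives min_freq = (2000000 * 20) / 30 = 1333333 kHz, i.e. 2/3 of
	 * the maximum. Since il and ih are full 32-bit words, the product
	 * max_freq * il can overflow 32 bits, which is why these locals
	 * are u64.
	 */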
667
668 /* Sanity check */
669 if (min_freq >= max_freq || min_freq < 1000) {
670 printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
671 rc = -ENXIO;
672 goto bail;
673 }
674 g5_cpu_freqs[0].frequency = max_freq;
675 g5_cpu_freqs[1].frequency = min_freq;
676
677 /* Set callbacks */
678 transition_latency = CPUFREQ_ETERNAL;
679 g5_switch_volt = g5_pfunc_switch_volt;
680 g5_switch_freq = g5_pfunc_switch_freq;
681 g5_query_freq = g5_pfunc_query_freq;
682
683 /* Force apply current frequency to make sure everything is in
684 * sync (voltage is right for example). Firmware may leave us with
685 * a strange setting ...
686 */
687 g5_switch_volt(CPUFREQ_HIGH);
688 msleep(10);
689 g5_pmode_cur = -1;
690 g5_switch_freq(g5_query_freq());
691
692 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
693 printk(KERN_INFO "Frequency method: i2c/pfunc, "
694 "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
695	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
696 g5_cpu_freqs[1].frequency/1000,
697 g5_cpu_freqs[0].frequency/1000,
698 g5_cpu_freqs[g5_pmode_cur].frequency/1000);
699
700 rc = cpufreq_register_driver(&g5_cpufreq_driver);
701 bail:
702 if (rc != 0) {
703 pmf_put_function(pfunc_cpu_getfreq);
704 pmf_put_function(pfunc_cpu_setfreq_high);
705 pmf_put_function(pfunc_cpu_setfreq_low);
706 pmf_put_function(pfunc_slewing_done);
707 pmf_put_function(pfunc_cpu0_volt_high);
708 pmf_put_function(pfunc_cpu0_volt_low);
709 pmf_put_function(pfunc_cpu1_volt_high);
710 pmf_put_function(pfunc_cpu1_volt_low);
711 }
712 of_node_put(hwclock);
713 of_node_put(cpuid);
714 of_node_put(cpunode);
715
716 return rc;
717}
718
719static int __init g5_cpufreq_init(void)
720{
721 struct device_node *cpus;
722 int rc = 0;
723
724 cpus = of_find_node_by_path("/cpus");
725 if (cpus == NULL) {
726 DBG("No /cpus node !\n");
727 return -ENODEV;
728 }
729
730 if (of_machine_is_compatible("PowerMac7,2") ||
731 of_machine_is_compatible("PowerMac7,3") ||
732 of_machine_is_compatible("RackMac3,1"))
733 rc = g5_pm72_cpufreq_init(cpus);
734#ifdef CONFIG_PMAC_SMU
735 else
736 rc = g5_neo2_cpufreq_init(cpus);
737#endif /* CONFIG_PMAC_SMU */
738
739 of_node_put(cpus);
740 return rc;
741}
742
743module_init(g5_cpufreq_init);
744
745
746MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index ea0222a45b7b..ea8e10382ec5 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -58,7 +58,7 @@ static int powernow_k6_get_cpu_multiplier(void)
58 msrval = POWERNOW_IOPORT + 0x0; 58 msrval = POWERNOW_IOPORT + 0x0;
59 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ 59 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
60 60
61 return clock_ratio[(invalue >> 5)&7].index; 61 return clock_ratio[(invalue >> 5)&7].driver_data;
62} 62}
63 63
64 64
@@ -75,13 +75,13 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
75 unsigned long msrval; 75 unsigned long msrval;
76 struct cpufreq_freqs freqs; 76 struct cpufreq_freqs freqs;
77 77
78 if (clock_ratio[best_i].index > max_multiplier) { 78 if (clock_ratio[best_i].driver_data > max_multiplier) {
79 printk(KERN_ERR PFX "invalid target frequency\n"); 79 printk(KERN_ERR PFX "invalid target frequency\n");
80 return; 80 return;
81 } 81 }
82 82
83 freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); 83 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
84 freqs.new = busfreq * clock_ratio[best_i].index; 84 freqs.new = busfreq * clock_ratio[best_i].driver_data;
85 85
86 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 86 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
87 87
@@ -156,7 +156,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
156 156
157 /* table init */ 157 /* table init */
158 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 158 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
159 f = clock_ratio[i].index; 159 f = clock_ratio[i].driver_data;
160 if (f > max_multiplier) 160 if (f > max_multiplier)
161 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; 161 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
162 else 162 else
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 53888dacbe58..b9f80b713fda 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -186,7 +186,7 @@ static int get_ranges(unsigned char *pst)
186 fid = *pst++; 186 fid = *pst++;
187 187
188 powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; 188 powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
189 powernow_table[j].index = fid; /* lower 8 bits */ 189 powernow_table[j].driver_data = fid; /* lower 8 bits */
190 190
191 speed = powernow_table[j].frequency; 191 speed = powernow_table[j].frequency;
192 192
@@ -203,7 +203,7 @@ static int get_ranges(unsigned char *pst)
203 maximum_speed = speed; 203 maximum_speed = speed;
204 204
205 vid = *pst++; 205 vid = *pst++;
206 powernow_table[j].index |= (vid << 8); /* upper 8 bits */ 206 powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */
207 207
208 pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " 208 pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
209 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, 209 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
@@ -212,7 +212,7 @@ static int get_ranges(unsigned char *pst)
212 mobile_vid_table[vid]%1000); 212 mobile_vid_table[vid]%1000);
213 } 213 }
214 powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; 214 powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
215 powernow_table[number_scales].index = 0; 215 powernow_table[number_scales].driver_data = 0;
216 216
217 return 0; 217 return 0;
218} 218}
@@ -260,8 +260,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index)
260 * vid are the upper 8 bits. 260 * vid are the upper 8 bits.
261 */ 261 */
262 262
263 fid = powernow_table[index].index & 0xFF; 263 fid = powernow_table[index].driver_data & 0xFF;
264 vid = (powernow_table[index].index & 0xFF00) >> 8; 264 vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
265 265
266 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); 266 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
267 cfid = fidvidstatus.bits.CFID; 267 cfid = fidvidstatus.bits.CFID;
@@ -373,8 +373,8 @@ static int powernow_acpi_init(void)
373 fid = pc.bits.fid; 373 fid = pc.bits.fid;
374 374
375 powernow_table[i].frequency = fsb * fid_codes[fid] / 10; 375 powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
376 powernow_table[i].index = fid; /* lower 8 bits */ 376 powernow_table[i].driver_data = fid; /* lower 8 bits */
377 powernow_table[i].index |= (vid << 8); /* upper 8 bits */ 377 powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */
378 378
379 speed = powernow_table[i].frequency; 379 speed = powernow_table[i].frequency;
380 speed_mhz = speed / 1000; 380 speed_mhz = speed / 1000;
@@ -417,7 +417,7 @@ static int powernow_acpi_init(void)
417 } 417 }
418 418
419 powernow_table[i].frequency = CPUFREQ_TABLE_END; 419 powernow_table[i].frequency = CPUFREQ_TABLE_END;
420 powernow_table[i].index = 0; 420 powernow_table[i].driver_data = 0;
421 421
422 /* notify BIOS that we exist */ 422 /* notify BIOS that we exist */
423 acpi_processor_notify_smm(THIS_MODULE); 423 acpi_processor_notify_smm(THIS_MODULE);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index b828efe4b2f8..78f018f2a5de 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -584,9 +584,9 @@ static void print_basics(struct powernow_k8_data *data)
584 CPUFREQ_ENTRY_INVALID) { 584 CPUFREQ_ENTRY_INVALID) {
585 printk(KERN_INFO PFX 585 printk(KERN_INFO PFX
586 "fid 0x%x (%d MHz), vid 0x%x\n", 586 "fid 0x%x (%d MHz), vid 0x%x\n",
587 data->powernow_table[j].index & 0xff, 587 data->powernow_table[j].driver_data & 0xff,
588 data->powernow_table[j].frequency/1000, 588 data->powernow_table[j].frequency/1000,
589 data->powernow_table[j].index >> 8); 589 data->powernow_table[j].driver_data >> 8);
590 } 590 }
591 } 591 }
592 if (data->batps) 592 if (data->batps)
@@ -632,13 +632,13 @@ static int fill_powernow_table(struct powernow_k8_data *data,
632 632
633 for (j = 0; j < data->numps; j++) { 633 for (j = 0; j < data->numps; j++) {
634 int freq; 634 int freq;
635 powernow_table[j].index = pst[j].fid; /* lower 8 bits */ 635 powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
636 powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ 636 powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
637 freq = find_khz_freq_from_fid(pst[j].fid); 637 freq = find_khz_freq_from_fid(pst[j].fid);
638 powernow_table[j].frequency = freq; 638 powernow_table[j].frequency = freq;
639 } 639 }
640 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; 640 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
641 powernow_table[data->numps].index = 0; 641 powernow_table[data->numps].driver_data = 0;
642 642
643 if (query_current_values_with_pending_wait(data)) { 643 if (query_current_values_with_pending_wait(data)) {
644 kfree(powernow_table); 644 kfree(powernow_table);
@@ -810,7 +810,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
810 810
811 powernow_table[data->acpi_data.state_count].frequency = 811 powernow_table[data->acpi_data.state_count].frequency =
812 CPUFREQ_TABLE_END; 812 CPUFREQ_TABLE_END;
813 powernow_table[data->acpi_data.state_count].index = 0; 813 powernow_table[data->acpi_data.state_count].driver_data = 0;
814 data->powernow_table = powernow_table; 814 data->powernow_table = powernow_table;
815 815
816 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) 816 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
@@ -865,7 +865,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
865 pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); 865 pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
866 866
867 index = fid | (vid<<8); 867 index = fid | (vid<<8);
868 powernow_table[i].index = index; 868 powernow_table[i].driver_data = index;
869 869
870 freq = find_khz_freq_from_fid(fid); 870 freq = find_khz_freq_from_fid(fid);
871 powernow_table[i].frequency = freq; 871 powernow_table[i].frequency = freq;
@@ -941,8 +941,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
941 * the cpufreq frequency table in find_psb_table, vid 941 * the cpufreq frequency table in find_psb_table, vid
942 * are the upper 8 bits. 942 * are the upper 8 bits.
943 */ 943 */
944 fid = data->powernow_table[index].index & 0xFF; 944 fid = data->powernow_table[index].driver_data & 0xFF;
945 vid = (data->powernow_table[index].index & 0xFF00) >> 8; 945 vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;
946 946
947 pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); 947 pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
948 948
@@ -967,9 +967,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
967 967
968 res = transition_fid_vid(data, fid, vid); 968 res = transition_fid_vid(data, fid, vid);
969 if (res) 969 if (res)
970 return res; 970 freqs.new = freqs.old;
971 971 else
972 freqs.new = find_khz_freq_from_fid(data->currfid); 972 freqs.new = find_khz_freq_from_fid(data->currfid);
973 973
974 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 974 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
975 return res; 975 return res;
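The powernow-k6/k7/k8 hunks above are part of the series-wide rename of the cpufreq_frequency_table "index" field to "driver_data"; the field is still used to stash driver-private state. A hedged sketch (not part of the patch, helper names invented for illustration) of how the k7/k8 drivers pack an fid/vid pair into that field, matching the bit layout shown in the hunks:

/* fid occupies the lower 8 bits, vid the upper 8 bits of driver_data */
static inline u32 powernow_pack_fidvid(u8 fid, u8 vid)
{
	return fid | (vid << 8);		/* stored in table[i].driver_data */
}

static inline void powernow_unpack_fidvid(u32 driver_data, u8 *fid, u8 *vid)
{
	*fid = driver_data & 0xFF;		/* lower 8 bits */
	*vid = (driver_data & 0xFF00) >> 8;	/* upper 8 bits */
}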
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
new file mode 100644
index 000000000000..3cae4529f959
--- /dev/null
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright 2013 Freescale Semiconductor, Inc.
3 *
4 * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
15#include <linux/errno.h>
16#include <sysdev/fsl_soc.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/of.h>
22#include <linux/slab.h>
23#include <linux/smp.h>
24
25/**
26 * struct cpu_data - per CPU data struct
27 * @clk: the clk of this CPU
28 * @parent: the parent node of the CPU clock
29 * @table: frequency table
30 */
31struct cpu_data {
32 struct clk *clk;
33 struct device_node *parent;
34 struct cpufreq_frequency_table *table;
35};
36
37/**
38 * struct soc_data - SoC specific data
39 * @freq_mask: mask the disallowed frequencies
40 * @flag: unique flags
41 */
42struct soc_data {
43 u32 freq_mask[4];
44 u32 flag;
45};
46
47#define FREQ_MASK 1
48/* see the hardware specification for the allowed frequencies */
49static const struct soc_data sdata[] = {
50 { /* used by p2041 and p3041 */
51 .freq_mask = {0x8, 0x8, 0x2, 0x2},
52 .flag = FREQ_MASK,
53 },
54 { /* used by p5020 */
55 .freq_mask = {0x8, 0x2},
56 .flag = FREQ_MASK,
57 },
58 { /* used by p4080, p5040 */
59 .freq_mask = {0},
60 .flag = 0,
61 },
62};
63
64/*
65 * the minimum allowed core frequency, in Hz
66 * for chassis v1.0, >= platform frequency
67 * for chassis v2.0, >= platform frequency / 2
68 */
69static u32 min_cpufreq;
70static const u32 *fmask;
71
72/* serialize frequency changes */
73static DEFINE_MUTEX(cpufreq_lock);
74static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
75
76/* cpumask in a cluster */
77static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
78
79#ifndef CONFIG_SMP
80static inline const struct cpumask *cpu_core_mask(int cpu)
81{
82 return cpumask_of(0);
83}
84#endif
85
86static unsigned int corenet_cpufreq_get_speed(unsigned int cpu)
87{
88 struct cpu_data *data = per_cpu(cpu_data, cpu);
89
90 return clk_get_rate(data->clk) / 1000;
91}
92
93/* remove duplicate frequencies from the frequency table */
94static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
95 int count)
96{
97 int i, j;
98
99 for (i = 1; i < count; i++) {
100 for (j = 0; j < i; j++) {
101 if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID ||
102 freq_table[j].frequency !=
103 freq_table[i].frequency)
104 continue;
105
106 freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
107 break;
108 }
109 }
110}
111
112/* sort the frequencies in the frequency table in descending order */
113static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
114 int count)
115{
116 int i, j, ind;
117 unsigned int freq, max_freq;
118 struct cpufreq_frequency_table table;
119 for (i = 0; i < count - 1; i++) {
120 max_freq = freq_table[i].frequency;
121 ind = i;
122 for (j = i + 1; j < count; j++) {
123 freq = freq_table[j].frequency;
124 if (freq == CPUFREQ_ENTRY_INVALID ||
125 freq <= max_freq)
126 continue;
127 ind = j;
128 max_freq = freq;
129 }
130
131 if (ind != i) {
132 /* exchange the frequencies */
133 table.driver_data = freq_table[i].driver_data;
134 table.frequency = freq_table[i].frequency;
135 freq_table[i].driver_data = freq_table[ind].driver_data;
136 freq_table[i].frequency = freq_table[ind].frequency;
137 freq_table[ind].driver_data = table.driver_data;
138 freq_table[ind].frequency = table.frequency;
139 }
140 }
141}
142
143static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
144{
145 struct device_node *np;
146 int i, count, ret;
147 u32 freq, mask;
148 struct clk *clk;
149 struct cpufreq_frequency_table *table;
150 struct cpu_data *data;
151 unsigned int cpu = policy->cpu;
152
153 np = of_get_cpu_node(cpu, NULL);
154 if (!np)
155 return -ENODEV;
156
157 data = kzalloc(sizeof(*data), GFP_KERNEL);
158 if (!data) {
159 pr_err("%s: no memory\n", __func__);
160 goto err_np;
161 }
162
163 data->clk = of_clk_get(np, 0);
164 if (IS_ERR(data->clk)) {
165 pr_err("%s: no clock information\n", __func__);
166 goto err_nomem2;
167 }
168
169 data->parent = of_parse_phandle(np, "clocks", 0);
170 if (!data->parent) {
171 pr_err("%s: could not get clock information\n", __func__);
172 goto err_nomem2;
173 }
174
175 count = of_property_count_strings(data->parent, "clock-names");
176 table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
177 if (!table) {
178 pr_err("%s: no memory\n", __func__);
179 goto err_node;
180 }
181
182 if (fmask)
183 mask = fmask[get_hard_smp_processor_id(cpu)];
184 else
185 mask = 0x0;
186
187 for (i = 0; i < count; i++) {
188 clk = of_clk_get(data->parent, i);
189 freq = clk_get_rate(clk);
190 /*
191		 * the clock is valid if its frequency is not masked
192		 * and not below the minimum allowed frequency.
193 */
194 if (freq < min_cpufreq || (mask & (1 << i)))
195 table[i].frequency = CPUFREQ_ENTRY_INVALID;
196 else
197 table[i].frequency = freq / 1000;
198 table[i].driver_data = i;
199 }
200 freq_table_redup(table, count);
201 freq_table_sort(table, count);
202 table[i].frequency = CPUFREQ_TABLE_END;
203
204 /* set the min and max frequency properly */
205 ret = cpufreq_frequency_table_cpuinfo(policy, table);
206 if (ret) {
207 pr_err("invalid frequency table: %d\n", ret);
208 goto err_nomem1;
209 }
210
211 data->table = table;
212 per_cpu(cpu_data, cpu) = data;
213
214	/* update ->cpus if we have a cluster; no harm if not */
215 cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
216 for_each_cpu(i, per_cpu(cpu_mask, cpu))
217 per_cpu(cpu_data, i) = data;
218
219 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
220 policy->cur = corenet_cpufreq_get_speed(policy->cpu);
221
222 cpufreq_frequency_table_get_attr(table, cpu);
223 of_node_put(np);
224
225 return 0;
226
227err_nomem1:
228 kfree(table);
229err_node:
230 of_node_put(data->parent);
231err_nomem2:
232 per_cpu(cpu_data, cpu) = NULL;
233 kfree(data);
234err_np:
235 of_node_put(np);
236
237 return -ENODEV;
238}
239
240static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
241{
242 struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
243 unsigned int cpu;
244
245 cpufreq_frequency_table_put_attr(policy->cpu);
246 of_node_put(data->parent);
247 kfree(data->table);
248 kfree(data);
249
250 for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
251 per_cpu(cpu_data, cpu) = NULL;
252
253 return 0;
254}
255
256static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
257{
258 struct cpufreq_frequency_table *table =
259 per_cpu(cpu_data, policy->cpu)->table;
260
261 return cpufreq_frequency_table_verify(policy, table);
262}
263
264static int corenet_cpufreq_target(struct cpufreq_policy *policy,
265 unsigned int target_freq, unsigned int relation)
266{
267 struct cpufreq_freqs freqs;
268 unsigned int new;
269 struct clk *parent;
270 int ret;
271 struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
272
273 cpufreq_frequency_table_target(policy, data->table,
274 target_freq, relation, &new);
275
276 if (policy->cur == data->table[new].frequency)
277 return 0;
278
279 freqs.old = policy->cur;
280 freqs.new = data->table[new].frequency;
281
282 mutex_lock(&cpufreq_lock);
283 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
284
285 parent = of_clk_get(data->parent, data->table[new].driver_data);
286 ret = clk_set_parent(data->clk, parent);
287 if (ret)
288 freqs.new = freqs.old;
289
290 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
291 mutex_unlock(&cpufreq_lock);
292
293 return ret;
294}
295
296static struct freq_attr *corenet_cpufreq_attr[] = {
297 &cpufreq_freq_attr_scaling_available_freqs,
298 NULL,
299};
300
301static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
302 .name = "ppc_cpufreq",
303 .owner = THIS_MODULE,
304 .flags = CPUFREQ_CONST_LOOPS,
305 .init = corenet_cpufreq_cpu_init,
306 .exit = __exit_p(corenet_cpufreq_cpu_exit),
307 .verify = corenet_cpufreq_verify,
308 .target = corenet_cpufreq_target,
309 .get = corenet_cpufreq_get_speed,
310 .attr = corenet_cpufreq_attr,
311};
312
313static const struct of_device_id node_matches[] __initdata = {
314 { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
315 { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
316 { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
317 { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
318 { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
319 { .compatible = "fsl,qoriq-clockgen-2.0", },
320 {}
321};
322
323static int __init ppc_corenet_cpufreq_init(void)
324{
325 int ret;
326 struct device_node *np;
327 const struct of_device_id *match;
328 const struct soc_data *data;
329 unsigned int cpu;
330
331 np = of_find_matching_node(NULL, node_matches);
332 if (!np)
333 return -ENODEV;
334
335 for_each_possible_cpu(cpu) {
336 if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
337 goto err_mask;
338 cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
339 }
340
341 match = of_match_node(node_matches, np);
342 data = match->data;
343 if (data) {
344 if (data->flag)
345 fmask = data->freq_mask;
346 min_cpufreq = fsl_get_sys_freq();
347 } else {
348 min_cpufreq = fsl_get_sys_freq() / 2;
349 }
350
351 of_node_put(np);
352
353 ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver);
354 if (!ret)
355 pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
356
357 return ret;
358
359err_mask:
360 for_each_possible_cpu(cpu)
361 free_cpumask_var(per_cpu(cpu_mask, cpu));
362
363 return -ENOMEM;
364}
365module_init(ppc_corenet_cpufreq_init);
366
367static void __exit ppc_corenet_cpufreq_exit(void)
368{
369 unsigned int cpu;
370
371 for_each_possible_cpu(cpu)
372 free_cpumask_var(per_cpu(cpu_mask, cpu));
373
374 cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
375}
376module_exit(ppc_corenet_cpufreq_exit);
377
378MODULE_LICENSE("GPL");
379MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
380MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs");
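The table-build loop in corenet_cpufreq_cpu_init() above decides, per parent-clock option, whether a frequency may be used: the option is skipped when its rate is below min_cpufreq or when the SoC's freq_mask for this hardware CPU has the corresponding bit set (for example, mask 0x8 on a p2041 rules out option 3). A minimal sketch of that test, with the function name invented for illustration:

/* usable iff the rate meets the floor and bit i is clear in the mask */
static bool corenet_option_usable(u32 rate_hz, u32 min_hz, u32 mask, int i)
{
	return rate_hz >= min_hz && !(mask & (1 << i));
}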
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e577a1dbbfcd..5936f8d6f2cc 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -106,7 +106,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
106 106
107 /* initialize frequency table */ 107 /* initialize frequency table */
108 for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { 108 for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
109 cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; 109 cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data;
110 pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); 110 pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
111 } 111 }
112 112
@@ -165,7 +165,7 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy,
165 "1/%d of max frequency\n", 165 "1/%d of max frequency\n",
166 policy->cpu, 166 policy->cpu,
167 cbe_freqs[cbe_pmode_new].frequency, 167 cbe_freqs[cbe_pmode_new].frequency,
168 cbe_freqs[cbe_pmode_new].index); 168 cbe_freqs[cbe_pmode_new].driver_data);
169 169
170 rc = set_pmode(policy->cpu, cbe_pmode_new); 170 rc = set_pmode(policy->cpu, cbe_pmode_new);
171 171
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 9e5bc8e388a0..fb3981ac829f 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -420,7 +420,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
420 /* Generate pxa25x the run cpufreq_frequency_table struct */ 420 /* Generate pxa25x the run cpufreq_frequency_table struct */
421 for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { 421 for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
422 pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; 422 pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
423 pxa255_run_freq_table[i].index = i; 423 pxa255_run_freq_table[i].driver_data = i;
424 } 424 }
425 pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; 425 pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
426 426
@@ -428,7 +428,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
428 for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { 428 for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
429 pxa255_turbo_freq_table[i].frequency = 429 pxa255_turbo_freq_table[i].frequency =
430 pxa255_turbo_freqs[i].khz; 430 pxa255_turbo_freqs[i].khz;
431 pxa255_turbo_freq_table[i].index = i; 431 pxa255_turbo_freq_table[i].driver_data = i;
432 } 432 }
433 pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; 433 pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
434 434
@@ -440,9 +440,9 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
440 if (freq > pxa27x_maxfreq) 440 if (freq > pxa27x_maxfreq)
441 break; 441 break;
442 pxa27x_freq_table[i].frequency = freq; 442 pxa27x_freq_table[i].frequency = freq;
443 pxa27x_freq_table[i].index = i; 443 pxa27x_freq_table[i].driver_data = i;
444 } 444 }
445 pxa27x_freq_table[i].index = i; 445 pxa27x_freq_table[i].driver_data = i;
446 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; 446 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
447 447
448 /* 448 /*
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 15d60f857ad5..9c92ef032a9e 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -98,10 +98,10 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
98 return -ENOMEM; 98 return -ENOMEM;
99 99
100 for (i = 0; i < num; i++) { 100 for (i = 0; i < num; i++) {
101 table[i].index = i; 101 table[i].driver_data = i;
102 table[i].frequency = freqs[i].cpufreq_mhz * 1000; 102 table[i].frequency = freqs[i].cpufreq_mhz * 1000;
103 } 103 }
104 table[num].index = i; 104 table[num].driver_data = i;
105 table[num].frequency = CPUFREQ_TABLE_END; 105 table[num].frequency = CPUFREQ_TABLE_END;
106 106
107 pxa3xx_freqs = freqs; 107 pxa3xx_freqs = freqs;
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 4f1881eee3f1..69f2e55828dc 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -244,7 +244,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
244 if (ret != 0) 244 if (ret != 0)
245 goto out; 245 goto out;
246 246
247 idx = s3c_freq->freq_table[i].index; 247 idx = s3c_freq->freq_table[i].driver_data;
248 248
249 if (idx == SOURCE_HCLK) 249 if (idx == SOURCE_HCLK)
250 to_dvs = 1; 250 to_dvs = 1;
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 27cacb524796..306d395de990 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -87,7 +87,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
87 freqs.old = clk_get_rate(armclk) / 1000; 87 freqs.old = clk_get_rate(armclk) / 1000;
88 freqs.new = s3c64xx_freq_table[i].frequency; 88 freqs.new = s3c64xx_freq_table[i].frequency;
89 freqs.flags = 0; 89 freqs.flags = 0;
90 dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index]; 90 dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
91 91
92 if (freqs.old == freqs.new) 92 if (freqs.old == freqs.new)
93 return 0; 93 return 0;
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index f740b134d27b..77a210975fc4 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -71,7 +71,7 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
71 local_irq_disable(); 71 local_irq_disable();
72 72
73 clockspeed_reg = *cpuctl & ~0x03; 73 clockspeed_reg = *cpuctl & ~0x03;
74 *cpuctl = clockspeed_reg | sc520_freq_table[state].index; 74 *cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
75 75
76 local_irq_enable(); 76 local_irq_enable();
77 77
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 306ae462bba6..93061a408773 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -308,17 +308,17 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
308 struct cpufreq_frequency_table *table = 308 struct cpufreq_frequency_table *table =
309 &us2e_freq_table[cpu].table[0]; 309 &us2e_freq_table[cpu].table[0];
310 310
311 table[0].index = 0; 311 table[0].driver_data = 0;
312 table[0].frequency = clock_tick / 1; 312 table[0].frequency = clock_tick / 1;
313 table[1].index = 1; 313 table[1].driver_data = 1;
314 table[1].frequency = clock_tick / 2; 314 table[1].frequency = clock_tick / 2;
315 table[2].index = 2; 315 table[2].driver_data = 2;
316 table[2].frequency = clock_tick / 4; 316 table[2].frequency = clock_tick / 4;
317 table[2].index = 3; 317 table[2].driver_data = 3;
318 table[2].frequency = clock_tick / 6; 318 table[2].frequency = clock_tick / 6;
319 table[2].index = 4; 319 table[2].driver_data = 4;
320 table[2].frequency = clock_tick / 8; 320 table[2].frequency = clock_tick / 8;
321 table[2].index = 5; 321 table[2].driver_data = 5;
322 table[3].frequency = CPUFREQ_TABLE_END; 322 table[3].frequency = CPUFREQ_TABLE_END;
323 323
324 policy->cpuinfo.transition_latency = 0; 324 policy->cpuinfo.transition_latency = 0;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index c71ee142347a..880ee293d61e 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -169,13 +169,13 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
169 struct cpufreq_frequency_table *table = 169 struct cpufreq_frequency_table *table =
170 &us3_freq_table[cpu].table[0]; 170 &us3_freq_table[cpu].table[0];
171 171
172 table[0].index = 0; 172 table[0].driver_data = 0;
173 table[0].frequency = clock_tick / 1; 173 table[0].frequency = clock_tick / 1;
174 table[1].index = 1; 174 table[1].driver_data = 1;
175 table[1].frequency = clock_tick / 2; 175 table[1].frequency = clock_tick / 2;
176 table[2].index = 2; 176 table[2].driver_data = 2;
177 table[2].frequency = clock_tick / 32; 177 table[2].frequency = clock_tick / 32;
178 table[3].index = 0; 178 table[3].driver_data = 0;
179 table[3].frequency = CPUFREQ_TABLE_END; 179 table[3].frequency = CPUFREQ_TABLE_END;
180 180
181 policy->cpuinfo.transition_latency = 0; 181 policy->cpuinfo.transition_latency = 0;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 156829f4576d..c3efa7f2a908 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -250,11 +250,11 @@ static int spear_cpufreq_driver_init(void)
250 } 250 }
251 251
252 for (i = 0; i < cnt; i++) { 252 for (i = 0; i < cnt; i++) {
253 freq_tbl[i].index = i; 253 freq_tbl[i].driver_data = i;
254 freq_tbl[i].frequency = be32_to_cpup(val++); 254 freq_tbl[i].frequency = be32_to_cpup(val++);
255 } 255 }
256 256
257 freq_tbl[i].index = i; 257 freq_tbl[i].driver_data = i;
258 freq_tbl[i].frequency = CPUFREQ_TABLE_END; 258 freq_tbl[i].frequency = CPUFREQ_TABLE_END;
259 259
260 spear_cpufreq.freq_tbl = freq_tbl; 260 spear_cpufreq.freq_tbl = freq_tbl;
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 618e6f417b1c..0915e712fbdc 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -79,11 +79,11 @@ static struct cpufreq_driver centrino_driver;
79 79
80/* Computes the correct form for IA32_PERF_CTL MSR for a particular 80/* Computes the correct form for IA32_PERF_CTL MSR for a particular
81 frequency/voltage operating point; frequency in MHz, volts in mV. 81 frequency/voltage operating point; frequency in MHz, volts in mV.
82 This is stored as "index" in the structure. */ 82 This is stored as "driver_data" in the structure. */
83#define OP(mhz, mv) \ 83#define OP(mhz, mv) \
84 { \ 84 { \
85 .frequency = (mhz) * 1000, \ 85 .frequency = (mhz) * 1000, \
86 .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \ 86 .driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16) \
87 } 87 }
88 88
89/* 89/*
@@ -307,7 +307,7 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
307 per_cpu(centrino_model, cpu)->op_points[i].frequency 307 per_cpu(centrino_model, cpu)->op_points[i].frequency
308 != CPUFREQ_TABLE_END; 308 != CPUFREQ_TABLE_END;
309 i++) { 309 i++) {
310 if (msr == per_cpu(centrino_model, cpu)->op_points[i].index) 310 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
311 return per_cpu(centrino_model, cpu)-> 311 return per_cpu(centrino_model, cpu)->
312 op_points[i].frequency; 312 op_points[i].frequency;
313 } 313 }
@@ -501,7 +501,7 @@ static int centrino_target (struct cpufreq_policy *policy,
501 break; 501 break;
502 } 502 }
503 503
504 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; 504 msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
505 505
506 if (first_cpu) { 506 if (first_cpu) {
507 rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); 507 rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
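The OP() macro in the speedstep-centrino hunk above encodes an operating point's IA32_PERF_CTL value into the renamed driver_data field. As a worked example with a hypothetical 1000 MHz / 1500 mV point (not one of the driver's real tables), the macro expands to:

/* hypothetical table entry produced by OP(1000, 1500) from the macro above */
static struct cpufreq_frequency_table centrino_op_example[] = {
	{
		.frequency   = 1000 * 1000,	/* 1000000 kHz */
		.driver_data = ((1000 / 100) << 8) | ((1500 - 700) / 16),
						/* (10 << 8) | 50 = 0xa32 */
	},
	{ .frequency = CPUFREQ_TABLE_END },
};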
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index c74c0e130ef4..81561be17e8c 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -28,17 +28,16 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/suspend.h> 29#include <linux/suspend.h>
30 30
31/* Frequency table index must be sequential starting at 0 */
32static struct cpufreq_frequency_table freq_table[] = { 31static struct cpufreq_frequency_table freq_table[] = {
33 { 0, 216000 }, 32 { .frequency = 216000 },
34 { 1, 312000 }, 33 { .frequency = 312000 },
35 { 2, 456000 }, 34 { .frequency = 456000 },
36 { 3, 608000 }, 35 { .frequency = 608000 },
37 { 4, 760000 }, 36 { .frequency = 760000 },
38 { 5, 816000 }, 37 { .frequency = 816000 },
39 { 6, 912000 }, 38 { .frequency = 912000 },
40 { 7, 1000000 }, 39 { .frequency = 1000000 },
41 { 8, CPUFREQ_TABLE_END }, 40 { .frequency = CPUFREQ_TABLE_END },
42}; 41};
43 42
44#define NUM_CPUS 2 43#define NUM_CPUS 2