author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/cpu/cpufreq
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/i386/kernel/cpu/cpufreq')
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig                  231
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile                  14
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c           537
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c        457
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/elanfreq.c               312
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c             502
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c               658
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h               466
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longrun.c                326
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c            337
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k6.c            256
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.c            690
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.h             44
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c           1135
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h            176
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c     715
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h    25
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-ich.c          424
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c          385
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h           47
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c          424
21 files changed, 8161 insertions, 0 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
new file mode 100644
index 000000000000..f25ffd74235c
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -0,0 +1,231 @@
1#
2# CPU Frequency scaling
3#
4
5menu "CPU Frequency scaling"
6
7source "drivers/cpufreq/Kconfig"
8
9if CPU_FREQ
10
11comment "CPUFreq processor drivers"
12
13config X86_ACPI_CPUFREQ
14 tristate "ACPI Processor P-States driver"
15 select CPU_FREQ_TABLE
16 depends on ACPI_PROCESSOR
17 help
18 This driver adds a CPUFreq driver which utilizes the ACPI
19 Processor Performance States.
20
21 For details, take a look at <file:Documentation/cpu-freq/>.
22
23 If in doubt, say N.
24
25config ELAN_CPUFREQ
26 tristate "AMD Elan"
27 select CPU_FREQ_TABLE
28 depends on X86_ELAN
29 ---help---
30 This adds the CPUFreq driver for AMD Elan SC400 and SC410
31 processors.
32
33 You need to specify the processor maximum speed as boot
34 parameter: elanfreq=maxspeed (in kHz) or as module
35 parameter "max_freq".
36
37 For details, take a look at <file:Documentation/cpu-freq/>.
38
39 If in doubt, say N.
40
41config X86_POWERNOW_K6
42 tristate "AMD Mobile K6-2/K6-3 PowerNow!"
43 select CPU_FREQ_TABLE
44 help
45 This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
46 AMD K6-3+ processors.
47
48 For details, take a look at <file:Documentation/cpu-freq/>.
49
50 If in doubt, say N.
51
52config X86_POWERNOW_K7
53 tristate "AMD Mobile Athlon/Duron PowerNow!"
54 select CPU_FREQ_TABLE
55 help
56 This adds the CPUFreq driver for mobile AMD K7 processors.
57
58 For details, take a look at <file:Documentation/cpu-freq/>.
59
60 If in doubt, say N.
61
62config X86_POWERNOW_K7_ACPI
63 bool
64 depends on X86_POWERNOW_K7 && ACPI_PROCESSOR
65 depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m)
66 default y
67
68config X86_POWERNOW_K8
69 tristate "AMD Opteron/Athlon64 PowerNow!"
70 select CPU_FREQ_TABLE
71 depends on EXPERIMENTAL
72 help
73 This adds the CPUFreq driver for AMD Opteron/Athlon64 processors.
74
75 For details, take a look at <file:Documentation/cpu-freq/>.
76
77 If in doubt, say N.
78
79config X86_POWERNOW_K8_ACPI
80 bool
81 depends on X86_POWERNOW_K8 && ACPI_PROCESSOR
82 depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m)
83 default y
84
85config X86_GX_SUSPMOD
86 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
87 help
88 This adds the CPUFreq driver for NatSemi Geode processors which
89 support suspend modulation.
90
91 For details, take a look at <file:Documentation/cpu-freq/>.
92
93 If in doubt, say N.
94
95config X86_SPEEDSTEP_CENTRINO
96 tristate "Intel Enhanced SpeedStep"
97 select CPU_FREQ_TABLE
98 select X86_SPEEDSTEP_CENTRINO_TABLE if (!X86_SPEEDSTEP_CENTRINO_ACPI)
99 help
100 This adds the CPUFreq driver for Enhanced SpeedStep enabled
101 mobile CPUs. This means Intel Pentium M (Centrino) CPUs. However,
102 you also need to say Y to "Use ACPI tables to decode..." below
103 [which might imply enabling ACPI] if you want to use this driver
104 on non-Banias CPUs.
105
106 For details, take a look at <file:Documentation/cpu-freq/>.
107
108 If in doubt, say N.
109
110config X86_SPEEDSTEP_CENTRINO_ACPI
111 bool "Use ACPI tables to decode valid frequency/voltage pairs"
112 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
113 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
114 default y
115 help
116 Use primarily the information provided in the BIOS ACPI tables
117 to determine valid CPU frequency and voltage pairings. It is
118 required for the driver to work on non-Banias CPUs.
119
120 If in doubt, say Y.
121
122config X86_SPEEDSTEP_CENTRINO_TABLE
123 bool "Built-in tables for Banias CPUs"
124 depends on X86_SPEEDSTEP_CENTRINO
125 default y
126 help
127 Use built-in tables for Banias CPUs if ACPI encoding
128 is not available.
129
130 If in doubt, say N.
131
132config X86_SPEEDSTEP_ICH
133 tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
134 select CPU_FREQ_TABLE
135 help
136 This adds the CPUFreq driver for certain mobile Intel Pentium III
137 (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
138 mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
139 ICH3 or ICH4 southbridge.
140
141 For details, take a look at <file:Documentation/cpu-freq/>.
142
143 If in doubt, say N.
144
145config X86_SPEEDSTEP_SMI
146 tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
147 select CPU_FREQ_TABLE
148 depends on EXPERIMENTAL
149 help
150 This adds the CPUFreq driver for certain mobile Intel Pentium III
151 (Coppermine), all mobile Intel Pentium III-M (Tualatin)
152 on systems which have an Intel 440BX/ZX/MX southbridge.
153
154 For details, take a look at <file:Documentation/cpu-freq/>.
155
156 If in doubt, say N.
157
158config X86_P4_CLOCKMOD
159 tristate "Intel Pentium 4 clock modulation"
160 select CPU_FREQ_TABLE
161 help
162 This adds the CPUFreq driver for Intel Pentium 4 / XEON
163 processors.
164
165 For details, take a look at <file:Documentation/cpu-freq/>.
166
167 If in doubt, say N.
168
169config X86_CPUFREQ_NFORCE2
170 tristate "nVidia nForce2 FSB changing"
171 depends on EXPERIMENTAL
172 help
173 This adds the CPUFreq driver for FSB changing on nVidia nForce2
174 platforms.
175
176 For details, take a look at <file:Documentation/cpu-freq/>.
177
178 If in doubt, say N.
179
180config X86_LONGRUN
181 tristate "Transmeta LongRun"
182 help
183 This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors
184 which support LongRun.
185
186 For details, take a look at <file:Documentation/cpu-freq/>.
187
188 If in doubt, say N.
189
190config X86_LONGHAUL
191 tristate "VIA Cyrix III Longhaul"
192 select CPU_FREQ_TABLE
193 help
194 This adds the CPUFreq driver for VIA Samuel/CyrixIII,
195 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
196 processors.
197
198 For details, take a look at <file:Documentation/cpu-freq/>.
199
200 If in doubt, say N.
201
202comment "shared options"
203
204config X86_ACPI_CPUFREQ_PROC_INTF
205 bool "/proc/acpi/processor/../performance interface (deprecated)"
206 depends on PROC_FS
207 depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
208 help
209 This enables the deprecated /proc/acpi/processor/../performance
210 interface. While it is helpful for debugging, the generic,
211 cross-architecture cpufreq interfaces should be used.
212
213 If in doubt, say N.
214
215config X86_SPEEDSTEP_LIB
216 tristate
217 default X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD
218
219config X86_SPEEDSTEP_RELAXED_CAP_CHECK
220 bool "Relaxed speedstep capability checks"
221 depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
222 help
223 Skip some of the checks normally performed to verify that a system
224 is SpeedStep capable. Some ancient or unusual systems, though SpeedStep
225 capable, do not always indicate that they are. This
226 option lets the probing code bypass some of those checks if the
227 parameter "relaxed_check=1" is passed to the module.
228
229endif # CPU_FREQ
230
231endmenu
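The two module parameters mentioned in the help texts above are passed in the usual ways; the invocations below are illustrative only (the module names follow the object names in the Makefile that comes next, and the numeric value is an example, not a recommended setting):

    modprobe elanfreq max_freq=33000        # Elan maximum speed in kHz; built-in: boot with elanfreq=33000
    modprobe speedstep-smi relaxed_check=1  # bypass some of the SpeedStep capability checks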
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
new file mode 100644
index 000000000000..a922e97aeedd
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -0,0 +1,14 @@
1obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
2obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
3obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
4obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
5obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
6obj-$(CONFIG_X86_LONGRUN) += longrun.o
7obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
8obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
9obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
10obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
11obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
12obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
13obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
14obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
new file mode 100644
index 000000000000..963e17aa205d
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -0,0 +1,537 @@
1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $)
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/cpufreq.h>
32#include <linux/proc_fs.h>
33#include <linux/seq_file.h>
34#include <asm/io.h>
35#include <asm/delay.h>
36#include <asm/uaccess.h>
37
38#include <linux/acpi.h>
39#include <acpi/processor.h>
40
41#include "speedstep-est-common.h"
42
43#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
44
45MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
46MODULE_DESCRIPTION("ACPI Processor P-States Driver");
47MODULE_LICENSE("GPL");
48
49
50struct cpufreq_acpi_io {
51 struct acpi_processor_performance acpi_data;
52 struct cpufreq_frequency_table *freq_table;
53 unsigned int resume;
54};
55
56static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
57
58static struct cpufreq_driver acpi_cpufreq_driver;
59
60static int
61acpi_processor_write_port(
62 u16 port,
63 u8 bit_width,
64 u32 value)
65{
66 if (bit_width <= 8) {
67 outb(value, port);
68 } else if (bit_width <= 16) {
69 outw(value, port);
70 } else if (bit_width <= 32) {
71 outl(value, port);
72 } else {
73 return -ENODEV;
74 }
75 return 0;
76}
77
78static int
79acpi_processor_read_port(
80 u16 port,
81 u8 bit_width,
82 u32 *ret)
83{
84 *ret = 0;
85 if (bit_width <= 8) {
86 *ret = inb(port);
87 } else if (bit_width <= 16) {
88 *ret = inw(port);
89 } else if (bit_width <= 32) {
90 *ret = inl(port);
91 } else {
92 return -ENODEV;
93 }
94 return 0;
95}
96
97static int
98acpi_processor_set_performance (
99 struct cpufreq_acpi_io *data,
100 unsigned int cpu,
101 int state)
102{
103 u16 port = 0;
104 u8 bit_width = 0;
105 int ret = 0;
106 u32 value = 0;
107 int i = 0;
108 struct cpufreq_freqs cpufreq_freqs;
109 cpumask_t saved_mask;
110 int retval;
111
112 dprintk("acpi_processor_set_performance\n");
113
114 /*
115 * TBD: Use something other than set_cpus_allowed.
116 * set_cpus_allowed() is a bit racy against any other
117 * set_cpus_allowed() call for this process.
118 */
119 saved_mask = current->cpus_allowed;
120 set_cpus_allowed(current, cpumask_of_cpu(cpu));
121 if (smp_processor_id() != cpu) {
122 retval = -EAGAIN;
123 goto migrate_end; /* restore the saved cpumask before returning */
 }
124
125 if (state == data->acpi_data.state) {
126 if (unlikely(data->resume)) {
127 dprintk("Called after resume, resetting to P%d\n", state);
128 data->resume = 0;
129 } else {
130 dprintk("Already at target state (P%d)\n", state);
131 retval = 0;
132 goto migrate_end;
133 }
134 }
135
136 dprintk("Transitioning from P%d to P%d\n",
137 data->acpi_data.state, state);
138
139 /* cpufreq frequency struct */
140 cpufreq_freqs.cpu = cpu;
141 cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
142 cpufreq_freqs.new = data->freq_table[state].frequency;
143
144 /* notify cpufreq */
145 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
146
147 /*
148 * First we write the target state's 'control' value to the
149 * control_register.
150 */
151
152 port = data->acpi_data.control_register.address;
153 bit_width = data->acpi_data.control_register.bit_width;
154 value = (u32) data->acpi_data.states[state].control;
155
156 dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
157
158 ret = acpi_processor_write_port(port, bit_width, value);
159 if (ret) {
160 dprintk("Invalid port width 0x%04x\n", bit_width);
161 retval = ret;
162 goto migrate_end;
163 }
164
165 /*
166 * Then we read the 'status_register' and compare the value with the
167 * target state's 'status' to make sure the transition was successful.
168 * Note that we'll poll for up to 1ms (100 cycles of 10us) before
169 * giving up.
170 */
171
172 port = data->acpi_data.status_register.address;
173 bit_width = data->acpi_data.status_register.bit_width;
174
175 dprintk("Looking for 0x%08x from port 0x%04x\n",
176 (u32) data->acpi_data.states[state].status, port);
177
178 for (i=0; i<100; i++) {
179 ret = acpi_processor_read_port(port, bit_width, &value);
180 if (ret) {
181 dprintk("Invalid port width 0x%04x\n", bit_width);
182 retval = ret;
183 goto migrate_end;
184 }
185 if (value == (u32) data->acpi_data.states[state].status)
186 break;
187 udelay(10);
188 }
189
190 /* notify cpufreq */
191 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
192
193 if (value != (u32) data->acpi_data.states[state].status) {
194 unsigned int tmp = cpufreq_freqs.new;
195 cpufreq_freqs.new = cpufreq_freqs.old;
196 cpufreq_freqs.old = tmp;
197 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
198 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
199 printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
200 retval = -ENODEV;
201 goto migrate_end;
202 }
203
204 dprintk("Transition successful after %d microseconds\n", i * 10);
205
206 data->acpi_data.state = state;
207
208 retval = 0;
209migrate_end:
210 set_cpus_allowed(current, saved_mask);
211 return (retval);
212}
213
214
215static int
216acpi_cpufreq_target (
217 struct cpufreq_policy *policy,
218 unsigned int target_freq,
219 unsigned int relation)
220{
221 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
222 unsigned int next_state = 0;
223 unsigned int result = 0;
224
225 dprintk("acpi_cpufreq_target\n");
226
227 result = cpufreq_frequency_table_target(policy,
228 data->freq_table,
229 target_freq,
230 relation,
231 &next_state);
232 if (result)
233 return (result);
234
235 result = acpi_processor_set_performance (data, policy->cpu, next_state);
236
237 return (result);
238}
239
240
241static int
242acpi_cpufreq_verify (
243 struct cpufreq_policy *policy)
244{
245 unsigned int result = 0;
246 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
247
248 dprintk("acpi_cpufreq_verify\n");
249
250 result = cpufreq_frequency_table_verify(policy,
251 data->freq_table);
252
253 return (result);
254}
255
256
257static unsigned long
258acpi_cpufreq_guess_freq (
259 struct cpufreq_acpi_io *data,
260 unsigned int cpu)
261{
262 if (cpu_khz) {
263 /* search the closest match to cpu_khz */
264 unsigned int i;
265 unsigned long freq;
266 unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
267
268 for (i=0; i < (data->acpi_data.state_count - 1); i++) {
269 freq = freqn;
270 freqn = data->acpi_data.states[i+1].core_frequency * 1000;
271 if ((2 * cpu_khz) > (freqn + freq)) {
272 data->acpi_data.state = i;
273 return (freq);
274 }
275 }
276 data->acpi_data.state = data->acpi_data.state_count - 1;
277 return (freqn);
278 } else
279 /* assume CPU is at P0... */
280 data->acpi_data.state = 0;
281 return data->acpi_data.states[0].core_frequency * 1000;
282
283}
284
285
286/*
287 * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
288 * of this driver
289 * @perf: processor-specific acpi_io_data struct
290 * @cpu: CPU being initialized
291 *
292 * To avoid issues with legacy OSes, some BIOSes need to be informed of
293 * the SMP capabilities of the OS P-state driver. Here we set the bits in _PDC
294 * accordingly, for Enhanced SpeedStep. The actual call to _PDC is done in
295 * drivers/acpi/processor.c
296 */
297static void
298acpi_processor_cpu_init_pdc_est(
299 struct acpi_processor_performance *perf,
300 unsigned int cpu,
301 struct acpi_object_list *obj_list
302 )
303{
304 union acpi_object *obj;
305 u32 *buf;
306 struct cpuinfo_x86 *c = cpu_data + cpu;
307 dprintk("acpi_processor_cpu_init_pdc_est\n");
308
309 if (!cpu_has(c, X86_FEATURE_EST))
310 return;
311
312 /* Initialize pdc. It will be used later. */
313 if (!obj_list)
314 return;
315
316 if (!(obj_list->count && obj_list->pointer))
317 return;
318
319 obj = obj_list->pointer;
320 if ((obj->buffer.length == 12) && obj->buffer.pointer) {
321 buf = (u32 *)obj->buffer.pointer;
322 buf[0] = ACPI_PDC_REVISION_ID;
323 buf[1] = 1;
324 buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
325 perf->pdc = obj_list;
326 }
327 return;
328}
329
330
331/* CPU specific PDC initialization */
332static void
333acpi_processor_cpu_init_pdc(
334 struct acpi_processor_performance *perf,
335 unsigned int cpu,
336 struct acpi_object_list *obj_list
337 )
338{
339 struct cpuinfo_x86 *c = cpu_data + cpu;
340 dprintk("acpi_processor_cpu_init_pdc\n");
341 perf->pdc = NULL;
342 if (cpu_has(c, X86_FEATURE_EST))
343 acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
344 return;
345}
346
347
348static int
349acpi_cpufreq_cpu_init (
350 struct cpufreq_policy *policy)
351{
352 unsigned int i;
353 unsigned int cpu = policy->cpu;
354 struct cpufreq_acpi_io *data;
355 unsigned int result = 0;
356
357 union acpi_object arg0 = {ACPI_TYPE_BUFFER};
358 u32 arg0_buf[3];
359 struct acpi_object_list arg_list = {1, &arg0};
360
361 dprintk("acpi_cpufreq_cpu_init\n");
362 /* setup arg_list for _PDC settings */
363 arg0.buffer.length = 12;
364 arg0.buffer.pointer = (u8 *) arg0_buf;
365
366 data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
367 if (!data)
368 return (-ENOMEM);
369 memset(data, 0, sizeof(struct cpufreq_acpi_io));
370
371 acpi_io_data[cpu] = data;
372
373 acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
374 result = acpi_processor_register_performance(&data->acpi_data, cpu);
375 data->acpi_data.pdc = NULL;
376
377 if (result)
378 goto err_free;
379
380 if (is_const_loops_cpu(cpu)) {
381 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
382 }
383
384 /* capability check */
385 if (data->acpi_data.state_count <= 1) {
386 dprintk("No P-States\n");
387 result = -ENODEV;
388 goto err_unreg;
389 }
390 if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
391 (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
392 dprintk("Unsupported address space [%d, %d]\n",
393 (u32) (data->acpi_data.control_register.space_id),
394 (u32) (data->acpi_data.status_register.space_id));
395 result = -ENODEV;
396 goto err_unreg;
397 }
398
399 /* alloc freq_table */
400 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
401 if (!data->freq_table) {
402 result = -ENOMEM;
403 goto err_unreg;
404 }
405
406 /* detect transition latency */
407 policy->cpuinfo.transition_latency = 0;
408 for (i=0; i<data->acpi_data.state_count; i++) {
409 if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
410 policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
411 }
412 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
413
414 /* The current speed is unknown and not detectable by ACPI... */
415 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
416
417 /* table init */
418 for (i=0; i<=data->acpi_data.state_count; i++)
419 {
420 data->freq_table[i].index = i;
421 if (i<data->acpi_data.state_count)
422 data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
423 else
424 data->freq_table[i].frequency = CPUFREQ_TABLE_END;
425 }
426
427 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
428 if (result) {
429 goto err_freqfree;
430 }
431
432 /* notify BIOS that we exist */
433 acpi_processor_notify_smm(THIS_MODULE);
434
435 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
436 cpu);
437 for (i = 0; i < data->acpi_data.state_count; i++)
438 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
439 (i == data->acpi_data.state?'*':' '), i,
440 (u32) data->acpi_data.states[i].core_frequency,
441 (u32) data->acpi_data.states[i].power,
442 (u32) data->acpi_data.states[i].transition_latency);
443
444 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
445 return (result);
446
447 err_freqfree:
448 kfree(data->freq_table);
449 err_unreg:
450 acpi_processor_unregister_performance(&data->acpi_data, cpu);
451 err_free:
452 kfree(data);
453 acpi_io_data[cpu] = NULL;
454
455 return (result);
456}
457
458
459static int
460acpi_cpufreq_cpu_exit (
461 struct cpufreq_policy *policy)
462{
463 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
464
465
466 dprintk("acpi_cpufreq_cpu_exit\n");
467
468 if (data) {
469 cpufreq_frequency_table_put_attr(policy->cpu);
470 acpi_io_data[policy->cpu] = NULL;
471 acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
472 kfree(data);
473 }
474
475 return (0);
476}
477
478static int
479acpi_cpufreq_resume (
480 struct cpufreq_policy *policy)
481{
482 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
483
484
485 dprintk("acpi_cpufreq_resume\n");
486
487 data->resume = 1;
488
489 return (0);
490}
491
492
493static struct freq_attr* acpi_cpufreq_attr[] = {
494 &cpufreq_freq_attr_scaling_available_freqs,
495 NULL,
496};
497
498static struct cpufreq_driver acpi_cpufreq_driver = {
499 .verify = acpi_cpufreq_verify,
500 .target = acpi_cpufreq_target,
501 .init = acpi_cpufreq_cpu_init,
502 .exit = acpi_cpufreq_cpu_exit,
503 .resume = acpi_cpufreq_resume,
504 .name = "acpi-cpufreq",
505 .owner = THIS_MODULE,
506 .attr = acpi_cpufreq_attr,
507};
508
509
510static int __init
511acpi_cpufreq_init (void)
512{
513 int result = 0;
514
515 dprintk("acpi_cpufreq_init\n");
516
517 result = cpufreq_register_driver(&acpi_cpufreq_driver);
518
519 return (result);
520}
521
522
523static void __exit
524acpi_cpufreq_exit (void)
525{
526 dprintk("acpi_cpufreq_exit\n");
527
528 cpufreq_unregister_driver(&acpi_cpufreq_driver);
529
530 return;
531}
532
533
534late_initcall(acpi_cpufreq_init);
535module_exit(acpi_cpufreq_exit);
536
537MODULE_ALIAS("acpi");
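The fallback in acpi_cpufreq_guess_freq() above picks the P-state closest to the measured cpu_khz by comparing twice the measured value against the sum of two neighbouring state frequencies, i.e. against their midpoint. A minimal stand-alone sketch of that selection rule, using made-up P-state frequencies rather than values from any real ACPI _PSS table:

#include <stdio.h>

int main(void)
{
	/* hypothetical P-state core frequencies in kHz, fastest first (P0..P2) */
	unsigned long states[] = { 1600000, 1200000, 800000 };
	unsigned long cpu_khz = 1180000;	/* pretend TSC-derived speed */
	unsigned int i, n = 3, state = n - 1;

	for (i = 0; i < n - 1; i++) {
		/* 2*cpu_khz > states[i] + states[i+1] means cpu_khz lies above the
		 * midpoint of the two states, so the faster state i is the closer match */
		if (2 * cpu_khz > states[i] + states[i + 1]) {
			state = i;
			break;
		}
	}

	printf("closest P-state: P%u (%lu kHz)\n", state, states[state]);	/* P1, 1200000 kHz */
	return 0;
}

With these example numbers the sketch settles on P1, just as the driver would assume 1200 MHz for a CPU measured at roughly 1.18 GHz.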
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
new file mode 100644
index 000000000000..04a405345203
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -0,0 +1,457 @@
1/*
2 * (C) 2004 Sebastian Witt <se.witt@gmx.net>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 * Based upon reverse engineered information
6 *
7 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17
18#define NFORCE2_XTAL 25
19#define NFORCE2_BOOTFSB 0x48
20#define NFORCE2_PLLENABLE 0xa8
21#define NFORCE2_PLLREG 0xa4
22#define NFORCE2_PLLADR 0xa0
23#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div)
24
25#define NFORCE2_MIN_FSB 50
26#define NFORCE2_SAFE_DISTANCE 50
27
28/* Delay in ms between FSB changes */
29//#define NFORCE2_DELAY 10
30
31/* nforce2_chipset:
32 * FSB is changed using the chipset
33 */
34static struct pci_dev *nforce2_chipset_dev;
35
36/* fid:
37 * multiplier * 10
38 */
39static int fid = 0;
40
41/* min_fsb, max_fsb:
42 * minimum and maximum FSB (= FSB at boot time)
43 */
44static int min_fsb = 0;
45static int max_fsb = 0;
46
47MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
48MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver");
49MODULE_LICENSE("GPL");
50
51module_param(fid, int, 0444);
52module_param(min_fsb, int, 0444);
53
54MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
55MODULE_PARM_DESC(min_fsb,
56 "Minimum FSB to use, if not defined: current FSB - 50");
57
58#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg)
59
60/*
61 * nforce2_calc_fsb - calculate FSB
62 * @pll: PLL value
63 *
64 * Calculates FSB from PLL value
65 */
66static int nforce2_calc_fsb(int pll)
67{
68 unsigned char mul, div;
69
70 mul = (pll >> 8) & 0xff;
71 div = pll & 0xff;
72
73 if (div > 0)
74 return NFORCE2_XTAL * mul / div;
75
76 return 0;
77}
78
79/*
80 * nforce2_calc_pll - calculate PLL value
81 * @fsb: FSB
82 *
83 * Calculate PLL value for given FSB
84 */
85static int nforce2_calc_pll(unsigned int fsb)
86{
87 unsigned char xmul, xdiv;
88 unsigned char mul = 0, div = 0;
89 int tried = 0;
90
91 /* Try to calculate multiplier and divider up to 4 times */
92 while (((mul == 0) || (div == 0)) && (tried <= 3)) {
93 for (xdiv = 1; xdiv <= 0x80; xdiv++)
94 for (xmul = 1; xmul <= 0xfe; xmul++)
95 if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) ==
96 fsb + tried) {
97 mul = xmul;
98 div = xdiv;
99 }
100 tried++;
101 }
102
103 if ((mul == 0) || (div == 0))
104 return -1;
105
106 return NFORCE2_PLL(mul, div);
107}
108
109/*
110 * nforce2_write_pll - write PLL value to chipset
111 * @pll: PLL value
112 *
113 * Writes new FSB PLL value to chipset
114 */
115static void nforce2_write_pll(int pll)
116{
117 int temp;
118
119 /* Set the pll addr. to 0x00 */
120 temp = 0x00;
121 pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, temp);
122
123 /* Now write the value in all 64 registers */
124 for (temp = 0; temp <= 0x3f; temp++) {
125 pci_write_config_dword(nforce2_chipset_dev,
126 NFORCE2_PLLREG, pll);
127 }
128
129 return;
130}
131
132/*
133 * nforce2_fsb_read - Read FSB
134 *
135 * Read FSB from chipset
136 * If bootfsb != 0, return FSB at boot-time
137 */
138static unsigned int nforce2_fsb_read(int bootfsb)
139{
140 struct pci_dev *nforce2_sub5;
141 u32 fsb, temp = 0;
142
143
144 /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
145 nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
146 0x01EF,
147 PCI_ANY_ID,
148 PCI_ANY_ID,
149 NULL);
150
151 if (!nforce2_sub5)
152 return 0;
153
154 pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
155 fsb /= 1000000;
156
157 /* Check if PLL register is already set */
158 pci_read_config_byte(nforce2_chipset_dev,
159 NFORCE2_PLLENABLE, (u8 *)&temp);
160
161 if(bootfsb || !temp)
162 return fsb;
163
164 /* Use PLL register FSB value */
165 pci_read_config_dword(nforce2_chipset_dev,
166 NFORCE2_PLLREG, &temp);
167 fsb = nforce2_calc_fsb(temp);
168
169 return fsb;
170}
171
172/*
173 * nforce2_set_fsb - set new FSB
174 * @fsb: New FSB
175 *
176 * Sets new FSB
177 */
178static int nforce2_set_fsb(unsigned int fsb)
179{
180 int pll;
 u32 temp = 0;
181 unsigned int tfsb;
182 int diff;
183
184 if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
185 printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
186 return -EINVAL;
187 }
188
189 tfsb = nforce2_fsb_read(0);
190 if (!tfsb) {
191 printk(KERN_ERR "cpufreq: Error while reading the FSB\n");
192 return -EINVAL;
193 }
194
195 /* First write? Then set actual value */
196 pci_read_config_byte(nforce2_chipset_dev,
197 NFORCE2_PLLENABLE, (u8 *)&temp);
198 if (!temp) {
199 pll = nforce2_calc_pll(tfsb);
200
201 if (pll < 0)
202 return -EINVAL;
203
204 nforce2_write_pll(pll);
205 }
206
207 /* Enable write access */
208 temp = 0x01;
209 pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8)temp);
210
211 diff = tfsb - fsb;
212
213 if (!diff)
214 return 0;
215
216 while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) {
217 if (diff < 0)
218 tfsb++;
219 else
220 tfsb--;
221
222 /* Calculate the PLL reg. value */
223 if ((pll = nforce2_calc_pll(tfsb)) == -1)
224 return -EINVAL;
225
226 nforce2_write_pll(pll);
227#ifdef NFORCE2_DELAY
228 mdelay(NFORCE2_DELAY);
229#endif
230 }
231
232 temp = 0x40;
233 pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLADR, (u8)temp);
234
235 return 0;
236}
237
238/**
239 * nforce2_get - get the CPU frequency
240 * @cpu: CPU number
241 *
242 * Returns the CPU frequency
243 */
244static unsigned int nforce2_get(unsigned int cpu)
245{
246 if (cpu)
247 return 0;
248 return nforce2_fsb_read(0) * fid * 100;
249}
250
251/**
252 * nforce2_target - set a new CPUFreq policy
253 * @policy: new policy
254 * @target_freq: the target frequency
255 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
256 *
257 * Sets a new CPUFreq policy.
258 */
259static int nforce2_target(struct cpufreq_policy *policy,
260 unsigned int target_freq, unsigned int relation)
261{
262// unsigned long flags;
263 struct cpufreq_freqs freqs;
264 unsigned int target_fsb;
265
266 if ((target_freq > policy->max) || (target_freq < policy->min))
267 return -EINVAL;
268
269 target_fsb = target_freq / (fid * 100);
270
271 freqs.old = nforce2_get(policy->cpu);
272 freqs.new = target_fsb * fid * 100;
273 freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
274
275 if (freqs.old == freqs.new)
276 return 0;
277
278 dprintk(KERN_INFO "cpufreq: Old CPU frequency %d kHz, new %d kHz\n",
279 freqs.old, freqs.new);
280
281 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
282
283 /* Disable IRQs */
284 //local_irq_save(flags);
285
286 if (nforce2_set_fsb(target_fsb) < 0)
287 printk(KERN_ERR "cpufreq: Changing FSB to %d failed\n",
288 target_fsb);
289 else
290 dprintk(KERN_INFO "cpufreq: Changed FSB successfully to %d\n",
291 target_fsb);
292
293 /* Enable IRQs */
294 //local_irq_restore(flags);
295
296 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
297
298 return 0;
299}
300
301/**
302 * nforce2_verify - verifies a new CPUFreq policy
303 * @policy: new policy
304 */
305static int nforce2_verify(struct cpufreq_policy *policy)
306{
307 unsigned int fsb_pol_max;
308
309 fsb_pol_max = policy->max / (fid * 100);
310
311 if (policy->min < (fsb_pol_max * fid * 100))
312 policy->max = (fsb_pol_max + 1) * fid * 100;
313
314 cpufreq_verify_within_limits(policy,
315 policy->cpuinfo.min_freq,
316 policy->cpuinfo.max_freq);
317 return 0;
318}
319
320static int nforce2_cpu_init(struct cpufreq_policy *policy)
321{
322 unsigned int fsb;
323 unsigned int rfid;
324
325 /* capability check */
326 if (policy->cpu != 0)
327 return -ENODEV;
328
329 /* Get current FSB */
330 fsb = nforce2_fsb_read(0);
331
332 if (!fsb)
333 return -EIO;
334
335 /* FIX: Get FID from CPU */
336 if (!fid) {
337 if (!cpu_khz) {
338 printk(KERN_WARNING
339 "cpufreq: cpu_khz not set, can't calculate multiplier!\n");
340 return -ENODEV;
341 }
342
343 fid = cpu_khz / (fsb * 100);
344 rfid = fid % 5;
345
346 if (rfid) {
347 if (rfid > 2)
348 fid += 5 - rfid;
349 else
350 fid -= rfid;
351 }
352 }
353
354 printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb,
355 fid / 10, fid % 10);
356
357 /* Set maximum FSB to FSB at boot time */
358 max_fsb = nforce2_fsb_read(1);
359
360 if(!max_fsb)
361 return -EIO;
362
363 if (!min_fsb)
364 min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE;
365
366 if (min_fsb < NFORCE2_MIN_FSB)
367 min_fsb = NFORCE2_MIN_FSB;
368
369 /* cpuinfo and default policy values */
370 policy->cpuinfo.min_freq = min_fsb * fid * 100;
371 policy->cpuinfo.max_freq = max_fsb * fid * 100;
372 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
373 policy->cur = nforce2_get(policy->cpu);
374 policy->min = policy->cpuinfo.min_freq;
375 policy->max = policy->cpuinfo.max_freq;
376 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
377
378 return 0;
379}
380
381static int nforce2_cpu_exit(struct cpufreq_policy *policy)
382{
383 return 0;
384}
385
386static struct cpufreq_driver nforce2_driver = {
387 .name = "nforce2",
388 .verify = nforce2_verify,
389 .target = nforce2_target,
390 .get = nforce2_get,
391 .init = nforce2_cpu_init,
392 .exit = nforce2_cpu_exit,
393 .owner = THIS_MODULE,
394};
395
396/**
397 * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
398 *
399 * Detects nForce2 A2 and C1 stepping
400 *
401 */
402static unsigned int nforce2_detect_chipset(void)
403{
404 u8 revision;
405
406 nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
407 PCI_DEVICE_ID_NVIDIA_NFORCE2,
408 PCI_ANY_ID,
409 PCI_ANY_ID,
410 NULL);
411
412 if (nforce2_chipset_dev == NULL)
413 return -ENODEV;
414
415 pci_read_config_byte(nforce2_chipset_dev, PCI_REVISION_ID, &revision);
416
417 printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n",
418 revision);
419 printk(KERN_INFO
420 "cpufreq: FSB changing may be unstable and can lead to crashes and data loss.\n");
421
422 return 0;
423}
424
425/**
426 * nforce2_init - initializes the nForce2 CPUFreq driver
427 *
428 * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
429 * devices, -EINVAL on problems during initialization, and zero on
430 * success.
431 */
432static int __init nforce2_init(void)
433{
434 /* TODO: do we need to detect the processor? */
435
436 /* detect chipset */
437 if (nforce2_detect_chipset()) {
438 printk(KERN_ERR "cpufreq: No nForce2 chipset.\n");
439 return -ENODEV;
440 }
441
442 return cpufreq_register_driver(&nforce2_driver);
443}
444
445/**
446 * nforce2_exit - unregisters cpufreq module
447 *
448 * Unregisters nForce2 FSB change support.
449 */
450static void __exit nforce2_exit(void)
451{
452 cpufreq_unregister_driver(&nforce2_driver);
453}
454
455module_init(nforce2_init);
456module_exit(nforce2_exit);
457
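For reference, the PLL word layout produced by NFORCE2_PLL() and decoded by nforce2_calc_fsb() above can be exercised with a small stand-alone sketch; the multiplier/divider pair used here is an arbitrary example, not a value read back from real nForce2 hardware:

#include <stdio.h>

#define NFORCE2_XTAL 25	/* 25 MHz reference clock, as in the driver above */
#define NFORCE2_PLL(mul, div) (0x100000 | ((mul) << 8) | (div))

/* same arithmetic as nforce2_calc_fsb(): FSB = XTAL * mul / div */
static int calc_fsb(int pll)
{
	unsigned char mul = (pll >> 8) & 0xff;
	unsigned char div = pll & 0xff;

	return div ? NFORCE2_XTAL * mul / div : 0;
}

int main(void)
{
	int pll = NFORCE2_PLL(16, 2);	/* example: multiplier 16, divider 2 */

	printf("PLL word 0x%06x -> FSB %d MHz\n", pll, calc_fsb(pll));	/* 200 MHz */
	return 0;
}

nforce2_calc_pll() simply brute-forces this relation in the other direction, scanning multiplier/divider pairs until one reproduces the requested FSB.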
diff --git a/arch/i386/kernel/cpu/cpufreq/elanfreq.c b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
new file mode 100644
index 000000000000..3f7caa4ae6d6
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
@@ -0,0 +1,312 @@
1/*
2 * elanfreq: cpufreq driver for the AMD ELAN family
3 *
4 * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
5 *
6 * Parts of this code are (c) Sven Geggus <sven@geggus.net>
7 *
8 * All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include <linux/cpufreq.h>
26
27#include <asm/msr.h>
28#include <asm/timex.h>
29#include <asm/io.h>
30
31#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
32#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
33
34/* Module parameter */
35static int max_freq;
36
37struct s_elan_multiplier {
38 int clock; /* frequency in kHz */
39 int val40h; /* PMU Force Mode register */
40 int val80h; /* CPU Clock Speed Register */
41};
42
43/*
44 * It is important that the frequencies
45 * are listed in ascending order here!
46 */
47struct s_elan_multiplier elan_multiplier[] = {
48 {1000, 0x02, 0x18},
49 {2000, 0x02, 0x10},
50 {4000, 0x02, 0x08},
51 {8000, 0x00, 0x00},
52 {16000, 0x00, 0x02},
53 {33000, 0x00, 0x04},
54 {66000, 0x01, 0x04},
55 {99000, 0x01, 0x05}
56};
57
58static struct cpufreq_frequency_table elanfreq_table[] = {
59 {0, 1000},
60 {1, 2000},
61 {2, 4000},
62 {3, 8000},
63 {4, 16000},
64 {5, 33000},
65 {6, 66000},
66 {7, 99000},
67 {0, CPUFREQ_TABLE_END},
68};
69
70
71/**
72 * elanfreq_get_cpu_frequency: determine current cpu speed
73 *
74 * Finds out at which frequency the CPU of the Elan SOC runs
75 * at the moment. Frequencies from 1 to 33 MHz are generated
76 * the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
77 * and have the rest of the chip running with 33 MHz.
78 */
79
80static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
81{
82 u8 clockspeed_reg; /* Clock Speed Register */
83
84 local_irq_disable();
85 outb_p(0x80,REG_CSCIR);
86 clockspeed_reg = inb_p(REG_CSCDR);
87 local_irq_enable();
88
89 if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; }
90
91 /* Are we in CPU clock multiplied mode (66/99 MHz)? */
92 if ((clockspeed_reg & 0xE0) == 0xC0) {
93 if ((clockspeed_reg & 0x01) == 0) {
94 return 66000;
95 } else {
96 return 99000;
97 }
98 }
99
100 /* 33 MHz is not 32 MHz... */
101 if ((clockspeed_reg & 0xE0)==0xA0)
102 return 33000;
103
104 return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
105}
106
107
108/**
109 * elanfreq_set_cpu_frequency: Change the CPU core frequency
110 * @cpu: cpu number
111 * @freq: frequency in kHz
112 *
113 * This function takes a frequency value and changes the CPU frequency
114 * according to this. Note that the frequency has to be checked by
115 * elanfreq_validatespeed() for correctness!
116 *
117 * There is no return value.
118 */
119
120static void elanfreq_set_cpu_state (unsigned int state) {
121
122 struct cpufreq_freqs freqs;
123
124 freqs.old = elanfreq_get_cpu_frequency(0);
125 freqs.new = elan_multiplier[state].clock;
126 freqs.cpu = 0; /* elanfreq.c is UP only driver */
127
128 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
129
130 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock);
131
132
133 /*
134 * Access to the Elan's internal registers is indexed via
135 * 0x22: Chip Setup & Control Register Index Register (CSCI)
136 * 0x23: Chip Setup & Control Register Data Register (CSCD)
137 *
138 */
139
140 /*
141 * 0x40 is the Power Management Unit's Force Mode Register.
142 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
143 */
144
145 local_irq_disable();
146 outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
147 outb_p(0x00,REG_CSCDR);
148 local_irq_enable(); /* wait till internal pipelines and */
149 udelay(1000); /* buffers have cleaned up */
150
151 local_irq_disable();
152
153 /* now, set the CPU clock speed register (0x80) */
154 outb_p(0x80,REG_CSCIR);
155 outb_p(elan_multiplier[state].val80h,REG_CSCDR);
156
157 /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
158 outb_p(0x40,REG_CSCIR);
159 outb_p(elan_multiplier[state].val40h,REG_CSCDR);
160 udelay(10000);
161 local_irq_enable();
162
163 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
164};
165
166
167/**
168 * elanfreq_validatespeed: test if frequency range is valid
169 * @policy: the policy to validate
170 *
171 * This function checks if a given frequency range in kHz is valid
172 * for the hardware supported by the driver.
173 */
174
175static int elanfreq_verify (struct cpufreq_policy *policy)
176{
177 return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
178}
179
180static int elanfreq_target (struct cpufreq_policy *policy,
181 unsigned int target_freq,
182 unsigned int relation)
183{
184 unsigned int newstate = 0;
185
186 if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate))
187 return -EINVAL;
188
189 elanfreq_set_cpu_state(newstate);
190
191 return 0;
192}
193
194
195/*
196 * Module init and exit code
197 */
198
199static int elanfreq_cpu_init(struct cpufreq_policy *policy)
200{
201 struct cpuinfo_x86 *c = cpu_data;
202 unsigned int i;
203 int result;
204
205 /* capability check */
206 if ((c->x86_vendor != X86_VENDOR_AMD) ||
207 (c->x86 != 4) || (c->x86_model!=10))
208 return -ENODEV;
209
210 /* max freq */
211 if (!max_freq)
212 max_freq = elanfreq_get_cpu_frequency(0);
213
214 /* table init */
215 for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
216 if (elanfreq_table[i].frequency > max_freq)
217 elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
218 }
219
220 /* cpuinfo and default policy values */
221 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
222 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
223 policy->cur = elanfreq_get_cpu_frequency(0);
224
225 result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
226 if (result)
227 return (result);
228
229 cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
230
231 return 0;
232}
233
234
235static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
236{
237 cpufreq_frequency_table_put_attr(policy->cpu);
238 return 0;
239}
240
241
242#ifndef MODULE
243/**
244 * elanfreq_setup - elanfreq command line parameter parsing
245 *
246 * elanfreq command line parameter. Use:
247 * elanfreq=66000
248 * to set the maximum CPU frequency to 66 MHz. Note that in
249 * case you do not give this boot parameter, the maximum
250 * frequency will fall back to _current_ CPU frequency which
251 * might be lower. If you build this as a module, use the
252 * max_freq module parameter instead.
253 */
254static int __init elanfreq_setup(char *str)
255{
256 max_freq = simple_strtoul(str, &str, 0);
257 printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
258 return 1;
259}
260__setup("elanfreq=", elanfreq_setup);
261#endif
262
263
264static struct freq_attr* elanfreq_attr[] = {
265 &cpufreq_freq_attr_scaling_available_freqs,
266 NULL,
267};
268
269
270static struct cpufreq_driver elanfreq_driver = {
271 .get = elanfreq_get_cpu_frequency,
272 .verify = elanfreq_verify,
273 .target = elanfreq_target,
274 .init = elanfreq_cpu_init,
275 .exit = elanfreq_cpu_exit,
276 .name = "elanfreq",
277 .owner = THIS_MODULE,
278 .attr = elanfreq_attr,
279};
280
281
282static int __init elanfreq_init(void)
283{
284 struct cpuinfo_x86 *c = cpu_data;
285
286 /* Test if we have the right hardware */
287 if ((c->x86_vendor != X86_VENDOR_AMD) ||
288 (c->x86 != 4) || (c->x86_model!=10))
289 {
290 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
291 return -ENODEV;
292 }
293
294 return cpufreq_register_driver(&elanfreq_driver);
295}
296
297
298static void __exit elanfreq_exit(void)
299{
300 cpufreq_unregister_driver(&elanfreq_driver);
301}
302
303
304module_param (max_freq, int, 0444);
305
306MODULE_LICENSE("GPL");
307MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>");
308MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
309
310module_init(elanfreq_init);
311module_exit(elanfreq_exit);
312
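As described by elanfreq_setup() and the max_freq module parameter above, the maximum CPU frequency is given in kHz; for example (66000 kHz is the value used in the comment above, shown both as a boot parameter for the built-in driver and as the equivalent module option):

    elanfreq=66000                      # kernel command line, built-in driver
    modprobe elanfreq max_freq=66000    # same limit when built as a module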
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
new file mode 100644
index 000000000000..1a49adb1f4a6
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -0,0 +1,502 @@
1/*
2 * Cyrix MediaGX and NatSemi Geode Suspend Modulation
3 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
4 * (C) 2002 Hiroshi Miura <miura@da-cha.org>
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation
10 *
11 * The author(s) of this software shall not be held liable for damages
12 * of any nature resulting due to the use of this software. This
13 * software is provided AS-IS with no warranties.
14 *
15 * Theoretical note:
16 *
17 * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
18 *
19 * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
20 * is based on Suspend Modulation.
21 *
22 * Suspend Modulation works by asserting and de-asserting the SUSP# pin
23 * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
24 * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
25 * asserted then power consumption is reduced.
26 *
27 * Suspend Modulation's OFF/ON duration are configurable
28 * with 'Suspend Modulation OFF Count Register'
29 * and 'Suspend Modulation ON Count Register'.
30 * These registers are 8bit counters that represent the number of
31 * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
32 * to the processor.
33 *
34 * These counters define a ratio which is the effective frequency
35 * of operation of the system.
36 *
37 *
38 * F_eff = Fgx * OFF Count / (OFF Count + ON Count)
39 *
40 *
41 * 0 <= On Count, Off Count <= 255
42 *
43 * From these limits, we can get register values
44 *
45 * off_duration + on_duration <= MAX_DURATION
46 * on_duration = off_duration * (stock_freq - freq) / freq
47 *
48 * off_duration = (freq * DURATION) / stock_freq
49 * on_duration = DURATION - off_duration
50 *
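 *	A worked example with illustrative numbers (not taken from any
 *	datasheet): for stock_freq = 200000 kHz, DURATION = 255 and a
 *	requested freq = 50000 kHz,
 *
 *	off_duration = (50000 * 255) / 200000 = 63
 *	on_duration  = 255 - 63              = 192
 *	F_eff        = 200000 * 63 / 255     ~= 49400 kHz
 *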
51 *
52 *---------------------------------------------------------------------------
53 *
54 * ChangeLog:
55 * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
56 * - fix on/off register mistake
57 * - fix cpu_khz calc when it stops cpu modulation.
58 *
59 * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
60 * - rewrite for Cyrix MediaGX Cx5510/5520 and
61 * NatSemi Geode Cs5530(A).
62 *
63 * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
64 * - cs5530_mod patch for 2.4.19-rc1.
65 *
66 *---------------------------------------------------------------------------
67 *
68 * Todo
69 * Test on machines with 5510, 5530, 5530A
70 */
71
72/************************************************************************
73 * Suspend Modulation - Definitions *
74 ************************************************************************/
75
76#include <linux/kernel.h>
77#include <linux/module.h>
78#include <linux/init.h>
79#include <linux/smp.h>
80#include <linux/cpufreq.h>
81#include <linux/pci.h>
82#include <asm/processor.h>
83#include <asm/errno.h>
84
85/* PCI config registers, all at F0 */
86#define PCI_PMER1 0x80 /* power management enable register 1 */
87#define PCI_PMER2 0x81 /* power management enable register 2 */
88#define PCI_PMER3 0x82 /* power management enable register 3 */
89#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
90#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
91#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
92#define PCI_MODON 0x95 /* suspend modulation ON counter register */
93#define PCI_SUSCFG 0x96 /* suspend configuration register */
94
95/* PMER1 bits */
96#define GPM (1<<0) /* global power management */
97#define GIT (1<<1) /* globally enable PM device idle timers */
98#define GTR (1<<2) /* globally enable IO traps */
99#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
100#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
101
102/* SUSCFG bits */
103#define SUSMOD (1<<0) /* enable/disable suspend modulation */
104/* the bits below are only supported on the CS5530 (rev. 1.2 and later) and CS5530A */
105#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
106 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
107#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
108/* the bits below are only supported on the CS5530A */
109#define PWRSVE_ISA (1<<3) /* stop ISA clock */
110#define PWRSVE (1<<4) /* active idle */
111
112struct gxfreq_params {
113 u8 on_duration;
114 u8 off_duration;
115 u8 pci_suscfg;
116 u8 pci_pmer1;
117 u8 pci_pmer2;
118 u8 pci_rev;
119 struct pci_dev *cs55x0;
120};
121
122static struct gxfreq_params *gx_params;
123static int stock_freq;
124
125/* PCI bus clock in kHz - defaults to 30000 if cpu_khz is not available */
126static int pci_busclk = 0;
127module_param (pci_busclk, int, 0444);
128
129/* maximum duration for which the cpu may be suspended
130 * (32us * MAX_DURATION). If no parameter is given, this defaults
131 * to 255.
132 * Note that this leads to a maximum of 8 ms(!) where the CPU clock
133 * is suspended -- processing power is just 0.39% of what it used to be,
134 * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
135static int max_duration = 255;
136module_param (max_duration, int, 0444);
137
138/* For the default policy, we want at least some processing power
139 * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
140 */
141#define POLICY_MIN_DIV 20
142
143
144#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg)
145
146/**
147 * we can detect the core multiplier from dir0_lsb
148 * from GX1 datasheet p.56,
149 * MULT[3:0]:
150 * 0000 = SYSCLK multiplied by 4 (test only)
151 * 0001 = SYSCLK multiplied by 10
152 * 0010 = SYSCLK multiplied by 4
153 * 0011 = SYSCLK multiplied by 6
154 * 0100 = SYSCLK multiplied by 9
155 * 0101 = SYSCLK multiplied by 5
156 * 0110 = SYSCLK multiplied by 7
157 * 0111 = SYSCLK multiplied by 8
158 * of 33.3MHz
159 **/
160static int gx_freq_mult[16] = {
161 4, 10, 4, 6, 9, 5, 7, 8,
162 0, 0, 0, 0, 0, 0, 0, 0
163};
164
165
166/****************************************************************
167 * Low Level chipset interface *
168 ****************************************************************/
169static struct pci_device_id gx_chipset_tbl[] __initdata = {
170 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
171 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
172 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
173 { 0, },
174};
175
176/**
177 * gx_detect_chipset:
178 *
179 **/
180static __init struct pci_dev *gx_detect_chipset(void)
181{
182 struct pci_dev *gx_pci = NULL;
183
184 /* check if CPU is a MediaGX or a Geode. */
185 if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
186 (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
187 dprintk("error: no MediaGX/Geode processor found!\n");
188 return NULL;
189 }
190
191 /* detect which companion chip is used */
192 while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
193 if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) {
194 return gx_pci;
195 }
196 }
197
198 dprintk("error: no supported chipset found!\n");
199 return NULL;
200}
201
202/**
203 * gx_get_cpuspeed:
204 *
205 * Finds out at which effective frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
206 */
207static unsigned int gx_get_cpuspeed(unsigned int cpu)
208{
209 if ((gx_params->pci_suscfg & SUSMOD) == 0)
210 return stock_freq;
211
212 return (stock_freq * gx_params->off_duration)
213 / (gx_params->on_duration + gx_params->off_duration);
214}
215
216/**
217 * gx_validate_speed:
218 * determine current cpu speed
219 *
220**/
221
222static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration)
223{
224 unsigned int i;
225 u8 tmp_on, tmp_off;
226 int old_tmp_freq = stock_freq;
227 int tmp_freq;
228
229 *off_duration=1;
230 *on_duration=0;
231
232 for (i=max_duration; i>0; i--) {
233 tmp_off = ((khz * i) / stock_freq) & 0xff;
234 tmp_on = i - tmp_off;
235 tmp_freq = (stock_freq * tmp_off) / i;
236 /* if this relation is closer to khz, use this. If it's equal,
237 * prefer it, too - lower latency */
238 if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
239 *on_duration = tmp_on;
240 *off_duration = tmp_off;
241 old_tmp_freq = tmp_freq;
242 }
243 }
244
245 return old_tmp_freq;
246}
247
248
249/**
250 * gx_set_cpuspeed:
251 * set cpu speed in khz.
252 **/
253
254static void gx_set_cpuspeed(unsigned int khz)
255{
256 u8 suscfg, pmer1;
257 unsigned int new_khz;
258 unsigned long flags;
259 struct cpufreq_freqs freqs;
260
261
262 freqs.cpu = 0;
263 freqs.old = gx_get_cpuspeed(0);
264
265 new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration);
266
267 freqs.new = new_khz;
268
269 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
270 local_irq_save(flags);
271
272 if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */
273 switch (gx_params->cs55x0->device) {
274 case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
275 pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
276 /* FIXME: need to test other values -- Zwane,Miura */
277 pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */
278 pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */
279 pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);
280
281 if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */
282 suscfg = gx_params->pci_suscfg | SUSMOD;
283 } else { /* CS5530A,B.. */
284 suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
285 }
286 break;
287 case PCI_DEVICE_ID_CYRIX_5520:
288 case PCI_DEVICE_ID_CYRIX_5510:
289 suscfg = gx_params->pci_suscfg | SUSMOD;
290 break;
291 default:
292 local_irq_restore(flags);
293 dprintk("fatal: attempted to set an unknown chipset.\n");
294 return;
295 }
296 } else {
297 suscfg = gx_params->pci_suscfg & ~(SUSMOD);
298 gx_params->off_duration = 0;
299 gx_params->on_duration = 0;
300 dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n");
301 }
302
303 pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
304 pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);
305
306 pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
307 pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
308
309 local_irq_restore(flags);
310
311 gx_params->pci_suscfg = suscfg;
312
313 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
314
315 dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
316 gx_params->on_duration * 32, gx_params->off_duration * 32);
317 dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
318}
319
320/****************************************************************
321 * High level functions *
322 ****************************************************************/
323
324/*
325 * cpufreq_gx_verify: test if frequency range is valid
326 *
327 * This function checks if a given frequency range in kHz is valid
328 * for the hardware supported by the driver.
329 */
330
331static int cpufreq_gx_verify(struct cpufreq_policy *policy)
332{
333 unsigned int tmp_freq = 0;
334 u8 tmp1, tmp2;
335
336 if (!stock_freq || !policy)
337 return -EINVAL;
338
339 policy->cpu = 0;
340 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
341
342 /* it needs to be ensured that at least one supported frequency is
343 * within policy->min and policy->max. If it is not, policy->max
344 * needs to be increased until one frequency is supported.
345 * policy->min may not be decreased, though. This way we guarantee a
346 * specific processing capacity.
347 */
348 tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
349 if (tmp_freq < policy->min)
350 tmp_freq += stock_freq / max_duration;
351 policy->min = tmp_freq;
352 if (policy->min > policy->max)
353 policy->max = tmp_freq;
354 tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
355 if (tmp_freq > policy->max)
356 tmp_freq -= stock_freq / max_duration;
357 policy->max = tmp_freq;
358 if (policy->max < policy->min)
359 policy->max = policy->min;
360 cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
361
362 return 0;
363}
364
365/*
366 * cpufreq_gx_target:
367 *
368 */
369static int cpufreq_gx_target(struct cpufreq_policy *policy,
370 unsigned int target_freq,
371 unsigned int relation)
372{
373 u8 tmp1, tmp2;
374 unsigned int tmp_freq;
375
376 if (!stock_freq || !policy)
377 return -EINVAL;
378
379 policy->cpu = 0;
380
381 tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
382 while (tmp_freq < policy->min) {
383 tmp_freq += stock_freq / max_duration;
384 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
385 }
386 while (tmp_freq > policy->max) {
387 tmp_freq -= stock_freq / max_duration;
388 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
389 }
390
391 gx_set_cpuspeed(tmp_freq);
392
393 return 0;
394}
395
396static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
397{
398 unsigned int maxfreq, curfreq;
399
400 if (!policy || policy->cpu != 0)
401 return -ENODEV;
402
403 /* determine maximum frequency */
404 if (pci_busclk) {
405 maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
406 } else if (cpu_khz) {
407 maxfreq = cpu_khz;
408 } else {
409 maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
410 }
411 stock_freq = maxfreq;
412 curfreq = gx_get_cpuspeed(0);
413
414 dprintk("cpu max frequency is %d.\n", maxfreq);
415 dprintk("cpu current frequency is %dkHz.\n",curfreq);
416
417 /* setup basic struct for cpufreq API */
418 policy->cpu = 0;
419
420 if (max_duration < POLICY_MIN_DIV)
421 policy->min = maxfreq / max_duration;
422 else
423 policy->min = maxfreq / POLICY_MIN_DIV;
424 policy->max = maxfreq;
425 policy->cur = curfreq;
426 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
427 policy->cpuinfo.min_freq = maxfreq / max_duration;
428 policy->cpuinfo.max_freq = maxfreq;
429 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
430
431 return 0;
432}
433
434/*
435 * cpufreq_gx_init:
436 * MediaGX/Geode GX initialize cpufreq driver
437 */
438static struct cpufreq_driver gx_suspmod_driver = {
439 .get = gx_get_cpuspeed,
440 .verify = cpufreq_gx_verify,
441 .target = cpufreq_gx_target,
442 .init = cpufreq_gx_cpu_init,
443 .name = "gx-suspmod",
444 .owner = THIS_MODULE,
445};
446
447static int __init cpufreq_gx_init(void)
448{
449 int ret;
450 struct gxfreq_params *params;
451 struct pci_dev *gx_pci;
452 u32 class_rev;
453
454 /* Test if we have the right hardware */
455 if ((gx_pci = gx_detect_chipset()) == NULL)
456 return -ENODEV;
457
458 /* check whether module parameters are sane */
459 if (max_duration > 0xff)
460 max_duration = 0xff;
461
462 dprintk("geode suspend modulation available.\n");
463
464 params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
465 if (params == NULL)
466 return -ENOMEM;
467 memset(params, 0, sizeof(struct gxfreq_params));
468
469 params->cs55x0 = gx_pci;
470 gx_params = params;
471
472 /* keep cs55x0 configurations */
473 pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
474 pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
475 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
476 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
477 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
478 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
479	params->pci_rev = class_rev & 0xff;
480
481 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
482 kfree(params);
483 return ret; /* register error! */
484 }
485
486 return 0;
487}
488
489static void __exit cpufreq_gx_exit(void)
490{
491 cpufreq_unregister_driver(&gx_suspmod_driver);
492 pci_dev_put(gx_params->cs55x0);
493 kfree(gx_params);
494}
495
496MODULE_AUTHOR ("Hiroshi Miura <miura@da-cha.org>");
497MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
498MODULE_LICENSE ("GPL");
499
500module_init(cpufreq_gx_init);
501module_exit(cpufreq_gx_exit);
502
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
new file mode 100644
index 000000000000..ab0f9f5aac11
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -0,0 +1,658 @@
1/*
2 * (C) 2001-2004 Dave Jones. <davej@codemonkey.org.uk>
3 * (C) 2002 Padraig Brady. <padraig@antefacto.com>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon datasheets & sample CPUs kindly provided by VIA.
7 *
8 * VIA have currently 3 different versions of Longhaul.
9 * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
10 * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
11 * Version 2 of longhaul is the same as v1, but adds voltage scaling.
12 * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
13 * Voltage scaling support has currently been disabled in this driver
14 * until we have code that gets it right.
15 * Version 3 of longhaul got renamed to Powersaver and redesigned
16 * to use the POWERSAVER MSR at 0x110a.
17 * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
18 * It's pretty much the same, feature-wise, as longhaul v2, though
19 * there is provision for scaling the FSB too; this doesn't work
20 * well in practice, so we don't even try to use it.
21 *
22 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/init.h>
29#include <linux/cpufreq.h>
30#include <linux/slab.h>
31#include <linux/string.h>
32
33#include <asm/msr.h>
34#include <asm/timex.h>
35#include <asm/io.h>
36
37#include "longhaul.h"
38
39#define PFX "longhaul: "
40
41#define TYPE_LONGHAUL_V1 1
42#define TYPE_LONGHAUL_V2 2
43#define TYPE_POWERSAVER 3
44
45#define CPU_SAMUEL 1
46#define CPU_SAMUEL2 2
47#define CPU_EZRA 3
48#define CPU_EZRA_T 4
49#define CPU_NEHEMIAH 5
50
51static int cpu_model;
52static unsigned int numscales=16, numvscales;
53static unsigned int fsb;
54static int minvid, maxvid;
55static unsigned int minmult, maxmult;
56static int can_scale_voltage;
57static int vrmrev;
58
59/* Module parameters */
60static int dont_scale_voltage;
61
62
63#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
64
65
66#define __hlt() __asm__ __volatile__("hlt": : :"memory")
67
68/* Clock ratios multiplied by 10 */
69static int clock_ratio[32];
70static int eblcr_table[32];
71static int voltage_table[32];
72static unsigned int highest_speed, lowest_speed; /* kHz */
73static int longhaul_version;
74static struct cpufreq_frequency_table *longhaul_table;
75
76#ifdef CONFIG_CPU_FREQ_DEBUG
77static char speedbuffer[8];
78
79static char *print_speed(int speed)
80{
81 if (speed > 1000) {
82 if (speed%1000 == 0)
83 sprintf (speedbuffer, "%dGHz", speed/1000);
84 else
85 sprintf (speedbuffer, "%d.%dGHz", speed/1000, (speed%1000)/100);
86 } else
87 sprintf (speedbuffer, "%dMHz", speed);
88
89 return speedbuffer;
90}
91#endif
92
93
94static unsigned int calc_speed(int mult)
95{
96 int khz;
97 khz = (mult/10)*fsb;
98 if (mult%10)
99 khz += fsb/2;
100 khz *= 1000;
101 return khz;
102}
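/* Worked example: mult = 95 (9.5x) and fsb = 133 gives
 * (9*133 + 133/2) * 1000 = 1263000 kHz, close to the exact
 * 9.5 * 133 MHz = 1263500 kHz. */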
103
104
105static int longhaul_get_cpu_mult(void)
106{
107 unsigned long invalue=0,lo, hi;
108
109 rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
110 invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22;
111 if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) {
112 if (lo & (1<<27))
113 invalue+=16;
114 }
115 return eblcr_table[invalue];
116}
117
118
119static void do_powersaver(union msr_longhaul *longhaul,
120 unsigned int clock_ratio_index)
121{
122 int version;
123
124 switch (cpu_model) {
125 case CPU_EZRA_T:
126 version = 3;
127 break;
128 case CPU_NEHEMIAH:
129 version = 0xf;
130 break;
131 default:
132 return;
133 }
134
135 rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
136 longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
137 longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
138 longhaul->bits.EnableSoftBusRatio = 1;
139 longhaul->bits.RevisionKey = 0;
140 local_irq_disable();
141 wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
142 local_irq_enable();
143 __hlt();
144
145 rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
146 longhaul->bits.EnableSoftBusRatio = 0;
147 longhaul->bits.RevisionKey = version;
148 local_irq_disable();
149 wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
150 local_irq_enable();
151}
152
153/**
154 * longhaul_setstate()
155 * @clock_ratio_index : bitpattern of the new multiplier.
156 *
157 * Sets a new clock ratio.
158 */
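/* For example, on a Samuel (C5A) a clock_ratio_index of 0x8 selects
 * clock_ratio[0x8] = 60, i.e. a 6.0x multiplier (see longhaul.h). */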
159
160static void longhaul_setstate(unsigned int clock_ratio_index)
161{
162 int speed, mult;
163 struct cpufreq_freqs freqs;
164 union msr_longhaul longhaul;
165 union msr_bcr2 bcr2;
166 static unsigned int old_ratio=-1;
167
168 if (old_ratio == clock_ratio_index)
169 return;
170 old_ratio = clock_ratio_index;
171
172 mult = clock_ratio[clock_ratio_index];
173 if (mult == -1)
174 return;
175
176 speed = calc_speed(mult);
177 if ((speed > highest_speed) || (speed < lowest_speed))
178 return;
179
180 freqs.old = calc_speed(longhaul_get_cpu_mult());
181 freqs.new = speed;
182 freqs.cpu = 0; /* longhaul.c is UP only driver */
183
184 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
185
186 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
187 fsb, mult/10, mult%10, print_speed(speed/1000));
188
189 switch (longhaul_version) {
190
191 /*
192 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
193 * Software controlled multipliers only.
194 *
195 * *NB* Until we get voltage scaling working v1 & v2 are the same code.
196 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
197 */
198 case TYPE_LONGHAUL_V1:
199 case TYPE_LONGHAUL_V2:
200 rdmsrl (MSR_VIA_BCR2, bcr2.val);
201 /* Enable software clock multiplier */
202 bcr2.bits.ESOFTBF = 1;
203 bcr2.bits.CLOCKMUL = clock_ratio_index;
204 local_irq_disable();
205 wrmsrl (MSR_VIA_BCR2, bcr2.val);
206 local_irq_enable();
207
208 __hlt();
209
210 /* Disable software clock multiplier */
211 rdmsrl (MSR_VIA_BCR2, bcr2.val);
212 bcr2.bits.ESOFTBF = 0;
213 local_irq_disable();
214 wrmsrl (MSR_VIA_BCR2, bcr2.val);
215 local_irq_enable();
216 break;
217
218 /*
219 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
220 * We can scale voltage with this too, but that's currently
221 * disabled until we come up with a decent 'match freq to voltage'
222 * algorithm.
223 * When we add voltage scaling, we will also need to do the
224 * voltage/freq setting in order depending on the direction
225 * of scaling (like we do in powernow-k7.c)
226 * Nehemiah can do FSB scaling too, but this has never been proven
227 * to work in practice.
228 */
229 case TYPE_POWERSAVER:
230 do_powersaver(&longhaul, clock_ratio_index);
231 break;
232 }
233
234 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
235}
236
237/*
238 * Centaur decided to make life a little more tricky.
239 * Only longhaul v1 is allowed to read EBLCR BSEL[0:1].
240 * Samuel2 and above have to try and guess what the FSB is.
241 * We do this by assuming we booted at maximum multiplier, and interpolate
242 * between that value multiplied by possible FSBs and cpu_mhz which
243 * was calculated at boot time. Really ugly, but no other way to do this.
244 */
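/* Worked example (hypothetical numbers): booted at maxmult = 80 (8.0x)
 * with cpu_khz around 800000. guess_fsb() rounds the speed to 800;
 * _guess(100) also yields 800, while _guess(66) gives 528 and
 * _guess(133) gives 1056, so the FSB is taken to be 100 MHz. */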
245
246#define ROUNDING 0xf
247
248static int _guess(int guess)
249{
250 int target;
251
252 target = ((maxmult/10)*guess);
253 if (maxmult%10 != 0)
254 target += (guess/2);
255 target += ROUNDING/2;
256 target &= ~ROUNDING;
257 return target;
258}
259
260
261static int guess_fsb(void)
262{
263 int speed = (cpu_khz/1000);
264 int i;
265 int speeds[3] = { 66, 100, 133 };
266
267 speed += ROUNDING/2;
268 speed &= ~ROUNDING;
269
270 for (i=0; i<3; i++) {
271 if (_guess(speeds[i]) == speed)
272 return speeds[i];
273 }
274 return 0;
275}
276
277
278static int __init longhaul_get_ranges(void)
279{
280 unsigned long invalue;
281 unsigned int multipliers[32]= {
282 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
283 -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
284 unsigned int j, k = 0;
285 union msr_longhaul longhaul;
286 unsigned long lo, hi;
287 unsigned int eblcr_fsb_table_v1[] = { 66, 133, 100, -1 };
288 unsigned int eblcr_fsb_table_v2[] = { 133, 100, -1, 66 };
289
290 switch (longhaul_version) {
291 case TYPE_LONGHAUL_V1:
292 case TYPE_LONGHAUL_V2:
293 /* Ugh, Longhaul v1 didn't have the min/max MSRs.
294 Assume min=3.0x & max = whatever we booted at. */
295 minmult = 30;
296 maxmult = longhaul_get_cpu_mult();
297 rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
298 invalue = (lo & (1<<18|1<<19)) >>18;
299 if (cpu_model==CPU_SAMUEL || cpu_model==CPU_SAMUEL2)
300 fsb = eblcr_fsb_table_v1[invalue];
301 else
302 fsb = guess_fsb();
303 break;
304
305 case TYPE_POWERSAVER:
306 /* Ezra-T */
307 if (cpu_model==CPU_EZRA_T) {
308 rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
309 invalue = longhaul.bits.MaxMHzBR;
310 if (longhaul.bits.MaxMHzBR4)
311 invalue += 16;
312 maxmult=multipliers[invalue];
313
314 invalue = longhaul.bits.MinMHzBR;
315 if (longhaul.bits.MinMHzBR4 == 1)
316 minmult = 30;
317 else
318 minmult = multipliers[invalue];
319 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
320 break;
321 }
322
323 /* Nehemiah */
324 if (cpu_model==CPU_NEHEMIAH) {
325 rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
326
327 /*
328 * TODO: This code works, but raises a lot of questions.
329	 * - Some Nehemiahs seem to have broken Min/MaxMHzBR values.
330	 *   We get around this by using a hardcoded multiplier of 4.0x
331	 *   for the minimum speed, and the speed we booted up at for the max.
332	 *   This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
333	 * - According to some VIA documentation, EBLCR is only present
334	 *   in pre-Nehemiah C3s. How this still works is a mystery.
335	 *   We're possibly using something undocumented and unsupported,
336	 *   but it works, so we don't grumble.
337 */
338 minmult=40;
339 maxmult=longhaul_get_cpu_mult();
340
341		/* Starting with the 1.2GHz parts, there's a 200MHz bus. */
342 if ((cpu_khz/1000) > 1200)
343 fsb = 200;
344 else
345 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
346 break;
347 }
348 }
349
350 dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
351 minmult/10, minmult%10, maxmult/10, maxmult%10);
352
353 if (fsb == -1) {
354 printk (KERN_INFO PFX "Invalid (reserved) FSB!\n");
355 return -EINVAL;
356 }
357
358 highest_speed = calc_speed(maxmult);
359 lowest_speed = calc_speed(minmult);
360 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
361 print_speed(lowest_speed/1000),
362 print_speed(highest_speed/1000));
363
364 if (lowest_speed == highest_speed) {
365 printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n");
366 return -EINVAL;
367 }
368 if (lowest_speed > highest_speed) {
369 printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
370 lowest_speed, highest_speed);
371 return -EINVAL;
372 }
373
374 longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL);
375 if(!longhaul_table)
376 return -ENOMEM;
377
378 for (j=0; j < numscales; j++) {
379 unsigned int ratio;
380 ratio = clock_ratio[j];
381 if (ratio == -1)
382 continue;
383 if (ratio > maxmult || ratio < minmult)
384 continue;
385 longhaul_table[k].frequency = calc_speed(ratio);
386 longhaul_table[k].index = j;
387 k++;
388 }
389
390 longhaul_table[k].frequency = CPUFREQ_TABLE_END;
391 if (!k) {
392 kfree (longhaul_table);
393 return -EINVAL;
394 }
395
396 return 0;
397}
398
399
400static void __init longhaul_setup_voltagescaling(void)
401{
402 union msr_longhaul longhaul;
403
404 rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
405
406 if (!(longhaul.bits.RevisionID & 1))
407 return;
408
409 minvid = longhaul.bits.MinimumVID;
410 maxvid = longhaul.bits.MaximumVID;
411 vrmrev = longhaul.bits.VRMRev;
412
413 if (minvid == 0 || maxvid == 0) {
414 printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
415 "Voltage scaling disabled.\n",
416 minvid/1000, minvid%1000, maxvid/1000, maxvid%1000);
417 return;
418 }
419
420 if (minvid == maxvid) {
421 printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are "
422 "both %d.%03d. Voltage scaling disabled\n",
423 maxvid/1000, maxvid%1000);
424 return;
425 }
426
427 if (vrmrev==0) {
428 dprintk ("VRM 8.5 \n");
429 memcpy (voltage_table, vrm85scales, sizeof(voltage_table));
430 numvscales = (voltage_table[maxvid]-voltage_table[minvid])/25;
431 } else {
432 dprintk ("Mobile VRM \n");
433 memcpy (voltage_table, mobilevrmscales, sizeof(voltage_table));
434 numvscales = (voltage_table[maxvid]-voltage_table[minvid])/5;
435 }
436
437 /* Current voltage isn't readable at first, so we need to
438 set it to a known value. The spec says to use maxvid */
439 longhaul.bits.RevisionKey = longhaul.bits.RevisionID; /* FIXME: This is bad. */
440 longhaul.bits.EnableSoftVID = 1;
441 longhaul.bits.SoftVID = maxvid;
442 wrmsrl (MSR_VIA_LONGHAUL, longhaul.val);
443
444 minvid = voltage_table[minvid];
445 maxvid = voltage_table[maxvid];
446
447 dprintk ("Min VID=%d.%03d Max VID=%d.%03d, %d possible voltage scales\n",
448 maxvid/1000, maxvid%1000, minvid/1000, minvid%1000, numvscales);
449
450 can_scale_voltage = 1;
451}
452
453
454static int longhaul_verify(struct cpufreq_policy *policy)
455{
456 return cpufreq_frequency_table_verify(policy, longhaul_table);
457}
458
459
460static int longhaul_target(struct cpufreq_policy *policy,
461 unsigned int target_freq, unsigned int relation)
462{
463 unsigned int table_index = 0;
464 unsigned int new_clock_ratio = 0;
465
466 if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index))
467 return -EINVAL;
468
469 new_clock_ratio = longhaul_table[table_index].index & 0xFF;
470
471 longhaul_setstate(new_clock_ratio);
472
473 return 0;
474}
475
476
477static unsigned int longhaul_get(unsigned int cpu)
478{
479 if (cpu)
480 return 0;
481 return calc_speed(longhaul_get_cpu_mult());
482}
483
484
485static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
486{
487 struct cpuinfo_x86 *c = cpu_data;
488 char *cpuname=NULL;
489 int ret;
490
491 switch (c->x86_model) {
492 case 6:
493 cpu_model = CPU_SAMUEL;
494 cpuname = "C3 'Samuel' [C5A]";
495 longhaul_version = TYPE_LONGHAUL_V1;
496 memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
497 memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr));
498 break;
499
500 case 7:
501 longhaul_version = TYPE_LONGHAUL_V1;
502 switch (c->x86_mask) {
503 case 0:
504 cpu_model = CPU_SAMUEL2;
505 cpuname = "C3 'Samuel 2' [C5B]";
506 /* Note, this is not a typo, early Samuel2's had Samuel1 ratios. */
507 memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
508 memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr));
509 break;
510 case 1 ... 15:
511 if (c->x86_mask < 8) {
512 cpu_model = CPU_SAMUEL2;
513 cpuname = "C3 'Samuel 2' [C5B]";
514 } else {
515 cpu_model = CPU_EZRA;
516 cpuname = "C3 'Ezra' [C5C]";
517 }
518 memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio));
519 memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr));
520 break;
521 }
522 break;
523
524 case 8:
525 cpu_model = CPU_EZRA_T;
526 cpuname = "C3 'Ezra-T' [C5M]";
527 longhaul_version = TYPE_POWERSAVER;
528 numscales=32;
529 memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio));
530 memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr));
531 break;
532
533 case 9:
534 cpu_model = CPU_NEHEMIAH;
535 longhaul_version = TYPE_POWERSAVER;
536 numscales=32;
537 switch (c->x86_mask) {
538 case 0 ... 1:
539 cpuname = "C3 'Nehemiah A' [C5N]";
540 memcpy (clock_ratio, nehemiah_a_clock_ratio, sizeof(nehemiah_a_clock_ratio));
541 memcpy (eblcr_table, nehemiah_a_eblcr, sizeof(nehemiah_a_eblcr));
542 break;
543 case 2 ... 4:
544 cpuname = "C3 'Nehemiah B' [C5N]";
545 memcpy (clock_ratio, nehemiah_b_clock_ratio, sizeof(nehemiah_b_clock_ratio));
546 memcpy (eblcr_table, nehemiah_b_eblcr, sizeof(nehemiah_b_eblcr));
547 break;
548 case 5 ... 15:
549 cpuname = "C3 'Nehemiah C' [C5N]";
550 memcpy (clock_ratio, nehemiah_c_clock_ratio, sizeof(nehemiah_c_clock_ratio));
551 memcpy (eblcr_table, nehemiah_c_eblcr, sizeof(nehemiah_c_eblcr));
552 break;
553 }
554 break;
555
556 default:
557 cpuname = "Unknown";
558 break;
559 }
560
561 printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
562 switch (longhaul_version) {
563 case TYPE_LONGHAUL_V1:
564 case TYPE_LONGHAUL_V2:
565 printk ("Longhaul v%d supported.\n", longhaul_version);
566 break;
567 case TYPE_POWERSAVER:
568 printk ("Powersaver supported.\n");
569 break;
570	}
571
572 ret = longhaul_get_ranges();
573 if (ret != 0)
574 return ret;
575
576 if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) &&
577 (dont_scale_voltage==0))
578 longhaul_setup_voltagescaling();
579
580 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
581 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
582 policy->cur = calc_speed(longhaul_get_cpu_mult());
583
584 ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
585 if (ret)
586 return ret;
587
588 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
589
590 return 0;
591}
592
593static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
594{
595 cpufreq_frequency_table_put_attr(policy->cpu);
596 return 0;
597}
598
599static struct freq_attr* longhaul_attr[] = {
600 &cpufreq_freq_attr_scaling_available_freqs,
601 NULL,
602};
603
604static struct cpufreq_driver longhaul_driver = {
605 .verify = longhaul_verify,
606 .target = longhaul_target,
607 .get = longhaul_get,
608 .init = longhaul_cpu_init,
609 .exit = __devexit_p(longhaul_cpu_exit),
610 .name = "longhaul",
611 .owner = THIS_MODULE,
612 .attr = longhaul_attr,
613};
614
615
616static int __init longhaul_init(void)
617{
618 struct cpuinfo_x86 *c = cpu_data;
619
620 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
621 return -ENODEV;
622
623 switch (c->x86_model) {
624 case 6 ... 9:
625 return cpufreq_register_driver(&longhaul_driver);
626 default:
627 printk (KERN_INFO PFX "Unknown VIA CPU. Contact davej@codemonkey.org.uk\n");
628 }
629
630 return -ENODEV;
631}
632
633
634static void __exit longhaul_exit(void)
635{
636 int i=0;
637
638 for (i=0; i < numscales; i++) {
639 if (clock_ratio[i] == maxmult) {
640 longhaul_setstate(i);
641 break;
642 }
643 }
644
645 cpufreq_unregister_driver(&longhaul_driver);
646 kfree(longhaul_table);
647}
648
649module_param (dont_scale_voltage, int, 0644);
650MODULE_PARM_DESC(dont_scale_voltage, "Don't scale voltage of processor");
651
652MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
653MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
654MODULE_LICENSE ("GPL");
655
656module_init(longhaul_init);
657module_exit(longhaul_exit);
658
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
new file mode 100644
index 000000000000..2a495c162ec7
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -0,0 +1,466 @@
1/*
2 * longhaul.h
3 * (C) 2003 Dave Jones.
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 *
7 * VIA-specific information
8 */
9
10union msr_bcr2 {
11 struct {
12	unsigned Reserved:19,	// 18:0
13 ESOFTBF:1, // 19
14 Reserved2:3, // 22:20
15 CLOCKMUL:4, // 26:23
16 Reserved3:5; // 31:27
17 } bits;
18 unsigned long val;
19};
20
21union msr_longhaul {
22 struct {
23 unsigned RevisionID:4, // 3:0
24 RevisionKey:4, // 7:4
25 EnableSoftBusRatio:1, // 8
26 EnableSoftVID:1, // 9
27 EnableSoftBSEL:1, // 10
28	Reserved:3,		// 13:11
29 SoftBusRatio4:1, // 14
30 VRMRev:1, // 15
31 SoftBusRatio:4, // 19:16
32 SoftVID:5, // 24:20
33 Reserved2:3, // 27:25
34 SoftBSEL:2, // 29:28
35 Reserved3:2, // 31:30
36 MaxMHzBR:4, // 35:32
37 MaximumVID:5, // 40:36
38 MaxMHzFSB:2, // 42:41
39 MaxMHzBR4:1, // 43
40 Reserved4:4, // 47:44
41 MinMHzBR:4, // 51:48
42 MinimumVID:5, // 56:52
43 MinMHzFSB:2, // 58:57
44 MinMHzBR4:1, // 59
45 Reserved5:4; // 63:60
46 } bits;
47 unsigned long long val;
48};
49
50/*
51 * Clock ratio tables. Div/Mod by 10 to get ratio.
52 * The eblcr ones specify the ratio read from the CPU.
53 * The clock_ratio ones specify what to write to the CPU.
54 */
55
56/*
57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
58 */
59static int __initdata samuel1_clock_ratio[16] = {
60 -1, /* 0000 -> RESERVED */
61 30, /* 0001 -> 3.0x */
62 40, /* 0010 -> 4.0x */
63 -1, /* 0011 -> RESERVED */
64 -1, /* 0100 -> RESERVED */
65 35, /* 0101 -> 3.5x */
66 45, /* 0110 -> 4.5x */
67 55, /* 0111 -> 5.5x */
68 60, /* 1000 -> 6.0x */
69 70, /* 1001 -> 7.0x */
70 80, /* 1010 -> 8.0x */
71 50, /* 1011 -> 5.0x */
72 65, /* 1100 -> 6.5x */
73 75, /* 1101 -> 7.5x */
74 -1, /* 1110 -> RESERVED */
75 -1, /* 1111 -> RESERVED */
76};
77
78static int __initdata samuel1_eblcr[16] = {
79 50, /* 0000 -> RESERVED */
80 30, /* 0001 -> 3.0x */
81 40, /* 0010 -> 4.0x */
82 -1, /* 0011 -> RESERVED */
83 55, /* 0100 -> 5.5x */
84 35, /* 0101 -> 3.5x */
85 45, /* 0110 -> 4.5x */
86 -1, /* 0111 -> RESERVED */
87 -1, /* 1000 -> RESERVED */
88 70, /* 1001 -> 7.0x */
89 80, /* 1010 -> 8.0x */
90 60, /* 1011 -> 6.0x */
91 -1, /* 1100 -> RESERVED */
92 75, /* 1101 -> 7.5x */
93 -1, /* 1110 -> RESERVED */
94 65, /* 1111 -> 6.5x */
95};
96
97/*
98 * VIA C3 Samuel2 Stepping 1->15
99 */
100static int __initdata samuel2_eblcr[16] = {
101 50, /* 0000 -> 5.0x */
102 30, /* 0001 -> 3.0x */
103 40, /* 0010 -> 4.0x */
104 100, /* 0011 -> 10.0x */
105 55, /* 0100 -> 5.5x */
106 35, /* 0101 -> 3.5x */
107 45, /* 0110 -> 4.5x */
108 110, /* 0111 -> 11.0x */
109 90, /* 1000 -> 9.0x */
110 70, /* 1001 -> 7.0x */
111 80, /* 1010 -> 8.0x */
112 60, /* 1011 -> 6.0x */
113 120, /* 1100 -> 12.0x */
114 75, /* 1101 -> 7.5x */
115 130, /* 1110 -> 13.0x */
116 65, /* 1111 -> 6.5x */
117};
118
119/*
120 * VIA C3 Ezra
121 */
122static int __initdata ezra_clock_ratio[16] = {
123 100, /* 0000 -> 10.0x */
124 30, /* 0001 -> 3.0x */
125 40, /* 0010 -> 4.0x */
126 90, /* 0011 -> 9.0x */
127 95, /* 0100 -> 9.5x */
128 35, /* 0101 -> 3.5x */
129 45, /* 0110 -> 4.5x */
130 55, /* 0111 -> 5.5x */
131 60, /* 1000 -> 6.0x */
132 70, /* 1001 -> 7.0x */
133 80, /* 1010 -> 8.0x */
134 50, /* 1011 -> 5.0x */
135 65, /* 1100 -> 6.5x */
136 75, /* 1101 -> 7.5x */
137 85, /* 1110 -> 8.5x */
138 120, /* 1111 -> 12.0x */
139};
140
141static int __initdata ezra_eblcr[16] = {
142 50, /* 0000 -> 5.0x */
143 30, /* 0001 -> 3.0x */
144 40, /* 0010 -> 4.0x */
145 100, /* 0011 -> 10.0x */
146 55, /* 0100 -> 5.5x */
147 35, /* 0101 -> 3.5x */
148 45, /* 0110 -> 4.5x */
149 95, /* 0111 -> 9.5x */
150 90, /* 1000 -> 9.0x */
151 70, /* 1001 -> 7.0x */
152 80, /* 1010 -> 8.0x */
153 60, /* 1011 -> 6.0x */
154 120, /* 1100 -> 12.0x */
155 75, /* 1101 -> 7.5x */
156 85, /* 1110 -> 8.5x */
157 65, /* 1111 -> 6.5x */
158};
159
160/*
161 * VIA C3 (Ezra-T) [C5M].
162 */
163static int __initdata ezrat_clock_ratio[32] = {
164 100, /* 0000 -> 10.0x */
165 30, /* 0001 -> 3.0x */
166 40, /* 0010 -> 4.0x */
167 90, /* 0011 -> 9.0x */
168 95, /* 0100 -> 9.5x */
169 35, /* 0101 -> 3.5x */
170 45, /* 0110 -> 4.5x */
171 55, /* 0111 -> 5.5x */
172 60, /* 1000 -> 6.0x */
173 70, /* 1001 -> 7.0x */
174 80, /* 1010 -> 8.0x */
175 50, /* 1011 -> 5.0x */
176 65, /* 1100 -> 6.5x */
177 75, /* 1101 -> 7.5x */
178 85, /* 1110 -> 8.5x */
179 120, /* 1111 -> 12.0x */
180
181 -1, /* 0000 -> RESERVED (10.0x) */
182 110, /* 0001 -> 11.0x */
183 120, /* 0010 -> 12.0x */
184 -1, /* 0011 -> RESERVED (9.0x)*/
185 105, /* 0100 -> 10.5x */
186 115, /* 0101 -> 11.5x */
187 125, /* 0110 -> 12.5x */
188 135, /* 0111 -> 13.5x */
189 140, /* 1000 -> 14.0x */
190 150, /* 1001 -> 15.0x */
191 160, /* 1010 -> 16.0x */
192 130, /* 1011 -> 13.0x */
193 145, /* 1100 -> 14.5x */
194 155, /* 1101 -> 15.5x */
195 -1, /* 1110 -> RESERVED (13.0x) */
196 -1, /* 1111 -> RESERVED (12.0x) */
197};
198
199static int __initdata ezrat_eblcr[32] = {
200 50, /* 0000 -> 5.0x */
201 30, /* 0001 -> 3.0x */
202 40, /* 0010 -> 4.0x */
203 100, /* 0011 -> 10.0x */
204 55, /* 0100 -> 5.5x */
205 35, /* 0101 -> 3.5x */
206 45, /* 0110 -> 4.5x */
207 95, /* 0111 -> 9.5x */
208 90, /* 1000 -> 9.0x */
209 70, /* 1001 -> 7.0x */
210 80, /* 1010 -> 8.0x */
211 60, /* 1011 -> 6.0x */
212 120, /* 1100 -> 12.0x */
213 75, /* 1101 -> 7.5x */
214 85, /* 1110 -> 8.5x */
215 65, /* 1111 -> 6.5x */
216
217 -1, /* 0000 -> RESERVED (9.0x) */
218 110, /* 0001 -> 11.0x */
219 120, /* 0010 -> 12.0x */
220 -1, /* 0011 -> RESERVED (10.0x)*/
221 135, /* 0100 -> 13.5x */
222 115, /* 0101 -> 11.5x */
223 125, /* 0110 -> 12.5x */
224 105, /* 0111 -> 10.5x */
225 130, /* 1000 -> 13.0x */
226 150, /* 1001 -> 15.0x */
227 160, /* 1010 -> 16.0x */
228 140, /* 1011 -> 14.0x */
229 -1, /* 1100 -> RESERVED (12.0x) */
230 155, /* 1101 -> 15.5x */
231 -1, /* 1110 -> RESERVED (13.0x) */
232 145, /* 1111 -> 14.5x */
233};
234
235/*
236 * VIA C3 Nehemiah
237 */
238static int __initdata nehemiah_a_clock_ratio[32] = {
239 100, /* 0000 -> 10.0x */
240 160, /* 0001 -> 16.0x */
241 -1, /* 0010 -> RESERVED */
242 90, /* 0011 -> 9.0x */
243 95, /* 0100 -> 9.5x */
244 -1, /* 0101 -> RESERVED */
245 -1, /* 0110 -> RESERVED */
246 55, /* 0111 -> 5.5x */
247 60, /* 1000 -> 6.0x */
248 70, /* 1001 -> 7.0x */
249 80, /* 1010 -> 8.0x */
250 50, /* 1011 -> 5.0x */
251 65, /* 1100 -> 6.5x */
252 75, /* 1101 -> 7.5x */
253 85, /* 1110 -> 8.5x */
254 120, /* 1111 -> 12.0x */
255 100, /* 0000 -> 10.0x */
256 -1, /* 0001 -> RESERVED */
257 120, /* 0010 -> 12.0x */
258 90, /* 0011 -> 9.0x */
259 105, /* 0100 -> 10.5x */
260 115, /* 0101 -> 11.5x */
261 125, /* 0110 -> 12.5x */
262 135, /* 0111 -> 13.5x */
263 140, /* 1000 -> 14.0x */
264 150, /* 1001 -> 15.0x */
265 160, /* 1010 -> 16.0x */
266 130, /* 1011 -> 13.0x */
267 145, /* 1100 -> 14.5x */
268 155, /* 1101 -> 15.5x */
269 -1, /* 1110 -> RESERVED (13.0x) */
270 120, /* 1111 -> 12.0x */
271};
272
273static int __initdata nehemiah_b_clock_ratio[32] = {
274 100, /* 0000 -> 10.0x */
275 160, /* 0001 -> 16.0x */
276 -1, /* 0010 -> RESERVED */
277 90, /* 0011 -> 9.0x */
278 95, /* 0100 -> 9.5x */
279 -1, /* 0101 -> RESERVED */
280 -1, /* 0110 -> RESERVED */
281 55, /* 0111 -> 5.5x */
282 60, /* 1000 -> 6.0x */
283 70, /* 1001 -> 7.0x */
284 80, /* 1010 -> 8.0x */
285 50, /* 1011 -> 5.0x */
286 65, /* 1100 -> 6.5x */
287 75, /* 1101 -> 7.5x */
288 85, /* 1110 -> 8.5x */
289 120, /* 1111 -> 12.0x */
290 100, /* 0000 -> 10.0x */
291 110, /* 0001 -> 11.0x */
292 120, /* 0010 -> 12.0x */
293 90, /* 0011 -> 9.0x */
294 105, /* 0100 -> 10.5x */
295 115, /* 0101 -> 11.5x */
296 125, /* 0110 -> 12.5x */
297 135, /* 0111 -> 13.5x */
298 140, /* 1000 -> 14.0x */
299 150, /* 1001 -> 15.0x */
300 160, /* 1010 -> 16.0x */
301 130, /* 1011 -> 13.0x */
302 145, /* 1100 -> 14.5x */
303 155, /* 1101 -> 15.5x */
304 -1, /* 1110 -> RESERVED (13.0x) */
305 120, /* 1111 -> 12.0x */
306};
307
308static int __initdata nehemiah_c_clock_ratio[32] = {
309 100, /* 0000 -> 10.0x */
310 160, /* 0001 -> 16.0x */
311 40, /* 0010 -> RESERVED */
312 90, /* 0011 -> 9.0x */
313 95, /* 0100 -> 9.5x */
314 -1, /* 0101 -> RESERVED */
315 45, /* 0110 -> RESERVED */
316 55, /* 0111 -> 5.5x */
317 60, /* 1000 -> 6.0x */
318 70, /* 1001 -> 7.0x */
319 80, /* 1010 -> 8.0x */
320 50, /* 1011 -> 5.0x */
321 65, /* 1100 -> 6.5x */
322 75, /* 1101 -> 7.5x */
323 85, /* 1110 -> 8.5x */
324 120, /* 1111 -> 12.0x */
325 100, /* 0000 -> 10.0x */
326 110, /* 0001 -> 11.0x */
327 120, /* 0010 -> 12.0x */
328 90, /* 0011 -> 9.0x */
329 105, /* 0100 -> 10.5x */
330 115, /* 0101 -> 11.5x */
331 125, /* 0110 -> 12.5x */
332 135, /* 0111 -> 13.5x */
333 140, /* 1000 -> 14.0x */
334 150, /* 1001 -> 15.0x */
335 160, /* 1010 -> 16.0x */
336 130, /* 1011 -> 13.0x */
337 145, /* 1100 -> 14.5x */
338 155, /* 1101 -> 15.5x */
339 -1, /* 1110 -> RESERVED (13.0x) */
340 120, /* 1111 -> 12.0x */
341};
342
343static int __initdata nehemiah_a_eblcr[32] = {
344 50, /* 0000 -> 5.0x */
345 160, /* 0001 -> 16.0x */
346 -1, /* 0010 -> RESERVED */
347 100, /* 0011 -> 10.0x */
348 55, /* 0100 -> 5.5x */
349 -1, /* 0101 -> RESERVED */
350 -1, /* 0110 -> RESERVED */
351 95, /* 0111 -> 9.5x */
352 90, /* 1000 -> 9.0x */
353 70, /* 1001 -> 7.0x */
354 80, /* 1010 -> 8.0x */
355 60, /* 1011 -> 6.0x */
356 120, /* 1100 -> 12.0x */
357 75, /* 1101 -> 7.5x */
358 85, /* 1110 -> 8.5x */
359 65, /* 1111 -> 6.5x */
360 90, /* 0000 -> 9.0x */
361 -1, /* 0001 -> RESERVED */
362 120, /* 0010 -> 12.0x */
363 100, /* 0011 -> 10.0x */
364 135, /* 0100 -> 13.5x */
365 115, /* 0101 -> 11.5x */
366 125, /* 0110 -> 12.5x */
367 105, /* 0111 -> 10.5x */
368 130, /* 1000 -> 13.0x */
369 150, /* 1001 -> 15.0x */
370 160, /* 1010 -> 16.0x */
371 140, /* 1011 -> 14.0x */
372 120, /* 1100 -> 12.0x */
373 155, /* 1101 -> 15.5x */
374 -1, /* 1110 -> RESERVED (13.0x) */
375 145 /* 1111 -> 14.5x */
376 /* end of table */
377};
378static int __initdata nehemiah_b_eblcr[32] = {
379 50, /* 0000 -> 5.0x */
380 160, /* 0001 -> 16.0x */
381 -1, /* 0010 -> RESERVED */
382 100, /* 0011 -> 10.0x */
383 55, /* 0100 -> 5.5x */
384 -1, /* 0101 -> RESERVED */
385 -1, /* 0110 -> RESERVED */
386 95, /* 0111 -> 9.5x */
387 90, /* 1000 -> 9.0x */
388 70, /* 1001 -> 7.0x */
389 80, /* 1010 -> 8.0x */
390 60, /* 1011 -> 6.0x */
391 120, /* 1100 -> 12.0x */
392 75, /* 1101 -> 7.5x */
393 85, /* 1110 -> 8.5x */
394 65, /* 1111 -> 6.5x */
395 90, /* 0000 -> 9.0x */
396 110, /* 0001 -> 11.0x */
397 120, /* 0010 -> 12.0x */
398 100, /* 0011 -> 10.0x */
399 135, /* 0100 -> 13.5x */
400 115, /* 0101 -> 11.5x */
401 125, /* 0110 -> 12.5x */
402 105, /* 0111 -> 10.5x */
403 130, /* 1000 -> 13.0x */
404 150, /* 1001 -> 15.0x */
405 160, /* 1010 -> 16.0x */
406 140, /* 1011 -> 14.0x */
407 120, /* 1100 -> 12.0x */
408 155, /* 1101 -> 15.5x */
409 -1, /* 1110 -> RESERVED (13.0x) */
410 145 /* 1111 -> 14.5x */
411 /* end of table */
412};
413static int __initdata nehemiah_c_eblcr[32] = {
414 50, /* 0000 -> 5.0x */
415 160, /* 0001 -> 16.0x */
416 40, /* 0010 -> RESERVED */
417 100, /* 0011 -> 10.0x */
418 55, /* 0100 -> 5.5x */
419 -1, /* 0101 -> RESERVED */
420 45, /* 0110 -> RESERVED */
421 95, /* 0111 -> 9.5x */
422 90, /* 1000 -> 9.0x */
423 70, /* 1001 -> 7.0x */
424 80, /* 1010 -> 8.0x */
425 60, /* 1011 -> 6.0x */
426 120, /* 1100 -> 12.0x */
427 75, /* 1101 -> 7.5x */
428 85, /* 1110 -> 8.5x */
429 65, /* 1111 -> 6.5x */
430 90, /* 0000 -> 9.0x */
431 110, /* 0001 -> 11.0x */
432 120, /* 0010 -> 12.0x */
433 100, /* 0011 -> 10.0x */
434 135, /* 0100 -> 13.5x */
435 115, /* 0101 -> 11.5x */
436 125, /* 0110 -> 12.5x */
437 105, /* 0111 -> 10.5x */
438 130, /* 1000 -> 13.0x */
439 150, /* 1001 -> 15.0x */
440 160, /* 1010 -> 16.0x */
441 140, /* 1011 -> 14.0x */
442 120, /* 1100 -> 12.0x */
443 155, /* 1101 -> 15.5x */
444 -1, /* 1110 -> RESERVED (13.0x) */
445 145 /* 1111 -> 14.5x */
446 /* end of table */
447};
448
449/*
450 * Voltage scales. Div/Mod by 1000 to get actual voltage.
451 * Which scale to use depends on the VRM type in use.
452 */
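/* For example, a table value of 1250 corresponds to 1.250V;
 * -1 marks entries with no valid voltage. */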
453static int __initdata vrm85scales[32] = {
454 1250, 1200, 1150, 1100, 1050, 1800, 1750, 1700,
455 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300,
456 1275, 1225, 1175, 1125, 1075, 1825, 1775, 1725,
457 1675, 1625, 1575, 1525, 1475, 1425, 1375, 1325,
458};
459
460static int __initdata mobilevrmscales[32] = {
461 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
462 1600, 1550, 1500, 1450, 1500, 1350, 1300, -1,
463 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
464 1075, 1050, 1025, 1000, 975, 950, 925, -1,
465};
466
diff --git a/arch/i386/kernel/cpu/cpufreq/longrun.c b/arch/i386/kernel/cpu/cpufreq/longrun.c
new file mode 100644
index 000000000000..e3868de4dc2e
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/longrun.c
@@ -0,0 +1,326 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/slab.h>
13#include <linux/cpufreq.h>
14
15#include <asm/msr.h>
16#include <asm/processor.h>
17#include <asm/timex.h>
18
19#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg)
20
21static struct cpufreq_driver longrun_driver;
22
23/**
24 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
25 * values into per cent values. In TMTA microcode, the following is valid:
26 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
27 */
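/* Example (hypothetical numbers): with longrun_low_freq = 300000 kHz and
 * longrun_high_freq = 1000000 kHz, a LongRun setting of 50 maps to
 * 300000 + 50 * ((1000000 - 300000) / 100) = 650000 kHz. */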
28static unsigned int longrun_low_freq, longrun_high_freq;
29
30
31/**
32 * longrun_get_policy - get the current LongRun policy
33 * @policy: struct cpufreq_policy where current policy is written into
34 *
35 * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
36 * and MSR_TMTA_LONGRUN_CTRL
37 */
38static void __init longrun_get_policy(struct cpufreq_policy *policy)
39{
40 u32 msr_lo, msr_hi;
41
42 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
43 dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi);
44 if (msr_lo & 0x01)
45 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
46 else
47 policy->policy = CPUFREQ_POLICY_POWERSAVE;
48
49 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
50 dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
51 msr_lo &= 0x0000007F;
52 msr_hi &= 0x0000007F;
53
54 if ( longrun_high_freq <= longrun_low_freq ) {
55 /* Assume degenerate Longrun table */
56 policy->min = policy->max = longrun_high_freq;
57 } else {
58 policy->min = longrun_low_freq + msr_lo *
59 ((longrun_high_freq - longrun_low_freq) / 100);
60 policy->max = longrun_low_freq + msr_hi *
61 ((longrun_high_freq - longrun_low_freq) / 100);
62 }
63 policy->cpu = 0;
64}
65
66
67/**
68 * longrun_set_policy - sets a new CPUFreq policy
69 * @policy: new policy
70 *
71 * Sets a new CPUFreq policy on LongRun-capable processors. This function
72 * has to be called with cpufreq_driver locked.
73 */
74static int longrun_set_policy(struct cpufreq_policy *policy)
75{
76 u32 msr_lo, msr_hi;
77 u32 pctg_lo, pctg_hi;
78
79 if (!policy)
80 return -EINVAL;
81
82 if ( longrun_high_freq <= longrun_low_freq ) {
83 /* Assume degenerate Longrun table */
84 pctg_lo = pctg_hi = 100;
85 } else {
86 pctg_lo = (policy->min - longrun_low_freq) /
87 ((longrun_high_freq - longrun_low_freq) / 100);
88 pctg_hi = (policy->max - longrun_low_freq) /
89 ((longrun_high_freq - longrun_low_freq) / 100);
90 }
91
92 if (pctg_hi > 100)
93 pctg_hi = 100;
94 if (pctg_lo > pctg_hi)
95 pctg_lo = pctg_hi;
96
97 /* performance or economy mode */
98 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
99 msr_lo &= 0xFFFFFFFE;
100 switch (policy->policy) {
101 case CPUFREQ_POLICY_PERFORMANCE:
102 msr_lo |= 0x00000001;
103 break;
104 case CPUFREQ_POLICY_POWERSAVE:
105 break;
106 }
107 wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
108
109 /* lower and upper boundary */
110 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
111 msr_lo &= 0xFFFFFF80;
112 msr_hi &= 0xFFFFFF80;
113 msr_lo |= pctg_lo;
114 msr_hi |= pctg_hi;
115 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
116
117 return 0;
118}
119
120
121/**
122 * longrun_verify_policy - verifies a new CPUFreq policy
123 * @policy: the policy to verify
124 *
125 * Validates a new CPUFreq policy. This function has to be called with
126 * cpufreq_driver locked.
127 */
128static int longrun_verify_policy(struct cpufreq_policy *policy)
129{
130 if (!policy)
131 return -EINVAL;
132
133 policy->cpu = 0;
134 cpufreq_verify_within_limits(policy,
135 policy->cpuinfo.min_freq,
136 policy->cpuinfo.max_freq);
137
138 if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
139 (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
140 return -EINVAL;
141
142 return 0;
143}
144
145static unsigned int longrun_get(unsigned int cpu)
146{
147 u32 eax, ebx, ecx, edx;
148
149 if (cpu)
150 return 0;
151
152 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
153 dprintk("cpuid eax is %u\n", eax);
154
155 return (eax * 1000);
156}
157
158/**
159 * longrun_determine_freqs - determines the lowest and highest possible core frequency
160 * @low_freq: an int to put the lowest frequency into
161 * @high_freq: an int to put the highest frequency into
162 *
163 * Determines the lowest and highest possible core frequencies on this CPU.
164 * This is necessary to calculate the performance percentage according to
165 * TMTA rules:
166 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
167 */
168static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
169 unsigned int *high_freq)
170{
171 u32 msr_lo, msr_hi;
172 u32 save_lo, save_hi;
173 u32 eax, ebx, ecx, edx;
174 u32 try_hi;
175 struct cpuinfo_x86 *c = cpu_data;
176
177 if (!low_freq || !high_freq)
178 return -EINVAL;
179
180 if (cpu_has(c, X86_FEATURE_LRTI)) {
181 /* if the LongRun Table Interface is present, the
182 * detection is a bit easier:
183 * For minimum frequency, read out the maximum
184 * level (msr_hi), write that into "currently
185 * selected level", and read out the frequency.
186 * For maximum frequency, read out level zero.
187 */
188 /* minimum */
189 rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
190 wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
191 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
192 *low_freq = msr_lo * 1000; /* to kHz */
193
194 /* maximum */
195 wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
196 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
197 *high_freq = msr_lo * 1000; /* to kHz */
198
199 dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq);
200
201 if (*low_freq > *high_freq)
202 *low_freq = *high_freq;
203 return 0;
204 }
205
206 /* set the upper border to the value determined during TSC init */
207 *high_freq = (cpu_khz / 1000);
208 *high_freq = *high_freq * 1000;
209 dprintk("high frequency is %u kHz\n", *high_freq);
210
211 /* get current borders */
212 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
213 save_lo = msr_lo & 0x0000007F;
214 save_hi = msr_hi & 0x0000007F;
215
216 /* if current perf_pctg is larger than 90%, we need to decrease the
217 * upper limit to make the calculation more accurate.
218 */
219 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
220	/* try decreasing in 10% steps; some processors react only
221	 * at certain threshold values */
222 for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) {
223 /* set to 0 to try_hi perf_pctg */
224 msr_lo &= 0xFFFFFF80;
225 msr_hi &= 0xFFFFFF80;
226 msr_lo |= 0;
227 msr_hi |= try_hi;
228 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
229
230 /* read out current core MHz and current perf_pctg */
231 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
232
233 /* restore values */
234 wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
235 }
236 dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax);
237
238 /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
239	 * equals
240 * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
241 *
242	 * high_freq * perf_pctg is stored temporarily into "ebx".
243 */
244 ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
245
246 if ((ecx > 95) || (ecx == 0) || (eax < ebx))
247 return -EIO;
248
249 edx = (eax - ebx) / (100 - ecx);
250 *low_freq = edx * 1000; /* back to kHz */
251
252 dprintk("low frequency is %u kHz\n", *low_freq);
253
254 if (*low_freq > *high_freq)
255 *low_freq = *high_freq;
256
257 return 0;
258}
259
260
261static int __init longrun_cpu_init(struct cpufreq_policy *policy)
262{
263 int result = 0;
264
265 /* capability check */
266 if (policy->cpu != 0)
267 return -ENODEV;
268
269 /* detect low and high frequency */
270 result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
271 if (result)
272 return result;
273
274 /* cpuinfo and default policy values */
275 policy->cpuinfo.min_freq = longrun_low_freq;
276 policy->cpuinfo.max_freq = longrun_high_freq;
277 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
278 longrun_get_policy(policy);
279
280 return 0;
281}
282
283
284static struct cpufreq_driver longrun_driver = {
285 .flags = CPUFREQ_CONST_LOOPS,
286 .verify = longrun_verify_policy,
287 .setpolicy = longrun_set_policy,
288 .get = longrun_get,
289 .init = longrun_cpu_init,
290 .name = "longrun",
291 .owner = THIS_MODULE,
292};
293
294
295/**
296 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
297 *
298 * Initializes the LongRun support.
299 */
300static int __init longrun_init(void)
301{
302 struct cpuinfo_x86 *c = cpu_data;
303
304 if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
305 !cpu_has(c, X86_FEATURE_LONGRUN))
306 return -ENODEV;
307
308 return cpufreq_register_driver(&longrun_driver);
309}
310
311
312/**
313 * longrun_exit - unregisters LongRun support
314 */
315static void __exit longrun_exit(void)
316{
317 cpufreq_unregister_driver(&longrun_driver);
318}
319
320
321MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
322MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe and Efficeon processors.");
323MODULE_LICENSE ("GPL");
324
325module_init(longrun_init);
326module_exit(longrun_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
new file mode 100644
index 000000000000..aa622d52c6e5
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -0,0 +1,337 @@
1/*
2 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
5 * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
6 * (C) 2002 Tora T. Engstad
7 * All Rights Reserved
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * The author(s) of this software shall not be held liable for damages
15 * of any nature resulting due to the use of this software. This
16 * software is provided AS-IS with no warranties.
17 *
18 * Date Errata Description
19 * 20020525 N44, O17 12.5% or 25% DC causes lockup
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/smp.h>
28#include <linux/cpufreq.h>
29#include <linux/slab.h>
30#include <linux/cpumask.h>
31
32#include <asm/processor.h>
33#include <asm/msr.h>
34#include <asm/timex.h>
35
36#include "speedstep-lib.h"
37
38#define PFX "p4-clockmod: "
39#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg)
40
41/*
42 * Duty Cycle (3 bits); note that DC_DISABLE is not specified in the
43 * Intel docs, it is just used here to mean disable
44 */
45enum {
46 DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
47 DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
48};
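/* Each state N (1..8) selects a duty cycle of N/8 of the stock frequency,
 * i.e. 12.5% steps: DC_DFLT ~ 12.5%, DC_50PT = 50%, DC_88PT ~ 87.5%,
 * and DC_DISABLE means modulation off (full speed). */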
49
50#define DC_ENTRIES 8
51
52
53static int has_N44_O17_errata[NR_CPUS];
54static unsigned int stock_freq;
55static struct cpufreq_driver p4clockmod_driver;
56static unsigned int cpufreq_p4_get(unsigned int cpu);
57
58static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
59{
60 u32 l, h;
61
62 if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
63 return -EINVAL;
64
65 rdmsr(MSR_IA32_THERM_STATUS, l, h);
66
67 if (l & 0x01)
68 dprintk("CPU#%d currently thermal throttled\n", cpu);
69
70 if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
71 newstate = DC_38PT;
72
73 rdmsr(MSR_IA32_THERM_CONTROL, l, h);
74 if (newstate == DC_DISABLE) {
75 dprintk("CPU#%d disabling modulation\n", cpu);
76 wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
77 } else {
78 dprintk("CPU#%d setting duty cycle to %d%%\n",
79 cpu, ((125 * newstate) / 10));
80 /* bits 63 - 5 : reserved
81 * bit 4 : enable/disable
82 * bits 3-1 : duty cycle
83 * bit 0 : reserved
84 */
85 l = (l & ~14);
86 l = l | (1<<4) | ((newstate & 0x7)<<1);
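		/* e.g. newstate == DC_50PT (4): l |= (1<<4) | (4<<1) = 0x18,
		 * i.e. modulation enabled at a 50% duty cycle */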
87 wrmsr(MSR_IA32_THERM_CONTROL, l, h);
88 }
89
90 return 0;
91}
92
93
94static struct cpufreq_frequency_table p4clockmod_table[] = {
95 {DC_RESV, CPUFREQ_ENTRY_INVALID},
96 {DC_DFLT, 0},
97 {DC_25PT, 0},
98 {DC_38PT, 0},
99 {DC_50PT, 0},
100 {DC_64PT, 0},
101 {DC_75PT, 0},
102 {DC_88PT, 0},
103 {DC_DISABLE, 0},
104 {DC_RESV, CPUFREQ_TABLE_END},
105};
106
107
108static int cpufreq_p4_target(struct cpufreq_policy *policy,
109 unsigned int target_freq,
110 unsigned int relation)
111{
112 unsigned int newstate = DC_RESV;
113 struct cpufreq_freqs freqs;
114 cpumask_t cpus_allowed;
115 int i;
116
117 if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
118 return -EINVAL;
119
120 freqs.old = cpufreq_p4_get(policy->cpu);
121 freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;
122
123 if (freqs.new == freqs.old)
124 return 0;
125
126 /* notifiers */
127 for_each_cpu_mask(i, policy->cpus) {
128 freqs.cpu = i;
129 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
130 }
131
132 /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
133 * Developer's Manual, Volume 3
134 */
135 cpus_allowed = current->cpus_allowed;
136
137 for_each_cpu_mask(i, policy->cpus) {
138 cpumask_t this_cpu = cpumask_of_cpu(i);
139
140 set_cpus_allowed(current, this_cpu);
141 BUG_ON(smp_processor_id() != i);
142
143 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
144 }
145 set_cpus_allowed(current, cpus_allowed);
146
147 /* notifiers */
148 for_each_cpu_mask(i, policy->cpus) {
149 freqs.cpu = i;
150 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
151 }
152
153 return 0;
154}
155
156
157static int cpufreq_p4_verify(struct cpufreq_policy *policy)
158{
159 return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
160}
161
162
163static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
164{
165 if ((c->x86 == 0x06) && (c->x86_model == 0x09)) {
166 /* Pentium M (Banias) */
167 printk(KERN_WARNING PFX "Warning: Pentium M detected. "
168 "The speedstep_centrino module offers voltage scaling"
169		       " in addition to frequency scaling. You should use "
170 "that instead of p4-clockmod, if possible.\n");
171 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
172 }
173
174 if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) {
175 /* Pentium M (Dothan) */
176 printk(KERN_WARNING PFX "Warning: Pentium M detected. "
177 "The speedstep_centrino module offers voltage scaling"
178		       " in addition to frequency scaling. You should use "
179 "that instead of p4-clockmod, if possible.\n");
180		/* on P-4s, the TSC runs with constant frequency independent of whether
181		 * throttling is active or not. */
182 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
183 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
184 }
185
186 if (c->x86 != 0xF) {
187 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n");
188 return 0;
189 }
190
191	/* on P-4s, the TSC runs with constant frequency independent of whether
192	 * throttling is active or not. */
193 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
194
195 if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) {
196 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
197 "The speedstep-ich or acpi cpufreq modules offer "
198			"voltage scaling in addition to frequency scaling. "
199 "You should use either one instead of p4-clockmod, "
200 "if possible.\n");
201 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M);
202 }
203
204 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
205}
206
207
208
209static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
210{
211 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
212 int cpuid = 0;
213 unsigned int i;
214
215#ifdef CONFIG_SMP
216 policy->cpus = cpu_sibling_map[policy->cpu];
217#endif
218
219 /* Errata workaround */
220 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
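	/* e.g. family 0xf, model 1, stepping 2 packs to 0x0f12, one of the
	 * errata cases below */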
221 switch (cpuid) {
222 case 0x0f07:
223 case 0x0f0a:
224 case 0x0f11:
225 case 0x0f12:
226 has_N44_O17_errata[policy->cpu] = 1;
227 dprintk("has errata -- disabling low frequencies\n");
228 }
229
230 /* get max frequency */
231 stock_freq = cpufreq_p4_get_frequency(c);
232 if (!stock_freq)
233 return -EINVAL;
234
235 /* table init */
236 for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
237 if ((i<2) && (has_N44_O17_errata[policy->cpu]))
238 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
239 else
240 p4clockmod_table[i].frequency = (stock_freq * i)/8;
241 }
242 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
243
244 /* cpuinfo and default policy values */
245 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
246 policy->cpuinfo.transition_latency = 1000000; /* assumed */
247 policy->cur = stock_freq;
248
249 return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
250}
251
252
253static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
254{
255 cpufreq_frequency_table_put_attr(policy->cpu);
256 return 0;
257}
258
259static unsigned int cpufreq_p4_get(unsigned int cpu)
260{
261 cpumask_t cpus_allowed;
262 u32 l, h;
263
264 cpus_allowed = current->cpus_allowed;
265
266 set_cpus_allowed(current, cpumask_of_cpu(cpu));
267 BUG_ON(smp_processor_id() != cpu);
268
269 rdmsr(MSR_IA32_THERM_CONTROL, l, h);
270
271 set_cpus_allowed(current, cpus_allowed);
272
273 if (l & 0x10) {
274 l = l >> 1;
275 l &= 0x7;
276 } else
277 l = DC_DISABLE;
278
279 if (l != DC_DISABLE)
280 return (stock_freq * l / 8);
281
282 return stock_freq;
283}
284
285static struct freq_attr* p4clockmod_attr[] = {
286 &cpufreq_freq_attr_scaling_available_freqs,
287 NULL,
288};
289
290static struct cpufreq_driver p4clockmod_driver = {
291 .verify = cpufreq_p4_verify,
292 .target = cpufreq_p4_target,
293 .init = cpufreq_p4_cpu_init,
294 .exit = cpufreq_p4_cpu_exit,
295 .get = cpufreq_p4_get,
296 .name = "p4-clockmod",
297 .owner = THIS_MODULE,
298 .attr = p4clockmod_attr,
299};
300
301
302static int __init cpufreq_p4_init(void)
303{
304 struct cpuinfo_x86 *c = cpu_data;
305 int ret;
306
307 /*
308 * THERM_CONTROL is architectural for IA32 now, so
309 * we can rely on the capability checks
310 */
311 if (c->x86_vendor != X86_VENDOR_INTEL)
312 return -ENODEV;
313
314 if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
315 !test_bit(X86_FEATURE_ACC, c->x86_capability))
316 return -ENODEV;
317
318 ret = cpufreq_register_driver(&p4clockmod_driver);
319 if (!ret)
320 printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
321
322 return (ret);
323}
324
325
326static void __exit cpufreq_p4_exit(void)
327{
328 cpufreq_unregister_driver(&p4clockmod_driver);
329}
330
331
332MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>");
333MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
334MODULE_LICENSE ("GPL");
335
336late_initcall(cpufreq_p4_init);
337module_exit(cpufreq_p4_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
new file mode 100644
index 000000000000..222f8cfe3c57
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
@@ -0,0 +1,256 @@
1/*
2 * This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
3 * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski.
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 *
7 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/cpufreq.h>
14#include <linux/ioport.h>
15#include <linux/slab.h>
16
17#include <asm/msr.h>
18#include <asm/timex.h>
19#include <asm/io.h>
20
21
22#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
23 as it is unused */
24
25static unsigned int busfreq; /* FSB, in 10 kHz */
26static unsigned int max_multiplier;
27
28
29/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
30static struct cpufreq_frequency_table clock_ratio[] = {
31 {45, /* 000 -> 4.5x */ 0},
32 {50, /* 001 -> 5.0x */ 0},
33 {40, /* 010 -> 4.0x */ 0},
34 {55, /* 011 -> 5.5x */ 0},
35 {20, /* 100 -> 2.0x */ 0},
36 {30, /* 101 -> 3.0x */ 0},
37 {60, /* 110 -> 6.0x */ 0},
38 {35, /* 111 -> 3.5x */ 0},
39 {0, CPUFREQ_TABLE_END}
40};
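/* Example: with a 4.5x maximum multiplier and cpu_khz = 450000, busfreq
 * becomes 10000 (a 100 MHz FSB expressed in 10 kHz units), so the 3.5x
 * entry maps to 10000 * 35 = 350000 kHz. */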
41
42
43/**
44 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
45 *
46 * Returns the current setting of the frequency multiplier, encoded as
47 * ratio * 10 (see clock_ratio[]). Core clock speed is the Front-Side Bus frequency multiplied by that ratio.
48 */
49static int powernow_k6_get_cpu_multiplier(void)
50{
51 u64 invalue = 0;
52 u32 msrval;
53
54 msrval = POWERNOW_IOPORT + 0x1;
55 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
56 invalue=inl(POWERNOW_IOPORT + 0x8);
57 msrval = POWERNOW_IOPORT + 0x0;
58 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
59
60 return clock_ratio[(invalue >> 5)&7].index;
61}
62
63
64/**
65 * powernow_k6_set_state - set the PowerNow! multiplier
66 * @best_i: clock_ratio[best_i] is the target multiplier
67 *
68 * Tries to change the PowerNow! multiplier
69 */
70static void powernow_k6_set_state (unsigned int best_i)
71{
72 unsigned long outvalue=0, invalue=0;
73 unsigned long msrval;
74 struct cpufreq_freqs freqs;
75
76 if (clock_ratio[best_i].index > max_multiplier) {
77 printk(KERN_ERR "cpufreq: invalid target frequency\n");
78 return;
79 }
80
81 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
82 freqs.new = busfreq * clock_ratio[best_i].index;
83 freqs.cpu = 0; /* powernow-k6.c is a UP-only driver */
84
85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
86
87 /* we now need to transform best_i to the BVC format, see AMD#23446 */
88
89 outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
90
91 msrval = POWERNOW_IOPORT + 0x1;
92 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
93 invalue=inl(POWERNOW_IOPORT + 0x8);
94 invalue = invalue & 0xf;
95 outvalue = outvalue | invalue;
96 outl(outvalue ,(POWERNOW_IOPORT + 0x8));
97 msrval = POWERNOW_IOPORT + 0x0;
98 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
99
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
101
102 return;
103}
104
105
106/**
107 * powernow_k6_verify - verifies a new CPUfreq policy
108 * @policy: new policy
109 *
110 * Policy must be within lowest and highest possible CPU Frequency,
111 * and at least one possible state must be within min and max.
112 */
113static int powernow_k6_verify(struct cpufreq_policy *policy)
114{
115 return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
116}
117
118
119/**
120 * powernow_k6_target - sets a new CPUFreq policy
121 * @policy: new policy
122 * @target_freq: the target frequency
123 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
124 *
125 * sets a new CPUFreq policy
126 */
127static int powernow_k6_target (struct cpufreq_policy *policy,
128 unsigned int target_freq,
129 unsigned int relation)
130{
131 unsigned int newstate = 0;
132
133 if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate))
134 return -EINVAL;
135
136 powernow_k6_set_state(newstate);
137
138 return 0;
139}
140
141
142static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
143{
144 unsigned int i;
145 int result;
146
147 if (policy->cpu != 0)
148 return -ENODEV;
149
150 /* get frequencies */
151 max_multiplier = powernow_k6_get_cpu_multiplier();
152 busfreq = cpu_khz / max_multiplier;
153
154 /* table init */
155 for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
156 if (clock_ratio[i].index > max_multiplier)
157 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
158 else
159 clock_ratio[i].frequency = busfreq * clock_ratio[i].index;
160 }
161
162 /* cpuinfo and default policy values */
163 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
164 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
165 policy->cur = busfreq * max_multiplier;
166
167 result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
168 if (result)
169 return (result);
170
171 cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
172
173 return 0;
174}
175
176
177static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
178{
179 unsigned int i;
180 for (i=0; i<8; i++) {
181 if (i==max_multiplier)
182 powernow_k6_set_state(i);
183 }
184 cpufreq_frequency_table_put_attr(policy->cpu);
185 return 0;
186}
187
188static unsigned int powernow_k6_get(unsigned int cpu)
189{
190 return busfreq * powernow_k6_get_cpu_multiplier();
191}
192
193static struct freq_attr* powernow_k6_attr[] = {
194 &cpufreq_freq_attr_scaling_available_freqs,
195 NULL,
196};
197
198static struct cpufreq_driver powernow_k6_driver = {
199 .verify = powernow_k6_verify,
200 .target = powernow_k6_target,
201 .init = powernow_k6_cpu_init,
202 .exit = powernow_k6_cpu_exit,
203 .get = powernow_k6_get,
204 .name = "powernow-k6",
205 .owner = THIS_MODULE,
206 .attr = powernow_k6_attr,
207};
208
209
210/**
211 * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
212 *
213 * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
214 * devices, -EINVAL or -ENOMEM on problems during initialization, and zero
215 * on success.
216 */
217static int __init powernow_k6_init(void)
218{
219 struct cpuinfo_x86 *c = cpu_data;
220
221 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
222 ((c->x86_model != 12) && (c->x86_model != 13)))
223 return -ENODEV;
224
225 if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
226 printk("cpufreq: PowerNow IOPORT region already used.\n");
227 return -EIO;
228 }
229
230 if (cpufreq_register_driver(&powernow_k6_driver)) {
231 release_region (POWERNOW_IOPORT, 16);
232 return -EINVAL;
233 }
234
235 return 0;
236}
237
238
239/**
240 * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
241 *
242 * Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
243 */
244static void __exit powernow_k6_exit(void)
245{
246 cpufreq_unregister_driver(&powernow_k6_driver);
247 release_region (POWERNOW_IOPORT, 16);
248}
249
250
251MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
252MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
253MODULE_LICENSE ("GPL");
254
255module_init(powernow_k6_init);
256module_exit(powernow_k6_exit);
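
Editor's illustration (not part of the commit): a minimal standalone sketch of the multiplier arithmetic used by powernow-k6 above; the bus frequency and field value are assumed example numbers, and the table mirrors clock_ratio[]:

/*
 * Illustrative sketch only, not driver code: decodes the 3-bit multiplier
 * field and computes the core clock the way powernow-k6 does above.
 */
#include <stdio.h>

/* clock ratio * 10, indexed by the 3-bit field (table 27, AMD#23446) */
static const int clock_ratio_x10[8] = { 45, 50, 40, 55, 20, 30, 60, 35 };

int main(void)
{
	unsigned int busfreq = 10000;	/* FSB in 10 kHz units, i.e. 100 MHz (example) */
	unsigned int field = 6;		/* example field value: 6.0x */

	/* core clock in kHz = FSB (10 kHz units) * (ratio * 10) */
	printf("core clock: %u kHz\n", busfreq * clock_ratio_x10[field]);
	return 0;
}
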
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
new file mode 100644
index 000000000000..913f652623d9
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -0,0 +1,690 @@
1/*
2 * AMD K7 Powernow driver.
3 * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs.
4 * (C) 2003-2004 Dave Jones <davej@redhat.com>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 * Based upon datasheets & sample CPUs kindly provided by AMD.
8 *
9 * Errata 5: Processor may fail to execute a FID/VID change in the presence of an interrupt.
10 * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
11 * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect.
12 * - We disable half multipliers if ACPI is used on A0 stepping CPUs.
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/init.h>
20#include <linux/cpufreq.h>
21#include <linux/slab.h>
22#include <linux/string.h>
23#include <linux/dmi.h>
24
25#include <asm/msr.h>
26#include <asm/timex.h>
27#include <asm/io.h>
28#include <asm/system.h>
29
30#ifdef CONFIG_X86_POWERNOW_K7_ACPI
31#include <linux/acpi.h>
32#include <acpi/processor.h>
33#endif
34
35#include "powernow-k7.h"
36
37#define PFX "powernow: "
38
39
40struct psb_s {
41 u8 signature[10];
42 u8 tableversion;
43 u8 flags;
44 u16 settlingtime;
45 u8 reserved1;
46 u8 numpst;
47};
48
49struct pst_s {
50 u32 cpuid;
51 u8 fsbspeed;
52 u8 maxfid;
53 u8 startvid;
54 u8 numpstates;
55};
56
57#ifdef CONFIG_X86_POWERNOW_K7_ACPI
58union powernow_acpi_control_t {
59 struct {
60 unsigned long fid:5,
61 vid:5,
62 sgtc:20,
63 res1:2;
64 } bits;
65 unsigned long val;
66};
67#endif
68
69#ifdef CONFIG_CPU_FREQ_DEBUG
70/* divide by 1000 to get VCore voltage in V. */
71static int mobile_vid_table[32] = {
72 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
73 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
74 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
75 1075, 1050, 1025, 1000, 975, 950, 925, 0,
76};
77#endif
78
79/* divide by 10 to get FID. */
80static int fid_codes[32] = {
81 110, 115, 120, 125, 50, 55, 60, 65,
82 70, 75, 80, 85, 90, 95, 100, 105,
83 30, 190, 40, 200, 130, 135, 140, 210,
84 150, 225, 160, 165, 170, 180, -1, -1,
85};
86
87/* This parameter is used to force ACPI instead of the legacy method for
88 * configuration purposes.
89 */
90
91static int acpi_force;
92
93static struct cpufreq_frequency_table *powernow_table;
94
95static unsigned int can_scale_bus;
96static unsigned int can_scale_vid;
97static unsigned int minimum_speed=-1;
98static unsigned int maximum_speed;
99static unsigned int number_scales;
100static unsigned int fsb;
101static unsigned int latency;
102static char have_a0;
103
104#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k7", msg)
105
106static int check_fsb(unsigned int fsbspeed)
107{
108 int delta;
109 unsigned int f = fsb / 1000;
110
111 delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
112 return (delta < 5);
113}
114
115static int check_powernow(void)
116{
117 struct cpuinfo_x86 *c = cpu_data;
118 unsigned int maxei, eax, ebx, ecx, edx;
119
120 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
121#ifdef MODULE
122 printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n");
123#endif
124 return 0;
125 }
126
127 /* Get maximum capabilities */
128 maxei = cpuid_eax (0x80000000);
129 if (maxei < 0x80000007) { /* Any powernow info ? */
130#ifdef MODULE
131 printk (KERN_INFO PFX "No powernow capabilities detected\n");
132#endif
133 return 0;
134 }
135
136 if ((c->x86_model == 6) && (c->x86_mask == 0)) {
137 printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
138 have_a0 = 1;
139 }
140
141 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
142
143 /* Check we can actually do something before we say anything.*/
144 if (!(edx & (1 << 1 | 1 << 2)))
145 return 0;
146
147 printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
148
149 if (edx & 1 << 1) {
150 printk ("frequency");
151 can_scale_bus=1;
152 }
153
154 if ((edx & (1 << 1 | 1 << 2)) == 0x6)
155 printk (" and ");
156
157 if (edx & 1 << 2) {
158 printk ("voltage");
159 can_scale_vid=1;
160 }
161
162 printk (".\n");
163 return 1;
164}
165
166
167static int get_ranges (unsigned char *pst)
168{
169 unsigned int j;
170 unsigned int speed;
171 u8 fid, vid;
172
173 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
174 if (!powernow_table)
175 return -ENOMEM;
176 memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1)));
177
178 for (j=0 ; j < number_scales; j++) {
179 fid = *pst++;
180
181 powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
182 powernow_table[j].index = fid; /* lower 8 bits */
183
184 speed = powernow_table[j].frequency;
185
186 if ((fid_codes[fid] % 10)==5) {
187#ifdef CONFIG_X86_POWERNOW_K7_ACPI
188 if (have_a0 == 1)
189 powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID;
190#endif
191 }
192
193 if (speed < minimum_speed)
194 minimum_speed = speed;
195 if (speed > maximum_speed)
196 maximum_speed = speed;
197
198 vid = *pst++;
199 powernow_table[j].index |= (vid << 8); /* upper 8 bits */
200
201 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
202 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
203 fid_codes[fid] % 10, speed/1000, vid,
204 mobile_vid_table[vid]/1000,
205 mobile_vid_table[vid]%1000);
206 }
207 powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
208 powernow_table[number_scales].index = 0;
209
210 return 0;
211}
212
213
214static void change_FID(int fid)
215{
216 union msr_fidvidctl fidvidctl;
217
218 rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
219 if (fidvidctl.bits.FID != fid) {
220 fidvidctl.bits.SGTC = latency;
221 fidvidctl.bits.FID = fid;
222 fidvidctl.bits.VIDC = 0;
223 fidvidctl.bits.FIDC = 1;
224 wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
225 }
226}
227
228
229static void change_VID(int vid)
230{
231 union msr_fidvidctl fidvidctl;
232
233 rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
234 if (fidvidctl.bits.VID != vid) {
235 fidvidctl.bits.SGTC = latency;
236 fidvidctl.bits.VID = vid;
237 fidvidctl.bits.FIDC = 0;
238 fidvidctl.bits.VIDC = 1;
239 wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
240 }
241}
242
243
244static void change_speed (unsigned int index)
245{
246 u8 fid, vid;
247 struct cpufreq_freqs freqs;
248 union msr_fidvidstatus fidvidstatus;
249 int cfid;
250
251 /* fid are the lower 8 bits of the index we stored into
252 * the cpufreq frequency table in powernow_decode_bios,
253 * vid are the upper 8 bits.
254 */
255
256 fid = powernow_table[index].index & 0xFF;
257 vid = (powernow_table[index].index & 0xFF00) >> 8;
258
259 freqs.cpu = 0;
260
261 rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
262 cfid = fidvidstatus.bits.CFID;
263 freqs.old = fsb * fid_codes[cfid] / 10;
264
265 freqs.new = powernow_table[index].frequency;
266
267 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
268
269 /* Now do the magic poking into the MSRs. */
270
271 if (have_a0 == 1) /* A0 errata 5 */
272 local_irq_disable();
273
274 if (freqs.old > freqs.new) {
275 /* Going down, so change FID first */
276 change_FID(fid);
277 change_VID(vid);
278 } else {
279 /* Going up, so change VID first */
280 change_VID(vid);
281 change_FID(fid);
282 }
283
284
285 if (have_a0 == 1)
286 local_irq_enable();
287
288 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
289}
290
291
292#ifdef CONFIG_X86_POWERNOW_K7_ACPI
293
294static struct acpi_processor_performance *acpi_processor_perf;
295
296static int powernow_acpi_init(void)
297{
298 int i;
299 int retval = 0;
300 union powernow_acpi_control_t pc;
301
302 if (acpi_processor_perf != NULL && powernow_table != NULL) {
303 retval = -EINVAL;
304 goto err0;
305 }
306
307 acpi_processor_perf = kmalloc(sizeof(struct acpi_processor_performance),
308 GFP_KERNEL);
309
310 if (!acpi_processor_perf) {
311 retval = -ENOMEM;
312 goto err0;
313 }
314
315 memset(acpi_processor_perf, 0, sizeof(struct acpi_processor_performance));
316
317 if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
318 retval = -EIO;
319 goto err1;
320 }
321
322 if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) {
323 retval = -ENODEV;
324 goto err2;
325 }
326
327 if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) {
328 retval = -ENODEV;
329 goto err2;
330 }
331
332 number_scales = acpi_processor_perf->state_count;
333
334 if (number_scales < 2) {
335 retval = -ENODEV;
336 goto err2;
337 }
338
339 powernow_table = kmalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
340 if (!powernow_table) {
341 retval = -ENOMEM;
342 goto err2;
343 }
344
345 memset(powernow_table, 0, ((number_scales + 1) * sizeof(struct cpufreq_frequency_table)));
346
347 pc.val = (unsigned long) acpi_processor_perf->states[0].control;
348 for (i = 0; i < number_scales; i++) {
349 u8 fid, vid;
350 unsigned int speed;
351
352 pc.val = (unsigned long) acpi_processor_perf->states[i].control;
353 dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
354 i,
355 (u32) acpi_processor_perf->states[i].core_frequency,
356 (u32) acpi_processor_perf->states[i].power,
357 (u32) acpi_processor_perf->states[i].transition_latency,
358 (u32) acpi_processor_perf->states[i].control,
359 pc.bits.sgtc);
360
361 vid = pc.bits.vid;
362 fid = pc.bits.fid;
363
364 powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
365 powernow_table[i].index = fid; /* lower 8 bits */
366 powernow_table[i].index |= (vid << 8); /* upper 8 bits */
367
368 speed = powernow_table[i].frequency;
369
370 if ((fid_codes[fid] % 10)==5) {
371 if (have_a0 == 1)
372 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
373 }
374
375 dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
376 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
377 fid_codes[fid] % 10, speed/1000, vid,
378 mobile_vid_table[vid]/1000,
379 mobile_vid_table[vid]%1000);
380
381 if (latency < pc.bits.sgtc)
382 latency = pc.bits.sgtc;
383
384 if (speed < minimum_speed)
385 minimum_speed = speed;
386 if (speed > maximum_speed)
387 maximum_speed = speed;
388 }
389
390 powernow_table[i].frequency = CPUFREQ_TABLE_END;
391 powernow_table[i].index = 0;
392
393 /* notify BIOS that we exist */
394 acpi_processor_notify_smm(THIS_MODULE);
395
396 return 0;
397
398err2:
399 acpi_processor_unregister_performance(acpi_processor_perf, 0);
400err1:
401 kfree(acpi_processor_perf);
402err0:
403 printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
404 acpi_processor_perf = NULL;
405 return retval;
406}
407#else
408static int powernow_acpi_init(void)
409{
410 printk(KERN_INFO PFX "no ACPI processor support found."
411 " Please recompile your kernel with ACPI processor support\n");
412 return -EINVAL;
413}
414#endif
415
416static int powernow_decode_bios (int maxfid, int startvid)
417{
418 struct psb_s *psb;
419 struct pst_s *pst;
420 unsigned int i, j;
421 unsigned char *p;
422 unsigned int etuple;
423 unsigned int ret;
424
425 etuple = cpuid_eax(0x80000001);
426
427 for (i=0xC0000; i < 0xffff0 ; i+=16) {
428
429 p = phys_to_virt(i);
430
431 if (memcmp(p, "AMDK7PNOW!", 10) == 0){
432 dprintk ("Found PSB header at %p\n", p);
433 psb = (struct psb_s *) p;
434 dprintk ("Table version: 0x%x\n", psb->tableversion);
435 if (psb->tableversion != 0x12) {
436 printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n");
437 return -ENODEV;
438 }
439
440 dprintk ("Flags: 0x%x\n", psb->flags);
441 if ((psb->flags & 1)==0) {
442 dprintk ("Mobile voltage regulator\n");
443 } else {
444 dprintk ("Desktop voltage regulator\n");
445 }
446
447 latency = psb->settlingtime;
448 if (latency < 100) {
449 printk (KERN_INFO PFX "BIOS set settling time to %d microseconds."
450 " Should be at least 100. Correcting.\n", latency);
451 latency = 100;
452 }
453 dprintk ("Settling Time: %d microseconds.\n", psb->settlingtime);
454 dprintk ("Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst);
455
456 p += sizeof (struct psb_s);
457
458 pst = (struct pst_s *) p;
459
460 for (i = 0 ; i <psb->numpst; i++) {
461 pst = (struct pst_s *) p;
462 number_scales = pst->numpstates;
463
464 if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) &&
465 (maxfid==pst->maxfid) && (startvid==pst->startvid))
466 {
467 dprintk ("PST:%d (@%p)\n", i, pst);
468 dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
469 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
470
471 ret = get_ranges ((char *) pst + sizeof (struct pst_s));
472 return ret;
473
474 } else {
475 p = (char *) pst + sizeof (struct pst_s);
476 for (j=0 ; j < number_scales; j++)
477 p+=2;
478 }
479 }
480 printk (KERN_INFO PFX "No PST tables match this cpuid (0x%x)\n", etuple);
481 printk (KERN_INFO PFX "This is indicative of a broken BIOS.\n");
482
483 return -EINVAL;
484 }
485 p++;
486 }
487
488 return -ENODEV;
489}
490
491
492static int powernow_target (struct cpufreq_policy *policy,
493 unsigned int target_freq,
494 unsigned int relation)
495{
496 unsigned int newstate;
497
498 if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate))
499 return -EINVAL;
500
501 change_speed(newstate);
502
503 return 0;
504}
505
506
507static int powernow_verify (struct cpufreq_policy *policy)
508{
509 return cpufreq_frequency_table_verify(policy, powernow_table);
510}
511
512/*
513 * We use the fact that the bus frequency is somehow
514 * a multiple of 100000/3 khz, then we compute sgtc according
515 * to this multiple.
516 * That way, we more closely match how AMD intends this to work.
517 * We will then get the same kind of behaviour already tested under
518 * the "well-known" other OS.
519 */
520static int __init fixup_sgtc(void)
521{
522 unsigned int sgtc;
523 unsigned int m;
524
525 m = fsb / 3333;
526 if ((m % 10) >= 5)
527 m += 5;
528
529 m /= 10;
530
531 sgtc = 100 * m * latency;
532 sgtc = sgtc / 3;
533 if (sgtc > 0xfffff) {
534 printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
535 sgtc = 0xfffff;
536 }
537 return sgtc;
538}
539
540static unsigned int powernow_get(unsigned int cpu)
541{
542 union msr_fidvidstatus fidvidstatus;
543 unsigned int cfid;
544
545 if (cpu)
546 return 0;
547 rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
548 cfid = fidvidstatus.bits.CFID;
549
550 return (fsb * fid_codes[cfid] / 10);
551}
552
553
554static int __init acer_cpufreq_pst(struct dmi_system_id *d)
555{
556 printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident);
557 printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
558 printk(KERN_WARNING "cpufreq scaling has been disabled as a result of this.\n");
559 return 0;
560}
561
562/*
563 * Some Athlon laptops have really fucked PST tables.
564 * A BIOS update is all that can save them.
565 * Mention this, and disable cpufreq.
566 */
567static struct dmi_system_id __initdata powernow_dmi_table[] = {
568 {
569 .callback = acer_cpufreq_pst,
570 .ident = "Acer Aspire",
571 .matches = {
572 DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
573 DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
574 },
575 },
576 { }
577};
578
579static int __init powernow_cpu_init (struct cpufreq_policy *policy)
580{
581 union msr_fidvidstatus fidvidstatus;
582 int result;
583
584 if (policy->cpu != 0)
585 return -ENODEV;
586
587 rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
588
589 /* A K7 with powernow technology is set to max frequency by BIOS */
590 fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.MFID];
591 if (!fsb) {
592 printk(KERN_WARNING PFX "can not determine bus frequency\n");
593 return -EINVAL;
594 }
595 dprintk("FSB: %3d.%03d MHz\n", fsb/1000, fsb%1000);
596
597 if (dmi_check_system(powernow_dmi_table) || acpi_force) {
598 printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n");
599 result = powernow_acpi_init();
600 } else {
601 result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID);
602 if (result) {
603 printk (KERN_INFO PFX "Trying ACPI perflib\n");
604 maximum_speed = 0;
605 minimum_speed = -1;
606 latency = 0;
607 result = powernow_acpi_init();
608 if (result) {
609 printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
610 printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
611 }
612 } else {
613 /* SGTC use the bus clock as timer */
614 latency = fixup_sgtc();
615 printk(KERN_INFO PFX "SGTC: %d\n", latency);
616 }
617 }
618
619 if (result)
620 return result;
621
622 printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
623 minimum_speed/1000, maximum_speed/1000);
624
625 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
626
627 policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency);
628
629 policy->cur = powernow_get(0);
630
631 cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
632
633 return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
634}
635
636static int powernow_cpu_exit (struct cpufreq_policy *policy) {
637 cpufreq_frequency_table_put_attr(policy->cpu);
638
639#ifdef CONFIG_X86_POWERNOW_K7_ACPI
640 if (acpi_processor_perf) {
641 acpi_processor_unregister_performance(acpi_processor_perf, 0);
642 kfree(acpi_processor_perf);
643 }
644#endif
645
646 if (powernow_table)
647 kfree(powernow_table);
648
649 return 0;
650}
651
652static struct freq_attr* powernow_table_attr[] = {
653 &cpufreq_freq_attr_scaling_available_freqs,
654 NULL,
655};
656
657static struct cpufreq_driver powernow_driver = {
658 .verify = powernow_verify,
659 .target = powernow_target,
660 .get = powernow_get,
661 .init = powernow_cpu_init,
662 .exit = powernow_cpu_exit,
663 .name = "powernow-k7",
664 .owner = THIS_MODULE,
665 .attr = powernow_table_attr,
666};
667
668static int __init powernow_init (void)
669{
670 if (check_powernow()==0)
671 return -ENODEV;
672 return cpufreq_register_driver(&powernow_driver);
673}
674
675
676static void __exit powernow_exit (void)
677{
678 cpufreq_unregister_driver(&powernow_driver);
679}
680
681module_param(acpi_force, int, 0444);
682MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
683
684MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
685MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors.");
686MODULE_LICENSE ("GPL");
687
688late_initcall(powernow_init);
689module_exit(powernow_exit);
690
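
Editor's illustration (not part of the commit): powernow-k7 above packs each FID/VID pair into the 16-bit cpufreq table "index" field (FID in the lower 8 bits, VID in the upper 8). A standalone sketch with made-up example codes:

/*
 * Illustrative sketch only, not driver code: pack and unpack a FID/VID
 * pair the way get_ranges() and change_speed() do above.
 */
#include <stdio.h>

int main(void)
{
	unsigned char fid = 0x0c, vid = 0x11;	/* made-up example codes */
	unsigned int index;

	index = fid | (vid << 8);		/* pack, as in get_ranges() */

	/* unpack, as in change_speed() */
	printf("fid 0x%x vid 0x%x\n", index & 0xFF, (index & 0xFF00) >> 8);
	return 0;
}
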
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.h b/arch/i386/kernel/cpu/cpufreq/powernow-k7.h
new file mode 100644
index 000000000000..f8a63b3664e3
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.h
@@ -0,0 +1,44 @@
1/*
2 * $Id: powernow-k7.h,v 1.2 2003/02/10 18:26:01 davej Exp $
3 * (C) 2003 Dave Jones.
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 *
7 * AMD-specific information
8 *
9 */
10
11union msr_fidvidctl {
12 struct {
13 unsigned FID:5, // 4:0
14 reserved1:3, // 7:5
15 VID:5, // 12:8
16 reserved2:3, // 15:13
17 FIDC:1, // 16
18 VIDC:1, // 17
19 reserved3:2, // 19:18
20 FIDCHGRATIO:1, // 20
21 reserved4:11, // 31:21
22 SGTC:20, // 51:32
23 reserved5:12; // 63:52
24 } bits;
25 unsigned long long val;
26};
27
28union msr_fidvidstatus {
29 struct {
30 unsigned CFID:5, // 4:0
31 reserved1:3, // 7:5
32 SFID:5, // 12:8
33 reserved2:3, // 15:13
34 MFID:5, // 20:16
35 reserved3:11, // 31:21
36 CVID:5, // 36:32
37 reserved4:3, // 39:37
38 SVID:5, // 44:40
39 reserved5:3, // 47:45
40 MVID:5, // 52:48
41 reserved6:11; // 63:53
42 } bits;
43 unsigned long long val;
44};
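
Editor's illustration (not part of the commit): the bitfield unions above describe the layout of the K7 FID/VID MSRs. A standalone sketch that pulls a few of the same fields out of a raw 64-bit value with shifts and masks; the MSR value is a made-up example:

/*
 * Illustrative sketch only, not driver code: extracts CFID (bits 4:0),
 * MFID (bits 20:16) and SVID (bits 44:40) from a raw status-MSR value,
 * matching the msr_fidvidstatus layout declared above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x000009000010000cULL;	/* example raw MSR contents */

	unsigned int cfid = val & 0x1f;			/* bits 4:0   */
	unsigned int mfid = (val >> 16) & 0x1f;		/* bits 20:16 */
	unsigned int svid = (val >> 40) & 0x1f;		/* bits 44:40 */

	printf("CFID 0x%x MFID 0x%x SVID 0x%x\n", cfid, mfid, svid);
	return 0;
}
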
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
new file mode 100644
index 000000000000..a65ff7e32e5d
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -0,0 +1,1135 @@
1/*
2 * (c) 2003, 2004 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 *
7 * Support : paul.devriendt@amd.com
8 *
9 * Based on the powernow-k7.c module written by Dave Jones.
10 * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs
11 * (C) 2004 Dominik Brodowski <linux@brodo.de>
12 * (C) 2004 Pavel Machek <pavel@suse.cz>
13 * Licensed under the terms of the GNU GPL License version 2.
14 * Based upon datasheets & sample CPUs kindly provided by AMD.
15 *
16 * Valuable input gratefully received from Dave Jones, Pavel Machek,
17 * Dominik Brodowski, and others.
18 * Processor information obtained from Chapter 9 (Power and Thermal Management)
19 * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
20 * Opteron Processors" available for download from www.amd.com
21 *
22 * Tables for specific CPUs can be inferred from
23 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
24 */
25
26#include <linux/kernel.h>
27#include <linux/smp.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/cpufreq.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33
34#include <asm/msr.h>
35#include <asm/io.h>
36#include <asm/delay.h>
37
38#ifdef CONFIG_X86_POWERNOW_K8_ACPI
39#include <linux/acpi.h>
40#include <acpi/processor.h>
41#endif
42
43#define PFX "powernow-k8: "
44#define BFX PFX "BIOS error: "
45#define VERSION "version 1.00.09e"
46#include "powernow-k8.h"
47
48/* serialize freq changes */
49static DECLARE_MUTEX(fidvid_sem);
50
51static struct powernow_k8_data *powernow_data[NR_CPUS];
52
53/* Return a frequency in MHz, given an input fid */
54static u32 find_freq_from_fid(u32 fid)
55{
56 return 800 + (fid * 100);
57}
58
59/* Return a frequency in KHz, given an input fid */
60static u32 find_khz_freq_from_fid(u32 fid)
61{
62 return 1000 * find_freq_from_fid(fid);
63}
64
65/* Return a voltage in millivolts, given an input vid */
66static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
67{
68 return 1550-vid*25;
69}
70
71/* Return the vco fid for an input fid
72 *
73 * Each "low" fid has a corresponding "high" fid, and you can get to "low" fids
74 * only from the corresponding high fid. This returns the "high" fid corresponding
75 * to a "low" one.
76 */
77static u32 convert_fid_to_vco_fid(u32 fid)
78{
79 if (fid < HI_FID_TABLE_BOTTOM) {
80 return 8 + (2 * fid);
81 } else {
82 return fid;
83 }
84}
85
86/*
87 * Return 1 if the pending bit is set. Unless we just instructed the processor
88 * to transition to a new state, seeing this bit set is really bad news.
89 */
90static int pending_bit_stuck(void)
91{
92 u32 lo, hi;
93
94 rdmsr(MSR_FIDVID_STATUS, lo, hi);
95 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
96}
97
98/*
99 * Update the global current fid / vid values from the status msr.
100 * Returns 1 on error.
101 */
102static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
103{
104 u32 lo, hi;
105 u32 i = 0;
106
107 lo = MSR_S_LO_CHANGE_PENDING;
108 while (lo & MSR_S_LO_CHANGE_PENDING) {
109 if (i++ > 0x1000000) {
110 printk(KERN_ERR PFX "detected change pending stuck\n");
111 return 1;
112 }
113 rdmsr(MSR_FIDVID_STATUS, lo, hi);
114 }
115
116 data->currvid = hi & MSR_S_HI_CURRENT_VID;
117 data->currfid = lo & MSR_S_LO_CURRENT_FID;
118
119 return 0;
120}
121
122/* the isochronous relief time */
123static void count_off_irt(struct powernow_k8_data *data)
124{
125 udelay((1 << data->irt) * 10);
126 return;
127}
128
129/* the voltage stabilization time */
130static void count_off_vst(struct powernow_k8_data *data)
131{
132 udelay(data->vstable * VST_UNITS_20US);
133 return;
134}
135
136/* need to init the control msr to a safe value (for each cpu) */
137static void fidvid_msr_init(void)
138{
139 u32 lo, hi;
140 u8 fid, vid;
141
142 rdmsr(MSR_FIDVID_STATUS, lo, hi);
143 vid = hi & MSR_S_HI_CURRENT_VID;
144 fid = lo & MSR_S_LO_CURRENT_FID;
145 lo = fid | (vid << MSR_C_LO_VID_SHIFT);
146 hi = MSR_C_HI_STP_GNT_BENIGN;
147 dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
148 wrmsr(MSR_FIDVID_CTL, lo, hi);
149}
150
151
152/* write the new fid value along with the other control fields to the msr */
153static int write_new_fid(struct powernow_k8_data *data, u32 fid)
154{
155 u32 lo;
156 u32 savevid = data->currvid;
157
158 if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
159 printk(KERN_ERR PFX "internal error - overflow on fid write\n");
160 return 1;
161 }
162
163 lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
164
165 dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
166 fid, lo, data->plllock * PLL_LOCK_CONVERSION);
167
168 wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
169
170 if (query_current_values_with_pending_wait(data))
171 return 1;
172
173 count_off_irt(data);
174
175 if (savevid != data->currvid) {
176 printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n",
177 savevid, data->currvid);
178 return 1;
179 }
180
181 if (fid != data->currfid) {
182 printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
183 data->currfid);
184 return 1;
185 }
186
187 return 0;
188}
189
190/* Write a new vid to the hardware */
191static int write_new_vid(struct powernow_k8_data *data, u32 vid)
192{
193 u32 lo;
194 u32 savefid = data->currfid;
195
196 if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
197 printk(KERN_ERR PFX "internal error - overflow on vid write\n");
198 return 1;
199 }
200
201 lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID;
202
203 dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
204 vid, lo, STOP_GRANT_5NS);
205
206 wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
207
208 if (query_current_values_with_pending_wait(data))
209 return 1;
210
211 if (savefid != data->currfid) {
212 printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
213 savefid, data->currfid);
214 return 1;
215 }
216
217 if (vid != data->currvid) {
218 printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid,
219 data->currvid);
220 return 1;
221 }
222
223 return 0;
224}
225
226/*
227 * Reduce the vid toward reqvid, stepping down by at most "step" per call.
228 * Decreasing vid codes represent increasing voltages:
229 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off.
230 */
231static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step)
232{
233 if ((data->currvid - reqvid) > step)
234 reqvid = data->currvid - step;
235
236 if (write_new_vid(data, reqvid))
237 return 1;
238
239 count_off_vst(data);
240
241 return 0;
242}
243
244/* Change the fid and vid, by the 3 phases. */
245static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid)
246{
247 if (core_voltage_pre_transition(data, reqvid))
248 return 1;
249
250 if (core_frequency_transition(data, reqfid))
251 return 1;
252
253 if (core_voltage_post_transition(data, reqvid))
254 return 1;
255
256 if (query_current_values_with_pending_wait(data))
257 return 1;
258
259 if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
260 printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
261 smp_processor_id(),
262 reqfid, reqvid, data->currfid, data->currvid);
263 return 1;
264 }
265
266 dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
267 smp_processor_id(), data->currfid, data->currvid);
268
269 return 0;
270}
271
272/* Phase 1 - core voltage transition ... setup voltage */
273static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid)
274{
275 u32 rvosteps = data->rvo;
276 u32 savefid = data->currfid;
277
278 dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
279 smp_processor_id(),
280 data->currfid, data->currvid, reqvid, data->rvo);
281
282 while (data->currvid > reqvid) {
283 dprintk("ph1: curr 0x%x, req vid 0x%x\n",
284 data->currvid, reqvid);
285 if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
286 return 1;
287 }
288
289 while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
290 if (data->currvid == 0) {
291 rvosteps = 0;
292 } else {
293 dprintk("ph1: changing vid for rvo, req 0x%x\n",
294 data->currvid - 1);
295 if (decrease_vid_code_by_step(data, data->currvid - 1, 1))
296 return 1;
297 rvosteps--;
298 }
299 }
300
301 if (query_current_values_with_pending_wait(data))
302 return 1;
303
304 if (savefid != data->currfid) {
305 printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid);
306 return 1;
307 }
308
309 dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n",
310 data->currfid, data->currvid);
311
312 return 0;
313}
314
315/* Phase 2 - core frequency transition */
316static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
317{
318 u32 vcoreqfid, vcocurrfid, vcofiddiff, savevid = data->currvid;
319
320 if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
321 printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n",
322 reqfid, data->currfid);
323 return 1;
324 }
325
326 if (data->currfid == reqfid) {
327 printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid);
328 return 0;
329 }
330
331 dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
332 smp_processor_id(),
333 data->currfid, data->currvid, reqfid);
334
335 vcoreqfid = convert_fid_to_vco_fid(reqfid);
336 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
337 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
338 : vcoreqfid - vcocurrfid;
339
340 while (vcofiddiff > 2) {
341 if (reqfid > data->currfid) {
342 if (data->currfid > LO_FID_TABLE_TOP) {
343 if (write_new_fid(data, data->currfid + 2)) {
344 return 1;
345 }
346 } else {
347 if (write_new_fid
348 (data, 2 + convert_fid_to_vco_fid(data->currfid))) {
349 return 1;
350 }
351 }
352 } else {
353 if (write_new_fid(data, data->currfid - 2))
354 return 1;
355 }
356
357 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
358 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
359 : vcoreqfid - vcocurrfid;
360 }
361
362 if (write_new_fid(data, reqfid))
363 return 1;
364
365 if (query_current_values_with_pending_wait(data))
366 return 1;
367
368 if (data->currfid != reqfid) {
369 printk(KERN_ERR PFX
370 "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
371 data->currfid, reqfid);
372 return 1;
373 }
374
375 if (savevid != data->currvid) {
376 printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
377 savevid, data->currvid);
378 return 1;
379 }
380
381 dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n",
382 data->currfid, data->currvid);
383
384 return 0;
385}
386
387/* Phase 3 - core voltage transition flow ... jump to the final vid. */
388static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid)
389{
390 u32 savefid = data->currfid;
391 u32 savereqvid = reqvid;
392
393 dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
394 smp_processor_id(),
395 data->currfid, data->currvid);
396
397 if (reqvid != data->currvid) {
398 if (write_new_vid(data, reqvid))
399 return 1;
400
401 if (savefid != data->currfid) {
402 printk(KERN_ERR PFX
403 "ph3: bad fid change, save 0x%x, curr 0x%x\n",
404 savefid, data->currfid);
405 return 1;
406 }
407
408 if (data->currvid != reqvid) {
409 printk(KERN_ERR PFX
410 "ph3: failed vid transition, req 0x%x, curr 0x%x\n",
411 reqvid, data->currvid);
412 return 1;
413 }
414 }
415
416 if (query_current_values_with_pending_wait(data))
417 return 1;
418
419 if (savereqvid != data->currvid) {
420 dprintk("ph3 failed, currvid 0x%x\n", data->currvid);
421 return 1;
422 }
423
424 if (savefid != data->currfid) {
425 dprintk("ph3 failed, currfid changed 0x%x\n",
426 data->currfid);
427 return 1;
428 }
429
430 dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n",
431 data->currfid, data->currvid);
432
433 return 0;
434}
435
436static int check_supported_cpu(unsigned int cpu)
437{
438 cpumask_t oldmask = CPU_MASK_ALL;
439 u32 eax, ebx, ecx, edx;
440 unsigned int rc = 0;
441
442 oldmask = current->cpus_allowed;
443 set_cpus_allowed(current, cpumask_of_cpu(cpu));
444 schedule();
445
446 if (smp_processor_id() != cpu) {
447 printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
448 goto out;
449 }
450
451 if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
452 goto out;
453
454 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
455 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
456 ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
457 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_E)) {
458 printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
459 goto out;
460 }
461
462 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
463 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
464 printk(KERN_INFO PFX
465 "No frequency change capabilities detected\n");
466 goto out;
467 }
468
469 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
470 if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) {
471 printk(KERN_INFO PFX "Power state transitions not supported\n");
472 goto out;
473 }
474
475 rc = 1;
476
477out:
478 set_cpus_allowed(current, oldmask);
479 schedule();
480 return rc;
481
482}
483
484static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
485{
486 unsigned int j;
487 u8 lastfid = 0xff;
488
489 for (j = 0; j < data->numps; j++) {
490 if (pst[j].vid > LEAST_VID) {
491 printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid);
492 return -EINVAL;
493 }
494 if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */
495 printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j);
496 return -ENODEV;
497 }
498 if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */
499 printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j);
500 return -ENODEV;
501 }
502 if ((pst[j].fid > MAX_FID)
503 || (pst[j].fid & 1)
504 || (j && (pst[j].fid < HI_FID_TABLE_BOTTOM))) {
505 /* Only first fid is allowed to be in "low" range */
506 printk(KERN_ERR PFX "two low fids - %d : 0x%x\n", j, pst[j].fid);
507 return -EINVAL;
508 }
509 if (pst[j].fid < lastfid)
510 lastfid = pst[j].fid;
511 }
512 if (lastfid & 1) {
513 printk(KERN_ERR PFX "lastfid invalid\n");
514 return -EINVAL;
515 }
516 if (lastfid > LO_FID_TABLE_TOP)
517 printk(KERN_INFO PFX "first fid not from lo freq table\n");
518
519 return 0;
520}
521
522static void print_basics(struct powernow_k8_data *data)
523{
524 int j;
525 for (j = 0; j < data->numps; j++) {
526 if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID)
527 printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x (%d mV)\n", j,
528 data->powernow_table[j].index & 0xff,
529 data->powernow_table[j].frequency/1000,
530 data->powernow_table[j].index >> 8,
531 find_millivolts_from_vid(data, data->powernow_table[j].index >> 8));
532 }
533 if (data->batps)
534 printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps);
535}
536
537static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
538{
539 struct cpufreq_frequency_table *powernow_table;
540 unsigned int j;
541
542 if (data->batps) { /* use ACPI support to get full speed on mains power */
543 printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range)\n", data->batps);
544 data->numps = data->batps;
545 }
546
547 for ( j=1; j<data->numps; j++ ) {
548 if (pst[j-1].fid >= pst[j].fid) {
549 printk(KERN_ERR PFX "PST out of sequence\n");
550 return -EINVAL;
551 }
552 }
553
554 if (data->numps < 2) {
555 printk(KERN_ERR PFX "no p states to transition\n");
556 return -ENODEV;
557 }
558
559 if (check_pst_table(data, pst, maxvid))
560 return -EINVAL;
561
562 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
563 * (data->numps + 1)), GFP_KERNEL);
564 if (!powernow_table) {
565 printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
566 return -ENOMEM;
567 }
568
569 for (j = 0; j < data->numps; j++) {
570 powernow_table[j].index = pst[j].fid; /* lower 8 bits */
571 powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
572 powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid);
573 }
574 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
575 powernow_table[data->numps].index = 0;
576
577 if (query_current_values_with_pending_wait(data)) {
578 kfree(powernow_table);
579 return -EIO;
580 }
581
582 dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
583 data->powernow_table = powernow_table;
584 print_basics(data);
585
586 for (j = 0; j < data->numps; j++)
587 if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
588 return 0;
589
590 dprintk("currfid/vid do not match PST, ignoring\n");
591 return 0;
592}
593
594/* Find and validate the PSB/PST table in BIOS. */
595static int find_psb_table(struct powernow_k8_data *data)
596{
597 struct psb_s *psb;
598 unsigned int i;
599 u32 mvs;
600 u8 maxvid;
601 u32 cpst = 0;
602 u32 thiscpuid;
603
604 for (i = 0xc0000; i < 0xffff0; i += 0x10) {
605 /* Scan BIOS looking for the signature. */
606 /* It can not be at ffff0 - it is too big. */
607
608 psb = phys_to_virt(i);
609 if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
610 continue;
611
612 dprintk("found PSB header at 0x%p\n", psb);
613
614 dprintk("table vers: 0x%x\n", psb->tableversion);
615 if (psb->tableversion != PSB_VERSION_1_4) {
616 printk(KERN_INFO BFX "PSB table is not v1.4\n");
617 return -ENODEV;
618 }
619
620 dprintk("flags: 0x%x\n", psb->flags1);
621 if (psb->flags1) {
622 printk(KERN_ERR BFX "unknown flags\n");
623 return -ENODEV;
624 }
625
626 data->vstable = psb->vstable;
627 dprintk("voltage stabilization time: %d(*20us)\n", data->vstable);
628
629 dprintk("flags2: 0x%x\n", psb->flags2);
630 data->rvo = psb->flags2 & 3;
631 data->irt = ((psb->flags2) >> 2) & 3;
632 mvs = ((psb->flags2) >> 4) & 3;
633 data->vidmvs = 1 << mvs;
634 data->batps = ((psb->flags2) >> 6) & 3;
635
636 dprintk("ramp voltage offset: %d\n", data->rvo);
637 dprintk("isochronous relief time: %d\n", data->irt);
638 dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
639
640 dprintk("numpst: 0x%x\n", psb->num_tables);
641 cpst = psb->num_tables;
642 if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){
643 thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
644 if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0) ) {
645 cpst = 1;
646 }
647 }
648 if (cpst != 1) {
649 printk(KERN_ERR BFX "numpst must be 1\n");
650 return -ENODEV;
651 }
652
653 data->plllock = psb->plllocktime;
654 dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
655 dprintk("maxfid: 0x%x\n", psb->maxfid);
656 dprintk("maxvid: 0x%x\n", psb->maxvid);
657 maxvid = psb->maxvid;
658
659 data->numps = psb->numps;
660 dprintk("numpstates: 0x%x\n", data->numps);
661 return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
662 }
663 /*
664 * If you see this message, complain to BIOS manufacturer. If
665 * he tells you "we do not support Linux" or some similar
666 * nonsense, remember that Windows 2000 uses the same legacy
667 * mechanism that the old Linux PSB driver uses. Tell them it
668 * is broken with Windows 2000.
669 *
670 * The reference to the AMD documentation is chapter 9 in the
671 * BIOS and Kernel Developer's Guide, which is available on
672 * www.amd.com
673 */
674 printk(KERN_ERR PFX "BIOS error - no PSB\n");
675 return -ENODEV;
676}
677
678#ifdef CONFIG_X86_POWERNOW_K8_ACPI
679static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
680{
681 if (!data->acpi_data.state_count)
682 return;
683
684 data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
685 data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
686 data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
687 data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
688 data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
689}
690
691static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
692{
693 int i;
694 int cntlofreq = 0;
695 struct cpufreq_frequency_table *powernow_table;
696
697 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
698 dprintk("register performance failed\n");
699 return -EIO;
700 }
701
702 /* verify the data contained in the ACPI structures */
703 if (data->acpi_data.state_count <= 1) {
704 dprintk("No ACPI P-States\n");
705 goto err_out;
706 }
707
708 if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
709 (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
710 dprintk("Invalid control/status registers (%x - %x)\n",
711 data->acpi_data.control_register.space_id,
712 data->acpi_data.status_register.space_id);
713 goto err_out;
714 }
715
716 /* fill in data->powernow_table */
717 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
718 * (data->acpi_data.state_count + 1)), GFP_KERNEL);
719 if (!powernow_table) {
720 dprintk("powernow_table memory alloc failure\n");
721 goto err_out;
722 }
723
724 for (i = 0; i < data->acpi_data.state_count; i++) {
725 u32 fid = data->acpi_data.states[i].control & FID_MASK;
726 u32 vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
727
728 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
729
730 powernow_table[i].index = fid; /* lower 8 bits */
731 powernow_table[i].index |= (vid << 8); /* upper 8 bits */
732 powernow_table[i].frequency = find_khz_freq_from_fid(fid);
733
734 /* verify frequency is OK */
735 if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) ||
736 (powernow_table[i].frequency < (MIN_FREQ * 1000))) {
737 dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency);
738 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
739 continue;
740 }
741
742 /* verify voltage is OK - BIOSs are using "off" to indicate invalid */
743 if (vid == 0x1f) {
744 dprintk("invalid vid %u, ignoring\n", vid);
745 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
746 continue;
747 }
748
749 if (fid < HI_FID_TABLE_BOTTOM) {
750 if (cntlofreq) {
751 /* if both entries are the same, ignore this
752 * one...
753 */
754 if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
755 (powernow_table[i].index != powernow_table[cntlofreq].index)) {
756 printk(KERN_ERR PFX "Too many lo freq table entries\n");
757 goto err_out_mem;
758 }
759
760 dprintk("double low frequency table entry, ignoring it.\n");
761 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
762 continue;
763 } else
764 cntlofreq = i;
765 }
766
767 if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
768 printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
769 powernow_table[i].frequency,
770 (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
771 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
772 continue;
773 }
774 }
775
776 powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
777 powernow_table[data->acpi_data.state_count].index = 0;
778 data->powernow_table = powernow_table;
779
780 /* fill in data */
781 data->numps = data->acpi_data.state_count;
782 print_basics(data);
783 powernow_k8_acpi_pst_values(data, 0);
784
785 /* notify BIOS that we exist */
786 acpi_processor_notify_smm(THIS_MODULE);
787
788 return 0;
789
790err_out_mem:
791 kfree(powernow_table);
792
793err_out:
794 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
795
796 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
797 data->acpi_data.state_count = 0;
798
799 return -ENODEV;
800}
801
802static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
803{
804 if (data->acpi_data.state_count)
805 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
806}
807
808#else
809static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
810static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
811static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
812#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
813
814/* Take a frequency, and issue the fid/vid transition command */
815static int transition_frequency(struct powernow_k8_data *data, unsigned int index)
816{
817 u32 fid;
818 u32 vid;
819 int res;
820 struct cpufreq_freqs freqs;
821
822 dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
823
824 /* fid are the lower 8 bits of the index we stored into
825 * the cpufreq frequency table in find_psb_table, vid are
826 * the upper 8 bits.
827 */
828
829 fid = data->powernow_table[index].index & 0xFF;
830 vid = (data->powernow_table[index].index & 0xFF00) >> 8;
831
832 dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
833
834 if (query_current_values_with_pending_wait(data))
835 return 1;
836
837 if ((data->currvid == vid) && (data->currfid == fid)) {
838 dprintk("target matches current values (fid 0x%x, vid 0x%x)\n",
839 fid, vid);
840 return 0;
841 }
842
843 if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) {
844 printk("ignoring illegal change in lo freq table - 0x%x to 0x%x\n",
845 data->currfid, fid);
846 return 1;
847 }
848
849 dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
850 smp_processor_id(), fid, vid);
851
852 freqs.cpu = data->cpu;
853
854 freqs.old = find_khz_freq_from_fid(data->currfid);
855 freqs.new = find_khz_freq_from_fid(fid);
856 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
857
858 down(&fidvid_sem);
859 res = transition_fid_vid(data, fid, vid);
860 up(&fidvid_sem);
861
862 freqs.new = find_khz_freq_from_fid(data->currfid);
863 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
864
865 return res;
866}
867
868/* Driver entry point to switch to the target frequency */
869static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
870{
871 cpumask_t oldmask = CPU_MASK_ALL;
872 struct powernow_k8_data *data = powernow_data[pol->cpu];
873 u32 checkfid = data->currfid;
874 u32 checkvid = data->currvid;
875 unsigned int newstate;
876 int ret = -EIO;
877
878 /* only run on specific CPU from here on */
879 oldmask = current->cpus_allowed;
880 set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
881 schedule();
882
883 if (smp_processor_id() != pol->cpu) {
884 printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
885 goto err_out;
886 }
887
888 if (pending_bit_stuck()) {
889 printk(KERN_ERR PFX "failing targ, change pending bit set\n");
890 goto err_out;
891 }
892
893 dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
894 pol->cpu, targfreq, pol->min, pol->max, relation);
895
896 if (query_current_values_with_pending_wait(data)) {
897 ret = -EIO;
898 goto err_out;
899 }
900
901 dprintk("targ: curr fid 0x%x, vid 0x%x\n",
902 data->currfid, data->currvid);
903
904 if ((checkvid != data->currvid) || (checkfid != data->currfid)) {
905 printk(KERN_ERR PFX
906 "error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
907 checkfid, data->currfid, checkvid, data->currvid);
908 }
909
910 if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate))
911 goto err_out;
912
913 powernow_k8_acpi_pst_values(data, newstate);
914
915 if (transition_frequency(data, newstate)) {
916 printk(KERN_ERR PFX "transition frequency failed\n");
917 ret = 1;
918 goto err_out;
919 }
920
921 pol->cur = find_khz_freq_from_fid(data->currfid);
922 ret = 0;
923
924err_out:
925 set_cpus_allowed(current, oldmask);
926 schedule();
927
928 return ret;
929}
930
931/* Driver entry point to verify the policy and range of frequencies */
932static int powernowk8_verify(struct cpufreq_policy *pol)
933{
934 struct powernow_k8_data *data = powernow_data[pol->cpu];
935
936 return cpufreq_frequency_table_verify(pol, data->powernow_table);
937}
938
939/* per CPU init entry point to the driver */
940static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
941{
942 struct powernow_k8_data *data;
943 cpumask_t oldmask = CPU_MASK_ALL;
944 int rc;
945
946 if (!check_supported_cpu(pol->cpu))
947 return -ENODEV;
948
949 data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
950 if (!data) {
951 printk(KERN_ERR PFX "unable to alloc powernow_k8_data\n");
952 return -ENOMEM;
953 }
954 memset(data,0,sizeof(struct powernow_k8_data));
955
956 data->cpu = pol->cpu;
957
958 if (powernow_k8_cpu_init_acpi(data)) {
959 /*
960 * Use the PSB BIOS structure. This is only available on
961 * an UP version, and is deprecated by AMD.
962 */
963
964 if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
965 printk(KERN_INFO PFX "MP systems not supported by PSB BIOS structure\n");
966 kfree(data);
967 return -ENODEV;
968 }
969 if (pol->cpu != 0) {
970 printk(KERN_ERR PFX "init not cpu 0\n");
971 kfree(data);
972 return -ENODEV;
973 }
974 rc = find_psb_table(data);
975 if (rc) {
976 kfree(data);
977 return -ENODEV;
978 }
979 }
980
981 /* only run on specific CPU from here on */
982 oldmask = current->cpus_allowed;
983 set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
984 schedule();
985
986 if (smp_processor_id() != pol->cpu) {
987 printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
988 goto err_out;
989 }
990
991 if (pending_bit_stuck()) {
992 printk(KERN_ERR PFX "failing init, change pending bit set\n");
993 goto err_out;
994 }
995
996 if (query_current_values_with_pending_wait(data))
997 goto err_out;
998
999 fidvid_msr_init();
1000
1001 /* run on any CPU again */
1002 set_cpus_allowed(current, oldmask);
1003 schedule();
1004
1005 pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
1006
1007 /* Take a crude guess here.
1008 * That guess was in microseconds, so multiply by 1000 */
1009 pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
1010 + (3 * (1 << data->irt) * 10)) * 1000;
1011
1012 pol->cur = find_khz_freq_from_fid(data->currfid);
1013 dprintk("policy current frequency %d kHz\n", pol->cur);
1014
1015 /* min/max the cpu is capable of */
1016 if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
1017 printk(KERN_ERR PFX "invalid powernow_table\n");
1018 powernow_k8_cpu_exit_acpi(data);
1019 kfree(data->powernow_table);
1020 kfree(data);
1021 return -EINVAL;
1022 }
1023
1024 cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
1025
1026 printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
1027 data->currfid, data->currvid);
1028
1029 powernow_data[pol->cpu] = data;
1030
1031 return 0;
1032
1033err_out:
1034 set_cpus_allowed(current, oldmask);
1035 schedule();
1036 powernow_k8_cpu_exit_acpi(data);
1037
1038 kfree(data);
1039 return -ENODEV;
1040}
1041
1042static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
1043{
1044 struct powernow_k8_data *data = powernow_data[pol->cpu];
1045
1046 if (!data)
1047 return -EINVAL;
1048
1049 powernow_k8_cpu_exit_acpi(data);
1050
1051 cpufreq_frequency_table_put_attr(pol->cpu);
1052
1053 kfree(data->powernow_table);
1054 kfree(data);
1055
1056 return 0;
1057}
1058
1059static unsigned int powernowk8_get (unsigned int cpu)
1060{
1061 struct powernow_k8_data *data = powernow_data[cpu];
1062 cpumask_t oldmask = current->cpus_allowed;
1063 unsigned int khz = 0;
1064
1065 set_cpus_allowed(current, cpumask_of_cpu(cpu));
1066 if (smp_processor_id() != cpu) {
1067 printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
1068 set_cpus_allowed(current, oldmask);
1069 return 0;
1070 }
1071 preempt_disable();
1072
1073 if (query_current_values_with_pending_wait(data))
1074 goto out;
1075
1076 khz = find_khz_freq_from_fid(data->currfid);
1077
1078 out:
1079 preempt_enable_no_resched();
1080 set_cpus_allowed(current, oldmask);
1081
1082 return khz;
1083}
1084
1085static struct freq_attr* powernow_k8_attr[] = {
1086 &cpufreq_freq_attr_scaling_available_freqs,
1087 NULL,
1088};
1089
1090static struct cpufreq_driver cpufreq_amd64_driver = {
1091 .verify = powernowk8_verify,
1092 .target = powernowk8_target,
1093 .init = powernowk8_cpu_init,
1094 .exit = __devexit_p(powernowk8_cpu_exit),
1095 .get = powernowk8_get,
1096 .name = "powernow-k8",
1097 .owner = THIS_MODULE,
1098 .attr = powernow_k8_attr,
1099};
1100
1101/* driver entry point for init */
1102static int __init powernowk8_init(void)
1103{
1104 unsigned int i, supported_cpus = 0;
1105
1106 for (i=0; i<NR_CPUS; i++) {
1107 if (!cpu_online(i))
1108 continue;
1109 if (check_supported_cpu(i))
1110 supported_cpus++;
1111 }
1112
1113 if (supported_cpus == num_online_cpus()) {
1114 printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron processors (" VERSION ")\n",
1115 supported_cpus);
1116 return cpufreq_register_driver(&cpufreq_amd64_driver);
1117 }
1118
1119 return -ENODEV;
1120}
1121
1122/* driver entry point for term */
1123static void __exit powernowk8_exit(void)
1124{
1125 dprintk("exit\n");
1126
1127 cpufreq_unregister_driver(&cpufreq_amd64_driver);
1128}
1129
1130MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
1131MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
1132MODULE_LICENSE("GPL");
1133
1134late_initcall(powernowk8_init);
1135module_exit(powernowk8_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
new file mode 100644
index 000000000000..63ebc8470f52
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -0,0 +1,176 @@
1/*
2 * (c) 2003, 2004 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 */
7
8struct powernow_k8_data {
9 unsigned int cpu;
10
11 u32 numps; /* number of p-states */
12 u32 batps; /* number of p-states supported on battery */
13
14 /* these values are constant when the PSB is used to determine
15 * vid/fid pairings, but are modified during the ->target() call
16 * when ACPI is used */
17 u32 rvo; /* ramp voltage offset */
18 u32 irt; /* isochronous relief time */
19 u32 vidmvs; /* usable value calculated from mvs */
20 u32 vstable; /* voltage stabilization time, units 20 us */
21 u32 plllock; /* pll lock time, units 1 us */
22
23 /* keep track of the current fid / vid */
24 u32 currvid, currfid;
25
26 /* the powernow_table includes all frequency and vid/fid pairings:
27 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
28 * frequency is in kHz */
29 struct cpufreq_frequency_table *powernow_table;
30
31#ifdef CONFIG_X86_POWERNOW_K8_ACPI
32 /* the acpi table needs to be kept. it's only available if ACPI was
33 * used to determine valid frequency/vid/fid states */
34 struct acpi_processor_performance acpi_data;
35#endif
36};
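/*
 * A minimal sketch of the index packing described in the struct comment
 * above, plus the fid-to-frequency rule implied by MIN_FREQ and
 * MIN_FREQ_RESOLUTION further down; helper names are illustrative only and
 * are not part of this interface.
 */
static inline u32 pnk8_pack_index(u32 fid, u32 vid)
{
	return fid | (vid << 8);		/* fid: bits 7:0, vid: bits 15:8 */
}

static inline u32 pnk8_fid_to_khz(u32 fid)
{
	return 1000 * (800 + fid * 100);	/* assumed: 800 MHz base, 100 MHz per fid step */
}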
37
38
39/* processor's cpuid instruction support */
40#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
41#define CPUID_XFAM 0x0ff00000 /* extended family */
42#define CPUID_XFAM_K8 0
43#define CPUID_XMOD 0x000f0000 /* extended model */
44#define CPUID_XMOD_REV_E 0x00020000
45#define CPUID_USE_XFAM_XMOD 0x00000f00
46#define CPUID_GET_MAX_CAPABILITIES 0x80000000
47#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
48#define P_STATE_TRANSITION_CAPABLE 6
49
50/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
51/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
52/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
53/* the register number is placed in ecx, and the data is returned in edx:eax. */
54
55#define MSR_FIDVID_CTL 0xc0010041
56#define MSR_FIDVID_STATUS 0xc0010042
57
58/* Field definitions within the FID VID Low Control MSR : */
59#define MSR_C_LO_INIT_FID_VID 0x00010000
60#define MSR_C_LO_NEW_VID 0x00001f00
61#define MSR_C_LO_NEW_FID 0x0000002f
62#define MSR_C_LO_VID_SHIFT 8
63
64/* Field definitions within the FID VID High Control MSR : */
65#define MSR_C_HI_STP_GNT_TO 0x000fffff
66
67/* Field definitions within the FID VID Low Status MSR : */
68#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
69#define MSR_S_LO_MAX_RAMP_VID 0x1f000000
70#define MSR_S_LO_MAX_FID 0x003f0000
71#define MSR_S_LO_START_FID 0x00003f00
72#define MSR_S_LO_CURRENT_FID 0x0000003f
73
74/* Field definitions within the FID VID High Status MSR : */
75#define MSR_S_HI_MAX_WORKING_VID 0x001f0000
76#define MSR_S_HI_START_VID 0x00001f00
77#define MSR_S_HI_CURRENT_VID 0x0000001f
78#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
79
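/*
 * A minimal sketch of reading the current fid/vid from the status MSR,
 * assuming the rdmsr() macro from <asm/msr.h> used elsewhere in these
 * drivers; the helper name is illustrative only.
 */
static inline void example_read_current_fidvid(u32 *fid, u32 *vid)
{
	u32 lo, hi;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);	/* 64-bit MSR: low/high halves */
	*fid = lo & MSR_S_LO_CURRENT_FID;	/* current fid, bits 5:0 of low half */
	*vid = hi & MSR_S_HI_CURRENT_VID;	/* current vid, bits 4:0 of high half */
}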
80/*
 81 * There are restrictions that the frequencies have to follow:
 82 * - only 1 entry in the low fid table ( <=1.4GHz )
 83 * - lowest entry in the high fid table must be >= 2 * the entry in the
 84 *   low fid table
 85 * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry
86 * in the low fid table
87 * - the parts can only step at 200 MHz intervals, so 1.9 GHz is never valid
88 * - lowest frequency must be >= interprocessor hypertransport link speed
89 * (only applies to MP systems obviously)
90 */
91
92/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
93#define LO_FID_TABLE_TOP 6 /* fid values marking the boundary */
94#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
95
96#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
97#define HI_VCOFREQ_TABLE_BOTTOM 1600
98
99#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
100
101#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
102#define LEAST_VID 0x1e /* Lowest (numerically highest) useful vid value */
103
104#define MIN_FREQ 800 /* Min and max freqs, per spec */
105#define MAX_FREQ 5000
106
107#define INVALID_FID_MASK 0xffffffc1 /* not a valid fid if these bits are set */
108#define INVALID_VID_MASK 0xffffffe0 /* not a valid vid if these bits are set */
109
110#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
111
112#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
113
114#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
115#define VST_UNITS_20US   20		/* Voltage Stabilization Time is in units of 20us */
116
117/*
118 * Most values of interest are encoded in a single field of the _PSS
119 * entries: the "control" value.
120 */
121
122#define IRT_SHIFT 30
123#define RVO_SHIFT 28
124#define PLL_L_SHIFT 20
125#define MVS_SHIFT 18
126#define VST_SHIFT 11
127#define VID_SHIFT 6
128#define IRT_MASK 3
129#define RVO_MASK 3
130#define PLL_L_MASK 0x7f
131#define MVS_MASK 3
132#define VST_MASK 0x7f
133#define VID_MASK 0x1f
134#define FID_MASK 0x3f
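/*
 * A minimal sketch of unpacking one _PSS "control" value with the
 * shift/mask constants above; the helper name and the printk are
 * illustrative only.
 */
static inline void example_decode_pss_control(u32 control)
{
	u32 fid = control & FID_MASK;
	u32 vid = (control >> VID_SHIFT) & VID_MASK;
	u32 vst = (control >> VST_SHIFT) & VST_MASK;
	u32 mvs = (control >> MVS_SHIFT) & MVS_MASK;
	u32 pll = (control >> PLL_L_SHIFT) & PLL_L_MASK;
	u32 rvo = (control >> RVO_SHIFT) & RVO_MASK;
	u32 irt = (control >> IRT_SHIFT) & IRT_MASK;

	printk(KERN_DEBUG "fid %u vid %u vst %u mvs %u pll %u rvo %u irt %u\n",
	       fid, vid, vst, mvs, pll, rvo, irt);
}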
135
136
137/*
138 * Version 1.4 of the PSB table. This table is constructed by BIOS and is
139 * used to tell the OS's power management driver which VIDs and FIDs are
140 * supported by this particular processor.
141 * If the data in the PSB / PST is wrong, then this driver will program the
142 * wrong values into hardware, which is very likely to lead to a crash.
143 */
144
145#define PSB_ID_STRING "AMDK7PNOW!"
146#define PSB_ID_STRING_LEN 10
147
148#define PSB_VERSION_1_4 0x14
149
150struct psb_s {
151 u8 signature[10];
152 u8 tableversion;
153 u8 flags1;
154 u16 vstable;
155 u8 flags2;
156 u8 num_tables;
157 u32 cpuid;
158 u8 plllocktime;
159 u8 maxfid;
160 u8 maxvid;
161 u8 numps;
162};
163
164/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
165struct pst_s {
166 u8 fid;
167 u8 vid;
168};
169
170#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg)
171
172static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid);
173static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
174static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
175
176static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
new file mode 100644
index 000000000000..07d5612dc00f
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -0,0 +1,715 @@
1/*
2 * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
3 * M (part of the Centrino chipset).
4 *
5 * Despite the "SpeedStep" in the name, this is almost entirely unlike
6 * traditional SpeedStep.
7 *
8 * Modelled on speedstep.c
9 *
10 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
11 *
12 * WARNING WARNING WARNING
13 *
14 * This driver manipulates the PERF_CTL MSR, which is only somewhat
15 * documented. While it seems to work on my laptop, it has not been
16 * tested anywhere else, and it may not work for you, do strange
17 * things or simply crash.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/cpufreq.h>
24#include <linux/config.h>
25#include <linux/delay.h>
26#include <linux/compiler.h>
27
28#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
29#include <linux/acpi.h>
30#include <acpi/processor.h>
31#endif
32
33#include <asm/msr.h>
34#include <asm/processor.h>
35#include <asm/cpufeature.h>
36
37#include "speedstep-est-common.h"
38
39#define PFX "speedstep-centrino: "
40#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>"
41
42#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
43
44
45struct cpu_id
46{
47 __u8 x86; /* CPU family */
48 __u8 x86_model; /* model */
49 __u8 x86_mask; /* stepping */
50};
51
52enum {
53 CPU_BANIAS,
54 CPU_DOTHAN_A1,
55 CPU_DOTHAN_A2,
56 CPU_DOTHAN_B0,
57};
58
59static const struct cpu_id cpu_ids[] = {
60 [CPU_BANIAS] = { 6, 9, 5 },
61 [CPU_DOTHAN_A1] = { 6, 13, 1 },
62 [CPU_DOTHAN_A2] = { 6, 13, 2 },
63 [CPU_DOTHAN_B0] = { 6, 13, 6 },
64};
65#define N_IDS (sizeof(cpu_ids)/sizeof(cpu_ids[0]))
66
67struct cpu_model
68{
69 const struct cpu_id *cpu_id;
70 const char *model_name;
71 unsigned max_freq; /* max clock in kHz */
72
73 struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
74};
75static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);
76
77/* Operating points for current CPU */
78static struct cpu_model *centrino_model[NR_CPUS];
79static const struct cpu_id *centrino_cpu[NR_CPUS];
80
81static struct cpufreq_driver centrino_driver;
82
83#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
84
85/* Computes the correct form for IA32_PERF_CTL MSR for a particular
86 frequency/voltage operating point; frequency in MHz, volts in mV.
87 This is stored as "index" in the structure. */
88#define OP(mhz, mv) \
89 { \
90 .frequency = (mhz) * 1000, \
91 .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \
92 }
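/*
 * Worked example: OP(600, 844) expands to .frequency = 600000 and
 * .index = ((600/100) << 8) | ((844 - 700) / 16) = (6 << 8) | 9 = 0x0609,
 * i.e. the bus ratio in the high byte and the voltage code in the low byte,
 * per the encoding in the macro above.
 */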
93
94/*
95 * These voltage tables were derived from the Intel Pentium M
96 * datasheet, document 25261202.pdf, Table 5. I have verified they
97 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
98 * M.
99 */
100
101/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
102static struct cpufreq_frequency_table banias_900[] =
103{
104 OP(600, 844),
105 OP(800, 988),
106 OP(900, 1004),
107 { .frequency = CPUFREQ_TABLE_END }
108};
109
110/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
111static struct cpufreq_frequency_table banias_1000[] =
112{
113 OP(600, 844),
114 OP(800, 972),
115 OP(900, 988),
116 OP(1000, 1004),
117 { .frequency = CPUFREQ_TABLE_END }
118};
119
120/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
121static struct cpufreq_frequency_table banias_1100[] =
122{
123 OP( 600, 956),
124 OP( 800, 1020),
125 OP( 900, 1100),
126 OP(1000, 1164),
127 OP(1100, 1180),
128 { .frequency = CPUFREQ_TABLE_END }
129};
130
131
132/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
133static struct cpufreq_frequency_table banias_1200[] =
134{
135 OP( 600, 956),
136 OP( 800, 1004),
137 OP( 900, 1020),
138 OP(1000, 1100),
139 OP(1100, 1164),
140 OP(1200, 1180),
141 { .frequency = CPUFREQ_TABLE_END }
142};
143
144/* Intel Pentium M processor 1.30GHz (Banias) */
145static struct cpufreq_frequency_table banias_1300[] =
146{
147 OP( 600, 956),
148 OP( 800, 1260),
149 OP(1000, 1292),
150 OP(1200, 1356),
151 OP(1300, 1388),
152 { .frequency = CPUFREQ_TABLE_END }
153};
154
155/* Intel Pentium M processor 1.40GHz (Banias) */
156static struct cpufreq_frequency_table banias_1400[] =
157{
158 OP( 600, 956),
159 OP( 800, 1180),
160 OP(1000, 1308),
161 OP(1200, 1436),
162 OP(1400, 1484),
163 { .frequency = CPUFREQ_TABLE_END }
164};
165
166/* Intel Pentium M processor 1.50GHz (Banias) */
167static struct cpufreq_frequency_table banias_1500[] =
168{
169 OP( 600, 956),
170 OP( 800, 1116),
171 OP(1000, 1228),
172 OP(1200, 1356),
173 OP(1400, 1452),
174 OP(1500, 1484),
175 { .frequency = CPUFREQ_TABLE_END }
176};
177
178/* Intel Pentium M processor 1.60GHz (Banias) */
179static struct cpufreq_frequency_table banias_1600[] =
180{
181 OP( 600, 956),
182 OP( 800, 1036),
183 OP(1000, 1164),
184 OP(1200, 1276),
185 OP(1400, 1420),
186 OP(1600, 1484),
187 { .frequency = CPUFREQ_TABLE_END }
188};
189
190/* Intel Pentium M processor 1.70GHz (Banias) */
191static struct cpufreq_frequency_table banias_1700[] =
192{
193 OP( 600, 956),
194 OP( 800, 1004),
195 OP(1000, 1116),
196 OP(1200, 1228),
197 OP(1400, 1308),
198 OP(1700, 1484),
199 { .frequency = CPUFREQ_TABLE_END }
200};
201#undef OP
202
203#define _BANIAS(cpuid, max, name) \
204{ .cpu_id = cpuid, \
205 .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
206 .max_freq = (max)*1000, \
207 .op_points = banias_##max, \
208}
209#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
210
211/* CPU models, their operating frequency range, and freq/voltage
212 operating points */
213static struct cpu_model models[] =
214{
215 _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
216 BANIAS(1000),
217 BANIAS(1100),
218 BANIAS(1200),
219 BANIAS(1300),
220 BANIAS(1400),
221 BANIAS(1500),
222 BANIAS(1600),
223 BANIAS(1700),
224
225 /* NULL model_name is a wildcard */
226 { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
227 { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
228 { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
229
230 { NULL, }
231};
232#undef _BANIAS
233#undef BANIAS
234
235static int centrino_cpu_init_table(struct cpufreq_policy *policy)
236{
237 struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
238 struct cpu_model *model;
239
240 for(model = models; model->cpu_id != NULL; model++)
241 if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
242 (model->model_name == NULL ||
243 strcmp(cpu->x86_model_id, model->model_name) == 0))
244 break;
245
246 if (model->cpu_id == NULL) {
247 /* No match at all */
248 dprintk(KERN_INFO PFX "no support for CPU model \"%s\": "
249 "send /proc/cpuinfo to " MAINTAINER "\n",
250 cpu->x86_model_id);
251 return -ENOENT;
252 }
253
254 if (model->op_points == NULL) {
255 /* Matched a non-match */
256 dprintk(KERN_INFO PFX "no table support for CPU model \"%s\": \n",
257 cpu->x86_model_id);
258#ifndef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
259 dprintk(KERN_INFO PFX "try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
260#endif
261 return -ENOENT;
262 }
263
264 centrino_model[policy->cpu] = model;
265
266 dprintk("found \"%s\": max frequency: %dkHz\n",
267 model->model_name, model->max_freq);
268
269 return 0;
270}
271
272#else
273static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
274#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
275
276static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
277{
278 if ((c->x86 == x->x86) &&
279 (c->x86_model == x->x86_model) &&
280 (c->x86_mask == x->x86_mask))
281 return 1;
282 return 0;
283}
284
285/* To be called only after centrino_model is initialized */
286static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
287{
288 int i;
289
290 /*
291 * Extract clock in kHz from PERF_CTL value
292 * for centrino, as some DSDTs are buggy.
293 * Ideally, this can be done using the acpi_data structure.
294 */
295 if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
296 (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
297 (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
298 msr = (msr >> 8) & 0xff;
299 return msr * 100000;
300 }
301
302 if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
303 return 0;
304
305 msr &= 0xffff;
306 for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
307 if (msr == centrino_model[cpu]->op_points[i].index)
308 return centrino_model[cpu]->op_points[i].frequency;
309 }
310 if (failsafe)
311 return centrino_model[cpu]->op_points[i-1].frequency;
312 else
313 return 0;
314}
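/*
 * Worked example with a hypothetical PERF_STATUS value: on the
 * Banias/Dothan-A1/B0 branch above, a low word of 0x0d16 gives
 * (0x0d16 >> 8) & 0xff = 13, so extract_clock() returns 13 * 100000 =
 * 1,300,000 kHz; other models match the full 16-bit value against
 * op_points[].index instead.
 */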
315
316/* Return the current CPU frequency in kHz */
317static unsigned int get_cur_freq(unsigned int cpu)
318{
319 unsigned l, h;
320 unsigned clock_freq;
321 cpumask_t saved_mask;
322
323 saved_mask = current->cpus_allowed;
324 set_cpus_allowed(current, cpumask_of_cpu(cpu));
325 if (smp_processor_id() != cpu)
326 return 0;
327
328 rdmsr(MSR_IA32_PERF_STATUS, l, h);
329 clock_freq = extract_clock(l, cpu, 0);
330
331 if (unlikely(clock_freq == 0)) {
332 /*
333 * On some CPUs, we can see transient MSR values (which are
334 * not present in _PSS), while CPU is doing some automatic
335 * P-state transition (like TM2). Get the last freq set
336 * in PERF_CTL.
337 */
338 rdmsr(MSR_IA32_PERF_CTL, l, h);
339 clock_freq = extract_clock(l, cpu, 1);
340 }
341
342 set_cpus_allowed(current, saved_mask);
343 return clock_freq;
344}
345
346
347#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
348
349static struct acpi_processor_performance p;
350
351/*
352 * centrino_cpu_init_acpi - register with ACPI P-States library
353 *
354 * Register with the ACPI P-States library (part of drivers/acpi/processor.c)
355 * in order to determine correct frequency and voltage pairings by reading
356 * the _PSS of the ACPI DSDT or SSDT tables.
357 */
358static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
359{
360 union acpi_object arg0 = {ACPI_TYPE_BUFFER};
361 u32 arg0_buf[3];
362 struct acpi_object_list arg_list = {1, &arg0};
363 unsigned long cur_freq;
364 int result = 0, i;
365 unsigned int cpu = policy->cpu;
366
367 /* _PDC settings */
368 arg0.buffer.length = 12;
369 arg0.buffer.pointer = (u8 *) arg0_buf;
370 arg0_buf[0] = ACPI_PDC_REVISION_ID;
371 arg0_buf[1] = 1;
372 arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_MSR;
373
374 p.pdc = &arg_list;
375
376 /* register with ACPI core */
377 if (acpi_processor_register_performance(&p, cpu)) {
378 dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
379 return -EIO;
380 }
381
382 /* verify the acpi_data */
383 if (p.state_count <= 1) {
384 dprintk("No P-States\n");
385 result = -ENODEV;
386 goto err_unreg;
387 }
388
389 if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
390 (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
391 dprintk("Invalid control/status registers (%x - %x)\n",
392 p.control_register.space_id, p.status_register.space_id);
393 result = -EIO;
394 goto err_unreg;
395 }
396
397 for (i=0; i<p.state_count; i++) {
398 if (p.states[i].control != p.states[i].status) {
399 dprintk("Different control (%x) and status values (%x)\n",
400 p.states[i].control, p.states[i].status);
401 result = -EINVAL;
402 goto err_unreg;
403 }
404
405 if (!p.states[i].core_frequency) {
406 dprintk("Zero core frequency for state %u\n", i);
407 result = -EINVAL;
408 goto err_unreg;
409 }
410
411 if (p.states[i].core_frequency > p.states[0].core_frequency) {
412 dprintk("P%u has larger frequency (%u) than P0 (%u), skipping\n", i,
413 p.states[i].core_frequency, p.states[0].core_frequency);
414 p.states[i].core_frequency = 0;
415 continue;
416 }
417 }
418
419 centrino_model[cpu] = kmalloc(sizeof(struct cpu_model), GFP_KERNEL);
420 if (!centrino_model[cpu]) {
421 result = -ENOMEM;
422 goto err_unreg;
423 }
424 memset(centrino_model[cpu], 0, sizeof(struct cpu_model));
425
426 centrino_model[cpu]->model_name=NULL;
427 centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
428 centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
429 (p.state_count + 1), GFP_KERNEL);
430 if (!centrino_model[cpu]->op_points) {
431 result = -ENOMEM;
432 goto err_kfree;
433 }
434
435 for (i=0; i<p.state_count; i++) {
436 centrino_model[cpu]->op_points[i].index = p.states[i].control;
437 centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
438 dprintk("adding state %i with frequency %u and control value %04x\n",
439 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
440 }
441 centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
442
443 cur_freq = get_cur_freq(cpu);
444
445 for (i=0; i<p.state_count; i++) {
446 if (!p.states[i].core_frequency) {
447 dprintk("skipping state %u\n", i);
448 centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
449 continue;
450 }
451
452 if (extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0) !=
453 (centrino_model[cpu]->op_points[i].frequency)) {
454 dprintk("Invalid encoded frequency (%u vs. %u)\n",
455 extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0),
456 centrino_model[cpu]->op_points[i].frequency);
457 result = -EINVAL;
458 goto err_kfree_all;
459 }
460
461 if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
462 p.state = i;
463 }
464
465 /* notify BIOS that we exist */
466 acpi_processor_notify_smm(THIS_MODULE);
467
468 return 0;
469
470 err_kfree_all:
471 kfree(centrino_model[cpu]->op_points);
472 err_kfree:
473 kfree(centrino_model[cpu]);
474 err_unreg:
475 acpi_processor_unregister_performance(&p, cpu);
476 dprintk(KERN_INFO PFX "invalid ACPI data\n");
477 return (result);
478}
479#else
480static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
481#endif
482
483static int centrino_cpu_init(struct cpufreq_policy *policy)
484{
485 struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
486 unsigned freq;
487 unsigned l, h;
488 int ret;
489 int i;
490
491 /* Only Intel makes Enhanced Speedstep-capable CPUs */
492 if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
493 return -ENODEV;
494
495 for (i = 0; i < N_IDS; i++)
496 if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
497 break;
498
499 if (i != N_IDS)
500 centrino_cpu[policy->cpu] = &cpu_ids[i];
501
502 if (is_const_loops_cpu(policy->cpu)) {
503 centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
504 }
505
506 if (centrino_cpu_init_acpi(policy)) {
507 if (policy->cpu != 0)
508 return -ENODEV;
509
510 if (!centrino_cpu[policy->cpu]) {
511 dprintk(KERN_INFO PFX "found unsupported CPU with "
512 "Enhanced SpeedStep: send /proc/cpuinfo to "
513 MAINTAINER "\n");
514 return -ENODEV;
515 }
516
517 if (centrino_cpu_init_table(policy)) {
518 return -ENODEV;
519 }
520 }
521
522 /* Check to see if Enhanced SpeedStep is enabled, and try to
523 enable it if not. */
524 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
525
526 if (!(l & (1<<16))) {
527 l |= (1<<16);
528 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
529 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
530
531 /* check to see if it stuck */
532 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
533 if (!(l & (1<<16))) {
534 printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
535 return -ENODEV;
536 }
537 }
538
539 freq = get_cur_freq(policy->cpu);
540
541 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
542 policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */
543 policy->cur = freq;
544
545 dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
546
547 ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
548 if (ret)
549 return (ret);
550
551 cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
552
553 return 0;
554}
555
556static int centrino_cpu_exit(struct cpufreq_policy *policy)
557{
558 unsigned int cpu = policy->cpu;
559
560 if (!centrino_model[cpu])
561 return -ENODEV;
562
563 cpufreq_frequency_table_put_attr(cpu);
564
565#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
566 if (!centrino_model[cpu]->model_name) {
567 dprintk("unregistering and freeing ACPI data\n");
568 acpi_processor_unregister_performance(&p, cpu);
569 kfree(centrino_model[cpu]->op_points);
570 kfree(centrino_model[cpu]);
571 }
572#endif
573
574 centrino_model[cpu] = NULL;
575
576 return 0;
577}
578
579/**
580 * centrino_verify - verifies a new CPUFreq policy
581 * @policy: new policy
582 *
583 * Limit must be within this model's frequency range, with at least
584 * one border included.
585 */
586static int centrino_verify (struct cpufreq_policy *policy)
587{
588 return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
589}
590
591/**
592 * centrino_target - set a new CPUFreq policy
593 * @policy: new policy
594 * @target_freq: the target frequency
595 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
596 *
597 * Sets a new CPUFreq policy.
598 */
599static int centrino_target (struct cpufreq_policy *policy,
600 unsigned int target_freq,
601 unsigned int relation)
602{
603 unsigned int newstate = 0;
604 unsigned int msr, oldmsr, h, cpu = policy->cpu;
605 struct cpufreq_freqs freqs;
606 cpumask_t saved_mask;
607 int retval;
608
609 if (centrino_model[cpu] == NULL)
610 return -ENODEV;
611
612 /*
613 * Support for SMP systems.
614 * Make sure we are running on the CPU that wants to change frequency
615 */
616 saved_mask = current->cpus_allowed;
617 set_cpus_allowed(current, policy->cpus);
618 if (!cpu_isset(smp_processor_id(), policy->cpus)) {
619 dprintk("couldn't limit to CPUs in this domain\n");
620 return(-EAGAIN);
621 }
622
623 if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
624 relation, &newstate)) {
625 retval = -EINVAL;
626 goto migrate_end;
627 }
628
629 msr = centrino_model[cpu]->op_points[newstate].index;
630 rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
631
632 if (msr == (oldmsr & 0xffff)) {
633 retval = 0;
634 dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
635 goto migrate_end;
636 }
637
638 freqs.cpu = cpu;
639 freqs.old = extract_clock(oldmsr, cpu, 0);
640 freqs.new = extract_clock(msr, cpu, 0);
641
642 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
643 target_freq, freqs.old, freqs.new, msr);
644
645 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
646
647 /* all but 16 LSB are "reserved", so treat them with
648 care */
649 oldmsr &= ~0xffff;
650 msr &= 0xffff;
651 oldmsr |= msr;
652
653 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
654
655 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
656
657 retval = 0;
658migrate_end:
659 set_cpus_allowed(current, saved_mask);
660 return (retval);
661}
662
663static struct freq_attr* centrino_attr[] = {
664 &cpufreq_freq_attr_scaling_available_freqs,
665 NULL,
666};
667
668static struct cpufreq_driver centrino_driver = {
669 .name = "centrino", /* should be speedstep-centrino,
670 but there's a 16 char limit */
671 .init = centrino_cpu_init,
672 .exit = centrino_cpu_exit,
673 .verify = centrino_verify,
674 .target = centrino_target,
675 .get = get_cur_freq,
676 .attr = centrino_attr,
677 .owner = THIS_MODULE,
678};
679
680
681/**
682 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
683 *
684 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
685 * unsupported devices, -ENOENT if there's no voltage table for this
686 * particular CPU model, -EINVAL on problems during initialization,
687 * and zero on success.
688 *
689 * This is quite picky. Not only does the CPU have to advertise the
690 * "est" flag in the cpuid capability flags, we look for a specific
691 * CPU model and stepping, and we need to have the exact model name in
692 * our voltage tables. That is, be paranoid about not releasing
693 * someone's valuable magic smoke.
694 */
695static int __init centrino_init(void)
696{
697 struct cpuinfo_x86 *cpu = cpu_data;
698
699 if (!cpu_has(cpu, X86_FEATURE_EST))
700 return -ENODEV;
701
702 return cpufreq_register_driver(&centrino_driver);
703}
704
705static void __exit centrino_exit(void)
706{
707 cpufreq_unregister_driver(&centrino_driver);
708}
709
710MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
711MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
712MODULE_LICENSE ("GPL");
713
714late_initcall(centrino_init);
715module_exit(centrino_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h b/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
new file mode 100644
index 000000000000..5ce995c9d866
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
@@ -0,0 +1,25 @@
1/*
2 * Routines common for drivers handling Enhanced Speedstep Technology
3 * Copyright (C) 2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
4 *
5 * Licensed under the terms of the GNU GPL License version 2 -- see
6 * COPYING for details.
7 */
8
9static inline int is_const_loops_cpu(unsigned int cpu)
10{
11 struct cpuinfo_x86 *c = cpu_data + cpu;
12
13 if (c->x86_vendor != X86_VENDOR_INTEL || !cpu_has(c, X86_FEATURE_EST))
14 return 0;
15
16 /*
17 * on P-4s, the TSC runs with constant frequency independent of cpu freq
18 * when we use EST
19 */
20 if (c->x86 == 0xf)
21 return 1;
22
23 return 0;
24}
25
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
new file mode 100644
index 000000000000..5b7d18a06afa
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
@@ -0,0 +1,424 @@
1/*
2 * (C) 2001 Dave Jones, Arjan van de ven.
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon reverse engineered information, and on Intel documentation
7 * for chipsets ICH2-M and ICH3-M.
8 *
9 * Many thanks to Ducrot Bruno for finding and fixing the last
10 * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
11 * for extensive testing.
12 *
13 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
14 */
15
16
17/*********************************************************************
18 * SPEEDSTEP - DEFINITIONS *
19 *********************************************************************/
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/cpufreq.h>
25#include <linux/pci.h>
26#include <linux/slab.h>
27
28#include "speedstep-lib.h"
29
30
31/* speedstep_chipset:
32 * It is necessary to know which chipset is used. As accesses to
33 * this device occur at various places in this module, we need a
34 * static struct pci_dev * pointing to that device.
35 */
36static struct pci_dev *speedstep_chipset_dev;
37
38
39/* speedstep_processor
40 */
41static unsigned int speedstep_processor = 0;
42
43
44/*
45 * There are only two frequency states for each processor. Values
46 * are in kHz for the time being.
47 */
48static struct cpufreq_frequency_table speedstep_freqs[] = {
49 {SPEEDSTEP_HIGH, 0},
50 {SPEEDSTEP_LOW, 0},
51 {0, CPUFREQ_TABLE_END},
52};
53
54
55#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-ich", msg)
56
57
58/**
59 * speedstep_set_state - set the SpeedStep state
60 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
61 *
62 * Tries to change the SpeedStep state.
63 */
64static void speedstep_set_state (unsigned int state)
65{
66 u32 pmbase;
67 u8 pm2_blk;
68 u8 value;
69 unsigned long flags;
70
71 if (!speedstep_chipset_dev || (state > 0x1))
72 return;
73
74 /* get PMBASE */
75 pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
76 if (!(pmbase & 0x01)) {
77 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
78 return;
79 }
80
81 pmbase &= 0xFFFFFFFE;
82 if (!pmbase) {
83 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
84 return;
85 }
86
87 /* Disable IRQs */
88 local_irq_save(flags);
89
90 /* read state */
91 value = inb(pmbase + 0x50);
92
93 dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
94
95 /* write new state */
96 value &= 0xFE;
97 value |= state;
98
99 dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
100
101 /* Disable bus master arbitration */
102 pm2_blk = inb(pmbase + 0x20);
103 pm2_blk |= 0x01;
104 outb(pm2_blk, (pmbase + 0x20));
105
106 /* Actual transition */
107 outb(value, (pmbase + 0x50));
108
109 /* Restore bus master arbitration */
110 pm2_blk &= 0xfe;
111 outb(pm2_blk, (pmbase + 0x20));
112
113 /* check if transition was successful */
114 value = inb(pmbase + 0x50);
115
116 /* Enable IRQs */
117 local_irq_restore(flags);
118
119 dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
120
121 if (state == (value & 0x1)) {
122 dprintk("change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000));
123 } else {
124 printk (KERN_ERR "cpufreq: change failed - I/O error\n");
125 }
126
127 return;
128}
129
130
131/**
132 * speedstep_activate - activate SpeedStep control in the chipset
133 *
134 * Tries to activate the SpeedStep status and control registers.
135 * Returns -EINVAL on an unsupported chipset, and zero on success.
136 */
137static int speedstep_activate (void)
138{
139 u16 value = 0;
140
141 if (!speedstep_chipset_dev)
142 return -EINVAL;
143
144 pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
145 if (!(value & 0x08)) {
146 value |= 0x08;
147 dprintk("activating SpeedStep (TM) registers\n");
148 pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
149 }
150
151 return 0;
152}
153
154
155/**
156 * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
157 *
158 * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
159 * the LPC bridge / PM module which contains all power-management
160 * functions. Returns the ICH generation number (2, 3 or 4) for the
161 * detected chipset, or zero on failure.
162 */
163static unsigned int speedstep_detect_chipset (void)
164{
165 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
166 PCI_DEVICE_ID_INTEL_82801DB_12,
167 PCI_ANY_ID,
168 PCI_ANY_ID,
169 NULL);
170 if (speedstep_chipset_dev)
171 return 4; /* 4-M */
172
173 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
174 PCI_DEVICE_ID_INTEL_82801CA_12,
175 PCI_ANY_ID,
176 PCI_ANY_ID,
177 NULL);
178 if (speedstep_chipset_dev)
179 return 3; /* 3-M */
180
181
182 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
183 PCI_DEVICE_ID_INTEL_82801BA_10,
184 PCI_ANY_ID,
185 PCI_ANY_ID,
186 NULL);
187 if (speedstep_chipset_dev) {
188 /* speedstep.c causes lockups on Dell Inspirons 8000 and
189 * 8100 which use a pretty old revision of the 82815
190	 * host bridge. Abort on these systems.
191 */
192 static struct pci_dev *hostbridge;
193 u8 rev = 0;
194
195 hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
196 PCI_DEVICE_ID_INTEL_82815_MC,
197 PCI_ANY_ID,
198 PCI_ANY_ID,
199 NULL);
200
201 if (!hostbridge)
202 return 2; /* 2-M */
203
204 pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev);
205 if (rev < 5) {
206 dprintk("hostbridge does not support speedstep\n");
207 speedstep_chipset_dev = NULL;
208 pci_dev_put(hostbridge);
209 return 0;
210 }
211
212 pci_dev_put(hostbridge);
213 return 2; /* 2-M */
214 }
215
216 return 0;
217}
218
219static unsigned int _speedstep_get(cpumask_t cpus)
220{
221 unsigned int speed;
222 cpumask_t cpus_allowed;
223
224 cpus_allowed = current->cpus_allowed;
225 set_cpus_allowed(current, cpus);
226 speed = speedstep_get_processor_frequency(speedstep_processor);
227 set_cpus_allowed(current, cpus_allowed);
228 dprintk("detected %u kHz as current frequency\n", speed);
229 return speed;
230}
231
232static unsigned int speedstep_get(unsigned int cpu)
233{
234 return _speedstep_get(cpumask_of_cpu(cpu));
235}
236
237/**
238 * speedstep_target - set a new CPUFreq policy
239 * @policy: new policy
240 * @target_freq: the target frequency
241 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
242 *
243 * Sets a new CPUFreq policy.
244 */
245static int speedstep_target (struct cpufreq_policy *policy,
246 unsigned int target_freq,
247 unsigned int relation)
248{
249 unsigned int newstate = 0;
250 struct cpufreq_freqs freqs;
251 cpumask_t cpus_allowed;
252 int i;
253
254 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
255 return -EINVAL;
256
257 freqs.old = _speedstep_get(policy->cpus);
258 freqs.new = speedstep_freqs[newstate].frequency;
259 freqs.cpu = policy->cpu;
260
261	dprintk("transitioning from %u to %u kHz\n", freqs.old, freqs.new);
262
263 /* no transition necessary */
264 if (freqs.old == freqs.new)
265 return 0;
266
267 cpus_allowed = current->cpus_allowed;
268
269 for_each_cpu_mask(i, policy->cpus) {
270 freqs.cpu = i;
271 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
272 }
273
274 /* switch to physical CPU where state is to be changed */
275 set_cpus_allowed(current, policy->cpus);
276
277 speedstep_set_state(newstate);
278
279 /* allow to be run on all CPUs */
280 set_cpus_allowed(current, cpus_allowed);
281
282 for_each_cpu_mask(i, policy->cpus) {
283 freqs.cpu = i;
284 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
285 }
286
287 return 0;
288}
289
290
291/**
292 * speedstep_verify - verifies a new CPUFreq policy
293 * @policy: new policy
294 *
295 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
296 * at least one border included.
297 */
298static int speedstep_verify (struct cpufreq_policy *policy)
299{
300 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
301}
302
303
304static int speedstep_cpu_init(struct cpufreq_policy *policy)
305{
306 int result = 0;
307 unsigned int speed;
308 cpumask_t cpus_allowed;
309
310 /* only run on CPU to be set, or on its sibling */
311#ifdef CONFIG_SMP
312 policy->cpus = cpu_sibling_map[policy->cpu];
313#endif
314
315 cpus_allowed = current->cpus_allowed;
316 set_cpus_allowed(current, policy->cpus);
317
318 /* detect low and high frequency */
319 result = speedstep_get_freqs(speedstep_processor,
320 &speedstep_freqs[SPEEDSTEP_LOW].frequency,
321 &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
322 &speedstep_set_state);
323 set_cpus_allowed(current, cpus_allowed);
324 if (result)
325 return result;
326
327 /* get current speed setting */
328 speed = _speedstep_get(policy->cpus);
329 if (!speed)
330 return -EIO;
331
332 dprintk("currently at %s speed setting - %i MHz\n",
333 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
334 (speed / 1000));
335
336 /* cpuinfo and default policy values */
337 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
338 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
339 policy->cur = speed;
340
341 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
342 if (result)
343 return (result);
344
345 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
346
347 return 0;
348}
349
350
351static int speedstep_cpu_exit(struct cpufreq_policy *policy)
352{
353 cpufreq_frequency_table_put_attr(policy->cpu);
354 return 0;
355}
356
357static struct freq_attr* speedstep_attr[] = {
358 &cpufreq_freq_attr_scaling_available_freqs,
359 NULL,
360};
361
362
363static struct cpufreq_driver speedstep_driver = {
364 .name = "speedstep-ich",
365 .verify = speedstep_verify,
366 .target = speedstep_target,
367 .init = speedstep_cpu_init,
368 .exit = speedstep_cpu_exit,
369 .get = speedstep_get,
370 .owner = THIS_MODULE,
371 .attr = speedstep_attr,
372};
373
374
375/**
376 * speedstep_init - initializes the SpeedStep CPUFreq driver
377 *
378 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
379 * devices, -EINVAL on problems during initialization, and zero on
380 * success.
381 */
382static int __init speedstep_init(void)
383{
384 /* detect processor */
385 speedstep_processor = speedstep_detect_processor();
386 if (!speedstep_processor) {
387 dprintk("Intel(R) SpeedStep(TM) capable processor not found\n");
388 return -ENODEV;
389 }
390
391 /* detect chipset */
392 if (!speedstep_detect_chipset()) {
393 dprintk("Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n");
394 return -ENODEV;
395 }
396
397 /* activate speedstep support */
398 if (speedstep_activate()) {
399 pci_dev_put(speedstep_chipset_dev);
400 return -EINVAL;
401 }
402
403 return cpufreq_register_driver(&speedstep_driver);
404}
405
406
407/**
408 * speedstep_exit - unregisters SpeedStep support
409 *
410 * Unregisters SpeedStep support.
411 */
412static void __exit speedstep_exit(void)
413{
414 pci_dev_put(speedstep_chipset_dev);
415 cpufreq_unregister_driver(&speedstep_driver);
416}
417
418
419MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
420MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges.");
421MODULE_LICENSE ("GPL");
422
423module_init(speedstep_init);
424module_exit(speedstep_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
new file mode 100644
index 000000000000..8ba430a9c3a2
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -0,0 +1,385 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16#include <linux/pci.h>
17#include <linux/slab.h>
18
19#include <asm/msr.h>
20#include "speedstep-lib.h"
21
22#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-lib", msg)
23
24#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
25static int relaxed_check = 0;
26#else
27#define relaxed_check 0
28#endif
29
30/*********************************************************************
31 * GET PROCESSOR CORE SPEED IN KHZ *
32 *********************************************************************/
33
34static unsigned int pentium3_get_frequency (unsigned int processor)
35{
36 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
37 struct {
38 unsigned int ratio; /* Frequency Multiplier (x10) */
39 u8 bitmap; /* power on configuration bits
40 [27, 25:22] (in MSR 0x2a) */
41 } msr_decode_mult [] = {
42 { 30, 0x01 },
43 { 35, 0x05 },
44 { 40, 0x02 },
45 { 45, 0x06 },
46 { 50, 0x00 },
47 { 55, 0x04 },
48 { 60, 0x0b },
49 { 65, 0x0f },
50 { 70, 0x09 },
51 { 75, 0x0d },
52 { 80, 0x0a },
53 { 85, 0x26 },
54 { 90, 0x20 },
55 { 100, 0x2b },
56 { 0, 0xff } /* error or unknown value */
57 };
58
59 /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
60 struct {
61 unsigned int value; /* Front Side Bus speed in MHz */
62 u8 bitmap; /* power on configuration bits [18: 19]
63 (in MSR 0x2a) */
64 } msr_decode_fsb [] = {
65 { 66, 0x0 },
66 { 100, 0x2 },
67 { 133, 0x1 },
68 { 0, 0xff}
69 };
70
71 u32 msr_lo, msr_tmp;
72 int i = 0, j = 0;
73
74 /* read MSR 0x2a - we only need the low 32 bits */
75 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
76 dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
77 msr_tmp = msr_lo;
78
79 /* decode the FSB */
80 msr_tmp &= 0x00c0000;
81 msr_tmp >>= 18;
82 while (msr_tmp != msr_decode_fsb[i].bitmap) {
83 if (msr_decode_fsb[i].bitmap == 0xff)
84 return 0;
85 i++;
86 }
87
88 /* decode the multiplier */
89 if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) {
90 dprintk("workaround for early PIIIs\n");
91 msr_lo &= 0x03c00000;
92 } else
93 msr_lo &= 0x0bc00000;
94 msr_lo >>= 22;
95 while (msr_lo != msr_decode_mult[j].bitmap) {
96 if (msr_decode_mult[j].bitmap == 0xff)
97 return 0;
98 j++;
99 }
100
101 dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
102
103 return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100);
104}
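/*
 * Worked example with hypothetical MSR contents: if bits 19:18 decode to
 * 0x1, msr_decode_fsb gives a 133 MHz FSB; if the multiplier bits decode to
 * 0x0b, msr_decode_mult gives ratio 60 (x6.0), so the function returns
 * 60 * 133 * 100 = 798000 kHz, i.e. a nominal 800 MHz part modulo the
 * 133.3 MHz FSB rounding.
 */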
105
106
107static unsigned int pentiumM_get_frequency(void)
108{
109 u32 msr_lo, msr_tmp;
110
111 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
112 dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
113
114 /* see table B-2 of 24547212.pdf */
115 if (msr_lo & 0x00040000) {
116 printk(KERN_DEBUG "speedstep-lib: PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp);
117 return 0;
118 }
119
120 msr_tmp = (msr_lo >> 22) & 0x1f;
121 dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000));
122
123 return (msr_tmp * 100 * 1000);
124}
125
126
127static unsigned int pentium4_get_frequency(void)
128{
129 struct cpuinfo_x86 *c = &boot_cpu_data;
130 u32 msr_lo, msr_hi, mult;
131 unsigned int fsb = 0;
132
133 rdmsr(0x2c, msr_lo, msr_hi);
134
135 dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
136
137 /* decode the FSB: see IA-32 Intel (C) Architecture Software
138	 * Developer's Manual, Volume 3: System Programming Guide,
139 * revision #12 in Table B-1: MSRs in the Pentium 4 and
140 * Intel Xeon Processors, on page B-4 and B-5.
141 */
142 if (c->x86_model < 2)
143 fsb = 100 * 1000;
144 else {
145 u8 fsb_code = (msr_lo >> 16) & 0x7;
146 switch (fsb_code) {
147 case 0:
148 fsb = 100 * 1000;
149 break;
150 case 1:
151 fsb = 13333 * 10;
152 break;
153 case 2:
154 fsb = 200 * 1000;
155 break;
156 }
157 }
158
159 if (!fsb)
160 printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to <linux@brodo.de>\n");
161
162 /* Multiplier. */
163 if (c->x86_model < 2)
164 mult = msr_lo >> 27;
165 else
166 mult = msr_lo >> 24;
167
168 dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult));
169
170 return (fsb * mult);
171}
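/*
 * Worked example with hypothetical register contents: on a model >= 2 CPU,
 * fsb_code 2 selects fsb = 200000 kHz, and mult = msr_lo >> 24 = 14 gives a
 * return value of 200000 * 14 = 2800000 kHz (2.8 GHz).
 */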
172
173
174unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{
176 switch (processor) {
177 case SPEEDSTEP_PROCESSOR_PM:
178 return pentiumM_get_frequency();
179 case SPEEDSTEP_PROCESSOR_P4D:
180 case SPEEDSTEP_PROCESSOR_P4M:
181 return pentium4_get_frequency();
182 case SPEEDSTEP_PROCESSOR_PIII_T:
183 case SPEEDSTEP_PROCESSOR_PIII_C:
184 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
185 return pentium3_get_frequency(processor);
186 default:
187 return 0;
188 };
189 return 0;
190}
191EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
192
193
194/*********************************************************************
195 * DETECT SPEEDSTEP-CAPABLE PROCESSOR *
196 *********************************************************************/
197
198unsigned int speedstep_detect_processor (void)
199{
200 struct cpuinfo_x86 *c = cpu_data;
201 u32 ebx, msr_lo, msr_hi;
202
203 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
204
205 if ((c->x86_vendor != X86_VENDOR_INTEL) ||
206 ((c->x86 != 6) && (c->x86 != 0xF)))
207 return 0;
208
209 if (c->x86 == 0xF) {
210 /* Intel Mobile Pentium 4-M
211 * or Intel Mobile Pentium 4 with 533 MHz FSB */
212 if (c->x86_model != 2)
213 return 0;
214
215 ebx = cpuid_ebx(0x00000001);
216 ebx &= 0x000000FF;
217
218 dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
219
220 switch (c->x86_mask) {
221 case 4:
222 /*
223 * B-stepping [M-P4-M]
224 * sample has ebx = 0x0f, production has 0x0e.
225 */
226 if ((ebx == 0x0e) || (ebx == 0x0f))
227 return SPEEDSTEP_PROCESSOR_P4M;
228 break;
229 case 7:
230 /*
231 * C-stepping [M-P4-M]
232 * needs to have ebx=0x0e, else it's a celeron:
233 * cf. 25130917.pdf / page 7, footnote 5 even
234 * though 25072120.pdf / page 7 doesn't say
235 * samples are only of B-stepping...
236 */
237 if (ebx == 0x0e)
238 return SPEEDSTEP_PROCESSOR_P4M;
239 break;
240 case 9:
241 /*
242 * D-stepping [M-P4-M or M-P4/533]
243 *
244 * this is totally strange: CPUID 0x0F29 is
245 * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
246 * The latter need to be sorted out as they don't
247 * support speedstep.
248 * Celerons with CPUID 0x0F29 may have either
249 * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
250 * specific.
251 * M-P4-Ms may have either ebx=0xe or 0xf [see above]
252 * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
253 * also, M-P4M HTs have ebx=0x8, too
254 * For now, they are distinguished by the model_id string
255 */
256 if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
257 return SPEEDSTEP_PROCESSOR_P4M;
258 break;
259 default:
260 break;
261 }
262 return 0;
263 }
264
265 switch (c->x86_model) {
266 case 0x0B: /* Intel PIII [Tualatin] */
267 /* cpuid_ebx(1) is 0x04 for desktop PIII,
268 0x06 for mobile PIII-M */
269 ebx = cpuid_ebx(0x00000001);
270 dprintk("ebx is %x\n", ebx);
271
272 ebx &= 0x000000FF;
273
274 if (ebx != 0x06)
275 return 0;
276
277 /* So far all PIII-M processors support SpeedStep. See
278 * Intel's 24540640.pdf of June 2003
279 */
280
281 return SPEEDSTEP_PROCESSOR_PIII_T;
282
283 case 0x08: /* Intel PIII [Coppermine] */
284
285 /* all mobile PIII Coppermines have FSB 100 MHz
286 * ==> sort out a few desktop PIIIs. */
287 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
288 dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi);
289 msr_lo &= 0x00c0000;
290 if (msr_lo != 0x0080000)
291 return 0;
292
293 /*
294 * If the processor is a mobile version,
295 * platform ID has bit 50 set
296 * it has SpeedStep technology if either
297 * bit 56 or 57 is set
298 */
299 rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
300 dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi);
301 if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
302 if (c->x86_mask == 0x01) {
303 dprintk("early PIII version\n");
304 return SPEEDSTEP_PROCESSOR_PIII_C_EARLY;
305 } else
306 return SPEEDSTEP_PROCESSOR_PIII_C;
307 }
308
309 default:
310 return 0;
311 }
312}
313EXPORT_SYMBOL_GPL(speedstep_detect_processor);
314
315
316/*********************************************************************
317 * DETECT SPEEDSTEP SPEEDS *
318 *********************************************************************/
319
320unsigned int speedstep_get_freqs(unsigned int processor,
321 unsigned int *low_speed,
322 unsigned int *high_speed,
323 void (*set_state) (unsigned int state))
324{
325 unsigned int prev_speed;
326 unsigned int ret = 0;
327 unsigned long flags;
328
329 if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
330 return -EINVAL;
331
332 dprintk("trying to determine both speeds\n");
333
334 /* get current speed */
335 prev_speed = speedstep_get_processor_frequency(processor);
336 if (!prev_speed)
337 return -EIO;
338
339	dprintk("previous speed is %u\n", prev_speed);
340
341 local_irq_save(flags);
342
343 /* switch to low state */
344 set_state(SPEEDSTEP_LOW);
345 *low_speed = speedstep_get_processor_frequency(processor);
346 if (!*low_speed) {
347 ret = -EIO;
348 goto out;
349 }
350
351	dprintk("low speed is %u\n", *low_speed);
352
353 /* switch to high state */
354 set_state(SPEEDSTEP_HIGH);
355 *high_speed = speedstep_get_processor_frequency(processor);
356 if (!*high_speed) {
357 ret = -EIO;
358 goto out;
359 }
360
361	dprintk("high speed is %u\n", *high_speed);
362
363 if (*low_speed == *high_speed) {
364 ret = -ENODEV;
365 goto out;
366 }
367
368 /* switch to previous state, if necessary */
369 if (*high_speed != prev_speed)
370 set_state(SPEEDSTEP_LOW);
371
372 out:
373 local_irq_restore(flags);
374 return (ret);
375}
376EXPORT_SYMBOL_GPL(speedstep_get_freqs);
377
378#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
379module_param(relaxed_check, int, 0444);
380MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability.");
381#endif
382
383MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
384MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
385MODULE_LICENSE ("GPL");
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
new file mode 100644
index 000000000000..261a2c9b7f6b
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -0,0 +1,47 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11
12
13/* processors */
14
15#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */
16#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */
17#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
18#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */
19
20/* the following processors are not speedstep-capable and are not auto-detected
21 * in speedstep_detect_processor(). However, their speed can be detected using
22 * the speedstep_get_processor_frequency() call. */
23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
25
26/* speedstep states -- only two of them */
27
28#define SPEEDSTEP_HIGH 0x00000000
29#define SPEEDSTEP_LOW 0x00000001
30
31
32/* detect a speedstep-capable processor */
33extern unsigned int speedstep_detect_processor (void);
34
 35/* detect the current speed (in kHz) of the processor */
36extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
37
38
39/* detect the low and high speeds of the processor. The callback
 40 * set_state's first argument is either SPEEDSTEP_HIGH or
41 * SPEEDSTEP_LOW; the second argument is zero so that no
42 * cpufreq_notify_transition calls are initiated.
43 */
44extern unsigned int speedstep_get_freqs(unsigned int processor,
45 unsigned int *low_speed,
46 unsigned int *high_speed,
47 void (*set_state) (unsigned int state));
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
new file mode 100644
index 000000000000..79440b3f087e
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -0,0 +1,424 @@
1/*
2 * Intel SpeedStep SMI driver.
3 *
4 * (C) 2003 Hiroshi Miura <miura@da-cha.org>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 *
8 */
9
10
11/*********************************************************************
12 * SPEEDSTEP - DEFINITIONS *
13 *********************************************************************/
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/cpufreq.h>
20#include <linux/pci.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <asm/ist.h>
24
25#include "speedstep-lib.h"
26
27/* speedstep system management interface port/command.
28 *
 29 * These parameters are obtained from the IST-SMI BIOS call.
 30 * If the user supplies them, the user-supplied values are used instead.
31 *
32 */
33static int smi_port = 0;
34static int smi_cmd = 0;
35static unsigned int smi_sig = 0;
36
37/* info about the processor */
38static unsigned int speedstep_processor = 0;
39
40/*
41 * There are only two frequency states for each processor. Values
42 * are in kHz for the time being.
43 */
44static struct cpufreq_frequency_table speedstep_freqs[] = {
45 {SPEEDSTEP_HIGH, 0},
46 {SPEEDSTEP_LOW, 0},
47 {0, CPUFREQ_TABLE_END},
48};
49
50#define GET_SPEEDSTEP_OWNER 0
51#define GET_SPEEDSTEP_STATE 1
52#define SET_SPEEDSTEP_STATE 2
53#define GET_SPEEDSTEP_FREQS 4
54
55/* how often shall the SMI call be tried if it failed, e.g. because
56 * of DMA activity going on? */
57#define SMI_TRIES 5
58
59#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg)
60
61/**
62 * speedstep_smi_ownership
63 */
64static int speedstep_smi_ownership (void)
65{
66 u32 command, result, magic;
67 u32 function = GET_SPEEDSTEP_OWNER;
68 unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
69
70 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
71 magic = virt_to_phys(magic_data);
72
73 dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port);
74
75 __asm__ __volatile__(
76 "out %%al, (%%dx)\n"
77 : "=D" (result)
78 : "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic)
79 );
80
81 dprintk("result is %x\n", result);
82
83 return result;
84}
85
86/**
87 * speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
88 * @low: the low frequency value is placed here
89 * @high: the high frequency value is placed here
90 *
91 * Only available on later SpeedStep-enabled systems, returns false results or
92 * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
93 * shows that the latter occurs if !(ist_info.event & 0xFFFF).
94 */
95static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
96{
97 u32 command, result = 0, edi, high_mhz, low_mhz;
98 u32 state=0;
99 u32 function = GET_SPEEDSTEP_FREQS;
100
101 if (!(ist_info.event & 0xFFFF)) {
102		dprintk("bug #1422 -- can't read freqs from BIOS\n");
103 return -ENODEV;
104 }
105
106 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
107
108 dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port);
109
110 __asm__ __volatile__("movl $0, %%edi\n"
111 "out %%al, (%%dx)\n"
112 : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi)
113 : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0)
114 );
115
116 dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz);
117
118 /* abort if results are obviously incorrect... */
119 if ((high_mhz + low_mhz) < 600)
120 return -EINVAL;
121
122 *high = high_mhz * 1000;
123 *low = low_mhz * 1000;
124
125 return result;
126}
127
128/**
129 * speedstep_get_state - read the current SpeedStep state
130 *
131 * Returns the current processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH).
132 */
133static int speedstep_get_state (void)
134{
135 u32 function=GET_SPEEDSTEP_STATE;
136 u32 result, state, edi, command;
137
138 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
139
140 dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port);
141
142 __asm__ __volatile__("movl $0, %%edi\n"
143 "out %%al, (%%dx)\n"
144 : "=a" (result), "=b" (state), "=D" (edi)
145 : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0)
146 );
147
148 dprintk("state is %x, result is %x\n", state, result);
149
150 return (state & 1);
151}
152
153
154/**
155 * speedstep_set_state - set the SpeedStep state
156 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
157 *
158 */
159static void speedstep_set_state (unsigned int state)
160{
161 unsigned int result = 0, command, new_state;
162 unsigned long flags;
163 unsigned int function=SET_SPEEDSTEP_STATE;
164 unsigned int retry = 0;
165
166 if (state > 0x1)
167 return;
168
169 /* Disable IRQs */
170 local_irq_save(flags);
171
172 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
173
174 dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port);
175
176 do {
177 if (retry) {
178 dprintk("retry %u, previous result %u, waiting...\n", retry, result);
179 mdelay(retry * 50);
180 }
181 retry++;
182 __asm__ __volatile__(
183 "movl $0, %%edi\n"
184 "out %%al, (%%dx)\n"
185 : "=b" (new_state), "=D" (result)
186 : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0)
187 );
188 } while ((new_state != state) && (retry <= SMI_TRIES));
189
190 /* enable IRQs */
191 local_irq_restore(flags);
192
193 if (new_state == state) {
194 dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result);
195 } else {
196 printk(KERN_ERR "cpufreq: change failed with new_state %u and result %u\n", new_state, result);
197 }
198
199 return;
200}
201
202
203/**
204 * speedstep_target - set a new CPUFreq policy
205 * @policy: new policy
206 * @target_freq: new freq
207 * @relation: how the selected frequency should relate to target_freq (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
208 *
209 * Sets a new CPUFreq policy/freq.
210 */
211static int speedstep_target (struct cpufreq_policy *policy,
212 unsigned int target_freq, unsigned int relation)
213{
214 unsigned int newstate = 0;
215 struct cpufreq_freqs freqs;
216
217 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
218 return -EINVAL;
219
220 freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
221 freqs.new = speedstep_freqs[newstate].frequency;
222 freqs.cpu = 0; /* speedstep.c is UP only driver */
223
224 if (freqs.old == freqs.new)
225 return 0;
226
227 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
228 speedstep_set_state(newstate);
229 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
230
231 return 0;
232}
233
234
235/**
236 * speedstep_verify - verifies a new CPUFreq policy
237 * @policy: new policy
238 *
239 * Limits must be within speedstep_freqs[SPEEDSTEP_LOW].frequency and
240 * speedstep_freqs[SPEEDSTEP_HIGH].frequency, with at least one border included.
241 */
242static int speedstep_verify (struct cpufreq_policy *policy)
243{
244 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
245}
246
247
248static int speedstep_cpu_init(struct cpufreq_policy *policy)
249{
250 int result;
251 unsigned int speed,state;
252
253 /* capability check */
254 if (policy->cpu != 0)
255 return -ENODEV;
256
257 result = speedstep_smi_ownership();
258 if (result) {
259		dprintk("failed acquiring ownership of the SMI interface.\n");
260 return -EINVAL;
261 }
262
263 /* detect low and high frequency */
264 result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency,
265 &speedstep_freqs[SPEEDSTEP_HIGH].frequency);
266 if (result) {
267		/* fall back to the speedstep-lib.c detection mechanism: try both states out */
268 dprintk("could not detect low and high frequencies by SMI call.\n");
269 result = speedstep_get_freqs(speedstep_processor,
270 &speedstep_freqs[SPEEDSTEP_LOW].frequency,
271 &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
272 &speedstep_set_state);
273
274 if (result) {
275 dprintk("could not detect two different speeds -- aborting.\n");
276 return result;
277 } else
278 dprintk("workaround worked.\n");
279 }
280
281 /* get current speed setting */
282 state = speedstep_get_state();
283 speed = speedstep_freqs[state].frequency;
284
285 dprintk("currently at %s speed setting - %i MHz\n",
286 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
287 (speed / 1000));
288
289 /* cpuinfo and default policy values */
290 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
291 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
292 policy->cur = speed;
293
294 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
295 if (result)
296 return (result);
297
298 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
299
300 return 0;
301}
302
303static int speedstep_cpu_exit(struct cpufreq_policy *policy)
304{
305 cpufreq_frequency_table_put_attr(policy->cpu);
306 return 0;
307}
308
309static unsigned int speedstep_get(unsigned int cpu)
310{
311 if (cpu)
312 return -ENODEV;
313 return speedstep_get_processor_frequency(speedstep_processor);
314}
315
316
317static int speedstep_resume(struct cpufreq_policy *policy)
318{
319 int result = speedstep_smi_ownership();
320
321 if (result)
322		dprintk("failed re-acquiring ownership of the SMI interface.\n");
323
324 return result;
325}
326
327static struct freq_attr* speedstep_attr[] = {
328 &cpufreq_freq_attr_scaling_available_freqs,
329 NULL,
330};
331
332static struct cpufreq_driver speedstep_driver = {
333 .name = "speedstep-smi",
334 .verify = speedstep_verify,
335 .target = speedstep_target,
336 .init = speedstep_cpu_init,
337 .exit = speedstep_cpu_exit,
338 .get = speedstep_get,
339 .resume = speedstep_resume,
340 .owner = THIS_MODULE,
341 .attr = speedstep_attr,
342};
343
344/**
345 * speedstep_init - initializes the SpeedStep CPUFreq driver
346 *
347 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
348 * BIOS, -EINVAL on problems during initialization, and zero on
349 * success.
350 */
351static int __init speedstep_init(void)
352{
353 speedstep_processor = speedstep_detect_processor();
354
355 switch (speedstep_processor) {
356 case SPEEDSTEP_PROCESSOR_PIII_T:
357 case SPEEDSTEP_PROCESSOR_PIII_C:
358 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
359 break;
360 default:
361 speedstep_processor = 0;
362 }
363
364 if (!speedstep_processor) {
365 dprintk ("No supported Intel CPU detected.\n");
366 return -ENODEV;
367 }
368
369 dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
370 ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);
371
372
373	/* Error out if there is neither an IST-SMI BIOS nor user-given
374	   port/command parameters -- expected BIOS signature: 'ISGE' aka 'Intel Speedstep Gate E' */
375 if ((ist_info.signature != 0x47534943) && (
376 (smi_port == 0) || (smi_cmd == 0)))
377 return -ENODEV;
378
379 if (smi_sig == 1)
380 smi_sig = 0x47534943;
381 else
382 smi_sig = ist_info.signature;
383
384	/* set up smi_port from the module parameter or from the BIOS */
385 if ((smi_port > 0xff) || (smi_port < 0)) {
386 return -EINVAL;
387 } else if (smi_port == 0) {
388 smi_port = ist_info.command & 0xff;
389 }
390
391 if ((smi_cmd > 0xff) || (smi_cmd < 0)) {
392 return -EINVAL;
393 } else if (smi_cmd == 0) {
394 smi_cmd = (ist_info.command >> 16) & 0xff;
395 }
396
397 return cpufreq_register_driver(&speedstep_driver);
398}
399
400
401/**
402 * speedstep_exit - unregisters SpeedStep support
403 *
404 * Unregisters SpeedStep support.
405 */
406static void __exit speedstep_exit(void)
407{
408 cpufreq_unregister_driver(&speedstep_driver);
409}
410
411module_param(smi_port, int, 0444);
412module_param(smi_cmd, int, 0444);
413module_param(smi_sig, uint, 0444);
414
415MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2");
416MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82");
417MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface.");
418
419MODULE_AUTHOR ("Hiroshi Miura");
420MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface.");
421MODULE_LICENSE ("GPL");
422
423module_init(speedstep_init);
424module_exit(speedstep_exit);