aboutsummaryrefslogtreecommitdiffstats
path: root/arch/i386
diff options
context:
space:
mode:
Diffstat (limited to 'arch/i386')
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig6
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Makefile2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c813
-rw-r--r--arch/i386/kernel/cpu/cpufreq/gx-suspmod.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c8
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c38
-rw-r--r--arch/i386/kernel/cpu/cpufreq/sc520_freq.c7
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c15
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.c32
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.h1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-smi.c3
11 files changed, 584 insertions, 345 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index ccc1edff5c97..5299c5bf4454 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -17,6 +17,7 @@ config X86_ACPI_CPUFREQ
17 help 17 help
18 This driver adds a CPUFreq driver which utilizes the ACPI 18 This driver adds a CPUFreq driver which utilizes the ACPI
19 Processor Performance States. 19 Processor Performance States.
20 This driver also supports Intel Enhanced Speedstep.
20 21
21 For details, take a look at <file:Documentation/cpu-freq/>. 22 For details, take a look at <file:Documentation/cpu-freq/>.
22 23
@@ -121,11 +122,14 @@ config X86_SPEEDSTEP_CENTRINO
121 If in doubt, say N. 122 If in doubt, say N.
122 123
123config X86_SPEEDSTEP_CENTRINO_ACPI 124config X86_SPEEDSTEP_CENTRINO_ACPI
124 bool "Use ACPI tables to decode valid frequency/voltage pairs" 125 bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
125 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR 126 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
126 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m) 127 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
127 default y 128 default y
128 help 129 help
130 This is deprecated and this functionality is now merged into
131 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
132 speedstep_centrino.
129 Use primarily the information provided in the BIOS ACPI tables 133 Use primarily the information provided in the BIOS ACPI tables
130 to determine valid CPU frequency and voltage pairings. It is 134 to determine valid CPU frequency and voltage pairings. It is
131 required for the driver to work on non-Banias CPUs. 135 required for the driver to work on non-Banias CPUs.
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 2e894f1c8910..8de3abe322a9 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
7obj-$(CONFIG_X86_LONGRUN) += longrun.o 7obj-$(CONFIG_X86_LONGRUN) += longrun.o
8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o 8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o 9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
10obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o 10obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
12obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o 11obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
13obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o 12obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
13obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 57c880bf0bd6..18f4715c655d 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $) 2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
3 * 3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> 6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
7 * 8 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 10 *
@@ -27,202 +28,387 @@
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/module.h> 29#include <linux/module.h>
29#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/smp.h>
32#include <linux/sched.h>
30#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/compiler.h> 34#include <linux/compiler.h>
34#include <linux/sched.h> /* current */
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <asm/io.h>
37#include <asm/delay.h>
38#include <asm/uaccess.h>
39 36
40#include <linux/acpi.h> 37#include <linux/acpi.h>
41#include <acpi/processor.h> 38#include <acpi/processor.h>
42 39
40#include <asm/io.h>
41#include <asm/msr.h>
42#include <asm/processor.h>
43#include <asm/cpufeature.h>
44#include <asm/delay.h>
45#include <asm/uaccess.h>
46
43#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) 47#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
44 48
45MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); 49MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
46MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 50MODULE_DESCRIPTION("ACPI Processor P-States Driver");
47MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
48 52
53enum {
54 UNDEFINED_CAPABLE = 0,
55 SYSTEM_INTEL_MSR_CAPABLE,
56 SYSTEM_IO_CAPABLE,
57};
58
59#define INTEL_MSR_RANGE (0xffff)
60#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
49 61
50struct cpufreq_acpi_io { 62struct acpi_cpufreq_data {
51 struct acpi_processor_performance *acpi_data; 63 struct acpi_processor_performance *acpi_data;
52 struct cpufreq_frequency_table *freq_table; 64 struct cpufreq_frequency_table *freq_table;
53 unsigned int resume; 65 unsigned int max_freq;
66 unsigned int resume;
67 unsigned int cpu_feature;
54}; 68};
55 69
56static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; 70static struct acpi_cpufreq_data *drv_data[NR_CPUS];
57static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; 71static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
58 72
59static struct cpufreq_driver acpi_cpufreq_driver; 73static struct cpufreq_driver acpi_cpufreq_driver;
60 74
61static unsigned int acpi_pstate_strict; 75static unsigned int acpi_pstate_strict;
62 76
63static int 77static int check_est_cpu(unsigned int cpuid)
64acpi_processor_write_port( 78{
65 u16 port, 79 struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
66 u8 bit_width, 80
67 u32 value) 81 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
82 !cpu_has(cpu, X86_FEATURE_EST))
83 return 0;
84
85 return 1;
86}
87
88static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
89{
90 struct acpi_processor_performance *perf;
91 int i;
92
93 perf = data->acpi_data;
94
95 for (i=0; i<perf->state_count; i++) {
96 if (value == perf->states[i].status)
97 return data->freq_table[i].frequency;
98 }
99 return 0;
100}
101
102static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
68{ 103{
69 if (bit_width <= 8) { 104 int i;
105 struct acpi_processor_performance *perf;
106
107 msr &= INTEL_MSR_RANGE;
108 perf = data->acpi_data;
109
110 for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
111 if (msr == perf->states[data->freq_table[i].index].status)
112 return data->freq_table[i].frequency;
113 }
114 return data->freq_table[0].frequency;
115}
116
117static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
118{
119 switch (data->cpu_feature) {
120 case SYSTEM_INTEL_MSR_CAPABLE:
121 return extract_msr(val, data);
122 case SYSTEM_IO_CAPABLE:
123 return extract_io(val, data);
124 default:
125 return 0;
126 }
127}
128
129static void wrport(u16 port, u8 bit_width, u32 value)
130{
131 if (bit_width <= 8)
70 outb(value, port); 132 outb(value, port);
71 } else if (bit_width <= 16) { 133 else if (bit_width <= 16)
72 outw(value, port); 134 outw(value, port);
73 } else if (bit_width <= 32) { 135 else if (bit_width <= 32)
74 outl(value, port); 136 outl(value, port);
75 } else {
76 return -ENODEV;
77 }
78 return 0;
79} 137}
80 138
81static int 139static void rdport(u16 port, u8 bit_width, u32 * ret)
82acpi_processor_read_port(
83 u16 port,
84 u8 bit_width,
85 u32 *ret)
86{ 140{
87 *ret = 0; 141 *ret = 0;
88 if (bit_width <= 8) { 142 if (bit_width <= 8)
89 *ret = inb(port); 143 *ret = inb(port);
90 } else if (bit_width <= 16) { 144 else if (bit_width <= 16)
91 *ret = inw(port); 145 *ret = inw(port);
92 } else if (bit_width <= 32) { 146 else if (bit_width <= 32)
93 *ret = inl(port); 147 *ret = inl(port);
94 } else { 148}
95 return -ENODEV; 149
150struct msr_addr {
151 u32 reg;
152};
153
154struct io_addr {
155 u16 port;
156 u8 bit_width;
157};
158
159typedef union {
160 struct msr_addr msr;
161 struct io_addr io;
162} drv_addr_union;
163
164struct drv_cmd {
165 unsigned int type;
166 cpumask_t mask;
167 drv_addr_union addr;
168 u32 val;
169};
170
171static void do_drv_read(struct drv_cmd *cmd)
172{
173 u32 h;
174
175 switch (cmd->type) {
176 case SYSTEM_INTEL_MSR_CAPABLE:
177 rdmsr(cmd->addr.msr.reg, cmd->val, h);
178 break;
179 case SYSTEM_IO_CAPABLE:
180 rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
181 break;
182 default:
183 break;
96 } 184 }
97 return 0;
98} 185}
99 186
100static int 187static void do_drv_write(struct drv_cmd *cmd)
101acpi_processor_set_performance (
102 struct cpufreq_acpi_io *data,
103 unsigned int cpu,
104 int state)
105{ 188{
106 u16 port = 0; 189 u32 h = 0;
107 u8 bit_width = 0; 190
108 int i = 0; 191 switch (cmd->type) {
109 int ret = 0; 192 case SYSTEM_INTEL_MSR_CAPABLE:
110 u32 value = 0; 193 wrmsr(cmd->addr.msr.reg, cmd->val, h);
111 int retval; 194 break;
112 struct acpi_processor_performance *perf; 195 case SYSTEM_IO_CAPABLE:
113 196 wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
114 dprintk("acpi_processor_set_performance\n"); 197 break;
115 198 default:
116 retval = 0; 199 break;
117 perf = data->acpi_data;
118 if (state == perf->state) {
119 if (unlikely(data->resume)) {
120 dprintk("Called after resume, resetting to P%d\n", state);
121 data->resume = 0;
122 } else {
123 dprintk("Already at target state (P%d)\n", state);
124 return (retval);
125 }
126 } 200 }
201}
127 202
128 dprintk("Transitioning from P%d to P%d\n", perf->state, state); 203static void drv_read(struct drv_cmd *cmd)
204{
205 cpumask_t saved_mask = current->cpus_allowed;
206 cmd->val = 0;
129 207
130 /* 208 set_cpus_allowed(current, cmd->mask);
131 * First we write the target state's 'control' value to the 209 do_drv_read(cmd);
132 * control_register. 210 set_cpus_allowed(current, saved_mask);
133 */ 211}
212
213static void drv_write(struct drv_cmd *cmd)
214{
215 cpumask_t saved_mask = current->cpus_allowed;
216 unsigned int i;
217
218 for_each_cpu_mask(i, cmd->mask) {
219 set_cpus_allowed(current, cpumask_of_cpu(i));
220 do_drv_write(cmd);
221 }
222
223 set_cpus_allowed(current, saved_mask);
224 return;
225}
226
227static u32 get_cur_val(cpumask_t mask)
228{
229 struct acpi_processor_performance *perf;
230 struct drv_cmd cmd;
231
232 if (unlikely(cpus_empty(mask)))
233 return 0;
234
235 switch (drv_data[first_cpu(mask)]->cpu_feature) {
236 case SYSTEM_INTEL_MSR_CAPABLE:
237 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
238 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
239 break;
240 case SYSTEM_IO_CAPABLE:
241 cmd.type = SYSTEM_IO_CAPABLE;
242 perf = drv_data[first_cpu(mask)]->acpi_data;
243 cmd.addr.io.port = perf->control_register.address;
244 cmd.addr.io.bit_width = perf->control_register.bit_width;
245 break;
246 default:
247 return 0;
248 }
249
250 cmd.mask = mask;
134 251
135 port = perf->control_register.address; 252 drv_read(&cmd);
136 bit_width = perf->control_register.bit_width;
137 value = (u32) perf->states[state].control;
138 253
139 dprintk("Writing 0x%08x to port 0x%04x\n", value, port); 254 dprintk("get_cur_val = %u\n", cmd.val);
140 255
141 ret = acpi_processor_write_port(port, bit_width, value); 256 return cmd.val;
142 if (ret) { 257}
143 dprintk("Invalid port width 0x%04x\n", bit_width); 258
144 return (ret); 259/*
260 * Return the measured active (C0) frequency on this CPU since last call
261 * to this function.
262 * Input: cpu number
263 * Return: Average CPU frequency in terms of max frequency (zero on error)
264 *
265 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
266 * over a period of time, while CPU is in C0 state.
267 * IA32_MPERF counts at the rate of max advertised frequency
268 * IA32_APERF counts at the rate of actual CPU frequency
269 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
270 * no meaning should be associated with absolute values of these MSRs.
271 */
272static unsigned int get_measured_perf(unsigned int cpu)
273{
274 union {
275 struct {
276 u32 lo;
277 u32 hi;
278 } split;
279 u64 whole;
280 } aperf_cur, mperf_cur;
281
282 cpumask_t saved_mask;
283 unsigned int perf_percent;
284 unsigned int retval;
285
286 saved_mask = current->cpus_allowed;
287 set_cpus_allowed(current, cpumask_of_cpu(cpu));
288 if (get_cpu() != cpu) {
289 /* We were not able to run on requested processor */
290 put_cpu();
291 return 0;
145 } 292 }
146 293
294 rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
295 rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
296
297 wrmsr(MSR_IA32_APERF, 0,0);
298 wrmsr(MSR_IA32_MPERF, 0,0);
299
300#ifdef __i386__
147 /* 301 /*
148 * Assume the write went through when acpi_pstate_strict is not used. 302 * We dont want to do 64 bit divide with 32 bit kernel
149 * As read status_register is an expensive operation and there 303 * Get an approximate value. Return failure in case we cannot get
150 * are no specific error cases where an IO port write will fail. 304 * an approximate value.
151 */ 305 */
152 if (acpi_pstate_strict) { 306 if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
153 /* Then we read the 'status_register' and compare the value 307 int shift_count;
154 * with the target state's 'status' to make sure the 308 u32 h;
155 * transition was successful. 309
156 * Note that we'll poll for up to 1ms (100 cycles of 10us) 310 h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
157 * before giving up. 311 shift_count = fls(h);
158 */ 312
159 313 aperf_cur.whole >>= shift_count;
160 port = perf->status_register.address; 314 mperf_cur.whole >>= shift_count;
161 bit_width = perf->status_register.bit_width; 315 }
162 316
163 dprintk("Looking for 0x%08x from port 0x%04x\n", 317 if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
164 (u32) perf->states[state].status, port); 318 int shift_count = 7;
165 319 aperf_cur.split.lo >>= shift_count;
166 for (i = 0; i < 100; i++) { 320 mperf_cur.split.lo >>= shift_count;
167 ret = acpi_processor_read_port(port, bit_width, &value); 321 }
168 if (ret) { 322
169 dprintk("Invalid port width 0x%04x\n", bit_width); 323 if (aperf_cur.split.lo && mperf_cur.split.lo)
170 return (ret); 324 perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
171 } 325 else
172 if (value == (u32) perf->states[state].status) 326 perf_percent = 0;
173 break; 327
174 udelay(10); 328#else
175 } 329 if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
176 } else { 330 int shift_count = 7;
177 value = (u32) perf->states[state].status; 331 aperf_cur.whole >>= shift_count;
332 mperf_cur.whole >>= shift_count;
178 } 333 }
179 334
180 if (unlikely(value != (u32) perf->states[state].status)) { 335 if (aperf_cur.whole && mperf_cur.whole)
181 printk(KERN_WARNING "acpi-cpufreq: Transition failed\n"); 336 perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
182 retval = -ENODEV; 337 else
183 return (retval); 338 perf_percent = 0;
339
340#endif
341
342 retval = drv_data[cpu]->max_freq * perf_percent / 100;
343
344 put_cpu();
345 set_cpus_allowed(current, saved_mask);
346
347 dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
348 return retval;
349}
350
351static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
352{
353 struct acpi_cpufreq_data *data = drv_data[cpu];
354 unsigned int freq;
355
356 dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
357
358 if (unlikely(data == NULL ||
359 data->acpi_data == NULL || data->freq_table == NULL)) {
360 return 0;
184 } 361 }
185 362
186 dprintk("Transition successful after %d microseconds\n", i * 10); 363 freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
364 dprintk("cur freq = %u\n", freq);
187 365
188 perf->state = state; 366 return freq;
189 return (retval);
190} 367}
191 368
369static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
370 struct acpi_cpufreq_data *data)
371{
372 unsigned int cur_freq;
373 unsigned int i;
374
375 for (i=0; i<100; i++) {
376 cur_freq = extract_freq(get_cur_val(mask), data);
377 if (cur_freq == freq)
378 return 1;
379 udelay(10);
380 }
381 return 0;
382}
192 383
193static int 384static int acpi_cpufreq_target(struct cpufreq_policy *policy,
194acpi_cpufreq_target ( 385 unsigned int target_freq, unsigned int relation)
195 struct cpufreq_policy *policy,
196 unsigned int target_freq,
197 unsigned int relation)
198{ 386{
199 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 387 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
200 struct acpi_processor_performance *perf; 388 struct acpi_processor_performance *perf;
201 struct cpufreq_freqs freqs; 389 struct cpufreq_freqs freqs;
202 cpumask_t online_policy_cpus; 390 cpumask_t online_policy_cpus;
203 cpumask_t saved_mask; 391 struct drv_cmd cmd;
204 cpumask_t set_mask; 392 unsigned int msr;
205 cpumask_t covered_cpus;
206 unsigned int cur_state = 0;
207 unsigned int next_state = 0; 393 unsigned int next_state = 0;
208 unsigned int result = 0; 394 unsigned int next_perf_state = 0;
209 unsigned int j; 395 unsigned int i;
210 unsigned int tmp; 396 int result = 0;
211 397
212 dprintk("acpi_cpufreq_setpolicy\n"); 398 dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
213 399
214 result = cpufreq_frequency_table_target(policy, 400 if (unlikely(data == NULL ||
215 data->freq_table, 401 data->acpi_data == NULL || data->freq_table == NULL)) {
216 target_freq, 402 return -ENODEV;
217 relation, 403 }
218 &next_state);
219 if (unlikely(result))
220 return (result);
221 404
222 perf = data->acpi_data; 405 perf = data->acpi_data;
223 cur_state = perf->state; 406 result = cpufreq_frequency_table_target(policy,
224 freqs.old = data->freq_table[cur_state].frequency; 407 data->freq_table,
225 freqs.new = data->freq_table[next_state].frequency; 408 target_freq,
409 relation, &next_state);
410 if (unlikely(result))
411 return -ENODEV;
226 412
227#ifdef CONFIG_HOTPLUG_CPU 413#ifdef CONFIG_HOTPLUG_CPU
228 /* cpufreq holds the hotplug lock, so we are safe from here on */ 414 /* cpufreq holds the hotplug lock, so we are safe from here on */
@@ -231,106 +417,84 @@ acpi_cpufreq_target (
231 online_policy_cpus = policy->cpus; 417 online_policy_cpus = policy->cpus;
232#endif 418#endif
233 419
234 for_each_cpu_mask(j, online_policy_cpus) { 420 next_perf_state = data->freq_table[next_state].index;
235 freqs.cpu = j; 421 if (perf->state == next_perf_state) {
236 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 422 if (unlikely(data->resume)) {
423 dprintk("Called after resume, resetting to P%d\n",
424 next_perf_state);
425 data->resume = 0;
426 } else {
427 dprintk("Already at target state (P%d)\n",
428 next_perf_state);
429 return 0;
430 }
237 } 431 }
238 432
239 /* 433 switch (data->cpu_feature) {
240 * We need to call driver->target() on all or any CPU in 434 case SYSTEM_INTEL_MSR_CAPABLE:
241 * policy->cpus, depending on policy->shared_type. 435 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
242 */ 436 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
243 saved_mask = current->cpus_allowed; 437 msr =
244 cpus_clear(covered_cpus); 438 (u32) perf->states[next_perf_state].
245 for_each_cpu_mask(j, online_policy_cpus) { 439 control & INTEL_MSR_RANGE;
246 /* 440 cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
247 * Support for SMP systems. 441 break;
248 * Make sure we are running on CPU that wants to change freq 442 case SYSTEM_IO_CAPABLE:
249 */ 443 cmd.type = SYSTEM_IO_CAPABLE;
250 cpus_clear(set_mask); 444 cmd.addr.io.port = perf->control_register.address;
251 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 445 cmd.addr.io.bit_width = perf->control_register.bit_width;
252 cpus_or(set_mask, set_mask, online_policy_cpus); 446 cmd.val = (u32) perf->states[next_perf_state].control;
253 else 447 break;
254 cpu_set(j, set_mask); 448 default:
255 449 return -ENODEV;
256 set_cpus_allowed(current, set_mask); 450 }
257 if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
258 dprintk("couldn't limit to CPUs in this domain\n");
259 result = -EAGAIN;
260 break;
261 }
262 451
263 result = acpi_processor_set_performance (data, j, next_state); 452 cpus_clear(cmd.mask);
264 if (result) {
265 result = -EAGAIN;
266 break;
267 }
268 453
269 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 454 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
270 break; 455 cmd.mask = online_policy_cpus;
271 456 else
272 cpu_set(j, covered_cpus); 457 cpu_set(policy->cpu, cmd.mask);
273 }
274 458
275 for_each_cpu_mask(j, online_policy_cpus) { 459 freqs.old = data->freq_table[perf->state].frequency;
276 freqs.cpu = j; 460 freqs.new = data->freq_table[next_perf_state].frequency;
277 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 461 for_each_cpu_mask(i, cmd.mask) {
462 freqs.cpu = i;
463 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
278 } 464 }
279 465
280 if (unlikely(result)) { 466 drv_write(&cmd);
281 /*
282 * We have failed halfway through the frequency change.
283 * We have sent callbacks to online_policy_cpus and
284 * acpi_processor_set_performance() has been called on
285 * coverd_cpus. Best effort undo..
286 */
287
288 if (!cpus_empty(covered_cpus)) {
289 for_each_cpu_mask(j, covered_cpus) {
290 policy->cpu = j;
291 acpi_processor_set_performance (data,
292 j,
293 cur_state);
294 }
295 }
296 467
297 tmp = freqs.new; 468 if (acpi_pstate_strict) {
298 freqs.new = freqs.old; 469 if (!check_freqs(cmd.mask, freqs.new, data)) {
299 freqs.old = tmp; 470 dprintk("acpi_cpufreq_target failed (%d)\n",
300 for_each_cpu_mask(j, online_policy_cpus) { 471 policy->cpu);
301 freqs.cpu = j; 472 return -EAGAIN;
302 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
303 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
304 } 473 }
305 } 474 }
306 475
307 set_cpus_allowed(current, saved_mask); 476 for_each_cpu_mask(i, cmd.mask) {
308 return (result); 477 freqs.cpu = i;
309} 478 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
479 }
480 perf->state = next_perf_state;
310 481
482 return result;
483}
311 484
312static int 485static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
313acpi_cpufreq_verify (
314 struct cpufreq_policy *policy)
315{ 486{
316 unsigned int result = 0; 487 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
317 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
318 488
319 dprintk("acpi_cpufreq_verify\n"); 489 dprintk("acpi_cpufreq_verify\n");
320 490
321 result = cpufreq_frequency_table_verify(policy, 491 return cpufreq_frequency_table_verify(policy, data->freq_table);
322 data->freq_table);
323
324 return (result);
325} 492}
326 493
327
328static unsigned long 494static unsigned long
329acpi_cpufreq_guess_freq ( 495acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
330 struct cpufreq_acpi_io *data,
331 unsigned int cpu)
332{ 496{
333 struct acpi_processor_performance *perf = data->acpi_data; 497 struct acpi_processor_performance *perf = data->acpi_data;
334 498
335 if (cpu_khz) { 499 if (cpu_khz) {
336 /* search the closest match to cpu_khz */ 500 /* search the closest match to cpu_khz */
@@ -338,16 +502,16 @@ acpi_cpufreq_guess_freq (
338 unsigned long freq; 502 unsigned long freq;
339 unsigned long freqn = perf->states[0].core_frequency * 1000; 503 unsigned long freqn = perf->states[0].core_frequency * 1000;
340 504
341 for (i = 0; i < (perf->state_count - 1); i++) { 505 for (i=0; i<(perf->state_count-1); i++) {
342 freq = freqn; 506 freq = freqn;
343 freqn = perf->states[i+1].core_frequency * 1000; 507 freqn = perf->states[i+1].core_frequency * 1000;
344 if ((2 * cpu_khz) > (freqn + freq)) { 508 if ((2 * cpu_khz) > (freqn + freq)) {
345 perf->state = i; 509 perf->state = i;
346 return (freq); 510 return freq;
347 } 511 }
348 } 512 }
349 perf->state = perf->state_count - 1; 513 perf->state = perf->state_count-1;
350 return (freqn); 514 return freqn;
351 } else { 515 } else {
352 /* assume CPU is at P0... */ 516 /* assume CPU is at P0... */
353 perf->state = 0; 517 perf->state = 0;
@@ -355,7 +519,6 @@ acpi_cpufreq_guess_freq (
355 } 519 }
356} 520}
357 521
358
359/* 522/*
360 * acpi_cpufreq_early_init - initialize ACPI P-States library 523 * acpi_cpufreq_early_init - initialize ACPI P-States library
361 * 524 *
@@ -364,30 +527,34 @@ acpi_cpufreq_guess_freq (
364 * do _PDC and _PSD and find out the processor dependency for the 527 * do _PDC and _PSD and find out the processor dependency for the
365 * actual init that will happen later... 528 * actual init that will happen later...
366 */ 529 */
367static int acpi_cpufreq_early_init_acpi(void) 530static int acpi_cpufreq_early_init(void)
368{ 531{
369 struct acpi_processor_performance *data; 532 struct acpi_processor_performance *data;
370 unsigned int i, j; 533 cpumask_t covered;
534 unsigned int i, j;
371 535
372 dprintk("acpi_cpufreq_early_init\n"); 536 dprintk("acpi_cpufreq_early_init\n");
373 537
374 for_each_possible_cpu(i) { 538 for_each_possible_cpu(i) {
375 data = kzalloc(sizeof(struct acpi_processor_performance), 539 data = kzalloc(sizeof(struct acpi_processor_performance),
376 GFP_KERNEL); 540 GFP_KERNEL);
377 if (!data) { 541 if (!data) {
378 for_each_possible_cpu(j) { 542 for_each_cpu_mask(j, covered) {
379 kfree(acpi_perf_data[j]); 543 kfree(acpi_perf_data[j]);
380 acpi_perf_data[j] = NULL; 544 acpi_perf_data[j] = NULL;
381 } 545 }
382 return (-ENOMEM); 546 return -ENOMEM;
383 } 547 }
384 acpi_perf_data[i] = data; 548 acpi_perf_data[i] = data;
549 cpu_set(i, covered);
385 } 550 }
386 551
387 /* Do initialization in ACPI core */ 552 /* Do initialization in ACPI core */
388 return acpi_processor_preregister_performance(acpi_perf_data); 553 acpi_processor_preregister_performance(acpi_perf_data);
554 return 0;
389} 555}
390 556
557#ifdef CONFIG_SMP
391/* 558/*
392 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 559 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
393 * or do it in BIOS firmware and won't inform about it to OS. If not 560 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -414,39 +581,42 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
414 }, 581 },
415 { } 582 { }
416}; 583};
584#endif
417 585
418static int 586static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
419acpi_cpufreq_cpu_init (
420 struct cpufreq_policy *policy)
421{ 587{
422 unsigned int i; 588 unsigned int i;
423 unsigned int cpu = policy->cpu; 589 unsigned int valid_states = 0;
424 struct cpufreq_acpi_io *data; 590 unsigned int cpu = policy->cpu;
425 unsigned int result = 0; 591 struct acpi_cpufreq_data *data;
592 unsigned int result = 0;
426 struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; 593 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
427 struct acpi_processor_performance *perf; 594 struct acpi_processor_performance *perf;
428 595
429 dprintk("acpi_cpufreq_cpu_init\n"); 596 dprintk("acpi_cpufreq_cpu_init\n");
430 597
431 if (!acpi_perf_data[cpu]) 598 if (!acpi_perf_data[cpu])
432 return (-ENODEV); 599 return -ENODEV;
433 600
434 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 601 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
435 if (!data) 602 if (!data)
436 return (-ENOMEM); 603 return -ENOMEM;
437 604
438 data->acpi_data = acpi_perf_data[cpu]; 605 data->acpi_data = acpi_perf_data[cpu];
439 acpi_io_data[cpu] = data; 606 drv_data[cpu] = data;
440 607
441 result = acpi_processor_register_performance(data->acpi_data, cpu); 608 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
609 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
442 610
611 result = acpi_processor_register_performance(data->acpi_data, cpu);
443 if (result) 612 if (result)
444 goto err_free; 613 goto err_free;
445 614
446 perf = data->acpi_data; 615 perf = data->acpi_data;
447 policy->shared_type = perf->shared_type; 616 policy->shared_type = perf->shared_type;
617
448 /* 618 /*
449 * Will let policy->cpus know about dependency only when software 619 * Will let policy->cpus know about dependency only when software
450 * coordination is required. 620 * coordination is required.
451 */ 621 */
452 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 622 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
@@ -462,10 +632,6 @@ acpi_cpufreq_cpu_init (
462 } 632 }
463#endif 633#endif
464 634
465 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
466 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
467 }
468
469 /* capability check */ 635 /* capability check */
470 if (perf->state_count <= 1) { 636 if (perf->state_count <= 1) {
471 dprintk("No P-States\n"); 637 dprintk("No P-States\n");
@@ -473,17 +639,33 @@ acpi_cpufreq_cpu_init (
473 goto err_unreg; 639 goto err_unreg;
474 } 640 }
475 641
476 if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) || 642 if (perf->control_register.space_id != perf->status_register.space_id) {
477 (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 643 result = -ENODEV;
478 dprintk("Unsupported address space [%d, %d]\n", 644 goto err_unreg;
479 (u32) (perf->control_register.space_id), 645 }
480 (u32) (perf->status_register.space_id)); 646
647 switch (perf->control_register.space_id) {
648 case ACPI_ADR_SPACE_SYSTEM_IO:
649 dprintk("SYSTEM IO addr space\n");
650 data->cpu_feature = SYSTEM_IO_CAPABLE;
651 break;
652 case ACPI_ADR_SPACE_FIXED_HARDWARE:
653 dprintk("HARDWARE addr space\n");
654 if (!check_est_cpu(cpu)) {
655 result = -ENODEV;
656 goto err_unreg;
657 }
658 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
659 break;
660 default:
661 dprintk("Unknown addr space %d\n",
662 (u32) (perf->control_register.space_id));
481 result = -ENODEV; 663 result = -ENODEV;
482 goto err_unreg; 664 goto err_unreg;
483 } 665 }
484 666
485 /* alloc freq_table */ 667 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
486 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL); 668 (perf->state_count+1), GFP_KERNEL);
487 if (!data->freq_table) { 669 if (!data->freq_table) {
488 result = -ENOMEM; 670 result = -ENOMEM;
489 goto err_unreg; 671 goto err_unreg;
@@ -492,129 +674,140 @@ acpi_cpufreq_cpu_init (
492 /* detect transition latency */ 674 /* detect transition latency */
493 policy->cpuinfo.transition_latency = 0; 675 policy->cpuinfo.transition_latency = 0;
494 for (i=0; i<perf->state_count; i++) { 676 for (i=0; i<perf->state_count; i++) {
495 if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) 677 if ((perf->states[i].transition_latency * 1000) >
496 policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; 678 policy->cpuinfo.transition_latency)
679 policy->cpuinfo.transition_latency =
680 perf->states[i].transition_latency * 1000;
497 } 681 }
498 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 682 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
499 683
500 /* The current speed is unknown and not detectable by ACPI... */ 684 data->max_freq = perf->states[0].core_frequency * 1000;
501 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
502
503 /* table init */ 685 /* table init */
504 for (i=0; i<=perf->state_count; i++) 686 for (i=0; i<perf->state_count; i++) {
505 { 687 if (i>0 && perf->states[i].core_frequency ==
506 data->freq_table[i].index = i; 688 perf->states[i-1].core_frequency)
507 if (i<perf->state_count) 689 continue;
508 data->freq_table[i].frequency = perf->states[i].core_frequency * 1000; 690
509 else 691 data->freq_table[valid_states].index = i;
510 data->freq_table[i].frequency = CPUFREQ_TABLE_END; 692 data->freq_table[valid_states].frequency =
693 perf->states[i].core_frequency * 1000;
694 valid_states++;
511 } 695 }
696 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
512 697
513 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); 698 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
514 if (result) { 699 if (result)
515 goto err_freqfree; 700 goto err_freqfree;
701
702 switch (data->cpu_feature) {
 703 case SYSTEM_IO_CAPABLE:
704 /* Current speed is unknown and not detectable by IO port */
705 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
706 break;
 707 case SYSTEM_INTEL_MSR_CAPABLE:
708 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
709 get_cur_freq_on_cpu(cpu);
710 break;
711 default:
712 break;
516 } 713 }
517 714
518 /* notify BIOS that we exist */ 715 /* notify BIOS that we exist */
519 acpi_processor_notify_smm(THIS_MODULE); 716 acpi_processor_notify_smm(THIS_MODULE);
520 717
521 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n", 718 /* Check for APERF/MPERF support in hardware */
522 cpu); 719 if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
720 unsigned int ecx;
721 ecx = cpuid_ecx(6);
722 if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
723 acpi_cpufreq_driver.getavg = get_measured_perf;
724 }
725
726 dprintk("CPU%u - ACPI performance management activated.\n", cpu);
523 for (i = 0; i < perf->state_count; i++) 727 for (i = 0; i < perf->state_count; i++)
524 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", 728 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
525 (i == perf->state?'*':' '), i, 729 (i == perf->state ? '*' : ' '), i,
526 (u32) perf->states[i].core_frequency, 730 (u32) perf->states[i].core_frequency,
527 (u32) perf->states[i].power, 731 (u32) perf->states[i].power,
528 (u32) perf->states[i].transition_latency); 732 (u32) perf->states[i].transition_latency);
529 733
530 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 734 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
531 735
532 /* 736 /*
533 * the first call to ->target() should result in us actually 737 * the first call to ->target() should result in us actually
534 * writing something to the appropriate registers. 738 * writing something to the appropriate registers.
535 */ 739 */
536 data->resume = 1; 740 data->resume = 1;
537
538 return (result);
539 741
540 err_freqfree: 742 return result;
743
744err_freqfree:
541 kfree(data->freq_table); 745 kfree(data->freq_table);
542 err_unreg: 746err_unreg:
543 acpi_processor_unregister_performance(perf, cpu); 747 acpi_processor_unregister_performance(perf, cpu);
544 err_free: 748err_free:
545 kfree(data); 749 kfree(data);
546 acpi_io_data[cpu] = NULL; 750 drv_data[cpu] = NULL;
547 751
548 return (result); 752 return result;
549} 753}
550 754
551 755static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
552static int
553acpi_cpufreq_cpu_exit (
554 struct cpufreq_policy *policy)
555{ 756{
556 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 757 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
557
558 758
559 dprintk("acpi_cpufreq_cpu_exit\n"); 759 dprintk("acpi_cpufreq_cpu_exit\n");
560 760
561 if (data) { 761 if (data) {
562 cpufreq_frequency_table_put_attr(policy->cpu); 762 cpufreq_frequency_table_put_attr(policy->cpu);
563 acpi_io_data[policy->cpu] = NULL; 763 drv_data[policy->cpu] = NULL;
564 acpi_processor_unregister_performance(data->acpi_data, policy->cpu); 764 acpi_processor_unregister_performance(data->acpi_data,
765 policy->cpu);
565 kfree(data); 766 kfree(data);
566 } 767 }
567 768
568 return (0); 769 return 0;
569} 770}
570 771
571static int 772static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
572acpi_cpufreq_resume (
573 struct cpufreq_policy *policy)
574{ 773{
575 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 774 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
576
577 775
578 dprintk("acpi_cpufreq_resume\n"); 776 dprintk("acpi_cpufreq_resume\n");
579 777
580 data->resume = 1; 778 data->resume = 1;
581 779
582 return (0); 780 return 0;
583} 781}
584 782
585 783static struct freq_attr *acpi_cpufreq_attr[] = {
586static struct freq_attr* acpi_cpufreq_attr[] = {
587 &cpufreq_freq_attr_scaling_available_freqs, 784 &cpufreq_freq_attr_scaling_available_freqs,
588 NULL, 785 NULL,
589}; 786};
590 787
591static struct cpufreq_driver acpi_cpufreq_driver = { 788static struct cpufreq_driver acpi_cpufreq_driver = {
592 .verify = acpi_cpufreq_verify, 789 .verify = acpi_cpufreq_verify,
593 .target = acpi_cpufreq_target, 790 .target = acpi_cpufreq_target,
594 .init = acpi_cpufreq_cpu_init, 791 .init = acpi_cpufreq_cpu_init,
595 .exit = acpi_cpufreq_cpu_exit, 792 .exit = acpi_cpufreq_cpu_exit,
596 .resume = acpi_cpufreq_resume, 793 .resume = acpi_cpufreq_resume,
597 .name = "acpi-cpufreq", 794 .name = "acpi-cpufreq",
598 .owner = THIS_MODULE, 795 .owner = THIS_MODULE,
599 .attr = acpi_cpufreq_attr, 796 .attr = acpi_cpufreq_attr,
600}; 797};
601 798
602 799static int __init acpi_cpufreq_init(void)
603static int __init
604acpi_cpufreq_init (void)
605{ 800{
606 dprintk("acpi_cpufreq_init\n"); 801 dprintk("acpi_cpufreq_init\n");
607 802
608 acpi_cpufreq_early_init_acpi(); 803 acpi_cpufreq_early_init();
609 804
610 return cpufreq_register_driver(&acpi_cpufreq_driver); 805 return cpufreq_register_driver(&acpi_cpufreq_driver);
611} 806}
612 807
613 808static void __exit acpi_cpufreq_exit(void)
614static void __exit
615acpi_cpufreq_exit (void)
616{ 809{
617 unsigned int i; 810 unsigned int i;
618 dprintk("acpi_cpufreq_exit\n"); 811 dprintk("acpi_cpufreq_exit\n");
619 812
620 cpufreq_unregister_driver(&acpi_cpufreq_driver); 813 cpufreq_unregister_driver(&acpi_cpufreq_driver);
@@ -627,7 +820,9 @@ acpi_cpufreq_exit (void)
627} 820}
628 821
629module_param(acpi_pstate_strict, uint, 0644); 822module_param(acpi_pstate_strict, uint, 0644);
630MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes."); 823MODULE_PARM_DESC(acpi_pstate_strict,
824 "value 0 or non-zero. non-zero -> strict ACPI checks are "
825 "performed during frequency changes.");
631 826
632late_initcall(acpi_cpufreq_init); 827late_initcall(acpi_cpufreq_init);
633module_exit(acpi_cpufreq_exit); 828module_exit(acpi_cpufreq_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 92afa3bc84f1..6667e9cceb9f 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -447,7 +447,6 @@ static int __init cpufreq_gx_init(void)
447 int ret; 447 int ret;
448 struct gxfreq_params *params; 448 struct gxfreq_params *params;
449 struct pci_dev *gx_pci; 449 struct pci_dev *gx_pci;
450 u32 class_rev;
451 450
452 /* Test if we have the right hardware */ 451 /* Test if we have the right hardware */
453 if ((gx_pci = gx_detect_chipset()) == NULL) 452 if ((gx_pci = gx_detect_chipset()) == NULL)
@@ -472,8 +471,7 @@ static int __init cpufreq_gx_init(void)
472 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); 471 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
473 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); 472 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
474 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); 473 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
475 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); 474 pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
476 params->pci_rev = class_rev && 0xff;
477 475
478 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { 476 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
479 kfree(params); 477 kfree(params);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 7233abe5d695..c548daad3476 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -410,7 +410,7 @@ static int __init longhaul_get_ranges(void)
410 maxmult=longhaul_get_cpu_mult(); 410 maxmult=longhaul_get_cpu_mult();
411 411
412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */ 412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */
413 if ((cpu_khz/1000) > 1200) 413 if ((cpu_khz/maxmult) > 13400)
414 fsb = 200; 414 fsb = 200;
415 else 415 else
416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; 416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
@@ -583,6 +583,10 @@ static int enable_arbiter_disable(void)
583 if (dev == NULL) { 583 if (dev == NULL) {
584 reg = 0x76; 584 reg = 0x76;
585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); 585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
586 /* Find CN400 V-Link host bridge */
587 if (dev == NULL)
588 dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
589
586 } 590 }
587 if (dev != NULL) { 591 if (dev != NULL) {
588 /* Enable access to port 0x22 */ 592 /* Enable access to port 0x22 */
@@ -734,7 +738,7 @@ print_support_type:
734 return 0; 738 return 0;
735 739
736err_acpi: 740err_acpi:
737 printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n"); 741 printk(KERN_ERR PFX "No ACPI support. Unsupported northbridge. Aborting.\n");
738 return -ENODEV; 742 return -ENODEV;
739} 743}
740 744
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 304d2eaa4a1b..bec50170b75a 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -163,29 +163,27 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
163 163
164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) 164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
165{ 165{
166 if ((c->x86 == 0x06) && (c->x86_model == 0x09)) { 166 if (c->x86 == 0x06) {
167 /* Pentium M (Banias) */ 167 if (cpu_has(c, X86_FEATURE_EST))
168 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 168 printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
169 "The speedstep_centrino module offers voltage scaling" 169 "The acpi-cpufreq module offers voltage scaling"
170 " in addition of frequency scaling. You should use " 170 " in addition of frequency scaling. You should use "
171 "that instead of p4-clockmod, if possible.\n"); 171 "that instead of p4-clockmod, if possible.\n");
172 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); 172 switch (c->x86_model) {
173 } 173 case 0x0E: /* Core */
174 174 case 0x0F: /* Core Duo */
175 if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) { 175 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
176 /* Pentium M (Dothan) */ 176 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
177 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 177 case 0x0D: /* Pentium M (Dothan) */
178 "The speedstep_centrino module offers voltage scaling" 178 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
179 " in addition of frequency scaling. You should use " 179 /* fall through */
180 "that instead of p4-clockmod, if possible.\n"); 180 case 0x09: /* Pentium M (Banias) */
181 /* on P-4s, the TSC runs with constant frequency independent whether 181 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
182 * throttling is active or not. */ 182 }
183 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
184 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
185 } 183 }
186 184
187 if (c->x86 != 0xF) { 185 if (c->x86 != 0xF) {
188 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n"); 186 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n");
189 return 0; 187 return 0;
190 } 188 }
191 189
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
index ef457d50f4ac..b8fb4b521c62 100644
--- a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -153,6 +153,7 @@ static struct cpufreq_driver sc520_freq_driver = {
153static int __init sc520_freq_init(void) 153static int __init sc520_freq_init(void)
154{ 154{
155 struct cpuinfo_x86 *c = cpu_data; 155 struct cpuinfo_x86 *c = cpu_data;
156 int err;
156 157
157 /* Test if we have the right hardware */ 158 /* Test if we have the right hardware */
158 if(c->x86_vendor != X86_VENDOR_AMD || 159 if(c->x86_vendor != X86_VENDOR_AMD ||
@@ -166,7 +167,11 @@ static int __init sc520_freq_init(void)
166 return -ENOMEM; 167 return -ENOMEM;
167 } 168 }
168 169
169 return cpufreq_register_driver(&sc520_freq_driver); 170 err = cpufreq_register_driver(&sc520_freq_driver);
171 if (err)
172 iounmap(cpuctl);
173
174 return err;
170} 175}
171 176
172 177
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index e8993baf3d14..5113e9231634 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -36,6 +36,7 @@
36 36
37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
38 38
39#define INTEL_MSR_RANGE (0xffff)
39 40
40struct cpu_id 41struct cpu_id
41{ 42{
@@ -379,6 +380,7 @@ static int centrino_cpu_early_init_acpi(void)
379} 380}
380 381
381 382
383#ifdef CONFIG_SMP
382/* 384/*
383 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 385 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
384 * or do it in BIOS firmware and won't inform about it to OS. If not 386 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -392,7 +394,6 @@ static int sw_any_bug_found(struct dmi_system_id *d)
392 return 0; 394 return 0;
393} 395}
394 396
395
396static struct dmi_system_id sw_any_bug_dmi_table[] = { 397static struct dmi_system_id sw_any_bug_dmi_table[] = {
397 { 398 {
398 .callback = sw_any_bug_found, 399 .callback = sw_any_bug_found,
@@ -405,7 +406,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
405 }, 406 },
406 { } 407 { }
407}; 408};
408 409#endif
409 410
410/* 411/*
411 * centrino_cpu_init_acpi - register with ACPI P-States library 412 * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -463,8 +464,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
463 } 464 }
464 465
465 for (i=0; i<p->state_count; i++) { 466 for (i=0; i<p->state_count; i++) {
466 if (p->states[i].control != p->states[i].status) { 467 if ((p->states[i].control & INTEL_MSR_RANGE) !=
467 dprintk("Different control (%llu) and status values (%llu)\n", 468 (p->states[i].status & INTEL_MSR_RANGE)) {
469 dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
468 p->states[i].control, p->states[i].status); 470 p->states[i].control, p->states[i].status);
469 result = -EINVAL; 471 result = -EINVAL;
470 goto err_unreg; 472 goto err_unreg;
@@ -500,7 +502,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
500 } 502 }
501 503
502 for (i=0; i<p->state_count; i++) { 504 for (i=0; i<p->state_count; i++) {
503 centrino_model[cpu]->op_points[i].index = p->states[i].control; 505 centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
504 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000; 506 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
505 dprintk("adding state %i with frequency %u and control value %04x\n", 507 dprintk("adding state %i with frequency %u and control value %04x\n",
506 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index); 508 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
@@ -531,6 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
531 533
532 /* notify BIOS that we exist */ 534 /* notify BIOS that we exist */
533 acpi_processor_notify_smm(THIS_MODULE); 535 acpi_processor_notify_smm(THIS_MODULE);
 536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI "
 537 "config is deprecated.\n"
 538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq) instead.\n");
534 539
535 return 0; 540 return 0;
536 541
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 4f46cac155c4..d59277c00911 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -123,6 +123,36 @@ static unsigned int pentiumM_get_frequency(void)
123 return (msr_tmp * 100 * 1000); 123 return (msr_tmp * 100 * 1000);
124} 124}
125 125
126static unsigned int pentium_core_get_frequency(void)
127{
128 u32 fsb = 0;
129 u32 msr_lo, msr_tmp;
130
131 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
132 /* see table B-2 of 25366920.pdf */
133 switch (msr_lo & 0x07) {
134 case 5:
135 fsb = 100000;
136 break;
137 case 1:
138 fsb = 133333;
139 break;
140 case 3:
141 fsb = 166667;
142 break;
143 default:
 144 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value\n");
145 }
146
147 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
148 dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
149
150 msr_tmp = (msr_lo >> 22) & 0x1f;
151 dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
152
153 return (msr_tmp * fsb);
154}
155
126 156
127static unsigned int pentium4_get_frequency(void) 157static unsigned int pentium4_get_frequency(void)
128{ 158{
@@ -174,6 +204,8 @@ static unsigned int pentium4_get_frequency(void)
174unsigned int speedstep_get_processor_frequency(unsigned int processor) 204unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{ 205{
176 switch (processor) { 206 switch (processor) {
207 case SPEEDSTEP_PROCESSOR_PCORE:
208 return pentium_core_get_frequency();
177 case SPEEDSTEP_PROCESSOR_PM: 209 case SPEEDSTEP_PROCESSOR_PM:
178 return pentiumM_get_frequency(); 210 return pentiumM_get_frequency();
179 case SPEEDSTEP_PROCESSOR_P4D: 211 case SPEEDSTEP_PROCESSOR_P4D:
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index b735429c50b4..b11bcc608cac 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -22,6 +22,7 @@
22 * the speedstep_get_processor_frequency() call. */ 22 * the speedstep_get_processor_frequency() call. */
23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ 23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ 24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
25#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
25 26
26/* speedstep states -- only two of them */ 27/* speedstep states -- only two of them */
27 28
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index c28333d53646..ff0d89806114 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -360,9 +360,6 @@ static int __init speedstep_init(void)
360 case SPEEDSTEP_PROCESSOR_PIII_C: 360 case SPEEDSTEP_PROCESSOR_PIII_C:
361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: 361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
362 break; 362 break;
363 case SPEEDSTEP_PROCESSOR_P4M:
364 printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
365 break;
366 default: 363 default:
367 speedstep_processor = 0; 364 speedstep_processor = 0;
368 } 365 }