Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/powernow-k8.c')
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 1363
1 file changed, 1363 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
new file mode 100644
index 000000000000..34ed53a06730
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -0,0 +1,1363 @@
1 | /* | ||
2 | * (c) 2003-2006 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | * | ||
7 | * Support : mark.langsdorf@amd.com | ||
8 | * | ||
9 | * Based on the powernow-k7.c module written by Dave Jones. | ||
10 | * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs | ||
11 | * (C) 2004 Dominik Brodowski <linux@brodo.de> | ||
12 | * (C) 2004 Pavel Machek <pavel@suse.cz> | ||
13 | * Licensed under the terms of the GNU GPL License version 2. | ||
14 | * Based upon datasheets & sample CPUs kindly provided by AMD. | ||
15 | * | ||
16 | * Valuable input gratefully received from Dave Jones, Pavel Machek, | ||
17 | * Dominik Brodowski, Jacob Shin, and others. | ||
18 | * Originally developed by Paul Devriendt. | ||
19 | * Processor information obtained from Chapter 9 (Power and Thermal Management) | ||
20 | * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | ||
21 | * Opteron Processors" available for download from www.amd.com | ||
22 | * | ||
23 | * Tables for specific CPUs can be inferred from | ||
24 | * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/cpufreq.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/cpumask.h> | ||
35 | #include <linux/sched.h> /* for current / set_cpus_allowed() */ | ||
36 | |||
37 | #include <asm/msr.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/delay.h> | ||
40 | |||
41 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
42 | #include <linux/acpi.h> | ||
43 | #include <linux/mutex.h> | ||
44 | #include <acpi/processor.h> | ||
45 | #endif | ||
46 | |||
47 | #define PFX "powernow-k8: " | ||
48 | #define BFX PFX "BIOS error: " | ||
49 | #define VERSION "version 2.00.00" | ||
50 | #include "powernow-k8.h" | ||
51 | |||
52 | /* serialize freq changes */ | ||
53 | static DEFINE_MUTEX(fidvid_mutex); | ||
54 | |||
55 | static struct powernow_k8_data *powernow_data[NR_CPUS]; | ||
56 | |||
57 | static int cpu_family = CPU_OPTERON; | ||
58 | |||
59 | #ifndef CONFIG_SMP | ||
60 | static cpumask_t cpu_core_map[1]; | ||
61 | #endif | ||
62 | |||
63 | /* Return a frequency in MHz, given an input fid */ | ||
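/* (e.g. fid 0 is 800 MHz; each fid step adds 100 MHz, so fid 0x0a is 1800 MHz) */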
64 | static u32 find_freq_from_fid(u32 fid) | ||
65 | { | ||
66 | return 800 + (fid * 100); | ||
67 | } | ||
68 | |||
69 | |||
70 | /* Return a frequency in kHz, given an input fid */ | ||
71 | static u32 find_khz_freq_from_fid(u32 fid) | ||
72 | { | ||
73 | return 1000 * find_freq_from_fid(fid); | ||
74 | } | ||
75 | |||
76 | /* Return a frequency in MHz, given an input fid and did */ | ||
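/* (e.g. fid 0x2 with did 0x1 gives 100 * (0x2 + 0x10) >> 1 = 900 MHz) */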
77 | static u32 find_freq_from_fiddid(u32 fid, u32 did) | ||
78 | { | ||
79 | return 100 * (fid + 0x10) >> did; | ||
80 | } | ||
81 | |||
82 | static u32 find_khz_freq_from_fiddid(u32 fid, u32 did) | ||
83 | { | ||
84 | return 1000 * find_freq_from_fiddid(fid, did); | ||
85 | } | ||
86 | |||
87 | static u32 find_fid_from_pstate(u32 pstate) | ||
88 | { | ||
89 | u32 hi, lo; | ||
90 | rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi); | ||
91 | return lo & HW_PSTATE_FID_MASK; | ||
92 | } | ||
93 | |||
94 | static u32 find_did_from_pstate(u32 pstate) | ||
95 | { | ||
96 | u32 hi, lo; | ||
97 | rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi); | ||
98 | return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
99 | } | ||
100 | |||
101 | /* Return the vco fid for an input fid | ||
102 | * | ||
103 | * Each "low" fid has a corresponding "high" (vco) fid, and low fids can only | ||
104 | * be reached from their corresponding high fids. This returns the high fid | ||
105 | * corresponding to a low one; fids already in the high range are unchanged. | ||
106 | */ | ||
107 | static u32 convert_fid_to_vco_fid(u32 fid) | ||
108 | { | ||
109 | if (fid < HI_FID_TABLE_BOTTOM) | ||
110 | return 8 + (2 * fid); | ||
111 | else | ||
112 | return fid; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Return 1 if the pending bit is set. Unless we just instructed the processor | ||
117 | * to transition to a new state, seeing this bit set is really bad news. | ||
118 | */ | ||
119 | static int pending_bit_stuck(void) | ||
120 | { | ||
121 | u32 lo, hi; | ||
122 | |||
123 | if (cpu_family == CPU_HW_PSTATE) | ||
124 | return 0; | ||
125 | |||
126 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
127 | return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Update the global current fid / vid values from the status msr. | ||
132 | * Returns 1 on error. | ||
133 | */ | ||
134 | static int query_current_values_with_pending_wait(struct powernow_k8_data *data) | ||
135 | { | ||
136 | u32 lo, hi; | ||
137 | u32 i = 0; | ||
138 | |||
139 | if (cpu_family == CPU_HW_PSTATE) { | ||
140 | rdmsr(MSR_PSTATE_STATUS, lo, hi); | ||
141 | i = lo & HW_PSTATE_MASK; | ||
142 | rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi); | ||
143 | data->currfid = lo & HW_PSTATE_FID_MASK; | ||
144 | data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
145 | return 0; | ||
146 | } | ||
147 | do { | ||
148 | if (i++ > 10000) { | ||
149 | dprintk("detected change pending stuck\n"); | ||
150 | return 1; | ||
151 | } | ||
152 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
153 | } while (lo & MSR_S_LO_CHANGE_PENDING); | ||
154 | |||
155 | data->currvid = hi & MSR_S_HI_CURRENT_VID; | ||
156 | data->currfid = lo & MSR_S_LO_CURRENT_FID; | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* the isochronous relief time */ | ||
162 | static void count_off_irt(struct powernow_k8_data *data) | ||
163 | { | ||
164 | udelay((1 << data->irt) * 10); | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | /* the voltage stabilization time */ | ||
169 | static void count_off_vst(struct powernow_k8_data *data) | ||
170 | { | ||
171 | udelay(data->vstable * VST_UNITS_20US); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | /* need to init the control msr to a safe value (for each cpu) */ | ||
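/* Writing the current fid/vid back without the INIT bit set loads the
 * control MSR with known-good values but starts no transition. */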
176 | static void fidvid_msr_init(void) | ||
177 | { | ||
178 | u32 lo, hi; | ||
179 | u8 fid, vid; | ||
180 | |||
181 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | ||
182 | vid = hi & MSR_S_HI_CURRENT_VID; | ||
183 | fid = lo & MSR_S_LO_CURRENT_FID; | ||
184 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); | ||
185 | hi = MSR_C_HI_STP_GNT_BENIGN; | ||
186 | dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); | ||
187 | wrmsr(MSR_FIDVID_CTL, lo, hi); | ||
188 | } | ||
189 | |||
190 | |||
191 | /* write the new fid value along with the other control fields to the msr */ | ||
192 | static int write_new_fid(struct powernow_k8_data *data, u32 fid) | ||
193 | { | ||
194 | u32 lo; | ||
195 | u32 savevid = data->currvid; | ||
196 | u32 i = 0; | ||
197 | |||
198 | if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { | ||
199 | printk(KERN_ERR PFX "internal error - overflow on fid write\n"); | ||
200 | return 1; | ||
201 | } | ||
202 | |||
203 | lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; | ||
204 | |||
205 | dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", | ||
206 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); | ||
207 | |||
208 | do { | ||
209 | wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); | ||
210 | if (i++ > 100) { | ||
211 | printk(KERN_ERR PFX "Hardware error - pending bit very stuck - no further pstate changes possible\n"); | ||
212 | return 1; | ||
213 | } | ||
214 | } while (query_current_values_with_pending_wait(data)); | ||
215 | |||
216 | count_off_irt(data); | ||
217 | |||
218 | if (savevid != data->currvid) { | ||
219 | printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n", | ||
220 | savevid, data->currvid); | ||
221 | return 1; | ||
222 | } | ||
223 | |||
224 | if (fid != data->currfid) { | ||
225 | printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid, | ||
226 | data->currfid); | ||
227 | return 1; | ||
228 | } | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | /* Write a new vid to the hardware */ | ||
234 | static int write_new_vid(struct powernow_k8_data *data, u32 vid) | ||
235 | { | ||
236 | u32 lo; | ||
237 | u32 savefid = data->currfid; | ||
238 | int i = 0; | ||
239 | |||
240 | if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { | ||
241 | printk(KERN_ERR PFX "internal error - overflow on vid write\n"); | ||
242 | return 1; | ||
243 | } | ||
244 | |||
245 | lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; | ||
246 | |||
247 | dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", | ||
248 | vid, lo, STOP_GRANT_5NS); | ||
249 | |||
250 | do { | ||
251 | wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); | ||
252 | if (i++ > 100) { | ||
253 | printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); | ||
254 | return 1; | ||
255 | } | ||
256 | } while (query_current_values_with_pending_wait(data)); | ||
257 | |||
258 | if (savefid != data->currfid) { | ||
259 | printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n", | ||
260 | savefid, data->currfid); | ||
261 | return 1; | ||
262 | } | ||
263 | |||
264 | if (vid != data->currvid) { | ||
265 | printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid, | ||
266 | data->currvid); | ||
267 | return 1; | ||
268 | } | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Reduce the vid toward reqvid, stepping down by at most "step" per write. | ||
275 | * Decreasing vid codes represent increasing voltages: | ||
276 | * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. | ||
277 | */ | ||
278 | static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step) | ||
279 | { | ||
280 | if ((data->currvid - reqvid) > step) | ||
281 | reqvid = data->currvid - step; | ||
282 | |||
283 | if (write_new_vid(data, reqvid)) | ||
284 | return 1; | ||
285 | |||
286 | count_off_vst(data); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* Change hardware pstate by single MSR write */ | ||
292 | static int transition_pstate(struct powernow_k8_data *data, u32 pstate) | ||
293 | { | ||
294 | wrmsr(MSR_PSTATE_CTRL, pstate, 0); | ||
295 | data->currfid = find_fid_from_pstate(pstate); | ||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ | ||
300 | static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid) | ||
301 | { | ||
302 | if (core_voltage_pre_transition(data, reqvid)) | ||
303 | return 1; | ||
304 | |||
305 | if (core_frequency_transition(data, reqfid)) | ||
306 | return 1; | ||
307 | |||
308 | if (core_voltage_post_transition(data, reqvid)) | ||
309 | return 1; | ||
310 | |||
311 | if (query_current_values_with_pending_wait(data)) | ||
312 | return 1; | ||
313 | |||
314 | if ((reqfid != data->currfid) || (reqvid != data->currvid)) { | ||
315 | printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", | ||
316 | smp_processor_id(), | ||
317 | reqfid, reqvid, data->currfid, data->currvid); | ||
318 | return 1; | ||
319 | } | ||
320 | |||
321 | dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", | ||
322 | smp_processor_id(), data->currfid, data->currvid); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | /* Phase 1 - core voltage transition ... setup voltage */ | ||
328 | static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid) | ||
329 | { | ||
330 | u32 rvosteps = data->rvo; | ||
331 | u32 savefid = data->currfid; | ||
332 | u32 maxvid, lo; | ||
333 | |||
334 | dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", | ||
335 | smp_processor_id(), | ||
336 | data->currfid, data->currvid, reqvid, data->rvo); | ||
337 | |||
338 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); | ||
339 | maxvid = 0x1f & (maxvid >> 16); | ||
340 | dprintk("ph1 maxvid=0x%x\n", maxvid); | ||
341 | if (reqvid < maxvid) /* lower numbers are higher voltages */ | ||
342 | reqvid = maxvid; | ||
343 | |||
344 | while (data->currvid > reqvid) { | ||
345 | dprintk("ph1: curr 0x%x, req vid 0x%x\n", | ||
346 | data->currvid, reqvid); | ||
347 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) | ||
348 | return 1; | ||
349 | } | ||
350 | |||
351 | while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) { | ||
352 | if (data->currvid == maxvid) { | ||
353 | rvosteps = 0; | ||
354 | } else { | ||
355 | dprintk("ph1: changing vid for rvo, req 0x%x\n", | ||
356 | data->currvid - 1); | ||
357 | if (decrease_vid_code_by_step(data, data->currvid - 1, 1)) | ||
358 | return 1; | ||
359 | rvosteps--; | ||
360 | } | ||
361 | } | ||
362 | |||
363 | if (query_current_values_with_pending_wait(data)) | ||
364 | return 1; | ||
365 | |||
366 | if (savefid != data->currfid) { | ||
367 | printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid); | ||
368 | return 1; | ||
369 | } | ||
370 | |||
371 | dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", | ||
372 | data->currfid, data->currvid); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | /* Phase 2 - core frequency transition */ | ||
378 | static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | ||
379 | { | ||
380 | u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid; | ||
381 | |||
382 | if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { | ||
383 | printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", | ||
384 | reqfid, data->currfid); | ||
385 | return 1; | ||
386 | } | ||
387 | |||
388 | if (data->currfid == reqfid) { | ||
389 | printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid); | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", | ||
394 | smp_processor_id(), | ||
395 | data->currfid, data->currvid, reqfid); | ||
396 | |||
397 | vcoreqfid = convert_fid_to_vco_fid(reqfid); | ||
398 | vcocurrfid = convert_fid_to_vco_fid(data->currfid); | ||
399 | vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid | ||
400 | : vcoreqfid - vcocurrfid; | ||
401 | |||
402 | while (vcofiddiff > 2) { | ||
403 | fid_interval = (data->currfid & 1) ? 1 : 2; | ||
404 | |||
405 | if (reqfid > data->currfid) { | ||
406 | if (data->currfid > LO_FID_TABLE_TOP) { | ||
407 | if (write_new_fid(data, data->currfid + fid_interval)) { | ||
408 | return 1; | ||
409 | } | ||
410 | } else { | ||
411 | if (write_new_fid | ||
412 | (data, 2 + convert_fid_to_vco_fid(data->currfid))) { | ||
413 | return 1; | ||
414 | } | ||
415 | } | ||
416 | } else { | ||
417 | if (write_new_fid(data, data->currfid - fid_interval)) | ||
418 | return 1; | ||
419 | } | ||
420 | |||
421 | vcocurrfid = convert_fid_to_vco_fid(data->currfid); | ||
422 | vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid | ||
423 | : vcoreqfid - vcocurrfid; | ||
424 | } | ||
425 | |||
426 | if (write_new_fid(data, reqfid)) | ||
427 | return 1; | ||
428 | |||
429 | if (query_current_values_with_pending_wait(data)) | ||
430 | return 1; | ||
431 | |||
432 | if (data->currfid != reqfid) { | ||
433 | printk(KERN_ERR PFX | ||
434 | "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", | ||
435 | data->currfid, reqfid); | ||
436 | return 1; | ||
437 | } | ||
438 | |||
439 | if (savevid != data->currvid) { | ||
440 | printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", | ||
441 | savevid, data->currvid); | ||
442 | return 1; | ||
443 | } | ||
444 | |||
445 | dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", | ||
446 | data->currfid, data->currvid); | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* Phase 3 - core voltage transition flow ... jump to the final vid. */ | ||
452 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid) | ||
453 | { | ||
454 | u32 savefid = data->currfid; | ||
455 | u32 savereqvid = reqvid; | ||
456 | |||
457 | dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", | ||
458 | smp_processor_id(), | ||
459 | data->currfid, data->currvid); | ||
460 | |||
461 | if (reqvid != data->currvid) { | ||
462 | if (write_new_vid(data, reqvid)) | ||
463 | return 1; | ||
464 | |||
465 | if (savefid != data->currfid) { | ||
466 | printk(KERN_ERR PFX | ||
467 | "ph3: bad fid change, save 0x%x, curr 0x%x\n", | ||
468 | savefid, data->currfid); | ||
469 | return 1; | ||
470 | } | ||
471 | |||
472 | if (data->currvid != reqvid) { | ||
473 | printk(KERN_ERR PFX | ||
474 | "ph3: failed vid transition\n, req 0x%x, curr 0x%x", | ||
475 | reqvid, data->currvid); | ||
476 | return 1; | ||
477 | } | ||
478 | } | ||
479 | |||
480 | if (query_current_values_with_pending_wait(data)) | ||
481 | return 1; | ||
482 | |||
483 | if (savereqvid != data->currvid) { | ||
484 | dprintk("ph3 failed, currvid 0x%x\n", data->currvid); | ||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | if (savefid != data->currfid) { | ||
489 | dprintk("ph3 failed, currfid changed 0x%x\n", | ||
490 | data->currfid); | ||
491 | return 1; | ||
492 | } | ||
493 | |||
494 | dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", | ||
495 | data->currfid, data->currvid); | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int check_supported_cpu(unsigned int cpu) | ||
501 | { | ||
502 | cpumask_t oldmask = CPU_MASK_ALL; | ||
503 | u32 eax, ebx, ecx, edx; | ||
504 | unsigned int rc = 0; | ||
505 | |||
506 | oldmask = current->cpus_allowed; | ||
507 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
508 | |||
509 | if (smp_processor_id() != cpu) { | ||
510 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); | ||
511 | goto out; | ||
512 | } | ||
513 | |||
514 | if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
515 | goto out; | ||
516 | |||
517 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | ||
518 | if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && | ||
519 | ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) | ||
520 | goto out; | ||
521 | |||
522 | if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { | ||
523 | if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || | ||
524 | ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { | ||
525 | printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); | ||
526 | goto out; | ||
527 | } | ||
528 | |||
529 | eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); | ||
530 | if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { | ||
531 | printk(KERN_INFO PFX | ||
532 | "No frequency change capabilities detected\n"); | ||
533 | goto out; | ||
534 | } | ||
535 | |||
536 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | ||
537 | if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { | ||
538 | printk(KERN_INFO PFX "Power state transitions not supported\n"); | ||
539 | goto out; | ||
540 | } | ||
541 | } else { /* must be a HW Pstate capable processor */ | ||
542 | cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); | ||
543 | if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) | ||
544 | cpu_family = CPU_HW_PSTATE; | ||
545 | else | ||
546 | goto out; | ||
547 | } | ||
548 | |||
549 | rc = 1; | ||
550 | |||
551 | out: | ||
552 | set_cpus_allowed(current, oldmask); | ||
553 | return rc; | ||
554 | } | ||
555 | |||
556 | static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) | ||
557 | { | ||
558 | unsigned int j; | ||
559 | u8 lastfid = 0xff; | ||
560 | |||
561 | for (j = 0; j < data->numps; j++) { | ||
562 | if (pst[j].vid > LEAST_VID) { | ||
563 | printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid); | ||
564 | return -EINVAL; | ||
565 | } | ||
566 | if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ | ||
567 | printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j); | ||
568 | return -ENODEV; | ||
569 | } | ||
570 | if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ | ||
571 | printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); | ||
572 | return -ENODEV; | ||
573 | } | ||
574 | if (pst[j].fid > MAX_FID) { | ||
575 | printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j); | ||
576 | return -ENODEV; | ||
577 | } | ||
578 | if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { | ||
579 | /* Only first fid is allowed to be in "low" range */ | ||
580 | printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid); | ||
581 | return -EINVAL; | ||
582 | } | ||
583 | if (pst[j].fid < lastfid) | ||
584 | lastfid = pst[j].fid; | ||
585 | } | ||
586 | if (lastfid & 1) { | ||
587 | printk(KERN_ERR BFX "lastfid invalid\n"); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | if (lastfid > LO_FID_TABLE_TOP) | ||
591 | printk(KERN_INFO BFX "first fid not from lo freq table\n"); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static void print_basics(struct powernow_k8_data *data) | ||
597 | { | ||
598 | int j; | ||
599 | for (j = 0; j < data->numps; j++) { | ||
600 | if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { | ||
601 | if (cpu_family == CPU_HW_PSTATE) { | ||
602 | printk(KERN_INFO PFX " %d : fid 0x%x did 0x%x (%d MHz)\n", | ||
603 | j, | ||
604 | (data->powernow_table[j].index & 0xff00) >> 8, | ||
605 | (data->powernow_table[j].index & 0xff0000) >> 16, | ||
606 | data->powernow_table[j].frequency/1000); | ||
607 | } else { | ||
608 | printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", | ||
609 | j, | ||
610 | data->powernow_table[j].index & 0xff, | ||
611 | data->powernow_table[j].frequency/1000, | ||
612 | data->powernow_table[j].index >> 8); | ||
613 | } | ||
614 | } | ||
615 | } | ||
616 | if (data->batps) | ||
617 | printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps); | ||
618 | } | ||
619 | |||
620 | static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) | ||
621 | { | ||
622 | struct cpufreq_frequency_table *powernow_table; | ||
623 | unsigned int j; | ||
624 | |||
625 | if (data->batps) { /* use ACPI support to get full speed on mains power */ | ||
626 | printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range)\n", data->batps); | ||
627 | data->numps = data->batps; | ||
628 | } | ||
629 | |||
630 | for (j = 1; j < data->numps; j++) { | ||
631 | if (pst[j-1].fid >= pst[j].fid) { | ||
632 | printk(KERN_ERR PFX "PST out of sequence\n"); | ||
633 | return -EINVAL; | ||
634 | } | ||
635 | } | ||
636 | |||
637 | if (data->numps < 2) { | ||
638 | printk(KERN_ERR PFX "no p states to transition\n"); | ||
639 | return -ENODEV; | ||
640 | } | ||
641 | |||
642 | if (check_pst_table(data, pst, maxvid)) | ||
643 | return -EINVAL; | ||
644 | |||
645 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | ||
646 | * (data->numps + 1)), GFP_KERNEL); | ||
647 | if (!powernow_table) { | ||
648 | printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); | ||
649 | return -ENOMEM; | ||
650 | } | ||
651 | |||
652 | for (j = 0; j < data->numps; j++) { | ||
653 | powernow_table[j].index = pst[j].fid; /* lower 8 bits */ | ||
654 | powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ | ||
655 | powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid); | ||
656 | } | ||
657 | powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; | ||
658 | powernow_table[data->numps].index = 0; | ||
659 | |||
660 | if (query_current_values_with_pending_wait(data)) { | ||
661 | kfree(powernow_table); | ||
662 | return -EIO; | ||
663 | } | ||
664 | |||
665 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | ||
666 | data->powernow_table = powernow_table; | ||
667 | if (first_cpu(cpu_core_map[data->cpu]) == data->cpu) | ||
668 | print_basics(data); | ||
669 | |||
670 | for (j = 0; j < data->numps; j++) | ||
671 | if ((pst[j].fid == data->currfid) && (pst[j].vid == data->currvid)) | ||
672 | return 0; | ||
673 | |||
674 | dprintk("currfid/vid do not match PST, ignoring\n"); | ||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* Find and validate the PSB/PST table in BIOS. */ | ||
679 | static int find_psb_table(struct powernow_k8_data *data) | ||
680 | { | ||
681 | struct psb_s *psb; | ||
682 | unsigned int i; | ||
683 | u32 mvs; | ||
684 | u8 maxvid; | ||
685 | u32 cpst = 0; | ||
686 | u32 thiscpuid; | ||
687 | |||
688 | for (i = 0xc0000; i < 0xffff0; i += 0x10) { | ||
689 | /* Scan BIOS looking for the signature. */ | ||
690 | /* It cannot be at 0xffff0 - the table is too big to fit there. */ | ||
691 | |||
692 | psb = phys_to_virt(i); | ||
693 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) | ||
694 | continue; | ||
695 | |||
696 | dprintk("found PSB header at 0x%p\n", psb); | ||
697 | |||
698 | dprintk("table vers: 0x%x\n", psb->tableversion); | ||
699 | if (psb->tableversion != PSB_VERSION_1_4) { | ||
700 | printk(KERN_ERR BFX "PSB table is not v1.4\n"); | ||
701 | return -ENODEV; | ||
702 | } | ||
703 | |||
704 | dprintk("flags: 0x%x\n", psb->flags1); | ||
705 | if (psb->flags1) { | ||
706 | printk(KERN_ERR BFX "unknown flags\n"); | ||
707 | return -ENODEV; | ||
708 | } | ||
709 | |||
710 | data->vstable = psb->vstable; | ||
711 | dprintk("voltage stabilization time: %d(*20us)\n", data->vstable); | ||
712 | |||
713 | dprintk("flags2: 0x%x\n", psb->flags2); | ||
714 | data->rvo = psb->flags2 & 3; | ||
715 | data->irt = ((psb->flags2) >> 2) & 3; | ||
716 | mvs = ((psb->flags2) >> 4) & 3; | ||
717 | data->vidmvs = 1 << mvs; | ||
718 | data->batps = ((psb->flags2) >> 6) & 3; | ||
719 | |||
720 | dprintk("ramp voltage offset: %d\n", data->rvo); | ||
721 | dprintk("isochronous relief time: %d\n", data->irt); | ||
722 | dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); | ||
723 | |||
724 | dprintk("numpst: 0x%x\n", psb->num_tables); | ||
725 | cpst = psb->num_tables; | ||
726 | if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0)) { | ||
727 | thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | ||
728 | if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0)) { | ||
729 | cpst = 1; | ||
730 | } | ||
731 | } | ||
732 | if (cpst != 1) { | ||
733 | printk(KERN_ERR BFX "numpst must be 1\n"); | ||
734 | return -ENODEV; | ||
735 | } | ||
736 | |||
737 | data->plllock = psb->plllocktime; | ||
738 | dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); | ||
739 | dprintk("maxfid: 0x%x\n", psb->maxfid); | ||
740 | dprintk("maxvid: 0x%x\n", psb->maxvid); | ||
741 | maxvid = psb->maxvid; | ||
742 | |||
743 | data->numps = psb->numps; | ||
744 | dprintk("numpstates: 0x%x\n", data->numps); | ||
745 | return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); | ||
746 | } | ||
747 | /* | ||
748 | * If you see this message, complain to BIOS manufacturer. If | ||
749 | * he tells you "we do not support Linux" or some similar | ||
750 | * nonsense, remember that Windows 2000 uses the same legacy | ||
751 | * mechanism that the old Linux PSB driver uses. Tell them it | ||
752 | * is broken with Windows 2000. | ||
753 | * | ||
754 | * The reference to the AMD documentation is chapter 9 in the | ||
755 | * BIOS and Kernel Developer's Guide, which is available on | ||
756 | * www.amd.com | ||
757 | */ | ||
758 | printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n"); | ||
759 | return -ENODEV; | ||
760 | } | ||
761 | |||
762 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
763 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) | ||
764 | { | ||
765 | if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) | ||
766 | return; | ||
767 | |||
768 | data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; | ||
769 | data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; | ||
770 | data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | ||
771 | data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | ||
772 | data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); | ||
773 | data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; | ||
774 | } | ||
775 | |||
776 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | ||
777 | { | ||
778 | struct cpufreq_frequency_table *powernow_table; | ||
779 | int ret_val; | ||
780 | |||
781 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | ||
782 | dprintk("register performance failed: bad ACPI data\n"); | ||
783 | return -EIO; | ||
784 | } | ||
785 | |||
786 | /* verify the data contained in the ACPI structures */ | ||
787 | if (data->acpi_data.state_count <= 1) { | ||
788 | dprintk("No ACPI P-States\n"); | ||
789 | goto err_out; | ||
790 | } | ||
791 | |||
792 | if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | ||
793 | (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | ||
794 | dprintk("Invalid control/status registers (%x - %x)\n", | ||
795 | data->acpi_data.control_register.space_id, | ||
796 | data->acpi_data.status_register.space_id); | ||
797 | goto err_out; | ||
798 | } | ||
799 | |||
800 | /* fill in data->powernow_table */ | ||
801 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | ||
802 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); | ||
803 | if (!powernow_table) { | ||
804 | dprintk("powernow_table memory alloc failure\n"); | ||
805 | goto err_out; | ||
806 | } | ||
807 | |||
808 | if (cpu_family == CPU_HW_PSTATE) | ||
809 | ret_val = fill_powernow_table_pstate(data, powernow_table); | ||
810 | else | ||
811 | ret_val = fill_powernow_table_fidvid(data, powernow_table); | ||
812 | if (ret_val) | ||
813 | goto err_out_mem; | ||
814 | |||
815 | powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; | ||
816 | powernow_table[data->acpi_data.state_count].index = 0; | ||
817 | data->powernow_table = powernow_table; | ||
818 | |||
819 | /* fill in data */ | ||
820 | data->numps = data->acpi_data.state_count; | ||
821 | if (first_cpu(cpu_core_map[data->cpu]) == data->cpu) | ||
822 | print_basics(data); | ||
823 | powernow_k8_acpi_pst_values(data, 0); | ||
824 | |||
825 | /* notify BIOS that we exist */ | ||
826 | acpi_processor_notify_smm(THIS_MODULE); | ||
827 | |||
828 | return 0; | ||
829 | |||
830 | err_out_mem: | ||
831 | kfree(powernow_table); | ||
832 | |||
833 | err_out: | ||
834 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | ||
835 | |||
836 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | ||
837 | data->acpi_data.state_count = 0; | ||
838 | |||
839 | return -ENODEV; | ||
840 | } | ||
841 | |||
842 | static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) | ||
843 | { | ||
844 | int i; | ||
845 | |||
846 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
847 | u32 index; | ||
848 | u32 hi = 0, lo = 0; | ||
849 | u32 fid; | ||
850 | u32 did; | ||
851 | |||
852 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; | ||
853 | if (index > MAX_HW_PSTATE) { | ||
854 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); | ||
855 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); | ||
856 | } | ||
857 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); | ||
858 | if (!(hi & HW_PSTATE_VALID_MASK)) { | ||
859 | dprintk("invalid pstate %d, ignoring\n", index); | ||
860 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
861 | continue; | ||
862 | } | ||
863 | |||
864 | fid = lo & HW_PSTATE_FID_MASK; | ||
865 | did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT; | ||
866 | |||
867 | dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did); | ||
868 | |||
869 | powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT); | ||
870 | |||
871 | powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did); | ||
872 | |||
873 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | ||
874 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | ||
875 | powernow_table[i].frequency, | ||
876 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | ||
877 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
878 | continue; | ||
879 | } | ||
880 | } | ||
881 | return 0; | ||
882 | } | ||
883 | |||
884 | static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) | ||
885 | { | ||
886 | int i; | ||
887 | int cntlofreq = 0; | ||
888 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
889 | u32 fid; | ||
890 | u32 vid; | ||
891 | |||
892 | if (data->exttype) { | ||
893 | fid = data->acpi_data.states[i].status & EXT_FID_MASK; | ||
894 | vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; | ||
895 | } else { | ||
896 | fid = data->acpi_data.states[i].control & FID_MASK; | ||
897 | vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; | ||
898 | } | ||
899 | |||
900 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | ||
901 | |||
902 | powernow_table[i].index = fid; /* lower 8 bits */ | ||
903 | powernow_table[i].index |= (vid << 8); /* upper 8 bits */ | ||
904 | powernow_table[i].frequency = find_khz_freq_from_fid(fid); | ||
905 | |||
906 | /* verify frequency is OK */ | ||
907 | if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) || | ||
908 | (powernow_table[i].frequency < (MIN_FREQ * 1000))) { | ||
909 | dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency); | ||
910 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
911 | continue; | ||
912 | } | ||
913 | |||
914 | /* verify voltage is OK - BIOSs are using "off" to indicate invalid */ | ||
915 | if (vid == VID_OFF) { | ||
916 | dprintk("invalid vid %u, ignoring\n", vid); | ||
917 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
918 | continue; | ||
919 | } | ||
920 | |||
921 | /* verify only 1 entry from the lo frequency table */ | ||
922 | if (fid < HI_FID_TABLE_BOTTOM) { | ||
923 | if (cntlofreq) { | ||
924 | /* if both entries are the same, ignore this one ... */ | ||
925 | if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) || | ||
926 | (powernow_table[i].index != powernow_table[cntlofreq].index)) { | ||
927 | printk(KERN_ERR PFX "Too many lo freq table entries\n"); | ||
928 | return 1; | ||
929 | } | ||
930 | |||
931 | dprintk("double low frequency table entry, ignoring it.\n"); | ||
932 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
933 | continue; | ||
934 | } else | ||
935 | cntlofreq = i; | ||
936 | } | ||
937 | |||
938 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | ||
939 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | ||
940 | powernow_table[i].frequency, | ||
941 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | ||
942 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
943 | continue; | ||
944 | } | ||
945 | } | ||
946 | return 0; | ||
947 | } | ||
948 | |||
949 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | ||
950 | { | ||
951 | if (data->acpi_data.state_count) | ||
952 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | ||
953 | } | ||
954 | |||
955 | #else | ||
956 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | ||
957 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | ||
958 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | ||
959 | #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ | ||
960 | |||
961 | /* Take a frequency, and issue the fid/vid transition command */ | ||
962 | static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) | ||
963 | { | ||
964 | u32 fid = 0; | ||
965 | u32 vid = 0; | ||
966 | int res, i; | ||
967 | struct cpufreq_freqs freqs; | ||
968 | |||
969 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | ||
970 | |||
971 | /* fid/vid correctness check for k8 */ | ||
972 | /* The fid is the lower 8 bits of the index we stored into | ||
973 | * the cpufreq frequency table in find_psb_table, the vid | ||
974 | * is the upper 8 bits. | ||
975 | */ | ||
976 | fid = data->powernow_table[index].index & 0xFF; | ||
977 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; | ||
978 | |||
979 | dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); | ||
980 | |||
981 | if (query_current_values_with_pending_wait(data)) | ||
982 | return 1; | ||
983 | |||
984 | if ((data->currvid == vid) && (data->currfid == fid)) { | ||
985 | dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", | ||
986 | fid, vid); | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { | ||
991 | printk(KERN_ERR PFX | ||
992 | "ignoring illegal change in lo freq table-%x to 0x%x\n", | ||
993 | data->currfid, fid); | ||
994 | return 1; | ||
995 | } | ||
996 | |||
997 | dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", | ||
998 | smp_processor_id(), fid, vid); | ||
999 | freqs.old = find_khz_freq_from_fid(data->currfid); | ||
1000 | freqs.new = find_khz_freq_from_fid(fid); | ||
1001 | |||
1002 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1003 | freqs.cpu = i; | ||
1004 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
1005 | } | ||
1006 | |||
1007 | res = transition_fid_vid(data, fid, vid); | ||
1008 | freqs.new = find_khz_freq_from_fid(data->currfid); | ||
1009 | |||
1010 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1011 | freqs.cpu = i; | ||
1012 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
1013 | } | ||
1014 | return res; | ||
1015 | } | ||
1016 | |||
1017 | /* Take a frequency, and issue the hardware pstate transition command */ | ||
1018 | static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index) | ||
1019 | { | ||
1020 | u32 fid = 0; | ||
1021 | u32 did = 0; | ||
1022 | u32 pstate = 0; | ||
1023 | int res, i; | ||
1024 | struct cpufreq_freqs freqs; | ||
1025 | |||
1026 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | ||
1027 | |||
1028 | /* get fid did for hardware pstate transition */ | ||
1029 | pstate = index & HW_PSTATE_MASK; | ||
1030 | if (pstate > MAX_HW_PSTATE) | ||
1031 | return 0; | ||
1032 | fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT; | ||
1033 | did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT; | ||
1034 | freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1035 | freqs.new = find_khz_freq_from_fiddid(fid, did); | ||
1036 | |||
1037 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1038 | freqs.cpu = i; | ||
1039 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
1040 | } | ||
1041 | |||
1042 | res = transition_pstate(data, pstate); | ||
1043 | data->currfid = find_fid_from_pstate(pstate); | ||
1044 | data->currdid = find_did_from_pstate(pstate); | ||
1045 | freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1046 | |||
1047 | for_each_cpu_mask(i, *(data->available_cores)) { | ||
1048 | freqs.cpu = i; | ||
1049 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
1050 | } | ||
1051 | return res; | ||
1052 | } | ||
1053 | |||
1054 | /* Driver entry point to switch to the target frequency */ | ||
1055 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) | ||
1056 | { | ||
1057 | cpumask_t oldmask = CPU_MASK_ALL; | ||
1058 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1059 | u32 checkfid; | ||
1060 | u32 checkvid; | ||
1061 | unsigned int newstate; | ||
1062 | int ret = -EIO; | ||
1063 | |||
1064 | if (!data) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | checkfid = data->currfid; | ||
1068 | checkvid = data->currvid; | ||
1069 | |||
1070 | /* only run on specific CPU from here on */ | ||
1071 | oldmask = current->cpus_allowed; | ||
1072 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | ||
1073 | |||
1074 | if (smp_processor_id() != pol->cpu) { | ||
1075 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | ||
1076 | goto err_out; | ||
1077 | } | ||
1078 | |||
1079 | if (pending_bit_stuck()) { | ||
1080 | printk(KERN_ERR PFX "failing targ, change pending bit set\n"); | ||
1081 | goto err_out; | ||
1082 | } | ||
1083 | |||
1084 | dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | ||
1085 | pol->cpu, targfreq, pol->min, pol->max, relation); | ||
1086 | |||
1087 | if (query_current_values_with_pending_wait(data)) | ||
1088 | goto err_out; | ||
1089 | |||
1090 | if (cpu_family == CPU_HW_PSTATE) | ||
1091 | dprintk("targ: curr fid 0x%x, did 0x%x\n", | ||
1092 | data->currfid, data->currdid); | ||
1093 | else { | ||
1094 | dprintk("targ: curr fid 0x%x, vid 0x%x\n", | ||
1095 | data->currfid, data->currvid); | ||
1096 | |||
1097 | if ((checkvid != data->currvid) || (checkfid != data->currfid)) { | ||
1098 | printk(KERN_INFO PFX | ||
1099 | "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", | ||
1100 | checkfid, data->currfid, checkvid, data->currvid); | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) | ||
1105 | goto err_out; | ||
1106 | |||
1107 | mutex_lock(&fidvid_mutex); | ||
1108 | |||
1109 | powernow_k8_acpi_pst_values(data, newstate); | ||
1110 | |||
1111 | if (cpu_family == CPU_HW_PSTATE) | ||
1112 | ret = transition_frequency_pstate(data, newstate); | ||
1113 | else | ||
1114 | ret = transition_frequency_fidvid(data, newstate); | ||
1115 | if (ret) { | ||
1116 | printk(KERN_ERR PFX "transition frequency failed\n"); | ||
1117 | ret = 1; | ||
1118 | mutex_unlock(&fidvid_mutex); | ||
1119 | goto err_out; | ||
1120 | } | ||
1121 | mutex_unlock(&fidvid_mutex); | ||
1122 | |||
1123 | if (cpu_family == CPU_HW_PSTATE) | ||
1124 | pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1125 | else | ||
1126 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1127 | ret = 0; | ||
1128 | |||
1129 | err_out: | ||
1130 | set_cpus_allowed(current, oldmask); | ||
1131 | return ret; | ||
1132 | } | ||
1133 | |||
1134 | /* Driver entry point to verify the policy and range of frequencies */ | ||
1135 | static int powernowk8_verify(struct cpufreq_policy *pol) | ||
1136 | { | ||
1137 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1138 | |||
1139 | if (!data) | ||
1140 | return -EINVAL; | ||
1141 | |||
1142 | return cpufreq_frequency_table_verify(pol, data->powernow_table); | ||
1143 | } | ||
1144 | |||
1145 | /* per CPU init entry point to the driver */ | ||
1146 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | ||
1147 | { | ||
1148 | struct powernow_k8_data *data; | ||
1149 | cpumask_t oldmask = CPU_MASK_ALL; | ||
1150 | int rc; | ||
1151 | |||
1152 | if (!cpu_online(pol->cpu)) | ||
1153 | return -ENODEV; | ||
1154 | |||
1155 | if (!check_supported_cpu(pol->cpu)) | ||
1156 | return -ENODEV; | ||
1157 | |||
1158 | data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); | ||
1159 | if (!data) { | ||
1160 | printk(KERN_ERR PFX "unable to alloc powernow_k8_data\n"); | ||
1161 | return -ENOMEM; | ||
1162 | } | ||
1163 | |||
1164 | data->cpu = pol->cpu; | ||
1165 | |||
1166 | if (powernow_k8_cpu_init_acpi(data)) { | ||
1167 | /* | ||
1168 | * Use the PSB BIOS structure. This is only available on | ||
1169 | * a UP version, and is deprecated by AMD. | ||
1170 | */ | ||
1171 | if (num_online_cpus() != 1) { | ||
1172 | printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n"); | ||
1173 | kfree(data); | ||
1174 | return -ENODEV; | ||
1175 | } | ||
1176 | if (pol->cpu != 0) { | ||
1177 | printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n"); | ||
1178 | kfree(data); | ||
1179 | return -ENODEV; | ||
1180 | } | ||
1181 | rc = find_psb_table(data); | ||
1182 | if (rc) { | ||
1183 | kfree(data); | ||
1184 | return -ENODEV; | ||
1185 | } | ||
1186 | } | ||
1187 | |||
1188 | /* only run on specific CPU from here on */ | ||
1189 | oldmask = current->cpus_allowed; | ||
1190 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | ||
1191 | |||
1192 | if (smp_processor_id() != pol->cpu) { | ||
1193 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | ||
1194 | goto err_out; | ||
1195 | } | ||
1196 | |||
1197 | if (pending_bit_stuck()) { | ||
1198 | printk(KERN_ERR PFX "failing init, change pending bit set\n"); | ||
1199 | goto err_out; | ||
1200 | } | ||
1201 | |||
1202 | if (query_current_values_with_pending_wait(data)) | ||
1203 | goto err_out; | ||
1204 | |||
1205 | if (cpu_family == CPU_OPTERON) | ||
1206 | fidvid_msr_init(); | ||
1207 | |||
1208 | /* run on any CPU again */ | ||
1209 | set_cpus_allowed(current, oldmask); | ||
1210 | |||
1211 | pol->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
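	/* On plain K8 the cores of a package share one fid/vid plane, so the
	 * policy spans the whole core map; the hardware P-state interface is
	 * programmed per cpu, so each core gets its own policy. */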
1212 | if (cpu_family == CPU_HW_PSTATE) | ||
1213 | pol->cpus = cpumask_of_cpu(pol->cpu); | ||
1214 | else | ||
1215 | pol->cpus = cpu_core_map[pol->cpu]; | ||
1216 | data->available_cores = &(pol->cpus); | ||
1217 | |||
1218 | /* Take a crude guess at the transition latency. The estimate below | ||
1219 | * is in microseconds, so multiply by 1000 for the nanoseconds cpufreq expects. */ | ||
1220 | pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US) | ||
1221 | + (3 * (1 << data->irt) * 10)) * 1000; | ||
1222 | |||
1223 | if (cpu_family == CPU_HW_PSTATE) | ||
1224 | pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1225 | else | ||
1226 | pol->cur = find_khz_freq_from_fid(data->currfid); | ||
1227 | dprintk("policy current frequency %d kHz\n", pol->cur); | ||
1228 | |||
1229 | /* min/max the cpu is capable of */ | ||
1230 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | ||
1231 | printk(KERN_ERR PFX "invalid powernow_table\n"); | ||
1232 | powernow_k8_cpu_exit_acpi(data); | ||
1233 | kfree(data->powernow_table); | ||
1234 | kfree(data); | ||
1235 | return -EINVAL; | ||
1236 | } | ||
1237 | |||
1238 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | ||
1239 | |||
1240 | if (cpu_family == CPU_HW_PSTATE) | ||
1241 | dprintk("cpu_init done, current fid 0x%x, did 0x%x\n", | ||
1242 | data->currfid, data->currdid); | ||
1243 | else | ||
1244 | dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", | ||
1245 | data->currfid, data->currvid); | ||
1246 | |||
1247 | powernow_data[pol->cpu] = data; | ||
1248 | |||
1249 | return 0; | ||
1250 | |||
1251 | err_out: | ||
1252 | set_cpus_allowed(current, oldmask); | ||
1253 | powernow_k8_cpu_exit_acpi(data); | ||
1254 | |||
1255 | kfree(data); | ||
1256 | return -ENODEV; | ||
1257 | } | ||
1258 | |||
1259 | static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) | ||
1260 | { | ||
1261 | struct powernow_k8_data *data = powernow_data[pol->cpu]; | ||
1262 | |||
1263 | if (!data) | ||
1264 | return -EINVAL; | ||
1265 | |||
1266 | powernow_k8_cpu_exit_acpi(data); | ||
1267 | |||
1268 | cpufreq_frequency_table_put_attr(pol->cpu); | ||
1269 | |||
1270 | kfree(data->powernow_table); | ||
1271 | kfree(data); | ||
1272 | |||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | static unsigned int powernowk8_get (unsigned int cpu) | ||
1277 | { | ||
1278 | struct powernow_k8_data *data; | ||
1279 | cpumask_t oldmask = current->cpus_allowed; | ||
1280 | unsigned int khz = 0; | ||
1281 | |||
1282 | data = powernow_data[first_cpu(cpu_core_map[cpu])]; | ||
1283 | |||
1284 | if (!data) | ||
1285 | return -EINVAL; | ||
1286 | |||
1287 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
1288 | if (smp_processor_id() != cpu) { | ||
1289 | printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); | ||
1290 | set_cpus_allowed(current, oldmask); | ||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | if (query_current_values_with_pending_wait(data)) | ||
1295 | goto out; | ||
1296 | |||
1297 | if (cpu_family == CPU_HW_PSTATE) | ||
1298 | khz = find_khz_freq_from_fiddid(data->currfid, data->currdid); | ||
1299 | else | ||
1300 | khz = find_khz_freq_from_fid(data->currfid); | ||
1301 | |||
1302 | |||
1303 | out: | ||
1304 | set_cpus_allowed(current, oldmask); | ||
1305 | return khz; | ||
1306 | } | ||
1307 | |||
1308 | static struct freq_attr* powernow_k8_attr[] = { | ||
1309 | &cpufreq_freq_attr_scaling_available_freqs, | ||
1310 | NULL, | ||
1311 | }; | ||
1312 | |||
1313 | static struct cpufreq_driver cpufreq_amd64_driver = { | ||
1314 | .verify = powernowk8_verify, | ||
1315 | .target = powernowk8_target, | ||
1316 | .init = powernowk8_cpu_init, | ||
1317 | .exit = __devexit_p(powernowk8_cpu_exit), | ||
1318 | .get = powernowk8_get, | ||
1319 | .name = "powernow-k8", | ||
1320 | .owner = THIS_MODULE, | ||
1321 | .attr = powernow_k8_attr, | ||
1322 | }; | ||
1323 | |||
1324 | /* driver entry point for init */ | ||
1325 | static int __cpuinit powernowk8_init(void) | ||
1326 | { | ||
1327 | unsigned int i, supported_cpus = 0; | ||
1328 | unsigned int booted_cores = 1; | ||
1329 | |||
1330 | for_each_online_cpu(i) { | ||
1331 | if (check_supported_cpu(i)) | ||
1332 | supported_cpus++; | ||
1333 | } | ||
1334 | |||
1335 | #ifdef CONFIG_SMP | ||
1336 | booted_cores = cpu_data[0].booted_cores; | ||
1337 | #endif | ||
1338 | |||
1339 | if (supported_cpus == num_online_cpus()) { | ||
1340 | printk(KERN_INFO PFX "Found %d %s " | ||
1341 | "processors (%d cpu cores) (" VERSION ")\n", | ||
1342 | supported_cpus/booted_cores, | ||
1343 | boot_cpu_data.x86_model_id, supported_cpus); | ||
1344 | return cpufreq_register_driver(&cpufreq_amd64_driver); | ||
1345 | } | ||
1346 | |||
1347 | return -ENODEV; | ||
1348 | } | ||
1349 | |||
1350 | /* driver entry point for term */ | ||
1351 | static void __exit powernowk8_exit(void) | ||
1352 | { | ||
1353 | dprintk("exit\n"); | ||
1354 | |||
1355 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | ||
1356 | } | ||
1357 | |||
1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); | ||
1359 | MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); | ||
1360 | MODULE_LICENSE("GPL"); | ||
1361 | |||
1362 | late_initcall(powernowk8_init); | ||
1363 | module_exit(powernowk8_exit); | ||