Diffstat (limited to 'arch/i386/kernel/cpu/centaur.c')
-rw-r--r--  arch/i386/kernel/cpu/centaur.c  476
1 files changed, 476 insertions, 0 deletions
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
new file mode 100644
index 000000000000..394814e57672
--- /dev/null
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -0,0 +1,476 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/e820.h>
#include "cpu.h"

#ifdef CONFIG_X86_OOSTORE

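/*
 * power2(x) rounds down to a power of two: it returns the largest
 * power of two that is <= x (0 when x == 0), e.g. power2(96MB) == 64MB.
 */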
static u32 __init power2(u32 x)
{
        u32 s=1;
        while(s<=x)
                s<<=1;
        return s>>=1;
}


/*
 *      Set up an actual MCR
 */

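/*
 * As programmed below, the high dword of each MCR holds the 4K-aligned
 * physical base and the low dword holds an address mask plus the attribute
 * key.  Worked example: a 64MB region at physical 0 with key 31 gives
 * hi = 0x00000000 and lo = (~(0x04000000 - 1) & ~0xFFF) | 31 = 0xFC00001F.
 */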
static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key)
{
        u32 lo, hi;

        hi = base & ~0xFFF;
        lo = ~(size-1);         /* Size is a power of 2 so this makes a mask */
        lo &= ~0xFFF;           /* Remove the ctrl value bits */
        lo |= key;              /* Attribute we wish to set */
        wrmsr(reg+MSR_IDT_MCR0, lo, hi);
        mtrr_centaur_report_mcr(reg, lo, hi);   /* Tell the mtrr driver */
}

/*
 *      Figure what we can cover with MCR's
 *
 *      Shortcut: We know you can't put 4Gig of RAM on a winchip
 */

static u32 __init ramtop(void)          /* 16388 */
{
        int i;
        u32 top = 0;
        u32 clip = 0xFFFFFFFFUL;

        for (i = 0; i < e820.nr_map; i++) {
                unsigned long start, end;

                if (e820.map[i].addr > 0xFFFFFFFFUL)
                        continue;
                /*
                 * Don't MCR over reserved space. Ignore the ISA hole -
                 * we frob around that catastrophe already.
                 */

                if (e820.map[i].type == E820_RESERVED)
                {
                        if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
                                clip = e820.map[i].addr;
                        continue;
                }
                start = e820.map[i].addr;
                end = e820.map[i].addr + e820.map[i].size;
                if (start >= end)
                        continue;
                if (end > top)
                        top = end;
        }
        /* Everything below 'top' should be RAM except for the ISA hole.
           Because of the limited number of MCRs we want to map NV/ACPI
           into our MCR range for gunk in RAM.

           Clip might cause us to MCR insufficient RAM but that is an
           acceptable failure mode and should only bite obscure boxes with
           a VESA hole at 15MB.

           The second case where Clip kicks in is when the EBDA is marked
           as reserved. Again we fail safe with reasonable results.
         */

        if(top > clip)
                top = clip;

        return top;
}

/*
 *      Compute a set of MCR's to give maximum coverage
 */

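/*
 * The loop below greedily places power-of-two blocks, growing upwards from
 * 'top' and downwards from 'base'.  For example, on a box where ramtop()
 * returns exactly 96MB (root = 64MB) and nr = 6, it covers 32-64MB,
 * 64-96MB, then 16MB, 8MB, 4MB and 2MB blocks working down towards the
 * ISA hole; anything left uncovered simply keeps the safe default ordering.
 */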
static int __init centaur_mcr_compute(int nr, int key)
{
        u32 mem = ramtop();
        u32 root = power2(mem);
        u32 base = root;
        u32 top = root;
        u32 floor = 0;
        int ct = 0;

        while(ct<nr)
        {
                u32 fspace = 0;

                /*
                 *      Find the largest block we will fill going upwards
                 */

                u32 high = power2(mem-top);

                /*
                 *      Find the largest block we will fill going downwards
                 */

                u32 low = base/2;

                /*
                 *      Don't fill below 1Mb going downwards as there
                 *      is an ISA hole in the way.
                 */

                if(base <= 1024*1024)
                        low = 0;

                /*
                 *      See how much space we could cover by filling below
                 *      the ISA hole
                 */

                if(floor == 0)
                        fspace = 512*1024;
                else if(floor == 512*1024)
                        fspace = 128*1024;

                /* And forget ROM space */

                /*
                 *      Now install the largest coverage we get
                 */

                if(fspace > high && fspace > low)
                {
                        centaur_mcr_insert(ct, floor, fspace, key);
                        floor += fspace;
                }
                else if(high > low)
                {
                        centaur_mcr_insert(ct, top, high, key);
                        top += high;
                }
                else if(low > 0)
                {
                        base -= low;
                        centaur_mcr_insert(ct, base, low, key);
                }
                else break;
                ct++;
        }
        /*
         *      We loaded ct values. We now need to set the mask. The caller
         *      must do this bit.
         */

        return ct;
}

static void __init centaur_create_optimal_mcr(void)
{
        int i;
        /*
         *      Allocate up to 6 mcrs to mark as much of ram as possible
         *      as write combining and weak write ordered.
         *
         *      To experiment with: Linux never uses stack operations for
         *      mmio spaces so we could globally enable stack operation wc
         *
         *      Load the registers with type 31 - full write combining, all
         *      writes weakly ordered.
         */
        int used = centaur_mcr_compute(6, 31);

        /*
         *      Wipe unused MCRs
         */

        for(i=used;i<8;i++)
                wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

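/*
 * On the WinChip 2 each MCR additionally has an enable bit in
 * MSR_IDT_MCR_CTRL: bit 9+i marks MCR i as in use, which is why the loop
 * below ORs in 1<<(9+i) for every register centaur_mcr_compute() programmed.
 */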
static void __init winchip2_create_optimal_mcr(void)
{
        u32 lo, hi;
        int i;

        /*
         *      Allocate up to 6 mcrs to mark as much of ram as possible
         *      as write combining, weak store ordered.
         *
         *      Load the registers with type 25
         *              8       -       weak write ordering
         *              16      -       weak read ordering
         *              1       -       write combining
         */

        int used = centaur_mcr_compute(6, 25);

        /*
         *      Mark the registers we are using.
         */

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        for(i=0;i<used;i++)
                lo|=1<<(9+i);
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);

        /*
         *      Wipe unused MCRs
         */

        for(i=used;i<8;i++)
                wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

/*
 *      Handle the MCR key on the Winchip 2.
 */

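/*
 * The control MSR carries a 3-bit key at bits 17-19; writing that same
 * value back at bits 6-8 unlocks the MCRs for modification, and clearing
 * bits 6-8 locks them again (unprotect/protect below).
 */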
static void __init winchip2_unprotect_mcr(void)
{
        u32 lo, hi;
        u32 key;

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        lo&=~0x1C0;     /* blank bits 8-6 */
        key = (lo>>17) & 7;
        lo |= key<<6;   /* replace with unlock key */
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}

static void __init winchip2_protect_mcr(void)
{
        u32 lo, hi;

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        lo&=~0x1C0;     /* blank bits 8-6 */
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
#endif /* CONFIG_X86_OOSTORE */

#define ACE_PRESENT     (1 << 6)
#define ACE_ENABLED     (1 << 7)
#define ACE_FCR         (1 << 28)       /* MSR_VIA_FCR */

#define RNG_PRESENT     (1 << 2)
#define RNG_ENABLED     (1 << 3)
#define RNG_ENABLE      (1 << 6)        /* MSR_VIA_RNG */

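/*
 * VIA C3 (Cyrix III/Nehemiah) setup: probe the Centaur extended CPUID
 * leaf at 0xC0000001 and switch on the ACE crypto and RNG units when
 * they are present but still disabled.
 */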
static void __init init_c3(struct cpuinfo_x86 *c)
{
        u32 lo, hi;

        /* Test for Centaur Extended Feature Flags presence */
        if (cpuid_eax(0xC0000000) >= 0xC0000001) {
                u32 tmp = cpuid_edx(0xC0000001);

                /* enable ACE unit, if present and disabled */
                if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
                        rdmsr (MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR;          /* enable ACE unit */
                        wrmsr (MSR_VIA_FCR, lo, hi);
                        printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
                }

                /* enable RNG unit, if present and disabled */
                if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
                        rdmsr (MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE;       /* enable RNG unit */
                        wrmsr (MSR_VIA_RNG, lo, hi);
                        printk(KERN_INFO "CPU: Enabled h/w RNG\n");
                }

                /* store Centaur Extended Feature Flags as
                 * word 5 of the CPU capability bit array
                 */
                c->x86_capability[5] = cpuid_edx(0xC0000001);
        }

        /* Cyrix III family needs CX8 & PGE explicitly enabled. */
        if (c->x86_model >= 6 && c->x86_model <= 9) {
                rdmsr (MSR_VIA_FCR, lo, hi);
                lo |= (1<<1 | 1<<7);
                wrmsr (MSR_VIA_FCR, lo, hi);
                set_bit(X86_FEATURE_CX8, c->x86_capability);
        }

        /* Before Nehemiah, the C3's had 3DNow! */
        if (c->x86_model >= 6 && c->x86_model < 9)
                set_bit(X86_FEATURE_3DNOW, c->x86_capability);

        get_model_name(c);
        display_cacheinfo(c);
}

static void __init init_centaur(struct cpuinfo_x86 *c)
{
        enum {
                ECX8=1<<1,
                EIERRINT=1<<2,
                DPM=1<<3,
                DMCE=1<<4,
                DSTPCLK=1<<5,
                ELINEAR=1<<6,
                DSMC=1<<7,
                DTLOCK=1<<8,
                EDCTLB=1<<8,
                EMMX=1<<9,
                DPDC=1<<11,
                EBRPRED=1<<12,
                DIC=1<<13,
                DDC=1<<14,
                DNA=1<<15,
                ERETSTK=1<<16,
                E2MMX=1<<19,
                EAMD3D=1<<20,
        };
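        /* fcr_set collects MSR_IDT_FCR1 bits to turn on and fcr_clr bits to
           turn off for the model detected below; judging by the names, the
           E* flags enable a CPU feature and the D* flags disable one. */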

        char *name;
        u32  fcr_set=0;
        u32  fcr_clr=0;
        u32  lo,hi,newlo;
        u32  aa,bb,cc,dd;

        /* Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
           3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);

        switch (c->x86) {

                case 5:
                        switch(c->x86_model) {
                        case 4:
                                name="C6";
                                fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                                fcr_clr=DPDC;
                                printk(KERN_NOTICE "Disabling bugged TSC.\n");
                                clear_bit(X86_FEATURE_TSC, c->x86_capability);
#ifdef CONFIG_X86_OOSTORE
                                centaur_create_optimal_mcr();
                                /* Enable
                                        write combining on non-stack, non-string
                                        write combining on string, all types
                                        weak write ordering

                                   The C6 original lacks weak read order

                                   Note 0x120 is write only on Winchip 1 */

                                wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
                                break;
                        case 8:
                                switch(c->x86_mask) {
                                default:
                                        name="2";
                                        break;
                                case 7 ... 9:
                                        name="2A";
                                        break;
                                case 10 ... 15:
                                        name="2B";
                                        break;
                                }
                                fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
                                fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
                                winchip2_unprotect_mcr();
                                winchip2_create_optimal_mcr();
                                rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
                                /* Enable
                                        write combining on non-stack, non-string
                                        write combining on string, all types
                                        weak write ordering
                                 */
                                lo|=31;
                                wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
                                winchip2_protect_mcr();
#endif
                                break;
                        case 9:
                                name="3";
                                fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
                                fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
                                winchip2_unprotect_mcr();
                                winchip2_create_optimal_mcr();
                                rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
                                /* Enable
                                        write combining on non-stack, non-string
                                        write combining on string, all types
                                        weak write ordering
                                 */
                                lo|=31;
                                wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
                                winchip2_protect_mcr();
#endif
                                break;
                        case 10:
                                name="4";
                                /* no info on the WC4 yet */
                                break;
                        default:
                                name="??";
                        }

                        rdmsr(MSR_IDT_FCR1, lo, hi);
                        newlo=(lo|fcr_set) & (~fcr_clr);

                        if (newlo!=lo) {
                                printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
                                wrmsr(MSR_IDT_FCR1, newlo, hi );
                        } else {
                                printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
                        }
                        /* Emulate MTRRs using Centaur's MCR. */
                        set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
                        /* Report CX8 */
                        set_bit(X86_FEATURE_CX8, c->x86_capability);
                        /* Set 3DNow! on Winchip 2 and above. */
                        if (c->x86_model >=8)
                                set_bit(X86_FEATURE_3DNOW, c->x86_capability);
                        /* See if we can find out some more. */
                        if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
                                /* Yes, we can. */
                                cpuid(0x80000005,&aa,&bb,&cc,&dd);
                                /* Add L1 data and code cache sizes. */
                                c->x86_cache_size = (cc>>24)+(dd>>24);
                        }
                        sprintf( c->x86_model_id, "WinChip %s", name );
                        break;

                case 6:
                        init_c3(c);
                        break;
        }
}

static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
{
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
                size >>= 8;

        /* VIA also screwed up Nehemiah stepping 1, and made
           it return '65KB' instead of '64KB'
           - Note, it seems this may only be in engineering samples. */
        if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
                size -=1;

        return size;
}

static struct cpu_dev centaur_cpu_dev __initdata = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_init         = init_centaur,
        .c_size_cache   = centaur_size_cache,
};

int __init centaur_init_cpu(void)
{
        cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
        return 0;
}

//early_arch_initcall(centaur_init_cpu);