author    Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>  2008-02-17 17:30:23 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-04-17 11:40:47 -0400
commit    29a9994bd8dbafc17f43d31651d31ea7b0add6a4 (patch)
tree      49091ab9768081be788008c2e1dbe5772e8c643c /arch/x86/kernel/cpu
parent    ca5d3f14915f5f8db75f7b0c198c0c154947fc5e (diff)
x86: coding style fixes for arch/x86/kernel/cpu/centaur.c
Kills more than 150 errors/warnings

Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
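The errors/warnings are the usual checkpatch-class style problems: missing spaces around assignment and comparison operators, no space after keywords such as if/while/for/switch, and stray spaces between function names and their argument lists (presumably counted with scripts/checkpatch.pl, although the message does not name the tool). Two representative before/after pairs, quoted from the diff below:

        while(ct<nr)             ->  while (ct < nr)
        for(i=used;i<8;i++)      ->  for (i = used; i < 8; i++)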
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--   arch/x86/kernel/cpu/centaur.c   230
1 file changed, 114 insertions, 116 deletions
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 194ec8311c3b..710fe1ed0731 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,21 +11,21 @@
11 11
12static u32 __cpuinit power2(u32 x) 12static u32 __cpuinit power2(u32 x)
13{ 13{
14 u32 s=1; 14 u32 s = 1;
15 while(s<=x) 15 while(s <= x)
16 s<<=1; 16 s <<= 1;
17 return s>>=1; 17 return s >>= 1;
18} 18}
19 19
20 20
21/* 21/*
22 * Set up an actual MCR 22 * Set up an actual MCR
23 */ 23 */
24 24
25static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) 25static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
26{ 26{
27 u32 lo, hi; 27 u32 lo, hi;
28 28
29 hi = base & ~0xFFF; 29 hi = base & ~0xFFF;
30 lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ 30 lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
31 lo &= ~0xFFF; /* Remove the ctrl value bits */ 31 lo &= ~0xFFF; /* Remove the ctrl value bits */
@@ -45,7 +45,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */
45 int i; 45 int i;
46 u32 top = 0; 46 u32 top = 0;
47 u32 clip = 0xFFFFFFFFUL; 47 u32 clip = 0xFFFFFFFFUL;
48 48
49 for (i = 0; i < e820.nr_map; i++) { 49 for (i = 0; i < e820.nr_map; i++) {
50 unsigned long start, end; 50 unsigned long start, end;
51 51
@@ -55,10 +55,10 @@ static u32 __cpuinit ramtop(void) /* 16388 */
55 * Don't MCR over reserved space. Ignore the ISA hole 55 * Don't MCR over reserved space. Ignore the ISA hole
56 * we frob around that catastrophe already 56 * we frob around that catastrophe already
57 */ 57 */
58 58
59 if (e820.map[i].type == E820_RESERVED) 59 if (e820.map[i].type == E820_RESERVED)
60 { 60 {
61 if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip) 61 if (e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
62 clip = e820.map[i].addr; 62 clip = e820.map[i].addr;
63 continue; 63 continue;
64 } 64 }
@@ -71,19 +71,19 @@ static u32 __cpuinit ramtop(void) /* 16388 */
71 } 71 }
72 /* Everything below 'top' should be RAM except for the ISA hole. 72 /* Everything below 'top' should be RAM except for the ISA hole.
73 Because of the limited MCR's we want to map NV/ACPI into our 73 Because of the limited MCR's we want to map NV/ACPI into our
74 MCR range for gunk in RAM 74 MCR range for gunk in RAM
75 75
76 Clip might cause us to MCR insufficient RAM but that is an 76 Clip might cause us to MCR insufficient RAM but that is an
77 acceptable failure mode and should only bite obscure boxes with 77 acceptable failure mode and should only bite obscure boxes with
78 a VESA hole at 15Mb 78 a VESA hole at 15Mb
79 79
80 The second case Clip sometimes kicks in is when the EBDA is marked 80 The second case Clip sometimes kicks in is when the EBDA is marked
81 as reserved. Again we fail safe with reasonable results 81 as reserved. Again we fail safe with reasonable results
82 */ 82 */
83 83
84 if(top>clip) 84 if(top > clip)
85 top=clip; 85 top = clip;
86 86
87 return top; 87 return top;
88} 88}
89 89
@@ -99,8 +99,8 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
99 u32 top = root; 99 u32 top = root;
100 u32 floor = 0; 100 u32 floor = 0;
101 int ct = 0; 101 int ct = 0;
102 102
103 while(ct<nr) 103 while (ct < nr)
104 { 104 {
105 u32 fspace = 0; 105 u32 fspace = 0;
106 106
@@ -108,7 +108,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
108 * Find the largest block we will fill going upwards 108 * Find the largest block we will fill going upwards
109 */ 109 */
110 110
111 u32 high = power2(mem-top); 111 u32 high = power2(mem-top);
112 112
113 /* 113 /*
114 * Find the largest block we will fill going downwards 114 * Find the largest block we will fill going downwards
@@ -119,39 +119,37 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
119 /* 119 /*
120 * Don't fill below 1Mb going downwards as there 120 * Don't fill below 1Mb going downwards as there
121 * is an ISA hole in the way. 121 * is an ISA hole in the way.
122 */ 122 */
123 123
124 if(base <= 1024*1024) 124 if (base <= 1024*1024)
125 low = 0; 125 low = 0;
126 126
127 /* 127 /*
128 * See how much space we could cover by filling below 128 * See how much space we could cover by filling below
129 * the ISA hole 129 * the ISA hole
130 */ 130 */
131 131
132 if(floor == 0) 132 if (floor == 0)
133 fspace = 512*1024; 133 fspace = 512*1024;
134 else if(floor ==512*1024) 134 else if (floor == 512*1024)
135 fspace = 128*1024; 135 fspace = 128*1024;
136 136
137 /* And forget ROM space */ 137 /* And forget ROM space */
138 138
139 /* 139 /*
140 * Now install the largest coverage we get 140 * Now install the largest coverage we get
141 */ 141 */
142 142
143 if(fspace > high && fspace > low) 143 if (fspace > high && fspace > low)
144 { 144 {
145 centaur_mcr_insert(ct, floor, fspace, key); 145 centaur_mcr_insert(ct, floor, fspace, key);
146 floor += fspace; 146 floor += fspace;
147 } 147 }
148 else if(high > low) 148 else if (high > low) {
149 {
150 centaur_mcr_insert(ct, top, high, key); 149 centaur_mcr_insert(ct, top, high, key);
151 top += high; 150 top += high;
152 } 151 }
153 else if(low > 0) 152 else if (low > 0) {
154 {
155 base -= low; 153 base -= low;
156 centaur_mcr_insert(ct, base, low, key); 154 centaur_mcr_insert(ct, base, low, key);
157 } 155 }
@@ -162,7 +160,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
162 * We loaded ct values. We now need to set the mask. The caller 160 * We loaded ct values. We now need to set the mask. The caller
163 * must do this bit. 161 * must do this bit.
164 */ 162 */
165 163
166 return ct; 164 return ct;
167} 165}
168 166
@@ -173,7 +171,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
173 * Allocate up to 6 mcrs to mark as much of ram as possible 171 * Allocate up to 6 mcrs to mark as much of ram as possible
174 * as write combining and weak write ordered. 172 * as write combining and weak write ordered.
175 * 173 *
176 * To experiment with: Linux never uses stack operations for 174 * To experiment with: Linux never uses stack operations for
177 * mmio spaces so we could globally enable stack operation wc 175 * mmio spaces so we could globally enable stack operation wc
178 * 176 *
179 * Load the registers with type 31 - full write combining, all 177 * Load the registers with type 31 - full write combining, all
@@ -184,8 +182,8 @@ static void __cpuinit centaur_create_optimal_mcr(void)
184 /* 182 /*
185 * Wipe unused MCRs 183 * Wipe unused MCRs
186 */ 184 */
187 185
188 for(i=used;i<8;i++) 186 for (i = used; i < 8; i++)
189 wrmsr(MSR_IDT_MCR0+i, 0, 0); 187 wrmsr(MSR_IDT_MCR0+i, 0, 0);
190} 188}
191 189
@@ -205,21 +203,21 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
205 */ 203 */
206 204
207 int used = centaur_mcr_compute(6, 25); 205 int used = centaur_mcr_compute(6, 25);
208 206
209 /* 207 /*
210 * Mark the registers we are using. 208 * Mark the registers we are using.
211 */ 209 */
212 210
213 rdmsr(MSR_IDT_MCR_CTRL, lo, hi); 211 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
214 for(i=0;i<used;i++) 212 for (i = 0; i < used; i++)
215 lo|=1<<(9+i); 213 lo |= 1<<(9+i);
216 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 214 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
217 215
218 /* 216 /*
219 * Wipe unused MCRs 217 * Wipe unused MCRs
220 */ 218 */
221 219
222 for(i=used;i<8;i++) 220 for (i = used; i < 8; i++)
223 wrmsr(MSR_IDT_MCR0+i, 0, 0); 221 wrmsr(MSR_IDT_MCR0+i, 0, 0);
224} 222}
225 223
@@ -231,9 +229,9 @@ static void __cpuinit winchip2_unprotect_mcr(void)
231{ 229{
232 u32 lo, hi; 230 u32 lo, hi;
233 u32 key; 231 u32 key;
234 232
235 rdmsr(MSR_IDT_MCR_CTRL, lo, hi); 233 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
236 lo&=~0x1C0; /* blank bits 8-6 */ 234 lo &= ~0x1C0; /* blank bits 8-6 */
237 key = (lo>>17) & 7; 235 key = (lo>>17) & 7;
238 lo |= key<<6; /* replace with unlock key */ 236 lo |= key<<6; /* replace with unlock key */
239 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 237 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
@@ -242,9 +240,9 @@ static void __cpuinit winchip2_unprotect_mcr(void)
242static void __cpuinit winchip2_protect_mcr(void) 240static void __cpuinit winchip2_protect_mcr(void)
243{ 241{
244 u32 lo, hi; 242 u32 lo, hi;
245 243
246 rdmsr(MSR_IDT_MCR_CTRL, lo, hi); 244 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
247 lo&=~0x1C0; /* blank bits 8-6 */ 245 lo &= ~0x1C0; /* blank bits 8-6 */
248 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 246 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
249} 247}
250#endif /* CONFIG_X86_OOSTORE */ 248#endif /* CONFIG_X86_OOSTORE */
@@ -267,17 +265,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
267 265
268 /* enable ACE unit, if present and disabled */ 266 /* enable ACE unit, if present and disabled */
269 if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { 267 if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
270 rdmsr (MSR_VIA_FCR, lo, hi); 268 rdmsr(MSR_VIA_FCR, lo, hi);
271 lo |= ACE_FCR; /* enable ACE unit */ 269 lo |= ACE_FCR; /* enable ACE unit */
272 wrmsr (MSR_VIA_FCR, lo, hi); 270 wrmsr(MSR_VIA_FCR, lo, hi);
273 printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); 271 printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
274 } 272 }
275 273
276 /* enable RNG unit, if present and disabled */ 274 /* enable RNG unit, if present and disabled */
277 if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { 275 if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
278 rdmsr (MSR_VIA_RNG, lo, hi); 276 rdmsr(MSR_VIA_RNG, lo, hi);
279 lo |= RNG_ENABLE; /* enable RNG unit */ 277 lo |= RNG_ENABLE; /* enable RNG unit */
280 wrmsr (MSR_VIA_RNG, lo, hi); 278 wrmsr(MSR_VIA_RNG, lo, hi);
281 printk(KERN_INFO "CPU: Enabled h/w RNG\n"); 279 printk(KERN_INFO "CPU: Enabled h/w RNG\n");
282 } 280 }
283 281
@@ -288,15 +286,15 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
288 } 286 }
289 287
290 /* Cyrix III family needs CX8 & PGE explicitly enabled. */ 288 /* Cyrix III family needs CX8 & PGE explicitly enabled. */
291 if (c->x86_model >=6 && c->x86_model <= 9) { 289 if (c->x86_model >= 6 && c->x86_model <= 9) {
292 rdmsr (MSR_VIA_FCR, lo, hi); 290 rdmsr(MSR_VIA_FCR, lo, hi);
293 lo |= (1<<1 | 1<<7); 291 lo |= (1<<1 | 1<<7);
294 wrmsr (MSR_VIA_FCR, lo, hi); 292 wrmsr(MSR_VIA_FCR, lo, hi);
295 set_bit(X86_FEATURE_CX8, c->x86_capability); 293 set_bit(X86_FEATURE_CX8, c->x86_capability);
296 } 294 }
297 295
298 /* Before Nehemiah, the C3's had 3dNOW! */ 296 /* Before Nehemiah, the C3's had 3dNOW! */
299 if (c->x86_model >=6 && c->x86_model <9) 297 if (c->x86_model >= 6 && c->x86_model < 9)
300 set_bit(X86_FEATURE_3DNOW, c->x86_capability); 298 set_bit(X86_FEATURE_3DNOW, c->x86_capability);
301 299
302 get_model_name(c); 300 get_model_name(c);
@@ -306,31 +304,31 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
306static void __cpuinit init_centaur(struct cpuinfo_x86 *c) 304static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
307{ 305{
308 enum { 306 enum {
309 ECX8=1<<1, 307 ECX8 = 1<<1,
310 EIERRINT=1<<2, 308 EIERRINT = 1<<2,
311 DPM=1<<3, 309 DPM = 1<<3,
312 DMCE=1<<4, 310 DMCE = 1<<4,
313 DSTPCLK=1<<5, 311 DSTPCLK = 1<<5,
314 ELINEAR=1<<6, 312 ELINEAR = 1<<6,
315 DSMC=1<<7, 313 DSMC = 1<<7,
316 DTLOCK=1<<8, 314 DTLOCK = 1<<8,
317 EDCTLB=1<<8, 315 EDCTLB = 1<<8,
318 EMMX=1<<9, 316 EMMX = 1<<9,
319 DPDC=1<<11, 317 DPDC = 1<<11,
320 EBRPRED=1<<12, 318 EBRPRED = 1<<12,
321 DIC=1<<13, 319 DIC = 1<<13,
322 DDC=1<<14, 320 DDC = 1<<14,
323 DNA=1<<15, 321 DNA = 1<<15,
324 ERETSTK=1<<16, 322 ERETSTK = 1<<16,
325 E2MMX=1<<19, 323 E2MMX = 1<<19,
326 EAMD3D=1<<20, 324 EAMD3D = 1<<20,
327 }; 325 };
328 326
329 char *name; 327 char *name;
330 u32 fcr_set=0; 328 u32 fcr_set = 0;
331 u32 fcr_clr=0; 329 u32 fcr_clr = 0;
332 u32 lo,hi,newlo; 330 u32 lo, hi, newlo;
333 u32 aa,bb,cc,dd; 331 u32 aa, bb, cc, dd;
334 332
335 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; 333 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
336 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ 334 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
@@ -338,12 +336,12 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
338 336
339 switch (c->x86) { 337 switch (c->x86) {
340 338
341 case 5: 339 case 5:
342 switch(c->x86_model) { 340 switch (c->x86_model) {
343 case 4: 341 case 4:
344 name="C6"; 342 name = "C6";
345 fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; 343 fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
346 fcr_clr=DPDC; 344 fcr_clr = DPDC;
347 printk(KERN_NOTICE "Disabling bugged TSC.\n"); 345 printk(KERN_NOTICE "Disabling bugged TSC.\n");
348 clear_bit(X86_FEATURE_TSC, c->x86_capability); 346 clear_bit(X86_FEATURE_TSC, c->x86_capability);
349#ifdef CONFIG_X86_OOSTORE 347#ifdef CONFIG_X86_OOSTORE
@@ -351,29 +349,29 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
351 /* Enable 349 /* Enable
352 write combining on non-stack, non-string 350 write combining on non-stack, non-string
353 write combining on string, all types 351 write combining on string, all types
354 weak write ordering 352 weak write ordering
355 353
356 The C6 original lacks weak read order 354 The C6 original lacks weak read order
357 355
358 Note 0x120 is write only on Winchip 1 */ 356 Note 0x120 is write only on Winchip 1 */
359 357
360 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); 358 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
361#endif 359#endif
362 break; 360 break;
363 case 8: 361 case 8:
364 switch(c->x86_mask) { 362 switch (c->x86_mask) {
365 default: 363 default:
366 name="2"; 364 name = "2";
367 break; 365 break;
368 case 7 ... 9: 366 case 7 ... 9:
369 name="2A"; 367 name = "2A";
370 break; 368 break;
371 case 10 ... 15: 369 case 10 ... 15:
372 name="2B"; 370 name = "2B";
373 break; 371 break;
374 } 372 }
375 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; 373 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
376 fcr_clr=DPDC; 374 fcr_clr = DPDC;
377#ifdef CONFIG_X86_OOSTORE 375#ifdef CONFIG_X86_OOSTORE
378 winchip2_unprotect_mcr(); 376 winchip2_unprotect_mcr();
379 winchip2_create_optimal_mcr(); 377 winchip2_create_optimal_mcr();
@@ -381,17 +379,17 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
381 /* Enable 379 /* Enable
382 write combining on non-stack, non-string 380 write combining on non-stack, non-string
383 write combining on string, all types 381 write combining on string, all types
384 weak write ordering 382 weak write ordering
385 */ 383 */
386 lo|=31; 384 lo |= 31;
387 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 385 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
388 winchip2_protect_mcr(); 386 winchip2_protect_mcr();
389#endif 387#endif
390 break; 388 break;
391 case 9: 389 case 9:
392 name="3"; 390 name = "3";
393 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; 391 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
394 fcr_clr=DPDC; 392 fcr_clr = DPDC;
395#ifdef CONFIG_X86_OOSTORE 393#ifdef CONFIG_X86_OOSTORE
396 winchip2_unprotect_mcr(); 394 winchip2_unprotect_mcr();
397 winchip2_create_optimal_mcr(); 395 winchip2_create_optimal_mcr();
@@ -399,50 +397,50 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
399 /* Enable 397 /* Enable
400 write combining on non-stack, non-string 398 write combining on non-stack, non-string
401 write combining on string, all types 399 write combining on string, all types
402 weak write ordering 400 weak write ordering
403 */ 401 */
404 lo|=31; 402 lo |= 31;
405 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); 403 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
406 winchip2_protect_mcr(); 404 winchip2_protect_mcr();
407#endif 405#endif
408 break; 406 break;
409 default: 407 default:
410 name="??"; 408 name = "??";
411 } 409 }
412 410
413 rdmsr(MSR_IDT_FCR1, lo, hi); 411 rdmsr(MSR_IDT_FCR1, lo, hi);
414 newlo=(lo|fcr_set) & (~fcr_clr); 412 newlo = (lo|fcr_set) & (~fcr_clr);
415 413
416 if (newlo!=lo) { 414 if (newlo != lo) {
417 printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo ); 415 printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo);
418 wrmsr(MSR_IDT_FCR1, newlo, hi ); 416 wrmsr(MSR_IDT_FCR1, newlo, hi);
419 } else { 417 } else {
420 printk(KERN_INFO "Centaur FCR is 0x%X\n",lo); 418 printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
421 } 419 }
422 /* Emulate MTRRs using Centaur's MCR. */ 420 /* Emulate MTRRs using Centaur's MCR. */
423 set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); 421 set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
424 /* Report CX8 */ 422 /* Report CX8 */
425 set_bit(X86_FEATURE_CX8, c->x86_capability); 423 set_bit(X86_FEATURE_CX8, c->x86_capability);
426 /* Set 3DNow! on Winchip 2 and above. */ 424 /* Set 3DNow! on Winchip 2 and above. */
427 if (c->x86_model >=8) 425 if (c->x86_model >= 8)
428 set_bit(X86_FEATURE_3DNOW, c->x86_capability); 426 set_bit(X86_FEATURE_3DNOW, c->x86_capability);
429 /* See if we can find out some more. */ 427 /* See if we can find out some more. */
430 if ( cpuid_eax(0x80000000) >= 0x80000005 ) { 428 if (cpuid_eax(0x80000000) >= 0x80000005) {
431 /* Yes, we can. */ 429 /* Yes, we can. */
432 cpuid(0x80000005,&aa,&bb,&cc,&dd); 430 cpuid(0x80000005, &aa, &bb, &cc, &dd);
433 /* Add L1 data and code cache sizes. */ 431 /* Add L1 data and code cache sizes. */
434 c->x86_cache_size = (cc>>24)+(dd>>24); 432 c->x86_cache_size = (cc>>24)+(dd>>24);
435 } 433 }
436 sprintf( c->x86_model_id, "WinChip %s", name ); 434 sprintf(c->x86_model_id, "WinChip %s", name);
437 break; 435 break;
438 436
439 case 6: 437 case 6:
440 init_c3(c); 438 init_c3(c);
441 break; 439 break;
442 } 440 }
443} 441}
444 442
445static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) 443static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
446{ 444{
447 /* VIA C3 CPUs (670-68F) need further shifting. */ 445 /* VIA C3 CPUs (670-68F) need further shifting. */
448 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) 446 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
@@ -451,8 +449,8 @@ static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigne
451 /* VIA also screwed up Nehemiah stepping 1, and made 449 /* VIA also screwed up Nehemiah stepping 1, and made
452 it return '65KB' instead of '64KB' 450 it return '65KB' instead of '64KB'
453 - Note, it seems this may only be in engineering samples. */ 451 - Note, it seems this may only be in engineering samples. */
454 if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65)) 452 if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_mask == 1) && (size == 65))
455 size -=1; 453 size -= 1;
456 454
457 return size; 455 return size;
458} 456}
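The comments in the diff above ("Size is a power of 2 so this makes a mask", the block sizing in centaur_mcr_compute()) lean on bit arithmetic that may not be obvious at a glance. Below is a minimal userspace sketch of that arithmetic, not part of the commit: it mirrors power2() and the mask computation from centaur_mcr_insert(), with plain unsigned int standing in for the kernel's u32 and the constants chosen only for illustration.

        #include <stdio.h>

        /* Mirrors power2() from centaur.c: the largest power of two <= x
         * (0 for x == 0; assumes x < 2^31, as for the RAM sizes handled here). */
        static unsigned int power2(unsigned int x)
        {
                unsigned int s = 1;

                while (s <= x)
                        s <<= 1;
                return s >> 1;
        }

        int main(void)
        {
                unsigned int size = 0x40000;            /* 256 KB: MCR sizes are powers of two */
                unsigned int mask = ~(size - 1);        /* "Size is a power of 2 so this makes a mask" */

                /* centaur_mcr_compute()-style sizing: the largest power-of-two
                 * block that fits in a gap of 0x12345 bytes is 0x10000. */
                printf("power2(0x12345) = 0x%x\n", power2(0x12345));

                /* centaur_mcr_insert() then clears the low 12 bits of the mask
                 * to remove the ctrl value bits, as its comment says. */
                printf("MCR mask for size 0x%x = 0x%x\n", size, mask & ~0xFFFu);
                return 0;
        }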