Diffstat (limited to 'arch/x86/kernel/cpu')

-rw-r--r--  arch/x86/kernel/cpu/amd.c            |  9
-rw-r--r--  arch/x86/kernel/cpu/centaur.c        | 11
-rw-r--r--  arch/x86/kernel/cpu/common.c         | 34
-rw-r--r--  arch/x86/kernel/cpu/common_64.c      | 74
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c          | 32
-rw-r--r--  arch/x86/kernel/cpu/feature_names.c  |  3

6 files changed, 136 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..18514ed26104 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			       mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
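[Note, not part of the patch: the c->x86_model and c->x86_mask fields tested above are decoded from CPUID leaf 1 during early setup. A minimal userspace sketch of that decoding, assuming a GCC/Clang toolchain that provides <cpuid.h>; the kernel additionally folds in the extended family/model fields, omitted here for brevity:]

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int family   = (eax >> 8) & 0xf;	/* c->x86 (base family) */
	unsigned int model    = (eax >> 4) & 0xf;	/* c->x86_model (base model) */
	unsigned int stepping = eax & 0xf;		/* c->x86_mask (stepping) */

	printf("family %u, model %u, stepping %u\n", family, model, stepping);
	return 0;
}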
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..a0534c04d38a 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
 };
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..8aab8517642e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -334,11 +335,40 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
+	early_get_cap(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+}
 
-	early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		}
 
 		init_scattered_cpuid_features(c);
+		detect_nopl(c);
 	}
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
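[Note, not part of the patch: detect_nopl() executes the NOPL byte sequence and relies on the kernel's exception-fixup table to recover on CPUs that raise #UD for it. The same probe can be demonstrated from user space with a SIGILL handler standing in for the fixup table — a minimal sketch under POSIX assumptions, not kernel code:]

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf nopl_env;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(nopl_env, 1);	/* skip past the faulting instruction */
}

int main(void)
{
	struct sigaction sa;
	int has_nopl = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigill_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	if (sigsetjmp(nopl_env, 1) == 0) {
		/* 0f 1f c0 = nopl %eax, the same bytes the kernel probes */
		asm volatile(".byte 0x0f,0x1f,0xc0");
		has_nopl = 1;
	}

	printf("nopl %ssupported\n", has_nopl ? "" : "not ");
	return 0;
}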
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index dd6e3f15017e..a11f5d4477cd 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}
 
+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -493,17 +529,20 @@ void pda_init(int cpu)
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
 
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE - 64;
 }
 
 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
@@ -601,19 +640,22 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
 		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-				      v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+				(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
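[Note, not part of the patch: the common_64.c changes share one theme — pda_init() and cpu_init() become safe to re-run on a CPU that is brought back online, by allocating the IRQ and exception stacks only on first use and reusing them afterwards. A generic sketch of this allocate-once pattern; the names here (cpu_state, cpu_state_init) are hypothetical, not kernel APIs:]

#include <stdlib.h>

struct cpu_state {
	char *stack;	/* NULL until first initialization */
};

static int cpu_state_init(struct cpu_state *s, size_t size)
{
	if (!s->stack) {		/* first call: allocate */
		s->stack = malloc(size);
		if (!s->stack)
			return -1;
	}
	/* later calls reuse the existing allocation, so re-running the
	 * init path on a returning CPU neither leaks nor reallocates */
	return 0;
}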
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index e710a21bb6e8..898a5a2002ed 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0); /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }
 
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
 
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
 };
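[Note, not part of the patch: the getCx86()/setCx86() accessors used above follow the classic index/data port protocol — the register number goes to I/O port 0x22 and the value is read or written at port 0x23. A userspace-flavored sketch of that access pattern, assuming ioperm()-granted port access and that CX86_DIR0 is register 0xfe as in the kernel headers; the in-kernel macros may differ in detail:]

#include <stdio.h>
#include <sys/io.h>	/* ioperm(), inb(), outb() — Linux, x86 */

static unsigned char get_cx86(unsigned char reg)
{
	outb(reg, 0x22);	/* select configuration register */
	return inb(0x23);	/* read its value */
}

static void set_cx86(unsigned char reg, unsigned char data)
{
	outb(reg, 0x22);	/* select configuration register */
	outb(data, 0x23);	/* write new value */
}

int main(void)
{
	if (ioperm(0x22, 2, 1)) {	/* needs root */
		perror("ioperm");
		return 1;
	}
	/* 0xfe = CX86_DIR0 (assumption); meaningless on non-Cyrix CPUs */
	printf("DIR0 = 0x%02x\n", get_cx86(0xfe));
	(void)set_cx86;		/* writer shown for completeness */
	return 0;
}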
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index e43ad4ad4cba..c9017799497c 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
 	NULL, NULL, NULL, NULL,
 	"constant_tsc", "up", NULL, "arch_perfmon",
 	"pebs", "bts", NULL, NULL,
-	"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"rep_good", NULL, NULL, NULL,
+	"nopl", NULL, NULL, NULL,
 	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 	/* Intel-defined (#2) */
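[Note, not part of the patch: the position of "nopl" in this table matters, because x86_cap_flags[] is indexed by feature word * 32 + bit. This hunk sits in word 3 (the Linux-defined auxiliary flags), so "nopl" lands at bit 20, matching the X86_FEATURE_NOPL definition the series introduces. A standalone sketch of the index arithmetic:]

#include <stdio.h>

/* Assumed from the patch series: NOPL is Linux-defined word 3, bit 20 */
#define X86_FEATURE_NOPL (3 * 32 + 20)

int main(void)
{
	/* /proc/cpuinfo prints x86_cap_flags[n] for every set feature bit n */
	printf("nopl -> index %d (word %d, bit %d)\n",
	       X86_FEATURE_NOPL, X86_FEATURE_NOPL / 32, X86_FEATURE_NOPL % 32);
	return 0;
}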