Diffstat (limited to 'arch/x86/kernel/cpu/common_64.c')
 arch/x86/kernel/cpu/common_64.c | 125 ++++++++++++++++++++++++++++++-------
 1 file changed, 109 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index dd6e3f15017e..305b465889b0 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
 	}
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
+	}
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
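
For illustration only, the same first-principles probe can be sketched in
user space, with a SIGILL handler standing in for the kernel's
.fixup/__ex_table recovery path above (a hypothetical demo, not part of
this patch):

/* nopl_probe.c -- hypothetical user-space analogue of detect_nopl().
 * Executes the NOPL opcode; if the CPU raises #UD, the SIGILL handler
 * jumps back out, mirroring the kernel's fixup at label 3 above.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void on_sigill(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);	/* the "jmp 2b" equivalent */
}

int main(void)
{
	int has_nopl = 0;

	signal(SIGILL, on_sigill);
	if (sigsetjmp(probe_env, 1) == 0) {
		asm volatile(".byte 0x0f,0x1f,0xc0");	/* nopl %eax */
		has_nopl = 1;	/* reached only if the insn executed */
	}
	printf("NOPL %s\n", has_nopl ? "supported" : "not supported");
	return 0;
}

The kernel cannot take a signal at this point in boot, which is why the
patch encodes the recovery as an exception-table entry instead.
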
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = eax & 0xff;
 	}
 
+	detect_nopl(c);
+
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -394,6 +430,49 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
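
As a rough user-space counterpart to print_cpu_msr(), the same ranges can
be walked through the msr character device, where a pread() at offset N
returns the 8-byte value of MSR N and fails with EIO on unimplemented
MSRs, much as the rdmsrl_amd_safe() check above skips them. A minimal
sketch, assuming root and the msr driver loaded (note it issues a plain
RDMSR, not the AMD-keyed variant the patch uses):

/* msr_dump.c -- hypothetical sketch; walks the first range above
 * via /dev/cpu/0/msr (requires CONFIG_X86_MSR and root).
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t index;
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	for (index = 0x00000000; index < 0x00000418; index++) {
		/* the offset selects the MSR; failed reads are skipped */
		if (pread(fd, &val, sizeof(val), index) != sizeof(val))
			continue;
		printf(" MSR%08x: %016" PRIx64 "\n", index, val);
	}
	close(fd);
	return 0;
}
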
@@ -403,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
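
Usage note: setup_show_msr() parses a CPU count via get_option(), so
booting with, say, show_msr=1 on the kernel command line dumps the four
MSR ranges for CPU 0; on CONFIG_SMP kernels the cpu_index < show_msr test
above extends that to the first N CPUs. With show_msr left at its default
of 0, print_cpu_info() behaves exactly as before.
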
@@ -493,17 +580,20 @@ void pda_init(int cpu)
 		/* others are initialized in smpboot.c */
 		pda->pcurrent = &init_task;
 		pda->irqstackptr = boot_cpu_stack;
+		pda->irqstackptr += IRQSTACKSIZE - 64;
 	} else {
-		pda->irqstackptr = (char *)
-			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-		if (!pda->irqstackptr)
-			panic("cannot allocate irqstack for cpu %d", cpu);
+		if (!pda->irqstackptr) {
+			pda->irqstackptr = (char *)
+				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
+			if (!pda->irqstackptr)
+				panic("cannot allocate irqstack for cpu %d",
+				      cpu);
+			pda->irqstackptr += IRQSTACKSIZE - 64;
+		}
 
 		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
 			pda->nodenumber = cpu_to_node(cpu);
 	}
-
-	pda->irqstackptr += IRQSTACKSIZE-64;
 }
 
 char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
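
The hunk above also moves the IRQSTACKSIZE - 64 top-of-stack adjustment
inside the allocation paths, so a repeated pda_init() for the same CPU no
longer re-advances the pointer. A hypothetical stand-alone demo of why
the old unconditional adjustment was unsafe (names are illustrative, not
from the kernel):

#include <stdio.h>

#define IRQSTACKSIZE 16384

static char stack[IRQSTACKSIZE];
static char *irqstackptr;

/* Old behaviour: the adjustment ran on every call. */
static void old_init(void)
{
	if (!irqstackptr)
		irqstackptr = stack;
	irqstackptr += IRQSTACKSIZE - 64;
}

/* New behaviour: the adjustment is tied to the one-time setup. */
static void new_init(void)
{
	if (!irqstackptr) {
		irqstackptr = stack;
		irqstackptr += IRQSTACKSIZE - 64;
	}
}

int main(void)
{
	old_init();
	old_init();		/* second call walks past the buffer */
	printf("old: offset %td into a %d-byte stack\n",
	       irqstackptr - stack, IRQSTACKSIZE);

	irqstackptr = NULL;
	new_init();
	new_init();		/* second call is a no-op */
	printf("new: offset %td\n", irqstackptr - stack);
	return 0;
}
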
@@ -601,19 +691,22 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+	if (!orig_ist->ist[0]) {
 		static const unsigned int order[N_EXCEPTION_STACKS] = {
 		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 		};
-		if (cpu) {
-			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-			if (!estacks)
-				panic("Cannot allocate exception stack %ld %d\n",
-				      v, cpu);
+		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+			if (cpu) {
+				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
+				if (!estacks)
+					panic("Cannot allocate exception "
+					      "stack %ld %d\n", v, cpu);
+			}
+			estacks += PAGE_SIZE << order[v];
+			orig_ist->ist[v] = t->x86_tss.ist[v] =
+					(unsigned long)estacks;
 		}
-		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
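
Two things are worth noting about the reworked loop: the new
!orig_ist->ist[0] guard makes the IST setup run once per CPU, so a CPU
brought back online reuses its recorded stacks rather than allocating
fresh ones; and each IST entry records the top of its stack, since x86
stacks grow downward. A hypothetical sketch of that pointer arithmetic:

/* ist_math.c -- illustration of the estacks arithmetic above: the
 * allocator returns the BASE of the area, the IST entry wants the TOP
 * (stacks grow down), hence the PAGE_SIZE << order advance.
 */
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned long base = 0x10000;	/* pretend __get_free_pages() result */
	unsigned int order = 0;		/* EXCEPTION_STACK_ORDER is typically 0 */
	unsigned long ist = base + (PAGE_SIZE << order);

	printf("base %#lx -> ist %#lx (top of a %d-byte stack)\n",
	       base, ist, PAGE_SIZE << order);
	return 0;
}
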