Diffstat (limited to 'arch/x86/kernel/setup_64.c')
-rw-r--r--  arch/x86/kernel/setup_64.c  452
1 file changed, 67 insertions(+), 385 deletions(-)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 6dff1286ad8a..545440e471b2 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -71,6 +71,7 @@
 #include <asm/topology.h>
 #include <asm/trampoline.h>
 #include <asm/pat.h>
+#include <asm/mmconfig.h>
 
 #include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
@@ -79,6 +80,8 @@
 #define ARCH_SETUP
 #endif
 
+#include "cpu/cpu.h"
+
 /*
  * Machine setup..
  */
@@ -95,8 +98,6 @@ int bootloader_type;
 
 unsigned long saved_video_mode;
 
-int force_mwait __cpuinitdata;
-
 /*
  * Early DMI memory
  */
@@ -118,7 +119,7 @@ EXPORT_SYMBOL_GPL(edid_info);
 
 extern int root_mountflags;
 
-char __initdata command_line[COMMAND_LINE_SIZE];
+static char __initdata command_line[COMMAND_LINE_SIZE];
 
 static struct resource standard_io_resources[] = {
 	{ .name = "dma1", .start = 0x00, .end = 0x1f,
@@ -164,6 +165,7 @@ static struct resource bss_resource = {
 	.flags = IORESOURCE_RAM,
 };
 
+static void __init early_cpu_init(void);
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 #ifdef CONFIG_PROC_VMCORE
@@ -293,18 +295,6 @@ static void __init parse_setup_data(void)
 	}
 }
 
-#ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __init check_enable_amd_mmconf_dmi(void);
-#else
-void __cpuinit fam10h_check_enable_mmcfg(void)
-{
-}
-void __init check_enable_amd_mmconf_dmi(void)
-{
-}
-#endif
-
 /*
  * setup_arch - architecture-specific boot-time initializations
  *
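Note: the mmconfig stubs deleted in the hunk above now come in via the
new <asm/mmconfig.h> include added earlier. A rough sketch of what that
header would carry, assuming it simply mirrors the prototypes removed
here:

#ifdef CONFIG_PCI_MMCONFIG
extern void __cpuinit fam10h_check_enable_mmcfg(void);
extern void __init check_enable_amd_mmconf_dmi(void);
#else
static inline void fam10h_check_enable_mmcfg(void) { }
static inline void check_enable_amd_mmconf_dmi(void) { }
#endif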
@@ -352,6 +342,7 @@ void __init setup_arch(char **cmdline_p)
 	bss_resource.start = virt_to_phys(&__bss_start);
 	bss_resource.end = virt_to_phys(&__bss_stop)-1;
 
+	early_cpu_init();
 	early_identify_cpu(&boot_cpu_data);
 
 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -537,7 +528,20 @@ void __init setup_arch(char **cmdline_p)
 	check_enable_amd_mmconf_dmi();
 }
 
-static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+	display_cacheinfo(c);
+}
+
+static struct cpu_dev __cpuinitdata default_cpu = {
+	.c_init = default_init,
+	.c_vendor = "Unknown",
+};
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 
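Note: cpu_devs[] is filled in by early_cpu_init() further down, which
walks the .x86cpuvendor.init linker section. Each vendor file supplies
a cpu_dev and registers it into that section. A minimal sketch of the
vendor side, modeled on the amd_64.c counterpart in this series (names
illustrative):

static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
};

cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);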
@@ -553,7 +557,7 @@ static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, eax, ebx, ecx, edx;
 
@@ -585,228 +589,6 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
-#ifdef CONFIG_NUMA
-static int __cpuinit nearby_node(int apicid)
-{
-	int i, node;
-
-	for (i = apicid - 1; i >= 0; i--) {
-		node = apicid_to_node[i];
-		if (node != NUMA_NO_NODE && node_online(node))
-			return node;
-	}
-	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		node = apicid_to_node[i];
-		if (node != NUMA_NO_NODE && node_online(node))
-			return node;
-	}
-	return first_node(node_online_map); /* Shouldn't happen */
-}
-#endif
-
-/*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
- * Assumes number of cores is a power of two.
- */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	unsigned bits;
-#ifdef CONFIG_NUMA
-	int cpu = smp_processor_id();
-	int node = 0;
-	unsigned apicid = hard_smp_processor_id();
-#endif
-	bits = c->x86_coreid_bits;
-
-	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
-	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->initial_apicid >> bits;
-
-#ifdef CONFIG_NUMA
-	node = c->phys_proc_id;
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
-	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case. */
-
-		int ht_nodeid = c->initial_apicid;
-
-		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
-		/* Pick a nearby node */
-		if (!node_online(node))
-			node = nearby_node(apicid);
-	}
-	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-#endif
-}
-
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	unsigned bits, ecx;
-
-	/* Multi core CPU? */
-	if (c->extended_cpuid_level < 0x80000008)
-		return;
-
-	ecx = cpuid_ecx(0x80000008);
-
-	c->x86_max_cores = (ecx & 0xff) + 1;
-
-	/* CPU telling us the core id bits shift? */
-	bits = (ecx >> 12) & 0xF;
-
-	/* Otherwise recompute */
-	if (bits == 0) {
-		while ((1 << bits) < c->x86_max_cores)
-			bits++;
-	}
-
-	c->x86_coreid_bits = bits;
-
-#endif
-}
-
-#define ENABLE_C1E_MASK		0x18000000
-#define CPUID_PROCESSOR_SIGNATURE	1
-#define CPUID_XFAM		0x0ff00000
-#define CPUID_XFAM_K8		0x00000000
-#define CPUID_XFAM_10H		0x00100000
-#define CPUID_XFAM_11H		0x00200000
-#define CPUID_XMOD		0x000f0000
-#define CPUID_XMOD_REV_F	0x00040000
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(void)
-{
-	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-
-	switch (eax & CPUID_XFAM) {
-	case CPUID_XFAM_K8:
-		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-			break;
-	case CPUID_XFAM_10H:
-	case CPUID_XFAM_11H:
-		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
-		if (lo & ENABLE_C1E_MASK)
-			return 1;
-		break;
-	default:
-		/* err on the side of caution */
-		return 1;
-	}
-	return 0;
-}
-
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
-{
-	early_init_amd_mc(c);
-
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-{
-	unsigned level;
-
-#ifdef CONFIG_SMP
-	unsigned long value;
-
-	/*
-	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
-	 * bit 6 of msr C001_0015
-	 *
-	 * Errata 63 for SH-B3 steppings
-	 * Errata 122 for all steppings (F+ have it disabled by default)
-	 */
-	if (c->x86 == 15) {
-		rdmsrl(MSR_K8_HWCR, value);
-		value |= 1 << 6;
-		wrmsrl(MSR_K8_HWCR, value);
-	}
-#endif
-
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_cpu_cap(c, 0*32+31);
-
-	/* On C+ stepping K8 rep microcode works well for copy/memset */
-	level = cpuid_eax(1);
-	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
-			     level >= 0x0f58))
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
-	/* Enable workaround for FXSAVE leak */
-	if (c->x86 >= 6)
-		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
-
-	level = get_model_name(c);
-	if (!level) {
-		switch (c->x86) {
-		case 15:
-			/* Should distinguish Models here, but this is only
-			   a fallback anyways. */
-			strcpy(c->x86_model_id, "Hammer");
-			break;
-		}
-	}
-	display_cacheinfo(c);
-
-	/* Multi core CPU? */
-	if (c->extended_cpuid_level >= 0x80000008)
-		amd_detect_cmp(c);
-
-	if (c->extended_cpuid_level >= 0x80000006 &&
-	    (cpuid_edx(0x80000006) & 0xf000))
-		num_cache_leaves = 4;
-	else
-		num_cache_leaves = 3;
-
-	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-		set_cpu_cap(c, X86_FEATURE_K8);
-
-	/* MFENCE stops RDTSC speculation */
-	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-
-	if (c->x86 == 0x10)
-		fam10h_check_enable_mmcfg();
-
-	if (amd_apic_timer_broken())
-		disable_apic_timer = 1;
-
-	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
-		unsigned long long tseg;
-
-		/*
-		 * Split up direct mapping around the TSEG SMM area.
-		 * Don't do it for gbpages because there seems very little
-		 * benefit in doing so.
-		 */
-		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
-		    (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
-			set_memory_4k((unsigned long)__va(tseg), 1);
-	}
-}
-
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
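Note: the AMD-specific code removed above leaves setup_64.c entirely;
under the new cpu_devs[] scheme it registers itself from a per-vendor
file (cpu/amd_64.c in this series). The core/socket split performed by
the removed amd_detect_cmp() is worth a worked example. A standalone
illustration with made-up values, not kernel code:

#include <stdio.h>

int main(void)
{
	/* x86_coreid_bits as derived from CPUID 0x80000008 ECX[15:12],
	 * e.g. 2 core-id bits for a quad-core part */
	unsigned bits = 2;
	unsigned initial_apicid = 0x0b;		/* 1011b */

	/* low-order bits select the core within the socket ... */
	unsigned cpu_core_id = initial_apicid & ((1 << bits) - 1);
	/* ... the remaining high bits identify the socket */
	unsigned phys_proc_id = initial_apicid >> bits;

	/* prints "core 3 in socket 2" */
	printf("core %u in socket %u\n", cpu_core_id, phys_proc_id);
	return 0;
}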
@@ -857,135 +639,59 @@ out:
 #endif
 }
 
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-	unsigned int eax, t;
-
-	if (c->cpuid_level < 4)
-		return 1;
-
-	cpuid_count(4, 0, &eax, &t, &t, &t);
-
-	if (eax & 0x1f)
-		return ((eax >> 26) + 1);
-	else
-		return 1;
-}
-
-static void __cpuinit srat_detect_node(void)
-{
-#ifdef CONFIG_NUMA
-	unsigned node;
-	int cpu = smp_processor_id();
-	int apicid = hard_smp_processor_id();
-
-	/* Don't do the funky fallback heuristics the AMD version employs
-	   for now. */
-	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE || !node_online(node))
-		node = first_node(node_online_map);
-	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-}
-
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-{
-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-{
-	/* Cache sizes */
-	unsigned n;
-
-	init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9) {
-		unsigned eax = cpuid_eax(10);
-		/* Check for version and the number of counters */
-		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
-	}
-
-	if (cpu_has_ds) {
-		unsigned int l1, l2;
-		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-		if (!(l1 & (1<<11)))
-			set_cpu_cap(c, X86_FEATURE_BTS);
-		if (!(l1 & (1<<12)))
-			set_cpu_cap(c, X86_FEATURE_PEBS);
-	}
-
-
-	if (cpu_has_bts)
-		ds_init_intel(c);
-
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-		/* CPUID workaround for Intel 0F34 CPU */
-		if (c->x86_vendor == X86_VENDOR_INTEL &&
-		    c->x86 == 0xF && c->x86_model == 0x3 &&
-		    c->x86_mask == 0x4)
-			c->x86_phys_bits = 36;
-	}
-
-	if (c->x86 == 15)
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	c->x86_max_cores = intel_num_cpu_cores(c);
-
-	srat_detect_node();
-}
-
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
-{
-	if (c->x86 == 0x6 && c->x86_model >= 0xf)
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
-{
-	/* Cache sizes */
-	unsigned n;
-
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
-
-	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	}
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-}
-
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
-{
-	char *v = c->x86_vendor_id;
-
-	if (!strcmp(v, "AuthenticAMD"))
-		c->x86_vendor = X86_VENDOR_AMD;
-	else if (!strcmp(v, "GenuineIntel"))
-		c->x86_vendor = X86_VENDOR_INTEL;
-	else if (!strcmp(v, "CentaurHauls"))
-		c->x86_vendor = X86_VENDOR_CENTAUR;
-	else
-		c->x86_vendor = X86_VENDOR_UNKNOWN;
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+{
+	char *v = c->x86_vendor_id;
+	int i;
+	static int printed;
+
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		if (cpu_devs[i]) {
+			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+			    (cpu_devs[i]->c_ident[1] &&
+			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+				c->x86_vendor = i;
+				this_cpu = cpu_devs[i];
+				return;
+			}
+		}
+	}
+	if (!printed) {
+		printed++;
+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
+	}
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+}
+
+static void __init early_cpu_support_print(void)
+{
+	int i,j;
+	struct cpu_dev *cpu_devx;
+
+	printk("KERNEL supported cpus:\n");
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		cpu_devx = cpu_devs[i];
+		if (!cpu_devx)
+			continue;
+		for (j = 0; j < 2; j++) {
+			if (!cpu_devx->c_ident[j])
+				continue;
+			printk(" %s %s\n", cpu_devx->c_vendor,
+				cpu_devx->c_ident[j]);
+		}
+	}
+}
+
+static void __init early_cpu_init(void)
+{
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start ;
+	     cvdev < __x86cpuvendor_end   ;
+	     cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+	early_cpu_support_print();
 }
 
 /* Do some early cpuid on the boot CPU to get some parameter that are
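Note: the table-driven lookup above replaces the old hard-coded strcmp
chain; each cpu_dev may list up to two CPUID vendor strings in
c_ident[]. A standalone toy model of the matching logic, illustration
only:

#include <stdio.h>
#include <string.h>

struct cpu_dev {
	const char *c_vendor;
	const char *c_ident[2];	/* CPUID vendor strings, up to two */
};

static struct cpu_dev amd = { "AMD", { "AuthenticAMD", NULL } };
static struct cpu_dev *cpu_devs[] = { &amd, NULL };

int main(void)
{
	const char *v = "AuthenticAMD";	/* as read from CPUID leaf 0 */
	int i, j;

	for (i = 0; cpu_devs[i]; i++)
		for (j = 0; j < 2; j++)
			if (cpu_devs[i]->c_ident[j] &&
			    !strcmp(v, cpu_devs[i]->c_ident[j]))
				printf("matched vendor %d: %s\n", i,
				       cpu_devs[i]->c_vendor);
	return 0;
}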
@@ -1066,17 +772,9 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		early_init_amd(c);
-		break;
-	case X86_VENDOR_INTEL:
-		early_init_intel(c);
-		break;
-	case X86_VENDOR_CENTAUR:
-		early_init_centaur(c);
-		break;
-	}
+	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+	    cpu_devs[c->x86_vendor]->c_early_init)
+		cpu_devs[c->x86_vendor]->c_early_init(c);
 
 	validate_pat_support(c);
 }
@@ -1104,24 +802,8 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * At the end of this section, c->x86_capability better
 	 * indicate the features this CPU genuinely supports!
 	 */
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		init_amd(c);
-		break;
-
-	case X86_VENDOR_INTEL:
-		init_intel(c);
-		break;
-
-	case X86_VENDOR_CENTAUR:
-		init_centaur(c);
-		break;
-
-	case X86_VENDOR_UNKNOWN:
-	default:
-		display_cacheinfo(c);
-		break;
-	}
+	if (this_cpu->c_init)
+		this_cpu->c_init(c);
 
 	detect_ht(c);
 
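Note: the net effect is that identify_cpu() needs no vendor switch at
all -- this_cpu starts out pointing at default_cpu, so an unmatched
vendor still gets display_cacheinfo() via default_init(). A condensed
model of that fallback behaviour, illustration only:

#include <stdio.h>

struct cpu_dev {
	void (*c_init)(void);
	const char *c_vendor;
};

static void default_init(void) { printf("generic init\n"); }

static struct cpu_dev default_cpu = { default_init, "Unknown" };
/* get_cpu_vendor() would repoint this on a successful match */
static struct cpu_dev *this_cpu = &default_cpu;

int main(void)
{
	if (this_cpu->c_init)
		this_cpu->c_init();	/* falls back to generic init */
	return 0;
}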