Diffstat (limited to 'arch/x86_64/kernel/setup.c')
-rw-r--r--  arch/x86_64/kernel/setup.c | 89
1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index da0bc3e7bdf5..750e01dcbdf4 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -60,6 +60,7 @@
 #include <asm/setup.h>
 #include <asm/mach_apic.h>
 #include <asm/numa.h>
+#include <asm/sections.h>
 
 /*
  * Machine setup..
@@ -103,7 +104,6 @@ struct edid_info edid_info;
 struct e820map e820;
 
 extern int root_mountflags;
-extern char _text, _etext, _edata, _end;
 
 char command_line[COMMAND_LINE_SIZE];
 
@@ -412,7 +412,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long bootmap_size, bootmap;
 
-       memory_present(0, start_pfn, end_pfn);
        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
@@ -571,6 +570,8 @@ void __init setup_arch(char **cmdline_p)
 
        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
+       zap_low_mappings(0);
+
 #ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -657,8 +658,6 @@ void __init setup_arch(char **cmdline_p)
        }
 #endif
 
-       sparse_init();
-
        paging_init();
 
        check_ioapic();
@@ -793,7 +792,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 
        bits = 0;
-       while ((1 << bits) < c->x86_num_cores)
+       while ((1 << bits) < c->x86_max_cores)
                bits++;
 
        /* Low order bits define the core id (index of core in socket) */
@@ -823,10 +822,10 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
-       cpu_to_node[cpu] = node;
+       numa_set_node(cpu, node);
 
        printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
-                       cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
+                       cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
 #endif
 #endif
 }
@@ -875,9 +874,9 @@ static int __init init_amd(struct cpuinfo_x86 *c)
        display_cacheinfo(c);
 
        if (c->extended_cpuid_level >= 0x80000008) {
-               c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-               if (c->x86_num_cores & (c->x86_num_cores - 1))
-                       c->x86_num_cores = 1;
+               c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+               if (c->x86_max_cores & (c->x86_max_cores - 1))
+                       c->x86_max_cores = 1;
 
                amd_detect_cmp(c);
        }
@@ -889,54 +888,44 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
-       int index_msb, tmp;
+       int index_msb, core_bits;
        int cpu = smp_processor_id();
 
+       cpuid(1, &eax, &ebx, &ecx, &edx);
+
+       c->apicid = phys_pkg_id(0);
+
        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;
 
-       cpuid(1, &eax, &ebx, &ecx, &edx);
        smp_num_siblings = (ebx & 0xff0000) >> 16;
 
        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-       } else if (smp_num_siblings > 1) {
-               index_msb = 31;
-               /*
-                * At this point we only support two siblings per
-                * processor package.
-                */
+       } else if (smp_num_siblings > 1 ) {
+
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }
-               tmp = smp_num_siblings;
-               while ((tmp & 0x80000000 ) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+
+               index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id(index_msb);
 
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);
 
-               smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-               tmp = smp_num_siblings;
-               index_msb = 31;
-               while ((tmp & 0x80000000) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+               index_msb = get_count_order(smp_num_siblings) ;
 
-               cpu_core_id[cpu] = phys_pkg_id(index_msb);
+               core_bits = get_count_order(c->x86_max_cores);
 
-               if (c->x86_num_cores > 1)
+               cpu_core_id[cpu] = phys_pkg_id(index_msb) &
+                                       ((1 << core_bits) - 1);
+
+               if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
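For reference, get_count_order(n), which the new code uses in place of the removed 0x80000000 scan loops, is the kernel helper that returns the smallest b with (1 << b) >= n, i.e. ceil(log2(n)). Below is a minimal user-space sketch of the resulting APIC-ID decomposition; count_order() is only a stand-in for that helper, and the topology values and APIC ID are assumed example inputs, not taken from this patch.

#include <stdio.h>

/* Stand-in for the kernel's get_count_order():
 * smallest b such that (1 << b) >= n, i.e. ceil(log2(n)). */
static int count_order(unsigned int n)
{
        int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        /* Assumed example topology: 4 logical CPUs per package,
         * 2 cores per package, hence 2 HT siblings per core. */
        unsigned int smp_num_siblings = 4;
        unsigned int max_cores = 2;
        unsigned int apicid = 0x7;      /* hypothetical initial APIC ID */

        /* Bits above all siblings in a package select the physical package,
         * mirroring phys_proc_id[cpu] = phys_pkg_id(index_msb). */
        int index_msb = count_order(smp_num_siblings);
        unsigned int phys_proc_id = apicid >> index_msb;

        /* Drop the thread bits, then mask down to the core number,
         * mirroring phys_pkg_id(index_msb) & ((1 << core_bits) - 1). */
        smp_num_siblings = smp_num_siblings / max_cores;
        index_msb = count_order(smp_num_siblings);
        int core_bits = count_order(max_cores);
        unsigned int cpu_core_id = (apicid >> index_msb) & ((1 << core_bits) - 1);

        printf("APIC ID %#x -> package %u, core %u\n",
               apicid, phys_proc_id, cpu_core_id);
        return 0;
}

With these assumed inputs the sketch prints "package 1, core 1", the same result the masked phys_pkg_id() computation in the hunk above produces for those numbers.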
@@ -975,7 +964,7 @@ static void srat_detect_node(void)
        node = apicid_to_node[hard_smp_processor_id()];
        if (node == NUMA_NO_NODE)
                node = 0;
-       cpu_to_node[cpu] = node;
+       numa_set_node(cpu, node);
 
        if (acpi_numa > 0)
                printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
@@ -993,13 +982,18 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
+               /* CPUID workaround for Intel 0F34 CPU */
+               if (c->x86_vendor == X86_VENDOR_INTEL &&
+                   c->x86 == 0xF && c->x86_model == 0x3 &&
+                   c->x86_mask == 0x4)
+                       c->x86_phys_bits = 36;
        }
 
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 >= 15)
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-       c->x86_num_cores = intel_num_cpu_cores(c);
+       c->x86_max_cores = intel_num_cpu_cores(c);
 
        srat_detect_node();
 }
@@ -1037,7 +1031,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
-       c->x86_num_cores = 1;
+       c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
@@ -1060,10 +1054,10 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
-               if (c->x86 == 0xf) {
+               if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
+               if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
-               }
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
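As a worked example of the family/model decoding changed above, the sketch below feeds in 0x00000f34 as an assumed raw CPUID(1) EAX value; that is the "0F34" signature the init_intel hunk earlier special-cases when forcing x86_phys_bits to 36. Only the bit layout shown in the diff is used; the variable names are local to the example.

#include <stdio.h>

int main(void)
{
        unsigned int tfms = 0x00000f34;         /* assumed CPUID leaf 1 EAX value */

        unsigned int family   = (tfms >> 8) & 0xf;
        unsigned int model    = (tfms >> 4) & 0xf;
        unsigned int stepping = tfms & 0xf;     /* stored as x86_mask in the kernel */

        if (family == 0xf)                      /* extended family only for family 0xf */
                family += (tfms >> 20) & 0xff;
        if (family >= 0x6)                      /* extended model for family 6 and up */
                model += ((tfms >> 16) & 0xf) << 4;

        /* Prints: family 0xf, model 0x3, stepping 0x4 -- the 0F34 part
         * matched by the workaround in the init_intel hunk. */
        printf("family %#x, model %#x, stepping %#x\n", family, model, stepping);
        return 0;
}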
@@ -1271,13 +1265,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
 
 #ifdef CONFIG_SMP
-       if (smp_num_siblings * c->x86_num_cores > 1) {
+       if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
-               seq_printf(m, "siblings\t: %d\n",
-                               c->x86_num_cores * smp_num_siblings);
+               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
-               seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
 #endif
 