about summary refs log tree commit diff stats
path: root/arch/x86_64/kernel/smpboot.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86_64/kernel/smpboot.c')
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 111
1 file changed, 91 insertions(+), 20 deletions(-)
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index c4e59bbdc187..683c33f7b967 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -64,6 +64,7 @@
64int smp_num_siblings = 1; 64int smp_num_siblings = 1;
65/* Package ID of each logical CPU */ 65/* Package ID of each logical CPU */
66u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 66u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
67/* core ID of each logical CPU */
67u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 68u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
68 69
69/* Bitmask of currently online CPUs */ 70/* Bitmask of currently online CPUs */
@@ -87,7 +88,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
87/* Set when the idlers are all forked */ 88/* Set when the idlers are all forked */
88int smp_threads_ready; 89int smp_threads_ready;
89 90
91/* representing HT siblings of each logical CPU */
90cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 92cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
93
94/* representing HT and core siblings of each logical CPU */
91cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 95cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
92EXPORT_SYMBOL(cpu_core_map); 96EXPORT_SYMBOL(cpu_core_map);
93 97
@@ -434,30 +438,59 @@ void __cpuinit smp_callin(void)
434 cpu_set(cpuid, cpu_callin_map); 438 cpu_set(cpuid, cpu_callin_map);
435} 439}
436 440
441/* representing cpus for which sibling maps can be computed */
442static cpumask_t cpu_sibling_setup_map;
443
437static inline void set_cpu_sibling_map(int cpu) 444static inline void set_cpu_sibling_map(int cpu)
438{ 445{
439 int i; 446 int i;
447 struct cpuinfo_x86 *c = cpu_data;
448
449 cpu_set(cpu, cpu_sibling_setup_map);
440 450
441 if (smp_num_siblings > 1) { 451 if (smp_num_siblings > 1) {
442 for_each_cpu(i) { 452 for_each_cpu_mask(i, cpu_sibling_setup_map) {
443 if (cpu_core_id[cpu] == cpu_core_id[i]) { 453 if (phys_proc_id[cpu] == phys_proc_id[i] &&
454 cpu_core_id[cpu] == cpu_core_id[i]) {
444 cpu_set(i, cpu_sibling_map[cpu]); 455 cpu_set(i, cpu_sibling_map[cpu]);
445 cpu_set(cpu, cpu_sibling_map[i]); 456 cpu_set(cpu, cpu_sibling_map[i]);
457 cpu_set(i, cpu_core_map[cpu]);
458 cpu_set(cpu, cpu_core_map[i]);
446 } 459 }
447 } 460 }
448 } else { 461 } else {
449 cpu_set(cpu, cpu_sibling_map[cpu]); 462 cpu_set(cpu, cpu_sibling_map[cpu]);
450 } 463 }
451 464
452 if (current_cpu_data.x86_num_cores > 1) { 465 if (current_cpu_data.x86_max_cores == 1) {
453 for_each_cpu(i) {
454 if (phys_proc_id[cpu] == phys_proc_id[i]) {
455 cpu_set(i, cpu_core_map[cpu]);
456 cpu_set(cpu, cpu_core_map[i]);
457 }
458 }
459 } else {
460 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 466 cpu_core_map[cpu] = cpu_sibling_map[cpu];
467 c[cpu].booted_cores = 1;
468 return;
469 }
470
471 for_each_cpu_mask(i, cpu_sibling_setup_map) {
472 if (phys_proc_id[cpu] == phys_proc_id[i]) {
473 cpu_set(i, cpu_core_map[cpu]);
474 cpu_set(cpu, cpu_core_map[i]);
475 /*
476 * Does this new cpu bringup a new core?
477 */
478 if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
479 /*
480 * for each core in package, increment
481 * the booted_cores for this new cpu
482 */
483 if (first_cpu(cpu_sibling_map[i]) == i)
484 c[cpu].booted_cores++;
485 /*
486 * increment the core count for all
487 * the other cpus in this package
488 */
489 if (i != cpu)
490 c[i].booted_cores++;
491 } else if (i != cpu && !c[cpu].booted_cores)
492 c[cpu].booted_cores = c[i].booted_cores;
493 }
461 } 494 }
462} 495}
463 496
@@ -879,6 +912,9 @@ static __init void disable_smp(void)
879} 912}
880 913
881#ifdef CONFIG_HOTPLUG_CPU 914#ifdef CONFIG_HOTPLUG_CPU
915
916int additional_cpus __initdata = -1;
917
882/* 918/*
883 * cpu_possible_map should be static, it cannot change as cpu's 919 * cpu_possible_map should be static, it cannot change as cpu's
884 * are onlined, or offlined. The reason is per-cpu data-structures 920 * are onlined, or offlined. The reason is per-cpu data-structures
@@ -887,14 +923,38 @@ static __init void disable_smp(void)
887 * cpu_present_map on the other hand can change dynamically. 923 * cpu_present_map on the other hand can change dynamically.
888 * In case when cpu_hotplug is not compiled, then we resort to current 924 * In case when cpu_hotplug is not compiled, then we resort to current
889 * behaviour, which is cpu_possible == cpu_present. 925 * behaviour, which is cpu_possible == cpu_present.
890 * If cpu-hotplug is supported, then we need to preallocate for all
891 * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
892 * - Ashok Raj 926 * - Ashok Raj
927 *
928 * Three ways to find out the number of additional hotplug CPUs:
929 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
930 * - otherwise use half of the available CPUs or 2, whatever is more.
931 * - The user can overwrite it with additional_cpus=NUM
932 * We do this because additional CPUs waste a lot of memory.
933 * -AK
893 */ 934 */
894__init void prefill_possible_map(void) 935__init void prefill_possible_map(void)
895{ 936{
896 int i; 937 int i;
897 for (i = 0; i < NR_CPUS; i++) 938 int possible;
939
940 if (additional_cpus == -1) {
941 if (disabled_cpus > 0) {
942 additional_cpus = disabled_cpus;
943 } else {
944 additional_cpus = num_processors / 2;
945 if (additional_cpus == 0)
946 additional_cpus = 2;
947 }
948 }
949 possible = num_processors + additional_cpus;
950 if (possible > NR_CPUS)
951 possible = NR_CPUS;
952
953 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
954 possible,
955 max_t(int, possible - num_processors, 0));
956
957 for (i = 0; i < possible; i++)
898 cpu_set(i, cpu_possible_map); 958 cpu_set(i, cpu_possible_map);
899} 959}
900#endif 960#endif
@@ -965,6 +1025,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
965 nmi_watchdog_default(); 1025 nmi_watchdog_default();
966 current_cpu_data = boot_cpu_data; 1026 current_cpu_data = boot_cpu_data;
967 current_thread_info()->cpu = 0; /* needed? */ 1027 current_thread_info()->cpu = 0; /* needed? */
1028 set_cpu_sibling_map(0);
968 1029
969 if (smp_sanity_check(max_cpus) < 0) { 1030 if (smp_sanity_check(max_cpus) < 0) {
970 printk(KERN_INFO "SMP disabled\n"); 1031 printk(KERN_INFO "SMP disabled\n");
@@ -1008,8 +1069,6 @@ void __init smp_prepare_boot_cpu(void)
1008 int me = smp_processor_id(); 1069 int me = smp_processor_id();
1009 cpu_set(me, cpu_online_map); 1070 cpu_set(me, cpu_online_map);
1010 cpu_set(me, cpu_callout_map); 1071 cpu_set(me, cpu_callout_map);
1011 cpu_set(0, cpu_sibling_map[0]);
1012 cpu_set(0, cpu_core_map[0]);
1013 per_cpu(cpu_state, me) = CPU_ONLINE; 1072 per_cpu(cpu_state, me) = CPU_ONLINE;
1014} 1073}
1015 1074
@@ -1062,9 +1121,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
1062 */ 1121 */
1063void __init smp_cpus_done(unsigned int max_cpus) 1122void __init smp_cpus_done(unsigned int max_cpus)
1064{ 1123{
1065#ifndef CONFIG_HOTPLUG_CPU
1066 zap_low_mappings();
1067#endif
1068 smp_cleanup_boot(); 1124 smp_cleanup_boot();
1069 1125
1070#ifdef CONFIG_X86_IO_APIC 1126#ifdef CONFIG_X86_IO_APIC
@@ -1081,15 +1137,24 @@ void __init smp_cpus_done(unsigned int max_cpus)
1081static void remove_siblinginfo(int cpu) 1137static void remove_siblinginfo(int cpu)
1082{ 1138{
1083 int sibling; 1139 int sibling;
1140 struct cpuinfo_x86 *c = cpu_data;
1084 1141
1142 for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
1143 cpu_clear(cpu, cpu_core_map[sibling]);
1144 /*
1145 * last thread sibling in this cpu core going down
1146 */
1147 if (cpus_weight(cpu_sibling_map[cpu]) == 1)
1148 c[sibling].booted_cores--;
1149 }
1150
1085 for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 1151 for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
1086 cpu_clear(cpu, cpu_sibling_map[sibling]); 1152 cpu_clear(cpu, cpu_sibling_map[sibling]);
1087 for_each_cpu_mask(sibling, cpu_core_map[cpu])
1088 cpu_clear(cpu, cpu_core_map[sibling]);
1089 cpus_clear(cpu_sibling_map[cpu]); 1153 cpus_clear(cpu_sibling_map[cpu]);
1090 cpus_clear(cpu_core_map[cpu]); 1154 cpus_clear(cpu_core_map[cpu]);
1091 phys_proc_id[cpu] = BAD_APICID; 1155 phys_proc_id[cpu] = BAD_APICID;
1092 cpu_core_id[cpu] = BAD_APICID; 1156 cpu_core_id[cpu] = BAD_APICID;
1157 cpu_clear(cpu, cpu_sibling_setup_map);
1093} 1158}
1094 1159
1095void remove_cpu_from_maps(void) 1160void remove_cpu_from_maps(void)
@@ -1153,6 +1218,12 @@ void __cpu_die(unsigned int cpu)
1153 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 1218 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1154} 1219}
1155 1220
1221static __init int setup_additional_cpus(char *s)
1222{
1223 return get_option(&s, &additional_cpus);
1224}
1225__setup("additional_cpus=", setup_additional_cpus);
1226
1156#else /* ... !CONFIG_HOTPLUG_CPU */ 1227#else /* ... !CONFIG_HOTPLUG_CPU */
1157 1228
1158int __cpu_disable(void) 1229int __cpu_disable(void)