author		Ingo Molnar <mingo@elte.hu>	2009-01-10 17:56:42 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-10 17:56:42 -0500
commit		1de8cd3cb9f61e854e743c7210df43db517d4832 (patch)
tree		2b69c5ba5e4094037fa04d0fcb6c4537c222cde8 /arch/x86/kernel/smpboot.c
parent		1eb1b3b65dc3e3ffcc6a60e115c085c0c11c1077 (diff)
parent		3d14bdad40315b54470cb7812293d14c8af2bf7d (diff)
Merge branch 'linus' into x86/cleanups
Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r--  arch/x86/kernel/smpboot.c | 128
1 file changed, 65 insertions(+), 63 deletions(-)
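This merge pulls the mainline cpumask rework into x86/cleanups: the file-scope cpumask_t variables (cpu_callin_map, cpu_callout_map, cpu_sibling_setup_map) become masks referenced through pointers (cpu_callin_mask, cpu_callout_mask, cpu_sibling_setup_mask), the old by-value cpu_set()/cpu_isset()/cpu_clear()/cpus_weight() operations are replaced by the struct-cpumask API (cpumask_set_cpu(), cpumask_test_cpu(), cpumask_clear_cpu(), cpumask_weight()), and the global online/present/possible maps are updated through the set_cpu_online()/set_cpu_present()/set_cpu_possible() helpers. As a rough illustration of the pattern, here is a minimal sketch — hypothetical module code, not taken from this patch; demo_mask and demo_masks() are invented names:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/kernel.h>

	static cpumask_var_t demo_mask;	/* invented example mask */

	static int __init demo_masks(void)
	{
		/* new API: masks are allocated and passed as struct cpumask * */
		if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_clear(demo_mask);

		/* old: cpu_set(0, demo_map); */
		cpumask_set_cpu(0, demo_mask);

		/* old: cpu_isset(0, demo_map); */
		if (cpumask_test_cpu(0, demo_mask))
			pr_info("cpu 0 set, weight %u\n",
				cpumask_weight(demo_mask));

		/* old: cpu_clear(0, demo_map); */
		cpumask_clear_cpu(0, demo_mask);

		free_cpumask_var(demo_mask);
		return 0;
	}

With CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var() allocates only nr_cpu_ids bits instead of a full NR_CPUS-sized bitmap; otherwise cpumask_var_t degrades to an ordinary array and the calls compile away, so the same code works in both configurations.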
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f8c885bed18c..6c2b8444b830 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -101,9 +101,6 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -119,9 +116,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 static atomic_t init_deasserted;
 
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
 /* Set if we find a B stepping CPU */
 static int __cpuinitdata smp_b_stepping;
 
@@ -139,7 +133,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpu_set(cpu, node_to_cpumask_map[node]);
+	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = node;
 }
 
@@ -150,7 +144,7 @@ static void unmap_cpu_to_node(int cpu)
 
 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NUMNODES; node++)
-		cpu_clear(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -208,7 +202,7 @@ static void __cpuinit smp_callin(void)
 	 */
 	phys_id = read_apic_id();
 	cpuid = smp_processor_id();
-	if (cpu_isset(cpuid, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
 		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 					phys_id, cpuid);
 	}
@@ -230,7 +224,7 @@ static void __cpuinit smp_callin(void)
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (cpu_isset(cpuid, cpu_callout_map))
+		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
 			break;
 		cpu_relax();
 	}
@@ -273,7 +267,7 @@ static void __cpuinit smp_callin(void)
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
 static int __cpuinitdata unsafe_smp;
@@ -331,7 +325,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	ipi_call_lock();
 	lock_vector_lock();
 	__setup_vector_irq(smp_processor_id());
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -437,50 +431,52 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	int i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	cpu_set(cpu, cpu_sibling_setup_map);
+	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
+		for_each_cpu(i, cpu_sibling_setup_mask) {
+			struct cpuinfo_x86 *o = &cpu_data(i);
+
+			if (c->phys_proc_id == o->phys_proc_id &&
+			    c->cpu_core_id == o->cpu_core_id) {
+				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
+				cpumask_set_cpu(i, cpu_core_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_core_mask(i));
+				cpumask_set_cpu(i, &c->llc_shared_map);
+				cpumask_set_cpu(cpu, &o->llc_shared_map);
 			}
 		}
 	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpu_set(cpu, c->llc_shared_map);
+	cpumask_set_cpu(cpu, &c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
 	}
 
-	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, &c->llc_shared_map);
+			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+				if (cpumask_first(cpu_sibling_mask(i)) == i)
 					c->booted_cores++;
 				/*
 				 * increment the core count for all
@@ -503,7 +499,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return &per_cpu(cpu_core_map, cpu);
+		return cpu_core_mask(cpu);
 	else
 		return &c->llc_shared_map;
 }
@@ -522,7 +518,7 @@ static void impress_friends(void)
 	 */
 	pr_debug("Before bogomips.\n");
 	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, cpu_callout_map))
+		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 			bogosum += cpu_data(cpu).loops_per_jiffy;
 	printk(KERN_INFO
 		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@ -903,19 +899,19 @@ do_rest:
 	 * allow APs to start initializing.
 	 */
 	pr_debug("Before Callout %d.\n", cpu);
-	cpu_set(cpu, cpu_callout_map);
+	cpumask_set_cpu(cpu, cpu_callout_mask);
 	pr_debug("After Callout %d.\n", cpu);
 
 	/*
 	 * Wait 5s total for a response
 	 */
 	for (timeout = 0; timeout < 50000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, cpu_callin_mask))
 			break;	/* It has booted */
 		udelay(100);
 	}
 
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		pr_debug("OK.\n");
 		printk(KERN_INFO "CPU%d: ", cpu);
@@ -940,9 +936,14 @@ restore_state:
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
-		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-		cpu_clear(cpu, cpu_present_map);
+
+		/* was set by do_boot_cpu() */
+		cpumask_clear_cpu(cpu, cpu_callout_mask);
+
+		/* was set by cpu_init() */
+		cpumask_clear_cpu(cpu, cpu_initialized_mask);
+
+		set_cpu_present(cpu, false);
 		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
@@ -976,7 +977,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 	/*
 	 * Already booted CPU?
 	 */
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 		pr_debug("do_boot_cpu %d Already started\n", cpu);
 		return -ENOSYS;
 	}
@@ -1031,8 +1032,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	cpu_present_map = cpumask_of_cpu(0);
-	cpu_possible_map = cpumask_of_cpu(0);
+	/* use the read/write pointers to the present and possible maps */
+	cpumask_copy(&cpu_present_map, cpumask_of(0));
+	cpumask_copy(&cpu_possible_map, cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1040,8 +1042,8 @@ static __init void disable_smp(void)
 	else
 		physid_set_mask_of_physid(0, &phys_cpu_present_map);
 	map_cpu_to_logical_apicid();
-	cpu_set(0, per_cpu(cpu_sibling_map, 0));
-	cpu_set(0, per_cpu(cpu_core_map, 0));
+	cpumask_set_cpu(0, cpu_sibling_mask(0));
+	cpumask_set_cpu(0, cpu_core_mask(0));
 }
 
 /*
@@ -1063,14 +1065,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 		nr = 0;
 		for_each_present_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
 			nr++;
 		}
 
 		nr = 0;
 		for_each_possible_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_possible_map);
+				set_cpu_possible(cpu, false);
 			nr++;
 		}
 
@@ -1166,7 +1168,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	preempt_disable();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
-	cpu_callin_map = cpumask_of_cpu(0);
+	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
 	 * Setup boot CPU information
@@ -1241,8 +1243,8 @@ void __init native_smp_prepare_boot_cpu(void)
 	init_gdt(me);
 #endif
 	switch_to_new_gdt();
-	/* already set me in cpu_online_map in boot_cpu_init() */
-	cpu_set(me, cpu_callout_map);
+	/* already set me in cpu_online_mask in boot_cpu_init() */
+	cpumask_set_cpu(me, cpu_callout_mask);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
 }
 
@@ -1310,7 +1312,7 @@ __init void prefill_possible_map(void)
 		possible, max_t(int, possible - num_processors, 0));
 
 	for (i = 0; i < possible; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 
 	nr_cpu_ids = possible;
 }
@@ -1322,31 +1324,31 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+	for_each_cpu(sibling, cpu_core_mask(cpu)) {
+		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
+	for_each_cpu(sibling, cpu_sibling_mask(cpu))
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+	cpumask_clear(cpu_sibling_mask(cpu));
+	cpumask_clear(cpu_core_mask(cpu));
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
+	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
 static void __ref remove_cpu_from_maps(int cpu)
 {
-	cpu_clear(cpu, cpu_online_map);
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
+	set_cpu_online(cpu, false);
+	cpumask_clear_cpu(cpu, cpu_callout_mask);
+	cpumask_clear_cpu(cpu, cpu_callin_mask);
 	/* was set by cpu_init() */
-	cpu_clear(cpu, cpu_initialized);
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	numa_remove_cpu(cpu);
 }
 
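A note on the cpu_sibling_mask()/cpu_core_mask() accessors that replace the direct per_cpu() references above: they hand back a struct cpumask pointer to the existing per-CPU maps, so call sites stop copying NR_CPUS-bit masks by value (with NR_CPUS=4096 a cpumask_t is 512 bytes, as in the removed cpu_core_map assignment in set_cpu_sibling_map()). The definitions in arch/x86/include/asm/smp.h of this era are likely along these lines — reproduced from memory for orientation, not quoted from the tree:

	/* likely shape of the accessors; see arch/x86/include/asm/smp.h */
	static inline struct cpumask *cpu_sibling_mask(int cpu)
	{
		return &per_cpu(cpu_sibling_map, cpu);	/* cpumask_t * == struct cpumask * */
	}

	static inline struct cpumask *cpu_core_mask(int cpu)
	{
		return &per_cpu(cpu_core_map, cpu);
	}

Funneling all access through pointer-returning accessors is what lets the underlying storage later move off the stack (or become cpumask_var_t) without touching the call sites converted in this patch.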