author		Mike Travis <travis@sgi.com>	2009-01-04 08:18:03 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-04 09:39:26 -0500
commit		c2d1cec1c77f7714672c1efeae075424c929e0d5 (patch)
tree		94afecf37405b93b6807377e4e99cc2ac9323034 /arch/x86/kernel
parent		588235bb53f2c215f0d4b08fd30b461fedc3338e (diff)
x86: cleanup remaining cpumask_t ops in smpboot code
Impact: use new cpumask API to reduce memory and stack usage

Allocate the following local cpumasks based on the number of cpus that
are present.  References will use the new cpumask API.  (Currently only
modified for x86_64; x86_32 continues to use the *_map variants.)

    cpu_callin_mask
    cpu_callout_mask
    cpu_initialized_mask
    cpu_sibling_setup_mask

Provide the following accessor functions:

    struct cpumask *cpu_sibling_mask(int cpu)
    struct cpumask *cpu_core_mask(int cpu)

Other changes: when setting or clearing the cpu online, possible or
present maps, use the accessor functions.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
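The cpu_sibling_mask()/cpu_core_mask() accessors themselves are introduced outside
this directory, so they do not appear in the diffstat below (which is limited to
arch/x86/kernel).  As a minimal sketch, assuming they are nothing more than thin
inline wrappers over the existing per-cpu cpu_sibling_map/cpu_core_map variables,
they would look roughly like:

    /* Sketch only: hypothetical wrappers over the per-cpu maps; the real
     * definitions added by this commit live in a header outside this diff. */
    static inline struct cpumask *cpu_sibling_mask(int cpu)
    {
            return &per_cpu(cpu_sibling_map, cpu);
    }

    static inline struct cpumask *cpu_core_mask(int cpu)
    {
            return &per_cpu(cpu_core_map, cpu);
    }

Callers then manipulate the returned masks only through the cpumask API
(cpumask_set_cpu(), cpumask_clear_cpu(), cpumask_copy(), for_each_cpu()), which is
exactly what the smpboot.c hunks below do.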
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/common.c	 26
-rw-r--r--	arch/x86/kernel/setup_percpu.c	 25
-rw-r--r--	arch/x86/kernel/smp.c		 17
-rw-r--r--	arch/x86/kernel/smpboot.c	128
4 files changed, 123 insertions(+), 73 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3f95a40f718a..83492b1f93b1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -40,6 +40,26 @@
 
 #include "cpu.h"
 
+#ifdef CONFIG_X86_64
+
+/* all of these masks are initialized in setup_cpu_local_masks() */
+cpumask_var_t cpu_callin_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_initialized_mask;
+
+/* representing cpus for which sibling maps can be computed */
+cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+cpumask_t cpu_initialized;
+cpumask_t cpu_sibling_setup_map;
+
+#endif /* CONFIG_X86_32 */
+
+
 static struct cpu_dev *this_cpu __cpuinitdata;
 
 #ifdef CONFIG_X86_64
@@ -856,8 +876,6 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
-cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-
 #ifdef CONFIG_X86_64
 struct x8664_pda **_cpu_pda __read_mostly;
 EXPORT_SYMBOL(_cpu_pda);
@@ -976,7 +994,7 @@ void __cpuinit cpu_init(void)
 
 	me = current;
 
-	if (cpu_test_and_set(cpu, cpu_initialized))
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1085,7 +1103,7 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	if (cpu_test_and_set(cpu, cpu_initialized)) {
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
 		for (;;) local_irq_enable();
 	}
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a4b619c33106..aa55764602b1 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -131,7 +131,27 @@ static void __init setup_cpu_pda_map(void)
 	/* point to new pointer table */
 	_cpu_pda = new_cpu_pda;
 }
-#endif
+
+#endif /* CONFIG_SMP && CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
+static void setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
 
 /*
  * Great future plan:
@@ -187,6 +207,9 @@ void __init setup_per_cpu_areas(void)
 
 	/* Setup node to cpumask map */
 	setup_node_to_cpumask_map();
+
+	/* Setup cpu initialized, callin, callout masks */
+	setup_cpu_local_masks();
 }
 
 #endif
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index beea2649a240..182135ba1eaf 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -128,16 +128,23 @@ void native_send_call_func_single_ipi(int cpu)
 
 void native_send_call_func_ipi(const struct cpumask *mask)
 {
-	cpumask_t allbutself;
+	cpumask_var_t allbutself;
 
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
+	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+		return;
+	}
 
-	if (cpus_equal(*mask, allbutself) &&
-	    cpus_equal(cpu_online_map, cpu_callout_map))
+	cpumask_copy(allbutself, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), allbutself);
+
+	if (cpumask_equal(mask, allbutself) &&
+	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+	free_cpumask_var(allbutself);
 }
 
 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6bd4d9b73870..00e17e589482 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,9 +102,6 @@ EXPORT_SYMBOL(smp_num_siblings);
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -120,9 +117,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 static atomic_t init_deasserted;
 
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
 /* Set if we find a B stepping CPU */
 static int __cpuinitdata smp_b_stepping;
 
@@ -140,7 +134,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpu_set(cpu, node_to_cpumask_map[node]);
+	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = node;
 }
 
@@ -151,7 +145,7 @@ static void unmap_cpu_to_node(int cpu)
 
 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NUMNODES; node++)
-		cpu_clear(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -209,7 +203,7 @@ static void __cpuinit smp_callin(void)
 	 */
 	phys_id = read_apic_id();
 	cpuid = smp_processor_id();
-	if (cpu_isset(cpuid, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
 		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 					phys_id, cpuid);
 	}
@@ -231,7 +225,7 @@ static void __cpuinit smp_callin(void)
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (cpu_isset(cpuid, cpu_callout_map))
+		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
 			break;
 		cpu_relax();
 	}
@@ -274,7 +268,7 @@ static void __cpuinit smp_callin(void)
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
 static int __cpuinitdata unsafe_smp;
@@ -332,7 +326,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	ipi_call_lock();
 	lock_vector_lock();
 	__setup_vector_irq(smp_processor_id());
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -438,50 +432,52 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	int i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	cpu_set(cpu, cpu_sibling_setup_map);
+	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
+		for_each_cpu(i, cpu_sibling_setup_mask) {
+			struct cpuinfo_x86 *o = &cpu_data(i);
+
+			if (c->phys_proc_id == o->phys_proc_id &&
+			    c->cpu_core_id == o->cpu_core_id) {
+				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
+				cpumask_set_cpu(i, cpu_core_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_core_mask(i));
+				cpumask_set_cpu(i, &c->llc_shared_map);
+				cpumask_set_cpu(cpu, &o->llc_shared_map);
 			}
 		}
 	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpu_set(cpu, c->llc_shared_map);
+	cpumask_set_cpu(cpu, &c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
 	}
 
-	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, &c->llc_shared_map);
+			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
 				/*
 				 * for each core in package, increment
 				 * the booted_cores for this new cpu
 				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+				if (cpumask_first(cpu_sibling_mask(i)) == i)
 					c->booted_cores++;
 				/*
 				 * increment the core count for all
@@ -504,7 +500,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return &per_cpu(cpu_core_map, cpu);
+		return cpu_core_mask(cpu);
 	else
 		return &c->llc_shared_map;
 }
@@ -523,7 +519,7 @@ static void impress_friends(void)
 	 */
 	pr_debug("Before bogomips.\n");
 	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, cpu_callout_map))
+		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 			bogosum += cpu_data(cpu).loops_per_jiffy;
 	printk(KERN_INFO
 		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@ -904,19 +900,19 @@ do_rest:
 	 * allow APs to start initializing.
 	 */
 	pr_debug("Before Callout %d.\n", cpu);
-	cpu_set(cpu, cpu_callout_map);
+	cpumask_set_cpu(cpu, cpu_callout_mask);
 	pr_debug("After Callout %d.\n", cpu);
 
 	/*
 	 * Wait 5s total for a response
 	 */
 	for (timeout = 0; timeout < 50000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, cpu_callin_mask))
 			break;	/* It has booted */
 		udelay(100);
 	}
 
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		pr_debug("OK.\n");
 		printk(KERN_INFO "CPU%d: ", cpu);
@@ -941,9 +937,14 @@ restore_state:
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
-		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-		cpu_clear(cpu, cpu_present_map);
+
+		/* was set by do_boot_cpu() */
+		cpumask_clear_cpu(cpu, cpu_callout_mask);
+
+		/* was set by cpu_init() */
+		cpumask_clear_cpu(cpu, cpu_initialized_mask);
+
+		set_cpu_present(cpu, false);
 		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
@@ -977,7 +978,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 	/*
 	 * Already booted CPU?
 	 */
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 		pr_debug("do_boot_cpu %d Already started\n", cpu);
 		return -ENOSYS;
 	}
@@ -1032,8 +1033,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	cpu_present_map = cpumask_of_cpu(0);
-	cpu_possible_map = cpumask_of_cpu(0);
+	/* use the read/write pointers to the present and possible maps */
+	cpumask_copy(&cpu_present_map, cpumask_of(0));
+	cpumask_copy(&cpu_possible_map, cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1041,8 +1043,8 @@ static __init void disable_smp(void)
 	else
 		physid_set_mask_of_physid(0, &phys_cpu_present_map);
 	map_cpu_to_logical_apicid();
-	cpu_set(0, per_cpu(cpu_sibling_map, 0));
-	cpu_set(0, per_cpu(cpu_core_map, 0));
+	cpumask_set_cpu(0, cpu_sibling_mask(0));
+	cpumask_set_cpu(0, cpu_core_mask(0));
 }
 
 /*
@@ -1064,14 +1066,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 		nr = 0;
 		for_each_present_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
 			nr++;
 		}
 
 		nr = 0;
 		for_each_possible_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_possible_map);
+				set_cpu_possible(cpu, false);
 			nr++;
 		}
 
@@ -1167,7 +1169,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	preempt_disable();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
-	cpu_callin_map = cpumask_of_cpu(0);
+	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
 	 * Setup boot CPU information
@@ -1242,8 +1244,8 @@ void __init native_smp_prepare_boot_cpu(void)
 	init_gdt(me);
 #endif
 	switch_to_new_gdt();
-	/* already set me in cpu_online_map in boot_cpu_init() */
-	cpu_set(me, cpu_callout_map);
+	/* already set me in cpu_online_mask in boot_cpu_init() */
+	cpumask_set_cpu(me, cpu_callout_mask);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
 }
 
@@ -1311,7 +1313,7 @@ __init void prefill_possible_map(void)
 		possible, max_t(int, possible - num_processors, 0));
 
 	for (i = 0; i < possible; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 
 	nr_cpu_ids = possible;
 }
@@ -1323,31 +1325,31 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+	for_each_cpu(sibling, cpu_core_mask(cpu)) {
+		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
+	for_each_cpu(sibling, cpu_sibling_mask(cpu))
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+	cpumask_clear(cpu_sibling_mask(cpu));
+	cpumask_clear(cpu_core_mask(cpu));
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
+	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
 static void __ref remove_cpu_from_maps(int cpu)
 {
-	cpu_clear(cpu, cpu_online_map);
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
+	set_cpu_online(cpu, false);
+	cpumask_clear_cpu(cpu, cpu_callout_mask);
+	cpumask_clear_cpu(cpu, cpu_callin_mask);
 	/* was set by cpu_init() */
-	cpu_clear(cpu, cpu_initialized);
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	numa_remove_cpu(cpu);
 }
 
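Beyond the accessor conversion, the smp.c hunk above also shows the dynamic
cpumask pattern that replaces on-stack cpumask_t variables: allocate a
cpumask_var_t, fall back gracefully if the allocation fails, operate on it only
through the cpumask API, then free it.  A condensed, hypothetical caller (not
part of this commit) would follow the same shape:

	/* Sketch of the cpumask_var_t pattern used above (hypothetical caller). */
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return;					/* allocation can fail; take a fallback path */

	cpumask_copy(tmp, cpu_online_mask);		/* start from the online map */
	cpumask_clear_cpu(smp_processor_id(), tmp);	/* drop the local cpu */

	/* ... walk the mask with for_each_cpu(cpu, tmp), send IPIs, etc. ... */

	free_cpumask_var(tmp);				/* always release the mask */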