author	Rusty Russell <rusty@rustcorp.com.au>	2009-03-13 00:19:54 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-03-13 00:19:54 -0400
commit	4f0628963c86d2f97b8cb9acc024a7fe288a6a57 (patch)
tree	83e7592c0706f96979628802344d886481a98b07
parent	3f76a183de8ad3aeb7425f3d9685bb6003abd1a5 (diff)
cpumask: use new cpumask functions throughout x86
Impact: cleanup

1) &cpu_online_map -> cpu_online_mask
2) first_cpu/next_cpu_nr -> cpumask_first/cpumask_next
3) cpu_*_map manipulation -> init_cpu_* / set_cpu_*

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
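[Editor's note] A minimal sketch, not part of the patch, of the three conversion patterns named above, written against <linux/cpumask.h>; cpumask_demo() is a hypothetical helper, and since kernel context is assumed this is illustrative rather than a standalone program.

/*
 * Sketch of the old -> new cpumask API usage (assumed kernel context).
 */
#include <linux/cpumask.h>

static void cpumask_demo(void)
{
	const struct cpumask *mask;
	unsigned int cpu;

	/* 1) &cpu_online_map -> cpu_online_mask (a const struct cpumask *) */
	mask = cpu_online_mask;

	/* 2) first_cpu()/next_cpu_nr() -> cpumask_first()/cpumask_next() */
	cpu = cpumask_first(mask);
	if (cpu < nr_cpu_ids)
		cpu = cpumask_next(cpu, mask);

	/* 3) direct cpu_*_map writes -> init_cpu_*()/set_cpu_*() accessors */
	init_cpu_possible(cpumask_of(0));	/* was cpumask_copy(&cpu_possible_map, ...) */
	set_cpu_present(0, true);		/* was cpu_set(0, cpu_present_map) */
	set_cpu_online(0, false);		/* was cpu_clear(0, cpu_online_map) */
}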
-rw-r--r--	arch/x86/include/asm/topology.h	4
-rw-r--r--	arch/x86/kernel/apic/bigsmp_32.c	16
-rw-r--r--	arch/x86/kernel/apic/es7000_32.c	6
-rw-r--r--	arch/x86/kernel/apic/summit_32.c	2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel_64.c	2
-rw-r--r--	arch/x86/kernel/cpu/proc.c	4
-rw-r--r--	arch/x86/kernel/process.c	2
-rw-r--r--	arch/x86/kernel/smpboot.c	11
-rw-r--r--	arch/x86/xen/smp.c	6
9 files changed, 26 insertions, 27 deletions
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index f8b833e1257f..1ce1e1afa801 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -174,11 +174,11 @@ static inline int early_cpu_to_node(int cpu)
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 static inline int node_to_first_cpu(int node)
 {
-	return first_cpu(cpu_online_map);
+	return cpumask_first(cpu_online_mask);
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index d806ecaa948f..676cdac385c0 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const cpumask_t *bigsmp_target_cpus(void)
+static const struct cpumask *bigsmp_target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static int probe_bigsmp(void)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1322f5409e20..26d3a3eba75b 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,9 +460,9 @@ static const cpumask_t *target_cpus_cluster(void)
 	return cpu_all_mask;
 }
 
-static const cpumask_t *es7000_target_cpus(void)
+static const struct cpumask *es7000_target_cpus(void)
 {
-	return &cpumask_of_cpu(smp_processor_id());
+	return cpumask_of(smp_processor_id());
 }
 
 static unsigned long
@@ -517,7 +517,7 @@ static void es7000_setup_apic_routing(void)
 		"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
-		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
 static int es7000_apicid_to_node(int logical_apicid)
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index aac52fa873ff..7f6bd908da46 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -192,7 +192,7 @@ static const cpumask_t *summit_target_cpus(void)
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0. IRQ balancing will spread load
 	 */
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index aaa7d9730938..96b2a85545aa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying)
 	for_each_online_cpu (cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 			continue;
 		/* Recheck banks in case CPUs don't all have the same */
 		if (cmci_supported(&banks))
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4dd610e226e0..f93047fed791 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_online_map);
+		*pos = cpumask_first(cpu_online_mask);
 	else
-		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
 	if ((*pos) < nr_cpu_ids)
 		return &cpu_data(*pos);
 	return NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cad5431951aa..6638294cec8d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -324,7 +324,7 @@ void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
 
 	for (;;) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index d6427aa56966..58d24ef917d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -296,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	__flush_tlb_all();
 #endif
 
-	/* This must be done before setting cpu_online_map */
+	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
 
@@ -904,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-	/* use the read/write pointers to the present and possible maps */
-	cpumask_copy(&cpu_present_map, cpumask_of(0));
-	cpumask_copy(&cpu_possible_map, cpumask_of(0));
+	init_cpu_present(cpumask_of(0));
+	init_cpu_possible(cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1149,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8d470562ffc9..585a6e330837 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 		}
 	}
 }
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
-		cpu_clear(cpu, cpu_possible_map);
+		set_cpu_possible(cpu, false);
 	}
 
 	for_each_possible_cpu (cpu) {
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		if (IS_ERR(idle))
 			panic("failed fork for CPU %d", cpu);
 
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 	}
 }
 