author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 13:19:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-20 13:19:03 -0400
commit     6496edfce95f943e1da43631c2f437509e56af7f (patch)
tree       6b6e3b6bcc74c038b707a2facf45ee98fd61544e
parent     b19a42e3cb9e73cad59e60ab7403e5afe7f4b262 (diff)
parent     e4afa120c98252e44390067c3a6cc775cde30659 (diff)
Merge tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull final removal of deprecated cpus_* cpumask functions from Rusty Russell:
"This is the final removal (after several years!) of the obsolete
cpus_* functions, prompted by their mis-use in staging.
With these functions removed, all cpu functions should only iterate to
nr_cpu_ids, so we finally only allocate that many bits when cpumasks
are allocated offstack"
* tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (25 commits)
cpumask: remove __first_cpu / __next_cpu
cpumask: resurrect CPU_MASK_CPU0
linux/cpumask.h: add typechecking to cpumask_test_cpu
cpumask: only allocate nr_cpumask_bits.
Fix weird uses of num_online_cpus().
cpumask: remove deprecated functions.
mips: fix obsolete cpumask_of_cpu usage.
x86: fix more deprecated cpu function usage.
ia64: remove deprecated cpus_ usage.
powerpc: fix deprecated CPU_MASK_CPU0 usage.
CPU_MASK_ALL/CPU_MASK_NONE: remove from deprecated region.
staging/lustre/o2iblnd: Don't use cpus_weight
staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
staging/lustre/ptlrpc: Do not use deprecated cpus_* functions
blackfin: fix up obsolete cpu function usage.
parisc: fix up obsolete cpu function usage.
tile: fix up obsolete cpu function usage.
arm64: fix up obsolete cpu function usage.
mips: fix up obsolete cpu function usage.
x86: fix up obsolete cpu function usage.
...
51 files changed, 170 insertions(+), 358 deletions(-)
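The "only allocate that many bits" point concerns CONFIG_CPUMASK_OFFSTACK
builds, where cpumask_var_t masks are allocated from the heap; once nothing
iterates past nr_cpu_ids, the allocation can shrink to nr_cpumask_bits. A
hedged sketch of the offstack idiom, using the long-standing cpumask_var_t
API rather than code from this merge:

	cpumask_var_t mask;

	/* On CONFIG_CPUMASK_OFFSTACK kernels this allocates nr_cpumask_bits bits. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_online_mask);
	/* ... use mask ... */
	free_cpumask_var(mask);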
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a0b005d2bd95..f9ad5e048b11 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -108,7 +108,7 @@ Never use anything other than cpumask_t to represent bitmap of CPUs.
 for_each_possible_cpu - Iterate over cpu_possible_mask
 for_each_online_cpu - Iterate over cpu_online_mask
 for_each_present_cpu - Iterate over cpu_present_mask
-for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
+for_each_cpu(x,mask) - Iterate over some random collection of cpu mask.
 
 #include <linux/cpu.h>
 get_online_cpus() and put_online_cpus():
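For reference, a hedged sketch of the renamed iterator combined with the
get_online_cpus()/put_online_cpus() guard this document describes;
count_online_example() is a hypothetical helper, not part of the patched file:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static unsigned int count_online_example(void)
	{
		unsigned int cpu, n = 0;

		get_online_cpus();	/* hold off CPU hotplug while walking the mask */
		for_each_cpu(cpu, cpu_online_mask)	/* was: for_each_cpu_mask() */
			n++;
		put_online_cpus();
		return n;
	}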
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffe8e1b814e0..714411f62391 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -636,7 +636,7 @@ void smp_send_stop(void)
 		cpumask_t mask;
 
 		cpumask_copy(&mask, cpu_online_mask);
-		cpu_clear(smp_processor_id(), mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
 
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 11789beca75a..8c0c80fd1a45 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -124,7 +124,7 @@ void platform_send_ipi(cpumask_t callmap, int irq)
 	unsigned int cpu;
 	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 
-	for_each_cpu_mask(cpu, callmap) {
+	for_each_cpu(cpu, &callmap) {
 		BUG_ON(cpu >= 2);
 		SSYNC();
 		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a1d91ab4c5ef..aa0fdf125aba 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu) \
-	for_each_cpu_mask((cpu), early_cpu_possible_map)
+	for_each_cpu((cpu), &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
@@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 	int cpu;
 	int next_nid = 0;
 
-	low_cpu = cpus_weight(early_cpu_possible_map);
+	low_cpu = cpumask_weight(&early_cpu_possible_map);
 
 	high_cpu = max(low_cpu, min_cpus);
 	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
 
 	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
-		cpu_set(cpu, early_cpu_possible_map);
+		cpumask_set_cpu(cpu, &early_cpu_possible_map);
 		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
 			node_cpuid[cpu].nid = next_nid;
 			next_nid++;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2c4498919d3c..35bf22cc71b7 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		(pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
-	cpu_set(srat_num_cpus, early_cpu_possible_map);
+	cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
 	srat_num_cpus++;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cd44a57c73be..bc9501e36e77 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -690,7 +690,7 @@ skip_numa_setup:
 	do {
 		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
-	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
+	} while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
 
 	return cpu_physical_id(cpu);
 #else /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 698d8fefde6c..eaa3199f98c8 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain)
 	int pos, vector;
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
 		vector = IA64_FIRST_DEVICE_VECTOR + pos;
-		cpus_and(mask, domain, vector_table[vector]);
-		if (!cpus_empty(mask))
+		cpumask_and(&mask, &domain, &vector_table[vector]);
+		if (!cpumask_empty(&mask))
 			continue;
 		return vector;
 	}
@@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
-	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
 		return 0;
 	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
 		return -EBUSY;
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, &mask)
 		per_cpu(vector_irq, cpu)[vector] = irq;
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	cpus_or(vector_table[vector], vector_table[vector], domain);
+	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
 	return 0;
 }
 
@@ -161,7 +161,6 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 static void __clear_irq_vector(int irq)
 {
 	int vector, cpu;
-	cpumask_t mask;
 	cpumask_t domain;
 	struct irq_cfg *cfg = &irq_cfg[irq];
 
@@ -169,13 +168,12 @@ static void __clear_irq_vector(int irq)
 	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
 	vector = cfg->vector;
 	domain = cfg->domain;
-	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
-	cpus_andnot(vector_table[vector], vector_table[vector], domain);
+	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -244,7 +242,7 @@ void __setup_vector_irq(int cpu)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
 			continue;
 		vector = irq_to_vector(irq);
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -261,7 +259,7 @@ static enum vector_domain_type {
 static cpumask_t vector_allocation_domain(int cpu)
 {
 	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
-		return cpumask_of_cpu(cpu);
+		return *cpumask_of(cpu);
 	return CPU_MASK_ALL;
 }
 
@@ -275,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu)
 		return -EBUSY;
 	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
 		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
+	if (cpumask_test_cpu(cpu, &cfg->domain))
 		return 0;
 	domain = vector_allocation_domain(cpu);
 	vector = find_unassigned_vector(domain);
@@ -309,12 +307,12 @@ void irq_complete_move(unsigned irq)
 	if (likely(!cfg->move_in_progress))
 		return;
 
-	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
 		return;
 
 	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
-	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-	for_each_cpu_mask(i, cleanup_mask)
+	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
+	for_each_cpu(i, &cleanup_mask)
 		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
 	cfg->move_in_progress = 0;
 }
@@ -340,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if (!cpu_isset(me, cfg->old_domain))
+		if (!cpumask_test_cpu(me, &cfg->old_domain))
 			goto unlock;
 
 		spin_lock_irqsave(&vector_lock, flags);
 		__this_cpu_write(vector_irq[vector], -1);
-		cpu_clear(me, vector_table[vector]);
+		cpumask_clear_cpu(me, &vector_table[vector]);
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
 unlock:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8bfd36af46f8..dd5801eb4c69 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		monarch_cpu = cpu;
 		sos->monarch = 1;
 	} else {
-		cpu_set(cpu, mca_cpu);
+		cpumask_set_cpu(cpu, &mca_cpu);
 		sos->monarch = 0;
 	}
 	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		 */
 		ia64_mca_wakeup_all();
 	} else {
-		while (cpu_isset(cpu, mca_cpu))
+		while (cpumask_test_cpu(cpu, &mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
 
@@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * and put this cpu in the rendez loop.
 	 */
 	for_each_online_cpu(i) {
-		if (cpu_isset(i, mca_cpu)) {
+		if (cpumask_test_cpu(i, &mca_cpu)) {
 			monarch_cpu = i;
-			cpu_clear(i, mca_cpu);	/* wake next cpu */
+			cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
 			while (monarch_cpu != -1)
 				cpu_relax();	/* spin until last cpu leaves */
 			set_curr_task(cpu, previous_current);
@@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	ti->cpu = cpu;
 	p->stack = ti;
 	p->state = TASK_UNINTERRUPTIBLE;
-	cpu_set(cpu, p->cpus_allowed);
+	cpumask_set_cpu(cpu, &p->cpus_allowed);
 	INIT_LIST_HEAD(&p->tasks);
 	p->parent = p->real_parent = p->group_leader = p;
 	INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 8ae36ea177d3..9dd7464f8c17 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -47,15 +47,14 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	struct msi_msg msg;
 	unsigned long dest_phys_id;
 	int irq, vector;
-	cpumask_t mask;
 
 	irq = create_irq();
 	if (irq < 0)
 		return irq;
 
 	irq_set_msi_desc(irq, desc);
-	cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
-	dest_phys_id = cpu_physical_id(first_cpu(mask));
+	dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
+						       cpu_online_mask));
 	vector = irq_to_vector(irq);
 
 	msg.address_hi = 0;
@@ -171,10 +170,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	unsigned dest;
-	cpumask_t mask;
 
-	cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
+						 cpu_online_mask));
 
 	msg->address_hi = 0;
 	msg->address_lo =
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index d288cde93606..92c376279c6d 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -39,7 +39,7 @@ void map_cpu_to_node(int cpu, int nid)
 	}
 	/* sanity check first */
 	oldnid = cpu_to_node_map[cpu];
-	if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
+	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
 		return; /* nothing to do */
 	}
 	/* we don't have cpu-driven node hot add yet...
@@ -47,16 +47,16 @@ void map_cpu_to_node(int cpu, int nid)
 	if (!node_online(nid))
 		nid = first_online_node;
 	cpu_to_node_map[cpu] = nid;
-	cpu_set(cpu, node_to_cpu_mask[nid]);
+	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
 	return;
 }
 
 void unmap_cpu_from_node(int cpu, int nid)
 {
-	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
+	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
 	WARN_ON(cpu_to_node_map[cpu] != nid);
 	cpu_to_node_map[cpu] = 0;
-	cpu_clear(cpu, node_to_cpu_mask[nid]);
+	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
 }
 
 
@@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void)
 	int cpu, i, node;
 
 	for(node=0; node < MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
+		cpumask_clear(&node_to_cpu_mask[node]);
 
 	for_each_possible_early_cpu(cpu) {
 		node = -1;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ee9719eebb1e..1eeffb7fbb16 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -256,7 +256,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 			data_saved->buffer = buffer;
 		}
 	}
-	cpu_set(smp_processor_id(), data->cpu_event);
+	cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
 	if (irqsafe) {
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -274,7 +274,7 @@ salinfo_timeout_check(struct salinfo_data *data)
 	unsigned long flags;
 	if (!data->open)
 		return;
-	if (!cpus_empty(data->cpu_event)) {
+	if (!cpumask_empty(&data->cpu_event)) {
 		spin_lock_irqsave(&data_saved_lock, flags);
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -308,7 +308,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
 	int i, n, cpu = -1;
 
 retry:
-	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
+	if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		if (down_interruptible(&data->mutex))
@@ -317,9 +317,9 @@ retry:
 
 	n = data->cpu_check;
 	for (i = 0; i < nr_cpu_ids; i++) {
-		if (cpu_isset(n, data->cpu_event)) {
+		if (cpumask_test_cpu(n, &data->cpu_event)) {
 			if (!cpu_online(n)) {
-				cpu_clear(n, data->cpu_event);
+				cpumask_clear_cpu(n, &data->cpu_event);
 				continue;
 			}
 			cpu = n;
@@ -451,7 +451,7 @@ retry:
 	call_on_cpu(cpu, salinfo_log_read_cpu, data);
 	if (!data->log_size) {
 		data->state = STATE_NO_DATA;
-		cpu_clear(cpu, data->cpu_event);
+		cpumask_clear_cpu(cpu, &data->cpu_event);
 	} else {
 		data->state = STATE_LOG_RECORD;
 	}
@@ -491,11 +491,11 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 	unsigned long flags;
 	spin_lock_irqsave(&data_saved_lock, flags);
 	data->state = STATE_NO_DATA;
-	if (!cpu_isset(cpu, data->cpu_event)) {
+	if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		return 0;
 	}
-	cpu_clear(cpu, data->cpu_event);
+	cpumask_clear_cpu(cpu, &data->cpu_event);
 	if (data->saved_num) {
 		shift1_data_saved(data, data->saved_num - 1);
 		data->saved_num = 0;
@@ -509,7 +509,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 	salinfo_log_new_read(cpu, data);
 	if (data->state == STATE_LOG_RECORD) {
 		spin_lock_irqsave(&data_saved_lock, flags);
-		cpu_set(cpu, data->cpu_event);
+		cpumask_set_cpu(cpu, &data->cpu_event);
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
@@ -581,7 +581,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 		for (i = 0, data = salinfo_data;
 		     i < ARRAY_SIZE(salinfo_data);
 		     ++i, ++data) {
-			cpu_set(cpu, data->cpu_event);
+			cpumask_set_cpu(cpu, &data->cpu_event);
 			salinfo_work_to_do(data);
 		}
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -601,7 +601,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 					shift1_data_saved(data, j);
 				}
 			}
-			cpu_clear(cpu, data->cpu_event);
+			cpumask_clear_cpu(cpu, &data->cpu_event);
 		}
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		break;
@@ -659,7 +659,7 @@ salinfo_init(void)
 
 		/* we missed any events before now */
 		for_each_online_cpu(j)
-			cpu_set(j, data->cpu_event);
+			cpumask_set_cpu(j, &data->cpu_event);
 
 		*sdir++ = dir;
 	}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d86669bcdfb2..b9761389cb8d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -562,8 +562,8 @@ setup_arch (char **cmdline_p)
 # ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
 # endif
-	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)),
+	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+		32 : cpumask_weight(&early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #endif /* CONFIG_APCI_BOOT */
@@ -702,7 +702,8 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
-	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
+	seq_printf(m, "siblings   : %u\n",
+		   cpumask_weight(&cpu_core_map[cpunum]));
 	if (c->socket_id != -1)
 		seq_printf(m, "physical id: %u\n", c->socket_id);
 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
@@ -933,8 +934,8 @@ cpu_init (void)
 	 * (must be done after per_cpu area is setup)
 	 */
 	if (smp_processor_id() == 0) {
-		cpu_set(0, per_cpu(cpu_sibling_map, 0));
-		cpu_set(0, cpu_core_map[0]);
+		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
+		cpumask_set_cpu(0, &cpu_core_map[0]);
 	} else {
 		/*
 		 * Set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9fcd4e63048f..7f706d4f84f7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -262,11 +262,11 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 	preempt_disable();
 	mycpu = smp_processor_id();
 
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu(cpu, &cpumask)
 		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
 
 	mb();
-	for_each_cpu_mask(cpu, cpumask) {
+	for_each_cpu(cpu, &cpumask) {
 		if (cpu == mycpu)
 			flush_mycpu = 1;
 		else
@@ -276,7 +276,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 	if (flush_mycpu)
 		smp_local_flush_tlb();
 
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu(cpu, &cpumask)
 		while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
 			udelay(FLUSH_DELAY);
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 547a48d78bd7..15051e9c2c6f 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -434,7 +434,7 @@ smp_callin (void)
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, &cpu_callin_map);
 	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, &cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (!cpu_isset(cpu, cpu_callin_map)) {
+	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
@@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 	smp_setup_percpu_timer();
 
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 void smp_prepare_boot_cpu(void)
 {
 	set_cpu_online(smp_processor_id(), true);
-	cpu_set(smp_processor_id(), cpu_callin_map);
+	cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
@@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
 
 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
@@ -592,12 +592,12 @@ remove_siblinginfo(int cpu)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
 
-	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
@@ -673,7 +673,7 @@ int __cpu_disable(void)
 	remove_siblinginfo(cpu);
 	fixup_irqs();
 	local_flush_tlb_all();
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	return 0;
 }
 
@@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu)
 
 	for_each_online_cpu(i) {
 		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(i,
+						&per_cpu(cpu_sibling_map, cpu));
+				cpumask_set_cpu(cpu,
+						&per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
@@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 * Already booted cpu? not valid anymore since we dont
 	 * do idle loop tightspin anymore.
 	 */
-	if (cpu_isset(cpu, cpu_callin_map))
+	if (cpumask_test_cpu(cpu, &cpu_callin_map))
 		return -EINVAL;
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-		cpu_set(cpu, cpu_core_map[cpu]);
+		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
 		return 0;
 	}
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 965ab42fabb0..c01fe8991244 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -148,7 +148,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 
 	if (cpu_data(cpu)->threads_per_core <= 1 &&
 	    cpu_data(cpu)->cores_per_socket <= 1) {
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
 		return;
 	}
 
@@ -164,7 +164,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 		if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
 		    && cpu_data(j)->core_id == csi.log1_cid
 		    && cpu_data(j)->thread_id == csi.log1_tid)
-			cpu_set(j, this_leaf->shared_cpu_map);
+			cpumask_set_cpu(j, &this_leaf->shared_cpu_map);
 
 		i++;
 	} while (i < num_shared &&
@@ -177,7 +177,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 static void cache_shared_cpu_map_setup(unsigned int cpu,
 				struct cache_info * this_leaf)
 {
-	cpu_set(cpu, this_leaf->shared_cpu_map);
+	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
 	return;
 }
 #endif
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index bb21f4f63170..a468467542f4 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -376,7 +376,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
 		BUG();
 
-	for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
+	for_each_online_cpu(cpu_id)
 		show_cpu_info(cpu_id);
 
 	/*
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index b94bf44d8d8e..e3e808a6c542 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -58,9 +58,9 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
 
 #ifdef CONFIG_SMP
 	if (m)
-		enable &= cpu_isset(cpu, *m);
+		enable &= cpumask_test_cpu(cpu, m);
 	else if (irqd_affinity_was_set(d))
-		enable &= cpu_isset(cpu, *d->affinity);
+		enable &= cpumask_test_cpu(cpu, d->affinity);
 #endif
 	return enable;
 }
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 8b1eeffa12ed..56f5d080ef9d 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -72,7 +72,7 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, *mask)
+	for_each_cpu(i, mask)
 		octeon_send_ipi_single(i, action);
 }
 
@@ -239,7 +239,7 @@ static int octeon_cpu_disable(void)
 		return -ENOTSUPP;
 
 	set_cpu_online(cpu, false);
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	octeon_fixup_irqs();
 
 	flush_cache_all();
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eacf865d21c2..bb02fac9b4fa 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -88,7 +88,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
 
-	mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
+	mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
 }
 
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d21264681e97..d434d5d5ae6e 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -25,9 +25,9 @@ static void crash_shutdown_secondary(void *ignore)
 		return;
 
 	local_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 
 	while (!atomic_read(&kexec_ready_to_reboot))
 		cpu_relax();
@@ -50,7 +50,7 @@ static void crash_kexec_prepare_cpus(void)
 	 */
 	pr_emerg("Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -66,5 +66,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus();
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 }
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 362bb3707e62..3e4491aa6d6b 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	/* Compute new global allowed CPU set if necessary */
 	ti = task_thread_info(p);
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
-		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+	    cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
 		retval = set_cpus_allowed_ptr(p, effective_mask);
 	} else {
 		cpumask_copy(effective_mask, new_mask);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d295bd1e4996..f2975d4d1e44 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -49,7 +49,7 @@
 void arch_cpu_idle_dead(void)
 {
 	/* What the heck is this check doing ? */
-	if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
 		play_dead();
 }
 #endif
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index b8bd9340c9c7..fd528d7ea278 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -362,7 +362,7 @@ static int bmips_cpu_disable(void)
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
 	set_cpu_online(cpu, false);
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	clear_c0_status(IE_IRQ5);
 
 	local_flush_tlb_all();
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index e36a859af666..d5e0f949dc48 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -66,7 +66,7 @@ static void cmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -110,7 +110,7 @@ void __init cmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
+		cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	for (i = 1; i < NR_CPUS; i++) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index d5589bedd0a4..7e011f95bb8e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -290,7 +290,7 @@ static void cps_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -313,7 +313,7 @@ static int cps_cpu_disable(void)
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
 	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 17ea705f6c40..86311a164ef1 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -178,7 +178,7 @@ static void vsmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -239,7 +239,7 @@ static void __init vsmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
+		cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 	if (!cpu_has_mipsmt)
 		return;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 5b020bda3e05..193ace7955fb 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -75,30 +75,30 @@ static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, cpu_sibling_setup_map);
+	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		for_each_cpu(i, &cpu_sibling_setup_map) {
 			if (cpu_data[cpu].package == cpu_data[i].package &&
 			    cpu_data[cpu].core == cpu_data[i].core) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
 			}
 		}
 	} else
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
 }
 
 static inline void set_cpu_core_map(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, cpu_core_setup_map);
+	cpumask_set_cpu(cpu, &cpu_core_setup_map);
 
-	for_each_cpu_mask(i, cpu_core_setup_map) {
+	for_each_cpu(i, &cpu_core_setup_map) {
 		if (cpu_data[cpu].package == cpu_data[i].package) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_core_map[i]);
 		}
 	}
 }
@@ -138,7 +138,7 @@ asmlinkage void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-	cpu_set(cpu, cpu_coherent_mask);
+	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
 	set_cpu_online(cpu, true);
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
 
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 
 	synchronise_count_slave(cpu);
 
@@ -208,7 +208,7 @@ void smp_prepare_boot_cpu(void)
 {
 	set_cpu_possible(0, true);
 	set_cpu_online(0, true);
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -218,7 +218,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	/*
 	 * Trust is futile. We should really have timeouts ...
	 */
-	while (!cpu_isset(cpu, cpu_callin_map))
+	while (!cpumask_test_cpu(cpu, &cpu_callin_map))
 		udelay(100);
 
 	synchronise_count_master(cpu);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e334c641a81b..ba32e48d4697 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1153,13 +1153,13 @@ static void mt_ase_fp_affinity(void)
 	 * restricted the allowed set to exclude any CPUs with FPUs,
 	 * we'll skip the procedure.
 	 */
-	if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+	if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
 		cpumask_t tmask;
 
 		current->thread.user_cpus_allowed
 			= current->cpus_allowed;
-		cpus_and(tmask, current->cpus_allowed,
-			 mt_fpu_cpumask);
+		cpumask_and(&tmask, &current->cpus_allowed,
+			    &mt_fpu_cpumask);
 		set_cpus_allowed_ptr(current, &tmask);
 		set_thread_flag(TIF_FPUBOUND);
 	}
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 6cae0e75de27..12d14ed48778 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -233,7 +233,7 @@ static __init void prom_meminit(void)
 		if (node_online(node)) {
 			szmem(node);
 			node_mem_init(node);
-			cpus_clear(__node_data[(node)]->cpumask);
+			cpumask_clear(&__node_data[(node)]->cpumask);
 		}
 	}
 	for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
@@ -244,7 +244,7 @@ static __init void prom_meminit(void)
 		if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
 			continue;
 
-		cpu_set(active_cpu, __node_data[(node)]->cpumask);
+		cpumask_set_cpu(active_cpu, &__node_data[(node)]->cpumask);
 		pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
 
 		active_cpu++;
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index e2eb688b5434..e3c68b5da18d 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c | |||
@@ -408,7 +408,7 @@ static int loongson3_cpu_disable(void) | |||
408 | return -EBUSY; | 408 | return -EBUSY; |
409 | 409 | ||
410 | set_cpu_online(cpu, false); | 410 | set_cpu_online(cpu, false); |
411 | cpu_clear(cpu, cpu_callin_map); | 411 | cpumask_clear_cpu(cpu, &cpu_callin_map); |
412 | local_irq_save(flags); | 412 | local_irq_save(flags); |
413 | fixup_irqs(); | 413 | fixup_irqs(); |
414 | local_irq_restore(flags); | 414 | local_irq_restore(flags); |
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 0164b0c48352..42181c7105df 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c | |||
@@ -75,7 +75,7 @@ static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int acti | |||
75 | { | 75 | { |
76 | unsigned int cpu; | 76 | unsigned int cpu; |
77 | 77 | ||
78 | for_each_cpu_mask(cpu, *mask) | 78 | for_each_cpu(cpu, mask) |
79 | paravirt_send_ipi_single(cpu, action); | 79 | paravirt_send_ipi_single(cpu, action); |
80 | } | 80 | } |
81 | 81 | ||
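
for_each_cpu_mask() dereferenced the mask and scanned up to NR_CPUS; for_each_cpu() takes the pointer directly and stops at nr_cpu_ids, which is why the '*' disappears above. A sketch with a hypothetical IPI sender:

#include <linux/cpumask.h>

static void demo_send_one(int cpu)
{
	/* hypothetical single-CPU IPI sender */
	(void)cpu;
}

static void demo_send_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	/* replaces: for_each_cpu_mask(cpu, *mask) -- note the '*' is gone */
	for_each_cpu(cpu, mask)
		demo_send_one(cpu);
}
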
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index ee736bd103f8..570098bfdf87 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c | |||
@@ -60,7 +60,7 @@ static void per_hub_init(cnodeid_t cnode) | |||
60 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); | 60 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); |
61 | int i; | 61 | int i; |
62 | 62 | ||
63 | cpu_set(smp_processor_id(), hub->h_cpus); | 63 | cpumask_set_cpu(smp_processor_id(), &hub->h_cpus); |
64 | 64 | ||
65 | if (test_and_set_bit(cnode, hub_init_mask)) | 65 | if (test_and_set_bit(cnode, hub_init_mask)) |
66 | return; | 66 | return; |
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index ecbb62f339c5..bda90cf87e8c 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c | |||
@@ -29,8 +29,8 @@ static cpumask_t ktext_repmask; | |||
29 | void __init setup_replication_mask(void) | 29 | void __init setup_replication_mask(void) |
30 | { | 30 | { |
31 | /* Set only the master cnode's bit. The master cnode is always 0. */ | 31 | /* Set only the master cnode's bit. The master cnode is always 0. */ |
32 | cpus_clear(ktext_repmask); | 32 | cpumask_clear(&ktext_repmask); |
33 | cpu_set(0, ktext_repmask); | 33 | cpumask_set_cpu(0, &ktext_repmask); |
34 | 34 | ||
35 | #ifdef CONFIG_REPLICATE_KTEXT | 35 | #ifdef CONFIG_REPLICATE_KTEXT |
36 | #ifndef CONFIG_MAPPED_KERNEL | 36 | #ifndef CONFIG_MAPPED_KERNEL |
@@ -43,7 +43,7 @@ void __init setup_replication_mask(void) | |||
43 | if (cnode == 0) | 43 | if (cnode == 0) |
44 | continue; | 44 | continue; |
45 | /* Advertise that we have a copy of the kernel */ | 45 | /* Advertise that we have a copy of the kernel */ |
46 | cpu_set(cnode, ktext_repmask); | 46 | cpumask_set_cpu(cnode, &ktext_repmask); |
47 | } | 47 | } |
48 | } | 48 | } |
49 | #endif | 49 | #endif |
@@ -99,7 +99,7 @@ void __init replicate_kernel_text() | |||
99 | client_nasid = COMPACT_TO_NASID_NODEID(cnode); | 99 | client_nasid = COMPACT_TO_NASID_NODEID(cnode); |
100 | 100 | ||
101 | /* Check if this node should get a copy of the kernel */ | 101 | /* Check if this node should get a copy of the kernel */ |
102 | if (cpu_isset(cnode, ktext_repmask)) { | 102 | if (cpumask_test_cpu(cnode, &ktext_repmask)) { |
103 | server_nasid = client_nasid; | 103 | server_nasid = client_nasid; |
104 | copy_kernel(server_nasid); | 104 | copy_kernel(server_nasid); |
105 | } | 105 | } |
@@ -124,7 +124,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode) | |||
124 | loadbase += 16777216; | 124 | loadbase += 16777216; |
125 | #endif | 125 | #endif |
126 | offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; | 126 | offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; |
127 | if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) | 127 | if ((cnode == 0) || (cpumask_test_cpu(cnode, &ktext_repmask))) |
128 | return TO_NODE(nasid, offset) >> PAGE_SHIFT; | 128 | return TO_NODE(nasid, offset) >> PAGE_SHIFT; |
129 | else | 129 | else |
130 | return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT; | 130 | return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT; |
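
Mask initialisation now pairs cpumask_clear() with cpumask_set_cpu(), both in pointer form. (This SGI code stores node ids in a cpumask, so the 'cpu' argument is really a cnode; the conversion is still bit-for-bit equivalent.) A sketch with a hypothetical replication mask:

#include <linux/cpumask.h>

static cpumask_t demo_repmask;	/* hypothetical replication mask */

static void demo_seed_repmask(void)
{
	cpumask_clear(&demo_repmask);		/* replaces cpus_clear(ktext_repmask) */
	cpumask_set_cpu(0, &demo_repmask);	/* replaces cpu_set(0, ktext_repmask) */
}
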
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 0b68469e063f..8d0eb2643248 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c | |||
@@ -404,7 +404,7 @@ static void __init node_mem_init(cnodeid_t node) | |||
404 | NODE_DATA(node)->node_start_pfn = start_pfn; | 404 | NODE_DATA(node)->node_start_pfn = start_pfn; |
405 | NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; | 405 | NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; |
406 | 406 | ||
407 | cpus_clear(hub_data(node)->h_cpus); | 407 | cpumask_clear(&hub_data(node)->h_cpus); |
408 | 408 | ||
409 | slot_freepfn += PFN_UP(sizeof(struct pglist_data) + | 409 | slot_freepfn += PFN_UP(sizeof(struct pglist_data) + |
410 | sizeof(struct hub_data)); | 410 | sizeof(struct hub_data)); |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index cfe056fe7f5c..f3191db6e2e9 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -525,8 +525,8 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
525 | desc = irq_to_desc(irq); | 525 | desc = irq_to_desc(irq); |
526 | cpumask_copy(&dest, desc->irq_data.affinity); | 526 | cpumask_copy(&dest, desc->irq_data.affinity); |
527 | if (irqd_is_per_cpu(&desc->irq_data) && | 527 | if (irqd_is_per_cpu(&desc->irq_data) && |
528 | !cpu_isset(smp_processor_id(), dest)) { | 528 | !cpumask_test_cpu(smp_processor_id(), &dest)) { |
529 | int cpu = first_cpu(dest); | 529 | int cpu = cpumask_first(&dest); |
530 | 530 | ||
531 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", | 531 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", |
532 | irq, smp_processor_id(), cpu); | 532 | irq, smp_processor_id(), cpu); |
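
first_cpu() took the mask by value and scanned to NR_CPUS; cpumask_first() takes a pointer and scans only nr_cpumask_bits. A sketch of the redirect idiom, with hypothetical naming:

#include <linux/cpumask.h>
#include <linux/smp.h>

static int demo_pick_irq_target(const struct cpumask *dest)
{
	/* replaces: cpu_isset(smp_processor_id(), dest) / first_cpu(dest) */
	if (!cpumask_test_cpu(smp_processor_id(), dest))
		return cpumask_first(dest);
	return smp_processor_id();
}
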
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index 4c8ad592ae33..5be6c4753667 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h | |||
@@ -25,7 +25,7 @@ extern cpumask_t threads_core_mask; | |||
25 | #define threads_per_core 1 | 25 | #define threads_per_core 1 |
26 | #define threads_per_subcore 1 | 26 | #define threads_per_subcore 1 |
27 | #define threads_shift 0 | 27 | #define threads_shift 0 |
28 | #define threads_core_mask (CPU_MASK_CPU0) | 28 | #define threads_core_mask (*get_cpu_mask(0)) |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /* cpu_thread_mask_to_cores - Return a cpumask of one per cores | 31 | /* cpu_thread_mask_to_cores - Return a cpumask of one per cores |
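
get_cpu_mask(0) returns a const pointer into the precomputed cpu_bit_bitmap table, so dereferencing it yields the same single-bit mask that the removed CPU_MASK_CPU0 initializer spelled out. A sketch of comparing against it, hypothetical helper name:

#include <linux/cpumask.h>

static bool demo_is_cpu0_only(const struct cpumask *m)
{
	/* get_cpu_mask(0) points at a constant mask with only bit 0 set */
	return cpumask_equal(m, get_cpu_mask(0));
}
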
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index b9d9489a5012..9f417feaf6e8 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h | |||
@@ -99,7 +99,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
99 | { | 99 | { |
100 | int i; | 100 | int i; |
101 | 101 | ||
102 | for (i = 0; i < num_online_cpus(); i++) | 102 | for_each_online_cpu(i) |
103 | cpu_context(i, mm) = NO_CONTEXT; | 103 | cpu_context(i, mm) = NO_CONTEXT; |
104 | 104 | ||
105 | return 0; | 105 | return 0; |
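
This one is more than style: indexing 0 .. num_online_cpus()-1 assumes online CPU ids are contiguous from zero, which breaks once hotplug leaves holes in cpu_online_mask. for_each_online_cpu() walks the actual mask. A sketch, with a hypothetical context array:

#include <linux/cpumask.h>

static int demo_ctx[NR_CPUS];	/* hypothetical per-cpu context slots */

static void demo_reset_contexts(void)
{
	int i;

	/* wrong once ids are sparse: for (i = 0; i < num_online_cpus(); i++) */
	for_each_online_cpu(i)
		demo_ctx[i] = 0;
}
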
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index fc5acfc93c92..de6be008fc01 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -363,7 +363,7 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
363 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); | 363 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); |
364 | } else { | 364 | } else { |
365 | int i; | 365 | int i; |
366 | for (i = 0; i < num_online_cpus(); i++) | 366 | for_each_online_cpu(i) |
367 | if (smp_processor_id() != i) | 367 | if (smp_processor_id() != i) |
368 | cpu_context(i, mm) = 0; | 368 | cpu_context(i, mm) = 0; |
369 | } | 369 | } |
@@ -400,7 +400,7 @@ void flush_tlb_range(struct vm_area_struct *vma, | |||
400 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); | 400 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); |
401 | } else { | 401 | } else { |
402 | int i; | 402 | int i; |
403 | for (i = 0; i < num_online_cpus(); i++) | 403 | for_each_online_cpu(i) |
404 | if (smp_processor_id() != i) | 404 | if (smp_processor_id() != i) |
405 | cpu_context(i, mm) = 0; | 405 | cpu_context(i, mm) = 0; |
406 | } | 406 | } |
@@ -443,7 +443,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
443 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); | 443 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); |
444 | } else { | 444 | } else { |
445 | int i; | 445 | int i; |
446 | for (i = 0; i < num_online_cpus(); i++) | 446 | for_each_online_cpu(i) |
447 | if (smp_processor_id() != i) | 447 | if (smp_processor_id() != i) |
448 | cpu_context(i, vma->vm_mm) = 0; | 448 | cpu_context(i, vma->vm_mm) = 0; |
449 | } | 449 | } |
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 18147a5523d9..8caf45ee81d9 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -194,7 +194,7 @@ static __init int setup_timer_cs(void) | |||
194 | static void percpu_ce_setup(enum clock_event_mode mode, | 194 | static void percpu_ce_setup(enum clock_event_mode mode, |
195 | struct clock_event_device *evt) | 195 | struct clock_event_device *evt) |
196 | { | 196 | { |
197 | int cpu = __first_cpu(evt->cpumask); | 197 | int cpu = cpumask_first(evt->cpumask); |
198 | 198 | ||
199 | switch (mode) { | 199 | switch (mode) { |
200 | case CLOCK_EVT_MODE_PERIODIC: | 200 | case CLOCK_EVT_MODE_PERIODIC: |
@@ -214,7 +214,7 @@ static void percpu_ce_setup(enum clock_event_mode mode, | |||
214 | static int percpu_ce_set_next_event(unsigned long delta, | 214 | static int percpu_ce_set_next_event(unsigned long delta, |
215 | struct clock_event_device *evt) | 215 | struct clock_event_device *evt) |
216 | { | 216 | { |
217 | int cpu = __first_cpu(evt->cpumask); | 217 | int cpu = cpumask_first(evt->cpumask); |
218 | unsigned int next = (unsigned int)delta; | 218 | unsigned int next = (unsigned int)delta; |
219 | 219 | ||
220 | sparc_config.load_profile_irq(cpu, next); | 220 | sparc_config.load_profile_irq(cpu, next); |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 7833b2ccdfbc..6873f006f7d0 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void) | |||
774 | * though, there'll be no lowmem, so we just alloc_bootmem | 774 | * though, there'll be no lowmem, so we just alloc_bootmem |
775 | * the memmap. There will be no percpu memory either. | 775 | * the memmap. There will be no percpu memory either. |
776 | */ | 776 | */ |
777 | if (i != 0 && cpu_isset(i, isolnodes)) { | 777 | if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { |
778 | node_memmap_pfn[i] = | 778 | node_memmap_pfn[i] = |
779 | alloc_bootmem_pfn(0, memmap_size, 0); | 779 | alloc_bootmem_pfn(0, memmap_size, 0); |
780 | BUG_ON(node_percpu[i] != 0); | 780 | BUG_ON(node_percpu[i] != 0); |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index d9d0bd2faaf4..ab3219b3fbda 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -171,8 +171,8 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
171 | for_each_online_cpu(cpu) { | 171 | for_each_online_cpu(cpu) { |
172 | if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) | 172 | if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) |
173 | continue; | 173 | continue; |
174 | __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu)); | 174 | cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); |
175 | __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu)); | 175 | cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); |
176 | } | 176 | } |
177 | free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); | 177 | free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); |
178 | free_cpumask_var(per_cpu(ipi_mask, this_cpu)); | 178 | free_cpumask_var(per_cpu(ipi_mask, this_cpu)); |
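
Here the per-cpu masks are cpumask_var_t, which already behaves as a pointer, so cpumask_clear_cpu() takes them directly and no '&' is added. A sketch of the alloc/clear/free lifecycle, hypothetical function name:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_mask_lifecycle(int cpu)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);	/* cpumask_var_t passes as a pointer */
	cpumask_clear_cpu(cpu, mask);	/* replaces __cpu_clear(cpu, mask) */

	free_cpumask_var(mask);
	return 0;
}
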
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index f3656a6b0382..35a88097af3c 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c | |||
@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode, | |||
117 | unsigned long period; | 117 | unsigned long period; |
118 | struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); | 118 | struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); |
119 | 119 | ||
120 | pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), | 120 | pr_debug("%s CPU %d mode=%d\n", __func__, |
121 | cpumask_first(evt->cpumask), | ||
121 | mode); | 122 | mode); |
122 | 123 | ||
123 | switch (mode) { | 124 | switch (mode) { |
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 73fe2f8d7f96..7936dce4b878 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c | |||
@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev, | |||
292 | */ | 292 | */ |
293 | smp_rmb(); | 293 | smp_rmb(); |
294 | 294 | ||
295 | for_each_cpu_mask(i, coupled->coupled_cpus) | 295 | for_each_cpu(i, &coupled->coupled_cpus) |
296 | if (cpu_online(i) && coupled->requested_state[i] < state) | 296 | if (cpu_online(i) && coupled->requested_state[i] < state) |
297 | state = coupled->requested_state[i]; | 297 | state = coupled->requested_state[i]; |
298 | 298 | ||
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, | |||
338 | { | 338 | { |
339 | int cpu; | 339 | int cpu; |
340 | 340 | ||
341 | for_each_cpu_mask(cpu, coupled->coupled_cpus) | 341 | for_each_cpu(cpu, &coupled->coupled_cpus) |
342 | if (cpu != this_cpu && cpu_online(cpu)) | 342 | if (cpu != this_cpu && cpu_online(cpu)) |
343 | cpuidle_coupled_poke(cpu); | 343 | cpuidle_coupled_poke(cpu); |
344 | } | 344 | } |
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev) | |||
638 | if (cpumask_empty(&dev->coupled_cpus)) | 638 | if (cpumask_empty(&dev->coupled_cpus)) |
639 | return 0; | 639 | return 0; |
640 | 640 | ||
641 | for_each_cpu_mask(cpu, dev->coupled_cpus) { | 641 | for_each_cpu(cpu, &dev->coupled_cpus) { |
642 | other_dev = per_cpu(cpuidle_devices, cpu); | 642 | other_dev = per_cpu(cpuidle_devices, cpu); |
643 | if (other_dev && other_dev->coupled) { | 643 | if (other_dev && other_dev->coupled) { |
644 | coupled = other_dev->coupled; | 644 | coupled = other_dev->coupled; |
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index afd136b45f49..10a9aeff1666 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, | |||
1754 | dev->dev.of_node->full_name); | 1754 | dev->dev.of_node->full_name); |
1755 | return -EINVAL; | 1755 | return -EINVAL; |
1756 | } | 1756 | } |
1757 | cpu_set(*id, p->sharing); | 1757 | cpumask_set_cpu(*id, &p->sharing); |
1758 | table[*id] = p; | 1758 | table[*id] = p; |
1759 | } | 1759 | } |
1760 | return 0; | 1760 | return 0; |
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, | |||
1776 | return -ENOMEM; | 1776 | return -ENOMEM; |
1777 | } | 1777 | } |
1778 | 1778 | ||
1779 | cpus_clear(p->sharing); | 1779 | cpumask_clear(&p->sharing); |
1780 | spin_lock_init(&p->lock); | 1780 | spin_lock_init(&p->lock); |
1781 | p->q_type = q_type; | 1781 | p->q_type = q_type; |
1782 | INIT_LIST_HEAD(&p->jobs); | 1782 | INIT_LIST_HEAD(&p->jobs); |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 4f2fb62e6f37..49875adb6b44 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -567,7 +567,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
567 | */ | 567 | */ |
568 | smp_wmb(); | 568 | smp_wmb(); |
569 | 569 | ||
570 | for_each_cpu_mask(cpu, *mask) { | 570 | for_each_cpu(cpu, mask) { |
571 | u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; | 571 | u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; |
572 | u16 tlist; | 572 | u16 tlist; |
573 | 573 | ||
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index bc48b7dc89ec..57f09cb54464 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -389,19 +389,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | |||
389 | int i; | 389 | int i; |
390 | 390 | ||
391 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 391 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
392 | if (cpus_empty(tmp)) | 392 | if (cpumask_empty(&tmp)) |
393 | return -EINVAL; | 393 | return -EINVAL; |
394 | 394 | ||
395 | /* Assumption : cpumask refers to a single CPU */ | 395 | /* Assumption : cpumask refers to a single CPU */ |
396 | spin_lock_irqsave(&gic_lock, flags); | 396 | spin_lock_irqsave(&gic_lock, flags); |
397 | 397 | ||
398 | /* Re-route this IRQ */ | 398 | /* Re-route this IRQ */ |
399 | gic_map_to_vpe(irq, first_cpu(tmp)); | 399 | gic_map_to_vpe(irq, cpumask_first(&tmp)); |
400 | 400 | ||
401 | /* Update the pcpu_masks */ | 401 | /* Update the pcpu_masks */ |
402 | for (i = 0; i < NR_CPUS; i++) | 402 | for (i = 0; i < NR_CPUS; i++) |
403 | clear_bit(irq, pcpu_masks[i].pcpu_mask); | 403 | clear_bit(irq, pcpu_masks[i].pcpu_mask); |
404 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); | 404 | set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); |
405 | 405 | ||
406 | cpumask_copy(d->affinity, cpumask); | 406 | cpumask_copy(d->affinity, cpumask); |
407 | spin_unlock_irqrestore(&gic_lock, flags); | 407 | spin_unlock_irqrestore(&gic_lock, flags); |
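
The affinity path combines three conversions: cpumask_and() into a temporary, cpumask_empty() to validate it, and cpumask_first() to pick the target. A sketch with hypothetical naming:

#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_pick_online_target(const struct cpumask *requested)
{
	cpumask_t tmp;

	cpumask_and(&tmp, requested, cpu_online_mask);
	if (cpumask_empty(&tmp))	/* replaces cpus_empty(tmp) */
		return -EINVAL;

	return cpumask_first(&tmp);	/* replaces first_cpu(tmp) */
}
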
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index a789a2054388..a3f7610002aa 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -1123,7 +1123,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, | |||
1123 | addr + i * sizeof(struct tile_net_comps); | 1123 | addr + i * sizeof(struct tile_net_comps); |
1124 | 1124 | ||
1125 | /* If this is a network cpu, create an iqueue. */ | 1125 | /* If this is a network cpu, create an iqueue. */ |
1126 | if (cpu_isset(cpu, network_cpus_map)) { | 1126 | if (cpumask_test_cpu(cpu, &network_cpus_map)) { |
1127 | order = get_order(NOTIF_RING_SIZE); | 1127 | order = get_order(NOTIF_RING_SIZE); |
1128 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | 1128 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); |
1129 | if (page == NULL) { | 1129 | if (page == NULL) { |
@@ -1299,7 +1299,7 @@ static int tile_net_init_mpipe(struct net_device *dev) | |||
1299 | int first_ring, ring; | 1299 | int first_ring, ring; |
1300 | int instance = mpipe_instance(dev); | 1300 | int instance = mpipe_instance(dev); |
1301 | struct mpipe_data *md = &mpipe_data[instance]; | 1301 | struct mpipe_data *md = &mpipe_data[instance]; |
1302 | int network_cpus_count = cpus_weight(network_cpus_map); | 1302 | int network_cpus_count = cpumask_weight(&network_cpus_map); |
1303 | 1303 | ||
1304 | if (!hash_default) { | 1304 | if (!hash_default) { |
1305 | netdev_err(dev, "Networking requires hash_default!\n"); | 1305 | netdev_err(dev, "Networking requires hash_default!\n"); |
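
cpus_weight() likewise took the mask by value; cpumask_weight() counts set bits through a pointer and, like the iterators, stops at the runtime bound. A sketch with a hypothetical mask:

#include <linux/cpumask.h>

static cpumask_t demo_network_cpus;	/* hypothetical network CPU set */

static int demo_count_network_cpus(void)
{
	/* replaces: cpus_weight(demo_network_cpus) */
	return cpumask_weight(&demo_network_cpus);
}
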
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a1cfbd3dda47..8eab107b53fb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -6632,14 +6632,12 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h) | |||
6632 | 6632 | ||
6633 | static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) | 6633 | static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) |
6634 | { | 6634 | { |
6635 | int i, cpu; | 6635 | int cpu; |
6636 | 6636 | ||
6637 | cpu = cpumask_first(cpu_online_mask); | 6637 | for_each_online_cpu(cpu) { |
6638 | for (i = 0; i < num_online_cpus(); i++) { | ||
6639 | u32 *lockup_detected; | 6638 | u32 *lockup_detected; |
6640 | lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); | 6639 | lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); |
6641 | *lockup_detected = value; | 6640 | *lockup_detected = value; |
6642 | cpu = cpumask_next(cpu, cpu_online_mask); | ||
6643 | } | 6641 | } |
6644 | wmb(); /* be sure the per-cpu variables are out to memory */ | 6642 | wmb(); /* be sure the per-cpu variables are out to memory */ |
6645 | } | 6643 | } |
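
The hpsa loop manually chained cpumask_first()/cpumask_next() alongside a separate counter; for_each_online_cpu() expresses the same walk with one induction variable and no risk of the two getting out of step. A sketch with a hypothetical per-cpu flag:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u32, demo_lockup_flag);	/* hypothetical flag */

static void demo_set_all_online(u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(&demo_lockup_flag, cpu) = value;
	wmb();	/* flush the per-cpu stores, mirroring the original barrier */
}
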
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 086549a665e2..27e285b92b5f 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/bitmap.h> | 11 | #include <linux/bitmap.h> |
12 | #include <linux/bug.h> | 12 | #include <linux/bug.h> |
13 | 13 | ||
14 | /* Don't assign or return these: may not be this big! */ | ||
14 | typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; | 15 | typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; |
15 | 16 | ||
16 | /** | 17 | /** |
@@ -289,11 +290,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) | |||
289 | * @cpumask: the cpumask pointer | 290 | * @cpumask: the cpumask pointer |
290 | * | 291 | * |
291 | * Returns 1 if @cpu is set in @cpumask, else returns 0 | 292 | * Returns 1 if @cpu is set in @cpumask, else returns 0 |
292 | * | ||
293 | * No static inline type checking - see Subtlety (1) above. | ||
294 | */ | 293 | */ |
295 | #define cpumask_test_cpu(cpu, cpumask) \ | 294 | static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) |
296 | test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) | 295 | { |
296 | return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); | ||
297 | } | ||
297 | 298 | ||
298 | /** | 299 | /** |
299 | * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask | 300 | * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask |
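
Turning cpumask_test_cpu() from a macro into a static inline is what adds the typechecking: passing a cpumask_t by value, or something that is not a cpumask at all, now fails to compile instead of silently testing the wrong bits. An illustrative sketch:

#include <linux/cpumask.h>

static void demo_typecheck(const struct cpumask *ptr, cpumask_t byval)
{
	cpumask_test_cpu(0, ptr);	/* ok: the inline expects a pointer */
	/* cpumask_test_cpu(0, byval);	-- no longer compiles; write &byval */
	cpumask_test_cpu(0, &byval);
}
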
@@ -609,9 +610,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) | |||
609 | */ | 610 | */ |
610 | static inline size_t cpumask_size(void) | 611 | static inline size_t cpumask_size(void) |
611 | { | 612 | { |
612 | /* FIXME: Once all cpumask assignments are eliminated, this | 613 | return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); |
613 | * can be nr_cpumask_bits */ | ||
614 | return BITS_TO_LONGS(NR_CPUS) * sizeof(long); | ||
615 | } | 614 | } |
616 | 615 | ||
617 | /* | 616 | /* |
@@ -768,7 +767,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) | |||
768 | #if NR_CPUS <= BITS_PER_LONG | 767 | #if NR_CPUS <= BITS_PER_LONG |
769 | #define CPU_BITS_ALL \ | 768 | #define CPU_BITS_ALL \ |
770 | { \ | 769 | { \ |
771 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 770 | [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ |
772 | } | 771 | } |
773 | 772 | ||
774 | #else /* NR_CPUS > BITS_PER_LONG */ | 773 | #else /* NR_CPUS > BITS_PER_LONG */ |
@@ -776,7 +775,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) | |||
776 | #define CPU_BITS_ALL \ | 775 | #define CPU_BITS_ALL \ |
777 | { \ | 776 | { \ |
778 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ | 777 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
779 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 778 | [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ |
780 | } | 779 | } |
781 | #endif /* NR_CPUS > BITS_PER_LONG */ | 780 | #endif /* NR_CPUS > BITS_PER_LONG */ |
782 | 781 | ||
@@ -797,32 +796,18 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) | |||
797 | nr_cpu_ids); | 796 | nr_cpu_ids); |
798 | } | 797 | } |
799 | 798 | ||
800 | /* | ||
801 | * | ||
802 | * From here down, all obsolete. Use cpumask_ variants! | ||
803 | * | ||
804 | */ | ||
805 | #ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS | ||
806 | #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) | ||
807 | |||
808 | #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) | ||
809 | |||
810 | #if NR_CPUS <= BITS_PER_LONG | 799 | #if NR_CPUS <= BITS_PER_LONG |
811 | |||
812 | #define CPU_MASK_ALL \ | 800 | #define CPU_MASK_ALL \ |
813 | (cpumask_t) { { \ | 801 | (cpumask_t) { { \ |
814 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 802 | [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ |
815 | } } | 803 | } } |
816 | |||
817 | #else | 804 | #else |
818 | |||
819 | #define CPU_MASK_ALL \ | 805 | #define CPU_MASK_ALL \ |
820 | (cpumask_t) { { \ | 806 | (cpumask_t) { { \ |
821 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ | 807 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
822 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 808 | [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ |
823 | } } | 809 | } } |
824 | 810 | #endif /* NR_CPUS > BITS_PER_LONG */ | |
825 | #endif | ||
826 | 811 | ||
827 | #define CPU_MASK_NONE \ | 812 | #define CPU_MASK_NONE \ |
828 | (cpumask_t) { { \ | 813 | (cpumask_t) { { \ |
@@ -834,143 +819,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) | |||
834 | [0] = 1UL \ | 819 | [0] = 1UL \ |
835 | } } | 820 | } } |
836 | 821 | ||
837 | #if NR_CPUS == 1 | ||
838 | #define first_cpu(src) ({ (void)(src); 0; }) | ||
839 | #define next_cpu(n, src) ({ (void)(src); 1; }) | ||
840 | #define any_online_cpu(mask) 0 | ||
841 | #define for_each_cpu_mask(cpu, mask) \ | ||
842 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) | ||
843 | #else /* NR_CPUS > 1 */ | ||
844 | int __first_cpu(const cpumask_t *srcp); | ||
845 | int __next_cpu(int n, const cpumask_t *srcp); | ||
846 | |||
847 | #define first_cpu(src) __first_cpu(&(src)) | ||
848 | #define next_cpu(n, src) __next_cpu((n), &(src)) | ||
849 | #define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) | ||
850 | #define for_each_cpu_mask(cpu, mask) \ | ||
851 | for ((cpu) = -1; \ | ||
852 | (cpu) = next_cpu((cpu), (mask)), \ | ||
853 | (cpu) < NR_CPUS; ) | ||
854 | #endif /* SMP */ | ||
855 | |||
856 | #if NR_CPUS <= 64 | ||
857 | |||
858 | #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) | ||
859 | |||
860 | #else /* NR_CPUS > 64 */ | ||
861 | |||
862 | int __next_cpu_nr(int n, const cpumask_t *srcp); | ||
863 | #define for_each_cpu_mask_nr(cpu, mask) \ | ||
864 | for ((cpu) = -1; \ | ||
865 | (cpu) = __next_cpu_nr((cpu), &(mask)), \ | ||
866 | (cpu) < nr_cpu_ids; ) | ||
867 | |||
868 | #endif /* NR_CPUS > 64 */ | ||
869 | |||
870 | #define cpus_addr(src) ((src).bits) | ||
871 | |||
872 | #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) | ||
873 | static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) | ||
874 | { | ||
875 | set_bit(cpu, dstp->bits); | ||
876 | } | ||
877 | |||
878 | #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) | ||
879 | static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) | ||
880 | { | ||
881 | clear_bit(cpu, dstp->bits); | ||
882 | } | ||
883 | |||
884 | #define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) | ||
885 | static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits) | ||
886 | { | ||
887 | bitmap_fill(dstp->bits, nbits); | ||
888 | } | ||
889 | |||
890 | #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) | ||
891 | static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits) | ||
892 | { | ||
893 | bitmap_zero(dstp->bits, nbits); | ||
894 | } | ||
895 | |||
896 | /* No static inline type checking - see Subtlety (1) above. */ | ||
897 | #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) | ||
898 | |||
899 | #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) | ||
900 | static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) | ||
901 | { | ||
902 | return test_and_set_bit(cpu, addr->bits); | ||
903 | } | ||
904 | |||
905 | #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) | ||
906 | static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, | ||
907 | const cpumask_t *src2p, unsigned int nbits) | ||
908 | { | ||
909 | return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); | ||
910 | } | ||
911 | |||
912 | #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) | ||
913 | static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, | ||
914 | const cpumask_t *src2p, unsigned int nbits) | ||
915 | { | ||
916 | bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); | ||
917 | } | ||
918 | |||
919 | #define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) | ||
920 | static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, | ||
921 | const cpumask_t *src2p, unsigned int nbits) | ||
922 | { | ||
923 | bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); | ||
924 | } | ||
925 | |||
926 | #define cpus_andnot(dst, src1, src2) \ | ||
927 | __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) | ||
928 | static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, | ||
929 | const cpumask_t *src2p, unsigned int nbits) | ||
930 | { | ||
931 | return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); | ||
932 | } | ||
933 | |||
934 | #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) | ||
935 | static inline int __cpus_equal(const cpumask_t *src1p, | ||
936 | const cpumask_t *src2p, unsigned int nbits) | ||
937 | { | ||
938 | return bitmap_equal(src1p->bits, src2p->bits, nbits); | ||
939 | } | ||
940 | |||
941 | #define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) | ||
942 | static inline int __cpus_intersects(const cpumask_t *src1p, | ||
943 | const cpumask_t *src2p, unsigned int nbits) | ||
944 | { | ||
945 | return bitmap_intersects(src1p->bits, src2p->bits, nbits); | ||
946 | } | ||
947 | |||
948 | #define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) | ||
949 | static inline int __cpus_subset(const cpumask_t *src1p, | ||
950 | const cpumask_t *src2p, unsigned int nbits) | ||
951 | { | ||
952 | return bitmap_subset(src1p->bits, src2p->bits, nbits); | ||
953 | } | ||
954 | |||
955 | #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) | ||
956 | static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits) | ||
957 | { | ||
958 | return bitmap_empty(srcp->bits, nbits); | ||
959 | } | ||
960 | |||
961 | #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) | ||
962 | static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits) | ||
963 | { | ||
964 | return bitmap_weight(srcp->bits, nbits); | ||
965 | } | ||
966 | |||
967 | #define cpus_shift_left(dst, src, n) \ | ||
968 | __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) | ||
969 | static inline void __cpus_shift_left(cpumask_t *dstp, | ||
970 | const cpumask_t *srcp, int n, int nbits) | ||
971 | { | ||
972 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); | ||
973 | } | ||
974 | #endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ | ||
975 | |||
976 | #endif /* __LINUX_CPUMASK_H */ | 822 | #endif /* __LINUX_CPUMASK_H */ |
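
With the last NR_CPUS-walking primitives gone, cpumask_size() can report BITS_TO_LONGS(nr_cpumask_bits) longs, so offstack allocations under CONFIG_CPUMASK_OFFSTACK cover only the bits the machine can actually use. A sketch of the relationship, hypothetical reporting function:

#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_report_sizes(void)
{
	/* offstack allocations now track nr_cpu_ids, not NR_CPUS */
	pr_info("cpumask_size()=%zu for nr_cpu_ids=%d (NR_CPUS=%d)\n",
		cpumask_size(), nr_cpu_ids, NR_CPUS);
}
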
diff --git a/lib/Kconfig b/lib/Kconfig index f5440221d929..601965a948e8 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -396,10 +396,6 @@ config CPUMASK_OFFSTACK | |||
396 | them on the stack. This is a bit more expensive, but avoids | 396 | them on the stack. This is a bit more expensive, but avoids |
397 | stack overflow. | 397 | stack overflow. |
398 | 398 | ||
399 | config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS | ||
400 | bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS | ||
401 | depends on BROKEN | ||
402 | |||
403 | config CPU_RMAP | 399 | config CPU_RMAP |
404 | bool | 400 | bool |
405 | depends on SMP | 401 | depends on SMP |
diff --git a/lib/cpumask.c b/lib/cpumask.c index 5ab1553fd076..830dd5dec40f 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -5,27 +5,6 @@ | |||
5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
6 | #include <linux/bootmem.h> | 6 | #include <linux/bootmem.h> |
7 | 7 | ||
8 | int __first_cpu(const cpumask_t *srcp) | ||
9 | { | ||
10 | return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); | ||
11 | } | ||
12 | EXPORT_SYMBOL(__first_cpu); | ||
13 | |||
14 | int __next_cpu(int n, const cpumask_t *srcp) | ||
15 | { | ||
16 | return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); | ||
17 | } | ||
18 | EXPORT_SYMBOL(__next_cpu); | ||
19 | |||
20 | #if NR_CPUS > 64 | ||
21 | int __next_cpu_nr(int n, const cpumask_t *srcp) | ||
22 | { | ||
23 | return min_t(int, nr_cpu_ids, | ||
24 | find_next_bit(srcp->bits, nr_cpu_ids, n+1)); | ||
25 | } | ||
26 | EXPORT_SYMBOL(__next_cpu_nr); | ||
27 | #endif | ||
28 | |||
29 | /** | 8 | /** |
30 | * cpumask_next_and - get the next cpu in *src1p & *src2p | 9 | * cpumask_next_and - get the next cpu in *src1p & *src2p |
31 | * @n: the cpu prior to the place to search (ie. return will be > @n) | 10 | * @n: the cpu prior to the place to search (ie. return will be > @n) |
@@ -90,13 +69,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) | |||
90 | dump_stack(); | 69 | dump_stack(); |
91 | } | 70 | } |
92 | #endif | 71 | #endif |
93 | /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ | ||
94 | if (*mask) { | ||
95 | unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); | ||
96 | unsigned int tail; | ||
97 | tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); | ||
98 | memset(ptr + cpumask_size() - tail, 0, tail); | ||
99 | } | ||
100 | 72 | ||
101 | return *mask != NULL; | 73 | return *mask != NULL; |
102 | } | 74 | } |
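
The deleted memset was a band-aid: while old primitives still scanned to NR_CPUS, offstack masks had to keep the tail bits beyond nr_cpumask_bits zeroed. With every walker now bounded by nr_cpu_ids, that tail is never read, and allocation shrinks to match. A sketch of the remaining usage, hypothetical caller:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int demo_offstack_user(void)
{
	cpumask_var_t mask;

	/* allocates BITS_TO_LONGS(nr_cpumask_bits) longs; no NR_CPUS tail */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	free_cpumask_var(mask);
	return 0;
}
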