author    Rusty Russell <rusty@rustcorp.com.au>  2015-03-04 19:19:16 -0500
committer Rusty Russell <rusty@rustcorp.com.au>  2015-03-04 23:55:04 -0500
commit    5d2068da8d339e4dff8f9b9a1246e6a79e2949d8 (patch)
tree      6365a79aa004b05476de252247bc8a27916e768f
parent    f9b531fe14a539ec2ad802b73c9638f324e4a4ff (diff)
ia64: fix up obsolete cpu function usage.
Thanks to spatch, then a sweep for for_each_cpu_mask => for_each_cpu.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: linux-ia64@vger.kernel.org
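[The semantic patch itself is not part of this commit. As a hypothetical sketch only, Coccinelle rules of roughly this shape would perform the call-site conversions seen below — the obsolete by-value helpers become pointer-taking ones, so arguments gain a '&' — with the for_each_cpu_mask => for_each_cpu iterator sweep handled separately, as the message notes:

    // illustrative spatch rules, not the ones actually used
    @@ expression cpu, mask; @@
    - cpu_set(cpu, mask)
    + cpumask_set_cpu(cpu, &mask)

    @@ expression cpu, mask; @@
    - cpu_isset(cpu, mask)
    + cpumask_test_cpu(cpu, &mask)

    @@ expression mask; @@
    - cpus_weight(mask)
    + cpumask_weight(&mask)

    @@ expression mask; @@
    - cpus_empty(mask)
    + cpumask_empty(&mask)
]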
-rw-r--r--  arch/ia64/include/asm/acpi.h  |  6
-rw-r--r--  arch/ia64/kernel/acpi.c       |  2
-rw-r--r--  arch/ia64/kernel/iosapic.c    |  2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c   | 28
-rw-r--r--  arch/ia64/kernel/mca.c        | 10
-rw-r--r--  arch/ia64/kernel/numa.c       | 10
-rw-r--r--  arch/ia64/kernel/salinfo.c    | 24
-rw-r--r--  arch/ia64/kernel/setup.c      | 11
-rw-r--r--  arch/ia64/kernel/smp.c        |  6
-rw-r--r--  arch/ia64/kernel/smpboot.c    | 42
-rw-r--r--  arch/ia64/kernel/topology.c   |  6
11 files changed, 75 insertions(+), 72 deletions(-)
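[For reference, a minimal C sketch — illustrative only, not part of the commit — of how the obsolete by-value cpumask helpers map onto the pointer-based API used throughout the diff below:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical demo function; each trailing comment shows the
     * obsolete form this commit removes. */
    static void cpumask_api_demo(void)
    {
            cpumask_t mask;
            int cpu;

            cpumask_clear(&mask);           /* was: cpus_clear(mask)   */
            cpumask_set_cpu(0, &mask);      /* was: cpu_set(0, mask)   */

            if (cpumask_test_cpu(0, &mask)) /* was: cpu_isset(0, mask) */
                    pr_info("weight=%u\n",
                            cpumask_weight(&mask)); /* was: cpus_weight(mask) */

            for_each_cpu(cpu, &mask)        /* was: for_each_cpu_mask(cpu, mask) */
                    pr_info("cpu %d is set\n", cpu);

            cpumask_clear_cpu(0, &mask);    /* was: cpu_clear(0, mask) */
            if (cpumask_empty(&mask))       /* was: cpus_empty(mask)   */
                    pr_info("mask is empty\n");
    }
]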
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a1d91ab4c5ef..aa0fdf125aba 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
-	for_each_cpu_mask((cpu), early_cpu_possible_map)
+	for_each_cpu((cpu), &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
@@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 	int cpu;
 	int next_nid = 0;
 
-	low_cpu = cpus_weight(early_cpu_possible_map);
+	low_cpu = cpumask_weight(&early_cpu_possible_map);
 
 	high_cpu = max(low_cpu, min_cpus);
 	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
 
 	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
-		cpu_set(cpu, early_cpu_possible_map);
+		cpumask_set_cpu(cpu, &early_cpu_possible_map);
 		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
 			node_cpuid[cpu].nid = next_nid;
 			next_nid++;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2c4498919d3c..35bf22cc71b7 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		(pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
-	cpu_set(srat_num_cpus, early_cpu_possible_map);
+	cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
 	srat_num_cpus++;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cd44a57c73be..bc9501e36e77 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -690,7 +690,7 @@ skip_numa_setup:
 	do {
 		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
-	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
+	} while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
 
 	return cpu_physical_id(cpu);
 #else  /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 3329177c262e..9f40d972969c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain)
 	int pos, vector;
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
 		vector = IA64_FIRST_DEVICE_VECTOR + pos;
-		cpus_and(mask, domain, vector_table[vector]);
-		if (!cpus_empty(mask))
+		cpumask_and(&mask, &domain, &vector_table[vector]);
+		if (!cpumask_empty(&mask))
 			continue;
 		return vector;
 	}
@@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
 	cpumask_and(&mask, &domain, cpu_online_mask);
-	if (cpus_empty(mask))
+	if (cpumask_empty(&mask))
 		return -EINVAL;
-	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
 		return 0;
 	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
 		return -EBUSY;
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, &mask)
 		per_cpu(vector_irq, cpu)[vector] = irq;
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	cpus_or(vector_table[vector], vector_table[vector], domain);
+	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
 	return 0;
 }
 
@@ -242,7 +242,7 @@ void __setup_vector_irq(int cpu)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if (!cpu_isset(cpu, irq_cfg[irq].domain))
+		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
 			continue;
 		vector = irq_to_vector(irq);
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -273,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu)
 		return -EBUSY;
 	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
 		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
+	if (cpumask_test_cpu(cpu, &cfg->domain))
 		return 0;
 	domain = vector_allocation_domain(cpu);
 	vector = find_unassigned_vector(domain);
@@ -307,12 +307,12 @@ void irq_complete_move(unsigned irq)
 	if (likely(!cfg->move_in_progress))
 		return;
 
-	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
 		return;
 
 	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
-	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-	for_each_cpu_mask(i, cleanup_mask)
+	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
+	for_each_cpu(i, &cleanup_mask)
 		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
 	cfg->move_in_progress = 0;
 }
@@ -338,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if (!cpu_isset(me, cfg->old_domain))
+		if (!cpumask_test_cpu(me, &cfg->old_domain))
 			goto unlock;
 
 		spin_lock_irqsave(&vector_lock, flags);
 		__this_cpu_write(vector_irq[vector], -1);
-		cpu_clear(me, vector_table[vector]);
+		cpumask_clear_cpu(me, &vector_table[vector]);
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
 	unlock:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8bfd36af46f8..dd5801eb4c69 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		monarch_cpu = cpu;
 		sos->monarch = 1;
 	} else {
-		cpu_set(cpu, mca_cpu);
+		cpumask_set_cpu(cpu, &mca_cpu);
 		sos->monarch = 0;
 	}
 	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		 */
 		ia64_mca_wakeup_all();
 	} else {
-		while (cpu_isset(cpu, mca_cpu))
+		while (cpumask_test_cpu(cpu, &mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
 	}
 
@@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * and put this cpu in the rendez loop.
 	 */
 	for_each_online_cpu(i) {
-		if (cpu_isset(i, mca_cpu)) {
+		if (cpumask_test_cpu(i, &mca_cpu)) {
 			monarch_cpu = i;
-			cpu_clear(i, mca_cpu);	/* wake next cpu */
+			cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
 			while (monarch_cpu != -1)
 				cpu_relax();	/* spin until last cpu leaves */
 			set_curr_task(cpu, previous_current);
@@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	ti->cpu = cpu;
 	p->stack = ti;
 	p->state = TASK_UNINTERRUPTIBLE;
-	cpu_set(cpu, p->cpus_allowed);
+	cpumask_set_cpu(cpu, &p->cpus_allowed);
 	INIT_LIST_HEAD(&p->tasks);
 	p->parent = p->real_parent = p->group_leader = p;
 	INIT_LIST_HEAD(&p->children);
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index d288cde93606..92c376279c6d 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -39,7 +39,7 @@ void map_cpu_to_node(int cpu, int nid)
 	}
 	/* sanity check first */
 	oldnid = cpu_to_node_map[cpu];
-	if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
+	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
 		return; /* nothing to do */
 	}
 	/* we don't have cpu-driven node hot add yet...
@@ -47,16 +47,16 @@ void map_cpu_to_node(int cpu, int nid)
 	if (!node_online(nid))
 		nid = first_online_node;
 	cpu_to_node_map[cpu] = nid;
-	cpu_set(cpu, node_to_cpu_mask[nid]);
+	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
 	return;
 }
 
 void unmap_cpu_from_node(int cpu, int nid)
 {
-	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
+	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
 	WARN_ON(cpu_to_node_map[cpu] != nid);
 	cpu_to_node_map[cpu] = 0;
-	cpu_clear(cpu, node_to_cpu_mask[nid]);
+	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
 }
 
 
@@ -71,7 +71,7 @@ void __init build_cpu_to_node_map(void)
 	int cpu, i, node;
 
 	for(node=0; node < MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
+		cpumask_clear(&node_to_cpu_mask[node]);
 
 	for_each_possible_early_cpu(cpu) {
 		node = -1;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ee9719eebb1e..1eeffb7fbb16 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -256,7 +256,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 			data_saved->buffer = buffer;
 		}
 	}
-	cpu_set(smp_processor_id(), data->cpu_event);
+	cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
 	if (irqsafe) {
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -274,7 +274,7 @@ salinfo_timeout_check(struct salinfo_data *data)
 	unsigned long flags;
 	if (!data->open)
 		return;
-	if (!cpus_empty(data->cpu_event)) {
+	if (!cpumask_empty(&data->cpu_event)) {
 		spin_lock_irqsave(&data_saved_lock, flags);
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -308,7 +308,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
 	int i, n, cpu = -1;
 
 retry:
-	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
+	if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		if (down_interruptible(&data->mutex))
@@ -317,9 +317,9 @@ retry:
 
 	n = data->cpu_check;
 	for (i = 0; i < nr_cpu_ids; i++) {
-		if (cpu_isset(n, data->cpu_event)) {
+		if (cpumask_test_cpu(n, &data->cpu_event)) {
 			if (!cpu_online(n)) {
-				cpu_clear(n, data->cpu_event);
+				cpumask_clear_cpu(n, &data->cpu_event);
 				continue;
 			}
 			cpu = n;
@@ -451,7 +451,7 @@ retry:
 	call_on_cpu(cpu, salinfo_log_read_cpu, data);
 	if (!data->log_size) {
 		data->state = STATE_NO_DATA;
-		cpu_clear(cpu, data->cpu_event);
+		cpumask_clear_cpu(cpu, &data->cpu_event);
 	} else {
 		data->state = STATE_LOG_RECORD;
 	}
@@ -491,11 +491,11 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 	unsigned long flags;
 	spin_lock_irqsave(&data_saved_lock, flags);
 	data->state = STATE_NO_DATA;
-	if (!cpu_isset(cpu, data->cpu_event)) {
+	if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		return 0;
 	}
-	cpu_clear(cpu, data->cpu_event);
+	cpumask_clear_cpu(cpu, &data->cpu_event);
 	if (data->saved_num) {
 		shift1_data_saved(data, data->saved_num - 1);
 		data->saved_num = 0;
@@ -509,7 +509,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 	salinfo_log_new_read(cpu, data);
 	if (data->state == STATE_LOG_RECORD) {
 		spin_lock_irqsave(&data_saved_lock, flags);
-		cpu_set(cpu, data->cpu_event);
+		cpumask_set_cpu(cpu, &data->cpu_event);
 		salinfo_work_to_do(data);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
@@ -581,7 +581,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 		for (i = 0, data = salinfo_data;
 		     i < ARRAY_SIZE(salinfo_data);
 		     ++i, ++data) {
-			cpu_set(cpu, data->cpu_event);
+			cpumask_set_cpu(cpu, &data->cpu_event);
 			salinfo_work_to_do(data);
 		}
 		spin_unlock_irqrestore(&data_saved_lock, flags);
@@ -601,7 +601,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 					shift1_data_saved(data, j);
 				}
 			}
-			cpu_clear(cpu, data->cpu_event);
+			cpumask_clear_cpu(cpu, &data->cpu_event);
 		}
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		break;
@@ -659,7 +659,7 @@ salinfo_init(void)
 
 		/* we missed any events before now */
 		for_each_online_cpu(j)
-			cpu_set(j, data->cpu_event);
+			cpumask_set_cpu(j, &data->cpu_event);
 
 		*sdir++ = dir;
 	}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d86669bcdfb2..b9761389cb8d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -562,8 +562,8 @@ setup_arch (char **cmdline_p)
 # ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
 # endif
-	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)),
+	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
+		32 : cpumask_weight(&early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #endif /* CONFIG_APCI_BOOT */
@@ -702,7 +702,8 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
-	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
+	seq_printf(m, "siblings   : %u\n",
+		   cpumask_weight(&cpu_core_map[cpunum]));
 	if (c->socket_id != -1)
 		seq_printf(m, "physical id: %u\n", c->socket_id);
 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
@@ -933,8 +934,8 @@ cpu_init (void)
 	 * (must be done after per_cpu area is setup)
 	 */
 	if (smp_processor_id() == 0) {
-		cpu_set(0, per_cpu(cpu_sibling_map, 0));
-		cpu_set(0, cpu_core_map[0]);
+		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
+		cpumask_set_cpu(0, &cpu_core_map[0]);
 	} else {
 		/*
 		 * Set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9fcd4e63048f..7f706d4f84f7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -262,11 +262,11 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 	preempt_disable();
 	mycpu = smp_processor_id();
 
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu(cpu, &cpumask)
 		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
 
 	mb();
-	for_each_cpu_mask(cpu, cpumask) {
+	for_each_cpu(cpu, &cpumask) {
 		if (cpu == mycpu)
 			flush_mycpu = 1;
 		else
@@ -276,7 +276,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 	if (flush_mycpu)
 		smp_local_flush_tlb();
 
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu(cpu, &cpumask)
 		while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
 			udelay(FLUSH_DELAY);
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 547a48d78bd7..15051e9c2c6f 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -434,7 +434,7 @@ smp_callin (void)
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, &cpu_callin_map);
 	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -475,13 +475,13 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (cpu_isset(cpu, cpu_callin_map))
+		if (cpumask_test_cpu(cpu, &cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (!cpu_isset(cpu, cpu_callin_map)) {
+	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
@@ -541,7 +541,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 	smp_setup_percpu_timer();
 
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -565,7 +565,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 void smp_prepare_boot_cpu(void)
 {
 	set_cpu_online(smp_processor_id(), true);
-	cpu_set(smp_processor_id(), cpu_callin_map);
+	cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
@@ -577,10 +577,10 @@ clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-	for_each_cpu_mask(i, cpu_core_map[cpu])
-		cpu_clear(cpu, cpu_core_map[i]);
+	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+	for_each_cpu(i, &cpu_core_map[cpu])
+		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
 
 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
@@ -592,12 +592,12 @@ remove_siblinginfo(int cpu)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
+		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
 
-	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
@@ -673,7 +673,7 @@ int __cpu_disable(void)
 	remove_siblinginfo(cpu);
 	fixup_irqs();
 	local_flush_tlb_all();
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	return 0;
 }
 
@@ -718,11 +718,13 @@ static inline void set_cpu_sibling_map(int cpu)
 
 	for_each_online_cpu(i) {
 		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+				cpumask_set_cpu(i,
+					&per_cpu(cpu_sibling_map, cpu));
+				cpumask_set_cpu(cpu,
+					&per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
@@ -742,7 +744,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 * Already booted cpu? not valid anymore since we dont
 	 * do idle loop tightspin anymore.
 	 */
-	if (cpu_isset(cpu, cpu_callin_map))
+	if (cpumask_test_cpu(cpu, &cpu_callin_map))
 		return -EINVAL;
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
@@ -753,8 +755,8 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-		cpu_set(cpu, cpu_core_map[cpu]);
+		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
 		return 0;
 	}
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 965ab42fabb0..c01fe8991244 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -148,7 +148,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 
 	if (cpu_data(cpu)->threads_per_core <= 1 &&
 	    cpu_data(cpu)->cores_per_socket <= 1) {
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
 		return;
 	}
 
@@ -164,7 +164,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 		if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
 		    && cpu_data(j)->core_id == csi.log1_cid
 		    && cpu_data(j)->thread_id == csi.log1_tid)
-			cpu_set(j, this_leaf->shared_cpu_map);
+			cpumask_set_cpu(j, &this_leaf->shared_cpu_map);
 
 		i++;
 	} while (i < num_shared &&
@@ -177,7 +177,7 @@ static void cache_shared_cpu_map_setup(unsigned int cpu,
 static void cache_shared_cpu_map_setup(unsigned int cpu,
 				struct cache_info * this_leaf)
 {
-	cpu_set(cpu, this_leaf->shared_cpu_map);
+	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
 	return;
 }
 #endif