author     Rusty Russell <rusty@rustcorp.com.au>	2015-03-04 19:19:16 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>	2015-03-04 22:07:02 -0500
commit     f9b531fe14a539ec2ad802b73c9638f324e4a4ff (patch)
tree       b01a1070d33aef3ac045faca56772bc6432f283e
parent     51f7bd8590267011db7b632f53f3d32ec83ee8bb (diff)
drivers: fix up obsolete cpu function usage.
Thanks to spatch, plus manual removal of "&*".  Then a sweep for
for_each_cpu_mask => for_each_cpu.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: netdev@vger.kernel.org
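[Editor's note] The sweep is mechanical: the old cpumask_t value API (cpu_set, cpus_clear, cpu_isset, cpus_empty, cpus_weight, first_cpu, for_each_cpu_mask) becomes the struct-cpumask pointer API (cpumask_set_cpu, cpumask_clear, cpumask_test_cpu, cpumask_empty, cpumask_weight, cpumask_first, for_each_cpu), with callers passing &mask instead of mask. Below is a minimal sketch of the new-style calls; the module and mask names are hypothetical and not taken from this patch, with the replaced old-style calls noted in comments.

/*
 * cpumask_demo.c -- hypothetical example, illustration only; not part of
 * this patch.  Exercises the struct-cpumask pointer API the patch converts
 * drivers to.
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static struct cpumask demo_mask;	/* hypothetical mask, for illustration */

static int __init cpumask_demo_init(void)
{
	int cpu;

	cpumask_clear(&demo_mask);		/* was: cpus_clear(demo_mask) */
	cpumask_set_cpu(0, &demo_mask);		/* was: cpu_set(0, demo_mask) */

	if (cpumask_empty(&demo_mask))		/* was: cpus_empty(demo_mask) */
		return -EINVAL;

	if (cpumask_test_cpu(0, &demo_mask))	/* was: cpu_isset(0, demo_mask) */
		pr_info("cpu 0 is set\n");

	pr_info("first cpu %u, weight %u\n",
		cpumask_first(&demo_mask),	/* was: first_cpu(demo_mask) */
		cpumask_weight(&demo_mask));	/* was: cpus_weight(demo_mask) */

	for_each_cpu(cpu, &demo_mask)		/* was: for_each_cpu_mask(cpu, demo_mask) */
		pr_info("cpu %d in mask\n", cpu);

	return 0;
}

static void __exit cpumask_demo_exit(void)
{
}

module_init(cpumask_demo_init);
module_exit(cpumask_demo_exit);
MODULE_LICENSE("GPL");

The "&*" removal mentioned above presumably covers spots where the pointer conversion would otherwise yield &*mask for an argument that is already a pointer, as in the irq-gic-v3 hunk below where for_each_cpu_mask(cpu, *mask) becomes simply for_each_cpu(cpu, mask).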
-rw-r--r--  drivers/clocksource/dw_apb_timer.c  | 3
-rw-r--r--  drivers/cpuidle/coupled.c           | 6
-rw-r--r--  drivers/crypto/n2_core.c            | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3.c        | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c      | 6
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c  | 4
6 files changed, 13 insertions, 12 deletions
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f3656a6b0382..35a88097af3c 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode,
 	unsigned long period;
 	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 
-	pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+	pr_debug("%s CPU %d mode=%d\n", __func__,
+		 cpumask_first(evt->cpumask),
 		 mode);
 
 	switch (mode) {
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 73fe2f8d7f96..7936dce4b878 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
 	 */
 	smp_rmb();
 
-	for_each_cpu_mask(i, coupled->coupled_cpus)
+	for_each_cpu(i, &coupled->coupled_cpus)
 		if (cpu_online(i) && coupled->requested_state[i] < state)
 			state = coupled->requested_state[i];
 
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, coupled->coupled_cpus)
+	for_each_cpu(cpu, &coupled->coupled_cpus)
 		if (cpu != this_cpu && cpu_online(cpu))
 			cpuidle_coupled_poke(cpu);
 }
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
 	if (cpumask_empty(&dev->coupled_cpus))
 		return 0;
 
-	for_each_cpu_mask(cpu, dev->coupled_cpus) {
+	for_each_cpu(cpu, &dev->coupled_cpus) {
 		other_dev = per_cpu(cpuidle_devices, cpu);
 		if (other_dev && other_dev->coupled) {
 			coupled = other_dev->coupled;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index afd136b45f49..10a9aeff1666 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
-		cpu_set(*id, p->sharing);
+		cpumask_set_cpu(*id, &p->sharing);
 		table[*id] = p;
 	}
 	return 0;
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 		return -ENOMEM;
 	}
 
-	cpus_clear(p->sharing);
+	cpumask_clear(&p->sharing);
 	spin_lock_init(&p->lock);
 	p->q_type = q_type;
 	INIT_LIST_HEAD(&p->jobs);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34..04b6f0732c1a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -512,7 +512,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	 */
 	smp_wmb();
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
 		u16 tlist;
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9acdc080e7ec..f26307908a2a 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -345,19 +345,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 	int i;
 
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
-	if (cpus_empty(tmp))
+	if (cpumask_empty(&tmp))
 		return -EINVAL;
 
 	/* Assumption : cpumask refers to a single CPU */
 	spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
-	gic_map_to_vpe(irq, first_cpu(tmp));
+	gic_map_to_vpe(irq, cpumask_first(&tmp));
 
 	/* Update the pcpu_masks */
 	for (i = 0; i < NR_CPUS; i++)
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
-	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
 	cpumask_copy(d->affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index bea8cd2bb56c..deac41498c6e 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1122,7 +1122,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			addr + i * sizeof(struct tile_net_comps);
 
 		/* If this is a network cpu, create an iqueue. */
-		if (cpu_isset(cpu, network_cpus_map)) {
+		if (cpumask_test_cpu(cpu, &network_cpus_map)) {
 			order = get_order(NOTIF_RING_SIZE);
 			page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
 			if (page == NULL) {
@@ -1298,7 +1298,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	int first_ring, ring;
 	int instance = mpipe_instance(dev);
 	struct mpipe_data *md = &mpipe_data[instance];
-	int network_cpus_count = cpus_weight(network_cpus_map);
+	int network_cpus_count = cpumask_weight(&network_cpus_map);
 
 	if (!hash_default) {
 		netdev_err(dev, "Networking requires hash_default!\n");