author     Rusty Russell <rusty@rustcorp.com.au>    2008-12-29 17:35:18 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>    2008-12-29 17:35:18 -0500
commit     b29179c3d32021d79c11ece7199a1da41d31b1b7 (patch)
tree       d86d238d7298092b7cd2886dc0a3d8b01cf9af72 /drivers/infiniband/hw
parent     259c4ddd00237e5072921afa15a900839643fd98 (diff)
cpumask: use new cpumask API in drivers/infiniband/hw/ehca
Impact: cleanup
We're moving from handing around cpumask_t's to handing around struct
cpumask *'s. cpus_*, cpumask_t and cpu_*_map are deprecated: convert
to cpumask_*, cpu_*_mask.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Tested-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Cc: Christoph Raisch <raisch@de.ibm.com>
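[Editor's note] For readers following the conversion described above, the general pattern looks roughly like the sketch below. The function names count_online_old() and count_online_new() are invented for illustration; only the cpumask helpers themselves come from the kernel API, and the deprecated forms no longer build on kernels where they have since been removed.

#include <linux/cpumask.h>

/* Old style: a full cpumask_t is copied by value into the callee. */
static int count_online_old(cpumask_t mask)
{
        return cpus_weight(mask);               /* deprecated helper */
}

/* New style: only a const pointer is handed around, so the mask can
 * live in storage sized for nr_cpu_ids rather than NR_CPUS. */
static int count_online_new(const struct cpumask *mask)
{
        return cpumask_weight(mask);
}

A caller that used to write count_online_old(cpu_online_map) would now write count_online_new(cpu_online_mask).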
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 6305209fdea8..3128a5090dbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 
         WARN_ON_ONCE(!in_interrupt());
         if (ehca_debug_level >= 3)
-                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+                ehca_dmp(cpu_online_mask, cpumask_size(), "");
 
         spin_lock_irqsave(&pool->last_cpu_lock, flags);
-        cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+        cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
         if (cpu >= nr_cpu_ids)
-                cpu = first_cpu(cpu_online_map);
+                cpu = cpumask_first(cpu_online_mask);
         pool->last_cpu = cpu;
         spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
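[Editor's note] Two things change in this hunk: the debug dump now passes the pointer cpu_online_mask together with cpumask_size() (which can be smaller than sizeof(cpumask_t) on kernels built with CONFIG_CPUMASK_OFFSTACK), and the round-robin lookup switches to cpumask_next()/cpumask_first(). The wrap-around logic itself is unchanged; a minimal sketch, with an invented name and the pool locking left out:

#include <linux/cpumask.h>

/* Sketch of the wrap-around search above; not the driver's actual
 * find_next_online_cpu(), which also serialises on pool->last_cpu_lock. */
static int next_online_cpu_after(int last)
{
        int cpu = cpumask_next(last, cpu_online_mask);

        if (cpu >= nr_cpu_ids)                          /* ran past the last set bit */
                cpu = cpumask_first(cpu_online_mask);   /* wrap to the start */
        return cpu;
}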
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
         case CPU_UP_CANCELED_FROZEN:
                 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
                 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-                kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+                kthread_bind(cct->task, cpumask_any(cpu_online_mask));
                 destroy_comp_task(pool, cpu);
                 break;
         case CPU_ONLINE:
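[Editor's note] Here any_online_cpu(cpu_online_map) becomes cpumask_any(cpu_online_mask): pick an arbitrary CPU that is still online and rebind the per-CPU completion thread there before it is torn down. A sketch of that idiom, with tsk standing in for cct->task:

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative only: move a worker thread to any CPU that is still
 * online, e.g. after the CPU it was bound to has gone away. */
static void rebind_worker_anywhere(struct task_struct *tsk)
{
        kthread_bind(tsk, cpumask_any(cpu_online_mask));
}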
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
                 return -ENOMEM;
 
         spin_lock_init(&pool->last_cpu_lock);
-        pool->last_cpu = any_online_cpu(cpu_online_map);
+        pool->last_cpu = cpumask_any(cpu_online_mask);
 
         pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
         if (pool->cpu_comp_tasks == NULL) {
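[Editor's note] The same substitution seeds the pool's round-robin cursor at creation time. cpumask_any() makes no ordering promise (in kernels of this era it effectively resolves to cpumask_first()), which is fine because the value is only a starting point. A minimal sketch with invented names, not the driver's struct ehca_comp_pool:

#include <linux/cpumask.h>
#include <linux/spinlock.h>

struct demo_pool {
        spinlock_t last_cpu_lock;
        int last_cpu;
};

static void demo_pool_init(struct demo_pool *pool)
{
        spin_lock_init(&pool->last_cpu_lock);
        /* Seed the cursor with an arbitrary online CPU. */
        pool->last_cpu = cpumask_any(cpu_online_mask);
}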