author	Mike Travis <travis@sgi.com>	2008-05-12 15:21:13 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 12:35:12 -0400
commit	363ab6f1424cdea63e5d182312d60e19077b892a (patch)
tree	e200197412691015ca8de083155985e7e460ecfc /kernel
parent	068b12772a64c2440ef2f64ac5d780688c06576f (diff)
core: use performance variant for_each_cpu_mask_nr
Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate

Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
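For reference, the two iterators differ only in the upper bound of the bitmap scan: for_each_cpu_mask() walks the cpumask up to the compile-time constant NR_CPUS, while the _nr variant stops at nr_cpu_ids, the number of possible CPUs actually detected at boot. The sketch below is an approximation of the idea, not the verbatim macro bodies from include/linux/cpumask.h in this tree:

/*
 * Approximate sketch of the two iterators -- not the verbatim
 * cpumask.h macros.  Both walk the set bits of a cpumask_t; only
 * the upper bound of the scan differs.
 */
#define for_each_cpu_mask(cpu, mask)				\
	for ((cpu) = first_cpu(mask);				\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), (mask)))

/* Performance variant: stop at nr_cpu_ids instead of NR_CPUS. */
#define for_each_cpu_mask_nr(cpu, mask)				\
	for ((cpu) = first_cpu(mask);				\
	     (cpu) < nr_cpu_ids;				\
	     (cpu) = next_cpu_nr((cpu), (mask)))

On a kernel configured with a large NR_CPUS (e.g. 4096) running on a small machine, the _nr form avoids scanning the tail of the bitmap beyond the last possible CPU at every call site touched below.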
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c	2
-rw-r--r--	kernel/rcuclassic.c	2
-rw-r--r--	kernel/rcupreempt.c	10
-rw-r--r--	kernel/sched.c	36
-rw-r--r--	kernel/sched_fair.c	2
-rw-r--r--	kernel/sched_rt.c	6
-rw-r--r--	kernel/taskstats.c	4
-rw-r--r--	kernel/workqueue.c	6
8 files changed, 34 insertions, 34 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c77bc3a1c722..50ae922c6022 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306f..251358de70b0 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -92,7 +92,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 */
 		cpumask = rcp->cpumask;
 		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
+		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
 }
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..18af270125cf 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -657,7 +657,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -675,7 +675,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/sched.c b/kernel/sched.c
index 1ed8011db826..814d6e17f1e1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd39c4b8..e398318f1014 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1022,7 +1022,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 060e87b0cb1c..d73386c6e361 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu_mask_nr(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -272,7 +272,7 @@ static int balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1000,7 +1000,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..06b17547f4e7 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask(cpu, mask) {
+		for_each_cpu_mask_nr(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..28c2b2c96ac7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	put_online_cpus();
 