author		Jens Axboe <jens.axboe@oracle.com>	2008-06-06 05:18:06 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:24:35 -0400
commit		8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 (patch)
tree		6cb6767064d2d43441212566da2d83dcc9a0cd8e /net
parent		490f5de52a87063fcb40e3b22f61b0779603ff6d (diff)
smp_call_function: get rid of the unused nonatomic/retry argument
It's never used and the comments refer to nonatomic and retry
interchangeably. So get rid of it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
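For context, a minimal sketch of the signature change the callers below rely on (prototypes paraphrased, not copied verbatim from the tree): the unused nonatomic/retry parameter is dropped, so callers keep only the final wait flag.

/* Sketch only -- approximate prototypes before and after this series. */

/* Before: a nonatomic/retry flag had to be passed but was never used. */
int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                             int nonatomic, int wait);

/* After: only wait remains. The flow.c hunk below keeps wait == 0
 * (do not block for completion); the iucv.c hunks keep wait == 1. */
int smp_call_function(void (*func)(void *info), void *info, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                             int wait);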
Diffstat (limited to 'net')
-rw-r--r--	net/core/flow.c		 2
-rw-r--r--	net/iucv/iucv.c		14
2 files changed, 8 insertions, 8 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 19991175fdeb..5cf81052d044 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -298,7 +298,7 @@ void flow_cache_flush(void)
         init_completion(&info.completion);
 
         local_bh_disable();
-        smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
         flow_cache_flush_tasklet((unsigned long)&info);
         local_bh_enable();
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..94d5a45c3a57 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
                 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
                     !cpu_isset(cpu, iucv_irq_cpumask))
                         smp_call_function_single(cpu, iucv_allow_cpu,
-                                                 NULL, 0, 1);
+                                                 NULL, 1);
         preempt_enable();
 }
 
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
         cpumask = iucv_irq_cpumask;
         cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
         for_each_cpu_mask(cpu, cpumask)
-                smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+                smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
         rc = -EIO;
         preempt_disable();
         for_each_online_cpu(cpu)
-                smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+                smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
         preempt_enable();
         if (cpus_empty(iucv_buffer_cpumask))
                 /* No cpu could declare an iucv buffer. */
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
         case CPU_ONLINE_FROZEN:
         case CPU_DOWN_FAILED:
         case CPU_DOWN_FAILED_FROZEN:
-                smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+                smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
                 break;
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                 if (cpus_empty(cpumask))
                         /* Can't offline last IUCV enabled cpu. */
                         return NOTIFY_BAD;
-                smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+                smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
                 if (cpus_empty(iucv_irq_cpumask))
                         smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-                                                 iucv_allow_cpu, NULL, 0, 1);
+                                                 iucv_allow_cpu, NULL, 1);
                 break;
         }
         return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
          * pending interrupts force them to the work queue by calling
          * an empty function on all cpus.
          */
-        smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+        smp_call_function(__iucv_cleanup_queue, NULL, 1);
         spin_lock_irq(&iucv_queue_lock);
         list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
                 /* Remove stale work items from the task queue. */