author     Jens Axboe <jens.axboe@oracle.com>    2008-06-06 05:18:06 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2008-06-26 05:24:35 -0400
commit     8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 (patch)
tree       6cb6767064d2d43441212566da2d83dcc9a0cd8e /net/iucv
parent     490f5de52a87063fcb40e3b22f61b0779603ff6d (diff)
smp_call_function: get rid of the unused nonatomic/retry argument
It's never used, and the comments refer to nonatomic and retry
interchangeably. So get rid of it.
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
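For context, this changes the prototypes in include/linux/smp.h roughly as sketched below (a sketch of the 2.6.26-era declarations; exact parameter names and line breaks may differ from the tree):

```c
/* Before: the nonatomic/retry flag was accepted but never used. */
int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info),
                             void *info, int nonatomic, int wait);

/* After: only the wait flag remains; wait != 0 blocks until func
 * has completed on the target cpu(s).
 */
int smp_call_function(void (*func)(void *info), void *info, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info),
                             void *info, int wait);
```

Every caller, such as the iucv code below, simply drops the extra 0 argument.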
Diffstat (limited to 'net/iucv')
-rw-r--r--    net/iucv/iucv.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..94d5a45c3a57 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
-						 NULL, 0, 1);
+						 NULL, 1);
 	preempt_enable();
 }
 
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask(cpu, cpumask)
-		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 0, 1);
+						 iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
 	 * pending interrupts force them to the work queue by calling
 	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	smp_call_function(__iucv_cleanup_queue, NULL, 1);
 	spin_lock_irq(&iucv_queue_lock);
 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
 		/* Remove stale work items from the task queue. */
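As a usage note, a minimal sketch of what a caller looks like after this change (the module and handler names here are hypothetical, not part of this patch):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>

/* Hypothetical IPI handler; it runs on each remote cpu in interrupt
 * context, so it must not sleep.
 */
static void demo_ipi(void *info)
{
}

static int __init demo_init(void)
{
	/* New three-argument form: func, info, wait.  wait == 1 blocks
	 * until demo_ipi() has finished on every other online cpu.
	 */
	smp_call_function(demo_ipi, NULL, 1);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```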