commit 8691e5a8f691cc2a4fda0651e8d307aaba0e7d68
tree   6cb6767064d2d43441212566da2d83dcc9a0cd8e
parent 490f5de52a87063fcb40e3b22f61b0779603ff6d
author    Jens Axboe <jens.axboe@oracle.com> 2008-06-06 05:18:06 -0400
committer Jens Axboe <jens.axboe@oracle.com> 2008-06-26 05:24:35 -0400

smp_call_function: get rid of the unused nonatomic/retry argument

It's never used and the comments refer to nonatomic and retry
interchangeably. So get rid of it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
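As an illustration of the interface change (a minimal sketch distilled from the hunks below, not itself part of the patch; ipi_flush_tlb_mm and mm stand in for any handler and argument):

	/* before: callers passed a nonatomic/retry flag that no
	 * implementation ever read */
	smp_call_function(ipi_flush_tlb_mm, mm, 1, 1);
	smp_call_function_single(cpu, ipi_flush_tlb_mm, mm, 1, 1);

	/* after: only func, info and wait remain */
	smp_call_function(ipi_flush_tlb_mm, mm, 1);
	smp_call_function_single(cpu, ipi_flush_tlb_mm, mm, 1);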
-rw-r--r--  arch/alpha/kernel/core_marvel.c       |   2
-rw-r--r--  arch/alpha/kernel/smp.c               |   6
-rw-r--r--  arch/alpha/oprofile/common.c          |   6
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c   |   2
-rw-r--r--  arch/arm/vfp/vfpmodule.c              |   2
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c       |   5
-rw-r--r--  arch/ia64/kernel/mca.c                |   2
-rw-r--r--  arch/ia64/kernel/palinfo.c            |   2
-rw-r--r--  arch/ia64/kernel/perfmon.c            |   2
-rw-r--r--  arch/ia64/kernel/process.c            |   2
-rw-r--r--  arch/ia64/kernel/smpboot.c            |   2
-rw-r--r--  arch/ia64/kernel/uncached.c           |   5
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c   |   2
-rw-r--r--  arch/m32r/kernel/smp.c                |   4
-rw-r--r--  arch/mips/kernel/smp.c                |   4
-rw-r--r--  arch/mips/mm/c-r4k.c                  |  18
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c  |   2
-rw-r--r--  arch/mips/sibyte/cfe/setup.c          |   2
-rw-r--r--  arch/mips/sibyte/sb1250/prom.c        |   2
-rw-r--r--  arch/powerpc/kernel/smp.c             |   2
-rw-r--r--  arch/s390/appldata/appldata_base.c    |   4
-rw-r--r--  arch/s390/kernel/smp.c                |  16
-rw-r--r--  arch/s390/kernel/time.c               |   4
-rw-r--r--  arch/sh/kernel/smp.c                  |  10
-rw-r--r--  arch/sparc64/kernel/smp.c             |  12
-rw-r--r--  arch/um/kernel/smp.c                  |   3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c       |   4
-rw-r--r--  arch/x86/kernel/cpuid.c               |   2
-rw-r--r--  arch/x86/kernel/ldt.c                 |   2
-rw-r--r--  arch/x86/kernel/nmi_32.c              |   2
-rw-r--r--  arch/x86/kernel/nmi_64.c              |   2
-rw-r--r--  arch/x86/kernel/smp.c                 |   2
-rw-r--r--  arch/x86/kernel/vsyscall_64.c         |   2
-rw-r--r--  arch/x86/kvm/vmx.c                    |   2
-rw-r--r--  arch/x86/kvm/x86.c                    |   2
-rw-r--r--  arch/x86/lib/msr-on-cpu.c             |   8
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c   |   2
-rw-r--r--  arch/x86/xen/smp.c                    |   2
-rw-r--r--  drivers/acpi/processor_idle.c         |   2
-rw-r--r--  drivers/cpuidle/cpuidle.c             |   2
-rw-r--r--  include/asm-alpha/smp.h               |   2
-rw-r--r--  include/asm-sparc/smp.h               |   2
-rw-r--r--  include/linux/smp.h                   |   8
-rw-r--r--  kernel/smp.c                          |   6
-rw-r--r--  kernel/softirq.c                      |   2
-rw-r--r--  kernel/time/tick-broadcast.c          |   2
-rw-r--r--  net/core/flow.c                       |   2
-rw-r--r--  net/iucv/iucv.c                       |  14
-rw-r--r--  virt/kvm/kvm_main.c                   |   6

49 files changed, 95 insertions(+), 108 deletions(-)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index ced4aae8b804..04dcc5e5d4c1 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -662,7 +662,7 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 	if (smp_processor_id() != boot_cpuid)
 		smp_call_function_single(boot_cpuid,
 					 __marvel_access_rtc,
-					 &rtc_access, 1, 1);
+					 &rtc_access, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 95c905be9154..44114c8dbb2a 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -710,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 
-	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
 
@@ -763,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	data.mm = mm;
 	data.addr = addr;
 
-	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
 
@@ -815,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		}
 	}
 
-	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
 
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 9fc0eeb4f0ab..7c3d5ec6ec67 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
 	model->reg_setup(&reg, ctr, &sys);
 
 	/* Configure the registers on all cpus.  */
-	(void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+	(void)smp_call_function(model->cpu_setup, &reg, 1);
 	model->cpu_setup(&reg);
 	return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-	(void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
 	op_axp_cpu_start(NULL);
 	return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-	(void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
 	op_axp_cpu_stop(NULL);
 }
 
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 74fae6045650..4458705021e0 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
 	data.ret = 0;
 
 	preempt_disable();
-	smp_call_function(em_func, &data, 1, 1);
+	smp_call_function(em_func, &data, 1);
 	em_func(&data);
 	preempt_enable();
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 32455c633f1c..c0d2c9bb952b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -352,7 +352,7 @@ static int __init vfp_init(void)
 	else if (vfpsid & FPSID_NODOUBLE) {
 		printk("no double precision support\n");
 	} else {
-		smp_call_function(vfp_enable, NULL, 1, 1);
+		smp_call_function(vfp_enable, NULL, 1);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
 		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index a9c3334e46c9..952a24b2f5a9 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy)
 /* Other calls */
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	cpumask_t cpu_mask = CPU_MASK_ALL;
 	struct call_data_struct data;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..9cd818cc7008 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					 NULL, 1, 0);
+					 NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b0104..9baa48255c12 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..fabaf08d9a69 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index eaa1b6795a13..9d1d429c6c59 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d89..636588e7e068 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 	if (use_ipi) {
 		/* use an interprocessor interrupt to call SAL */
 		smp_call_function_single(cpu, sn_hwperf_call_sal,
-					 op_info, 1, 1);
+					 op_info, 1);
 	}
 	else {
 		/* migrate the task before calling SAL */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 74eb7bcd5a40..7577f971ea4e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -212,7 +212,7 @@ void smp_flush_tlb_all(void)
 	local_irq_save(flags);
 	__flush_tlb_all();
 	local_irq_restore(flags);
-	smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+	smp_call_function(flush_tlb_all_ipi, NULL, 1);
 	preempt_enable();
 }
 
@@ -505,7 +505,7 @@ void smp_invalidate_interrupt(void)
  *==========================================================================*/
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /*==========================================================================*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c75b26cb61df..7a9ae830be86 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,7 +167,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -266,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
-	smp_call_function(func, info, 1, 1);
+	smp_call_function(func, info, 1);
 #endif
 }
 
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 27096751ddce..71df3390c07b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
  * primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, retry, wait);
+	smp_call_function(func, info, wait);
 #endif
 	func(info);
 	preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1, 1);
+				1);
 }
 
 struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
 	instruction_hazard();
 }
 
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435846a6..cf4c868715ac 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
 #ifdef CONFIG_SMP
 	if (smp_processor_id())
 		/* CPU 1 */
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
 	prom_cpu0_exit(NULL);
 }
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce826f8bf..fd9604d5555a 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
 		if (!reboot_smp) {
 			/* Get CPU 0 to do the cfe_exit */
 			reboot_smp = 1;
-			smp_call_function(cfe_linux_exit, arg, 1, 0);
+			smp_call_function(cfe_linux_exit, arg, 0);
 		}
 	} else {
 		printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3de86c..65b1af66b674 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
 {
 #ifdef CONFIG_SMP
 	if (smp_processor_id()) {
-		smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+		smp_call_function(prom_cpu0_exit, NULL, 1);
 	}
 #endif
 	while(1);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 37a5ab410dcc..5191b46a611e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -168,7 +168,7 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 0, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ad40729bec3d..837a3b3e7759 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -209,7 +209,7 @@ __appldata_vtimer_setup(int cmd)
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
 			smp_call_function_single(i, add_virt_timer_periodic,
 						 &per_cpu(appldata_timer, i),
-						 0, 1);
+						 1);
 		}
 		appldata_timer_active = 1;
 		P_INFO("Monitoring timer started.\n");
@@ -236,7 +236,7 @@ __appldata_vtimer_setup(int cmd)
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
 			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 0, 1);
+						 &args, 1);
 		}
 	}
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d4fa4b1c74c..276b105fb2a4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -109,7 +109,7 @@ static void do_call_function(void)
 }
 
 static void __smp_call_function_map(void (*func) (void *info), void *info,
-				    int nonatomic, int wait, cpumask_t map)
+				    int wait, cpumask_t map)
 {
 	struct call_data_struct data;
 	int cpu, local = 0;
@@ -162,7 +162,6 @@ out:
  * smp_call_function:
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on all other CPUs.
@@ -170,15 +169,14 @@ out:
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*func) (void *info), void *info, int wait)
 {
 	cpumask_t map;
 
 	spin_lock(&call_lock);
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, wait, map);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
  * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on one processor.
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
  * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
+			     int wait)
 {
 	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, nonatomic, wait,
-				cpumask_of_cpu(cpu));
+	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 {
 	spin_lock(&call_lock);
 	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, 0, wait, mask);
+	__smp_call_function_map(func, info, wait, mask);
 	spin_unlock(&call_lock);
 	return 0;
 }
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd5..bf7bf2c2236a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -690,7 +690,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	 */
 	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
+	smp_call_function(etr_sync_cpu_start, NULL, 0);
 	local_irq_disable();
 	etr_enable_sync_clock();
 
@@ -729,7 +729,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
-	smp_call_function(etr_sync_cpu_end,NULL,0,0);
+	smp_call_function(etr_sync_cpu_end,NULL,0);
 	preempt_enable();
 	return rc;
 }
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2ed8dceb297b..71781ba2675b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -168,7 +168,7 @@ static void stop_this_cpu(void *unused)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, 0, 1, 0);
+	smp_call_function(stop_this_cpu, 0, 0);
 }
 
 void arch_send_call_function_ipi(cpumask_t mask)
@@ -223,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -260,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -303,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -327,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
 	fd.addr1 = asid;
 	fd.addr2 = vaddr;
 
-	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
 	local_flush_tlb_one(asid, vaddr);
 }
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b82d017a1744..c099d96f1239 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -807,7 +807,6 @@ extern unsigned long xcall_call_function;
  * smp_call_function(): Run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code. Does not return until
@@ -817,8 +816,7 @@ extern unsigned long xcall_call_function;
  * hardware interrupt handler or from a bottom half handler.
  */
 static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-					  int nonatomic, int wait,
-					  cpumask_t mask)
+					  int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
 	int cpus;
@@ -853,11 +851,9 @@ out_unlock:
 	return 0;
 }
 
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
-	return sparc64_smp_call_function_mask(func, info, nonatomic, wait,
-					      cpu_online_map);
+	return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
 }
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
@@ -894,7 +890,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index e1062ec36d40..be2d50c3aa95 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu)
 	atomic_inc(&scf_finished);
 }
 
-int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*_func)(void *info), void *_info, int wait)
 {
 	int cpus = num_online_cpus() - 1;
 	int i;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6a1e278d9323..290652cefddb 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -222,7 +222,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	atomic_set(&data.gate,0);
 
 	/* Start the ball rolling on other CPUs */
-	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);
@@ -822,7 +822,7 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index daff52a62248..336dd43c9158 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -95,7 +95,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
 		if (copy_to_user(tmp, &cmd, 16))
 			return -EFAULT;
 		tmp += 16;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0224c3637c73..cb0a6398c64b 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		load_LDT(pc);
 		mask = cpumask_of_cpu(smp_processor_id());
 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-			smp_call_function(flush_ldt, NULL, 1, 1);
+			smp_call_function(flush_ldt, NULL, 1);
 		preempt_enable();
 #else
 		load_LDT(pc);
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 84160f74eeb0..5562dab0bd20 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -87,7 +87,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for_each_possible_cpu(cpu)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 5a29ded994fa..2f1e4f503c9e 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -96,7 +96,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 575aa3d7248a..56546e8a13ac 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -164,7 +164,7 @@ static void native_smp_send_stop(void)
 	if (reboot_force)
 		return;
 
-	smp_call_function(stop_this_cpu, NULL, 0, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 61efa2f7d564..0a03d57f9b3b 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -278,7 +278,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
 	return NOTIFY_DONE;
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 540e95179074..5534fe59b5fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
 {
 	if (vmx->vcpu.cpu == -1)
 		return;
-	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
+	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
 	vmx->launched = 0;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63a77caa59f1..0faa2546b1cd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	 * So need not to call smp_call_function_single() in that case.
 	 */
 	if (vcpu->guest_mode && vcpu->cpu != cpu)
-		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
 	put_cpu();
 }
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 57d043fa893e..d5a2b39f882b 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 
 	rv.msr_no = msr_no;
 	if (safe) {
-		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	}
 	*l = rv.l;
 	*h = rv.h;
@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 	rv.l = l;
 	rv.h = h;
 	if (safe) {
-		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
 		err = rv.err;
 	} else {
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 	}
 
 	return err;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index cb34407a9930..04f596eab749 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1113,7 +1113,7 @@ int safe_smp_processor_id(void)
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+	smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index b3786e749b8e..a1651d029ea8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -331,7 +331,7 @@ static void stop_self(void *v)
 
 void xen_smp_send_stop(void)
 {
-	smp_call_function(stop_self, NULL, 0, 0);
+	smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 556ee1585192..4976e5db2b3f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1339,7 +1339,7 @@ static void smp_callback(void *v)
 static int acpi_processor_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	smp_call_function(smp_callback, NULL, 0, 1);
+	smp_call_function(smp_callback, NULL, 1);
 	return NOTIFY_OK;
 }
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 23554b676d6e..5405769020a1 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -340,7 +340,7 @@ static void smp_callback(void *v)
 static int cpuidle_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	smp_call_function(smp_callback, NULL, 0, 1);
+	smp_call_function(smp_callback, NULL, 1);
 	return NOTIFY_OK;
 }
 
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 2f60a362d75e..544c69af8168 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -53,7 +53,7 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
 #else /* CONFIG_SMP */
 
 #define hard_smp_processor_id()	0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu)    ({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu)    ({ 0; })
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..b61e74bea06a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		       unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	xc1((smpfunc_t)func, (unsigned long)info);
 	return 0;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index eac3e062250f..338cad1b9548 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -62,11 +62,11 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int wait);
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 			   int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-			     int retry, int wait);
+			     int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*
@@ -119,7 +119,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 {
 	return 0;
 }
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
 #define on_each_cpu(func,info,retry,wait)	\
 	({					\
@@ -131,7 +131,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
 ({ \
 	WARN_ON(cpuid != 0);	\
 	local_irq_disable();	\
diff --git a/kernel/smp.c b/kernel/smp.c
index f77b75c027ad..7e0432a4a0e2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -195,7 +195,6 @@ void generic_smp_call_function_single_interrupt(void)
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @retry: Unused
  * @wait: If true, wait until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code. Note that @wait
@@ -203,7 +202,7 @@ void generic_smp_call_function_single_interrupt(void)
  * we fall back to on-stack allocation.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int retry, int wait)
+			     int wait)
 {
 	struct call_single_data d;
 	unsigned long flags;
@@ -339,7 +338,6 @@ EXPORT_SYMBOL(smp_call_function_mask);
  * smp_call_function(): Run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @natomic: Unused
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code.
@@ -351,7 +349,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+int smp_call_function(void (*func)(void *), void *info, int wait)
 {
 	int ret;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 36e061740047..d73afb4764ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -679,7 +679,7 @@ int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
 	int ret = 0;
 
 	preempt_disable();
-	ret = smp_call_function(func, info, retry, wait);
+	ret = smp_call_function(func, info, wait);
 	local_irq_disable();
 	func(info);
 	local_irq_enable();
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 57a1f02e5ec0..75e718539dcb 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -266,7 +266,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
266 "offline CPU #%d\n", *oncpu); 266 "offline CPU #%d\n", *oncpu);
267 else 267 else
268 smp_call_function_single(*oncpu, tick_do_broadcast_on_off, 268 smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
269 &reason, 1, 1); 269 &reason, 1);
270} 270}
271 271
272/* 272/*
diff --git a/net/core/flow.c b/net/core/flow.c
index 19991175fdeb..5cf81052d044 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -298,7 +298,7 @@ void flow_cache_flush(void)
 	init_completion(&info.completion);
 
 	local_bh_disable();
-	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
 	flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..94d5a45c3a57 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
-						 NULL, 0, 1);
+						 NULL, 1);
 	preempt_enable();
 }
 
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask(cpu, cpumask)
-		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 0, 1);
+						 iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
 	 * pending interrupts force them to the work queue by calling
 	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	smp_call_function(__iucv_cleanup_queue, NULL, 1);
 	spin_lock_irq(&iucv_queue_lock);
 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
 		/* Remove stale work items from the task queue. */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2d29e260da3d..ea1f595f8a87 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1266,12 +1266,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 	case CPU_UP_CANCELED:
 		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
 		       cpu);
-		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_disable, NULL, 1);
 		break;
 	case CPU_ONLINE:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
+		smp_call_function_single(cpu, hardware_enable, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -1474,7 +1474,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu,
 					 kvm_arch_check_processor_compat,
-					 &r, 0, 1);
+					 &r, 1);
 		if (r < 0)
 			goto out_free_1;
 	}