aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2008-06-06 05:18:06 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-06-26 05:24:35 -0400
commit8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 (patch)
tree6cb6767064d2d43441212566da2d83dcc9a0cd8e /arch
parent490f5de52a87063fcb40e3b22f61b0779603ff6d (diff)
smp_call_function: get rid of the unused nonatomic/retry argument
It's never used and the comments refer to nonatomic and retry interchangeably. So get rid of it. Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/smp.c6
-rw-r--r--arch/alpha/oprofile/common.c6
-rw-r--r--arch/arm/oprofile/op_model_mpcore.c2
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/cris/arch-v32/kernel/smp.c5
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/palinfo.c2
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kernel/uncached.c5
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c2
-rw-r--r--arch/m32r/kernel/smp.c4
-rw-r--r--arch/mips/kernel/smp.c4
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c2
-rw-r--r--arch/mips/sibyte/cfe/setup.c2
-rw-r--r--arch/mips/sibyte/sb1250/prom.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/s390/appldata/appldata_base.c4
-rw-r--r--arch/s390/kernel/smp.c16
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/sh/kernel/smp.c10
-rw-r--r--arch/sparc64/kernel/smp.c12
-rw-r--r--arch/um/kernel/smp.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c4
-rw-r--r--arch/x86/kernel/cpuid.c2
-rw-r--r--arch/x86/kernel/ldt.c2
-rw-r--r--arch/x86/kernel/nmi_32.c2
-rw-r--r--arch/x86/kernel/nmi_64.c2
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c2
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/lib/msr-on-cpu.c8
-rw-r--r--arch/x86/mach-voyager/voyager_smp.c2
-rw-r--r--arch/x86/xen/smp.c2
38 files changed, 72 insertions, 83 deletions
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index ced4aae8b80..04dcc5e5d4c 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -662,7 +662,7 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
662 if (smp_processor_id() != boot_cpuid) 662 if (smp_processor_id() != boot_cpuid)
663 smp_call_function_single(boot_cpuid, 663 smp_call_function_single(boot_cpuid,
664 __marvel_access_rtc, 664 __marvel_access_rtc,
665 &rtc_access, 1, 1); 665 &rtc_access, 1);
666 else 666 else
667 __marvel_access_rtc(&rtc_access); 667 __marvel_access_rtc(&rtc_access);
668#else 668#else
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 95c905be915..44114c8dbb2 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -710,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
710 } 710 }
711 } 711 }
712 712
713 if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { 713 if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
714 printk(KERN_CRIT "flush_tlb_mm: timed out\n"); 714 printk(KERN_CRIT "flush_tlb_mm: timed out\n");
715 } 715 }
716 716
@@ -763,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
763 data.mm = mm; 763 data.mm = mm;
764 data.addr = addr; 764 data.addr = addr;
765 765
766 if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { 766 if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
767 printk(KERN_CRIT "flush_tlb_page: timed out\n"); 767 printk(KERN_CRIT "flush_tlb_page: timed out\n");
768 } 768 }
769 769
@@ -815,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
815 } 815 }
816 } 816 }
817 817
818 if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { 818 if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
819 printk(KERN_CRIT "flush_icache_page: timed out\n"); 819 printk(KERN_CRIT "flush_icache_page: timed out\n");
820 } 820 }
821 821
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 9fc0eeb4f0a..7c3d5ec6ec6 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
65 model->reg_setup(&reg, ctr, &sys); 65 model->reg_setup(&reg, ctr, &sys);
66 66
67 /* Configure the registers on all cpus. */ 67 /* Configure the registers on all cpus. */
68 (void)smp_call_function(model->cpu_setup, &reg, 0, 1); 68 (void)smp_call_function(model->cpu_setup, &reg, 1);
69 model->cpu_setup(&reg); 69 model->cpu_setup(&reg);
70 return 0; 70 return 0;
71} 71}
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
86static int 86static int
87op_axp_start(void) 87op_axp_start(void)
88{ 88{
89 (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1); 89 (void)smp_call_function(op_axp_cpu_start, NULL, 1);
90 op_axp_cpu_start(NULL); 90 op_axp_cpu_start(NULL);
91 return 0; 91 return 0;
92} 92}
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
101static void 101static void
102op_axp_stop(void) 102op_axp_stop(void)
103{ 103{
104 (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1); 104 (void)smp_call_function(op_axp_cpu_stop, NULL, 1);
105 op_axp_cpu_stop(NULL); 105 op_axp_cpu_stop(NULL);
106} 106}
107 107
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 74fae604565..4458705021e 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
201 data.ret = 0; 201 data.ret = 0;
202 202
203 preempt_disable(); 203 preempt_disable();
204 smp_call_function(em_func, &data, 1, 1); 204 smp_call_function(em_func, &data, 1);
205 em_func(&data); 205 em_func(&data);
206 preempt_enable(); 206 preempt_enable();
207 207
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 32455c633f1..c0d2c9bb952 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -352,7 +352,7 @@ static int __init vfp_init(void)
352 else if (vfpsid & FPSID_NODOUBLE) { 352 else if (vfpsid & FPSID_NODOUBLE) {
353 printk("no double precision support\n"); 353 printk("no double precision support\n");
354 } else { 354 } else {
355 smp_call_function(vfp_enable, NULL, 1, 1); 355 smp_call_function(vfp_enable, NULL, 1);
356 356
357 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ 357 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
358 printk("implementor %02x architecture %d part %02x variant %x rev %x\n", 358 printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index a9c3334e46c..952a24b2f5a 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy)
194/* Other calls */ 194/* Other calls */
195void smp_send_stop(void) 195void smp_send_stop(void)
196{ 196{
197 smp_call_function(stop_this_cpu, NULL, 1, 0); 197 smp_call_function(stop_this_cpu, NULL, 0);
198} 198}
199 199
200int setup_profiling_timer(unsigned int multiplier) 200int setup_profiling_timer(unsigned int multiplier)
@@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
316 * You must not call this function with disabled interrupts or from a 316 * You must not call this function with disabled interrupts or from a
317 * hardware interrupt handler or from a bottom half handler. 317 * hardware interrupt handler or from a bottom half handler.
318 */ 318 */
319int smp_call_function(void (*func)(void *info), void *info, 319int smp_call_function(void (*func)(void *info), void *info, int wait)
320 int nonatomic, int wait)
321{ 320{
322 cpumask_t cpu_mask = CPU_MASK_ALL; 321 cpumask_t cpu_mask = CPU_MASK_ALL;
323 struct call_data_struct data; 322 struct call_data_struct data;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b..9cd818cc700 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
1881 case CPU_ONLINE: 1881 case CPU_ONLINE:
1882 case CPU_ONLINE_FROZEN: 1882 case CPU_ONLINE_FROZEN:
1883 smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, 1883 smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
1884 NULL, 1, 0); 1884 NULL, 0);
1885 break; 1885 break;
1886 } 1886 }
1887 return NOTIFY_OK; 1887 return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10..e5c57f413ca 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
921 921
922 922
923 /* will send IPI to other CPU and wait for completion of remote call */ 923 /* will send IPI to other CPU and wait for completion of remote call */
924 if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) { 924 if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
925 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " 925 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
926 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); 926 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
927 return 0; 927 return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b010..9baa48255c1 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1820 int ret; 1820 int ret;
1821 1821
1822 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); 1822 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1823 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); 1823 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1824 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); 1824 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1825} 1825}
1826#endif /* CONFIG_SMP */ 1826#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb03..fabaf08d9a6 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
286{ 286{
287 smp_mb(); 287 smp_mb();
288 /* kick all the CPUs so that they exit out of pm_idle */ 288 /* kick all the CPUs so that they exit out of pm_idle */
289 smp_call_function(do_nothing, NULL, 0, 1); 289 smp_call_function(do_nothing, NULL, 1);
290} 290}
291EXPORT_SYMBOL_GPL(cpu_idle_wait); 291EXPORT_SYMBOL_GPL(cpu_idle_wait);
292 292
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index eaa1b6795a1..9d1d429c6c5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
317 317
318 go[MASTER] = 1; 318 go[MASTER] = 1;
319 319
320 if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) { 320 if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
321 printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); 321 printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
322 return; 322 return;
323 } 323 }
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3e..8eff8c1d40a 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
123 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); 123 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
124 if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { 124 if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
125 atomic_set(&uc_pool->status, 0); 125 atomic_set(&uc_pool->status, 0);
126 status = smp_call_function(uncached_ipi_visibility, uc_pool, 126 status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
127 0, 1);
128 if (status || atomic_read(&uc_pool->status)) 127 if (status || atomic_read(&uc_pool->status))
129 goto failed; 128 goto failed;
130 } else if (status != PAL_VISIBILITY_OK) 129 } else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
146 if (status != PAL_STATUS_SUCCESS) 145 if (status != PAL_STATUS_SUCCESS)
147 goto failed; 146 goto failed;
148 atomic_set(&uc_pool->status, 0); 147 atomic_set(&uc_pool->status, 0);
149 status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); 148 status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
150 if (status || atomic_read(&uc_pool->status)) 149 if (status || atomic_read(&uc_pool->status))
151 goto failed; 150 goto failed;
152 151
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d8..636588e7e06 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
629 if (use_ipi) { 629 if (use_ipi) {
630 /* use an interprocessor interrupt to call SAL */ 630 /* use an interprocessor interrupt to call SAL */
631 smp_call_function_single(cpu, sn_hwperf_call_sal, 631 smp_call_function_single(cpu, sn_hwperf_call_sal,
632 op_info, 1, 1); 632 op_info, 1);
633 } 633 }
634 else { 634 else {
635 /* migrate the task before calling SAL */ 635 /* migrate the task before calling SAL */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 74eb7bcd5a4..7577f971ea4 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -212,7 +212,7 @@ void smp_flush_tlb_all(void)
212 local_irq_save(flags); 212 local_irq_save(flags);
213 __flush_tlb_all(); 213 __flush_tlb_all();
214 local_irq_restore(flags); 214 local_irq_restore(flags);
215 smp_call_function(flush_tlb_all_ipi, NULL, 1, 1); 215 smp_call_function(flush_tlb_all_ipi, NULL, 1);
216 preempt_enable(); 216 preempt_enable();
217} 217}
218 218
@@ -505,7 +505,7 @@ void smp_invalidate_interrupt(void)
505 *==========================================================================*/ 505 *==========================================================================*/
506void smp_send_stop(void) 506void smp_send_stop(void)
507{ 507{
508 smp_call_function(stop_this_cpu, NULL, 1, 0); 508 smp_call_function(stop_this_cpu, NULL, 0);
509} 509}
510 510
511/*==========================================================================* 511/*==========================================================================*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c75b26cb61d..7a9ae830be8 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,7 +167,7 @@ static void stop_this_cpu(void *dummy)
167 167
168void smp_send_stop(void) 168void smp_send_stop(void)
169{ 169{
170 smp_call_function(stop_this_cpu, NULL, 1, 0); 170 smp_call_function(stop_this_cpu, NULL, 0);
171} 171}
172 172
173void __init smp_cpus_done(unsigned int max_cpus) 173void __init smp_cpus_done(unsigned int max_cpus)
@@ -266,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm)
266static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 266static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
267{ 267{
268#ifndef CONFIG_MIPS_MT_SMTC 268#ifndef CONFIG_MIPS_MT_SMTC
269 smp_call_function(func, info, 1, 1); 269 smp_call_function(func, info, 1);
270#endif 270#endif
271} 271}
272 272
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 27096751ddc..71df3390c07 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -43,12 +43,12 @@
43 * primary cache. 43 * primary cache.
44 */ 44 */
45static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, 45static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
46 int retry, int wait) 46 int wait)
47{ 47{
48 preempt_disable(); 48 preempt_disable();
49 49
50#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 50#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
51 smp_call_function(func, info, retry, wait); 51 smp_call_function(func, info, wait);
52#endif 52#endif
53 func(info); 53 func(info);
54 preempt_enable(); 54 preempt_enable();
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
350 350
351static void r4k___flush_cache_all(void) 351static void r4k___flush_cache_all(void)
352{ 352{
353 r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 353 r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
354} 354}
355 355
356static inline int has_valid_asid(const struct mm_struct *mm) 356static inline int has_valid_asid(const struct mm_struct *mm)
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
397 int exec = vma->vm_flags & VM_EXEC; 397 int exec = vma->vm_flags & VM_EXEC;
398 398
399 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) 399 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
400 r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 400 r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
401} 401}
402 402
403static inline void local_r4k_flush_cache_mm(void * args) 403static inline void local_r4k_flush_cache_mm(void * args)
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
429 if (!cpu_has_dc_aliases) 429 if (!cpu_has_dc_aliases)
430 return; 430 return;
431 431
432 r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 432 r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
433} 433}
434 434
435struct flush_cache_page_args { 435struct flush_cache_page_args {
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
521 args.addr = addr; 521 args.addr = addr;
522 args.pfn = pfn; 522 args.pfn = pfn;
523 523
524 r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 524 r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
525} 525}
526 526
527static inline void local_r4k_flush_data_cache_page(void * addr) 527static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
535 local_r4k_flush_data_cache_page((void *)addr); 535 local_r4k_flush_data_cache_page((void *)addr);
536 else 536 else
537 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 537 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
538 1, 1); 538 1);
539} 539}
540 540
541struct flush_icache_range_args { 541struct flush_icache_range_args {
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
571 args.start = start; 571 args.start = start;
572 args.end = end; 572 args.end = end;
573 573
574 r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 574 r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
575 instruction_hazard(); 575 instruction_hazard();
576} 576}
577 577
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
672 672
673static void r4k_flush_cache_sigtramp(unsigned long addr) 673static void r4k_flush_cache_sigtramp(unsigned long addr)
674{ 674{
675 r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 675 r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
676} 676}
677 677
678static void r4k_flush_icache_all(void) 678static void r4k_flush_icache_all(void)
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 35dc435846a..cf4c868715a 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -64,7 +64,7 @@ static void prom_exit(void)
64#ifdef CONFIG_SMP 64#ifdef CONFIG_SMP
65 if (smp_processor_id()) 65 if (smp_processor_id())
66 /* CPU 1 */ 66 /* CPU 1 */
67 smp_call_function(prom_cpu0_exit, NULL, 1, 1); 67 smp_call_function(prom_cpu0_exit, NULL, 1);
68#endif 68#endif
69 prom_cpu0_exit(NULL); 69 prom_cpu0_exit(NULL);
70} 70}
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 33fce826f8b..fd9604d5555 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg)
74 if (!reboot_smp) { 74 if (!reboot_smp) {
75 /* Get CPU 0 to do the cfe_exit */ 75 /* Get CPU 0 to do the cfe_exit */
76 reboot_smp = 1; 76 reboot_smp = 1;
77 smp_call_function(cfe_linux_exit, arg, 1, 0); 77 smp_call_function(cfe_linux_exit, arg, 0);
78 } 78 }
79 } else { 79 } else {
80 printk("Passing control back to CFE...\n"); 80 printk("Passing control back to CFE...\n");
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index cf8f6b3de86..65b1af66b67 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -66,7 +66,7 @@ static void prom_linux_exit(void)
66{ 66{
67#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
68 if (smp_processor_id()) { 68 if (smp_processor_id()) {
69 smp_call_function(prom_cpu0_exit, NULL, 1, 1); 69 smp_call_function(prom_cpu0_exit, NULL, 1);
70 } 70 }
71#endif 71#endif
72 while(1); 72 while(1);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 37a5ab410dc..5191b46a611 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -168,7 +168,7 @@ static void stop_this_cpu(void *dummy)
168 168
169void smp_send_stop(void) 169void smp_send_stop(void)
170{ 170{
171 smp_call_function(stop_this_cpu, NULL, 0, 0); 171 smp_call_function(stop_this_cpu, NULL, 0);
172} 172}
173 173
174extern struct gettimeofday_struct do_gtod; 174extern struct gettimeofday_struct do_gtod;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ad40729bec3..837a3b3e775 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -209,7 +209,7 @@ __appldata_vtimer_setup(int cmd)
209 per_cpu(appldata_timer, i).expires = per_cpu_interval; 209 per_cpu(appldata_timer, i).expires = per_cpu_interval;
210 smp_call_function_single(i, add_virt_timer_periodic, 210 smp_call_function_single(i, add_virt_timer_periodic,
211 &per_cpu(appldata_timer, i), 211 &per_cpu(appldata_timer, i),
212 0, 1); 212 1);
213 } 213 }
214 appldata_timer_active = 1; 214 appldata_timer_active = 1;
215 P_INFO("Monitoring timer started.\n"); 215 P_INFO("Monitoring timer started.\n");
@@ -236,7 +236,7 @@ __appldata_vtimer_setup(int cmd)
236 args.timer = &per_cpu(appldata_timer, i); 236 args.timer = &per_cpu(appldata_timer, i);
237 args.expires = per_cpu_interval; 237 args.expires = per_cpu_interval;
238 smp_call_function_single(i, __appldata_mod_vtimer_wrap, 238 smp_call_function_single(i, __appldata_mod_vtimer_wrap,
239 &args, 0, 1); 239 &args, 1);
240 } 240 }
241 } 241 }
242} 242}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d4fa4b1c74..276b105fb2a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -109,7 +109,7 @@ static void do_call_function(void)
109} 109}
110 110
111static void __smp_call_function_map(void (*func) (void *info), void *info, 111static void __smp_call_function_map(void (*func) (void *info), void *info,
112 int nonatomic, int wait, cpumask_t map) 112 int wait, cpumask_t map)
113{ 113{
114 struct call_data_struct data; 114 struct call_data_struct data;
115 int cpu, local = 0; 115 int cpu, local = 0;
@@ -162,7 +162,6 @@ out:
162 * smp_call_function: 162 * smp_call_function:
163 * @func: the function to run; this must be fast and non-blocking 163 * @func: the function to run; this must be fast and non-blocking
164 * @info: an arbitrary pointer to pass to the function 164 * @info: an arbitrary pointer to pass to the function
165 * @nonatomic: unused
166 * @wait: if true, wait (atomically) until function has completed on other CPUs 165 * @wait: if true, wait (atomically) until function has completed on other CPUs
167 * 166 *
168 * Run a function on all other CPUs. 167 * Run a function on all other CPUs.
@@ -170,15 +169,14 @@ out:
170 * You must not call this function with disabled interrupts, from a 169 * You must not call this function with disabled interrupts, from a
171 * hardware interrupt handler or from a bottom half. 170 * hardware interrupt handler or from a bottom half.
172 */ 171 */
173int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 172int smp_call_function(void (*func) (void *info), void *info, int wait)
174 int wait)
175{ 173{
176 cpumask_t map; 174 cpumask_t map;
177 175
178 spin_lock(&call_lock); 176 spin_lock(&call_lock);
179 map = cpu_online_map; 177 map = cpu_online_map;
180 cpu_clear(smp_processor_id(), map); 178 cpu_clear(smp_processor_id(), map);
181 __smp_call_function_map(func, info, nonatomic, wait, map); 179 __smp_call_function_map(func, info, wait, map);
182 spin_unlock(&call_lock); 180 spin_unlock(&call_lock);
183 return 0; 181 return 0;
184} 182}
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
189 * @cpu: the CPU where func should run 187 * @cpu: the CPU where func should run
190 * @func: the function to run; this must be fast and non-blocking 188 * @func: the function to run; this must be fast and non-blocking
191 * @info: an arbitrary pointer to pass to the function 189 * @info: an arbitrary pointer to pass to the function
192 * @nonatomic: unused
193 * @wait: if true, wait (atomically) until function has completed on other CPUs 190 * @wait: if true, wait (atomically) until function has completed on other CPUs
194 * 191 *
195 * Run a function on one processor. 192 * Run a function on one processor.
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
198 * hardware interrupt handler or from a bottom half. 195 * hardware interrupt handler or from a bottom half.
199 */ 196 */
200int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 197int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
201 int nonatomic, int wait) 198 int wait)
202{ 199{
203 spin_lock(&call_lock); 200 spin_lock(&call_lock);
204 __smp_call_function_map(func, info, nonatomic, wait, 201 __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
205 cpumask_of_cpu(cpu));
206 spin_unlock(&call_lock); 202 spin_unlock(&call_lock);
207 return 0; 203 return 0;
208} 204}
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
228{ 224{
229 spin_lock(&call_lock); 225 spin_lock(&call_lock);
230 cpu_clear(smp_processor_id(), mask); 226 cpu_clear(smp_processor_id(), mask);
231 __smp_call_function_map(func, info, 0, wait, mask); 227 __smp_call_function_map(func, info, wait, mask);
232 spin_unlock(&call_lock); 228 spin_unlock(&call_lock);
233 return 0; 229 return 0;
234} 230}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd..bf7bf2c2236 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -690,7 +690,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
690 */ 690 */
691 memset(&etr_sync, 0, sizeof(etr_sync)); 691 memset(&etr_sync, 0, sizeof(etr_sync));
692 preempt_disable(); 692 preempt_disable();
693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0); 693 smp_call_function(etr_sync_cpu_start, NULL, 0);
694 local_irq_disable(); 694 local_irq_disable();
695 etr_enable_sync_clock(); 695 etr_enable_sync_clock();
696 696
@@ -729,7 +729,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
729 rc = -EAGAIN; 729 rc = -EAGAIN;
730 } 730 }
731 local_irq_enable(); 731 local_irq_enable();
732 smp_call_function(etr_sync_cpu_end,NULL,0,0); 732 smp_call_function(etr_sync_cpu_end,NULL,0);
733 preempt_enable(); 733 preempt_enable();
734 return rc; 734 return rc;
735} 735}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2ed8dceb297..71781ba2675 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -168,7 +168,7 @@ static void stop_this_cpu(void *unused)
168 168
169void smp_send_stop(void) 169void smp_send_stop(void)
170{ 170{
171 smp_call_function(stop_this_cpu, 0, 1, 0); 171 smp_call_function(stop_this_cpu, 0, 0);
172} 172}
173 173
174void arch_send_call_function_ipi(cpumask_t mask) 174void arch_send_call_function_ipi(cpumask_t mask)
@@ -223,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
223 preempt_disable(); 223 preempt_disable();
224 224
225 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 225 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
226 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); 226 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
227 } else { 227 } else {
228 int i; 228 int i;
229 for (i = 0; i < num_online_cpus(); i++) 229 for (i = 0; i < num_online_cpus(); i++)
@@ -260,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
260 fd.vma = vma; 260 fd.vma = vma;
261 fd.addr1 = start; 261 fd.addr1 = start;
262 fd.addr2 = end; 262 fd.addr2 = end;
263 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); 263 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
264 } else { 264 } else {
265 int i; 265 int i;
266 for (i = 0; i < num_online_cpus(); i++) 266 for (i = 0; i < num_online_cpus(); i++)
@@ -303,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
303 303
304 fd.vma = vma; 304 fd.vma = vma;
305 fd.addr1 = page; 305 fd.addr1 = page;
306 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); 306 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
307 } else { 307 } else {
308 int i; 308 int i;
309 for (i = 0; i < num_online_cpus(); i++) 309 for (i = 0; i < num_online_cpus(); i++)
@@ -327,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
327 fd.addr1 = asid; 327 fd.addr1 = asid;
328 fd.addr2 = vaddr; 328 fd.addr2 = vaddr;
329 329
330 smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1); 330 smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
331 local_flush_tlb_one(asid, vaddr); 331 local_flush_tlb_one(asid, vaddr);
332} 332}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b82d017a174..c099d96f123 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -807,7 +807,6 @@ extern unsigned long xcall_call_function;
807 * smp_call_function(): Run a function on all other CPUs. 807 * smp_call_function(): Run a function on all other CPUs.
808 * @func: The function to run. This must be fast and non-blocking. 808 * @func: The function to run. This must be fast and non-blocking.
809 * @info: An arbitrary pointer to pass to the function. 809 * @info: An arbitrary pointer to pass to the function.
810 * @nonatomic: currently unused.
811 * @wait: If true, wait (atomically) until function has completed on other CPUs. 810 * @wait: If true, wait (atomically) until function has completed on other CPUs.
812 * 811 *
813 * Returns 0 on success, else a negative status code. Does not return until 812 * Returns 0 on success, else a negative status code. Does not return until
@@ -817,8 +816,7 @@ extern unsigned long xcall_call_function;
817 * hardware interrupt handler or from a bottom half handler. 816 * hardware interrupt handler or from a bottom half handler.
818 */ 817 */
819static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, 818static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
820 int nonatomic, int wait, 819 int wait, cpumask_t mask)
821 cpumask_t mask)
822{ 820{
823 struct call_data_struct data; 821 struct call_data_struct data;
824 int cpus; 822 int cpus;
@@ -853,11 +851,9 @@ out_unlock:
853 return 0; 851 return 0;
854} 852}
855 853
856int smp_call_function(void (*func)(void *info), void *info, 854int smp_call_function(void (*func)(void *info), void *info, int wait)
857 int nonatomic, int wait)
858{ 855{
859 return sparc64_smp_call_function_mask(func, info, nonatomic, wait, 856 return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
860 cpu_online_map);
861} 857}
862 858
863void smp_call_function_client(int irq, struct pt_regs *regs) 859void smp_call_function_client(int irq, struct pt_regs *regs)
@@ -894,7 +890,7 @@ static void tsb_sync(void *info)
894 890
895void smp_tsb_sync(struct mm_struct *mm) 891void smp_tsb_sync(struct mm_struct *mm)
896{ 892{
897 sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); 893 sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
898} 894}
899 895
900extern unsigned long xcall_flush_tlb_mm; 896extern unsigned long xcall_flush_tlb_mm;
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index e1062ec36d4..be2d50c3aa9 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu)
214 atomic_inc(&scf_finished); 214 atomic_inc(&scf_finished);
215} 215}
216 216
217int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, 217int smp_call_function(void (*_func)(void *info), void *_info, int wait)
218 int wait)
219{ 218{
220 int cpus = num_online_cpus() - 1; 219 int cpus = num_online_cpus() - 1;
221 int i; 220 int i;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6a1e278d932..290652cefdd 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -222,7 +222,7 @@ static void set_mtrr(unsigned int reg, unsigned long base,
222 atomic_set(&data.gate,0); 222 atomic_set(&data.gate,0);
223 223
224 /* Start the ball rolling on other CPUs */ 224 /* Start the ball rolling on other CPUs */
225 if (smp_call_function(ipi_handler, &data, 1, 0) != 0) 225 if (smp_call_function(ipi_handler, &data, 0) != 0)
226 panic("mtrr: timed out waiting for other CPUs\n"); 226 panic("mtrr: timed out waiting for other CPUs\n");
227 227
228 local_irq_save(flags); 228 local_irq_save(flags);
@@ -822,7 +822,7 @@ void mtrr_ap_init(void)
822 */ 822 */
823void mtrr_save_state(void) 823void mtrr_save_state(void)
824{ 824{
825 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); 825 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
826} 826}
827 827
828static int __init mtrr_init_finialize(void) 828static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index daff52a6224..336dd43c915 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -95,7 +95,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
95 for (; count; count -= 16) { 95 for (; count; count -= 16) {
96 cmd.eax = pos; 96 cmd.eax = pos;
97 cmd.ecx = pos >> 32; 97 cmd.ecx = pos >> 32;
98 smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); 98 smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
99 if (copy_to_user(tmp, &cmd, 16)) 99 if (copy_to_user(tmp, &cmd, 16))
100 return -EFAULT; 100 return -EFAULT;
101 tmp += 16; 101 tmp += 16;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0224c3637c7..cb0a6398c64 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
68 load_LDT(pc); 68 load_LDT(pc);
69 mask = cpumask_of_cpu(smp_processor_id()); 69 mask = cpumask_of_cpu(smp_processor_id());
70 if (!cpus_equal(current->mm->cpu_vm_mask, mask)) 70 if (!cpus_equal(current->mm->cpu_vm_mask, mask))
71 smp_call_function(flush_ldt, NULL, 1, 1); 71 smp_call_function(flush_ldt, NULL, 1);
72 preempt_enable(); 72 preempt_enable();
73#else 73#else
74 load_LDT(pc); 74 load_LDT(pc);
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 84160f74eeb..5562dab0bd2 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -87,7 +87,7 @@ int __init check_nmi_watchdog(void)
87 87
88#ifdef CONFIG_SMP 88#ifdef CONFIG_SMP
89 if (nmi_watchdog == NMI_LOCAL_APIC) 89 if (nmi_watchdog == NMI_LOCAL_APIC)
90 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 90 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
91#endif 91#endif
92 92
93 for_each_possible_cpu(cpu) 93 for_each_possible_cpu(cpu)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 5a29ded994f..2f1e4f503c9 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -96,7 +96,7 @@ int __init check_nmi_watchdog(void)
96 96
97#ifdef CONFIG_SMP 97#ifdef CONFIG_SMP
98 if (nmi_watchdog == NMI_LOCAL_APIC) 98 if (nmi_watchdog == NMI_LOCAL_APIC)
99 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 99 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
100#endif 100#endif
101 101
102 for (cpu = 0; cpu < NR_CPUS; cpu++) 102 for (cpu = 0; cpu < NR_CPUS; cpu++)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 575aa3d7248..56546e8a13a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -164,7 +164,7 @@ static void native_smp_send_stop(void)
164 if (reboot_force) 164 if (reboot_force)
165 return; 165 return;
166 166
167 smp_call_function(stop_this_cpu, NULL, 0, 0); 167 smp_call_function(stop_this_cpu, NULL, 0);
168 local_irq_save(flags); 168 local_irq_save(flags);
169 disable_local_APIC(); 169 disable_local_APIC();
170 local_irq_restore(flags); 170 local_irq_restore(flags);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 61efa2f7d56..0a03d57f9b3 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -278,7 +278,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
278{ 278{
279 long cpu = (long)arg; 279 long cpu = (long)arg;
280 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) 280 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
281 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); 281 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
282 return NOTIFY_DONE; 282 return NOTIFY_DONE;
283} 283}
284 284
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 540e9517907..5534fe59b5f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
335{ 335{
336 if (vmx->vcpu.cpu == -1) 336 if (vmx->vcpu.cpu == -1)
337 return; 337 return;
338 smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); 338 smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
339 vmx->launched = 0; 339 vmx->launched = 0;
340} 340}
341 341
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63a77caa59f..0faa2546b1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4044 * So need not to call smp_call_function_single() in that case. 4044 * So need not to call smp_call_function_single() in that case.
4045 */ 4045 */
4046 if (vcpu->guest_mode && vcpu->cpu != cpu) 4046 if (vcpu->guest_mode && vcpu->cpu != cpu)
4047 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); 4047 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
4048 put_cpu(); 4048 put_cpu();
4049} 4049}
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 57d043fa893..d5a2b39f882 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
30 30
31 rv.msr_no = msr_no; 31 rv.msr_no = msr_no;
32 if (safe) { 32 if (safe) {
33 smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); 33 smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
34 err = rv.err; 34 err = rv.err;
35 } else { 35 } else {
36 smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); 36 smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
37 } 37 }
38 *l = rv.l; 38 *l = rv.l;
39 *h = rv.h; 39 *h = rv.h;
@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
64 rv.l = l; 64 rv.l = l;
65 rv.h = h; 65 rv.h = h;
66 if (safe) { 66 if (safe) {
67 smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); 67 smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
68 err = rv.err; 68 err = rv.err;
69 } else { 69 } else {
70 smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); 70 smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
71 } 71 }
72 72
73 return err; 73 return err;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index cb34407a993..04f596eab74 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1113,7 +1113,7 @@ int safe_smp_processor_id(void)
1113/* broadcast a halt to all other CPUs */ 1113/* broadcast a halt to all other CPUs */
1114static void voyager_smp_send_stop(void) 1114static void voyager_smp_send_stop(void)
1115{ 1115{
1116 smp_call_function(smp_stop_cpu_function, NULL, 1, 1); 1116 smp_call_function(smp_stop_cpu_function, NULL, 1);
1117} 1117}
1118 1118
1119/* this function is triggered in time.c when a clock tick fires 1119/* this function is triggered in time.c when a clock tick fires
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index b3786e749b8..a1651d029ea 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -331,7 +331,7 @@ static void stop_self(void *v)
331 331
332void xen_smp_send_stop(void) 332void xen_smp_send_stop(void)
333{ 333{
334 smp_call_function(stop_self, NULL, 0, 0); 334 smp_call_function(stop_self, NULL, 0);
335} 335}
336 336
337void xen_smp_send_reschedule(int cpu) 337void xen_smp_send_reschedule(int cpu)