about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorNadav Amit <namit@vmware.com>2019-06-13 02:48:05 -0400
committerThomas Gleixner <tglx@linutronix.de>2019-06-23 08:26:26 -0400
commitcaa759323c73676b3e48c8d9c86093c88b4aba97 (patch)
tree15c62c8f6b18b481233ab7edf9cc7cea091dfd71
parenta22793c79d6ea0a492ce1a308ec46df52ee9406e (diff)
smp: Remove smp_call_function() and on_each_cpu() return values
The return value is fixed. Remove it and amend the callers.

[ tglx: Fixup arm/bL_switcher and powerpc/rtas ]

Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20190613064813.8102-2-namit@vmware.com
-rw-r--r-- arch/alpha/kernel/smp.c      19
-rw-r--r-- arch/alpha/oprofile/common.c  6
-rw-r--r-- arch/arm/common/bL_switcher.c 6
-rw-r--r-- arch/ia64/kernel/perfmon.c   12
-rw-r--r-- arch/ia64/kernel/uncached.c   8
-rw-r--r-- arch/powerpc/kernel/rtas.c    3
-rw-r--r-- arch/x86/lib/cache-smp.c      3
-rw-r--r-- drivers/char/agp/generic.c    3
-rw-r--r-- include/linux/smp.h           7
-rw-r--r-- kernel/smp.c                 10
-rw-r--r-- kernel/up.c                   3
11 files changed, 27 insertions, 53 deletions
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index d0dccae53ba9..5f90df30be20 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -614,8 +614,7 @@ void
614smp_imb(void) 614smp_imb(void)
615{ 615{
616 /* Must wait other processors to flush their icache before continue. */ 616 /* Must wait other processors to flush their icache before continue. */
617 if (on_each_cpu(ipi_imb, NULL, 1)) 617 on_each_cpu(ipi_imb, NULL, 1);
618 printk(KERN_CRIT "smp_imb: timed out\n");
619} 618}
620EXPORT_SYMBOL(smp_imb); 619EXPORT_SYMBOL(smp_imb);
621 620
@@ -630,9 +629,7 @@ flush_tlb_all(void)
630{ 629{
631 /* Although we don't have any data to pass, we do want to 630 /* Although we don't have any data to pass, we do want to
632 synchronize with the other processors. */ 631 synchronize with the other processors. */
633 if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) { 632 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
634 printk(KERN_CRIT "flush_tlb_all: timed out\n");
635 }
636} 633}
637 634
638#define asn_locked() (cpu_data[smp_processor_id()].asn_lock) 635#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -667,9 +664,7 @@ flush_tlb_mm(struct mm_struct *mm)
667 } 664 }
668 } 665 }
669 666
670 if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { 667 smp_call_function(ipi_flush_tlb_mm, mm, 1);
671 printk(KERN_CRIT "flush_tlb_mm: timed out\n");
672 }
673 668
674 preempt_enable(); 669 preempt_enable();
675} 670}
@@ -720,9 +715,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
720 data.mm = mm; 715 data.mm = mm;
721 data.addr = addr; 716 data.addr = addr;
722 717
723 if (smp_call_function(ipi_flush_tlb_page, &data, 1)) { 718 smp_call_function(ipi_flush_tlb_page, &data, 1);
724 printk(KERN_CRIT "flush_tlb_page: timed out\n");
725 }
726 719
727 preempt_enable(); 720 preempt_enable();
728} 721}
@@ -772,9 +765,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
772 } 765 }
773 } 766 }
774 767
775 if (smp_call_function(ipi_flush_icache_page, mm, 1)) { 768 smp_call_function(ipi_flush_icache_page, mm, 1);
776 printk(KERN_CRIT "flush_icache_page: timed out\n");
777 }
778 769
779 preempt_enable(); 770 preempt_enable();
780} 771}
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 310a4ce1dccc..1b1259c7d7d1 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
65 model->reg_setup(&reg, ctr, &sys); 65 model->reg_setup(&reg, ctr, &sys);
66 66
67 /* Configure the registers on all cpus. */ 67 /* Configure the registers on all cpus. */
68 (void)smp_call_function(model->cpu_setup, &reg, 1); 68 smp_call_function(model->cpu_setup, &reg, 1);
69 model->cpu_setup(&reg); 69 model->cpu_setup(&reg);
70 return 0; 70 return 0;
71} 71}
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
86static int 86static int
87op_axp_start(void) 87op_axp_start(void)
88{ 88{
89 (void)smp_call_function(op_axp_cpu_start, NULL, 1); 89 smp_call_function(op_axp_cpu_start, NULL, 1);
90 op_axp_cpu_start(NULL); 90 op_axp_cpu_start(NULL);
91 return 0; 91 return 0;
92} 92}
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
101static void 101static void
102op_axp_stop(void) 102op_axp_stop(void)
103{ 103{
104 (void)smp_call_function(op_axp_cpu_stop, NULL, 1); 104 smp_call_function(op_axp_cpu_stop, NULL, 1);
105 op_axp_cpu_stop(NULL); 105 op_axp_cpu_stop(NULL);
106} 106}
107 107
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 57f3b7512636..17bc259729e2 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -542,16 +542,14 @@ static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
542 542
543int bL_switcher_trace_trigger(void) 543int bL_switcher_trace_trigger(void)
544{ 544{
545 int ret;
546
547 preempt_disable(); 545 preempt_disable();
548 546
549 bL_switcher_trace_trigger_cpu(NULL); 547 bL_switcher_trace_trigger_cpu(NULL);
550 ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true); 548 smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
551 549
552 preempt_enable(); 550 preempt_enable();
553 551
554 return ret; 552 return 0;
555} 553}
556EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger); 554EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
557 555
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 58a6337c0690..7c52bd2695a2 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6390,11 +6390,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6390 } 6390 }
6391 6391
6392 /* save the current system wide pmu states */ 6392 /* save the current system wide pmu states */
6393 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); 6393 on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6394 if (ret) {
6395 DPRINT(("on_each_cpu() failed: %d\n", ret));
6396 goto cleanup_reserve;
6397 }
6398 6394
6399 /* officially change to the alternate interrupt handler */ 6395 /* officially change to the alternate interrupt handler */
6400 pfm_alt_intr_handler = hdl; 6396 pfm_alt_intr_handler = hdl;
@@ -6421,7 +6417,6 @@ int
6421pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) 6417pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6422{ 6418{
6423 int i; 6419 int i;
6424 int ret;
6425 6420
6426 if (hdl == NULL) return -EINVAL; 6421 if (hdl == NULL) return -EINVAL;
6427 6422
@@ -6435,10 +6430,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6435 6430
6436 pfm_alt_intr_handler = NULL; 6431 pfm_alt_intr_handler = NULL;
6437 6432
6438 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); 6433 on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6439 if (ret) {
6440 DPRINT(("on_each_cpu() failed: %d\n", ret));
6441 }
6442 6434
6443 for_each_online_cpu(i) { 6435 for_each_online_cpu(i) {
6444 pfm_unreserve_session(NULL, 1, i); 6436 pfm_unreserve_session(NULL, 1, i);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 583f7ff6b589..c618d0745e22 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -124,8 +124,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
124 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); 124 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
125 if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { 125 if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
126 atomic_set(&uc_pool->status, 0); 126 atomic_set(&uc_pool->status, 0);
127 status = smp_call_function(uncached_ipi_visibility, uc_pool, 1); 127 smp_call_function(uncached_ipi_visibility, uc_pool, 1);
128 if (status || atomic_read(&uc_pool->status)) 128 if (atomic_read(&uc_pool->status))
129 goto failed; 129 goto failed;
130 } else if (status != PAL_VISIBILITY_OK) 130 } else if (status != PAL_VISIBILITY_OK)
131 goto failed; 131 goto failed;
@@ -146,8 +146,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
146 if (status != PAL_STATUS_SUCCESS) 146 if (status != PAL_STATUS_SUCCESS)
147 goto failed; 147 goto failed;
148 atomic_set(&uc_pool->status, 0); 148 atomic_set(&uc_pool->status, 0);
149 status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); 149 smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
150 if (status || atomic_read(&uc_pool->status)) 150 if (atomic_read(&uc_pool->status))
151 goto failed; 151 goto failed;
152 152
153 /* 153 /*
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index fbc676160adf..64d95eb6ffff 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -994,8 +994,7 @@ int rtas_ibm_suspend_me(u64 handle)
994 /* Call function on all CPUs. One of us will make the 994 /* Call function on all CPUs. One of us will make the
995 * rtas call 995 * rtas call
996 */ 996 */
997 if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) 997 on_each_cpu(rtas_percpu_suspend_me, &data, 0);
998 atomic_set(&data.error, -EINVAL);
999 998
1000 wait_for_completion(&done); 999 wait_for_completion(&done);
1001 1000
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 1811fa4a1b1a..7c48ff4ae8d1 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -15,6 +15,7 @@ EXPORT_SYMBOL(wbinvd_on_cpu);
15 15
16int wbinvd_on_all_cpus(void) 16int wbinvd_on_all_cpus(void)
17{ 17{
18 return on_each_cpu(__wbinvd, NULL, 1); 18 on_each_cpu(__wbinvd, NULL, 1);
19 return 0;
19} 20}
20EXPORT_SYMBOL(wbinvd_on_all_cpus); 21EXPORT_SYMBOL(wbinvd_on_all_cpus);
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 658664a5a5aa..df1edb5ec0ad 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1311,8 +1311,7 @@ static void ipi_handler(void *null)
1311 1311
1312void global_cache_flush(void) 1312void global_cache_flush(void)
1313{ 1313{
1314 if (on_each_cpu(ipi_handler, NULL, 1) != 0) 1314 on_each_cpu(ipi_handler, NULL, 1);
1315 panic(PFX "timed out waiting for the other CPUs!\n");
1316} 1315}
1317EXPORT_SYMBOL(global_cache_flush); 1316EXPORT_SYMBOL(global_cache_flush);
1318 1317
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a56f08ff3097..bb8b451ab01f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -35,7 +35,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
35/* 35/*
36 * Call a function on all processors 36 * Call a function on all processors
37 */ 37 */
38int on_each_cpu(smp_call_func_t func, void *info, int wait); 38void on_each_cpu(smp_call_func_t func, void *info, int wait);
39 39
40/* 40/*
41 * Call a function on processors specified by mask, which might include 41 * Call a function on processors specified by mask, which might include
@@ -101,7 +101,7 @@ extern void smp_cpus_done(unsigned int max_cpus);
101/* 101/*
102 * Call a function on all other processors 102 * Call a function on all other processors
103 */ 103 */
104int smp_call_function(smp_call_func_t func, void *info, int wait); 104void smp_call_function(smp_call_func_t func, void *info, int wait);
105void smp_call_function_many(const struct cpumask *mask, 105void smp_call_function_many(const struct cpumask *mask,
106 smp_call_func_t func, void *info, bool wait); 106 smp_call_func_t func, void *info, bool wait);
107 107
@@ -144,9 +144,8 @@ static inline void smp_send_stop(void) { }
144 * These macros fold the SMP functionality into a single CPU system 144 * These macros fold the SMP functionality into a single CPU system
145 */ 145 */
146#define raw_smp_processor_id() 0 146#define raw_smp_processor_id() 0
147static inline int up_smp_call_function(smp_call_func_t func, void *info) 147static inline void up_smp_call_function(smp_call_func_t func, void *info)
148{ 148{
149 return 0;
150} 149}
151#define smp_call_function(func, info, wait) \ 150#define smp_call_function(func, info, wait) \
152 (up_smp_call_function(func, info)) 151 (up_smp_call_function(func, info))
diff --git a/kernel/smp.c b/kernel/smp.c
index 220ad142f5dd..616d4d114847 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -487,13 +487,11 @@ EXPORT_SYMBOL(smp_call_function_many);
487 * You must not call this function with disabled interrupts or from a 487 * You must not call this function with disabled interrupts or from a
488 * hardware interrupt handler or from a bottom half handler. 488 * hardware interrupt handler or from a bottom half handler.
489 */ 489 */
490int smp_call_function(smp_call_func_t func, void *info, int wait) 490void smp_call_function(smp_call_func_t func, void *info, int wait)
491{ 491{
492 preempt_disable(); 492 preempt_disable();
493 smp_call_function_many(cpu_online_mask, func, info, wait); 493 smp_call_function_many(cpu_online_mask, func, info, wait);
494 preempt_enable(); 494 preempt_enable();
495
496 return 0;
497} 495}
498EXPORT_SYMBOL(smp_call_function); 496EXPORT_SYMBOL(smp_call_function);
499 497
@@ -594,18 +592,16 @@ void __init smp_init(void)
594 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead 592 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
595 * of local_irq_disable/enable(). 593 * of local_irq_disable/enable().
596 */ 594 */
597int on_each_cpu(void (*func) (void *info), void *info, int wait) 595void on_each_cpu(void (*func) (void *info), void *info, int wait)
598{ 596{
599 unsigned long flags; 597 unsigned long flags;
600 int ret = 0;
601 598
602 preempt_disable(); 599 preempt_disable();
603 ret = smp_call_function(func, info, wait); 600 smp_call_function(func, info, wait);
604 local_irq_save(flags); 601 local_irq_save(flags);
605 func(info); 602 func(info);
606 local_irq_restore(flags); 603 local_irq_restore(flags);
607 preempt_enable(); 604 preempt_enable();
608 return ret;
609} 605}
610EXPORT_SYMBOL(on_each_cpu); 606EXPORT_SYMBOL(on_each_cpu);
611 607
diff --git a/kernel/up.c b/kernel/up.c
index 483c9962c999..862b460ab97a 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -35,14 +35,13 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
35} 35}
36EXPORT_SYMBOL(smp_call_function_single_async); 36EXPORT_SYMBOL(smp_call_function_single_async);
37 37
38int on_each_cpu(smp_call_func_t func, void *info, int wait) 38void on_each_cpu(smp_call_func_t func, void *info, int wait)
39{ 39{
40 unsigned long flags; 40 unsigned long flags;
41 41
42 local_irq_save(flags); 42 local_irq_save(flags);
43 func(info); 43 func(info);
44 local_irq_restore(flags); 44 local_irq_restore(flags);
45 return 0;
46} 45}
47EXPORT_SYMBOL(on_each_cpu); 46EXPORT_SYMBOL(on_each_cpu);
48 47