aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRik van Riel <riel@surriel.com>2018-09-25 23:58:41 -0400
committerPeter Zijlstra <peterz@infradead.org>2018-10-09 10:51:11 -0400
commit7d49b28a80b830c3ca876d33bedc58d62a78e16f (patch)
treec6a906d70f899bb2989f97cad1efdd93e9af2015
parentc3f7f2c7eba1a53d2e5ffbc2dcc9a20c5f094890 (diff)
smp,cpumask: introduce on_each_cpu_cond_mask
Introduce a variant of on_each_cpu_cond that iterates only over the CPUs in a cpumask, in order to avoid making callbacks for every single CPU in the system when we only need to test a subset. Cc: npiggin@gmail.com Cc: mingo@kernel.org Cc: will.deacon@arm.com Cc: songliubraving@fb.com Cc: kernel-team@fb.com Cc: hpa@zytor.com Cc: luto@kernel.org Signed-off-by: Rik van Riel <riel@surriel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: http://lkml.kernel.org/r/20180926035844.1420-5-riel@surriel.com
-rw-r--r--include/linux/smp.h4
-rw-r--r--kernel/smp.c17
-rw-r--r--kernel/up.c14
3 files changed, 28 insertions, 7 deletions
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..a56f08ff3097 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
53 smp_call_func_t func, void *info, bool wait, 53 smp_call_func_t func, void *info, bool wait,
54 gfp_t gfp_flags); 54 gfp_t gfp_flags);
55 55
56void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
57 smp_call_func_t func, void *info, bool wait,
58 gfp_t gfp_flags, const struct cpumask *mask);
59
56int smp_call_function_single_async(int cpu, call_single_data_t *csd); 60int smp_call_function_single_async(int cpu, call_single_data_t *csd);
57 61
58#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
diff --git a/kernel/smp.c b/kernel/smp.c
index a7d4f9f50a49..163c451af42e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -669,9 +669,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
669 * You must not call this function with disabled interrupts or 669 * You must not call this function with disabled interrupts or
670 * from a hardware interrupt handler or from a bottom half handler. 670 * from a hardware interrupt handler or from a bottom half handler.
671 */ 671 */
672void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), 672void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
673 smp_call_func_t func, void *info, bool wait, 673 smp_call_func_t func, void *info, bool wait,
674 gfp_t gfp_flags) 674 gfp_t gfp_flags, const struct cpumask *mask)
675{ 675{
676 cpumask_var_t cpus; 676 cpumask_var_t cpus;
677 int cpu, ret; 677 int cpu, ret;
@@ -680,7 +680,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
680 680
681 if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) { 681 if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
682 preempt_disable(); 682 preempt_disable();
683 for_each_online_cpu(cpu) 683 for_each_cpu(cpu, mask)
684 if (cond_func(cpu, info)) 684 if (cond_func(cpu, info))
685 __cpumask_set_cpu(cpu, cpus); 685 __cpumask_set_cpu(cpu, cpus);
686 on_each_cpu_mask(cpus, func, info, wait); 686 on_each_cpu_mask(cpus, func, info, wait);
@@ -692,7 +692,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
692 * just have to IPI them one by one. 692 * just have to IPI them one by one.
693 */ 693 */
694 preempt_disable(); 694 preempt_disable();
695 for_each_online_cpu(cpu) 695 for_each_cpu(cpu, mask)
696 if (cond_func(cpu, info)) { 696 if (cond_func(cpu, info)) {
697 ret = smp_call_function_single(cpu, func, 697 ret = smp_call_function_single(cpu, func,
698 info, wait); 698 info, wait);
@@ -701,6 +701,15 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
701 preempt_enable(); 701 preempt_enable();
702 } 702 }
703} 703}
704EXPORT_SYMBOL(on_each_cpu_cond_mask);
705
706void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
707 smp_call_func_t func, void *info, bool wait,
708 gfp_t gfp_flags)
709{
710 on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
711 cpu_online_mask);
712}
704EXPORT_SYMBOL(on_each_cpu_cond); 713EXPORT_SYMBOL(on_each_cpu_cond);
705 714
706static void do_nothing(void *unused) 715static void do_nothing(void *unused)
diff --git a/kernel/up.c b/kernel/up.c
index 42c46bf3e0a5..ff536f9cc8a2 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
68 * Preemption is disabled here to make sure the cond_func is called under the 68 * Preemption is disabled here to make sure the cond_func is called under the
69 * same conditions in UP and SMP. 69 * same conditions in UP and SMP.
70 */ 70 */
71void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), 71void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
72 smp_call_func_t func, void *info, bool wait, 72 smp_call_func_t func, void *info, bool wait,
73 gfp_t gfp_flags) 73 gfp_t gfp_flags, const struct cpumask *mask)
74{ 74{
75 unsigned long flags; 75 unsigned long flags;
76 76
@@ -82,6 +82,14 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
82 } 82 }
83 preempt_enable(); 83 preempt_enable();
84} 84}
85EXPORT_SYMBOL(on_each_cpu_cond_mask);
86
87void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
88 smp_call_func_t func, void *info, bool wait,
89 gfp_t gfp_flags)
90{
91 on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
92}
85EXPORT_SYMBOL(on_each_cpu_cond); 93EXPORT_SYMBOL(on_each_cpu_cond);
86 94
87int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) 95int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)