aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/smp.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--kernel/smp.c30
1 file changed, 26 insertions, 4 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 487653b5844f..f38a1e692259 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/sched.h>
16 17
17#include "smpboot.h" 18#include "smpboot.h"
18 19
@@ -164,7 +165,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
164 if (!csd) { 165 if (!csd) {
165 csd = &csd_stack; 166 csd = &csd_stack;
166 if (!wait) 167 if (!wait)
167 csd = &__get_cpu_var(csd_data); 168 csd = this_cpu_ptr(&csd_data);
168 } 169 }
169 170
170 csd_lock(csd); 171 csd_lock(csd);
@@ -229,7 +230,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
229 230
230 WARN_ON(!irqs_disabled()); 231 WARN_ON(!irqs_disabled());
231 232
232 head = &__get_cpu_var(call_single_queue); 233 head = this_cpu_ptr(&call_single_queue);
233 entry = llist_del_all(head); 234 entry = llist_del_all(head);
234 entry = llist_reverse_order(entry); 235 entry = llist_reverse_order(entry);
235 236
@@ -419,7 +420,7 @@ void smp_call_function_many(const struct cpumask *mask,
419 return; 420 return;
420 } 421 }
421 422
422 cfd = &__get_cpu_var(cfd_data); 423 cfd = this_cpu_ptr(&cfd_data);
423 424
424 cpumask_and(cfd->cpumask, mask, cpu_online_mask); 425 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
425 cpumask_clear_cpu(this_cpu, cfd->cpumask); 426 cpumask_clear_cpu(this_cpu, cfd->cpumask);
@@ -670,7 +671,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
670 if (cond_func(cpu, info)) { 671 if (cond_func(cpu, info)) {
671 ret = smp_call_function_single(cpu, func, 672 ret = smp_call_function_single(cpu, func,
672 info, wait); 673 info, wait);
673 WARN_ON_ONCE(!ret); 674 WARN_ON_ONCE(ret);
674 } 675 }
675 preempt_enable(); 676 preempt_enable();
676 } 677 }
@@ -699,3 +700,24 @@ void kick_all_cpus_sync(void)
699 smp_call_function(do_nothing, NULL, 1); 700 smp_call_function(do_nothing, NULL, 1);
700} 701}
701EXPORT_SYMBOL_GPL(kick_all_cpus_sync); 702EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
703
704/**
705 * wake_up_all_idle_cpus - break all cpus out of idle
706 * wake_up_all_idle_cpus tries to break out of idle all cpus which are in
707 * an idle state, including idle polling cpus; for non-idle cpus, we will
708 * do nothing for them.
709 */
710void wake_up_all_idle_cpus(void)
711{
712 int cpu;
713
714 preempt_disable();
715 for_each_online_cpu(cpu) {
716 if (cpu == smp_processor_id())
717 continue;
718
719 wake_up_if_idle(cpu);
720 }
721 preempt_enable();
722}
723EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);