Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
 		return -ENOMEM;
 
 	get_online_cpus();
+
+	/*
+	 * When running in keventd don't schedule a work item on
+	 * itself.  Can just call directly because the work queue is
+	 * already bound.  This also is faster.
+	 */
+	if (current_is_keventd())
+		orig = raw_smp_processor_id();
+
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
+		if (cpu != orig)
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
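
The change lets a keventd worker call schedule_on_each_cpu() without deadlocking: instead of queueing a work item on its own (already bound) workqueue and then waiting for it in flush_work(), the function is invoked directly for the local CPU. A minimal, hypothetical caller (not part of this patch; say_hello and run_everywhere are illustrative names only) might look like:

/*
 * Usage sketch: schedule_on_each_cpu() runs the handler once on
 * every online CPU and returns after all invocations complete.
 * The handler has the standard work_func_t signature.
 */
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical work handler; runs once per online CPU. */
static void say_hello(struct work_struct *unused)
{
	pr_info("hello from CPU %d\n", raw_smp_processor_id());
}

static int run_everywhere(void)
{
	/* Returns 0 on success, -ENOMEM if the per-cpu work
	 * items could not be allocated. */
	return schedule_on_each_cpu(say_hello);
}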