Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 39
 1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..12328147132c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
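
For context, and not part of the patch itself: a minimal sketch of how a caller might use the new flush_delayed_work() helper during teardown. The workqueue interfaces shown (DECLARE_DELAYED_WORK, schedule_delayed_work, flush_delayed_work) are the real ones; the hypo_* names are hypothetical.

/* Illustrative sketch only -- hypothetical driver, not part of this patch. */
#include <linux/workqueue.h>

static void hypo_work_fn(struct work_struct *work)
{
	/* deferred processing goes here */
}

static DECLARE_DELAYED_WORK(hypo_dwork, hypo_work_fn);

static void hypo_start(void)
{
	/* run hypo_work_fn() roughly one second from now */
	schedule_delayed_work(&hypo_dwork, HZ);
}

static void hypo_shutdown(void)
{
	/*
	 * The new helper: cancel the pending timer if it has not fired,
	 * run the work immediately, and block until the callback has
	 * finished, so teardown can proceed safely afterwards.
	 */
	flush_delayed_work(&hypo_dwork);
}
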
@@ -667,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
 	if (!works)
 		return -ENOMEM;
 
+	/*
+	 * when running in keventd don't schedule a work item on itself.
+	 * Can just call directly because the work queue is already bound.
+	 * This also is faster.
+	 * Make this a generic parameter for other workqueues?
+	 */
+	if (current_is_keventd()) {
+		orig = raw_smp_processor_id();
+		INIT_WORK(per_cpu_ptr(works, orig), func);
+		func(per_cpu_ptr(works, orig));
+	}
+
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
+		if (cpu == orig)
+			continue;
 		INIT_WORK(work, func);
 		schedule_work_on(cpu, work);
 	}
-	for_each_online_cpu(cpu)
-		flush_work(per_cpu_ptr(works, cpu));
+	for_each_online_cpu(cpu) {
+		if (cpu != orig)
+			flush_work(per_cpu_ptr(works, cpu));
+	}
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
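
For context, and not part of the patch itself: a minimal sketch of a schedule_on_each_cpu() caller, which the second hunk speeds up when the caller is itself a keventd worker (the caller's own CPU gets a direct function call instead of a work item queued back onto the same workqueue). schedule_on_each_cpu(), pr_info() and raw_smp_processor_id() are the real interfaces; the hypo_* names are hypothetical.

/* Illustrative sketch only -- hypothetical caller, not part of this patch. */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void hypo_probe_fn(struct work_struct *unused)
{
	pr_info("hypo: ran on CPU %d\n", raw_smp_processor_id());
}

static int hypo_probe_all_cpus(void)
{
	/* Blocks until hypo_probe_fn() has run once on every online CPU. */
	return schedule_on_each_cpu(hypo_probe_fn);
}
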