Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ccefe574dcf7..12328147132c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -647,7 +647,7 @@ EXPORT_SYMBOL(schedule_delayed_work);
  */
 void flush_delayed_work(struct delayed_work *dwork)
 {
-	if (del_timer(&dwork->timer)) {
+	if (del_timer_sync(&dwork->timer)) {
 		struct cpu_workqueue_struct *cwq;
 		cwq = wq_per_cpu(keventd_wq, get_cpu());
 		__queue_work(cwq, &dwork->work);
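The first hunk swaps del_timer() for del_timer_sync(): del_timer() only deactivates a pending timer and can return while the timer callback is still running on another CPU, whereas del_timer_sync() additionally waits for a running callback to finish, so the re-queue above cannot race with a concurrently firing timer. A minimal sketch of the same idiom in driver teardown; the my_dev structure is invented purely for illustration:

#include <linux/timer.h>

struct my_dev {
	struct timer_list poll_timer;	/* hypothetical polling timer */
};

static void my_dev_stop(struct my_dev *dev)
{
	/*
	 * del_timer() could return while the timer callback is still
	 * executing on another CPU; del_timer_sync() also waits for
	 * that callback to complete before returning.
	 */
	del_timer_sync(&dev->poll_timer);
}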
@@ -685,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
 	if (!works)
 		return -ENOMEM;
 
+	/*
+	 * When running in keventd don't schedule a work item on itself.
+	 * Can just call directly because the work queue is already bound.
+	 * This also is faster.
+	 * Make this a generic parameter for other workqueues?
+	 */
+	if (current_is_keventd()) {
+		orig = raw_smp_processor_id();
+		INIT_WORK(per_cpu_ptr(works, orig), func);
+		func(per_cpu_ptr(works, orig));
+	}
+
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
+		if (cpu == orig)
+			continue;
 		INIT_WORK(work, func);
 		schedule_work_on(cpu, work);
 	}
-	for_each_online_cpu(cpu)
-		flush_work(per_cpu_ptr(works, cpu));
+	for_each_online_cpu(cpu) {
+		if (cpu != orig)
+			flush_work(per_cpu_ptr(works, cpu));
+	}
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
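The second hunk lets schedule_on_each_cpu() be called from keventd itself: the caller's CPU runs func directly (its work queue is already bound there, and, per the comment, this is faster), and the scheduling and flushing loops then skip that CPU. A minimal usage sketch, assuming a kernel of this era; the module and function names are invented for illustration:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

/* Runs once on each online CPU; the work_struct argument is unused here. */
static void hello_cpu(struct work_struct *work)
{
	printk(KERN_INFO "hello from CPU %d\n", smp_processor_id());
}

static int __init hello_init(void)
{
	/* Blocks until hello_cpu() has completed on every online CPU. */
	return schedule_on_each_cpu(hello_cpu);
}

static void __exit hello_exit(void)
{
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");

With the patch applied, a call made from a keventd work item no longer queues and flushes work on its own CPU's queue; func runs inline on the local CPU instead.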