Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
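Usage note, not part of the patch: the new flush_delayed_work() lets a driver push a pending deferred update out immediately instead of waiting for (or losing) the timer, e.g. on a suspend path. A minimal sketch, assuming a hypothetical driver; my_dev, my_update, my_hw_sync and my_suspend are illustrations, not kernel APIs:

	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work update_work;
	};

	static void my_hw_sync(struct my_dev *dev);	/* hypothetical */

	/* Deferred handler: writes accumulated state to the hardware. */
	static void my_update(struct work_struct *work)
	{
		struct my_dev *dev = container_of(to_delayed_work(work),
						  struct my_dev, update_work);

		my_hw_sync(dev);
	}

	static int my_suspend(struct my_dev *dev)
	{
		/*
		 * If update_work is still armed, run it right now and
		 * wait for the callback to finish, so the deferred
		 * update is not lost across suspend.
		 */
		flush_delayed_work(&dev->update_work);
		return 0;
	}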
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
 		return -ENOMEM;
 
 	get_online_cpus();
+
+	/*
+	 * When running in keventd don't schedule a work item on
+	 * itself. Can just call directly because the work queue is
+	 * already bound. This also is faster.
+	 */
+	if (current_is_keventd())
+		orig = raw_smp_processor_id();
+
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
+		if (cpu != orig)
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
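Usage note, not part of the patch: the keventd shortcut above is transparent to callers; schedule_on_each_cpu() still runs the handler exactly once on every online CPU and blocks until all instances have completed. A minimal caller sketch, where my_drain_cpu and my_drain_all_cpus are hypothetical names, not kernel APIs:

	#include <linux/workqueue.h>

	/* Runs once per online CPU, in keventd (workqueue) context. */
	static void my_drain_cpu(struct work_struct *unused)
	{
		/* e.g. drain this CPU's private cache or counters */
	}

	static int my_drain_all_cpus(void)
	{
		/*
		 * Returns 0 on success or -ENOMEM if the per-cpu
		 * work_struct array cannot be allocated.
		 */
		return schedule_on_each_cpu(my_drain_cpu);
	}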
