Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  44
1 files changed, 40 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0668795d8818..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -317,8 +317,6 @@ static int worker_thread(void *__cwq)
 	if (cwq->wq->freezeable)
 		set_freezable();
 
-	set_user_nice(current, -5);
-
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
 		if (!freezing(current) &&
@@ -600,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
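A minimal caller sketch, not part of this patch (my_work_fn and my_kick are hypothetical names), illustrating the return value documented above: non-zero means @work was newly queued, zero means it was already pending and stays where it is.

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work ran\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(void)
{
	/* schedule_work() returns 0 if my_work was already on the
	 * kernel-global workqueue and was therefore not requeued. */
	if (!schedule_work(&my_work))
		pr_debug("my_work already pending; left in place\n");
}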
@@ -637,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer)) {
+		struct cpu_workqueue_struct *cwq;
+		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		__queue_work(cwq, &dwork->work);
+		put_cpu();
+	}
+	flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
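As a usage illustration only (struct my_dev and its poll_work member are hypothetical, not from this patch), a driver teardown path might rely on the new helper to make sure neither the timer nor the callback is still live before the containing object goes away:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;
	/* ... driver state touched by the work callback ... */
};

static void my_dev_stop(struct my_dev *dev)
{
	/*
	 * flush_delayed_work() cancels a pending timer, runs any pending
	 * work immediately and waits for the callback to finish, so dev
	 * can be freed safely afterwards.
	 */
	flush_delayed_work(&dev->poll_work);
}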
@@ -664,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -671,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
 		return -ENOMEM;
 
 	get_online_cpus();
+
+	/*
+	 * When running in keventd don't schedule a work item on
+	 * itself.  Can just call directly because the work queue is
+	 * already bound.  This also is faster.
+	 */
+	if (current_is_keventd())
+		orig = raw_smp_processor_id();
+
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
+		if (cpu != orig)
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
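A hedged usage sketch (my_percpu_sync and my_sync_all_cpus are hypothetical names, not from this patch): schedule_on_each_cpu() runs the function once on every online CPU and waits for all of them. With the change above, a caller that is itself a keventd worker invokes the function directly on its own CPU instead of queueing a work item to itself, avoiding the self-deadlock-prone round trip and running faster.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_percpu_sync(struct work_struct *unused)
{
	pr_info("per-cpu sync ran on CPU %d\n", raw_smp_processor_id());
}

static int my_sync_all_cpus(void)
{
	/* Blocks until my_percpu_sync has run on every online CPU;
	 * returns 0 on success or -ENOMEM if the per-cpu allocation fails. */
	return schedule_on_each_cpu(my_percpu_sync);
}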