path: root/kernel/workqueue.c
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  182
1 file changed, 139 insertions(+), 43 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..4048e92aa04f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 }
 
 static void insert_work(struct cpu_workqueue_struct *cwq,
-			struct work_struct *work, int tail)
+			struct work_struct *work, struct list_head *head)
 {
 	set_wq_data(work, cwq);
 	/*
@@ -133,21 +133,17 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	 * result of list_add() below, see try_to_grab_pending().
 	 */
 	smp_wmb();
-	if (tail)
-		list_add_tail(&work->entry, &cwq->worklist);
-	else
-		list_add(&work->entry, &cwq->worklist);
+	list_add_tail(&work->entry, head);
 	wake_up(&cwq->more_work);
 }
 
-/* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	insert_work(cwq, work, 1);
+	insert_work(cwq, work, &cwq->worklist);
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
@@ -163,17 +159,39 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	int ret;
+
+	ret = queue_work_on(get_cpu(), wq, work);
+	put_cpu();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work);
+
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+int
+queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
 	int ret = 0;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(!list_empty(&work->entry));
-		__queue_work(wq_per_cpu(wq, get_cpu()), work);
-		put_cpu();
+		__queue_work(wq_per_cpu(wq, cpu), work);
 		ret = 1;
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_work_on);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
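
The kerneldoc above introduces the new queue_work_on() entry point. As a rough caller-side illustration (this sketch is not part of the patch; the workqueue, work item, and handler names are hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical caller-side sketch, not part of this patch. */
static struct workqueue_struct *my_wq;	/* assume created with create_workqueue("my_wq") */
static struct work_struct my_work;	/* assume INIT_WORK(&my_work, my_work_fn) was done */

static void kick_work_on_cpu(int cpu)
{
	/*
	 * Per the kerneldoc, the caller must ensure @cpu cannot go away,
	 * e.g. by holding get_online_cpus() across this call.
	 */
	if (!queue_work_on(cpu, my_wq, &my_work))
		pr_debug("my_work was already pending\n");
}
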
@@ -272,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -337,14 +355,14 @@ static void wq_barrier_func(struct work_struct *work)
 }
 
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
-			struct wq_barrier *barr, int tail)
+			struct wq_barrier *barr, struct list_head *head)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
 
-	insert_work(cwq, &barr->work, tail);
+	insert_work(cwq, &barr->work, head);
 }
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -364,7 +382,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 	active = 0;
 	spin_lock_irq(&cwq->lock);
 	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-		insert_wq_barrier(cwq, &barr, 1);
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
 		active = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
@@ -395,13 +413,64 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * Returns false if @work has already terminated.
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+	struct cpu_workqueue_struct *cwq;
+	struct list_head *prev;
+	struct wq_barrier barr;
+
+	might_sleep();
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return 0;
+
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
+
+	prev = NULL;
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * See the comment near try_to_grab_pending()->smp_rmb().
+		 * If it was re-queued under us we are not going to wait.
+		 */
+		smp_rmb();
+		if (unlikely(cwq != get_wq_data(work)))
+			goto out;
+		prev = &work->entry;
+	} else {
+		if (cwq->current_work != work)
+			goto out;
+		prev = &cwq->worklist;
+	}
+	insert_wq_barrier(cwq, &barr, prev->next);
+out:
+	spin_unlock_irq(&cwq->lock);
+	if (!prev)
+		return 0;
+
+	wait_for_completion(&barr.done);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
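
The new flush_work() above waits for one specific work item rather than draining a whole workqueue. A minimal caller-side sketch (illustrative only, reusing the hypothetical my_work from the earlier example) might be:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical sketch, not part of this patch. */
static void wait_for_my_work(void)
{
	/*
	 * Per the kerneldoc, the caller should already have arranged for
	 * my_work not to be re-queued before calling flush_work().
	 */
	if (flush_work(&my_work))
		pr_debug("my_work ran and has now finished\n");
	else
		pr_debug("my_work was not pending or running\n");
}
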
@@ -449,7 +518,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		insert_wq_barrier(cwq, &barr, 0);
+		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
@@ -467,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
@@ -477,7 +546,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -553,6 +622,19 @@ int schedule_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(schedule_work);
 
+/*
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+int schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, keventd_wq, work);
+}
+EXPORT_SYMBOL(schedule_work_on);
+
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
  * @dwork: job to be done
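
schedule_work_on() is the keventd counterpart of queue_work_on(). A hypothetical caller (again reusing the made-up my_work) could pin a job to one CPU like this:

#include <linux/workqueue.h>

/* Hypothetical sketch, not part of this patch. */
static void run_my_work_on_cpu(int cpu)
{
	/* Same caveat as queue_work_on(): @cpu must not go away under us. */
	schedule_work_on(cpu, &my_work);
}
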
@@ -607,10 +689,10 @@ int schedule_on_each_cpu(work_func_t func)
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
-		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
+		schedule_work_on(cpu, work);
 	}
-	flush_workqueue(keventd_wq);
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
@@ -747,11 +829,22 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		get_online_cpus();
+		cpu_maps_update_begin();
+		/*
+		 * We must place this wq on list even if the code below fails.
+		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+		 * destroy_workqueue() takes the lock, in that case we leak
+		 * cwq[cpu]->thread.
+		 */
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
-
+		/*
+		 * We must initialize cwqs for each possible cpu even if we
+		 * are going to call destroy_workqueue() finally. Otherwise
+		 * cpu_up() can hit the uninitialized cwq once we drop the
+		 * lock.
+		 */
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
 			if (err || !cpu_online(cpu))
@@ -759,7 +852,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		put_online_cpus();
+		cpu_maps_update_done();
 	}
 
 	if (err) {
@@ -773,18 +866,18 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
-	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * get_online_cpus() protects cwq->thread.
+	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+	 * cpu_add_remove_lock protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
 	 * a concurrent flush_workqueue() can insert a barrier after us.
 	 * However, in that case run_workqueue() won't return and check
 	 * kthread_should_stop() until it flushes all work_struct's.
@@ -808,14 +901,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
-	get_online_cpus();
+	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
-	put_online_cpus();
+	cpu_maps_update_done();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -829,6 +922,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
+	int ret = NOTIFY_OK;
 
 	action &= ~CPU_TASKS_FROZEN;
 
@@ -836,7 +930,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
-
+undo:
 	list_for_each_entry(wq, &workqueues, list) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 
@@ -846,7 +940,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				break;
 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
 				wq->name, cpu);
-			return NOTIFY_BAD;
+			action = CPU_UP_CANCELED;
+			ret = NOTIFY_BAD;
+			goto undo;
 
 		case CPU_ONLINE:
 			start_workqueue_thread(cwq, cpu);
@@ -854,7 +950,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
-		case CPU_DEAD:
+		case CPU_POST_DEAD:
 			cleanup_workqueue_thread(cwq);
 			break;
 		}
@@ -862,11 +958,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_CANCELED:
-	case CPU_DEAD:
+	case CPU_POST_DEAD:
 		cpu_clear(cpu, cpu_populated_map);
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 void __init init_workqueues(void)