Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  149
1 file changed, 117 insertions, 32 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..ec7e4f62aaff 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 }
 
 static void insert_work(struct cpu_workqueue_struct *cwq,
-			struct work_struct *work, int tail)
+			struct work_struct *work, struct list_head *head)
 {
 	set_wq_data(work, cwq);
 	/*
@@ -133,21 +133,17 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	 * result of list_add() below, see try_to_grab_pending().
 	 */
 	smp_wmb();
-	if (tail)
-		list_add_tail(&work->entry, &cwq->worklist);
-	else
-		list_add(&work->entry, &cwq->worklist);
+	list_add_tail(&work->entry, head);
 	wake_up(&cwq->more_work);
 }
 
-/* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	insert_work(cwq, work, 1);
+	insert_work(cwq, work, &cwq->worklist);
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
@@ -163,17 +159,39 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	int ret;
+
+	ret = queue_work_on(get_cpu(), wq, work);
+	put_cpu();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work);
+
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+int
+queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
 	int ret = 0;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(!list_empty(&work->entry));
-		__queue_work(wq_per_cpu(wq, get_cpu()), work);
-		put_cpu();
+		__queue_work(wq_per_cpu(wq, cpu), work);
 		ret = 1;
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_work_on);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
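
The new queue_work_on() lets a caller queue a work item on a chosen CPU instead of the local one; the kernel-doc above notes that the caller must keep that CPU from going away. A minimal usage sketch follows (the workqueue, work item, and handler names are hypothetical, not part of this patch):

	/* hypothetical driver state, initialized elsewhere with
	 * stats_wq = create_workqueue("stats") and INIT_WORK(&stats_work, stats_fn) */
	static struct workqueue_struct *stats_wq;
	static struct work_struct stats_work;

	static void kick_stats_on_cpu(int cpu)
	{
		/* caller must pin @cpu online, e.g. under get_online_cpus() */
		if (!queue_work_on(cpu, stats_wq, &stats_work))
			pr_debug("stats_work was already pending\n");
	}
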
@@ -337,14 +355,14 @@ static void wq_barrier_func(struct work_struct *work)
 }
 
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
-			struct wq_barrier *barr, int tail)
+			struct wq_barrier *barr, struct list_head *head)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
 
-	insert_work(cwq, &barr->work, tail);
+	insert_work(cwq, &barr->work, head);
 }
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -364,7 +382,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		active = 0;
 		spin_lock_irq(&cwq->lock);
 		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, 1);
+			insert_wq_barrier(cwq, &barr, &cwq->worklist);
 			active = 1;
 		}
 		spin_unlock_irq(&cwq->lock);
@@ -397,11 +415,62 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * Returns false if @work has already terminated.
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+	struct cpu_workqueue_struct *cwq;
+	struct list_head *prev;
+	struct wq_barrier barr;
+
+	might_sleep();
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return 0;
+
+	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
+	prev = NULL;
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * See the comment near try_to_grab_pending()->smp_rmb().
+		 * If it was re-queued under us we are not going to wait.
+		 */
+		smp_rmb();
+		if (unlikely(cwq != get_wq_data(work)))
+			goto out;
+		prev = &work->entry;
+	} else {
+		if (cwq->current_work != work)
+			goto out;
+		prev = &cwq->worklist;
+	}
+	insert_wq_barrier(cwq, &barr, prev->next);
+out:
+	spin_unlock_irq(&cwq->lock);
+	if (!prev)
+		return 0;
+
+	wait_for_completion(&barr.done);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
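
flush_work() waits for one specific work item rather than draining the whole workqueue, and the kernel-doc above requires that the caller has already stopped the work from being requeued. An illustrative teardown pattern (my_work and stop_my_requeue_source() are made-up names, not from this patch):

	stop_my_requeue_source();		/* guarantee nothing requeues my_work anymore */
	if (flush_work(&my_work))
		pr_debug("waited for my_work callback to finish\n");
	else
		pr_debug("my_work was already idle\n");
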
@@ -449,7 +518,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		insert_wq_barrier(cwq, &barr, 0);
+		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
@@ -477,7 +546,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -553,6 +622,19 @@ int schedule_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(schedule_work);
 
+/*
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+int schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, keventd_wq, work);
+}
+EXPORT_SYMBOL(schedule_work_on);
+
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
  * @dwork: job to be done
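
schedule_work_on() is the keventd counterpart of queue_work_on(): it queues onto the global events workqueue, but on a chosen CPU. A hedged sketch of how a caller might use it (the per-CPU work variable and helper below are illustrative, not part of this patch):

	/* hypothetical per-CPU work items, each INIT_WORK()'d at init time */
	static DEFINE_PER_CPU(struct work_struct, refresh_work);

	static void refresh_one_cpu(int cpu)
	{
		/* assumes @cpu is kept online by the caller */
		schedule_work_on(cpu, &per_cpu(refresh_work, cpu));
	}
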
@@ -607,10 +689,10 @@ int schedule_on_each_cpu(work_func_t func)
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
-		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
+		schedule_work_on(cpu, work);
 	}
-	flush_workqueue(keventd_wq);
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
@@ -747,7 +829,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		get_online_cpus();
+		cpu_maps_update_begin();
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
@@ -759,7 +841,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		put_online_cpus();
+		cpu_maps_update_done();
 	}
 
 	if (err) {
@@ -773,8 +855,8 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
-	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * get_online_cpus() protects cwq->thread.
+	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+	 * cpu_add_remove_lock protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -784,7 +866,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 
 	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
 	 * a concurrent flush_workqueue() can insert a barrier after us.
 	 * However, in that case run_workqueue() won't return and check
 	 * kthread_should_stop() until it flushes all work_struct's.
@@ -808,14 +890,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
-	get_online_cpus();
+	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
-	put_online_cpus();
+	cpu_maps_update_done();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -829,6 +911,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
+	int ret = NOTIFY_OK;
 
 	action &= ~CPU_TASKS_FROZEN;
 
@@ -836,7 +919,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
-
+undo:
 	list_for_each_entry(wq, &workqueues, list) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 
@@ -846,7 +929,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				break;
 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
 				wq->name, cpu);
-			return NOTIFY_BAD;
+			action = CPU_UP_CANCELED;
+			ret = NOTIFY_BAD;
+			goto undo;
 
 		case CPU_ONLINE:
 			start_workqueue_thread(cwq, cpu);
@@ -854,7 +939,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
-		case CPU_DEAD:
+		case CPU_POST_DEAD:
 			cleanup_workqueue_thread(cwq);
 			break;
 		}
@@ -862,11 +947,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_CANCELED:
-	case CPU_DEAD:
+	case CPU_POST_DEAD:
 		cpu_clear(cpu, cpu_populated_map);
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 void __init init_workqueues(void)