Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 95
 1 file changed, 92 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b7bb37ab03bc..918d55267a12 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -46,6 +46,7 @@ struct cpu_workqueue_struct {
 
         struct workqueue_struct *wq;
         struct task_struct *thread;
+        struct work_struct *current_work;
 
         int run_depth;          /* Detect run_workqueue() recursion depth */
 
@@ -120,6 +121,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
             && work_pending(work)
             && !list_empty(&work->entry)) {
                 work_func_t f = work->func;
+                cwq->current_work = work;
                 list_del_init(&work->entry);
                 spin_unlock_irqrestore(&cwq->lock, flags);
 
@@ -128,6 +130,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
                 f(work);
 
                 spin_lock_irqsave(&cwq->lock, flags);
+                cwq->current_work = NULL;
                 ret = 1;
         }
         spin_unlock_irqrestore(&cwq->lock, flags);
@@ -166,6 +169,17 @@ int fastcall run_scheduled_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(run_scheduled_work);
 
+static void insert_work(struct cpu_workqueue_struct *cwq,
+                        struct work_struct *work, int tail)
+{
+        set_wq_data(work, cwq);
+        if (tail)
+                list_add_tail(&work->entry, &cwq->worklist);
+        else
+                list_add(&work->entry, &cwq->worklist);
+        wake_up(&cwq->more_work);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                          struct work_struct *work)
@@ -173,9 +187,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
         unsigned long flags;
 
         spin_lock_irqsave(&cwq->lock, flags);
-        set_wq_data(work, cwq);
-        list_add_tail(&work->entry, &cwq->worklist);
-        wake_up(&cwq->more_work);
+        insert_work(cwq, work, 1);
         spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
181 | 193 | ||
@@ -305,6 +317,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                                                 struct work_struct, entry);
                 work_func_t f = work->func;
 
+                cwq->current_work = work;
                 list_del_init(cwq->worklist.next);
                 spin_unlock_irqrestore(&cwq->lock, flags);
 
@@ -325,6 +338,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 }
 
                 spin_lock_irqsave(&cwq->lock, flags);
+                cwq->current_work = NULL;
         }
         cwq->run_depth--;
         spin_unlock_irqrestore(&cwq->lock, flags);
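The wait_on_work() helper added in the next hunk relies on struct wq_barrier and init_wq_barrier(), which are not visible in this diff (they come from an earlier patch in the same series). As a rough sketch, assuming the usual completion-based barrier pattern, they presumably look something like this:

/*
 * Hedged reconstruction of the barrier machinery used by wait_on_work()
 * below; the real definitions live elsewhere in kernel/workqueue.c and
 * are not part of this diff.
 */
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

        complete(&barr->done);  /* wake the thread blocked in wait_on_work() */
}

static inline void init_wq_barrier(struct wq_barrier *barr)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        init_completion(&barr->done);
}

Because wait_on_work() queues the barrier with insert_work(cwq, &barr.work, 0), i.e. at the head of the worklist, the completion fires as soon as the callback currently running on that CPU returns.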
@@ -449,6 +463,75 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+static void wait_on_work(struct cpu_workqueue_struct *cwq,
+                                struct work_struct *work)
+{
+        struct wq_barrier barr;
+        int running = 0;
+
+        spin_lock_irq(&cwq->lock);
+        if (unlikely(cwq->current_work == work)) {
+                init_wq_barrier(&barr);
+                insert_work(cwq, &barr.work, 0);
+                running = 1;
+        }
+        spin_unlock_irq(&cwq->lock);
+
+        if (unlikely(running)) {
+                mutex_unlock(&workqueue_mutex);
+                wait_for_completion(&barr.done);
+                mutex_lock(&workqueue_mutex);
+        }
+}
+
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @wq: the workqueue on which the work is queued
+ * @work: the work which is to be flushed
+ *
+ * flush_work() will attempt to cancel the work if it is queued.  If the work's
+ * callback appears to be running, flush_work() will block until it has
+ * completed.
+ *
+ * flush_work() is designed to be used when the caller is tearing down data
+ * structures which the callback function operates upon.  It is expected that,
+ * prior to calling flush_work(), the caller has arranged for the work to not
+ * be requeued.
+ */
+void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+        struct cpu_workqueue_struct *cwq;
+
+        mutex_lock(&workqueue_mutex);
+        cwq = get_wq_data(work);
+        /* Was it ever queued ? */
+        if (!cwq)
+                goto out;
+
+        /*
+         * This work can't be re-queued, and the lock above protects us
+         * from take_over_work(), no need to re-check that get_wq_data()
+         * is still the same when we take cwq->lock.
+         */
+        spin_lock_irq(&cwq->lock);
+        list_del_init(&work->entry);
+        work_release(work);
+        spin_unlock_irq(&cwq->lock);
+
+        if (is_single_threaded(wq)) {
+                /* Always use first cpu's area. */
+                wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
+        } else {
+                int cpu;
+
+                for_each_online_cpu(cpu)
+                        wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+        }
+out:
+        mutex_unlock(&workqueue_mutex);
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                 int cpu, int freezeable)
 {
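The kernel-doc above spells out the intended use: the caller first arranges that the work can no longer be requeued, then calls flush_work() to cancel a still-pending instance and wait for a running callback to finish. A hedged usage sketch, with all driver-side names (my_dev, my_dev_teardown, stopping) invented for illustration:

/* Hypothetical caller of the new flush_work(); names are illustrative only. */
struct my_dev {
        struct workqueue_struct *wq;
        struct work_struct      work;
        int                     stopping;  /* checked by the callback before requeueing */
};

static void my_dev_teardown(struct my_dev *dev)
{
        /* Step 1: make sure nothing will queue dev->work again. */
        dev->stopping = 1;

        /*
         * Step 2: flush it.  flush_work() removes the work from the list if
         * it is still pending and, if the callback is running on some CPU,
         * blocks on a barrier work item until it completes.
         */
        flush_work(dev->wq, &dev->work);

        destroy_workqueue(dev->wq);
}

Note that flush_work() takes the owning workqueue as its first argument, matching the signature added by this patch; work queued via schedule_work() would instead go through the flush_work_keventd() wrapper added further down.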
@@ -650,6 +733,12 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
+void flush_work_keventd(struct work_struct *work)
+{
+        flush_work(keventd_wq, work);
+}
+EXPORT_SYMBOL(flush_work_keventd);
+
 /**
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure