author	Oleg Nesterov <oleg@tv-sign.ru>	2008-07-25 04:47:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-25 13:53:40 -0400
commit	db700897224b5ebdf852f2d38920ce428940d059 (patch)
tree	98f0052929e79b35393352addde70fa5e97b8dee /kernel/workqueue.c
parent	1a4d9b0aa0d3c50314e57525a5e5ec2cfc48b4c8 (diff)
workqueues: implement flush_work()
Most users of flush_workqueue() can be changed to use cancel_work_sync(), but
sometimes we really need to wait for completion, and cancelling is not an
option. schedule_on_each_cpu() is a good example.
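For illustration only (a hedged sketch, not part of this patch), schedule_on_each_cpu()
could then wait for each of its per-cpu work items directly instead of flushing the
whole keventd workqueue; here "works" is assumed to be its alloc_percpu()'d array of
work_structs and "cpu" the usual iterator:

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));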
Add the new helper, flush_work(work), which waits for the completion of the
specific work_struct. More precisely, it "flushes" the result of the last
queue_work() which is visible to the caller.
For example, this code
	queue_work(wq, work);
	/* WINDOW */
	queue_work(wq, work);

	flush_work(work);
doesn't necessarily work "as expected". What can happen in the WINDOW above is
- wq starts the execution of work->func()
- the caller migrates to another CPU
Now, after the 2nd queue_work(), this work is active on the previous CPU and,
at the same time, it is queued on another CPU. In this case flush_work(work)
may return before the first work->func() completes.
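The same window, annotated (my comments; one possible interleaving, assuming
queue_work() queues the work on the caller's current CPU):

	queue_work(wq, work);	/* queued on CPU0, where the caller runs */
	/* WINDOW: CPU0's worker dequeues the work and starts work->func(),
	 * while the caller migrates to CPU1 */
	queue_work(wq, work);	/* queued again, this time on CPU1 */
	flush_work(work);	/* waits only for the CPU1 instance, so it can
				 * return while CPU0 is still in work->func() */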
It is trivial to add another helper
	int flush_work_sync(struct work_struct *work)
	{
		return flush_work(work) || wait_on_work(work);
	}
which works "more correctly", but it has to iterate over all CPUs and thus
it much slower than flush_work().
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Max Krasnyansky <maxk@qualcomm.com>
Acked-by: Jarek Poplawski <jarkao2@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	46
1 files changed, 46 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d9a2d65cc63e..ee41cf857d55 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -423,6 +423,52 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+	struct cpu_workqueue_struct *cwq;
+	struct list_head *prev;
+	struct wq_barrier barr;
+
+	might_sleep();
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return 0;
+
+	prev = NULL;
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * See the comment near try_to_grab_pending()->smp_rmb().
+		 * If it was re-queued under us we are not going to wait.
+		 */
+		smp_rmb();
+		if (unlikely(cwq != get_wq_data(work)))
+			goto out;
+		prev = &work->entry;
+	} else {
+		if (cwq->current_work != work)
+			goto out;
+		prev = &cwq->worklist;
+	}
+	insert_wq_barrier(cwq, &barr, prev->next);
+out:
+	spin_unlock_irq(&cwq->lock);
+	if (!prev)
+		return 0;
+
+	wait_for_completion(&barr.done);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
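A minimal usage sketch of the new helper (illustrative only, not from this patch;
my_wq, my_work and my_work_fn are made-up names), assuming the caller has arranged
that the work is not requeued concurrently:

	static void my_work_fn(struct work_struct *work)
	{
		/* the deferred processing goes here */
	}
	static DECLARE_WORK(my_work, my_work_fn);

	queue_work(my_wq, &my_work);
	/*
	 * Wait for the instance queued above; flush_work() returns 0 when
	 * it found nothing to wait for, 1 after actually waiting.
	 */
	flush_work(&my_work);

Unlike flush_workqueue(my_wq), this waits only for my_work itself, not for every
work item queued on my_wq.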