author		Tejun Heo <tj@kernel.org>	2010-09-16 04:48:29 -0400
committer	Tejun Heo <tj@kernel.org>	2010-09-19 11:51:05 -0400
commit		09383498c5d35262e643bfdbae84826177a3c624 (patch)
tree		ec75ee767bff28cabbd1d1b82cfc3457147dda33 /kernel
parent		baf59022c37d43f202e62d5130e4bac5e825b426 (diff)
workqueue: implement flush[_delayed]_work_sync()
Implement flush[_delayed]_work_sync().  These are flush functions which
also make sure no CPU is still executing the target work from earlier
queueing instances.  These are similar to cancel[_delayed]_work_sync()
except that the target work item is flushed instead of cancelled.

Signed-off-by: Tejun Heo <tj@kernel.org>
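As a rough usage sketch (not part of this commit): a driver that must be sure its work item has actually run, rather than being discarded, would call flush_work_sync() at teardown where cancel_work_sync() is too strong. The names my_dev, my_dev_work_fn, my_dev_init and my_dev_teardown below are hypothetical.

#include <linux/workqueue.h>
#include <linux/printk.h>

/* Hypothetical device context -- names are illustrative, not from this commit. */
struct my_dev {
	struct work_struct work;
};

static void my_dev_work_fn(struct work_struct *work)
{
	/* process whatever was queued for the device */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_dev_work_fn);
}

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * Unlike cancel_work_sync(), this lets a pending queueing of the
	 * work item run to completion, and also waits for any instance
	 * still executing on another CPU.
	 */
	if (flush_work_sync(&dev->work))
		pr_debug("my_dev: work was pending or running, now idle\n");
}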
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	56
1 files changed, 56 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33d31d768706..19e4bc15ee99 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2435,6 +2435,41 @@ static bool wait_on_work(struct work_struct *work)
 	return ret;
 }
 
+/**
+ * flush_work_sync - wait until a work has finished execution
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  On return, it's
+ * guaranteed that all queueing instances of @work which happened
+ * before this function is called are finished.  In other words, if
+ * @work hasn't been requeued since this function was called, @work is
+ * guaranteed to be idle on return.
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work_sync(struct work_struct *work)
+{
+	struct wq_barrier barr;
+	bool pending, waited;
+
+	/* we'll wait for executions separately, queue barr only if pending */
+	pending = start_flush_work(work, &barr, false);
+
+	/* wait for executions to finish */
+	waited = wait_on_work(work);
+
+	/* wait for the pending one */
+	if (pending) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
+
+	return pending || waited;
+}
+EXPORT_SYMBOL_GPL(flush_work_sync);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
@@ -2539,6 +2574,27 @@ bool flush_delayed_work(struct delayed_work *dwork)
 EXPORT_SYMBOL(flush_delayed_work);
 
 /**
+ * flush_delayed_work_sync - wait for a dwork to finish
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * execution immediately.  Other than timer handling, its behavior
+ * is identical to flush_work_sync().
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer))
+		__queue_work(raw_smp_processor_id(),
+			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+	return flush_work_sync(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work_sync);
+
+/**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  * @dwork: the delayed work cancel
  *
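A comparable sketch for the delayed variant (hypothetical code, not from this commit): where a caller previously had to cancel_delayed_work_sync() and risk discarding an update still waiting on its timer, it can now flush instead. The names my_stats, my_stats_refresh_fn, my_stats_init and my_stats_suspend are illustrative assumptions.

#include <linux/workqueue.h>

/* Hypothetical: a periodic stats refresh that must not be lost on suspend. */
struct my_stats {
	struct delayed_work refresh;
};

static void my_stats_refresh_fn(struct work_struct *work)
{
	/* push the latest counters out before the device sleeps */
}

static void my_stats_init(struct my_stats *st)
{
	INIT_DELAYED_WORK(&st->refresh, my_stats_refresh_fn);
}

static int my_stats_suspend(struct my_stats *st)
{
	/*
	 * cancel_delayed_work_sync() would throw away a refresh that is
	 * still waiting on its timer; flush_delayed_work_sync() queues it
	 * immediately and waits for it (and for any instance already
	 * running elsewhere) to finish before we power down.
	 */
	flush_delayed_work_sync(&st->refresh);
	return 0;
}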