author      Tejun Heo <tj@kernel.org>    2010-09-16 04:42:16 -0400
committer   Tejun Heo <tj@kernel.org>    2010-09-19 11:51:05 -0400
commit      baf59022c37d43f202e62d5130e4bac5e825b426 (patch)
tree        43eea7aac112b2ee07b195e00bce4b14465d1183 /kernel/workqueue.c
parent      401a8d048eadfbe1b1c1bf53d3b614fcc894c61a (diff)
workqueue: factor out start_flush_work()
Factor out start_flush_work() from flush_work().  start_flush_work() has a
@wait_executing argument which controls whether the barrier is queued only if
the work is pending, or also if it is executing.  As flush_work() needs to
wait for execution too, it uses %true.

This commit doesn't cause any behavior difference.  start_flush_work() will be
used to implement flush_work_sync().

Signed-off-by: Tejun Heo <tj@kernel.org>
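To see what the new @wait_executing switch is for: a caller that waits for
executing instances by other means only needs the barrier for a *pending*
instance, so it can pass %false.  A rough sketch of how a flush_work_sync()
built on this helper might look, as it would sit inside kernel/workqueue.c
(hypothetical code, not part of this patch; it assumes the file's existing
wait_on_work() helper, which waits for execution on all CPUs):

/*
 * Sketch only -- not part of this patch.  Queue the barrier behind a
 * pending instance only (@wait_executing == false), wait for executing
 * instances separately, then wait for the barrier.
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* barrier is queued only if @work is still pending */
	pending = start_flush_work(work, &barr, false);

	/* wait for any instance that is currently executing */
	waited = wait_on_work(work);

	/* finally wait for the pending instance to finish */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}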
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    64
1 file changed, 37 insertions(+), 27 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1240b9d94b03..33d31d768706 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2326,35 +2326,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+                             bool wait_executing)
 {
         struct worker *worker = NULL;
         struct global_cwq *gcwq;
         struct cpu_workqueue_struct *cwq;
-        struct wq_barrier barr;
 
         might_sleep();
         gcwq = get_work_gcwq(work);
         if (!gcwq)
-                return 0;
+                return false;
 
         spin_lock_irq(&gcwq->lock);
         if (!list_empty(&work->entry)) {
@@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work)
                 cwq = get_work_cwq(work);
                 if (unlikely(!cwq || gcwq != cwq->gcwq))
                         goto already_gone;
-        } else {
+        } else if (wait_executing) {
                 worker = find_worker_executing_work(gcwq, work);
                 if (!worker)
                         goto already_gone;
                 cwq = worker->current_cwq;
-        }
+        } else
+                goto already_gone;
 
-        insert_wq_barrier(cwq, &barr, work, worker);
+        insert_wq_barrier(cwq, barr, work, worker);
         spin_unlock_irq(&gcwq->lock);
 
         lock_map_acquire(&cwq->wq->lockdep_map);
         lock_map_release(&cwq->wq->lockdep_map);
-
-        wait_for_completion(&barr.done);
-        destroy_work_on_stack(&barr.work);
         return true;
 already_gone:
         spin_unlock_irq(&gcwq->lock);
         return false;
 }
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  This function considers
+ * only the last queueing instance of @work.  If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+        struct wq_barrier barr;
+
+        if (start_flush_work(work, &barr, true)) {
+                wait_for_completion(&barr.done);
+                destroy_work_on_stack(&barr.work);
+                return true;
+        } else
+                return false;
+}
 EXPORT_SYMBOL_GPL(flush_work);
 
 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
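The flush path above leans on the wq_barrier mechanism: insert_wq_barrier()
queues a dummy work item directly behind the work being flushed, so waiting on
its completion waits for that work to finish.  A simplified, self-contained
illustration of the same idea (hypothetical demo_* names, not the kernel's
actual insert_wq_barrier(); it queues the barrier at the tail of a workqueue
rather than behind one specific work item, which on an ordered workqueue waits
for everything queued before it):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct demo_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void demo_barrier_func(struct work_struct *work)
{
	struct demo_barrier *b = container_of(work, struct demo_barrier, work);

	/* everything queued ahead of this barrier has already run */
	complete(&b->done);
}

static void demo_flush(struct workqueue_struct *wq)
{
	struct demo_barrier barr;

	INIT_WORK_ONSTACK(&barr.work, demo_barrier_func);
	init_completion(&barr.done);

	queue_work(wq, &barr.work);		/* barrier joins the back of the queue */
	wait_for_completion(&barr.done);	/* sleep until the barrier has run */
	destroy_work_on_stack(&barr.work);
}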