path: root/kernel/workqueue.c
author     Tejun Heo <tj@kernel.org>  2010-09-16 04:36:00 -0400
committer  Tejun Heo <tj@kernel.org>  2010-09-19 11:51:05 -0400
commit     401a8d048eadfbe1b1c1bf53d3b614fcc894c61a (patch)
tree       7e1761149643e395a33619deb67ec99d8949a7a4 /kernel/workqueue.c
parent     81dcaf6516d8bbd75b894862c8ae7bba04380cfe (diff)
workqueue: cleanup flush/cancel functions
Make the following cleanup changes.

* Relocate flush/cancel function prototypes and definitions.

* Relocate wait_on_cpu_work() and wait_on_work() before
  try_to_grab_pending(). These will be used to implement
  flush_work_sync().

* Make all flush/cancel functions return bool instead of int.

* Update wait_on_cpu_work() and wait_on_work() to return %true if they
  actually waited.

* Add / update comments.

This patch doesn't cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
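As a hedged illustration of the interface change described above (hypothetical caller code, not part of this patch; the example_* names are invented), the bool return values let a caller see and report whether a flush or cancel actually had anything to do:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* hypothetical work item used only for this sketch */
static void example_fn(struct work_struct *work)
{
        pr_info("example work ran\n");
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
        schedule_work(&example_work);
        return 0;
}

static void __exit example_exit(void)
{
        /* after this patch, cancel_work_sync() returns true if the work was pending */
        if (cancel_work_sync(&example_work))
                pr_info("example work was still pending at module exit\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");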
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  175
1 file changed, 94 insertions, 81 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f77afd939229..1240b9d94b03 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2327,16 +2327,24 @@ out_unlock:
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
  *
- * Returns false if @work has already terminated.
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
  *
- * It is expected that, prior to calling flush_work(), the caller has
- * arranged for the work to not be requeued, otherwise it doesn't make
- * sense to use this function.
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
  */
-int flush_work(struct work_struct *work)
+bool flush_work(struct work_struct *work)
 {
        struct worker *worker = NULL;
        struct global_cwq *gcwq;
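A hedged, caller-side sketch of the semantics documented in the hunk above (hypothetical example_dev/refresh_work/stopping names, not part of this patch): flush_work() only waits for the last queueing instance, so a caller typically stops requeueing first and can use the new bool result to learn whether it actually had to wait.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
        struct work_struct refresh_work;        /* hypothetical */
        bool stopping;
};

static void example_stop(struct example_dev *dev)
{
        /* assumed: setting ->stopping prevents the work from requeueing itself */
        dev->stopping = true;
        if (flush_work(&dev->refresh_work))
                pr_debug("waited for an in-flight refresh\n");
}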
@@ -2374,13 +2382,49 @@ int flush_work(struct work_struct *work)
 
        wait_for_completion(&barr.done);
        destroy_work_on_stack(&barr.work);
-       return 1;
+       return true;
 already_gone:
        spin_unlock_irq(&gcwq->lock);
-       return 0;
+       return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
+{
+       struct wq_barrier barr;
+       struct worker *worker;
+
+       spin_lock_irq(&gcwq->lock);
+
+       worker = find_worker_executing_work(gcwq, work);
+       if (unlikely(worker))
+               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
+
+       spin_unlock_irq(&gcwq->lock);
+
+       if (unlikely(worker)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else
+               return false;
+}
+
+static bool wait_on_work(struct work_struct *work)
+{
+       bool ret = false;
+       int cpu;
+
+       might_sleep();
+
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
+
+       for_each_gcwq_cpu(cpu)
+               ret |= wait_on_cpu_work(get_gcwq(cpu), work);
+       return ret;
+}
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
@@ -2423,39 +2467,7 @@ static int try_to_grab_pending(struct work_struct *work)
        return ret;
 }
 
-static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-       struct wq_barrier barr;
-       struct worker *worker;
-
-       spin_lock_irq(&gcwq->lock);
-
-       worker = find_worker_executing_work(gcwq, work);
-       if (unlikely(worker))
-               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-       spin_unlock_irq(&gcwq->lock);
-
-       if (unlikely(worker)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-       }
-}
-
-static void wait_on_work(struct work_struct *work)
-{
-       int cpu;
-
-       might_sleep();
-
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
-       for_each_gcwq_cpu(cpu)
-               wait_on_cpu_work(get_gcwq(cpu), work);
-}
-
-static int __cancel_work_timer(struct work_struct *work,
+static bool __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
 {
        int ret;
@@ -2472,42 +2484,60 @@ static int __cancel_work_timer(struct work_struct *work,
 }
 
 /**
- * cancel_work_sync - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns true if @work was pending.
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
  *
- * cancel_work_sync() will cancel the work if it is queued. If the work's
- * callback appears to be running, cancel_work_sync() will block until it
- * has completed.
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue. On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
  *
- * It is possible to use this function if the work re-queues itself. It can
- * cancel the work even if it migrates to another workqueue, however in that
- * case it only guarantees that work->func() has completed on the last queued
- * workqueue.
- *
- * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
- * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's. Use cancel_delayed_work_sync() instead.
  *
- * The caller must ensure that workqueue_struct on which this work was last
+ * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
+ *
+ * RETURNS:
+ * %true if @work was pending, %false otherwise.
  */
-int cancel_work_sync(struct work_struct *work)
+bool cancel_work_sync(struct work_struct *work)
 {
        return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_delayed_work_sync - reliably kill off a delayed work.
- * @dwork: the delayed work struct
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution. Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer))
+               __queue_work(raw_smp_processor_id(),
+                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+       return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
+ * @dwork: the delayed work cancel
  *
- * Returns true if @dwork was pending.
+ * This is cancel_work_sync() for delayed works.
  *
- * It is possible to use this function if @dwork rearms itself via queue_work()
- * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ * RETURNS:
+ * %true if @dwork was pending, %false otherwise.
  */
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
        return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
@@ -2559,23 +2589,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
- * flush_delayed_work - block until a dwork_struct's callback has terminated
- * @dwork: the delayed work which is to be flushed
- *
- * Any timeout is cancelled, and any pending work is run immediately.
- */
-void flush_delayed_work(struct delayed_work *dwork)
-{
-       if (del_timer_sync(&dwork->timer)) {
-               __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
-                            &dwork->work);
-               put_cpu();
-       }
-       flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
-/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
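To close, a hedged usage sketch of the delayed-work helpers as they behave after this patch (hypothetical driver code with invented example_stats/poll_work names, not part of the commit): flush_delayed_work() pulls a pending timer forward and waits for the work, while cancel_delayed_work_sync() guarantees the work is neither pending nor running; both now report their outcome as bool.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_stats {
        struct delayed_work poll_work;  /* hypothetical periodic poller */
};

static void example_suspend(struct example_stats *st)
{
        /* run any deferred poll immediately and wait for it to finish */
        if (flush_delayed_work(&st->poll_work))
                pr_debug("poll work executed before suspend\n");
}

static void example_remove(struct example_stats *st)
{
        /* guarantee nothing is pending or running before teardown */
        if (cancel_delayed_work_sync(&st->poll_work))
                pr_debug("poll work was still pending at remove\n");
}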