diff options
-rw-r--r-- | include/linux/workqueue.h | 2 | ||||
-rw-r--r-- | kernel/workqueue.c | 56 |
2 files changed, 58 insertions, 0 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index bb9b683ea6fa..e33ff4a91703 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -355,9 +355,11 @@ extern int keventd_up(void); | |||
355 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 355 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
356 | 356 | ||
357 | extern bool flush_work(struct work_struct *work); | 357 | extern bool flush_work(struct work_struct *work); |
358 | extern bool flush_work_sync(struct work_struct *work); | ||
358 | extern bool cancel_work_sync(struct work_struct *work); | 359 | extern bool cancel_work_sync(struct work_struct *work); |
359 | 360 | ||
360 | extern bool flush_delayed_work(struct delayed_work *dwork); | 361 | extern bool flush_delayed_work(struct delayed_work *dwork); |
362 | extern bool flush_delayed_work_sync(struct delayed_work *dwork); | ||
361 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); | 363 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); |
362 | 364 | ||
363 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | 365 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 33d31d768706..19e4bc15ee99 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2435,6 +2435,41 @@ static bool wait_on_work(struct work_struct *work) | |||
2435 | return ret; | 2435 | return ret; |
2436 | } | 2436 | } |
2437 | 2437 | ||
2438 | /** | ||
2439 | * flush_work_sync - wait until a work has finished execution | ||
2440 | * @work: the work to flush | ||
2441 | * | ||
2442 | * Wait until @work has finished execution. On return, it's | ||
2443 | * guaranteed that all queueing instances of @work which happened | ||
2444 | * before this function is called are finished. In other words, if | ||
2445 | * @work hasn't been requeued since this function was called, @work is | ||
2446 | * guaranteed to be idle on return. | ||
2447 | * | ||
2448 | * RETURNS: | ||
2449 | * %true if flush_work_sync() waited for the work to finish execution, | ||
2450 | * %false if it was already idle. | ||
2451 | */ | ||
2452 | bool flush_work_sync(struct work_struct *work) | ||
2453 | { | ||
2454 | struct wq_barrier barr; | ||
2455 | bool pending, waited; | ||
2456 | |||
2457 | /* we'll wait for executions separately, queue barr only if pending */ | ||
2458 | pending = start_flush_work(work, &barr, false); | ||
2459 | |||
2460 | /* wait for executions to finish */ | ||
2461 | waited = wait_on_work(work); | ||
2462 | |||
2463 | /* wait for the pending one */ | ||
2464 | if (pending) { | ||
2465 | wait_for_completion(&barr.done); | ||
2466 | destroy_work_on_stack(&barr.work); | ||
2467 | } | ||
2468 | |||
2469 | return pending || waited; | ||
2470 | } | ||
2471 | EXPORT_SYMBOL_GPL(flush_work_sync); | ||
2472 | |||
2438 | /* | 2473 | /* |
2439 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, | 2474 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, |
2440 | * so this work can't be re-armed in any way. | 2475 | * so this work can't be re-armed in any way. |
@@ -2539,6 +2574,27 @@ bool flush_delayed_work(struct delayed_work *dwork) | |||
2539 | EXPORT_SYMBOL(flush_delayed_work); | 2574 | EXPORT_SYMBOL(flush_delayed_work); |
2540 | 2575 | ||
2541 | /** | 2576 | /** |
2577 | * flush_delayed_work_sync - wait for a dwork to finish | ||
2578 | * @dwork: the delayed work to flush | ||
2579 | * | ||
2580 | * Delayed timer is cancelled and the pending work is queued for | ||
2581 | * execution immediately. Other than timer handling, its behavior | ||
2582 | * is identical to flush_work_sync(). | ||
2583 | * | ||
2584 | * RETURNS: | ||
2585 | * %true if flush_delayed_work_sync() waited for the work to finish | ||
2586 | * %false if it was already idle. | ||
2587 | */ | ||
2588 | bool flush_delayed_work_sync(struct delayed_work *dwork) | ||
2589 | { | ||
2590 | if (del_timer_sync(&dwork->timer)) | ||
2591 | __queue_work(raw_smp_processor_id(), | ||
2592 | get_work_cwq(&dwork->work)->wq, &dwork->work); | ||
2593 | return flush_work_sync(&dwork->work); | ||
2594 | } | ||
2595 | EXPORT_SYMBOL(flush_delayed_work_sync); | ||
2596 | |||
2597 | /** | ||
2542 | * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish | 2598 | * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish |
2543 | * @dwork: the delayed work cancel | 2599 | * @dwork: the delayed work cancel |
2544 | * | 2600 | * |