diff options
-rw-r--r--  include/linux/workqueue.h |  2
-rw-r--r--  kernel/workqueue.c        | 46
2 files changed, 48 insertions(+), 0 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 14d47120682b..5c158c477ac7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -201,6 +201,8 @@ extern int keventd_up(void);
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern int flush_work(struct work_struct *work);
+
 extern int cancel_work_sync(struct work_struct *work);
 
 /*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d9a2d65cc63e..ee41cf857d55 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -423,6 +423,52 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns 1 if a barrier was queued and waited for; 0 if there was
 * nothing to wait for (the work was never queued, was re-queued to a
 * different cwq under us, or is neither pending nor currently running).
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		/* no cwq recorded: presumably never queued — nothing to flush */
		return 0;

	/* prev != NULL after the critical section means a barrier was queued */
	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		/* still pending on this cwq: insert the barrier right after it */
		prev = &work->entry;
	} else {
		/*
		 * Not on the list, so it can only be running right now. If
		 * some other work is current, @work has already finished.
		 */
		if (cwq->current_work != work)
			goto out;
		/* barrier goes at the head, behind the currently-running work */
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		/* no barrier was inserted, so there is nothing to wait for */
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
471 | |||
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.