| author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-05-09 05:34:22 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:53 -0400 |
| commit | 28e53bddf814485699a4142bc056fd37d4e11dd4 (patch) | |
| tree | 5182090c4cc2186eedbda3cb90ed82a2836f6ff6 /kernel/workqueue.c | |
| parent | 5830c5902138f80b0a097b797200c739466beedd (diff) | |
unify flush_work/flush_work_keventd and rename it to cancel_work_sync
flush_work(wq, work) doesn't need the first parameter; we can use cwq->wq
(this was possible from the very beginning, I missed this). So we can unify
flush_work_keventd and flush_work.
Also, rename flush_work() to cancel_work_sync() and fix all callers.
Perhaps this is not the best name, but "flush_work" is really bad.
(akpm: this is why the earlier patches bypassed maintainers)
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
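For callers, the unification means a teardown path no longer has to name the workqueue. A hypothetical driver sketch of the before/after (the `my_dev` structure and its work item are illustrative, not from this patch):

```c
#include <linux/workqueue.h>

/* Illustrative device with a work item queued via schedule_work(). */
struct my_dev {
	struct work_struct reset_work;
};

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * Before this patch the caller had to know where the work was
	 * queued:
	 *
	 *   flush_work_keventd(&dev->reset_work);  (keventd users)
	 *   flush_work(my_wq, &dev->reset_work);   (private workqueues)
	 *
	 * After it, both collapse into the single call below; the
	 * workqueue is recovered internally via cwq->wq.
	 */
	cancel_work_sync(&dev->reset_work);
}
```

Dropping the wq argument is what lets flush_work_keventd() disappear entirely: keventd users and private-queue users now share one entry point.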
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 36 |
1 file changed, 17 insertions, 19 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63885abf1ba0..c9ab4293904f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 }
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
+ * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
- * flush_work() will attempt to cancel the work if it is queued. If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
+ * cancel_work_sync() will attempt to cancel the work if it is queued. If the
+ * work's callback appears to be running, cancel_work_sync() will block until
+ * it has completed.
  *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon. It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
+ * cancel_work_sync() is designed to be used when the caller is tearing down
+ * data structures which the callback function operates upon. It is expected
+ * that, prior to calling cancel_work_sync(), the caller has arranged for the
+ * work to not be requeued.
  */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+void cancel_work_sync(struct work_struct *work)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	struct workqueue_struct *wq;
+	const cpumask_t *cpu_map;
 	int cpu;
 
 	might_sleep();
@@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_clear_pending(work);
 	spin_unlock_irq(&cwq->lock);
 
+	wq = cwq->wq;
+	cpu_map = wq_cpu_map(wq);
+
 	for_each_cpu_mask(cpu, *cpu_map)
 		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 
 static struct workqueue_struct *keventd_wq;
@@ -540,18 +543,13 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-	flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
 /**
  * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  *
  * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
+ * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
+ * on it.
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
```
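The kernel-doc keeps the old contract: the caller must stop whatever requeues the work before calling cancel_work_sync(). A minimal sketch of that ordering, assuming a NIC whose irq handler queues the work (the device, irq, and names here are illustrative, not from this patch):

```c
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_nic {
	int irq;
	struct work_struct rx_refill;	/* queued from the irq handler */
};

static void my_nic_down(struct my_nic *nic)
{
	disable_irq(nic->irq);			/* 1. no new queueing */
	cancel_work_sync(&nic->rx_refill);	/* 2. dequeue, wait if running */
	/*
	 * 3. rx_refill's handler cannot be running past this point, so
	 *    the data structures it operates on may now be torn down.
	 */
}
```

The same ordering applies to the delayed-work comment in the last hunk: cancel_delayed_work() alone does not wait for a running callback, so a flush_workqueue() or cancel_work_sync() is still needed before teardown.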
