diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-07-16 02:41:44 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-16 12:05:51 -0400 |
commit | 1f1f642e2f092e37eb9038060eb0100c44f55a11 (patch) | |
tree | 73dea7896dea85dcf5cfa13b9e3ebf9645868160 /kernel | |
parent | f5a421a4509a7e2dff11da0f01b0548f4f84d503 (diff) |
make cancel_xxx_work_sync() return a boolean
Change cancel_work_sync() and cancel_delayed_work_sync() to return a boolean
indicating whether the work was actually cancelled. A zero return value means
that the work was not pending/queued.
Without that kind of change it is sometimes not possible to avoid
flush_workqueue(); see the next patch for an example.
Also, this patch unifies both functions and kills the (unlikely) busy-wait
loop.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 41 |
1 files changed, 27 insertions, 14 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ad9656886daa..d7d3fa3072e5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -382,16 +382,16 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
382 | EXPORT_SYMBOL_GPL(flush_workqueue); | 382 | EXPORT_SYMBOL_GPL(flush_workqueue); |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit, | 385 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, |
386 | * so this work can't be re-armed in any way. | 386 | * so this work can't be re-armed in any way. |
387 | */ | 387 | */ |
388 | static int try_to_grab_pending(struct work_struct *work) | 388 | static int try_to_grab_pending(struct work_struct *work) |
389 | { | 389 | { |
390 | struct cpu_workqueue_struct *cwq; | 390 | struct cpu_workqueue_struct *cwq; |
391 | int ret = 0; | 391 | int ret = -1; |
392 | 392 | ||
393 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) | 393 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) |
394 | return 1; | 394 | return 0; |
395 | 395 | ||
396 | /* | 396 | /* |
397 | * The queueing is in progress, or it is already queued. Try to | 397 | * The queueing is in progress, or it is already queued. Try to |
@@ -457,10 +457,28 @@ static void wait_on_work(struct work_struct *work) | |||
457 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 457 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
458 | } | 458 | } |
459 | 459 | ||
460 | static int __cancel_work_timer(struct work_struct *work, | ||
461 | struct timer_list* timer) | ||
462 | { | ||
463 | int ret; | ||
464 | |||
465 | do { | ||
466 | ret = (timer && likely(del_timer(timer))); | ||
467 | if (!ret) | ||
468 | ret = try_to_grab_pending(work); | ||
469 | wait_on_work(work); | ||
470 | } while (unlikely(ret < 0)); | ||
471 | |||
472 | work_clear_pending(work); | ||
473 | return ret; | ||
474 | } | ||
475 | |||
460 | /** | 476 | /** |
461 | * cancel_work_sync - block until a work_struct's callback has terminated | 477 | * cancel_work_sync - block until a work_struct's callback has terminated |
462 | * @work: the work which is to be flushed | 478 | * @work: the work which is to be flushed |
463 | * | 479 | * |
480 | * Returns true if @work was pending. | ||
481 | * | ||
464 | * cancel_work_sync() will cancel the work if it is queued. If the work's | 482 | * cancel_work_sync() will cancel the work if it is queued. If the work's |
465 | * callback appears to be running, cancel_work_sync() will block until it | 483 | * callback appears to be running, cancel_work_sync() will block until it |
466 | * has completed. | 484 | * has completed. |
@@ -476,12 +494,9 @@ static void wait_on_work(struct work_struct *work) | |||
476 | * The caller must ensure that workqueue_struct on which this work was last | 494 | * The caller must ensure that workqueue_struct on which this work was last |
477 | * queued can't be destroyed before this function returns. | 495 | * queued can't be destroyed before this function returns. |
478 | */ | 496 | */ |
479 | void cancel_work_sync(struct work_struct *work) | 497 | int cancel_work_sync(struct work_struct *work) |
480 | { | 498 | { |
481 | while (!try_to_grab_pending(work)) | 499 | return __cancel_work_timer(work, NULL); |
482 | cpu_relax(); | ||
483 | wait_on_work(work); | ||
484 | work_clear_pending(work); | ||
485 | } | 500 | } |
486 | EXPORT_SYMBOL_GPL(cancel_work_sync); | 501 | EXPORT_SYMBOL_GPL(cancel_work_sync); |
487 | 502 | ||
@@ -489,16 +504,14 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); | |||
489 | * cancel_delayed_work_sync - reliably kill off a delayed work. | 504 | * cancel_delayed_work_sync - reliably kill off a delayed work. |
490 | * @dwork: the delayed work struct | 505 | * @dwork: the delayed work struct |
491 | * | 506 | * |
507 | * Returns true if @dwork was pending. | ||
508 | * | ||
492 | * It is possible to use this function if @dwork rearms itself via queue_work() | 509 | * It is possible to use this function if @dwork rearms itself via queue_work() |
493 | * or queue_delayed_work(). See also the comment for cancel_work_sync(). | 510 | * or queue_delayed_work(). See also the comment for cancel_work_sync(). |
494 | */ | 511 | */ |
495 | void cancel_delayed_work_sync(struct delayed_work *dwork) | 512 | int cancel_delayed_work_sync(struct delayed_work *dwork) |
496 | { | 513 | { |
497 | while (!del_timer(&dwork->timer) && | 514 | return __cancel_work_timer(&dwork->work, &dwork->timer); |
498 | !try_to_grab_pending(&dwork->work)) | ||
499 | cpu_relax(); | ||
500 | wait_on_work(&dwork->work); | ||
501 | work_clear_pending(&dwork->work); | ||
502 | } | 515 | } |
503 | EXPORT_SYMBOL(cancel_delayed_work_sync); | 516 | EXPORT_SYMBOL(cancel_delayed_work_sync); |
504 | 517 | ||