Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3bebf73be976..58e5c152a6bb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,8 +282,8 @@ static int worker_thread(void *__cwq)
 	struct cpu_workqueue_struct *cwq = __cwq;
 	DEFINE_WAIT(wait);
 
-	if (!cwq->wq->freezeable)
-		current->flags |= PF_NOFREEZE;
+	if (cwq->wq->freezeable)
+		set_freezable();
 
 	set_user_nice(current, -5);
 
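The first hunk drops the old PF_NOFREEZE handling: worker threads of a freezeable workqueue now opt in to the freezer with set_freezable(). As a hedged illustration of that pattern (not part of this diff; example_poll_thread and its loop body are made up), a freezable kthread typically looks like this:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static int example_poll_thread(void *unused)
{
	set_freezable();		/* kthreads are not freezable unless they opt in */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* cooperate with suspend/hibernation */
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}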
@@ -382,16 +382,16 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 /*
- * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
+ * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
  */
 static int try_to_grab_pending(struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
-	int ret = 0;
+	int ret = -1;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
-		return 1;
+		return 0;
 
 	/*
 	 * The queueing is in progress, or it is already queued. Try to
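This hunk changes the return convention of try_to_grab_pending(): a negative value now means the queueing is in progress and the caller must retry, while any value >= 0 means the caller owns WORK_STRUCT_PENDING (0 when the work was idle; a positive value when a queued work was stolen off its list, a return path outside this hunk and therefore assumed here). A sketch of the retry pattern this enables, written as a hypothetical helper inside kernel/workqueue.c (example_grab() is not in the patch; the real user is __cancel_work_timer() in the next hunk):

/* Hypothetical in-file helper, for illustration only. */
static int example_grab(struct work_struct *work)
{
	int ret;

	do {
		ret = try_to_grab_pending(work);	/* < 0: queueing in flight, retry */
		if (ret < 0)
			cpu_relax();
	} while (ret < 0);

	/*
	 * ret == 0: the work was idle and we set WORK_STRUCT_PENDING ourselves;
	 * ret  > 0: the work was queued and has been taken off its list (assumed).
	 * Either way we now own the pending bit, so the work can't be re-armed.
	 */
	return ret;
}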
@@ -457,10 +457,28 @@ static void wait_on_work(struct work_struct *work)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
+static int __cancel_work_timer(struct work_struct *work,
+				struct timer_list* timer)
+{
+	int ret;
+
+	do {
+		ret = (timer && likely(del_timer(timer)));
+		if (!ret)
+			ret = try_to_grab_pending(work);
+		wait_on_work(work);
+	} while (unlikely(ret < 0));
+
+	work_clear_pending(work);
+	return ret;
+}
+
 /**
  * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
+ * Returns true if @work was pending.
+ *
  * cancel_work_sync() will cancel the work if it is queued. If the work's
  * callback appears to be running, cancel_work_sync() will block until it
  * has completed.
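The new __cancel_work_timer() helper loops until it owns the pending bit (del_timer() succeeded or try_to_grab_pending() returned >= 0), waits for a running callback, and finally clears WORK_STRUCT_PENDING. The added kernel-doc line documents that cancel_work_sync() now reports whether the work was pending. A minimal usage sketch under that assumption (refill_fn, refill_work and the teardown function are invented names, not from the kernel):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void refill_fn(struct work_struct *work)
{
	/* ... refill a buffer ... */
	schedule_work(work);		/* re-queue ourselves */
}
static DECLARE_WORK(refill_work, refill_fn);

static void example_teardown(void)
{
	/*
	 * Cancels a pending instance, waits for a running callback, and
	 * guarantees refill_fn() cannot re-queue the work behind our back.
	 */
	if (cancel_work_sync(&refill_work))
		printk(KERN_INFO "refill_work was still pending\n");
}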
@@ -476,31 +494,26 @@ static void wait_on_work(struct work_struct *work)
  * The caller must ensure that workqueue_struct on which this work was last
  * queued can't be destroyed before this function returns.
  */
-void cancel_work_sync(struct work_struct *work)
+int cancel_work_sync(struct work_struct *work)
 {
-	while (!try_to_grab_pending(work))
-		cpu_relax();
-	wait_on_work(work);
-	work_clear_pending(work);
+	return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed work.
+ * cancel_delayed_work_sync - reliably kill off a delayed work.
  * @dwork: the delayed work struct
  *
+ * Returns true if @dwork was pending.
+ *
  * It is possible to use this function if @dwork rearms itself via queue_work()
  * or queue_delayed_work(). See also the comment for cancel_work_sync().
  */
-void cancel_rearming_delayed_work(struct delayed_work *dwork)
+int cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-	while (!del_timer(&dwork->timer) &&
-	       !try_to_grab_pending(&dwork->work))
-		cpu_relax();
-	wait_on_work(&dwork->work);
-	work_clear_pending(&dwork->work);
+	return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
-EXPORT_SYMBOL(cancel_rearming_delayed_work);
+EXPORT_SYMBOL(cancel_delayed_work_sync);
 
 static struct workqueue_struct *keventd_wq __read_mostly;
 
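With the rename, callers of the old cancel_rearming_delayed_work() switch to cancel_delayed_work_sync(), which also deals with the timer half of a delayed work. A self-contained module sketch of the intended usage (poll_fn, poll_work and the one-second period are illustrative assumptions, not from this diff):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */
	schedule_delayed_work(&poll_work, HZ);	/* re-arm in one second */
}

static int __init example_init(void)
{
	schedule_delayed_work(&poll_work, HZ);
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Deletes a pending timer, waits for a running callback, and
	 * guarantees poll_fn() cannot re-arm poll_work afterwards.
	 */
	cancel_delayed_work_sync(&poll_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");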
@@ -739,18 +752,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
 
+	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
-	 * is not enough, a concurrent flush_workqueue() can insert a
-	 * barrier after us.
+	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * a concurrent flush_workqueue() can insert a barrier after us.
+	 * However, in that case run_workqueue() won't return and check
+	 * kthread_should_stop() until it flushes all work_struct's.
 	 * When ->worklist becomes empty it is safe to exit because no
 	 * more work_structs can be queued on this cwq: flush_workqueue
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	while (flush_cpu_workqueue(cwq))
-		;
-
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }