author		Tejun Heo <tj@kernel.org>	2014-09-07 19:03:59 -0400
committer	Jens Axboe <axboe@fb.com>	2014-09-08 12:00:39 -0400
commit		c0ea1c22bce63a27b47da90ad1ac49ce48e1a8aa
tree		5a5fe4a6b86c4208afb3b812426dfb34f54ae75a /mm
parent		b68757341d8015d28e261990deea58dd836e04da
bdi: make backing_dev_info->wb.dwork canceling stricter
Canceling of bdi->wb.dwork is currently a bit mushy.
bdi_wb_shutdown() performs cancel_delayed_work_sync() at the end,
after shutting down and flushing the delayed_work, and bdi_destroy()
tries yet again after bdi_unregister().
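For context, a condensed sketch of the two redundant cancel sites as
they stand before this patch (abbreviated from mm/backing-dev.c, see
the removed lines in the diff below; not the literal source):

	/* in bdi_wb_shutdown(), after draining the work list: */
	flush_delayed_work(&bdi->wb.dwork);
	cancel_delayed_work_sync(&bdi->wb.dwork);	/* cancel #1 */

	/* in bdi_destroy(): */
	bdi_unregister(bdi);	/* ends up calling bdi_wb_shutdown() */
	cancel_delayed_work_sync(&bdi->wb.dwork);	/* cancel #2 */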
bdi->wb.dwork is queued only after checking BDI_registered while
holding bdi->wb_lock and bdi_wb_shutdown() clears the flag while
holding the same lock and then flushes the delayed_work. There's no
way the delayed_work can be queued again after that.
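Concretely, the queue/shutdown discipline looks like this (a
simplified sketch modeled on bdi_wakeup_thread_delayed() and
bdi_wb_shutdown() of this era; the timeout value is elided):

	/* queue side, cf. bdi_wakeup_thread_delayed(): */
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))	/* only while registered */
		queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
	spin_unlock_bh(&bdi->wb_lock);

	/* shutdown side, cf. bdi_wb_shutdown(): */
	spin_lock_bh(&bdi->wb_lock);
	clear_bit(BDI_registered, &bdi->state);	/* no further queueing */
	spin_unlock_bh(&bdi->wb_lock);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);	/* run queued work now */
	flush_delayed_work(&bdi->wb.dwork);	/* wait for it to finish */

Once the flag is cleared under bdi->wb_lock and the work flushed, a
still-pending dwork can only mean a bug, which is what the WARNs
assert.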
Replace the two unnecessary cancel_delayed_work_sync() invocations
with WARNs on pending. This simplifies and clarifies the code a bit
and will help future changes in further isolating bdi_writeback
handling.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'mm')
 mm/backing-dev.c | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 4afeefe9e365..cb7c5e323814 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -376,13 +376,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 	flush_delayed_work(&bdi->wb.dwork);
 	WARN_ON(!list_empty(&bdi->work_list));
-
-	/*
-	 * This shouldn't be necessary unless @bdi for some reason has
-	 * unflushed dirty IO after work_list is drained.  Do it anyway
-	 * just in case.
-	 */
-	cancel_delayed_work_sync(&bdi->wb.dwork);
+	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 }
 
 /*
@@ -497,12 +491,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
 	bdi_unregister(bdi);
 
-	/*
-	 * If bdi_unregister() had already been called earlier, the dwork
-	 * could still be pending because bdi_prune_sb() can race with the
-	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
-	 */
-	cancel_delayed_work_sync(&bdi->wb.dwork);
+	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);