author      Jan Kara <jack@suse.cz>      2017-03-22 20:36:57 -0400
committer   Jens Axboe <axboe@fb.com>    2017-03-22 22:11:28 -0400
commit      5318ce7d46866e1dbc20ab9349b93753edba0b3e (patch)
tree        6a4070a385e86006c64868a2f9bd363ab002cfc8 /mm/backing-dev.c
parent      e8cb72b322cf4a729633b7e2080fbeab477f6ea2 (diff)
bdi: Shutdown writeback on all cgwbs in cgwb_bdi_destroy()
Currently we wait for all cgwbs to get freed in cgwb_bdi_destroy(),
which also means that writeback has been shut down on them. Since this
wait is going away, directly shut down writeback on cgwbs from
cgwb_bdi_destroy() to avoid live writeback structures after
bdi_unregister() has finished. To make that safe against a concurrent
shutdown from cgwb_release_workfn(), we also have to make sure
wb_shutdown() returns only after the bdi_writeback structure is really
shut down.
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
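
The handshake this patch introduces can be sketched outside the kernel as
well. Below is a minimal userspace C analogue of the wb_shutdown()
synchronization, with a pthread mutex and condition variable standing in for
wb->work_lock, the WB_shutting_down bit and the bit-wait machinery; the names
object, object_shutdown and do_teardown are illustrative only, not kernel
APIs. The first caller to clear the "registered" flag performs the teardown,
and any concurrent caller blocks until that teardown has completed, which is
the property the patch wants from wb_shutdown().

/*
 * Userspace sketch (not kernel code): the first shutdown caller wins the
 * race and does the teardown; concurrent callers wait until it is done,
 * so object_shutdown() never returns while teardown is still in flight.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct object {
        pthread_mutex_t lock;
        pthread_cond_t  done;          /* signalled when teardown finishes */
        bool            registered;    /* plays the role of WB_registered */
        bool            shutting_down; /* plays the role of WB_shutting_down */
};

static void do_teardown(struct object *obj)
{
        /* Stands in for flushing delayed work etc.; may sleep. */
        usleep(1000);
        (void)obj;
}

static void object_shutdown(struct object *obj)
{
        pthread_mutex_lock(&obj->lock);
        if (!obj->registered) {
                /* Someone else won the race: wait until they are finished. */
                while (obj->shutting_down)
                        pthread_cond_wait(&obj->done, &obj->lock);
                pthread_mutex_unlock(&obj->lock);
                return;
        }
        obj->registered = false;
        obj->shutting_down = true;     /* like set_bit(WB_shutting_down) */
        pthread_mutex_unlock(&obj->lock);

        do_teardown(obj);              /* blocking work, done without the lock */

        pthread_mutex_lock(&obj->lock);
        obj->shutting_down = false;    /* like clearing the bit once done */
        pthread_cond_broadcast(&obj->done);
        pthread_mutex_unlock(&obj->lock);
}

static void *racer(void *arg)
{
        object_shutdown(arg);
        return NULL;
}

int main(void)
{
        struct object obj = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done = PTHREAD_COND_INITIALIZER,
                .registered = true,
                .shutting_down = false,
        };
        pthread_t a, b;

        pthread_create(&a, NULL, racer, &obj);
        pthread_create(&b, NULL, racer, &obj);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("both shutdown calls returned after teardown finished\n");
        return 0;
}

Built with cc -pthread, both racing calls return only after do_teardown()
has run to completion exactly once.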
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--   mm/backing-dev.c   22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index e3d56dba4da8..b67be4fc12c4 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -356,8 +356,15 @@ static void wb_shutdown(struct bdi_writeback *wb)
 	spin_lock_bh(&wb->work_lock);
 	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 		spin_unlock_bh(&wb->work_lock);
+		/*
+		 * Wait for wb shutdown to finish if someone else is just
+		 * running wb_shutdown(). Otherwise we could proceed to wb /
+		 * bdi destruction before wb_shutdown() is finished.
+		 */
+		wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
 		return;
 	}
+	set_bit(WB_shutting_down, &wb->state);
 	spin_unlock_bh(&wb->work_lock);
 
 	cgwb_remove_from_bdi_list(wb);
@@ -369,6 +376,12 @@ static void wb_shutdown(struct bdi_writeback *wb)
 	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 	flush_delayed_work(&wb->dwork);
 	WARN_ON(!list_empty(&wb->work_list));
+	/*
+	 * Make sure bit gets cleared after shutdown is finished. Matches with
+	 * the barrier provided by test_and_clear_bit() above.
+	 */
+	smp_wmb();
+	clear_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
@@ -699,12 +712,21 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
 	struct radix_tree_iter iter;
 	void **slot;
+	struct bdi_writeback *wb;
 
 	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
 	spin_lock_irq(&cgwb_lock);
 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 		cgwb_kill(*slot);
+
+	while (!list_empty(&bdi->wb_list)) {
+		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
+				      bdi_node);
+		spin_unlock_irq(&cgwb_lock);
+		wb_shutdown(wb);
+		spin_lock_irq(&cgwb_lock);
+	}
 	spin_unlock_irq(&cgwb_lock);
 
 	/*
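
The cgwb_bdi_destroy() hunk drains bdi->wb_list by peeking at the first entry
under cgwb_lock, dropping the lock before the potentially sleeping
wb_shutdown() call, then re-taking the lock and re-checking emptiness; the
entry itself is unlinked inside wb_shutdown() via cgwb_remove_from_bdi_list().
A minimal, single-threaded userspace sketch of that drain shape follows, using
a pthread mutex and a hand-rolled singly linked list; node, blocking_shutdown
and drain_all are made-up names for illustration, not part of the patch.

/*
 * Userspace sketch (not kernel code): drain a list while never holding the
 * lock across the blocking per-entry shutdown call.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void blocking_shutdown(struct node *n)
{
        /*
         * Unlinks itself under the lock, roughly like
         * cgwb_remove_from_bdi_list() inside wb_shutdown(), then does the
         * blocking part of the teardown. In this single-threaded sketch n
         * is always still the list head at this point.
         */
        pthread_mutex_lock(&list_lock);
        head = n->next;
        pthread_mutex_unlock(&list_lock);
        printf("shutting down %d\n", n->id);
        free(n);
}

static void drain_all(void)
{
        pthread_mutex_lock(&list_lock);
        while (head) {
                struct node *n = head;          /* peek, don't unlink here */

                pthread_mutex_unlock(&list_lock);
                blocking_shutdown(n);           /* may sleep; lock is dropped */
                pthread_mutex_lock(&list_lock);
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->id = i;
                n->next = head;
                head = n;
        }
        drain_all();
        return 0;
}

The design point mirrors the kernel loop: the blocking call must not run under
the spinlock, so the loop gives up the lock for each entry and restarts from
the (possibly changed) list head once it has re-acquired it.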