about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r-- mm/backing-dev.c | 40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 12a992b62576..0ae0df55000b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -40,7 +40,7 @@ LIST_HEAD(bdi_list);
40/* bdi_wq serves all asynchronous writeback tasks */ 40/* bdi_wq serves all asynchronous writeback tasks */
41struct workqueue_struct *bdi_wq; 41struct workqueue_struct *bdi_wq;
42 42
43void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) 43static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
44{ 44{
45 if (wb1 < wb2) { 45 if (wb1 < wb2) {
46 spin_lock(&wb1->list_lock); 46 spin_lock(&wb1->list_lock);
@@ -376,13 +376,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
376 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); 376 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
377 flush_delayed_work(&bdi->wb.dwork); 377 flush_delayed_work(&bdi->wb.dwork);
378 WARN_ON(!list_empty(&bdi->work_list)); 378 WARN_ON(!list_empty(&bdi->work_list));
379 379 WARN_ON(delayed_work_pending(&bdi->wb.dwork));
380 /*
381 * This shouldn't be necessary unless @bdi for some reason has
382 * unflushed dirty IO after work_list is drained. Do it anyway
383 * just in case.
384 */
385 cancel_delayed_work_sync(&bdi->wb.dwork);
386} 380}
387 381
388/* 382/*
@@ -402,21 +396,15 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
402 396
403void bdi_unregister(struct backing_dev_info *bdi) 397void bdi_unregister(struct backing_dev_info *bdi)
404{ 398{
405 struct device *dev = bdi->dev; 399 if (bdi->dev) {
406
407 if (dev) {
408 bdi_set_min_ratio(bdi, 0); 400 bdi_set_min_ratio(bdi, 0);
409 trace_writeback_bdi_unregister(bdi); 401 trace_writeback_bdi_unregister(bdi);
410 bdi_prune_sb(bdi); 402 bdi_prune_sb(bdi);
411 403
412 bdi_wb_shutdown(bdi); 404 bdi_wb_shutdown(bdi);
413 bdi_debug_unregister(bdi); 405 bdi_debug_unregister(bdi);
414 406 device_unregister(bdi->dev);
415 spin_lock_bh(&bdi->wb_lock);
416 bdi->dev = NULL; 407 bdi->dev = NULL;
417 spin_unlock_bh(&bdi->wb_lock);
418
419 device_unregister(dev);
420 } 408 }
421} 409}
422EXPORT_SYMBOL(bdi_unregister); 410EXPORT_SYMBOL(bdi_unregister);
@@ -487,8 +475,17 @@ void bdi_destroy(struct backing_dev_info *bdi)
487 int i; 475 int i;
488 476
489 /* 477 /*
490 * Splice our entries to the default_backing_dev_info, if this 478 * Splice our entries to the default_backing_dev_info. This
491 * bdi disappears 479 * condition shouldn't happen. @wb must be empty at this point and
480 * dirty inodes on it might cause other issues. This workaround is
481 * added by ce5f8e779519 ("writeback: splice dirty inode entries to
482 * default bdi on bdi_destroy()") without root-causing the issue.
483 *
484 * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
485 * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
486 *
487 * We should probably add WARN_ON() to find out whether it still
488 * happens and track it down if so.
492 */ 489 */
493 if (bdi_has_dirty_io(bdi)) { 490 if (bdi_has_dirty_io(bdi)) {
494 struct bdi_writeback *dst = &default_backing_dev_info.wb; 491 struct bdi_writeback *dst = &default_backing_dev_info.wb;
@@ -503,12 +500,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
503 500
504 bdi_unregister(bdi); 501 bdi_unregister(bdi);
505 502
506 /* 503 WARN_ON(delayed_work_pending(&bdi->wb.dwork));
507 * If bdi_unregister() had already been called earlier, the dwork
508 * could still be pending because bdi_prune_sb() can race with the
509 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
510 */
511 cancel_delayed_work_sync(&bdi->wb.dwork);
512 504
513 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 505 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
514 percpu_counter_destroy(&bdi->bdi_stat[i]); 506 percpu_counter_destroy(&bdi->bdi_stat[i]);