 mm/backing-dev.c | 36 +++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6ac932210f56..c6f2a37028c2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -683,30 +683,18 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
 	struct radix_tree_iter iter;
-	struct rb_node *rbn;
 	void **slot;
 
 	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
 	spin_lock_irq(&cgwb_lock);
-
 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 		cgwb_kill(*slot);
-
-	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-		struct bdi_writeback_congested *congested =
-			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-		rb_erase(rbn, &bdi->cgwb_congested_tree);
-		congested->bdi = NULL;	/* mark @congested unlinked */
-	}
-
 	spin_unlock_irq(&cgwb_lock);
 
 	/*
-	 * All cgwb's and their congested states must be shutdown and
-	 * released before returning. Drain the usage counter to wait for
-	 * all cgwb's and cgwb_congested's ever created on @bdi.
+	 * All cgwb's must be shutdown and released before returning. Drain
+	 * the usage counter to wait for all cgwb's ever created on @bdi.
 	 */
 	atomic_dec(&bdi->usage_cnt);
 	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
@@ -754,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
 	spin_unlock_irq(&cgwb_lock);
 }
 
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
+{
+	struct rb_node *rbn;
+
+	spin_lock_irq(&cgwb_lock);
+	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+		struct bdi_writeback_congested *congested =
+			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+		rb_erase(rbn, &bdi->cgwb_congested_tree);
+		congested->bdi = NULL;	/* mark @congested unlinked */
+	}
+	spin_unlock_irq(&cgwb_lock);
+}
+
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
@@ -774,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 	return 0;
 }
 
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 {
 	wb_congested_put(bdi->wb_congested);
 }
@@ -905,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
 {
 	WARN_ON_ONCE(bdi->dev);
 	wb_exit(&bdi->wb);
+	cgwb_bdi_exit(bdi);
 }
 
 static void release_bdi(struct kref *ref)
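The net effect of the patch is visible across the four hunks: the congested-tree drain loop leaves the unregister-time path (cgwb_bdi_destroy()) and moves into a new cgwb_bdi_exit(), which bdi_exit() calls at final release. Below is a minimal user-space sketch (not kernel code) of that two-phase teardown: "destroy" only stops activity, while "exit" runs once the last reference is dropped and it is safe to drain per-device lookup structures. All names are hypothetical stand-ins, a plain list replaces the kernel's rbtree, and the sketch frees entries directly where the kernel defers that to each entry's own refcount.

```c
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;	/* stands in for rb_node linkage */
	struct dev *owner;	/* NULL once unlinked, like congested->bdi */
};

struct dev {
	int refcnt;		/* stands in for the bdi usage counter */
	struct entry *entries;	/* stands in for cgwb_congested_tree */
};

/* Unregister-time path: stop activity but keep lookup structures,
 * since lingering references may still reach them. */
static void dev_destroy(struct dev *d)
{
	printf("destroy: activity stopped, %s\n",
	       d->entries ? "entries kept" : "no entries");
}

/* Release-time path: the last reference is gone, so drain everything,
 * mirroring the rb_first()/rb_erase() loop moved into cgwb_bdi_exit(). */
static void dev_exit(struct dev *d)
{
	struct entry *e;

	while ((e = d->entries)) {
		d->entries = e->next;
		e->owner = NULL;	/* mark unlinked */
		free(e);		/* the kernel defers this to a refcount */
	}
	printf("exit: entries drained\n");
}

static void dev_put(struct dev *d)
{
	if (--d->refcnt == 0) {
		dev_exit(d);
		free(d);
	}
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));
	struct entry *e = calloc(1, sizeof(*e));

	d->refcnt = 2;		/* creator plus one outstanding user */
	e->owner = d;
	d->entries = e;

	dev_destroy(d);		/* unregister: entries survive */
	dev_put(d);		/* an outstanding user still holds a ref */
	dev_put(d);		/* final put: dev_exit() drains and frees */
	return 0;
}
```

This ordering is why the drain cannot stay in the destroy path: between unregister and the final put, other code may still look up entries through its remaining references, so tearing the tree down early would leave those lookups racing against freed state.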