author	Tejun Heo <tj@kernel.org>	2015-10-13 18:14:19 -0400
committer	Jens Axboe <axboe@fb.com>	2015-10-21 10:17:29 -0400
commit	e27c5b9d23168cc2cb8fec147ae7ed1f7a2005c3 (patch)
tree	d7ed4b0682df716b85b14e09e3da6337b2ee9f1d
parent	0dfc70c33409afc232ef0b9ec210535dfbf9bc61 (diff)
writeback: remove broken rbtree_postorder_for_each_entry_safe() usage in cgwb_bdi_destroy()
a20135ffbc44 ("writeback: don't drain bdi_writeback_congested on bdi
destruction") added rbtree_postorder_for_each_entry_safe() which is
used to remove all entries; however, according to Cody, the iterator
isn't safe against operations which may rebalance the tree. Fix it by
switching to repeatedly removing rb_first() until empty.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Cody P Schafer <dev@codyps.com>
Fixes: a20135ffbc44 ("writeback: don't drain bdi_writeback_congested on bdi destruction")
Link: http://lkml.kernel.org/g/1443997973-1700-1-git-send-email-dev@codyps.com
Signed-off-by: Jens Axboe <axboe@fb.com>
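
The pattern behind the fix generalizes to any rbtree teardown: rb_erase() may rebalance the tree, which invalidates the traversal state a postorder walk has already computed, whereas re-reading rb_first() on every iteration always starts from the tree's current shape. Below is a minimal sketch of that drain loop, with a hypothetical struct foo standing in for bdi_writeback_congested and kfree() assumed as the cleanup step:

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical payload standing in for bdi_writeback_congested. */
struct foo {
	struct rb_node rb_node;
};

/*
 * Remove every node from @root. rb_erase() may rebalance @root, so
 * rather than iterating while erasing, re-read the leftmost node with
 * rb_first() after each removal; each lookup reflects the
 * post-rebalance tree, so no stale iterator state is carried over.
 */
static void drain_tree(struct rb_root *root)
{
	struct rb_node *rbn;

	while ((rbn = rb_first(root))) {
		struct foo *f = rb_entry(rbn, struct foo, rb_node);

		rb_erase(rbn, root);
		kfree(f);	/* assumes nodes were kmalloc'ed */
	}
}

Re-reading rb_first() costs an extra descent from the root per node compared to a single postorder pass, but on a destruction path that trade for correctness is cheap.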
-rw-r--r--	mm/backing-dev.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 9e841399041a..619984fc07ec 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -681,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
 	struct radix_tree_iter iter;
-	struct bdi_writeback_congested *congested, *congested_n;
+	struct rb_node *rbn;
 	void **slot;
 
 	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
@@ -691,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 		cgwb_kill(*slot);
 
-	rbtree_postorder_for_each_entry_safe(congested, congested_n,
-					     &bdi->cgwb_congested_tree, rb_node) {
-		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
+	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+		struct bdi_writeback_congested *congested =
+			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+		rb_erase(rbn, &bdi->cgwb_congested_tree);
 		congested->bdi = NULL;	/* mark @congested unlinked */
 	}
 