summaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2015-08-18 17:54:52 -0400
committerJens Axboe <axboe@fb.com>2015-08-18 18:49:15 -0400
commit1ed8d48c57bf7400eac7b8dc622ab0413715cafb (patch)
treecca6d4773d4f043cb5b9feb766441c7c26401d25 /fs
parent11743ee0477ab9691d08aa121c583184769d2847 (diff)
writeback: bdi_for_each_wb() iteration is memcg ID based not blkcg
wb's (bdi_writeback's) are currently keyed by memcg ID; however, in an earlier implementation, wb's were keyed by blkcg ID. bdi_for_each_wb() walks bdi->cgwb_tree in the ascending ID order and allows iterations to start from an arbitrary ID which is used to interrupt and resume iterations. Unfortunately, while changing wb to be keyed by memcg ID instead of blkcg, bdi_for_each_wb() was missed and is still assuming that wb's are keyed by blkcg ID. This doesn't affect iterations which don't get interrupted but bdi_split_work_to_wbs() makes use of iteration resuming on allocation failures and thus may incorrectly skip or repeat wb's. Fix it by changing bdi_for_each_wb() to take memcg IDs instead of blkcg IDs and updating bdi_split_work_to_wbs() accordingly. Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/fs-writeback.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 518c6294bf6c..c9def2115aca 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -839,7 +839,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
839 bool skip_if_busy) 839 bool skip_if_busy)
840{ 840{
841 long nr_pages = base_work->nr_pages; 841 long nr_pages = base_work->nr_pages;
842 int next_blkcg_id = 0; 842 int next_memcg_id = 0;
843 struct bdi_writeback *wb; 843 struct bdi_writeback *wb;
844 struct wb_iter iter; 844 struct wb_iter iter;
845 845
@@ -849,14 +849,14 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
849 return; 849 return;
850restart: 850restart:
851 rcu_read_lock(); 851 rcu_read_lock();
852 bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) { 852 bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
853 if (!wb_has_dirty_io(wb) || 853 if (!wb_has_dirty_io(wb) ||
854 (skip_if_busy && writeback_in_progress(wb))) 854 (skip_if_busy && writeback_in_progress(wb)))
855 continue; 855 continue;
856 856
857 base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages); 857 base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
858 if (!wb_clone_and_queue_work(wb, base_work)) { 858 if (!wb_clone_and_queue_work(wb, base_work)) {
859 next_blkcg_id = wb->blkcg_css->id + 1; 859 next_memcg_id = wb->memcg_css->id + 1;
860 rcu_read_unlock(); 860 rcu_read_unlock();
861 wb_wait_for_single_work(bdi, base_work); 861 wb_wait_for_single_work(bdi, base_work);
862 goto restart; 862 goto restart;