-rw-r--r--   fs/fs-writeback.c   70
1 file changed, 14 insertions(+), 56 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c5e91225501d..14f06b459197 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -50,7 +50,6 @@ struct wb_writeback_args {
  */
 struct bdi_work {
         struct list_head list;
-        struct list_head wait_list;
         struct rcu_head rcu_head;
 
         unsigned long seen;
@@ -198,7 +197,8 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
                     TASK_UNINTERRUPTIBLE);
 }
 
-static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
+static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
+                                 struct writeback_control *wbc)
 {
         struct bdi_work *work;
 
@@ -206,7 +206,7 @@ static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
         if (work)
                 bdi_work_init(work, wbc);
 
-        return work;
+        bdi_queue_work(bdi, work);
 }
 
 void bdi_start_writeback(struct writeback_control *wbc)
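Read together, the two hunks above turn bdi_alloc_work() into bdi_alloc_queue_work(): the helper now allocates and queues in one step instead of returning the work item to the caller. A sketch of how the function reads after the patch follows; the allocation line sits outside the hunk context, so the kmalloc() call below is an assumption about the surrounding code, and bdi_queue_work() is presumably prepared to handle a NULL work item, since the caller no longer checks for allocation failure.

/*
 * Sketch assembled from the hunks above; the kmalloc() line is not shown
 * in the diff and is assumed here.
 */
static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
                                 struct writeback_control *wbc)
{
        struct bdi_work *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);      /* assumed allocation */
        if (work)
                bdi_work_init(work, wbc);

        bdi_queue_work(bdi, work);      /* queued unconditionally, even if NULL */
}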
@@ -216,11 +216,9 @@ void bdi_start_writeback(struct writeback_control *wbc)
          * bdi_queue_work() will wake up the thread and flush old data. This
          * should ensure some amount of progress in freeing memory.
          */
-        if (wbc->sync_mode != WB_SYNC_ALL) {
-                struct bdi_work *w = bdi_alloc_work(wbc);
-
-                bdi_queue_work(wbc->bdi, w);
-        } else {
+        if (wbc->sync_mode != WB_SYNC_ALL)
+                bdi_alloc_queue_work(wbc->bdi, wbc);
+        else {
                 struct bdi_work work;
 
                 bdi_work_init(&work, wbc);
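For reference, a sketch of bdi_start_writeback() after this hunk: the WB_SYNC_NONE path collapses into a single call to the new helper, while the WB_SYNC_ALL path keeps its on-stack bdi_work. Everything past bdi_work_init(&work, wbc) lies outside the hunk, so the queue-and-wait calls below are assumptions based on the helpers that appear elsewhere in this file (bdi_queue_work(), bdi_wait_on_work_clear()).

/* Sketch only; the tail of the else branch is assumed, not shown in the diff. */
void bdi_start_writeback(struct writeback_control *wbc)
{
        if (wbc->sync_mode != WB_SYNC_ALL)
                /* WB_SYNC_NONE: fire-and-forget via the new helper */
                bdi_alloc_queue_work(wbc->bdi, wbc);
        else {
                /* WB_SYNC_ALL: on-stack work item, wait for completion */
                struct bdi_work work;

                bdi_work_init(&work, wbc);
                bdi_queue_work(wbc->bdi, &work);        /* assumed continuation */
                bdi_wait_on_work_clear(&work);          /* assumed continuation */
        }
}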
@@ -860,67 +858,26 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. Expensive! If this is a data
- * integrity operation, writeback will be complete when this returns. If
- * we are simply called for WB_SYNC_NONE, then writeback will merely be
- * scheduled to run.
+ * Schedule writeback for all backing devices. Can only be used for
+ * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
+ * and pass in the superblock.
  */
 static void bdi_writeback_all(struct writeback_control *wbc)
 {
-        const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
         struct backing_dev_info *bdi;
-        struct bdi_work *work;
-        LIST_HEAD(list);
 
-restart:
+        WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
         spin_lock(&bdi_lock);
 
         list_for_each_entry(bdi, &bdi_list, bdi_list) {
-                struct bdi_work *work;
-
                 if (!bdi_has_dirty_io(bdi))
                         continue;
 
-                /*
-                 * If work allocation fails, do the writes inline. We drop
-                 * the lock and restart the list writeout. This should be OK,
-                 * since this happens rarely and because the writeout should
-                 * eventually make more free memory available.
-                 */
-                work = bdi_alloc_work(wbc);
-                if (!work) {
-                        struct writeback_control __wbc;
-
-                        /*
-                         * Not a data integrity writeout, just continue
-                         */
-                        if (!must_wait)
-                                continue;
-
-                        spin_unlock(&bdi_lock);
-                        __wbc = *wbc;
-                        __wbc.bdi = bdi;
-                        writeback_inodes_wbc(&__wbc);
-                        goto restart;
-                }
-                if (must_wait)
-                        list_add_tail(&work->wait_list, &list);
-
-                bdi_queue_work(bdi, work);
+                bdi_alloc_queue_work(bdi, wbc);
         }
 
         spin_unlock(&bdi_lock);
-
-        /*
-         * If this is for WB_SYNC_ALL, wait for pending work to complete
-         * before returning.
-         */
-        while (!list_empty(&list)) {
-                work = list_entry(list.next, struct bdi_work, wait_list);
-                list_del(&work->wait_list);
-                bdi_wait_on_work_clear(work);
-                call_rcu(&work->rcu_head, bdi_work_free);
-        }
 }
 
 /*
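Since the new bdi_writeback_all() is scattered across added and kept lines in the hunk above, here it is assembled in one piece. This is purely a readback of the diff, not an independent implementation: with the allocation-failure fallback, the restart label and the WB_SYNC_ALL wait list gone, the function reduces to queueing WB_SYNC_NONE work on every bdi with dirty IO.

/*
 * Schedule writeback for all backing devices. Can only be used for
 * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
 * and pass in the superblock.
 */
static void bdi_writeback_all(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi;

        WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

        spin_lock(&bdi_lock);

        list_for_each_entry(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;

                bdi_alloc_queue_work(bdi, wbc);
        }

        spin_unlock(&bdi_lock);
}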
@@ -1177,6 +1134,7 @@ long sync_inodes_sb(struct super_block *sb)
 {
         struct writeback_control wbc = {
                 .sb             = sb,
+                .bdi            = sb->s_bdi,
                 .sync_mode      = WB_SYNC_ALL,
                 .range_start    = 0,
                 .range_end      = LLONG_MAX,
@@ -1184,7 +1142,7 @@ long sync_inodes_sb(struct super_block *sb)
         long nr_to_write = LONG_MAX;    /* doesn't actually matter */
 
         wbc.nr_to_write = nr_to_write;
-        bdi_writeback_all(&wbc);
+        bdi_start_writeback(&wbc);
         wait_sb_inodes(&wbc);
         return nr_to_write - wbc.nr_to_write;
 }
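The two sync_inodes_sb() hunks combine to the sketch below; the line closing the initializer falls between the hunks and is assumed, the rest is read straight from the diff. With .bdi filled in from sb->s_bdi, the WB_SYNC_ALL request now goes through bdi_start_writeback() for that one device rather than looping over every bdi in bdi_writeback_all().

long sync_inodes_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .bdi            = sb->s_bdi,
                .sync_mode      = WB_SYNC_ALL,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };                              /* closing brace assumed, not in the hunks */
        long nr_to_write = LONG_MAX;    /* doesn't actually matter */

        wbc.nr_to_write = nr_to_write;
        bdi_start_writeback(&wbc);
        wait_sb_inodes(&wbc);
        return nr_to_write - wbc.nr_to_write;
}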