author    | Christoph Hellwig <hch@lst.de> | 2010-06-08 12:14:43 -0400
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-06-11 06:58:07 -0400
commit    | 3c4d716538f3eefb1c1f10961a047a6456a2b590 (patch)
tree      | 3cfc50c18cc3d2351ddac4309e8f228e22faed71 /fs/fs-writeback.c
parent    | 7f0e7bed936a0c422641a046551829a01341dd80 (diff)
writeback: queue work on stack in writeback_inodes_sb
If we want to rely on s_umount in the caller we need to wait for completion
of the I/O submission before returning to the caller. Refactor
bdi_sync_writeback into a bdi_queue_work_onstack helper and use it for this
case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
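The refactor below reuses the on-stack completion idiom: the caller builds the work description on its own stack, queues it to the flusher thread, and blocks until the flusher is done with it, so the stack frame (and the s_umount protection the caller holds) stays valid for the whole submission. As a rough illustration of that idiom only, here is a minimal userspace sketch using POSIX threads; the names work_item, worker_main and queue_work_onstack are hypothetical stand-ins for the kernel's bdi_work, the bdi flusher thread, and bdi_queue_work_onstack() shown in the diff.

#include <pthread.h>
#include <stdio.h>

struct work_item {
	void (*fn)(void *arg);	/* what the worker should run */
	void *arg;
	int done;		/* completion flag, set by the worker */
	pthread_mutex_t lock;
	pthread_cond_t done_cond;
};

/* Worker side: run the callback, then signal completion. */
static void *worker_main(void *p)
{
	struct work_item *work = p;

	work->fn(work->arg);

	pthread_mutex_lock(&work->lock);
	work->done = 1;
	pthread_cond_signal(&work->done_cond);
	pthread_mutex_unlock(&work->lock);
	return NULL;
}

/*
 * Caller side: the work item lives on this stack frame, so we must not
 * return until the worker has stopped touching it -- the same reason
 * writeback_inodes_sb() now waits before returning to its caller.
 */
static void queue_work_onstack(void (*fn)(void *), void *arg)
{
	struct work_item work = { .fn = fn, .arg = arg, .done = 0 };
	pthread_t worker;

	pthread_mutex_init(&work.lock, NULL);
	pthread_cond_init(&work.done_cond, NULL);

	pthread_create(&worker, NULL, worker_main, &work);	/* "queue" the work */

	pthread_mutex_lock(&work.lock);
	while (!work.done)					/* wait for completion */
		pthread_cond_wait(&work.done_cond, &work.lock);
	pthread_mutex_unlock(&work.lock);

	pthread_join(worker, NULL);
	pthread_cond_destroy(&work.done_cond);
	pthread_mutex_destroy(&work.lock);
}

static void write_pages(void *arg)
{
	printf("submitting writeback for %s\n", (const char *)arg);
}

int main(void)
{
	queue_work_onstack(write_pages, "sb");
	return 0;
}

Build with cc -pthread; the property mirrored from the commit is that queue_work_onstack() does not return until the worker is finished with the stack-allocated work item.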
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r-- | fs/fs-writeback.c | 40
1 files changed, 21 insertions, 19 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index dbf6f108e868..759666966c6d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -178,30 +178,22 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 }
 
 /**
- * bdi_sync_writeback - start and wait for writeback
- * @bdi: the backing device to write from
+ * bdi_queue_work_onstack - start and wait for writeback
  * @sb: write inodes from this super_block
  *
  * Description:
- * This does WB_SYNC_ALL data integrity writeback and waits for the
- * IO to complete. Callers must hold the sb s_umount semaphore for
+ * This function initiates writeback and waits for the operation to
+ * complete. Callers must hold the sb s_umount semaphore for
  * reading, to avoid having the super disappear before we are done.
  */
-static void bdi_sync_writeback(struct backing_dev_info *bdi,
-			       struct super_block *sb)
+static void bdi_queue_work_onstack(struct wb_writeback_args *args)
 {
-	struct wb_writeback_args args = {
-		.sb		= sb,
-		.sync_mode	= WB_SYNC_ALL,
-		.nr_pages	= LONG_MAX,
-		.range_cyclic	= 0,
-	};
 	struct bdi_work work;
 
-	bdi_work_init(&work, &args);
+	bdi_work_init(&work, args);
 	__set_bit(WS_ONSTACK, &work.state);
 
-	bdi_queue_work(bdi, &work);
+	bdi_queue_work(args->sb->s_bdi, &work);
 	bdi_wait_on_work_done(&work);
 }
 
@@ -944,7 +936,7 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 
 /*
  * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_sync_writeback().
+ * writeback, for integrity writeback see bdi_queue_work_onstack().
  */
 static void bdi_writeback_all(struct super_block *sb, long nr_pages)
 {
@@ -1183,12 +1175,15 @@ void writeback_inodes_sb(struct super_block *sb)
 {
 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	long nr_to_write;
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_NONE,
+	};
 
-	nr_to_write = nr_dirty + nr_unstable +
+	args.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
+	bdi_queue_work_onstack(&args);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1218,7 +1213,14 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  */
 void sync_inodes_sb(struct super_block *sb)
 {
-	bdi_sync_writeback(sb->s_bdi, sb);
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_ALL,
+		.nr_pages	= LONG_MAX,
+		.range_cyclic	= 0,
+	};
+
+	bdi_queue_work_onstack(&args);
 	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);