author	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-14 12:30:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-14 12:30:54 -0400
commit	b973425cbb51e08301b34fecdfd476a44507d8cf (patch)
tree	689cfb1e5bb3064f4fad6b381bb208c3ed54cf64 /fs/ext4/page-io.c
parent	7fb30d2b606beb78cda805647faf4d3cdfb39c42 (diff)
parent	e2555fde4159467fb579e6ae3c0a8fc33015d0f5 (diff)
Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 update from Ted Ts'o:
"Fixed regressions (two stability regressions and a performance
regression) introduced during the 3.10-rc1 merge window.
Also included is a bug fix relating to allocating blocks after
resizing an ext3 file system when using the ext4 file system driver"
* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
jbd,jbd2: fix oops in jbd2_journal_put_journal_head()
ext4: revert "ext4: use io_end for multiple bios"
ext4: limit group search loop for non-extent files
ext4: fix fio regression
Diffstat (limited to 'fs/ext4/page-io.c')
-rw-r--r--	fs/ext4/page-io.c	121
1 file changed, 45 insertions, 76 deletions
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 19599bded62a..4acf1f78881b 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode)
 	cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
 }
 
-static void ext4_release_io_end(ext4_io_end_t *io_end)
+void ext4_free_io_end(ext4_io_end_t *io)
 {
-	BUG_ON(!list_empty(&io_end->list));
-	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-
-	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
-		wake_up_all(ext4_ioend_wq(io_end->inode));
-	if (io_end->flag & EXT4_IO_END_DIRECT)
-		inode_dio_done(io_end->inode);
-	if (io_end->iocb)
-		aio_complete(io_end->iocb, io_end->result, 0);
-	kmem_cache_free(io_end_cachep, io_end);
-}
-
-static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
-{
-	struct inode *inode = io_end->inode;
+	BUG_ON(!io);
+	BUG_ON(!list_empty(&io->list));
+	BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
 
-	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
-	/* Wake up anyone waiting on unwritten extent conversion */
-	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-		wake_up_all(ext4_ioend_wq(inode));
+	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
+		wake_up_all(ext4_ioend_wq(io->inode));
+	kmem_cache_free(io_end_cachep, io);
 }
 
 /* check a range of space and convert unwritten extents to written. */
@@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io)
 			 "(inode %lu, offset %llu, size %zd, error %d)",
 			 inode->i_ino, offset, size, ret);
 	}
-	ext4_clear_io_unwritten_flag(io);
-	ext4_release_io_end(io);
+	/* Wake up anyone waiting on unwritten extent conversion */
+	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+		wake_up_all(ext4_ioend_wq(inode));
+	if (io->flag & EXT4_IO_END_DIRECT)
+		inode_dio_done(inode);
+	if (io->iocb)
+		aio_complete(io->iocb, io->result, 0);
 	return ret;
 }
 
@@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode)
 }
 
 /* Add the io_end to per-inode completed end_io list. */
-static void ext4_add_complete_io(ext4_io_end_t *io_end)
+void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
 	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
 	struct workqueue_struct *wq;
@@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
 		err = ext4_end_io(io);
 		if (unlikely(!ret && err))
 			ret = err;
+		io->flag &= ~EXT4_IO_END_UNWRITTEN;
+		ext4_free_io_end(io);
 	}
 	return ret;
 }
@@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 		atomic_inc(&EXT4_I(inode)->i_ioend_count);
 		io->inode = inode;
 		INIT_LIST_HEAD(&io->list);
-		atomic_set(&io->count, 1);
 	}
 	return io;
 }
 
-void ext4_put_io_end_defer(ext4_io_end_t *io_end)
-{
-	if (atomic_dec_and_test(&io_end->count)) {
-		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
-			ext4_release_io_end(io_end);
-			return;
-		}
-		ext4_add_complete_io(io_end);
-	}
-}
-
-int ext4_put_io_end(ext4_io_end_t *io_end)
-{
-	int err = 0;
-
-	if (atomic_dec_and_test(&io_end->count)) {
-		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
-			err = ext4_convert_unwritten_extents(io_end->inode,
-						io_end->offset, io_end->size);
-			ext4_clear_io_unwritten_flag(io_end);
-		}
-		ext4_release_io_end(io_end);
-	}
-	return err;
-}
-
-ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
-{
-	atomic_inc(&io_end->count);
-	return io_end;
-}
-
 /*
  * Print an buffer I/O error compatible with the fs/buffer.c. This
  * provides compatibility with dmesg scrapers that look for a specific
@@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error)
			     bi_sector >> (inode->i_blkbits - 9));
 	}
 
-	ext4_put_io_end_defer(io_end);
+	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+		ext4_free_io_end(io_end);
+		return;
+	}
+
+	ext4_add_complete_io(io_end);
 }
 
 void ext4_io_submit(struct ext4_io_submit *io)
@@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io)
 		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
-}
-
-void ext4_io_submit_init(struct ext4_io_submit *io,
-			 struct writeback_control *wbc)
-{
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
-	io->io_bio = NULL;
+	io->io_op = 0;
 	io->io_end = NULL;
 }
 
-static int io_submit_init_bio(struct ext4_io_submit *io,
-			      struct buffer_head *bh)
+static int io_submit_init(struct ext4_io_submit *io,
+			  struct inode *inode,
+			  struct writeback_control *wbc,
+			  struct buffer_head *bh)
 {
+	ext4_io_end_t *io_end;
+	struct page *page = bh->b_page;
 	int nvecs = bio_get_nr_vecs(bh->b_bdev);
 	struct bio *bio;
 
+	io_end = ext4_init_io_end(inode, GFP_NOFS);
+	if (!io_end)
+		return -ENOMEM;
 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
+	bio->bi_private = io->io_end = io_end;
 	bio->bi_end_io = ext4_end_bio;
-	bio->bi_private = ext4_get_io_end(io->io_end);
-	if (!io->io_end->size)
-		io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT)
-							+ bh_offset(bh);
+
+	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
 	io->io_bio = bio;
+	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 	io->io_next_block = bh->b_blocknr;
 	return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
 			    struct inode *inode,
+			    struct writeback_control *wbc,
 			    struct buffer_head *bh)
 {
 	ext4_io_end_t *io_end;
@@ -380,18 +349,18 @@ submit_and_retry:
 		ext4_io_submit(io);
 	}
 	if (io->io_bio == NULL) {
-		ret = io_submit_init_bio(io, bh);
+		ret = io_submit_init(io, inode, wbc, bh);
 		if (ret)
 			return ret;
 	}
-	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
-	if (ret != bh->b_size)
-		goto submit_and_retry;
 	io_end = io->io_end;
 	if (test_clear_buffer_uninit(bh))
 		ext4_set_io_unwritten_flag(inode, io_end);
-	io_end->size += bh->b_size;
+	io->io_end->size += bh->b_size;
 	io->io_next_block++;
+	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+	if (ret != bh->b_size)
+		goto submit_and_retry;
 	return 0;
 }
 
@@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode, bh);
+		ret = io_submit_add_bh(io, inode, wbc, bh);
 		if (ret) {
 			/*
 			 * We only get here on ENOMEM. Not much else