Diffstat (limited to 'fs/ext4/page-io.c')
-rw-r--r--   fs/ext4/page-io.c   336
1 file changed, 192 insertions, 144 deletions
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 4acf1f78881b..6625d210fb45 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -25,6 +25,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
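
The new <linux/ratelimit.h> include supports the printk_ratelimited() call that buffer_io_error() makes below: a failing disk can complete thousands of buffers per second, and the ratelimited variant drops excess messages instead of flooding the log. A minimal standalone sketch of the same idiom (the helper and its name are invented for illustration, not part of this patch):

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>
    #include <linux/types.h>

    /* Report a failed block, but emit at most one burst of messages
     * per ratelimit interval; the rest are counted and dropped. */
    static void report_block_error(sector_t blocknr)
    {
            printk_ratelimited(KERN_ERR "I/O error on logical block %llu\n",
                               (unsigned long long)blocknr);
    }
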
@@ -46,46 +47,121 @@ void ext4_exit_pageio(void)
 }
 
 /*
- * This function is called by ext4_evict_inode() to make sure there is
- * no more pending I/O completion work left to do.
+ * Print an buffer I/O error compatible with the fs/buffer.c. This
+ * provides compatibility with dmesg scrapers that look for a specific
+ * buffer I/O error message. We really need a unified error reporting
+ * structure to userspace ala Digital Unix's uerf system, but it's
+ * probably not going to happen in my lifetime, due to LKML politics...
  */
-void ext4_ioend_shutdown(struct inode *inode)
+static void buffer_io_error(struct buffer_head *bh)
+{
+        char b[BDEVNAME_SIZE];
+        printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+                           bdevname(bh->b_bdev, b),
+                           (unsigned long long)bh->b_blocknr);
+}
+
+static void ext4_finish_bio(struct bio *bio)
 {
-        wait_queue_head_t *wq = ext4_ioend_wq(inode);
+        int i;
+        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
 
-        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
-        /*
-         * We need to make sure the work structure is finished being
-         * used before we let the inode get destroyed.
-         */
-        if (work_pending(&EXT4_I(inode)->i_unwritten_work))
-                cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
+        for (i = 0; i < bio->bi_vcnt; i++) {
+                struct bio_vec *bvec = &bio->bi_io_vec[i];
+                struct page *page = bvec->bv_page;
+                struct buffer_head *bh, *head;
+                unsigned bio_start = bvec->bv_offset;
+                unsigned bio_end = bio_start + bvec->bv_len;
+                unsigned under_io = 0;
+                unsigned long flags;
+
+                if (!page)
+                        continue;
+
+                if (error) {
+                        SetPageError(page);
+                        set_bit(AS_EIO, &page->mapping->flags);
+                }
+                bh = head = page_buffers(page);
+                /*
+                 * We check all buffers in the page under BH_Uptodate_Lock
+                 * to avoid races with other end io clearing async_write flags
+                 */
+                local_irq_save(flags);
+                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+                do {
+                        if (bh_offset(bh) < bio_start ||
+                            bh_offset(bh) + bh->b_size > bio_end) {
+                                if (buffer_async_write(bh))
+                                        under_io++;
+                                continue;
+                        }
+                        clear_buffer_async_write(bh);
+                        if (error)
+                                buffer_io_error(bh);
+                } while ((bh = bh->b_this_page) != head);
+                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+                local_irq_restore(flags);
+                if (!under_io)
+                        end_page_writeback(page);
+        }
+}
+
+static void ext4_release_io_end(ext4_io_end_t *io_end)
+{
+        struct bio *bio, *next_bio;
+
+        BUG_ON(!list_empty(&io_end->list));
+        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
+        WARN_ON(io_end->handle);
+
+        if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
+                wake_up_all(ext4_ioend_wq(io_end->inode));
+
+        for (bio = io_end->bio; bio; bio = next_bio) {
+                next_bio = bio->bi_private;
+                ext4_finish_bio(bio);
+                bio_put(bio);
+        }
+        if (io_end->flag & EXT4_IO_END_DIRECT)
+                inode_dio_done(io_end->inode);
+        if (io_end->iocb)
+                aio_complete(io_end->iocb, io_end->result, 0);
+        kmem_cache_free(io_end_cachep, io_end);
 }
 
-void ext4_free_io_end(ext4_io_end_t *io)
+static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
 {
-        BUG_ON(!io);
-        BUG_ON(!list_empty(&io->list));
-        BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
+        struct inode *inode = io_end->inode;
 
-        if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
-                wake_up_all(ext4_ioend_wq(io->inode));
-        kmem_cache_free(io_end_cachep, io);
+        io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
+        /* Wake up anyone waiting on unwritten extent conversion */
+        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+                wake_up_all(ext4_ioend_wq(inode));
 }
 
-/* check a range of space and convert unwritten extents to written. */
+/*
+ * Check a range of space and convert unwritten extents to written. Note that
+ * we are protected from truncate touching same part of extent tree by the
+ * fact that truncate code waits for all DIO to finish (thus exclusion from
+ * direct IO is achieved) and also waits for PageWriteback bits. Thus we
+ * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
+ * completed (happens from ext4_free_ioend()).
+ */
 static int ext4_end_io(ext4_io_end_t *io)
 {
         struct inode *inode = io->inode;
         loff_t offset = io->offset;
         ssize_t size = io->size;
+        handle_t *handle = io->handle;
         int ret = 0;
 
         ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                    "list->prev 0x%p\n",
                    io, inode->i_ino, io->list.next, io->list.prev);
 
-        ret = ext4_convert_unwritten_extents(inode, offset, size);
+        io->handle = NULL;      /* Following call will use up the handle */
+        ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
         if (ret < 0) {
                 ext4_msg(inode->i_sb, KERN_EMERG,
                          "failed to convert unwritten extents to written "
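
Two details of the new ext4_finish_bio() are worth calling out. A page's buffers may be covered by several bios, so each completion clears BH_Async_Write only on buffers inside its own bio_vec range and counts the rest in under_io; end_page_writeback() runs only when nothing on the page is still in flight. And the buffer-state walk takes BH_Uptodate_Lock with interrupts disabled because bio completion can run in interrupt context and two completions for the same page can race. A reduced standalone sketch of that locking idiom (the helper is invented; only the lock and flag names come from the code above):

    #include <linux/bit_spinlock.h>
    #include <linux/buffer_head.h>

    /* Count the buffers of a page still marked for async writeout,
     * under the same lock ext4_finish_bio() uses. */
    static unsigned count_async_write_buffers(struct page *page)
    {
            struct buffer_head *bh, *head;
            unsigned long flags;
            unsigned n = 0;

            bh = head = page_buffers(page);
            local_irq_save(flags);
            bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
            do {
                    if (buffer_async_write(bh))
                            n++;
            } while ((bh = bh->b_this_page) != head);
            bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
            local_irq_restore(flags);
            return n;
    }
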
@@ -93,30 +169,22 @@ static int ext4_end_io(ext4_io_end_t *io)
                          "(inode %lu, offset %llu, size %zd, error %d)",
                          inode->i_ino, offset, size, ret);
         }
-        /* Wake up anyone waiting on unwritten extent conversion */
-        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-                wake_up_all(ext4_ioend_wq(inode));
-        if (io->flag & EXT4_IO_END_DIRECT)
-                inode_dio_done(inode);
-        if (io->iocb)
-                aio_complete(io->iocb, io->result, 0);
+        ext4_clear_io_unwritten_flag(io);
+        ext4_release_io_end(io);
         return ret;
 }
 
-static void dump_completed_IO(struct inode *inode)
+static void dump_completed_IO(struct inode *inode, struct list_head *head)
 {
 #ifdef  EXT4FS_DEBUG
         struct list_head *cur, *before, *after;
         ext4_io_end_t *io, *io0, *io1;
 
-        if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
-                ext4_debug("inode %lu completed_io list is empty\n",
-                           inode->i_ino);
+        if (list_empty(head))
                 return;
-        }
 
-        ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
-        list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
+        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
+        list_for_each_entry(io, head, list) {
                 cur = &io->list;
                 before = cur->prev;
                 io0 = container_of(before, ext4_io_end_t, list);
@@ -130,23 +198,30 @@ static void dump_completed_IO(struct inode *inode)
 }
 
 /* Add the io_end to per-inode completed end_io list. */
-void ext4_add_complete_io(ext4_io_end_t *io_end)
+static void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
         struct ext4_inode_info *ei = EXT4_I(io_end->inode);
         struct workqueue_struct *wq;
         unsigned long flags;
 
         BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
-
         spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-        if (list_empty(&ei->i_completed_io_list))
-                queue_work(wq, &ei->i_unwritten_work);
-        list_add_tail(&io_end->list, &ei->i_completed_io_list);
+        if (io_end->handle) {
+                wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+                if (list_empty(&ei->i_rsv_conversion_list))
+                        queue_work(wq, &ei->i_rsv_conversion_work);
+                list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
+        } else {
+                wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
+                if (list_empty(&ei->i_unrsv_conversion_list))
+                        queue_work(wq, &ei->i_unrsv_conversion_work);
+                list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
+        }
         spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 }
 
-static int ext4_do_flush_completed_IO(struct inode *inode)
+static int ext4_do_flush_completed_IO(struct inode *inode,
+                                      struct list_head *head)
 {
         ext4_io_end_t *io;
         struct list_head unwritten;
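
The dispatch above picks a workqueue by whether the io_end carries a pre-reserved transaction handle, but both branches use the same "kick the worker only on the empty-to-non-empty transition" idiom: one queue_work() is enough, because a single worker run drains the entire list. That idiom in isolation (the pending_queue type and all names here are invented for illustration):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct pending_queue {
            spinlock_t              lock;
            struct list_head        items;
            struct workqueue_struct *wq;
            struct work_struct      work;
    };

    /* Queue an item; schedule the worker only when the list goes from
     * empty to non-empty, since one run processes everything queued. */
    static void pending_queue_add(struct pending_queue *pq,
                                  struct list_head *item)
    {
            unsigned long flags;

            spin_lock_irqsave(&pq->lock, flags);
            if (list_empty(&pq->items))
                    queue_work(pq->wq, &pq->work);
            list_add_tail(item, &pq->items);
            spin_unlock_irqrestore(&pq->lock, flags);
    }
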
@@ -155,8 +230,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
         int err, ret = 0;
 
         spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-        dump_completed_IO(inode);
-        list_replace_init(&ei->i_completed_io_list, &unwritten);
+        dump_completed_IO(inode, head);
+        list_replace_init(head, &unwritten);
         spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
         while (!list_empty(&unwritten)) {
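
The drain side detaches the whole list with list_replace_init() while holding the spinlock and then processes entries without it, so interrupt-time completions can keep queueing new io_ends concurrently. Continuing the hypothetical pending_queue sketch from above:

    /* Drain counterpart of pending_queue_add(): detach the whole list
     * under the lock, then process the entries lock-free. */
    static void pending_queue_drain(struct pending_queue *pq)
    {
            LIST_HEAD(local);
            struct list_head *pos, *n;
            unsigned long flags;

            spin_lock_irqsave(&pq->lock, flags);
            list_replace_init(&pq->items, &local);
            spin_unlock_irqrestore(&pq->lock, flags);

            list_for_each_safe(pos, n, &local) {
                    list_del_init(pos);
                    /* process the detached entry here */
            }
    }
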
@@ -167,30 +242,25 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
                 err = ext4_end_io(io);
                 if (unlikely(!ret && err))
                         ret = err;
-                io->flag &= ~EXT4_IO_END_UNWRITTEN;
-                ext4_free_io_end(io);
         }
         return ret;
 }
 
 /*
- * work on completed aio dio IO, to convert unwritten extents to extents
+ * work on completed IO, to convert unwritten extents to extents
  */
-void ext4_end_io_work(struct work_struct *work)
+void ext4_end_io_rsv_work(struct work_struct *work)
 {
         struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
-                                                  i_unwritten_work);
-        ext4_do_flush_completed_IO(&ei->vfs_inode);
+                                                  i_rsv_conversion_work);
+        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
 }
 
-int ext4_flush_unwritten_io(struct inode *inode)
+void ext4_end_io_unrsv_work(struct work_struct *work)
 {
-        int ret;
-        WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
-                     !(inode->i_state & I_FREEING));
-        ret = ext4_do_flush_completed_IO(inode);
-        ext4_unwritten_wait(inode);
-        return ret;
+        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
+                                                  i_unrsv_conversion_work);
+        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
 }
 
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
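
Both workers recover their ext4_inode_info from the embedded work_struct via container_of(); the only difference between them is which per-inode list they drain. The pattern in isolation (conv_ctx is an invented stand-in for ext4_inode_info):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct conv_ctx {
            int                     id;
            struct work_struct      work;   /* embedded, as in ext4_inode_info */
    };

    static void conv_worker(struct work_struct *work)
    {
            /* Recover the containing object from the embedded member,
             * the same way ext4_end_io_rsv_work() does above. */
            struct conv_ctx *ctx = container_of(work, struct conv_ctx, work);

            pr_info("processing ctx %d\n", ctx->id);
    }
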
@@ -200,83 +270,59 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
                 atomic_inc(&EXT4_I(inode)->i_ioend_count);
                 io->inode = inode;
                 INIT_LIST_HEAD(&io->list);
+                atomic_set(&io->count, 1);
         }
         return io;
 }
 
-/*
- * Print an buffer I/O error compatible with the fs/buffer.c. This
- * provides compatibility with dmesg scrapers that look for a specific
- * buffer I/O error message. We really need a unified error reporting
- * structure to userspace ala Digital Unix's uerf system, but it's
- * probably not going to happen in my lifetime, due to LKML politics...
- */
-static void buffer_io_error(struct buffer_head *bh)
+void ext4_put_io_end_defer(ext4_io_end_t *io_end)
 {
-        char b[BDEVNAME_SIZE];
-        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
-                        bdevname(bh->b_bdev, b),
-                        (unsigned long long)bh->b_blocknr);
+        if (atomic_dec_and_test(&io_end->count)) {
+                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
+                        ext4_release_io_end(io_end);
+                        return;
+                }
+                ext4_add_complete_io(io_end);
+        }
 }
 
+int ext4_put_io_end(ext4_io_end_t *io_end)
+{
+        int err = 0;
+
+        if (atomic_dec_and_test(&io_end->count)) {
+                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+                        err = ext4_convert_unwritten_extents(io_end->handle,
+                                        io_end->inode, io_end->offset,
+                                        io_end->size);
+                        io_end->handle = NULL;
+                        ext4_clear_io_unwritten_flag(io_end);
+                }
+                ext4_release_io_end(io_end);
+        }
+        return err;
+}
+
+ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
+{
+        atomic_inc(&io_end->count);
+        return io_end;
+}
+
+/* BIO completion function for page writeback */
 static void ext4_end_bio(struct bio *bio, int error)
 {
         ext4_io_end_t *io_end = bio->bi_private;
-        struct inode *inode;
-        int i;
-        int blocksize;
         sector_t bi_sector = bio->bi_sector;
 
         BUG_ON(!io_end);
-        inode = io_end->inode;
-        blocksize = 1 << inode->i_blkbits;
-        bio->bi_private = NULL;
         bio->bi_end_io = NULL;
         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                 error = 0;
-        for (i = 0; i < bio->bi_vcnt; i++) {
-                struct bio_vec *bvec = &bio->bi_io_vec[i];
-                struct page *page = bvec->bv_page;
-                struct buffer_head *bh, *head;
-                unsigned bio_start = bvec->bv_offset;
-                unsigned bio_end = bio_start + bvec->bv_len;
-                unsigned under_io = 0;
-                unsigned long flags;
-
-                if (!page)
-                        continue;
-
-                if (error) {
-                        SetPageError(page);
-                        set_bit(AS_EIO, &page->mapping->flags);
-                }
-                bh = head = page_buffers(page);
-                /*
-                 * We check all buffers in the page under BH_Uptodate_Lock
-                 * to avoid races with other end io clearing async_write flags
-                 */
-                local_irq_save(flags);
-                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
-                do {
-                        if (bh_offset(bh) < bio_start ||
-                            bh_offset(bh) + blocksize > bio_end) {
-                                if (buffer_async_write(bh))
-                                        under_io++;
-                                continue;
-                        }
-                        clear_buffer_async_write(bh);
-                        if (error)
-                                buffer_io_error(bh);
-                } while ((bh = bh->b_this_page) != head);
-                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-                local_irq_restore(flags);
-                if (!under_io)
-                        end_page_writeback(page);
-        }
-        bio_put(bio);
 
         if (error) {
-                io_end->flag |= EXT4_IO_END_ERROR;
+                struct inode *inode = io_end->inode;
+
                 ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                              "(offset %llu size %ld starting block %llu)",
                              inode->i_ino,
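
This is the heart of the restructuring: an io_end is now reference counted. ext4_init_io_end() starts the count at 1 for its creator, each bio takes its own reference via ext4_get_io_end(), and the two put variants differ by context: ext4_put_io_end_defer() is safe from bio completion (it defers extent conversion to a workqueue), while ext4_put_io_end() may convert unwritten extents synchronously and so needs process context. The counting scheme reduced to a standalone sketch (ioend_like and the helpers are invented names):

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct ioend_like {
            atomic_t count;                 /* starts at 1 for the creator */
    };

    static struct ioend_like *ioend_alloc(gfp_t gfp)
    {
            struct ioend_like *e = kzalloc(sizeof(*e), gfp);

            if (e)
                    atomic_set(&e->count, 1);       /* creator's reference */
            return e;
    }

    static struct ioend_like *ioend_get(struct ioend_like *e)
    {
            atomic_inc(&e->count);
            return e;
    }

    static void ioend_put(struct ioend_like *e)
    {
            /* The last reference performs the release work. */
            if (atomic_dec_and_test(&e->count))
                    kfree(e);
    }
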
@@ -286,12 +332,23 @@ static void ext4_end_bio(struct bio *bio, int error)
                              bi_sector >> (inode->i_blkbits - 9));
         }
 
-        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-                ext4_free_io_end(io_end);
-                return;
+        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+                /*
+                 * Link bio into list hanging from io_end. We have to do it
+                 * atomically as bio completions can be racing against each
+                 * other.
+                 */
+                bio->bi_private = xchg(&io_end->bio, bio);
+                ext4_put_io_end_defer(io_end);
+        } else {
+                /*
+                 * Drop io_end reference early. Inode can get freed once
+                 * we finish the bio.
+                 */
+                ext4_put_io_end_defer(io_end);
+                ext4_finish_bio(bio);
+                bio_put(bio);
         }
-
-        ext4_add_complete_io(io_end);
 }
 
 void ext4_io_submit(struct ext4_io_submit *io)
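
With one io_end now spanning several bios, a completed bio is pushed onto a list hanging off io_end->bio, reusing bi_private as the link field; xchg() makes the push atomic so two bios completing simultaneously cannot lose each other's nodes. The lock-free push in isolation (node and push_atomic are invented names):

    #include <linux/atomic.h>

    struct node {
            struct node *next;
    };

    /* Atomically push n onto the singly linked list at *head.
     * xchg() installs n and returns the previous head, so racing
     * pushers each link to a consistent predecessor. Popping only
     * happens after all pushers are done (as in ext4_release_io_end()
     * above), so no ABA problem arises. */
    static void push_atomic(struct node **head, struct node *n)
    {
            n->next = xchg(head, n);
    }
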
@@ -305,43 +362,38 @@ void ext4_io_submit(struct ext4_io_submit *io)
                 bio_put(io->io_bio);
         }
         io->io_bio = NULL;
-        io->io_op = 0;
+}
+
+void ext4_io_submit_init(struct ext4_io_submit *io,
+                         struct writeback_control *wbc)
+{
+        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+        io->io_bio = NULL;
         io->io_end = NULL;
 }
 
-static int io_submit_init(struct ext4_io_submit *io,
-                          struct inode *inode,
-                          struct writeback_control *wbc,
-                          struct buffer_head *bh)
+static int io_submit_init_bio(struct ext4_io_submit *io,
+                              struct buffer_head *bh)
 {
-        ext4_io_end_t *io_end;
-        struct page *page = bh->b_page;
         int nvecs = bio_get_nr_vecs(bh->b_bdev);
         struct bio *bio;
 
-        io_end = ext4_init_io_end(inode, GFP_NOFS);
-        if (!io_end)
-                return -ENOMEM;
         bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
+        if (!bio)
+                return -ENOMEM;
         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
         bio->bi_bdev = bh->b_bdev;
-        bio->bi_private = io->io_end = io_end;
         bio->bi_end_io = ext4_end_bio;
-
-        io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
-
+        bio->bi_private = ext4_get_io_end(io->io_end);
         io->io_bio = bio;
-        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
         io->io_next_block = bh->b_blocknr;
         return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
-                            struct writeback_control *wbc,
                             struct buffer_head *bh)
 {
-        ext4_io_end_t *io_end;
         int ret;
 
         if (io->io_bio && bh->b_blocknr != io->io_next_block) {
@@ -349,18 +401,14 @@ submit_and_retry:
                 ext4_io_submit(io);
         }
         if (io->io_bio == NULL) {
-                ret = io_submit_init(io, inode, wbc, bh);
+                ret = io_submit_init_bio(io, bh);
                 if (ret)
                         return ret;
         }
-        io_end = io->io_end;
-        if (test_clear_buffer_uninit(bh))
-                ext4_set_io_unwritten_flag(inode, io_end);
-        io->io_end->size += bh->b_size;
-        io->io_next_block++;
         ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
         if (ret != bh->b_size)
                 goto submit_and_retry;
+        io->io_next_block++;
         return 0;
 }
 
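
io_submit_add_bh() keeps its historical goto: when bio_add_page() cannot accept the whole buffer (the bio is full) or the buffer is not contiguous with the previous block, the current bio is submitted and the add is retried against a fresh one. The same control flow reshaped as a loop, which may read more clearly (a sketch meant to be behaviorally equivalent, not the patch's code):

    static int io_submit_add_bh_loop(struct ext4_io_submit *io,
                                     struct buffer_head *bh)
    {
            int ret;

            for (;;) {
                    if (io->io_bio && bh->b_blocknr != io->io_next_block)
                            ext4_io_submit(io);     /* discontiguous: flush */
                    if (io->io_bio == NULL) {
                            ret = io_submit_init_bio(io, bh);
                            if (ret)
                                    return ret;
                    }
                    ret = bio_add_page(io->io_bio, bh->b_page,
                                       bh->b_size, bh_offset(bh));
                    if (ret == bh->b_size)
                            break;                  /* buffer fully added */
                    ext4_io_submit(io);             /* bio full: submit, retry */
            }
            io->io_next_block++;
            return 0;
    }
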
@@ -432,7 +480,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
         do {
                 if (!buffer_async_write(bh))
                         continue;
-                ret = io_submit_add_bh(io, inode, wbc, bh);
+                ret = io_submit_add_bh(io, inode, bh);
                 if (ret) {
                         /*
                          * We only get here on ENOMEM. Not much else
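
Taken together, the calling convention after this patch is: the writeback path initializes the submission state with ext4_io_submit_init(), allocates the io_end itself (rather than having the old io_submit_init() allocate one per bio), writes pages, submits, and finally drops its own reference. Roughly like the following (a sketch; page locking and error paths are elided, and the wrapper function is invented):

    static int write_one_page_sketch(struct page *page, int len,
                                     struct writeback_control *wbc,
                                     struct inode *inode)
    {
            struct ext4_io_submit io;
            int ret, ret2;

            ext4_io_submit_init(&io, wbc);
            io.io_end = ext4_init_io_end(inode, GFP_NOFS);  /* caller's ref */
            if (!io.io_end)
                    return -ENOMEM;
            ret = ext4_bio_write_page(&io, page, len, wbc);
            ext4_io_submit(&io);            /* flush any pending bio */
            ret2 = ext4_put_io_end(io.io_end);      /* drop caller's ref */
            return ret ? ret : ret2;
    }
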