Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	114
1 file changed, 37 insertions(+), 77 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 70f989895d15..87813e405cef 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -235,71 +235,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
 
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extens after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						 ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
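Editorial note: the hunk above folds the four old completion handlers (delalloc, written, unwritten, read) into a single xfs_end_io() that branches on ioend->io_type. Below is a small userspace sketch of that dispatch, assuming simplified stand-ins for xfs_ioend_t, xfs_iomap_write_unwritten() and xfs_setfilesize(); it is illustrative only, not the kernel code.

/* Illustrative sketch only: simplified stand-ins for the XFS types and
 * helpers touched by this patch, not the kernel implementation. */
#include <stdio.h>

enum iomap_type { IOMAP_READ, IOMAP_NEW, IOMAP_DELAY, IOMAP_UNWRITTEN };

struct ioend {
	enum iomap_type	type;
	int		error;
	long long	offset;
	long long	size;
};

/* Placeholder for xfs_iomap_write_unwritten(): convert the range. */
static int convert_unwritten(struct ioend *io)
{
	printf("convert [%lld, %lld) to written\n",
	       io->offset, io->offset + io->size);
	return 0;
}

/* Placeholder for xfs_setfilesize(): update the on-disk i_size. */
static void update_filesize(struct ioend *io)
{
	printf("update on-disk size to at least %lld\n",
	       io->offset + io->size);
}

/* One completion handler, branching on the I/O type like the new xfs_end_io(). */
static void end_io(struct ioend *io)
{
	if (io->type == IOMAP_UNWRITTEN && !io->error) {
		int error = convert_unwritten(io);
		if (error)
			io->error = error;
	}
	if (io->type != IOMAP_READ)
		update_filesize(io);
	/* xfs_destroy_ioend() would free the ioend here. */
}

int main(void)
{
	struct ioend unwritten = { IOMAP_UNWRITTEN, 0, 4096, 4096 };
	struct ioend read = { IOMAP_READ, 0, 0, 4096 };

	end_io(&unwritten);	/* converts, then updates the file size */
	end_io(&read);		/* reads never touch the on-disk size */
	return 0;
}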
@@ -314,10 +279,10 @@ xfs_finish_ioend(
 	int		wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
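Editorial note: with only one work function, xfs_finish_ioend() can no longer tell the two workqueues apart by comparing io_work.func, so the hunk above keys the choice off io_type instead. A minimal sketch of the same selection, with made-up queue handles standing in for xfsdatad_workqueue and xfsconvertd_workqueue:

/* Illustrative only: choose a work queue by I/O type rather than by the
 * work function pointer.  Queue names are stand-ins, not kernel symbols. */
#include <stdio.h>

enum iomap_type { IOMAP_READ, IOMAP_NEW, IOMAP_DELAY, IOMAP_UNWRITTEN };

struct workqueue { const char *name; };

static struct workqueue data_wq    = { "data" };	/* stands in for xfsdatad_workqueue */
static struct workqueue convert_wq = { "convert" };	/* stands in for xfsconvertd_workqueue */

static struct workqueue *pick_queue(enum iomap_type type)
{
	/* Unwritten-extent conversion runs transactions, so it gets its own queue. */
	return (type == IOMAP_UNWRITTEN) ? &convert_wq : &data_wq;
}

int main(void)
{
	printf("unwritten -> %s queue\n", pick_queue(IOMAP_UNWRITTEN)->name);
	printf("delalloc  -> %s queue\n", pick_queue(IOMAP_DELAY)->name);
	return 0;
}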
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -380,7 +337,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -412,8 +369,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t	*ioend,
-	struct bio	*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -426,7 +384,8 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
@@ -505,6 +464,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +493,19 @@ xfs_submit_ioend(
  retry:
 				bio = xfs_alloc_ioend_bio(bh);
 			} else if (bh->b_blocknr != lastblock + 1) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			if (bio_add_buffer(bio, bh) != bh->b_size) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			lastblock = bh->b_blocknr;
 		}
 		if (bio)
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -1191,7 +1151,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
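Editorial note: the hunks from xfs_submit_ioend_bio() down to xfs_page_state_convert() thread the writeback_control through the submission path so the flag passed to submit_bio() can depend on wbc->sync_mode (WRITE_SYNC_PLUG for WB_SYNC_ALL writeback, plain WRITE otherwise). Below is a small userspace sketch of that plumbing pattern, using invented names (wbc_ctx, submit_chain, submit_one) rather than the XFS functions.

/* Sketch of threading a writeback context down a call chain so the lowest
 * level can pick a request flag; names here are illustrative, not kernel API. */
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };
enum rw_flag { WRITE_ASYNC, WRITE_SYNC };

struct wbc_ctx {
	enum sync_mode sync_mode;
};

/* Lowest level: the flag decision lives where the bio is actually submitted. */
static void submit_one(struct wbc_ctx *wbc, int blockno)
{
	enum rw_flag flag = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE_ASYNC;

	printf("submit block %d %s\n", blockno,
	       flag == WRITE_SYNC ? "synchronously" : "asynchronously");
}

/* Middle level: just forwards the context, as xfs_submit_ioend() now does. */
static void submit_chain(struct wbc_ctx *wbc, const int *blocks, int n)
{
	for (int i = 0; i < n; i++)
		submit_one(wbc, blocks[i]);
}

int main(void)
{
	int blocks[] = { 8, 9, 10 };
	struct wbc_ctx integrity = { WB_SYNC_ALL };	/* e.g. fsync-driven writeback */
	struct wbc_ctx background = { WB_SYNC_NONE };	/* e.g. periodic flusher */

	submit_chain(&integrity, blocks, 3);
	submit_chain(&background, blocks, 3);
	return 0;
}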
@@ -1528,7 +1488,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch it's completion
 		 * handler.
 		 */
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		ioend->io_type = IOMAP_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
 