author     Christoph Hellwig <hch@infradead.org>   2009-10-30 05:09:15 -0400
committer  Alex Elder <aelder@sgi.com>             2009-12-11 16:11:20 -0500
commit     06342cf8adb23464deae0f58f8bcb87818a3bee6 (patch)
tree       5e137892c622fd66216f7b7c3454d3f5e9ffccd1 /fs/xfs
parent     033da48fda9d56e28b3fe3ef87ef6fd43290f554 (diff)
xfs: use WRITE_SYNC_PLUG for synchronous writeout
The VM and I/O schedulers now expect us to use WRITE_SYNC_PLUG for synchronous
writeout. Right now I can't see any changes in performance numbers with this,
but we're getting some beating for not using it, and the knowledge definitely
could help the block code to make better decisions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
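For reference, the submit-time decision the patch introduces amounts to picking
the bio write flag from the writeback control's sync_mode. A minimal standalone
sketch of that selection against the kernel APIs of this era (the helper name
xfs_wbc_to_write_flag is illustrative and not part of the patch; wbc->sync_mode,
WB_SYNC_ALL, WRITE and WRITE_SYNC_PLUG are taken from the diff below):

	#include <linux/fs.h>		/* WRITE, WRITE_SYNC_PLUG, submit_bio() */
	#include <linux/writeback.h>	/* struct writeback_control, WB_SYNC_ALL */
	#include <linux/bio.h>

	/*
	 * Pick the write flag for an ioend bio: plugged synchronous writes
	 * for data-integrity writeback, plain asynchronous writes otherwise.
	 */
	static inline int xfs_wbc_to_write_flag(struct writeback_control *wbc)
	{
		return wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE;
	}

	/*
	 * Usage, equivalent to what xfs_submit_ioend_bio() does after the patch:
	 *	submit_bio(xfs_wbc_to_write_flag(wbc), bio);
	 */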
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/linux-2.6/xfs_aops.c   17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74dc..d13fc7391e8b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -412,8 +412,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t		*ioend,
-	struct bio		*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -426,7 +427,8 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
@@ -505,6 +507,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +536,19 @@ xfs_submit_ioend(
  retry:
 				bio = xfs_alloc_ioend_bio(bh);
 			} else if (bh->b_blocknr != lastblock + 1) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			if (bio_add_buffer(bio, bh) != bh->b_size) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			lastblock = bh->b_blocknr;
 		}
 		if (bio)
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -1198,7 +1201,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
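Taken together, the hunks thread the writeback_control from the page-writeback
path down to bio submission. A rough sketch of the post-patch call chain (code
elided by the diff is omitted):

	xfs_page_state_convert()	/* already receives the wbc */
	  -> xfs_submit_ioend(wbc, iohead)
	       -> xfs_submit_ioend_bio(wbc, ioend, bio)
	            -> submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
	                          WRITE_SYNC_PLUG : WRITE, bio);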