aboutsummaryrefslogtreecommitdiffstats
path: root/fs/fs-writeback.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2009-09-02 06:34:32 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-09-11 03:20:25 -0400
commitd8a8559cd7a9ccac98d5f6f13297a2ff68a43627 (patch)
tree165e02117205e9790c21b2facc130b23addf3775 /fs/fs-writeback.c
parent0d03d59d9b31cd1e33b7e46a80b6fef66244b1f2 (diff)
writeback: get rid of generic_sync_sb_inodes() export
This adds two new exported functions: - writeback_inodes_sb(), which only attempts to writeback dirty inodes on this super_block, for WB_SYNC_NONE writeout. - sync_inodes_sb(), which writes out all dirty inodes on this super_block and also waits for the IO to complete. Acked-by: Jan Kara <jack@suse.cz> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	70
1 file changed, 42 insertions(+), 28 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..271e5f44e871 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -458,8 +458,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-void generic_sync_sb_inodes(struct super_block *sb,
+static void generic_sync_sb_inodes(struct super_block *sb,
 				struct writeback_control *wbc)
 {
 	const unsigned long start = jiffies;	/* livelock avoidance */
 	int sync = wbc->sync_mode == WB_SYNC_ALL;
@@ -593,13 +593,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
 
 	return;		/* Leave any unwritten inodes on s_io */
 }
-EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
-
-static void sync_sb_inodes(struct super_block *sb,
-				struct writeback_control *wbc)
-{
-	generic_sync_sb_inodes(sb, wbc);
-}
 
 /*
  * Start writeback of dirty pagecache data against all unlocked inodes.
@@ -640,7 +633,7 @@ restart:
 	 */
 	if (down_read_trylock(&sb->s_umount)) {
 		if (sb->s_root)
-			sync_sb_inodes(sb, wbc);
+			generic_sync_sb_inodes(sb, wbc);
 		up_read(&sb->s_umount);
 	}
 	spin_lock(&sb_lock);
@@ -653,35 +646,56 @@ restart:
 	spin_unlock(&sb_lock);
 }
 
-/*
- * writeback and wait upon the filesystem's dirty inodes.  The caller will
- * do this in two passes - one to write, and one to wait.
- *
- * A finite limit is set on the number of pages which will be written.
- * To prevent infinite livelock of sys_sync().
- *
- * We add in the number of potentially dirty inodes, because each inode write
- * can dirty pagecache in the underlying blockdev.
+/**
+ * writeback_inodes_sb	-	writeback dirty inodes from given super_block
+ * @sb: the superblock
+ *
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO. The number of pages submitted is
+ * returned.
  */
-void sync_inodes_sb(struct super_block *sb, int wait)
+long writeback_inodes_sb(struct super_block *sb)
 {
 	struct writeback_control wbc = {
-		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+		.sync_mode	= WB_SYNC_NONE,
 		.range_start	= 0,
 		.range_end	= LLONG_MAX,
 	};
+	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+	long nr_to_write;
 
-	if (!wait) {
-		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
-		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-
-		wbc.nr_to_write = nr_dirty + nr_unstable +
+	nr_to_write = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
-	} else
-		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
 
-	sync_sb_inodes(sb, &wbc);
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
 }
+EXPORT_SYMBOL(writeback_inodes_sb);
+
+/**
+ * sync_inodes_sb	-	sync sb inode pages
+ * @sb: the superblock
+ *
+ * This function writes and waits on any dirty inode belonging to this
+ * super_block. The number of pages synced is returned.
+ */
+long sync_inodes_sb(struct super_block *sb)
+{
+	struct writeback_control wbc = {
+		.sync_mode	= WB_SYNC_ALL,
+		.range_start	= 0,
+		.range_end	= LLONG_MAX,
+	};
+	long nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
+}
+EXPORT_SYMBOL(sync_inodes_sb);
 
 /**
  * write_inode_now - write an inode to disk