Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	70
1 file changed, 42 insertions, 28 deletions
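The patch below makes generic_sync_sb_inodes() static and splits the old sync_inodes_sb(sb, wait) entry point into two exported helpers: writeback_inodes_sb(), which starts WB_SYNC_NONE writeback without waiting on the submitted IO, and sync_inodes_sb(), which writes and waits under WB_SYNC_ALL. As a rough sketch of how a caller that used to pass a wait flag might map onto the new pair (the helper name example_sync_sb is made up for illustration and is not part of this patch):

	/*
	 * Illustrative only: how the old two-pass write-then-wait usage
	 * could look on top of the new interface.
	 */
	static void example_sync_sb(struct super_block *sb, int wait)
	{
		if (!wait) {
			/* opportunistic pass: submit writeback, do not wait */
			writeback_inodes_sb(sb);
			return;
		}

		/* data-integrity pass: write and wait on all dirty inodes */
		sync_inodes_sb(sb);
	}

Both helpers return the number of pages they pushed; the sketch ignores the return values.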
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..271e5f44e871 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -458,8 +458,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-void generic_sync_sb_inodes(struct super_block *sb,
+static void generic_sync_sb_inodes(struct super_block *sb,
 				struct writeback_control *wbc)
 {
 	const unsigned long start = jiffies;	/* livelock avoidance */
 	int sync = wbc->sync_mode == WB_SYNC_ALL;
@@ -593,13 +593,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
 
 	return;		/* Leave any unwritten inodes on s_io */
 }
-EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
-
-static void sync_sb_inodes(struct super_block *sb,
-				struct writeback_control *wbc)
-{
-	generic_sync_sb_inodes(sb, wbc);
-}
 
 /*
  * Start writeback of dirty pagecache data against all unlocked inodes.
@@ -640,7 +633,7 @@ restart:
 		 */
 		if (down_read_trylock(&sb->s_umount)) {
 			if (sb->s_root)
-				sync_sb_inodes(sb, wbc);
+				generic_sync_sb_inodes(sb, wbc);
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
@@ -653,35 +646,56 @@ restart:
 	spin_unlock(&sb_lock);
 }
 
-/*
- * writeback and wait upon the filesystem's dirty inodes. The caller will
- * do this in two passes - one to write, and one to wait.
- *
- * A finite limit is set on the number of pages which will be written.
- * To prevent infinite livelock of sys_sync().
- *
- * We add in the number of potentially dirty inodes, because each inode write
- * can dirty pagecache in the underlying blockdev.
+/**
+ * writeback_inodes_sb - writeback dirty inodes from given super_block
+ * @sb: the superblock
+ *
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO. The number of pages submitted is
+ * returned.
  */
-void sync_inodes_sb(struct super_block *sb, int wait)
+long writeback_inodes_sb(struct super_block *sb)
 {
 	struct writeback_control wbc = {
-		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+		.sync_mode	= WB_SYNC_NONE,
 		.range_start	= 0,
 		.range_end	= LLONG_MAX,
 	};
+	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+	long nr_to_write;
 
-	if (!wait) {
-		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
-		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-
-		wbc.nr_to_write = nr_dirty + nr_unstable +
+	nr_to_write = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
-	} else
-		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
 
-	sync_sb_inodes(sb, &wbc);
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
+}
+EXPORT_SYMBOL(writeback_inodes_sb);
+
+/**
+ * sync_inodes_sb - sync sb inode pages
+ * @sb: the superblock
+ *
+ * This function writes and waits on any dirty inode belonging to this
+ * super_block. The number of pages synced is returned.
+ */
+long sync_inodes_sb(struct super_block *sb)
+{
+	struct writeback_control wbc = {
+		.sync_mode	= WB_SYNC_ALL,
+		.range_start	= 0,
+		.range_end	= LLONG_MAX,
+	};
+	long nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
 }
+EXPORT_SYMBOL(sync_inodes_sb);
 
 /**
  * write_inode_now - write an inode to disk