Diffstat (limited to 'fs')
-rw-r--r--   fs/fs-writeback.c   | 70
-rw-r--r--   fs/sync.c           | 18
-rw-r--r--   fs/ubifs/budget.c   | 16
-rw-r--r--   fs/ubifs/super.c    |  8
4 files changed, 55 insertions(+), 57 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..271e5f44e871 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -458,8 +458,8 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-void generic_sync_sb_inodes(struct super_block *sb,
+static void generic_sync_sb_inodes(struct super_block *sb,
 				struct writeback_control *wbc)
 {
 	const unsigned long start = jiffies;	/* livelock avoidance */
 	int sync = wbc->sync_mode == WB_SYNC_ALL;
@@ -593,13 +593,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
 
 	return;	/* Leave any unwritten inodes on s_io */
 }
-EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
-
-static void sync_sb_inodes(struct super_block *sb,
-				struct writeback_control *wbc)
-{
-	generic_sync_sb_inodes(sb, wbc);
-}
 
 /*
  * Start writeback of dirty pagecache data against all unlocked inodes.
@@ -640,7 +633,7 @@ restart:
 		 */
 		if (down_read_trylock(&sb->s_umount)) {
 			if (sb->s_root)
-				sync_sb_inodes(sb, wbc);
+				generic_sync_sb_inodes(sb, wbc);
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
@@ -653,35 +646,56 @@ restart:
 	spin_unlock(&sb_lock);
 }
 
-/*
- * writeback and wait upon the filesystem's dirty inodes. The caller will
- * do this in two passes - one to write, and one to wait.
- *
- * A finite limit is set on the number of pages which will be written.
- * To prevent infinite livelock of sys_sync().
+/**
+ * writeback_inodes_sb - writeback dirty inodes from given super_block
+ * @sb: the superblock
  *
- * We add in the number of potentially dirty inodes, because each inode write
- * can dirty pagecache in the underlying blockdev.
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO. The number of pages submitted is
+ * returned.
  */
-void sync_inodes_sb(struct super_block *sb, int wait)
+long writeback_inodes_sb(struct super_block *sb)
 {
 	struct writeback_control wbc = {
-		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+		.sync_mode = WB_SYNC_NONE,
 		.range_start = 0,
 		.range_end = LLONG_MAX,
 	};
+	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+	long nr_to_write;
 
-	if (!wait) {
-		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
-		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-
-		wbc.nr_to_write = nr_dirty + nr_unstable +
+	nr_to_write = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
-	} else
-		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
 
-	sync_sb_inodes(sb, &wbc);
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
+}
+EXPORT_SYMBOL(writeback_inodes_sb);
+
+/**
+ * sync_inodes_sb - sync sb inode pages
+ * @sb: the superblock
+ *
+ * This function writes and waits on any dirty inode belonging to this
+ * super_block. The number of pages synced is returned.
+ */
+long sync_inodes_sb(struct super_block *sb)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+	};
+	long nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+	wbc.nr_to_write = nr_to_write;
+	generic_sync_sb_inodes(sb, &wbc);
+	return nr_to_write - wbc.nr_to_write;
 }
+EXPORT_SYMBOL(sync_inodes_sb);
 
 /**
  * write_inode_now - write an inode to disk
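
[Illustrative note, not part of the patch.] With this change, callers no longer fill in a struct writeback_control by hand; they pick one of the two exported helpers, both of which return the number of pages written. A minimal, hypothetical caller (the function name and the pr_debug message are made up for illustration) could look like this:

/* Illustration only -- not part of this commit. */
#include <linux/fs.h>
#include <linux/writeback.h>

static void example_flush_sb(struct super_block *sb, int wait)
{
	long nr;

	if (!wait)
		nr = writeback_inodes_sb(sb);	/* best effort, does not wait for I/O */
	else
		nr = sync_inodes_sb(sb);	/* write and wait on all dirty inodes */

	pr_debug("%ld pages written back\n", nr);
}
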
diff --git a/fs/sync.c b/fs/sync.c
index 3422ba61d86d..66f210476f40 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -19,20 +19,22 @@
 			SYNC_FILE_RANGE_WAIT_AFTER)
 
 /*
- * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
- * just dirties buffers with inodes so we have to submit IO for these buffers
- * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
- * case write_inode() functions do sync_dirty_buffer() and thus effectively
- * write one block at a time.
+ * Do the filesystem syncing work. For simple filesystems
+ * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
+ * submit IO for these buffers via __sync_blockdev(). This also speeds up the
+ * wait == 1 case since in that case write_inode() functions do
+ * sync_dirty_buffer() and thus effectively write one block at a time.
  */
 static int __sync_filesystem(struct super_block *sb, int wait)
 {
 	/* Avoid doing twice syncing and cache pruning for quota sync */
-	if (!wait)
+	if (!wait) {
 		writeout_quota_sb(sb, -1);
-	else
+		writeback_inodes_sb(sb);
+	} else {
 		sync_quota_sb(sb, -1);
-	sync_inodes_sb(sb, wait);
+		sync_inodes_sb(sb);
+	}
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, wait);
 	return __sync_blockdev(sb->s_bdev, wait);
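
[Illustrative note, not part of the patch.] The "wait == 1 case" in the comment above refers to the usual two-pass convention: the sync path first calls the helper without waiting to get I/O started, then again with waiting to write the remainder and wait for completion. A sketch of that pattern (the wrapper name is hypothetical; __sync_filesystem() is static to fs/sync.c, so this only illustrates how the file's own sync path drives it):

/* Illustration only: the two-pass pattern the comment above assumes. */
static void example_sync_filesystem_twice(struct super_block *sb)
{
	__sync_filesystem(sb, 0);	/* pass 1: start writeback, don't wait */
	__sync_filesystem(sb, 1);	/* pass 2: write the rest and wait for completion */
}
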
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index eaf6d891d46f..1c8991b0db13 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -65,26 +65,14 @@
 static int shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
 	int nr_written;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_NONE,
-		.range_end = LLONG_MAX,
-		.nr_to_write = nr_to_write,
-	};
-
-	generic_sync_sb_inodes(c->vfs_sb, &wbc);
-	nr_written = nr_to_write - wbc.nr_to_write;
 
+	nr_written = writeback_inodes_sb(c->vfs_sb);
 	if (!nr_written) {
 		/*
 		 * Re-try again but wait on pages/inodes which are being
 		 * written-back concurrently (e.g., by pdflush).
 		 */
-		memset(&wbc, 0, sizeof(struct writeback_control));
-		wbc.sync_mode = WB_SYNC_ALL;
-		wbc.range_end = LLONG_MAX;
-		wbc.nr_to_write = nr_to_write;
-		generic_sync_sb_inodes(c->vfs_sb, &wbc);
-		nr_written = nr_to_write - wbc.nr_to_write;
+		nr_written = sync_inodes_sb(c->vfs_sb);
 	}
 
 	dbg_budg("%d pages were written back", nr_written);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 26d2e0d80465..8d6050a5966c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -438,12 +438,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
 {
 	int i, err;
 	struct ubifs_info *c = sb->s_fs_info;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-		.nr_to_write = LONG_MAX,
-	};
 
 	/*
 	 * Zero @wait is just an advisory thing to help the file system shove
@@ -462,7 +456,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
 	 * the user be able to get more accurate results of 'statfs()' after
 	 * they synchronize the file system.
 	 */
-	generic_sync_sb_inodes(sb, &wbc);
+	sync_inodes_sb(sb);
 
 	/*
 	 * Synchronize write buffers, because 'ubifs_run_commit()' does not