author     Hans Reiser <reiser@namesys.com>               2008-05-07 08:48:57 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-07-14 12:10:52 -0400
commit     ae8547b0a9e5d718ce272ddc48f91703a0f52a0b (patch)
tree       e7caceb5171ae64c71b575d7fc38bdc319379791
parent     bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
VFS: move inode_lock into sync_sb_inodes
This patch makes 'sync_sb_inodes()' take 'inode_lock' itself, rather than
expecting the caller to do so. This change was previously done by Hans Reiser
<reiser@namesys.com> and sat in the -mm tree.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
-rw-r--r--  fs/fs-writeback.c  11
1 file changed, 3 insertions(+), 8 deletions(-)
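For readers outside the kernel tree, here is a minimal userspace sketch of the
lock-ownership change described above: the lock moves out of every call site
and into the callee. A pthread mutex stands in for inode_lock, and the names
sync_list() and dirty_count are illustrative only, not kernel code.

/*
 * Sketch of the pattern, assuming a pthread mutex as a stand-in for
 * inode_lock.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int dirty_count = 3;            /* pretend per-sb dirty inode count */

/* After the change: the callee acquires and releases the lock itself. */
static void sync_list(void)
{
	pthread_mutex_lock(&list_lock);
	while (dirty_count > 0) {
		printf("writing back inode %d\n", dirty_count);
		dirty_count--;
	}
	pthread_mutex_unlock(&list_lock);
}

/* Callers no longer wrap the call in lock/unlock pairs. */
int main(void)
{
	sync_list();   /* was: lock(); sync_list(); unlock(); */
	return 0;
}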
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ae45f77765c0..16519fe1399c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -424,8 +424,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
  * that it can be located for waiting on in __writeback_single_inode().
  *
- * Called under inode_lock.
- *
  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
  * This function assumes that the blockdev superblock's inodes are backed by
  * a variety of queues, so all inodes are searched.  For other superblocks,
@@ -446,6 +444,7 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
 {
 	const unsigned long start = jiffies;	/* livelock avoidance */
 
+	spin_lock(&inode_lock);
 	if (!wbc->for_kupdate || list_empty(&sb->s_io))
 		queue_io(sb, wbc->older_than_this);
 
@@ -524,6 +523,7 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
 		if (!list_empty(&sb->s_more_io))
 			wbc->more_io = 1;
 	}
+	spin_unlock(&inode_lock);
 	return;		/* Leave any unwritten inodes on s_io */
 }
 
@@ -565,11 +565,8 @@ restart:
 		 * be unmounted by the time it is released.
 		 */
 		if (down_read_trylock(&sb->s_umount)) {
-			if (sb->s_root) {
-				spin_lock(&inode_lock);
+			if (sb->s_root)
 				sync_sb_inodes(sb, wbc);
-				spin_unlock(&inode_lock);
-			}
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
@@ -607,9 +604,7 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 		(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
 			nr_dirty + nr_unstable;
 	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
-	spin_lock(&inode_lock);
 	sync_sb_inodes(sb, &wbc);
-	spin_unlock(&inode_lock);
 }
 
 /*