author     Christoph Hellwig <hch@lst.de>        2011-03-15 16:51:24 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>     2011-03-16 09:56:03 -0400
commit     bab1d9444d9a147f1dc3478dd06c16f490227f3e
tree       e5dd5220c976f604163c0b96945af3ee0be1fe24    /fs/inode.c
parent     5229645bdc35f1cc43eb8b25b6993c8fa58b4b43
prune back iprune_sem
iprune_sem is continuously giving us lockdep warnings because we take it in
read mode in the reclaim path, but we are also doing non-NOFS allocations
under it when it is taken in write mode.

Taking a deeper look at it, I think it's fixable quite trivially:

 - for invalidate_inodes we do not need iprune_sem at all.  We have an
   active reference on the superblock, so the filesystem is not going away
   until it has finished.
 - for evict_inodes we do need it, to make sure prune_icache has done its
   work before we tear down the superblock.  But there is no reason to hold
   it over the actual reclaim operation - it's enough to cycle through it
   after the reclaim to make sure we wait for any pending prune_icache to
   complete.

We just have to remove the WARN_ON for otherwise busy inodes, as they can
actually happen now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
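The "cycle through" trick works because a writer acquiring an rwsem must wait
for every reader that already holds it to drop it, so an empty down_write()
followed immediately by up_write() acts as a barrier against in-flight
shrinkers. The following is a minimal userspace sketch of that pattern using a
POSIX rwlock in place of the kernel rwsem; shrinker(), prune_lock and the
timing are illustrative stand-ins, not code from this commit:

/*
 * Userspace analogue of the iprune_sem "cycle through" idiom:
 * take and immediately release the write lock to wait for any
 * reader (the reclaim path) still in flight.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t prune_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stands in for prune_icache(): holds the lock in read mode while it works. */
static void *shrinker(void *arg)
{
	pthread_rwlock_rdlock(&prune_lock);
	puts("shrinker: reclaiming");
	sleep(1);			/* simulated reclaim work */
	puts("shrinker: done");
	pthread_rwlock_unlock(&prune_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, shrinker, NULL);
	usleep(100 * 1000);		/* let the shrinker grab the read lock first */

	/*
	 * Stands in for evict_inodes(): do the real work without the lock,
	 * then cycle through it so any pending shrinker has finished before
	 * teardown continues.
	 */
	puts("umount: evicting inodes");
	pthread_rwlock_wrlock(&prune_lock);	/* blocks until the reader exits */
	pthread_rwlock_unlock(&prune_lock);
	puts("umount: safe to tear down superblock");

	pthread_join(t, NULL);
	return 0;
}

The key property, as in the patch below, is that the write lock is never held
over the actual work; it is only used once, at the end, as a wait-for-readers
barrier.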
Diffstat (limited to 'fs/inode.c')
-rw-r--r--    fs/inode.c    32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 0647d80accf6..9910c039f026 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,16 +84,13 @@ static struct hlist_head *inode_hashtable __read_mostly;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
- * icache shrinking path, and the umount path.  Without this exclusion,
- * by the time prune_icache calls iput for the inode whose pages it has
- * been invalidating, or by the time it calls clear_inode & destroy_inode
- * from its final dispose_list, the struct super_block they refer to
- * (for inode->i_sb->s_op) may already have been freed and reused.
+ * iprune_sem provides exclusion between the icache shrinking and the
+ * umount path.
  *
- * We make this an rwsem because the fastpath is icache shrinking. In
- * some cases a filesystem may be doing a significant amount of work in
- * its inode reclaim code, so this should improve parallelism.
+ * We don't actually need it to protect anything in the umount path,
+ * but only need to cycle through it to make sure any inode that
+ * prune_icache took off the LRU list has been fully torn down by the
+ * time we are past evict_inodes.
  */
 static DECLARE_RWSEM(iprune_sem);
 
@@ -516,17 +513,12 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	down_write(&iprune_sem);
-
 	spin_lock(&inode_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
 			continue;
-
-		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
-			WARN_ON(1);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
 			continue;
-		}
 
 		inode->i_state |= I_FREEING;
 
@@ -542,6 +534,13 @@ void evict_inodes(struct super_block *sb)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&dispose);
+
+	/*
+	 * Cycle through iprune_sem to make sure any inode that prune_icache
+	 * moved off the list before we took the lock has been fully torn
+	 * down.
+	 */
+	down_write(&iprune_sem);
 	up_write(&iprune_sem);
 }
 
@@ -561,8 +560,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	down_write(&iprune_sem);
-
 	spin_lock(&inode_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
@@ -590,7 +587,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&dispose);
-	up_write(&iprune_sem);
 
 	return busy;
 }