Diffstat (limited to 'fs')
-rw-r--r--	fs/inode.c	32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 0647d80accf6..9910c039f026 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,16 +84,13 @@ static struct hlist_head *inode_hashtable __read_mostly;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
- * icache shrinking path, and the umount path.  Without this exclusion,
- * by the time prune_icache calls iput for the inode whose pages it has
- * been invalidating, or by the time it calls clear_inode & destroy_inode
- * from its final dispose_list, the struct super_block they refer to
- * (for inode->i_sb->s_op) may already have been freed and reused.
+ * iprune_sem provides exclusion between the icache shrinking and the
+ * umount path.
  *
- * We make this an rwsem because the fastpath is icache shrinking. In
- * some cases a filesystem may be doing a significant amount of work in
- * its inode reclaim code, so this should improve parallelism.
+ * We don't actually need it to protect anything in the umount path,
+ * but only need to cycle through it to make sure any inode that
+ * prune_icache took off the LRU list has been fully torn down by the
+ * time we are past evict_inodes.
  */
 static DECLARE_RWSEM(iprune_sem);
 
@@ -516,17 +513,12 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	down_write(&iprune_sem);
-
 	spin_lock(&inode_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
 			continue;
-
-		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
-			WARN_ON(1);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
 			continue;
-		}
 
 		inode->i_state |= I_FREEING;
 
@@ -542,6 +534,13 @@ void evict_inodes(struct super_block *sb)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&dispose);
+
+	/*
+	 * Cycle through iprune_sem to make sure any inode that prune_icache
+	 * moved off the list before we took the lock has been fully torn
+	 * down.
+	 */
+	down_write(&iprune_sem);
 	up_write(&iprune_sem);
 }
 
@@ -561,8 +560,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	down_write(&iprune_sem);
-
 	spin_lock(&inode_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
@@ -590,7 +587,6 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&dispose);
-	up_write(&iprune_sem);
 
 	return busy;
 }
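
The patch hinges on an empty down_write()/up_write() pair acting as a barrier: prune_icache (not shown in this diff) holds iprune_sem for read while it disposes the inodes it took off the LRU, so once evict_inodes manages to take the semaphore for write, every reclaim pass that was already in flight must have finished. Below is a minimal userspace sketch of that idiom using POSIX rwlocks, for illustration only; reclaim_worker and evict_all are hypothetical stand-ins for prune_icache and evict_inodes, not kernel code.

/* Illustration only: userspace sketch of the rwsem "cycle" barrier. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t prune_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for prune_icache(): tear objects down under the shared lock. */
static void *reclaim_worker(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&prune_lock);
	puts("reclaim: tearing down unreferenced objects");
	pthread_rwlock_unlock(&prune_lock);
	return NULL;
}

/* Hypothetical stand-in for evict_inodes(): dispose our own list, then cycle the lock. */
static void evict_all(void)
{
	puts("umount: disposed everything still on the per-sb list");

	/*
	 * Barrier: the exclusive acquisition cannot succeed until every
	 * reclaim pass that already holds the lock shared has dropped it,
	 * so nothing it took off the list can still be mid-teardown.
	 */
	pthread_rwlock_wrlock(&prune_lock);
	pthread_rwlock_unlock(&prune_lock);

	puts("umount: safe to free the super_block");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaim_worker, NULL);
	evict_all();
	pthread_join(&t, NULL);
	return 0;
}

Compiled with -pthread, the writer in evict_all simply waits out any reader, which is exactly the role the cycled iprune_sem plays for evict_inodes in the patch.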