-rw-r--r--  fs/inode.c          27
-rw-r--r--  fs/super.c           1
-rw-r--r--  include/linux/fs.h   3
3 files changed, 16 insertions, 15 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 8c3491302e0c..0450e25aeda0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -33,7 +33,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
 - * inode_lru_lock protects:
 + * inode->i_sb->s_inode_lru_lock protects:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -46,7 +46,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
 - *     inode_lru_lock
 + *     inode->i_sb->s_inode_lru_lock
  *
  * inode_wb_list_lock
  *   inode->i_lock
@@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
@@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
 		inode->i_sb->s_nr_inodes_unused++;
 		this_cpu_inc(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
 		inode->i_sb->s_nr_inodes_unused--;
 		this_cpu_dec(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
@@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lru_lock by dispose_list().
+ * temporary list and then are freed outside sb->s_inode_lru_lock by
+ * dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed. If the inode has metadata buffers attached to
@@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	spin_lock(&inode_lru_lock);
+	spin_lock(&sb->s_inode_lru_lock);
 	for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
 		struct inode *inode;
 
@@ -645,7 +644,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
 
 		/*
-		 * we are inverting the inode_lru_lock/inode->i_lock here,
+		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
 		 * so use a trylock. If we fail to get the lock, just move the
 		 * inode to the back of the list so we don't spin on it.
 		 */
@@ -677,12 +676,12 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 			__iget(inode);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lru_lock);
+			spin_unlock(&sb->s_inode_lru_lock);
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lru_lock);
+			spin_lock(&sb->s_inode_lru_lock);
 
 			if (inode != list_entry(sb->s_inode_lru.next,
 							struct inode, i_lru))
@@ -707,7 +706,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&sb->s_inode_lru_lock);
 	*nr_to_scan = nr_scanned;
 
 	dispose_list(&freeable);
diff --git a/fs/super.c b/fs/super.c
index e8e6dbfefe8c..73ab9f9b3571 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -78,6 +78,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
 		INIT_LIST_HEAD(&s->s_inode_lru);
+		spin_lock_init(&s->s_inode_lru_lock);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		lockdep_set_class(&s->s_umount, &type->s_umount_key);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9724f0a48742..460d2cc21ec6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1397,7 +1397,8 @@ struct super_block {
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
 
-	/* inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+	/* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+	spinlock_t		s_inode_lru_lock ____cacheline_aligned_in_smp;
 	struct list_head	s_inode_lru;		/* unused inode lru */
 	int			s_nr_inodes_unused;	/* # of inodes on lru */
 
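
The core of the change is visible in inode_lru_list_add()/inode_lru_list_del() above: the unused-inode LRU list and its counter already lived in struct super_block, and the patch moves the lock that protects them there as well, so LRU traffic on one filesystem no longer contends on a single global spinlock. The following standalone userspace sketch illustrates that per-superblock lock/list ownership pattern; it is an analogue only, with struct names, the pthread spinlock, and the hand-rolled list standing in for the kernel's types, not kernel code.

/*
 * Userspace sketch of the per-superblock LRU locking pattern
 * (illustrative only; types and helpers are stand-ins, not the kernel's).
 */
#include <pthread.h>
#include <stdio.h>

struct lru_node {
	struct lru_node *prev, *next;
};

struct sb_like {
	pthread_spinlock_t s_inode_lru_lock;	/* protects the two fields below */
	struct lru_node s_inode_lru;		/* circular list head of unused inodes */
	int s_nr_inodes_unused;
};

struct inode_like {
	struct sb_like *i_sb;
	struct lru_node i_lru;
};

static void sb_like_init(struct sb_like *sb)
{
	pthread_spin_init(&sb->s_inode_lru_lock, PTHREAD_PROCESS_PRIVATE);
	sb->s_inode_lru.prev = &sb->s_inode_lru;
	sb->s_inode_lru.next = &sb->s_inode_lru;
	sb->s_nr_inodes_unused = 0;
}

/* Analogue of inode_lru_list_add(): only this superblock's lock is taken. */
static void lru_add(struct inode_like *inode)
{
	struct sb_like *sb = inode->i_sb;

	pthread_spin_lock(&sb->s_inode_lru_lock);
	/* insert at the head of sb's circular LRU list */
	inode->i_lru.next = sb->s_inode_lru.next;
	inode->i_lru.prev = &sb->s_inode_lru;
	sb->s_inode_lru.next->prev = &inode->i_lru;
	sb->s_inode_lru.next = &inode->i_lru;
	sb->s_nr_inodes_unused++;
	pthread_spin_unlock(&sb->s_inode_lru_lock);
}

int main(void)
{
	struct sb_like sb_a, sb_b;
	struct inode_like ino_a = { .i_sb = &sb_a };
	struct inode_like ino_b = { .i_sb = &sb_b };

	sb_like_init(&sb_a);
	sb_like_init(&sb_b);

	/* LRU updates on sb_a and sb_b no longer share a lock. */
	lru_add(&ino_a);
	lru_add(&ino_b);

	printf("sb_a unused=%d, sb_b unused=%d\n",
	       sb_a.s_nr_inodes_unused, sb_b.s_nr_inodes_unused);
	return 0;
}

Built with something like "gcc -pthread lru_sketch.c" (the file name is arbitrary), it prints one unused inode per superblock; the point is only that each add takes its own superblock's s_inode_lru_lock, which is what the patch achieves for the kernel's inode LRU.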