author     Dave Chinner <dchinner@redhat.com>       2011-03-22 07:23:40 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>        2011-03-24 21:16:32 -0400
commit     55fa6091d83160ca772fc37cebae45d42695a708 (patch)
tree       4df49f372032e30449e1a2dd64daf443e20b781c /fs/inode.c
parent     f283c86afe6aa70b733d1ecebad5d9464943b774 (diff)
fs: move i_sb_list out from under inode_lock
Protect the per-sb inode list with a new global lock, inode_sb_list_lock, and
use it for all manipulations and traversals of the list. This lock replaces
inode_lock for the list: inodes on it can be validity-checked while holding
inode->i_lock, so inode_lock is no longer needed to protect the list.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
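
In practice this turns every per-sb inode walk into the pattern below: hold
inode_sb_list_lock to keep the list stable, and take each inode's i_lock to
validity-check it before acting on it. This is a minimal sketch modeled on the
evict_inodes()/invalidate_inodes() hunks in this patch; the function name
walk_sb_inodes() and the "act on the inode" step are placeholders, not part of
the patch.

/*
 * Sketch only: the shape of a per-sb inode walk after this change.
 */
static void walk_sb_inodes(struct super_block *sb)
{
        struct inode *inode;

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        /* being set up or torn down: not safe to touch */
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                /* ... act on the inode, e.g. __iget() it for later use ... */
                spin_unlock(&inode->i_lock);
        }
        spin_unlock(&inode_sb_list_lock);
}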
Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  43
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 389f5a247599..785b1ab23ff0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -34,10 +34,15 @@
  *   inode->i_state, inode->i_hash, __iget()
  * inode_lru_lock protects:
  *   inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ *   sb->s_inodes, inode->i_sb_list
  *
  * Lock ordering:
  * inode_lock
  *   inode->i_lock
+ *
+ * inode_sb_list_lock
+ *   inode->i_lock
  *     inode_lru_lock
  */
 
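The hierarchy in the updated comment reads top-down as outer-to-inner. A sketch
of an acquisition sequence that respects it is shown below; it is illustrative
only and not lifted from the patch, and the helper name lock_order_sketch() is
a placeholder.

/* Illustrative nesting per the documented hierarchy; release in reverse. */
static void lock_order_sketch(struct inode *inode)
{
        spin_lock(&inode_sb_list_lock);
        spin_lock(&inode->i_lock);
        spin_lock(&inode_lru_lock);
        /* ... i_sb_list, i_state and i_lru may all be touched here ... */
        spin_unlock(&inode_lru_lock);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_sb_list_lock);
}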
@@ -99,6 +104,8 @@ static struct hlist_head *inode_hashtable __read_mostly;
  */
 DEFINE_SPINLOCK(inode_lock);
 
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+
 /*
  * iprune_sem provides exclusion between the icache shrinking and the
  * umount path.
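Note that this diffstat is limited to fs/inode.c, so only the lock's definition
appears above. For the other per-sb list walkers elsewhere in the VFS
(writeback, quota, fsnotify, drop_caches) to take the same lock, the full patch
also needs an extern declaration visible outside this file, assumed here to
live in fs/internal.h since that hunk is not shown in this diff:

/* Assumed declaration elsewhere in the full patch, not in the fs/inode.c diff */
extern spinlock_t inode_sb_list_lock;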
@@ -378,26 +385,23 @@ static void inode_lru_list_del(struct inode *inode)
         spin_unlock(&inode_lru_lock);
 }
 
-static inline void __inode_sb_list_add(struct inode *inode)
-{
-        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
-}
-
 /**
  * inode_sb_list_add - add inode to the superblock list of inodes
  * @inode: inode to add
  */
 void inode_sb_list_add(struct inode *inode)
 {
-        spin_lock(&inode_lock);
-        __inode_sb_list_add(inode);
-        spin_unlock(&inode_lock);
+        spin_lock(&inode_sb_list_lock);
+        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+        spin_unlock(&inode_sb_list_lock);
 }
 EXPORT_SYMBOL_GPL(inode_sb_list_add);
 
-static inline void __inode_sb_list_del(struct inode *inode)
+static inline void inode_sb_list_del(struct inode *inode)
 {
+        spin_lock(&inode_sb_list_lock);
         list_del_init(&inode->i_sb_list);
+        spin_unlock(&inode_sb_list_lock);
 }
 
 static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -481,9 +485,10 @@ static void evict(struct inode *inode)
 
         spin_lock(&inode_lock);
         list_del_init(&inode->i_wb_list);
-        __inode_sb_list_del(inode);
         spin_unlock(&inode_lock);
 
+        inode_sb_list_del(inode);
+
         if (op->evict_inode) {
                 op->evict_inode(inode);
         } else {
@@ -539,7 +544,7 @@ void evict_inodes(struct super_block *sb)
         struct inode *inode, *next;
         LIST_HEAD(dispose);
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_sb_list_lock);
         list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                 if (atomic_read(&inode->i_count))
                         continue;
@@ -555,7 +560,7 @@ void evict_inodes(struct super_block *sb)
                 spin_unlock(&inode->i_lock);
                 list_add(&inode->i_lru, &dispose);
         }
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_sb_list_lock);
 
         dispose_list(&dispose);
 
@@ -584,7 +589,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
         struct inode *inode, *next;
         LIST_HEAD(dispose);
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_sb_list_lock);
         list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                 spin_lock(&inode->i_lock);
                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
@@ -607,7 +612,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
                 spin_unlock(&inode->i_lock);
                 list_add(&inode->i_lru, &dispose);
         }
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_sb_list_lock);
 
         dispose_list(&dispose);
 
@@ -867,16 +872,14 @@ struct inode *new_inode(struct super_block *sb)
 {
         struct inode *inode;
 
-        spin_lock_prefetch(&inode_lock);
+        spin_lock_prefetch(&inode_sb_list_lock);
 
         inode = alloc_inode(sb);
         if (inode) {
-                spin_lock(&inode_lock);
                 spin_lock(&inode->i_lock);
                 inode->i_state = 0;
                 spin_unlock(&inode->i_lock);
-                __inode_sb_list_add(inode);
-                spin_unlock(&inode_lock);
+                inode_sb_list_add(inode);
         }
         return inode;
 }
@@ -945,7 +948,7 @@ static struct inode *get_new_inode(struct super_block *sb,
                 inode->i_state = I_NEW;
                 hlist_add_head(&inode->i_hash, head);
                 spin_unlock(&inode->i_lock);
-                __inode_sb_list_add(inode);
+                inode_sb_list_add(inode);
                 spin_unlock(&inode_lock);
 
                 /* Return the locked inode with I_NEW set, the
@@ -994,7 +997,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                 inode->i_state = I_NEW;
                 hlist_add_head(&inode->i_hash, head);
                 spin_unlock(&inode->i_lock);
-                __inode_sb_list_add(inode);
+                inode_sb_list_add(inode);
                 spin_unlock(&inode_lock);
 
                 /* Return the locked inode with I_NEW set, the