diff options
author | Dave Chinner <dchinner@redhat.com> | 2015-03-04 12:37:22 -0500 |
---|---|---|
committer | Josef Bacik <jbacik@fb.com> | 2015-08-17 18:39:46 -0400 |
commit | 74278da9f70d84d715601fe794567a6d2bfdf078 (patch) | |
tree | 49262a88fc42b85bfe4930f5cd7a832d5ba647c6 /fs/inode.c | |
parent | cbedaac63481dea52327127a9f1c60f092bd6b07 (diff) |
inode: convert inode_sb_list_lock to per-sb
The process of reducing contention on per-superblock inode lists
starts with moving the locking to match the per-superblock inode
list. This takes the global lock out of the picture and reduces the
contention problems to within a single filesystem. This doesn't get
rid of contention as the locks still have global CPU scope, but it
does isolate operations on different superblocks from each other.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Dave Chinner <dchinner@redhat.com>
Diffstat (limited to 'fs/inode.c')
-rw-r--r-- | fs/inode.c | 28 |
1 files changed, 13 insertions, 15 deletions
diff --git a/fs/inode.c b/fs/inode.c index d30640f7a193..a2de294f6b77 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -28,8 +28,8 @@ | |||
28 | * inode->i_state, inode->i_hash, __iget() | 28 | * inode->i_state, inode->i_hash, __iget() |
29 | * Inode LRU list locks protect: | 29 | * Inode LRU list locks protect: |
30 | * inode->i_sb->s_inode_lru, inode->i_lru | 30 | * inode->i_sb->s_inode_lru, inode->i_lru |
31 | * inode_sb_list_lock protects: | 31 | * inode->i_sb->s_inode_list_lock protects: |
32 | * sb->s_inodes, inode->i_sb_list | 32 | * inode->i_sb->s_inodes, inode->i_sb_list |
33 | * bdi->wb.list_lock protects: | 33 | * bdi->wb.list_lock protects: |
34 | * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list | 34 | * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list |
35 | * inode_hash_lock protects: | 35 | * inode_hash_lock protects: |
@@ -37,7 +37,7 @@ | |||
37 | * | 37 | * |
38 | * Lock ordering: | 38 | * Lock ordering: |
39 | * | 39 | * |
40 | * inode_sb_list_lock | 40 | * inode->i_sb->s_inode_list_lock |
41 | * inode->i_lock | 41 | * inode->i_lock |
42 | * Inode LRU list locks | 42 | * Inode LRU list locks |
43 | * | 43 | * |
@@ -45,7 +45,7 @@ | |||
45 | * inode->i_lock | 45 | * inode->i_lock |
46 | * | 46 | * |
47 | * inode_hash_lock | 47 | * inode_hash_lock |
48 | * inode_sb_list_lock | 48 | * inode->i_sb->s_inode_list_lock |
49 | * inode->i_lock | 49 | * inode->i_lock |
50 | * | 50 | * |
51 | * iunique_lock | 51 | * iunique_lock |
@@ -57,8 +57,6 @@ static unsigned int i_hash_shift __read_mostly; | |||
57 | static struct hlist_head *inode_hashtable __read_mostly; | 57 | static struct hlist_head *inode_hashtable __read_mostly; |
58 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); | 58 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); |
59 | 59 | ||
60 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); | ||
61 | |||
62 | /* | 60 | /* |
63 | * Empty aops. Can be used for the cases where the user does not | 61 | * Empty aops. Can be used for the cases where the user does not |
64 | * define any of the address_space operations. | 62 | * define any of the address_space operations. |
@@ -426,18 +424,18 @@ static void inode_lru_list_del(struct inode *inode) | |||
426 | */ | 424 | */ |
427 | void inode_sb_list_add(struct inode *inode) | 425 | void inode_sb_list_add(struct inode *inode) |
428 | { | 426 | { |
429 | spin_lock(&inode_sb_list_lock); | 427 | spin_lock(&inode->i_sb->s_inode_list_lock); |
430 | list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); | 428 | list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); |
431 | spin_unlock(&inode_sb_list_lock); | 429 | spin_unlock(&inode->i_sb->s_inode_list_lock); |
432 | } | 430 | } |
433 | EXPORT_SYMBOL_GPL(inode_sb_list_add); | 431 | EXPORT_SYMBOL_GPL(inode_sb_list_add); |
434 | 432 | ||
435 | static inline void inode_sb_list_del(struct inode *inode) | 433 | static inline void inode_sb_list_del(struct inode *inode) |
436 | { | 434 | { |
437 | if (!list_empty(&inode->i_sb_list)) { | 435 | if (!list_empty(&inode->i_sb_list)) { |
438 | spin_lock(&inode_sb_list_lock); | 436 | spin_lock(&inode->i_sb->s_inode_list_lock); |
439 | list_del_init(&inode->i_sb_list); | 437 | list_del_init(&inode->i_sb_list); |
440 | spin_unlock(&inode_sb_list_lock); | 438 | spin_unlock(&inode->i_sb->s_inode_list_lock); |
441 | } | 439 | } |
442 | } | 440 | } |
443 | 441 | ||
@@ -594,7 +592,7 @@ void evict_inodes(struct super_block *sb) | |||
594 | struct inode *inode, *next; | 592 | struct inode *inode, *next; |
595 | LIST_HEAD(dispose); | 593 | LIST_HEAD(dispose); |
596 | 594 | ||
597 | spin_lock(&inode_sb_list_lock); | 595 | spin_lock(&sb->s_inode_list_lock); |
598 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { | 596 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { |
599 | if (atomic_read(&inode->i_count)) | 597 | if (atomic_read(&inode->i_count)) |
600 | continue; | 598 | continue; |
@@ -610,7 +608,7 @@ void evict_inodes(struct super_block *sb) | |||
610 | spin_unlock(&inode->i_lock); | 608 | spin_unlock(&inode->i_lock); |
611 | list_add(&inode->i_lru, &dispose); | 609 | list_add(&inode->i_lru, &dispose); |
612 | } | 610 | } |
613 | spin_unlock(&inode_sb_list_lock); | 611 | spin_unlock(&sb->s_inode_list_lock); |
614 | 612 | ||
615 | dispose_list(&dispose); | 613 | dispose_list(&dispose); |
616 | } | 614 | } |
@@ -631,7 +629,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) | |||
631 | struct inode *inode, *next; | 629 | struct inode *inode, *next; |
632 | LIST_HEAD(dispose); | 630 | LIST_HEAD(dispose); |
633 | 631 | ||
634 | spin_lock(&inode_sb_list_lock); | 632 | spin_lock(&sb->s_inode_list_lock); |
635 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { | 633 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { |
636 | spin_lock(&inode->i_lock); | 634 | spin_lock(&inode->i_lock); |
637 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { | 635 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { |
@@ -654,7 +652,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) | |||
654 | spin_unlock(&inode->i_lock); | 652 | spin_unlock(&inode->i_lock); |
655 | list_add(&inode->i_lru, &dispose); | 653 | list_add(&inode->i_lru, &dispose); |
656 | } | 654 | } |
657 | spin_unlock(&inode_sb_list_lock); | 655 | spin_unlock(&sb->s_inode_list_lock); |
658 | 656 | ||
659 | dispose_list(&dispose); | 657 | dispose_list(&dispose); |
660 | 658 | ||
@@ -890,7 +888,7 @@ struct inode *new_inode(struct super_block *sb) | |||
890 | { | 888 | { |
891 | struct inode *inode; | 889 | struct inode *inode; |
892 | 890 | ||
893 | spin_lock_prefetch(&inode_sb_list_lock); | 891 | spin_lock_prefetch(&sb->s_inode_list_lock); |
894 | 892 | ||
895 | inode = new_inode_pseudo(sb); | 893 | inode = new_inode_pseudo(sb); |
896 | if (inode) | 894 | if (inode) |