path: root/fs/dcache.c
author	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:05 -0500
committer	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:31 -0500
commit	ceb5bdc2d246f6d81cf61ed70f325308a11821d2 (patch)
tree	65fc9e0227994d4ffc80530dd15bb5a9672a295e /fs/dcache.c
parent	626d607435617cc0f033522083e2bb195b81813c (diff)
fs: dcache per-bucket dcache hash locking
We can turn the dcache hash locking from a global dcache_hash_lock into
per-bucket locking.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
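The idea, in brief: instead of one global spinlock serializing updates to every hash chain, each bucket's chain head doubles as its own lock, with bit 0 of the head pointer used as a bit spinlock (hlist_bl_head plus bit_spin_lock in the patch below), so the finer-grained locking costs no extra memory over the plain table. The following is a minimal userspace sketch of that pattern using C11 atomics; it is not the kernel's hlist_bl/bit_spin_lock code, and the names (struct bucket, bucket_lock, bucket_add_head) are illustrative only.

/*
 * Userspace sketch of the "lock lives in bit 0 of the bucket head" idea
 * (illustrative only, not the kernel implementation).  Writers take the
 * per-bucket bit lock; lockless readers could walk the chain after
 * masking off bit 0.
 */
#include <stdatomic.h>
#include <stdint.h>

struct node {
	struct node *next;
	unsigned long key;
};

struct bucket {
	_Atomic uintptr_t first;	/* first node pointer; bit 0 is the lock */
};

#define LOCK_BIT ((uintptr_t)1)

static void bucket_lock(struct bucket *b)
{
	uintptr_t old;

	for (;;) {
		old = atomic_load_explicit(&b->first, memory_order_relaxed);
		if (old & LOCK_BIT)
			continue;	/* another thread holds this bucket */
		if (atomic_compare_exchange_weak_explicit(&b->first, &old,
				old | LOCK_BIT,
				memory_order_acquire, memory_order_relaxed))
			return;
	}
}

static void bucket_unlock(struct bucket *b)
{
	uintptr_t cur = atomic_load_explicit(&b->first, memory_order_relaxed);

	/* only the lock holder writes the head while the bit is set */
	atomic_store_explicit(&b->first, cur & ~LOCK_BIT, memory_order_release);
}

static void bucket_add_head(struct bucket *b, struct node *n)
{
	uintptr_t head;

	bucket_lock(b);
	head = atomic_load_explicit(&b->first, memory_order_relaxed) & ~LOCK_BIT;
	n->next = (struct node *)head;
	/* release so a lockless reader that sees n also sees n->next */
	atomic_store_explicit(&b->first, (uintptr_t)n | LOCK_BIT,
			      memory_order_release);
	bucket_unlock(b);
}

Compared with the old global dcache_hash_lock, hash operations on different chains no longer serialize against each other, and the lockless RCU lookup path (__d_lookup_rcu, visible in the hunks below) never takes the bucket lock at all; the cost is that a bit spinlock may not sleep while held, which is why the critical sections in __d_drop and __d_rehash stay short.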
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	133
1 file changed, 83 insertions(+), 50 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 07d1f6862dc7..9f04e1ba75b7 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -33,14 +33,18 @@
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
 #include <linux/hardirq.h>
+#include <linux/bit_spinlock.h>
+#include <linux/rculist_bl.h>
 #include "internal.h"
 
 /*
  * Usage:
  * dcache_inode_lock protects:
  * - i_dentry, d_alias, d_inode
- * dcache_hash_lock protects:
- * - the dcache hash table, s_anon lists
+ * dcache_hash_bucket lock protects:
+ * - the dcache hash table
+ * s_anon bl list spinlock protects:
+ * - the s_anon list (see __d_drop)
  * dcache_lru_lock protects:
  * - the dcache lru lists and counters
  * d_lock protects:
@@ -57,7 +61,8 @@
  * dcache_inode_lock
  * dentry->d_lock
  * dcache_lru_lock
- * dcache_hash_lock
+ * dcache_hash_bucket lock
+ * s_anon lock
  *
  * If there is an ancestor relationship:
  * dentry->d_parent->...->d_parent->d_lock
@@ -74,7 +79,6 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
@@ -96,7 +100,29 @@ static struct kmem_cache *dentry_cache __read_mostly;
 
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
-static struct hlist_head *dentry_hashtable __read_mostly;
+
+struct dcache_hash_bucket {
+	struct hlist_bl_head head;
+};
+static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+
+static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+					unsigned long hash)
+{
+	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
+	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+	return dentry_hashtable + (hash & D_HASHMASK);
+}
+
+static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+{
+	bit_spin_lock(0, (unsigned long *)&b->head.first);
+}
+
+static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+{
+	__bit_spin_unlock(0, (unsigned long *)&b->head.first);
+}
 
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
@@ -144,7 +170,7 @@ static void d_free(struct dentry *dentry)
 		dentry->d_op->d_release(dentry);
 
 	/* if dentry was never inserted into hash, immediate free is OK */
-	if (hlist_unhashed(&dentry->d_hash))
+	if (hlist_bl_unhashed(&dentry->d_hash))
 		__d_free(&dentry->d_u.d_rcu);
 	else
 		call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -302,11 +328,27 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
 void __d_drop(struct dentry *dentry)
 {
 	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
-		dentry->d_flags |= DCACHE_UNHASHED;
-		spin_lock(&dcache_hash_lock);
-		hlist_del_rcu(&dentry->d_hash);
-		spin_unlock(&dcache_hash_lock);
-		dentry_rcuwalk_barrier(dentry);
+		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
+			bit_spin_lock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_init(&dentry->d_hash);
+			__bit_spin_unlock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+		} else {
+			struct dcache_hash_bucket *b;
+			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+			spin_lock_bucket(b);
+			/*
+			 * We may not actually need to put DCACHE_UNHASHED
+			 * manipulations under the hash lock, but follow
+			 * the principle of least surprise.
+			 */
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_rcu(&dentry->d_hash);
+			spin_unlock_bucket(b);
+			dentry_rcuwalk_barrier(dentry);
+		}
 	}
 }
 EXPORT_SYMBOL(__d_drop);
@@ -961,8 +1003,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
 	spin_unlock(&dentry->d_lock);
 	shrink_dcache_for_umount_subtree(dentry);
 
-	while (!hlist_empty(&sb->s_anon)) {
-		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
+	while (!hlist_bl_empty(&sb->s_anon)) {
+		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
 		shrink_dcache_for_umount_subtree(dentry);
 	}
 }
@@ -1263,7 +1305,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	dentry->d_sb = NULL;
 	dentry->d_op = NULL;
 	dentry->d_fsdata = NULL;
-	INIT_HLIST_NODE(&dentry->d_hash);
+	INIT_HLIST_BL_NODE(&dentry->d_hash);
 	INIT_LIST_HEAD(&dentry->d_lru);
 	INIT_LIST_HEAD(&dentry->d_subdirs);
 	INIT_LIST_HEAD(&dentry->d_alias);
@@ -1459,14 +1501,6 @@ struct dentry * d_alloc_root(struct inode * root_inode)
 }
 EXPORT_SYMBOL(d_alloc_root);
 
-static inline struct hlist_head *d_hash(struct dentry *parent,
-					unsigned long hash)
-{
-	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
-	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
-	return dentry_hashtable + (hash & D_HASHMASK);
-}
-
 /**
  * d_obtain_alias - find or allocate a dentry for a given inode
  * @inode: inode to allocate the dentry for
@@ -1521,11 +1555,11 @@ struct dentry *d_obtain_alias(struct inode *inode)
 	tmp->d_sb = inode->i_sb;
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
-	tmp->d_flags &= ~DCACHE_UNHASHED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	spin_lock(&dcache_hash_lock);
-	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
-	spin_unlock(&dcache_hash_lock);
+	bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+	tmp->d_flags &= ~DCACHE_UNHASHED;
+	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
+	__bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&dcache_inode_lock);
 
@@ -1567,7 +1601,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 			d_move(new, dentry);
 			iput(inode);
 		} else {
-			/* already taking dcache_inode_lock, so d_add() by hand */
+			/* already got dcache_inode_lock, so d_add() by hand */
 			__d_instantiate(dentry, inode);
 			spin_unlock(&dcache_inode_lock);
 			security_d_instantiate(dentry, inode);
@@ -1702,8 +1736,8 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
 	const unsigned char *str = name->name;
-	struct hlist_head *head = d_hash(parent, hash);
-	struct hlist_node *node;
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
 	struct dentry *dentry;
 
 	/*
@@ -1726,7 +1760,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
 	 *
 	 * See Documentation/vfs/dcache-locking.txt for more details.
 	 */
-	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
 		struct inode *i;
 		const char *tname;
 		int tlen;
@@ -1820,8 +1854,8 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
 	const unsigned char *str = name->name;
-	struct hlist_head *head = d_hash(parent,hash);
-	struct hlist_node *node;
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
 	struct dentry *found = NULL;
 	struct dentry *dentry;
 
@@ -1847,7 +1881,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
 	 */
 	rcu_read_lock();
 
-	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
 		const char *tname;
 		int tlen;
 
@@ -1998,11 +2032,13 @@ again:
 }
 EXPORT_SYMBOL(d_delete);
 
-static void __d_rehash(struct dentry * entry, struct hlist_head *list)
+static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
 {
-
+	BUG_ON(!d_unhashed(entry));
+	spin_lock_bucket(b);
 	entry->d_flags &= ~DCACHE_UNHASHED;
-	hlist_add_head_rcu(&entry->d_hash, list);
+	hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+	spin_unlock_bucket(b);
 }
 
 static void _d_rehash(struct dentry * entry)
@@ -2020,9 +2056,7 @@ static void _d_rehash(struct dentry * entry)
 void d_rehash(struct dentry * entry)
 {
 	spin_lock(&entry->d_lock);
-	spin_lock(&dcache_hash_lock);
 	_d_rehash(entry);
-	spin_unlock(&dcache_hash_lock);
 	spin_unlock(&entry->d_lock);
 }
 EXPORT_SYMBOL(d_rehash);
@@ -2165,15 +2199,16 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	write_seqcount_begin(&dentry->d_seq);
 	write_seqcount_begin(&target->d_seq);
 
-	/* Move the dentry to the target hash queue, if on different bucket */
-	spin_lock(&dcache_hash_lock);
-	if (!d_unhashed(dentry))
-		hlist_del_rcu(&dentry->d_hash);
+	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
+
+	/*
+	 * Move the dentry to the target hash queue. Don't bother checking
+	 * for the same hash queue because of how unlikely it is.
+	 */
+	__d_drop(dentry);
 	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
-	spin_unlock(&dcache_hash_lock);
 
 	/* Unhash the target: dput() will then get rid of it */
-	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
 	__d_drop(target);
 
 	list_del(&dentry->d_u.d_child);
@@ -2369,9 +2404,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 
 	spin_lock(&actual->d_lock);
 found:
-	spin_lock(&dcache_hash_lock);
 	_d_rehash(actual);
-	spin_unlock(&dcache_hash_lock);
 	spin_unlock(&actual->d_lock);
 	spin_unlock(&dcache_inode_lock);
 out_nolock:
@@ -2953,7 +2986,7 @@ static void __init dcache_init_early(void)
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					HASH_EARLY,
@@ -2962,7 +2995,7 @@ static void __init dcache_init_early(void)
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }
 
 static void __init dcache_init(void)
@@ -2985,7 +3018,7 @@ static void __init dcache_init(void)
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					0,
@@ -2994,7 +3027,7 @@ static void __init dcache_init(void)
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }
 
 /* SLAB cache for __getname() consumers */