path: root/fs
author	Christoph Hellwig <hch@infradead.org>	2011-04-25 14:01:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-25 21:14:10 -0400
commit	1879fd6a26571fd4e8e1f4bb3e7537bc936b1fe7 (patch)
tree	5fb41fdaef01d462ff9c7ecfe688760a44e89b6a /fs
parent	3dd2ee4824b668a635d6d2bb6bc73f33708cab9f (diff)
add hlist_bl_lock/unlock helpers
Now that the whole dcache_hash_bucket crap is gone, go all the way and also remove the weird locking layering violations for locking the hash buckets. Add hlist_bl_lock/unlock helpers to move the locking into the list abstraction instead of requiring each caller to open code it. After all, allowing for the bit locks is the whole point of these helpers over the plain hlist variant.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--	fs/dcache.c	| 22
-rw-r--r--	fs/gfs2/glock.c	|  6
2 files changed, 8 insertions(+), 20 deletions(-)
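
The diffstat above is limited to fs/, so the hlist_bl_lock/hlist_bl_unlock helpers this commit introduces (presumably in include/linux/list_bl.h) do not appear in the diff below. Reconstructed from the open-coded spin_lock_bucket/spin_unlock_bucket variants that the patch removes, they amount to roughly the following sketch; the exact header location and comments are assumptions:

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	/* bit 0 of the bucket's first pointer doubles as the bucket spinlock */
	bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	/* non-atomic unlock is fine: the bucket is only modified under this lock */
	__bit_spin_unlock(0, (unsigned long *)b);
}

Note that the removed dcache helper cast &b->first while the gfs2 helper cast the head pointer itself; the two are equivalent because first is the head's first member, which is why a single shared helper can replace both.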
diff --git a/fs/dcache.c b/fs/dcache.c
index d600a0af3b2e..22a0ef41bad1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -109,16 +109,6 @@ static inline struct hlist_bl_head *d_hash(struct dentry *parent,
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct hlist_bl_head *b)
-{
-	bit_spin_lock(0, (unsigned long *)&b->first);
-}
-
-static inline void spin_unlock_bucket(struct hlist_bl_head *b)
-{
-	__bit_spin_unlock(0, (unsigned long *)&b->first);
-}
-
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
@@ -334,10 +324,10 @@ void __d_drop(struct dentry *dentry)
 	else
 		b = d_hash(dentry->d_parent, dentry->d_name.hash);
 
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	__hlist_bl_del(&dentry->d_hash);
 	dentry->d_hash.pprev = NULL;
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 
 	dentry_rcuwalk_barrier(dentry);
 	}
@@ -1594,9 +1584,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	spin_lock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	spin_unlock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_unlock(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 	security_d_instantiate(tmp, inode);
@@ -2076,10 +2066,10 @@ EXPORT_SYMBOL(d_delete);
 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 }
 
 static void _d_rehash(struct dentry * entry)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f07643e21bfa..7a4fb630a320 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -93,14 +93,12 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
 
 static inline void spin_lock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	bit_spin_lock(0, (unsigned long *)bl);
+	hlist_bl_lock(&gl_hash_table[hash]);
 }
 
 static inline void spin_unlock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	__bit_spin_unlock(0, (unsigned long *)bl);
+	hlist_bl_unlock(&gl_hash_table[hash]);
 }
 
 static void gfs2_glock_dealloc(struct rcu_head *rcu)