author		Linus Torvalds <torvalds@linux-foundation.org>	2011-04-24 10:58:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-04-24 10:58:46 -0400
commit		dea3667bc3c2a0521e8d8855e407a49d9d70028c (patch)
tree		bda1a624616f2e1bf9d3ec38b30ab002379a8e6a /fs
parent		b07ad9967f40b164af77205027352ba53729cf5a (diff)
vfs: get rid of insane dentry hashing rules
The dentry hashing rules have been really quite complicated for a long
while, in odd ways. That made functions like __d_drop() very fragile
and non-obvious.
In particular, whether a dentry was hashed or not was indicated with an
explicit DCACHE_UNHASHED bit. That's despite the fact that the hash
abstraction that the dentries use actually has an 'is this entry hashed
or not' model (which is a simple test of the 'pprev' pointer).
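(For reference, the hlist_bl primitive in include/linux/list_bl.h boils down
to a NULL test on 'pprev'; paraphrased here, not part of this patch:)

	struct hlist_bl_node {
		struct hlist_bl_node *next, **pprev;
	};

	static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
	{
		return !h->pprev;	/* NULL pprev == not on any chain */
	}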
That was done because we used the normal 'is this entry
unhashed' model to mark whether the dentry had _ever_ been hashed in the
dentry hash tables, and that logic goes back many years (commit
b3423415fbc2: "dcache: avoid RCU for never-hashed dentries").
That, in turn, meant that __d_drop had totally different unhashing logic
for the dentry hash table case and for the anonymous dcache case,
because in order to use the "is this dentry hashed" logic as a flag for
whether it had ever been on the RCU hash table, we had to unhash such a
dentry differently, so that a dentry that had been RCU-visible would
never look 'unhashed' at free time and get freed without the RCU delay.
That's just insane. It made the logic really hard to follow, when there
were two different kinds of "unhashed" states, and one of them (the one
that used "list_bl_unhashed()") really had nothing at all to do with
being unhashed per se, but with a very subtle lifetime rule instead.
So turn all of it around, and make it logical.
Instead of having a DCACHE_UNHASHED bit in d_flags to indicate whether
the dentry is on the hash chains or not, use the hash chain unhashed
logic for that. Suddenly "d_unhashed()" just uses "hlist_bl_unhashed()",
and everything makes sense.
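(The diffstat below is limited to fs/, but the corresponding dcache.h helper
ends up looking roughly like this:)

	static inline int d_unhashed(struct dentry *dentry)
	{
		return hlist_bl_unhashed(&dentry->d_hash);
	}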
And for the lifetime rule, just use an explicit DCACHE_RCUACCESS bit.
If we ever insert the dentry into the dentry hash table so that it is
visible to RCU lookup, we mark it DCACHE_RCUACCESS to show that it now
needs the RCU lifetime rules. Now suddenly that test at dentry free
time makes sense too.
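(The two halves of that rule, paraphrased from the __d_rehash() and d_free()
hunks below: set the flag when the dentry becomes RCU-visible, test it when
freeing:)

	/* hashing: the dentry becomes visible to RCU lookup */
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);

	/* freeing: only RCU-visible dentries need the grace period */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);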
And because unhashing now is sane and doesn't depend on where the dentry
got unhashed from (because the dentry hash chain details don't have any
subtle side effects), we can re-unify the __d_drop() logic and use
common code for the unhashing.
Also fix one more open-coded hash chain bit_spin_lock() that I missed in
the previous chain locking cleanup commit.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--	fs/dcache.c	42
1 file changed, 16 insertions(+), 26 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7108c15685dd..d600a0af3b2e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -164,8 +164,8 @@ static void d_free(struct dentry *dentry)
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
 
-	/* if dentry was never inserted into hash, immediate free is OK */
-	if (hlist_bl_unhashed(&dentry->d_hash))
+	/* if dentry was never visible to RCU, immediate free is OK */
+	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 		__d_free(&dentry->d_u.d_rcu);
 	else
 		call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -327,28 +327,19 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
  */
 void __d_drop(struct dentry *dentry)
 {
-	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+	if (!d_unhashed(dentry)) {
 		struct hlist_bl_head *b;
-		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
+		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
 			b = &dentry->d_sb->s_anon;
-			spin_lock_bucket(b);
-			dentry->d_flags |= DCACHE_UNHASHED;
-			hlist_bl_del_init(&dentry->d_hash);
-			spin_unlock_bucket(b);
-		} else {
-			struct hlist_bl_head *b;
+		else
 			b = d_hash(dentry->d_parent, dentry->d_name.hash);
-			spin_lock_bucket(b);
-			/*
-			 * We may not actually need to put DCACHE_UNHASHED
-			 * manipulations under the hash lock, but follow
-			 * the principle of least surprise.
-			 */
-			dentry->d_flags |= DCACHE_UNHASHED;
-			hlist_bl_del_rcu(&dentry->d_hash);
-			spin_unlock_bucket(b);
-			dentry_rcuwalk_barrier(dentry);
-		}
+
+		spin_lock_bucket(b);
+		__hlist_bl_del(&dentry->d_hash);
+		dentry->d_hash.pprev = NULL;
+		spin_unlock_bucket(b);
+
+		dentry_rcuwalk_barrier(dentry);
 	}
 }
 EXPORT_SYMBOL(__d_drop);
@@ -1301,7 +1292,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	dname[name->len] = 0;
 
 	dentry->d_count = 1;
-	dentry->d_flags = DCACHE_UNHASHED;
+	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
 	seqcount_init(&dentry->d_seq);
 	dentry->d_inode = NULL;
@@ -1603,10 +1594,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
-	tmp->d_flags &= ~DCACHE_UNHASHED;
+	spin_lock_bucket(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	__bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+	spin_unlock_bucket(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 	security_d_instantiate(tmp, inode);
@@ -2087,7 +2077,7 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
 	spin_lock_bucket(b);
-	entry->d_flags &= ~DCACHE_UNHASHED;
+	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
 	spin_unlock_bucket(b);
 }