path: root/fs/dcache.c
author:    Al Viro <viro@zeniv.linux.org.uk>  2016-04-15 03:33:13 -0400
committer: Al Viro <viro@zeniv.linux.org.uk>  2016-05-02 19:49:27 -0400
commit:    d9171b9345261e0d941d92fdda5672b5db67f968
tree:      233065eea867fee5f67823e72678923b14cb8a3b  /fs/dcache.c
parent:    94bdd655caba2080ae81d83d756d325abdffcb9f
parallel lookups machinery, part 4 (and last)
If we *do* run into an in-lookup match, we need to wait for it to cease being in-lookup. Fortunately, we have unused space in in-lookup dentries - d_lru is never looked at until the dentry stops being in-lookup - so we can stash a pointer to a wait_queue_head there, taken from the stack frame of the caller of ->lookup(). Some precautions are needed while waiting, but it's not that hard: we hold a reference to the dentry we are waiting for, so it can't go away, and if it's found to be in-lookup, the wait_queue_head is still alive and will remain so at least while ->d_lock is held. Moreover, the condition we are waiting for becomes true at the same point where everything on that wq gets woken up, so we can just add ourselves to the queue once.

d_alloc_parallel() gets a pointer to the wait_queue_head_t from its caller; lookup_slow() is adjusted accordingly, and d_add_ci() is taught to use d_alloc_parallel() if the dentry passed to it happens to be an in-lookup one (i.e. if it's been called from a parallel lookup). That's pretty much it - all that remains is to switch ->i_mutex to an rwsem and have lookup_slow() take it shared.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
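The "unused space" in question is d_lru: an in-lookup dentry is never on an LRU list, so its d_lru slot can be overlaid with the pointer to the caller's wait queue. The include/linux/dcache.h half of the change is outside this diffstat (which is limited to fs/dcache.c); after the series the relevant part of struct dentry looks like the sketch below - shown for context, not part of the diff that follows.

	/* struct dentry fragment, include/linux/dcache.h after this series */
	struct dentry {
		/* ... */
		union {
			struct list_head d_lru;		/* LRU list */
			wait_queue_head_t *d_wait;	/* in-lookup ones only */
		};
		/* ... */
	};

This is also why __d_lookup_done() below does INIT_LIST_HEAD(&dentry->d_lru) once it clears d_wait: the dentry is about to be treated as a normal one again, so the overlaid list_head has to be valid.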
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	94
1 file changed, 74 insertions(+), 20 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index ea2de7c19b08..59fcffcbf096 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1987,28 +1987,36 @@ EXPORT_SYMBOL(d_obtain_root);
 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
 			struct qstr *name)
 {
-	struct dentry *found;
-	struct dentry *new;
+	struct dentry *found, *res;
 
 	/*
 	 * First check if a dentry matching the name already exists,
 	 * if not go ahead and create it now.
 	 */
 	found = d_hash_and_lookup(dentry->d_parent, name);
-	if (!found) {
-		new = d_alloc(dentry->d_parent, name);
-		if (!new) {
-			found = ERR_PTR(-ENOMEM);
-		} else {
-			found = d_splice_alias(inode, new);
-			if (found) {
-				dput(new);
-				return found;
-			}
-			return new;
-		}
+	if (found) {
+		iput(inode);
+		return found;
+	}
+	if (d_in_lookup(dentry)) {
+		found = d_alloc_parallel(dentry->d_parent, name,
+					 dentry->d_wait);
+		if (IS_ERR(found) || !d_in_lookup(found)) {
+			iput(inode);
+			return found;
+		}
+	} else {
+		found = d_alloc(dentry->d_parent, name);
+		if (!found) {
+			iput(inode);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
-	iput(inode);
+	res = d_splice_alias(inode, found);
+	if (res) {
+		dput(found);
+		return res;
+	}
 	return found;
 }
 EXPORT_SYMBOL(d_add_ci);
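For context, d_add_ci() is the helper case-insensitive filesystems call from their ->lookup() methods once they have located the inode under its exact on-disk spelling. A hedged sketch of such a caller is below; foo_lookup() and foo_find_ci_inode() are hypothetical names, not kernel functions. When this ->lookup() is invoked from the parallel-lookup path, the dentry it receives is in-lookup, which is precisely the case the hunk above now routes through d_alloc_parallel().

	/* Hypothetical case-insensitive ->lookup() built on d_add_ci(). */
	static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
					 unsigned int flags)
	{
		struct qstr ci_name;	/* exact on-disk spelling of the name */
		struct inode *inode;

		/* hypothetical helper: case-insensitive directory search that
		 * returns the inode and the canonical name it was found under */
		inode = foo_find_ci_inode(dir, &dentry->d_name, &ci_name);
		if (IS_ERR(inode))
			return ERR_CAST(inode);

		/* d_add_ci() consumes the inode reference and splices in a
		 * dentry carrying the on-disk name */
		return d_add_ci(dentry, inode, &ci_name);
	}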
@@ -2391,8 +2399,23 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
 	smp_store_release(&dir->i_dir_seq, n + 2);
 }
 
+static void d_wait_lookup(struct dentry *dentry)
+{
+	if (d_in_lookup(dentry)) {
+		DECLARE_WAITQUEUE(wait, current);
+		add_wait_queue(dentry->d_wait, &wait);
+		do {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dentry->d_lock);
+			schedule();
+			spin_lock(&dentry->d_lock);
+		} while (d_in_lookup(dentry));
+	}
+}
+
 struct dentry *d_alloc_parallel(struct dentry *parent,
-				const struct qstr *name)
+				const struct qstr *name,
+				wait_queue_head_t *wq)
 {
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
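The wait_queue_head that ends up in ->d_wait lives on the stack of whoever drives the lookup. The fs/namei.c side of the series is not in this diffstat; what a lookup_slow()-style caller does after this change looks roughly like the sketch below (simplified, with IS_DEADDIR and retry handling elided).

	/* Simplified sketch of the caller side (cf. lookup_slow() in
	 * fs/namei.c, adjusted by this series but outside this diffstat). */
	static struct dentry *slow_lookup_sketch(const struct qstr *name,
						 struct dentry *dir,
						 unsigned int flags)
	{
		struct inode *inode = dir->d_inode;
		struct dentry *dentry, *old;
		/* the queue in-lookup dentries will point at via ->d_wait */
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

		inode_lock(inode);
		dentry = d_alloc_parallel(dir, name, &wq);
		if (IS_ERR(dentry))
			goto out;
		if (!d_in_lookup(dentry))
			goto out;	/* someone else finished this lookup */
		old = inode->i_op->lookup(inode, dentry, flags);
		d_lookup_done(dentry);	/* drop in-lookup state, wake waiters */
		if (old) {
			dput(dentry);
			dentry = old;
		}
	out:
		inode_unlock(inode);
		return dentry;
	}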
@@ -2463,18 +2486,47 @@ retry:
 		}
 		dget(dentry);
 		hlist_bl_unlock(b);
-		/* impossible until we actually enable parallel lookups */
-		BUG();
-		/* and this will be "wait for it to stop being in-lookup" */
-		/* this one will be handled in the next commit */
+		/* somebody is doing lookup for it right now; wait for it */
+		spin_lock(&dentry->d_lock);
+		d_wait_lookup(dentry);
+		/*
+		 * it's not in-lookup anymore; in principle we should repeat
+		 * everything from dcache lookup, but it's likely to be what
+		 * d_lookup() would've found anyway. If it is, just return it;
+		 * otherwise we really have to repeat the whole thing.
+		 */
+		if (unlikely(dentry->d_name.hash != hash))
+			goto mismatch;
+		if (unlikely(dentry->d_parent != parent))
+			goto mismatch;
+		if (unlikely(d_unhashed(dentry)))
+			goto mismatch;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			int tlen = dentry->d_name.len;
+			const char *tname = dentry->d_name.name;
+			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
+				goto mismatch;
+		} else {
+			if (unlikely(dentry->d_name.len != len))
+				goto mismatch;
+			if (unlikely(dentry_cmp(dentry, str, len)))
+				goto mismatch;
+		}
+		/* OK, it *is* a hashed match; return it */
+		spin_unlock(&dentry->d_lock);
 		dput(new);
 		return dentry;
 	}
 	/* we can't take ->d_lock here; it's OK, though. */
 	new->d_flags |= DCACHE_PAR_LOOKUP;
+	new->d_wait = wq;
 	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
 	hlist_bl_unlock(b);
 	return new;
+mismatch:
+	spin_unlock(&dentry->d_lock);
+	dput(dentry);
+	goto retry;
 }
 EXPORT_SYMBOL(d_alloc_parallel);
 
@@ -2485,9 +2537,11 @@ void __d_lookup_done(struct dentry *dentry)
 	hlist_bl_lock(b);
 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+	wake_up_all(dentry->d_wait);
+	dentry->d_wait = NULL;
 	hlist_bl_unlock(b);
 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
-	/* more stuff will land here */
+	INIT_LIST_HEAD(&dentry->d_lru);
 }
 EXPORT_SYMBOL(__d_lookup_done);
 
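Note the ordering here: DCACHE_PAR_LOOKUP is cleared and wake_up_all() is issued under the same hlist_bl_lock(), so the condition d_wait_lookup() checks becomes true at the same point its wakeup arrives - which is why a single add_wait_queue() suffices, as the commit message says. Callers normally reach this through the d_lookup_done() inline wrapper in include/linux/dcache.h (outside this diffstat), which is roughly:

	/* include/linux/dcache.h (not in this diffstat): the wrapper callers
	 * use; it only takes ->d_lock when the dentry is actually in-lookup */
	static inline void d_lookup_done(struct dentry *dentry)
	{
		if (unlikely(d_in_lookup(dentry))) {
			spin_lock(&dentry->d_lock);
			__d_lookup_done(dentry);
			spin_unlock(&dentry->d_lock);
		}
	}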