author	Al Viro <viro@zeniv.linux.org.uk>	2016-04-15 00:58:55 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2016-05-02 19:49:26 -0400
commit	84e710da2a1dfacfc87f604869a4d22df91ce6cd (patch)
tree	37625d5ef48c43a17c6da2f5a80b107da5524957 /fs/dcache.c
parent	85c7f81041d57cfe9dc97f4680d5586b54534a39 (diff)
parallel lookups machinery, part 2
We'll need to verify that there's neither a hashed nor an in-lookup dentry with the desired parent/name before adding to the in-lookup set.

One possible solution would be to hold the parent's ->d_lock through both checks, but while the in-lookup set is relatively small at any time, the dcache is not. And holding the parent's ->d_lock through something like __d_lookup_rcu() would suck too badly.

So we leave the parent's ->d_lock alone, which means that we have to watch out for the following scenario:
	* we verify that there's no hashed match
	* an existing in-lookup match gets hashed by another process
	* we verify that there are no in-lookup matches and decide
	  that everything's fine.

Solution: a per-directory kinda-sorta seqlock, bumped around the times we hash something that used to be in-lookup or move (and hash) something in place of an in-lookup dentry. Then the above would turn into:
	* read the counter
	* do the dcache lookup
	* if no matches are found, check for in-lookup matches
	* if there had been none of those either, check whether the
	  counter has changed; repeat if it has.

The "kinda-sorta" part is due to the fact that we don't have much spare space in the inode. There is a spare word (shared with i_bdev/i_cdev/i_pipe), so the counter part is not a problem, but a spinlock is a different story.

We could use the parent's ->d_lock, and it would be less painful in terms of contention, but for __d_add() it would be rather inconvenient to grab; we could do that (using lock_parent()), but... Fortunately, we can get serialization on the counter itself, and it might be a good idea in general: we can use cmpxchg() in a loop to get from even to odd, and smp_store_release() to get from odd back to even.

This commit adds the counter and the updating logic; the readers will be added in the next commit.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
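To make the even/odd protocol concrete, here is a minimal standalone sketch written against C11 atomics rather than the kernel's primitives: cmpxchg() becomes a compare-exchange loop and smp_store_release() a release store. Everything in it (dir_seq, writer_begin(), writer_end(), reader_lookup()) is an illustrative stand-in, not kernel API, and the reader side only mirrors the bullet list above; the real readers arrive in the next commit.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Stand-in for the spare word in the inode (i_dir_seq). */
	static _Atomic unsigned dir_seq;

	/* even -> odd, like start_dir_add(): spin until no other writer
	 * holds the counter odd, then claim it with a compare-exchange. */
	static unsigned writer_begin(void)
	{
		for (;;) {
			unsigned n = atomic_load_explicit(&dir_seq,
							  memory_order_relaxed);
			if (!(n & 1) &&
			    atomic_compare_exchange_weak_explicit(&dir_seq,
						&n, n + 1,
						memory_order_acquire,
						memory_order_relaxed))
				return n;
			/* cpu_relax() would sit here in the kernel version */
		}
	}

	/* odd -> even, like end_dir_add(): publish with release semantics. */
	static void writer_end(unsigned n)
	{
		atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
	}

	/* Reader retry loop in the shape the bullet list above describes. */
	static int reader_lookup(void)
	{
		unsigned seq;
		int found;
	retry:
		seq = atomic_load_explicit(&dir_seq, memory_order_acquire);
		if (seq & 1)
			goto retry;	/* a writer is mid-update */
		found = 0;		/* stand-in for the dcache lookup ... */
					/* ... and for the in-lookup set check */
		if (atomic_load_explicit(&dir_seq, memory_order_acquire) != seq)
			goto retry;	/* counter moved: redo both checks */
		return found;
	}

	int main(void)
	{
		unsigned n = writer_begin();
		/* hash the formerly in-lookup entry here */
		writer_end(n);
		printf("counter after one writer pass: %u\n",
		       atomic_load_explicit(&dir_seq, memory_order_relaxed));
		printf("reader found: %d\n", reader_lookup());
		return 0;
	}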
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 0f1d93866e69..10988f7e5a23 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2364,6 +2364,22 @@ void d_rehash(struct dentry * entry)
 }
 EXPORT_SYMBOL(d_rehash);
 
+static inline unsigned start_dir_add(struct inode *dir)
+{
+
+	for (;;) {
+		unsigned n = dir->i_dir_seq;
+		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+			return n;
+		cpu_relax();
+	}
+}
+
+static inline void end_dir_add(struct inode *dir, unsigned n)
+{
+	smp_store_release(&dir->i_dir_seq, n + 2);
+}
+
 void __d_lookup_done(struct dentry *dentry)
 {
 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
@@ -2375,9 +2391,14 @@ EXPORT_SYMBOL(__d_lookup_done);
 
 static inline void __d_add(struct dentry *dentry, struct inode *inode)
 {
+	struct inode *dir = NULL;
+	unsigned n;
 	spin_lock(&dentry->d_lock);
-	if (unlikely(d_in_lookup(dentry)))
+	if (unlikely(d_in_lookup(dentry))) {
+		dir = dentry->d_parent->d_inode;
+		n = start_dir_add(dir);
 		__d_lookup_done(dentry);
+	}
 	if (inode) {
 		unsigned add_flags = d_flags_for_inode(inode);
 		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2387,6 +2408,8 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
 		__fsnotify_d_instantiate(dentry);
 	}
 	_d_rehash(dentry);
+	if (dir)
+		end_dir_add(dir, n);
 	spin_unlock(&dentry->d_lock);
 	if (inode)
 		spin_unlock(&inode->i_lock);
@@ -2616,6 +2639,8 @@ static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
 static void __d_move(struct dentry *dentry, struct dentry *target,
 		     bool exchange)
 {
+	struct inode *dir = NULL;
+	unsigned n;
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
 
@@ -2623,8 +2648,11 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
 	BUG_ON(d_ancestor(target, dentry));
 
 	dentry_lock_for_move(dentry, target);
-	if (unlikely(d_in_lookup(target)))
+	if (unlikely(d_in_lookup(target))) {
+		dir = target->d_parent->d_inode;
+		n = start_dir_add(dir);
 		__d_lookup_done(target);
+	}
 
 	write_seqcount_begin(&dentry->d_seq);
 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
@@ -2674,6 +2702,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
 	write_seqcount_end(&target->d_seq);
 	write_seqcount_end(&dentry->d_seq);
 
+	if (dir)
+		end_dir_add(dir, n);
 	dentry_unlock_for_move(dentry, target);
 }
 
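Both call sites end up with the same bracketing discipline. Condensed from the __d_add() hunks above, with comments added (the middle of the function is elided; this excerpt is not a compilable unit on its own):

	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);		/* i_dir_seq: even -> odd */
		__d_lookup_done(dentry);	/* drop out of the in-lookup set */
	}
	/* ... instantiate the dentry ... */
	_d_rehash(dentry);			/* hash what used to be in-lookup */
	if (dir)
		end_dir_add(dir, n);		/* i_dir_seq: odd -> even */

The __d_move() case is the same pattern, except that the counter belongs to the in-lookup target's parent, since that is the directory whose lookup results the move changes.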