aboutsummaryrefslogtreecommitdiffstats
path: root/fs/dcache.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--fs/dcache.c149
1 files changed, 114 insertions, 35 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 7d34f04ec7aa..dc400fd29f4d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
511 * dentry_iput drops the locks, at which point nobody (except 511 * dentry_iput drops the locks, at which point nobody (except
512 * transient RCU lookups) can reach this dentry. 512 * transient RCU lookups) can reach this dentry.
513 */ 513 */
514 BUG_ON((int)dentry->d_lockref.count > 0); 514 BUG_ON(dentry->d_lockref.count > 0);
515 this_cpu_dec(nr_dentry); 515 this_cpu_dec(nr_dentry);
516 if (dentry->d_op && dentry->d_op->d_release) 516 if (dentry->d_op && dentry->d_op->d_release)
517 dentry->d_op->d_release(dentry); 517 dentry->d_op->d_release(dentry);
@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
564 struct dentry *parent = dentry->d_parent; 564 struct dentry *parent = dentry->d_parent;
565 if (IS_ROOT(dentry)) 565 if (IS_ROOT(dentry))
566 return NULL; 566 return NULL;
567 if (unlikely((int)dentry->d_lockref.count < 0)) 567 if (unlikely(dentry->d_lockref.count < 0))
568 return NULL; 568 return NULL;
569 if (likely(spin_trylock(&parent->d_lock))) 569 if (likely(spin_trylock(&parent->d_lock)))
570 return parent; 570 return parent;
@@ -593,6 +593,110 @@ again:
593 return parent; 593 return parent;
594} 594}
595 595
596/*
597 * Try to do a lockless dput(), and return whether that was successful.
598 *
599 * If unsuccessful, we return false, having already taken the dentry lock.
600 *
601 * The caller needs to hold the RCU read lock, so that the dentry is
602 * guaranteed to stay around even if the refcount goes down to zero!
603 */
604static inline bool fast_dput(struct dentry *dentry)
605{
606 int ret;
607 unsigned int d_flags;
608
609 /*
610	 * If we have a d_op->d_delete() operation, we should not
611	 * let the dentry count go to zero, so use "put_or_lock".
612 */
613 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
614 return lockref_put_or_lock(&dentry->d_lockref);
615
616 /*
617 * .. otherwise, we can try to just decrement the
618 * lockref optimistically.
619 */
620 ret = lockref_put_return(&dentry->d_lockref);
621
622 /*
623 * If the lockref_put_return() failed due to the lock being held
624 * by somebody else, the fast path has failed. We will need to
625 * get the lock, and then check the count again.
626 */
627 if (unlikely(ret < 0)) {
628 spin_lock(&dentry->d_lock);
629 if (dentry->d_lockref.count > 1) {
630 dentry->d_lockref.count--;
631 spin_unlock(&dentry->d_lock);
632 return 1;
633 }
634 return 0;
635 }
636
637 /*
638 * If we weren't the last ref, we're done.
639 */
640 if (ret)
641 return 1;
642
643 /*
644 * Careful, careful. The reference count went down
645 * to zero, but we don't hold the dentry lock, so
646 * somebody else could get it again, and do another
647 * dput(), and we need to not race with that.
648 *
649 * However, there is a very special and common case
650 * where we don't care, because there is nothing to
651 * do: the dentry is still hashed, it does not have
652 * a 'delete' op, and it's referenced and already on
653 * the LRU list.
654 *
655 * NOTE! Since we aren't locked, these values are
656 * not "stable". However, it is sufficient that at
657 * some point after we dropped the reference the
658 * dentry was hashed and the flags had the proper
659 * value. Other dentry users may have re-gotten
660 * a reference to the dentry and change that, but
661 * our work is done - we can leave the dentry
662 * around with a zero refcount.
663 */
664 smp_rmb();
665 d_flags = ACCESS_ONCE(dentry->d_flags);
666 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
667
668 /* Nothing to do? Dropping the reference was all we needed? */
669 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
670 return 1;
671
672 /*
673 * Not the fast normal case? Get the lock. We've already decremented
674 * the refcount, but we'll need to re-check the situation after
675 * getting the lock.
676 */
677 spin_lock(&dentry->d_lock);
678
679 /*
680 * Did somebody else grab a reference to it in the meantime, and
681 * we're no longer the last user after all? Alternatively, somebody
682 * else could have killed it and marked it dead. Either way, we
683 * don't need to do anything else.
684 */
685 if (dentry->d_lockref.count) {
686 spin_unlock(&dentry->d_lock);
687 return 1;
688 }
689
690 /*
691 * Re-get the reference we optimistically dropped. We hold the
692 * lock, and we just tested that it was zero, so we can just
693 * set it to 1.
694 */
695 dentry->d_lockref.count = 1;
696 return 0;
697}
698
699
596/* 700/*
597 * This is dput 701 * This is dput
598 * 702 *
@@ -625,8 +729,14 @@ void dput(struct dentry *dentry)
625 return; 729 return;
626 730
627repeat: 731repeat:
628 if (lockref_put_or_lock(&dentry->d_lockref)) 732 rcu_read_lock();
733 if (likely(fast_dput(dentry))) {
734 rcu_read_unlock();
629 return; 735 return;
736 }
737
738 /* Slow case: now with the dentry lock held */
739 rcu_read_unlock();
630 740
631 /* Unreachable? Get rid of it */ 741 /* Unreachable? Get rid of it */
632 if (unlikely(d_unhashed(dentry))) 742 if (unlikely(d_unhashed(dentry)))
@@ -813,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
813 * We found an inuse dentry which was not removed from 923 * We found an inuse dentry which was not removed from
814 * the LRU because of laziness during lookup. Do not free it. 924 * the LRU because of laziness during lookup. Do not free it.
815 */ 925 */
816 if ((int)dentry->d_lockref.count > 0) { 926 if (dentry->d_lockref.count > 0) {
817 spin_unlock(&dentry->d_lock); 927 spin_unlock(&dentry->d_lock);
818 if (parent) 928 if (parent)
819 spin_unlock(&parent->d_lock); 929 spin_unlock(&parent->d_lock);
@@ -2191,37 +2301,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2191} 2301}
2192EXPORT_SYMBOL(d_hash_and_lookup); 2302EXPORT_SYMBOL(d_hash_and_lookup);
2193 2303
2194/**
2195 * d_validate - verify dentry provided from insecure source (deprecated)
2196 * @dentry: The dentry alleged to be valid child of @dparent
2197 * @dparent: The parent dentry (known to be valid)
2198 *
2199 * An insecure source has sent us a dentry, here we verify it and dget() it.
2200 * This is used by ncpfs in its readdir implementation.
2202 * Zero is returned if the dentry is invalid.
2202 *
2203 * This function is slow for big directories, and deprecated, do not use it.
2204 */
2205int d_validate(struct dentry *dentry, struct dentry *dparent)
2206{
2207 struct dentry *child;
2208
2209 spin_lock(&dparent->d_lock);
2210 list_for_each_entry(child, &dparent->d_subdirs, d_child) {
2211 if (dentry == child) {
2212 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2213 __dget_dlock(dentry);
2214 spin_unlock(&dentry->d_lock);
2215 spin_unlock(&dparent->d_lock);
2216 return 1;
2217 }
2218 }
2219 spin_unlock(&dparent->d_lock);
2220
2221 return 0;
2222}
2223EXPORT_SYMBOL(d_validate);
2224
2225/* 2304/*
2226 * When a file is deleted, we have two options: 2305 * When a file is deleted, we have two options:
2227 * - turn this dentry into a negative dentry 2306 * - turn this dentry into a negative dentry