author		Linus Torvalds <torvalds@linux-foundation.org>	2015-01-09 18:19:03 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2015-01-25 23:16:29 -0500
commit		360f54796ed65939093ae373b92ebd5ef3341776 (patch)
tree		b89eeed53d0f4ff4ca9b753ca3f239e6fa0be005 /fs/dcache.c
parent		32426f6653cbfde1ca16aff27a530ee36332f796 (diff)
dcache: let the dentry count go down to zero without taking d_lock
We can be more aggressive about dropping the last reference without taking
d_lock, if we are clever and careful: in the common case the dentry is still
hashed, has no d_op->d_delete() operation, and already sits on the LRU list
with DCACHE_REFERENCED set, so letting the count reach zero requires no
further work. This is subtle.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
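[Editorial note for context: the fast path added below leans on lockref_put_return(), which decrements a lockref with a single compare-and-swap when the embedded spinlock is not held, and returns the new count, or -1 if it had to give up. The following is a minimal standalone sketch of that idea only; the names toy_lockref and toy_lockref_put_return and the packing of lock bit and count into one word are illustrative assumptions, not the kernel's lib/lockref.c, which packs a full spinlock and a 32-bit count into a 64-bit word.]

	#include <stdatomic.h>

	struct toy_lockref {
		_Atomic unsigned long lock_count;	/* bit 0: "lock held", bits 1..: count */
	};

	#define TOY_LOCKED	1UL
	#define TOY_ONE_REF	2UL	/* the count lives above the lock bit */

	/*
	 * Toy analogue of lockref_put_return(): decrement the count with one
	 * compare-and-swap, but only while the lock bit is clear.  Returns
	 * the new count, or -1 if the lock was held and the caller must fall
	 * back to taking it.  Callers must hold a reference, so the count is
	 * assumed nonzero on entry.
	 */
	static int toy_lockref_put_return(struct toy_lockref *ref)
	{
		unsigned long old = atomic_load(&ref->lock_count);

		while (!(old & TOY_LOCKED)) {
			unsigned long new = old - TOY_ONE_REF;

			/* On CAS failure, 'old' is refreshed and the lock bit re-checked. */
			if (atomic_compare_exchange_weak(&ref->lock_count, &old, new))
				return (int)(new >> 1);
		}
		return -1;	/* lock held: take the slow path */
	}

The property the patch exploits is exactly this: when the decrement succeeds and nothing further needs doing, dput() never touches d_lock at all.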
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	118
1 file changed, 114 insertions(+), 4 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 40432e59d72e..a14d00e9839e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
 	 * dentry_iput drops the locks, at which point nobody (except
 	 * transient RCU lookups) can reach this dentry.
 	 */
-	BUG_ON((int)dentry->d_lockref.count > 0);
+	BUG_ON(dentry->d_lockref.count > 0);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	struct dentry *parent = dentry->d_parent;
 	if (IS_ROOT(dentry))
 		return NULL;
-	if (unlikely((int)dentry->d_lockref.count < 0))
+	if (unlikely(dentry->d_lockref.count < 0))
 		return NULL;
 	if (likely(spin_trylock(&parent->d_lock)))
 		return parent;
@@ -590,6 +590,110 @@ again:
 	return parent;
 }
 
+/*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+static inline bool fast_dput(struct dentry *dentry)
+{
+	int ret;
+	unsigned int d_flags;
+
+	/*
+	 * If we have a d_op->d_delete() operation, we should not
+	 * let the dentry count go to zero, so use "put_or_lock".
+	 */
+	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+		return lockref_put_or_lock(&dentry->d_lockref);
+
+	/*
+	 * .. otherwise, we can try to just decrement the
+	 * lockref optimistically.
+	 */
+	ret = lockref_put_return(&dentry->d_lockref);
+
+	/*
+	 * If the lockref_put_return() failed due to the lock being held
+	 * by somebody else, the fast path has failed. We will need to
+	 * get the lock, and then check the count again.
+	 */
+	if (unlikely(ret < 0)) {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_lockref.count > 1) {
+			dentry->d_lockref.count--;
+			spin_unlock(&dentry->d_lock);
+			return 1;
+		}
+		return 0;
+	}
+
+	/*
+	 * If we weren't the last ref, we're done.
+	 */
+	if (ret)
+		return 1;
+
+	/*
+	 * Careful, careful. The reference count went down
+	 * to zero, but we don't hold the dentry lock, so
+	 * somebody else could get it again, and do another
+	 * dput(), and we need to not race with that.
+	 *
+	 * However, there is a very special and common case
+	 * where we don't care, because there is nothing to
+	 * do: the dentry is still hashed, it does not have
+	 * a 'delete' op, and it's referenced and already on
+	 * the LRU list.
+	 *
+	 * NOTE! Since we aren't locked, these values are
+	 * not "stable". However, it is sufficient that at
+	 * some point after we dropped the reference the
+	 * dentry was hashed and the flags had the proper
+	 * value. Other dentry users may have re-gotten
+	 * a reference to the dentry and change that, but
+	 * our work is done - we can leave the dentry
+	 * around with a zero refcount.
+	 */
+	smp_rmb();
+	d_flags = ACCESS_ONCE(dentry->d_flags);
+	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+	/* Nothing to do? Dropping the reference was all we needed? */
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+		return 1;
+
+	/*
+	 * Not the fast normal case? Get the lock. We've already decremented
+	 * the refcount, but we'll need to re-check the situation after
+	 * getting the lock.
+	 */
+	spin_lock(&dentry->d_lock);
+
+	/*
+	 * Did somebody else grab a reference to it in the meantime, and
+	 * we're no longer the last user after all? Alternatively, somebody
+	 * else could have killed it and marked it dead. Either way, we
+	 * don't need to do anything else.
+	 */
+	if (dentry->d_lockref.count) {
+		spin_unlock(&dentry->d_lock);
+		return 1;
+	}
+
+	/*
+	 * Re-get the reference we optimistically dropped. We hold the
+	 * lock, and we just tested that it was zero, so we can just
+	 * set it to 1.
+	 */
+	dentry->d_lockref.count = 1;
+	return 0;
+}
+
+
 /*
  * This is dput
  *
@@ -622,8 +726,14 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
-	if (lockref_put_or_lock(&dentry->d_lockref))
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
 		return;
+	}
+
+	/* Slow case: now with the dentry lock held */
+	rcu_read_unlock();
 
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
@@ -810,7 +920,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if ((int)dentry->d_lockref.count > 0) {
+		if (dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			if (parent)
 				spin_unlock(&parent->d_lock);
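[Editorial note: read as a whole, the patched dput() fast path has the shape sketched below. This is a simplified paraphrase of the hunk above, not a drop-in replacement; the name dput_sketch is hypothetical, and the unhash/kill/retain logic that the real function runs under d_lock is elided.]

	/* Sketch only: mirrors the structure of the patched dput(). */
	void dput_sketch(struct dentry *dentry)
	{
		if (!dentry)
			return;

		rcu_read_lock();	/* keeps the dentry's memory valid even at refcount 0 */
		if (fast_dput(dentry)) {
			/* Lockless decrement succeeded; d_lock was never taken. */
			rcu_read_unlock();
			return;
		}

		/* Slow case: fast_dput() returned with d_lock held and count == 1. */
		rcu_read_unlock();

		/* ... unhash/kill or retain the dentry under d_lock, as in the real dput() ... */
	}

The design point is the contract between the two halves: on the false return, fast_dput() has already taken d_lock and restored the count to 1, so the slow path starts from exactly the state the old lockref_put_or_lock()-based code saw.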