author    Linus Torvalds <torvalds@linux-foundation.org>    2015-01-09 18:19:03 -0500
committer Al Viro <viro@zeniv.linux.org.uk>                 2015-01-25 23:16:29 -0500
commit    360f54796ed65939093ae373b92ebd5ef3341776
tree      b89eeed53d0f4ff4ca9b753ca3f239e6fa0be005
parent    32426f6653cbfde1ca16aff27a530ee36332f796
dcache: let the dentry count go down to zero without taking d_lock
We can be more aggressive about this, if we are clever and careful. This is subtle.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
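What makes "count goes to zero without d_lock" possible is the lockref fast path: the spinlock and the reference count live in one 64-bit word, so the count can be dropped with a single compare-and-swap as long as the lock half is observed to be free. The sketch below illustrates that idea in user-space C11. It is not the kernel's CMPXCHG_LOOP; the layout and the names fake_lockref and fake_lockref_put_return are invented for illustration only.

/*
 * Illustrative sketch of the lockless put the patch relies on: lock and
 * count share one 64-bit word, so the count can be decremented with one
 * compare-and-swap while the lock half is observed free.  NOT the kernel's
 * CMPXCHG_LOOP; names and layout are invented for illustration.
 */
#include <stdatomic.h>
#include <stdint.h>

struct fake_lockref {
	_Atomic uint64_t lock_count;	/* low 32 bits: lock word, high 32 bits: signed count */
};

/*
 * Decrement the count and return the new value, or -1 if the lock half is
 * held (or the count is already <= 0) and the caller must fall back to
 * taking the real lock.
 */
int fake_lockref_put_return(struct fake_lockref *ref)
{
	uint64_t old = atomic_load_explicit(&ref->lock_count, memory_order_relaxed);

	while ((uint32_t)old == 0) {		/* only while the lock half is free */
		int32_t count = (int32_t)(old >> 32);

		if (count <= 0)			/* dead or already zero: slow path */
			return -1;

		uint64_t new = (uint64_t)(uint32_t)(count - 1) << 32;
		if (atomic_compare_exchange_weak_explicit(&ref->lock_count, &old, new,
							  memory_order_release,
							  memory_order_relaxed))
			return count - 1;	/* dropped a reference, lock never taken */
		/* CAS failed: 'old' now holds the current value, retry */
	}
	return -1;				/* locked by someone else: slow path */
}

In the kernel the same pattern is generated by the CMPXCHG_LOOP() macro in lib/lockref.c, which (roughly) checks arch_spin_value_unlocked() instead of the "low 32 bits are zero" test above.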
-rw-r--r--   fs/dcache.c               118
-rw-r--r--   include/linux/lockref.h     3
-rw-r--r--   lib/lockref.c              36
3 files changed, 144 insertions(+), 13 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 40432e59d72e..a14d00e9839e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
 	 * dentry_iput drops the locks, at which point nobody (except
 	 * transient RCU lookups) can reach this dentry.
 	 */
-	BUG_ON((int)dentry->d_lockref.count > 0);
+	BUG_ON(dentry->d_lockref.count > 0);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	struct dentry *parent = dentry->d_parent;
 	if (IS_ROOT(dentry))
 		return NULL;
-	if (unlikely((int)dentry->d_lockref.count < 0))
+	if (unlikely(dentry->d_lockref.count < 0))
 		return NULL;
 	if (likely(spin_trylock(&parent->d_lock)))
 		return parent;
@@ -590,6 +590,110 @@ again:
 	return parent;
 }
 
+/*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+static inline bool fast_dput(struct dentry *dentry)
+{
+	int ret;
+	unsigned int d_flags;
+
+	/*
+	 * If we have a d_op->d_delete() operation, we should not
+	 * let the dentry count go to zero, so use "put_or_lock".
+	 */
+	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+		return lockref_put_or_lock(&dentry->d_lockref);
+
+	/*
+	 * .. otherwise, we can try to just decrement the
+	 * lockref optimistically.
+	 */
+	ret = lockref_put_return(&dentry->d_lockref);
+
+	/*
+	 * If the lockref_put_return() failed due to the lock being held
+	 * by somebody else, the fast path has failed. We will need to
+	 * get the lock, and then check the count again.
+	 */
+	if (unlikely(ret < 0)) {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_lockref.count > 1) {
+			dentry->d_lockref.count--;
+			spin_unlock(&dentry->d_lock);
+			return 1;
+		}
+		return 0;
+	}
+
+	/*
+	 * If we weren't the last ref, we're done.
+	 */
+	if (ret)
+		return 1;
+
+	/*
+	 * Careful, careful. The reference count went down
+	 * to zero, but we don't hold the dentry lock, so
+	 * somebody else could get it again, and do another
+	 * dput(), and we need to not race with that.
+	 *
+	 * However, there is a very special and common case
+	 * where we don't care, because there is nothing to
+	 * do: the dentry is still hashed, it does not have
+	 * a 'delete' op, and it's referenced and already on
+	 * the LRU list.
+	 *
+	 * NOTE! Since we aren't locked, these values are
+	 * not "stable". However, it is sufficient that at
+	 * some point after we dropped the reference the
+	 * dentry was hashed and the flags had the proper
+	 * value. Other dentry users may have re-gotten
+	 * a reference to the dentry and change that, but
+	 * our work is done - we can leave the dentry
+	 * around with a zero refcount.
+	 */
+	smp_rmb();
+	d_flags = ACCESS_ONCE(dentry->d_flags);
+	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+	/* Nothing to do? Dropping the reference was all we needed? */
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+		return 1;
+
+	/*
+	 * Not the fast normal case? Get the lock. We've already decremented
+	 * the refcount, but we'll need to re-check the situation after
+	 * getting the lock.
+	 */
+	spin_lock(&dentry->d_lock);
+
+	/*
+	 * Did somebody else grab a reference to it in the meantime, and
+	 * we're no longer the last user after all? Alternatively, somebody
+	 * else could have killed it and marked it dead. Either way, we
+	 * don't need to do anything else.
+	 */
+	if (dentry->d_lockref.count) {
+		spin_unlock(&dentry->d_lock);
+		return 1;
+	}
+
+	/*
+	 * Re-get the reference we optimistically dropped. We hold the
+	 * lock, and we just tested that it was zero, so we can just
+	 * set it to 1.
+	 */
+	dentry->d_lockref.count = 1;
+	return 0;
+}
+
+
 /*
  * This is dput
  *
@@ -622,8 +726,14 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
-	if (lockref_put_or_lock(&dentry->d_lockref))
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
 		return;
+	}
+
+	/* Slow case: now with the dentry lock held */
+	rcu_read_unlock();
 
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
@@ -810,7 +920,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if ((int)dentry->d_lockref.count > 0) {
+		if (dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			if (parent)
 				spin_unlock(&parent->d_lock);
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4bfde0e99ed5..b10b122dd099 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -28,12 +28,13 @@ struct lockref {
 #endif
 		struct {
 			spinlock_t lock;
-			unsigned int count;
+			int count;
 		};
 	};
 };
 
 extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
 extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
diff --git a/lib/lockref.c b/lib/lockref.c
index d2233de9a86e..ecb9a665ec19 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -60,7 +60,7 @@ void lockref_get(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get);
 
 /**
- * lockref_get_not_zero - Increments count unless the count is 0
+ * lockref_get_not_zero - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  */
@@ -70,7 +70,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 
 	CMPXCHG_LOOP(
 		new.count++;
-		if (!old.count)
+		if (old.count <= 0)
 			return 0;
 	,
 		return 1;
@@ -78,7 +78,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 
 	spin_lock(&lockref->lock);
 	retval = 0;
-	if (lockref->count) {
+	if (lockref->count > 0) {
 		lockref->count++;
 		retval = 1;
 	}
@@ -88,7 +88,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get_not_zero);
 
 /**
- * lockref_get_or_lock - Increments count unless the count is 0
+ * lockref_get_or_lock - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  * and we got the lock instead.
@@ -97,14 +97,14 @@ int lockref_get_or_lock(struct lockref *lockref)
 {
 	CMPXCHG_LOOP(
 		new.count++;
-		if (!old.count)
+		if (old.count <= 0)
 			break;
 	,
 		return 1;
 	);
 
 	spin_lock(&lockref->lock);
-	if (!lockref->count)
+	if (lockref->count <= 0)
 		return 0;
 	lockref->count++;
 	spin_unlock(&lockref->lock);
@@ -113,6 +113,26 @@ int lockref_get_or_lock(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get_or_lock);
 
 /**
+ * lockref_put_return - Decrement reference count if possible
+ * @lockref: pointer to lockref structure
+ *
+ * Decrement the reference count and return the new value.
+ * If the lockref was dead or locked, return an error.
+ */
+int lockref_put_return(struct lockref *lockref)
+{
+	CMPXCHG_LOOP(
+		new.count--;
+		if (old.count <= 0)
+			return -1;
+	,
+		return new.count;
+	);
+	return -1;
+}
+EXPORT_SYMBOL(lockref_put_return);
+
+/**
  * lockref_put_or_lock - decrements count unless count <= 1 before decrement
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
@@ -158,7 +178,7 @@ int lockref_get_not_dead(struct lockref *lockref)
 
 	CMPXCHG_LOOP(
 		new.count++;
-		if ((int)old.count < 0)
+		if (old.count < 0)
 			return 0;
 	,
 		return 1;
@@ -166,7 +186,7 @@ int lockref_get_not_dead(struct lockref *lockref)
 
 	spin_lock(&lockref->lock);
 	retval = 0;
-	if ((int) lockref->count >= 0) {
+	if (lockref->count >= 0) {
 		lockref->count++;
 		retval = 1;
 	}
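As a closing illustration of the return convention documented in the lockref_put_return() kernel-doc above (negative on failure, otherwise the new count), here is a hedged caller-side sketch mirroring the shape of fast_dput(). It is not code from the patch; my_obj and my_obj_put_fast() are invented names, and it assumes kernel context.

/*
 * Hedged caller-side sketch (not from the patch): how a user of the new
 * lockref_put_return() is expected to act on its return value.
 * "my_obj" and my_obj_put_fast() are invented names for illustration.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/lockref.h>

struct my_obj {
	struct lockref refs;
	/* ... payload ... */
};

/* Return true if the reference was dropped without needing teardown. */
static bool my_obj_put_fast(struct my_obj *obj)
{
	int ret = lockref_put_return(&obj->refs);

	if (ret > 0)
		return true;	/* not the last reference, done */
	if (ret == 0)
		return true;	/* count hit zero locklessly; only safe if
				 * something else (e.g. RCU) keeps the object
				 * visible to concurrent lookups */

	/* Lock held or lockref dead: fall back to the locked slow path. */
	spin_lock(&obj->refs.lock);
	if (obj->refs.count > 1) {
		obj->refs.count--;
		spin_unlock(&obj->refs.lock);
		return true;
	}
	spin_unlock(&obj->refs.lock);
	return false;		/* caller must tear the object down */
}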