author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 11:08:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 11:08:21 -0400
commit     fc6d0b037678f50014ef409c92c5bedc01208fcd (patch)
tree       b7de25e97b03c31ea6c5f2540f641b0be2c91832 /include/linux
parent     6e4664525b1db28f8c4e1130957f70a94c19213e (diff)
parent     bc08b449ee14ace4d869adaa1bb35a44ce68d775 (diff)
Merge branch 'lockref' (locked reference counts)
Merge lockref infrastructure code by me and Waiman Long.

I already merged some of the preparatory patches that didn't actually do
any semantic changes earlier, but this merges the actual _reason_ for
those preparatory patches.

The "lockref" structure is a combination "spinlock and reference count"
that allows optimized reference count accesses. In particular, it
guarantees that the reference count will be updated AS IF the spinlock
was held, but using atomic accesses that cover both the reference count
and the spinlock words, we can often do the update without actually
having to take the lock.

This allows us to avoid the nastiest cases of spinlock contention on
large machines under heavy pathname lookup loads. When updating the
dentry reference counts on a large system, we'll still end up with the
cache line bouncing around, but that's much less noticeable than
actually having to spin waiting for the lock.

* lockref:
  lockref: implement lockless reference count updates using cmpxchg()
  lockref: uninline lockref helper functions
  vfs: reimplement d_rcu_to_refcount() using lockref_get_or_lock()
  vfs: use lockref_get_not_zero() for optimistic lockless dget_parent()
  lockref: add 'lockref_get_or_lock()' helper
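For illustration only (this note and sketch are not part of the commit): the fast path described above can be modeled in plain C11 atomics. The sketch below packs a toy lock flag and a reference count into one 64-bit word and bumps the count with a single compare-and-exchange whenever the lock word reads as unlocked. The 32/32 bit split, the name lockref_model, and the helper get_lockless are assumptions of this model, not the kernel's actual layout or API.

/* Minimal userspace model of the lockref fast path -- NOT kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_MASK	((uint64_t)0xffffffff)	/* low half: toy "spinlock" word */
#define COUNT_UNIT	((uint64_t)1 << 32)	/* high half: reference count */

struct lockref_model {
	_Atomic uint64_t lock_count;	/* lock word + count, one cmpxchg unit */
};

/*
 * Bump the count without taking the lock, but only while the lock word
 * reads as unlocked. The cmpxchg covers both halves, so the update is
 * "as if" the lock were held: any concurrent locker or count change
 * makes it fail and retry. Returns false once the lock is seen held,
 * at which point a real caller would fall back to locking.
 */
static bool get_lockless(struct lockref_model *lr)
{
	uint64_t old = atomic_load(&lr->lock_count);

	while ((old & LOCK_MASK) == 0) {
		/* On failure, 'old' is refreshed and the lock bit re-checked. */
		if (atomic_compare_exchange_weak(&lr->lock_count, &old,
						 old + COUNT_UNIT))
			return true;
	}
	return false;
}

int main(void)
{
	struct lockref_model lr = { 2 * COUNT_UNIT };	/* count = 2, unlocked */

	if (get_lockless(&lr))
		printf("count is now %u\n",
		       (unsigned)(atomic_load(&lr.lock_count) >> 32));
	return 0;
}

In the kernel, the same idea is only compiled in when a suitable 64-bit cmpxchg exists, as the CONFIG_CMPXCHG_LOCKREF guard in the lockref.h diff below hints.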
Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/dcache.h  | 22
 -rw-r--r--  include/linux/lockref.h | 61
 2 files changed, 13 insertions, 70 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index efdc94434c30..9169b91ea2d2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -304,28 +304,6 @@ extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
 extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
 				const struct qstr *name, unsigned *seq);
 
-/**
- * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
- * @dentry: dentry to take a ref on
- * @seq: seqcount to verify against
- * Returns: 0 on failure, else 1.
- *
- * __d_rcu_to_refcount operates on a dentry,seq pair that was returned
- * by __d_lookup_rcu, to get a reference on an rcu-walk dentry.
- */
-static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
-{
-	int ret = 0;
-
-	assert_spin_locked(&dentry->d_lock);
-	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
-		ret = 1;
-		dentry->d_lockref.count++;
-	}
-
-	return ret;
-}
-
 static inline unsigned d_count(const struct dentry *dentry)
 {
 	return dentry->d_lockref.count;
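(Aside, not part of the diff: per the shortlog entry "vfs: reimplement d_rcu_to_refcount() using lockref_get_or_lock()", the helper removed above does not simply disappear; its job moves into fs/dcache.c on top of the new lockref API. The following is a rough, hypothetical sketch of that shape, pieced together from the removed code and the documented lockref_get_or_lock() semantics, not copied from the kernel.)

/* Sketch only -- not the verbatim fs/dcache.c code. */
static int d_rcu_to_refcount_sketch(struct dentry *dentry,
				    seqcount_t *validate, unsigned seq)
{
	if (lockref_get_or_lock(&dentry->d_lockref)) {
		/* Fast path: count bumped, possibly locklessly. Re-check
		 * the sequence and drop the reference again if we raced. */
		if (!read_seqcount_retry(validate, seq))
			return 1;
		dput(dentry);
		return 0;
	}
	/* Count was zero: d_lock is now held, so validate and bump
	 * under the lock, exactly as the removed helper did. */
	if (!read_seqcount_retry(validate, seq)) {
		dentry->d_lockref.count++;
		spin_unlock(&dentry->d_lockref.lock);
		return 1;
	}
	spin_unlock(&dentry->d_lockref.lock);
	return 0;
}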
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 01233e01627a..ca07b5028b01 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -17,55 +17,20 @@
 #include <linux/spinlock.h>
 
 struct lockref {
-	spinlock_t lock;
-	unsigned int count;
+	union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			unsigned int count;
+		};
+	};
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */
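A usage note on the new out-of-line API: each "_or_lock" variant returns 1 after updating the count (locklessly when possible), or 0 with the spinlock left held so the caller can finish the hard case itself. A hypothetical caller might use lockref_put_or_lock() like this (my_obj and release_obj are illustrative names, not kernel symbols):

/* Hypothetical release path built on lockref_put_or_lock(). */
struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

static void my_obj_put(struct my_obj *obj)
{
	/* Fast path: count was > 1 and has been decremented. */
	if (lockref_put_or_lock(&obj->ref))
		return;

	/* Slow path: count is <= 1 and obj->ref.lock is held; while we
	 * hold it, no lockless get can succeed, so teardown is safe. */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	release_obj(obj);
}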