aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/lockref.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-02 14:58:20 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-02 14:58:20 -0400
commit2f4f12e571c4e2f50f3818a3c2544929145f75dd (patch)
tree22fa6305de44ad62123d4341bff40d5c827085ef /include/linux/lockref.h
parent15570086b590a69d59183b08a7770e316cca20a7 (diff)
lockref: uninline lockref helper functions
They aren't very good to inline, since they already call external functions (the spinlock code), and we're going to create rather more complicated versions of them that can do the reference count updates locklessly.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/lockref.h')
-rw-r--r--	include/linux/lockref.h	66
1 file changed, 4 insertions, 62 deletions
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 0ea026092d1d..4c0af31c8d47 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -21,67 +21,9 @@ struct lockref {
 	unsigned int count;
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_get_or_lock - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-static inline int lockref_get_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (!lockref->count)
-		return 0;
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */