about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-02 14:58:20 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-02 14:58:20 -0400
commit2f4f12e571c4e2f50f3818a3c2544929145f75dd (patch)
tree22fa6305de44ad62123d4341bff40d5c827085ef
parent15570086b590a69d59183b08a7770e316cca20a7 (diff)
lockref: uninline lockref helper functions
They aren't very good to inline, since they already call external functions (the spinlock code), and we're going to create rather more complicated versions of them that can do the reference count updates locklessly. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/lockref.h	| 66
-rw-r--r--	lib/Makefile	|  1
-rw-r--r--	lib/lockref.c	| 69
3 files changed, 74 insertions(+), 62 deletions(-)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 0ea026092d1d..4c0af31c8d47 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -21,67 +21,9 @@ struct lockref {
 	unsigned int count;
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_get_or_lock - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-static inline int lockref_get_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (!lockref->count)
-		return 0;
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */
diff --git a/lib/Makefile b/lib/Makefile
index 7baccfd8a4e9..f2cb3082697c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,6 +20,7 @@ lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o klist.o
+obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
diff --git a/lib/lockref.c b/lib/lockref.c
new file mode 100644
index 000000000000..a9a4f4e1eff5
--- /dev/null
+++ b/lib/lockref.c
@@ -0,0 +1,69 @@
+#include <linux/export.h>
+#include <linux/lockref.h>
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockcnt: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+EXPORT_SYMBOL(lockref_get);
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ */
+int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_zero);
+
+/**
+ * lockref_get_or_lock - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ * and we got the lock instead.
+ */
+int lockref_get_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (!lockref->count)
+		return 0;
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+EXPORT_SYMBOL(lockref_get_or_lock);
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+EXPORT_SYMBOL(lockref_put_or_lock);