Diffstat (limited to 'include/linux/lockref.h')
-rw-r--r--   include/linux/lockref.h   71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

struct lockref {
	spinlock_t lock;
	unsigned int count;
};

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
static inline void lockref_get(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count is 0
 */
static inline int lockref_get_not_zero(struct lockref *lockref)
{
	int retval = 0;

	spin_lock(&lockref->lock);
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
static inline int lockref_put_or_lock(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}

#endif /* __LINUX_LOCKREF_H */
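
A caller would typically embed struct lockref in its own object and pair lockref_get_not_zero() with lockref_put_or_lock(). The sketch below is an illustrative assumption, not part of this patch: "struct foo", foo_get(), foo_put() and the kfree()-based teardown are hypothetical names. The point it shows is the contract documented above: a 0 return from lockref_put_or_lock() means the count was <= 1 and the spinlock is now held, so the final-reference path runs under the lock and must drop it.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object embedding a lockref (illustration only). */
struct foo {
	struct lockref ref;
	/* ... payload ... */
};

/* Take an extra reference, but only while the object is still live. */
static struct foo *foo_get(struct foo *foo)
{
	return lockref_get_not_zero(&foo->ref) ? foo : NULL;
}

/*
 * Drop a reference. In the common case lockref_put_or_lock() just
 * decrements the count and returns 1. A 0 return means the count was
 * <= 1 and the spinlock is now held, so this is the last reference and
 * teardown happens under the lock before it is released.
 */
static void foo_put(struct foo *foo)
{
	if (lockref_put_or_lock(&foo->ref))
		return;

	foo->ref.count = 0;		/* last reference, lock held */
	spin_unlock(&foo->ref.lock);
	kfree(foo);
}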
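
The header comment above mentions implementations that skip the spinlock for the common get/put by doing a cmpxchg that covers both the lock and the count word. The following is a minimal userspace model of that idea, not the kernel's code: the names (toy_lockref, toy_get_fast), the 32-bit lock word layout, the retry bound and the GCC __atomic builtins are all assumptions made for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_lockref {
	union {
		uint64_t lock_count;		/* lock and count as one word */
		struct {
			uint32_t lock;		/* 0 == unlocked, 1 == locked */
			uint32_t count;
		};
	};
};

/*
 * Try a lockless increment: the cmpxchg only succeeds if the whole
 * lock+count word still matches what we sampled, and we only attempt
 * it when the sampled lock word was unlocked. That makes the increment
 * behave as if the spinlock had been taken. Returns false when the
 * caller should fall back to taking the lock.
 */
static bool toy_get_fast(struct toy_lockref *ref)
{
	uint64_t old = __atomic_load_n(&ref->lock_count, __ATOMIC_RELAXED);

	for (int retries = 0; retries < 16; retries++) {
		struct toy_lockref snap = { .lock_count = old };

		if (snap.lock != 0)
			return false;		/* locked: take the slow path */

		snap.count++;
		if (__atomic_compare_exchange_n(&ref->lock_count, &old,
						snap.lock_count, false,
						__ATOMIC_ACQ_REL,
						__ATOMIC_RELAXED))
			return true;		/* lock and count updated together */
		/* 'old' now holds the freshly observed value; retry. */
	}
	return false;
}

int main(void)
{
	struct toy_lockref ref = { .lock = 0, .count = 1 };

	if (toy_get_fast(&ref))
		printf("count is now %u\n", ref.count);
	return 0;
}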