Diffstat (limited to 'lib/lockref.c')
-rw-r--r--	lib/lockref.c	183
1 file changed, 183 insertions, 0 deletions
diff --git a/lib/lockref.c b/lib/lockref.c
new file mode 100644
index 000000000000..6f9d434c1521
--- /dev/null
+++ b/lib/lockref.c
@@ -0,0 +1,183 @@
#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
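
/*
 * A sketch of the layout the cmpxchg loop depends on (the real
 * definition lives in <linux/lockref.h>): the spinlock and the count
 * share a single 64-bit word, roughly
 *
 *	struct lockref {
 *		union {
 *			aligned_u64 lock_count;
 *			struct {
 *				spinlock_t lock;
 *				unsigned int count;
 *			};
 *		};
 *	};
 *
 * so one cmpxchg64() can update the count while simultaneously
 * re-checking that the lock is not held.
 */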

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
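
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * object embedding a lockref.  Duplicating a handle we already own
 * never needs to take the spinlock on the fast path.
 */
struct foo {
	struct lockref ref;
};

static struct foo *foo_dup(struct foo *foo)
{
	lockref_get(&foo->ref);		/* caller already holds a reference */
	return foo;
}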

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
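
/*
 * Illustrative sketch (hypothetical, reusing 'struct foo' from above):
 * a lookup path hands out a new reference only while the object is
 * still live; a zero count means teardown has already begun.
 */
static struct foo *foo_try_get(struct foo *foo)
{
	if (lockref_get_not_zero(&foo->ref))
		return foo;
	return NULL;	/* count was zero: do not resurrect the object */
}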

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
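
/*
 * Illustrative sketch (hypothetical): a 0 return from
 * lockref_get_or_lock() leaves the caller holding lockref->lock, so it
 * must either fix the object up under the lock or unlock and bail out.
 */
static int foo_get_or_fail(struct foo *foo)
{
	if (lockref_get_or_lock(&foo->ref))
		return 0;	/* got a reference */
	/* count was zero: we hold foo->ref.lock here */
	spin_unlock(&foo->ref.lock);
	return -ENOENT;
}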

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
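
/*
 * Illustrative sketch (hypothetical): the intended teardown pattern.
 * A 1 return means the count was decremented locklessly; a 0 return
 * means the count was <= 1 and the caller now holds the lock, so it
 * owns the final teardown.
 */
static void foo_put(struct foo *foo)
{
	if (lockref_put_or_lock(&foo->ref))
		return;		/* count was > 1, decremented locklessly */
	/* we hold foo->ref.lock and this was the last reference */
	foo->ref.count = 0;
	spin_unlock(&foo->ref.lock);
	/* ... free or otherwise retire foo here ... */
}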

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * The caller must hold lockref->lock when calling this.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;	/* any negative count means "dead" */
}

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int)lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
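
/*
 * Illustrative sketch (hypothetical): pairing lockref_mark_dead() with
 * lockref_get_not_dead().  Teardown marks the object dead while holding
 * the lock; concurrent lookups then refuse to take new references even
 * though the memory may still be reachable (e.g. under RCU).
 */
static void foo_kill(struct foo *foo)
{
	spin_lock(&foo->ref.lock);
	lockref_mark_dead(&foo->ref);
	spin_unlock(&foo->ref.lock);
}

static struct foo *foo_lookup_check(struct foo *foo)
{
	if (lockref_get_not_dead(&foo->ref))
		return foo;
	return NULL;	/* dead: object is being torn down */
}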