author     David Windsor <dwindsor@gmail.com>    2017-03-10 10:34:12 -0500
committer  Ingo Molnar <mingo@kernel.org>        2017-03-13 02:41:08 -0400
commit     bd174169c7a12a37b3b4aa2221f084ade010b182 (patch)
tree       e2be1ee97039943699fe637804c86bbe6626a23c
parent     4495c08e84729385774601b5146d51d9e5849f81 (diff)
locking/refcount: Add refcount_t API kernel-doc comments
Signed-off-by: David Windsor <dwindsor@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: elena.reshetova@intel.com
Cc: kernel-hardening@lists.openwall.com
Link: http://lkml.kernel.org/r/1489160052-20293-1-git-send-email-dwindsor@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/refcount.h  |  19
-rw-r--r--  lib/refcount.c            | 122
2 files changed, 129 insertions(+), 12 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0023fee4bbbc..b34aa649d204 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -6,17 +6,36 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>

+/**
+ * refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
 typedef struct refcount_struct {
         atomic_t refs;
 } refcount_t;

 #define REFCOUNT_INIT(n)        { .refs = ATOMIC_INIT(n), }

+/**
+ * refcount_set - set a refcount's value
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ */
 static inline void refcount_set(refcount_t *r, unsigned int n)
 {
         atomic_set(&r->refs, n);
 }

+/**
+ * refcount_read - get a refcount's value
+ * @r: the refcount
+ *
+ * Return: the refcount's value
+ */
 static inline unsigned int refcount_read(const refcount_t *r)
 {
         return atomic_read(&r->refs);
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..8e206ce5609b 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,6 +37,24 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>

+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
         unsigned int old, new, val = atomic_read(&r->refs);
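As the new comment says, refcount_add_not_zero() targets batched reference transfers rather than the usual one-at-a-time pattern. A hedged sketch, reusing the hypothetical struct foo from above:

/* Hand the object to 'count' new consumers in one shot; fails (and takes
 * no references) if the object is already on its way to being freed.
 */
static bool foo_get_many(struct foo *f, unsigned int count)
{
        return refcount_add_not_zero(count, &f->ref);
}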
@@ -64,18 +82,39 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_add_not_zero);

+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
 void refcount_add(unsigned int i, refcount_t *r)
 {
         WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);

-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller has guaranteed the
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
@@ -103,11 +142,17 @@ bool refcount_inc_not_zero(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc_not_zero);

-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
  */
 void refcount_inc(refcount_t *r)
 {
@@ -115,6 +160,26 @@ void refcount_inc(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc);

+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
         unsigned int old, new, val = atomic_read(&r->refs);
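refcount_sub_and_test() is the release-side counterpart of refcount_add_not_zero(): it drops a batch of references and reports whether that ended the object's life. A sketch, again with the hypothetical struct foo:

static void foo_put_many(struct foo *f, unsigned int count)
{
        if (refcount_sub_and_test(count, &f->ref))
                kfree(f);       /* we dropped the last reference */
}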
@@ -140,13 +205,18 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_sub_and_test);

-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_and_test(refcount_t *r)
 {
@@ -154,21 +224,26 @@ bool refcount_dec_and_test(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_test);

-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
  * when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-
 void refcount_dec(refcount_t *r)
 {
         WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);

-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
  * success thereof.
  *
@@ -178,6 +253,8 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  * It can be used like a try-delete operator; this explicit case is provided
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
@@ -185,11 +262,16 @@ bool refcount_dec_if_one(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);

-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
  * it will return false.
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
@@ -219,13 +301,21 @@ bool refcount_dec_not_one(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_not_one);

-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ *                               refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
  * to decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 {
@@ -242,13 +332,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);

-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ *                         refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {