-rw-r--r--   include/linux/refcount.h   277
-rw-r--r--   lib/Kconfig.debug            13
-rw-r--r--   lib/Makefile                  2
-rw-r--r--   lib/refcount.c              267
4 files changed, 280 insertions, 279 deletions
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 600aadf9cca4..0e8cfb2ce91e 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,55 +1,10 @@
 #ifndef _LINUX_REFCOUNT_H
 #define _LINUX_REFCOUNT_H
 
-/*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- */
-
 #include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 
-#ifdef CONFIG_DEBUG_REFCOUNT
-#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
-#define __refcount_check __must_check
-#else
-#define REFCOUNT_WARN(cond, str) (void)(cond)
-#define __refcount_check
-#endif
-
 typedef struct refcount_struct {
         atomic_t refs;
 } refcount_t;
@@ -66,229 +21,21 @@ static inline unsigned int refcount_read(const refcount_t *r)
         return atomic_read(&r->refs);
 }
 
-static inline __refcount_check
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
-        unsigned int old, new, val = atomic_read(&r->refs);
-
-        for (;;) {
-                if (!val)
-                        return false;
-
-                if (unlikely(val == UINT_MAX))
-                        return true;
-
-                new = val + i;
-                if (new < val)
-                        new = UINT_MAX;
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
-
-        REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-        return true;
-}
-
-static inline void refcount_add(unsigned int i, refcount_t *r)
-{
-        REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- */
-static inline __refcount_check
-bool refcount_inc_not_zero(refcount_t *r)
-{
-        unsigned int old, new, val = atomic_read(&r->refs);
-
-        for (;;) {
-                new = val + 1;
-
-                if (!val)
-                        return false;
-
-                if (unlikely(!new))
-                        return true;
-
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
-
-        REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-        return true;
-}
-
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
- */
-static inline void refcount_inc(refcount_t *r)
-{
-        REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
-{
-        unsigned int old, new, val = atomic_read(&r->refs);
-
-        for (;;) {
-                if (unlikely(val == UINT_MAX))
-                        return false;
-
-                new = val - i;
-                if (new > val) {
-                        REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
-                        return false;
-                }
-
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
-
-        return !new;
-}
-
-static inline __refcount_check
-bool refcount_dec_and_test(refcount_t *r)
-{
-        return refcount_sub_and_test(1, r);
-}
+extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
+extern void refcount_add(unsigned int i, refcount_t *r);
 
-/*
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-static inline
-void refcount_dec(refcount_t *r)
-{
-        REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
-
-/*
- * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
- * success thereof.
- *
- * Like all decrement operations, it provides release memory order and provides
- * a control dependency.
- *
- * It can be used like a try-delete operator; this explicit case is provided
- * and not cmpxchg in generic, because that would allow implementing unsafe
- * operations.
- */
-static inline __refcount_check
-bool refcount_dec_if_one(refcount_t *r)
-{
-        return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
-}
-
-/*
- * No atomic_t counterpart, it decrements unless the value is 1, in which case
- * it will return false.
- *
- * Was often done like: atomic_add_unless(&var, -1, 1)
- */
-static inline __refcount_check
-bool refcount_dec_not_one(refcount_t *r)
-{
-        unsigned int old, new, val = atomic_read(&r->refs);
+extern __must_check bool refcount_inc_not_zero(refcount_t *r);
+extern void refcount_inc(refcount_t *r);
 
-        for (;;) {
-                if (unlikely(val == UINT_MAX))
-                        return true;
+extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+extern void refcount_sub(unsigned int i, refcount_t *r);
 
-                if (val == 1)
-                        return false;
+extern __must_check bool refcount_dec_and_test(refcount_t *r);
+extern void refcount_dec(refcount_t *r);
 
-                new = val - 1;
-                if (new > val) {
-                        REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
-                        return true;
-                }
-
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
-
-        return true;
-}
-
-/*
- * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
-{
-        if (refcount_dec_not_one(r))
-                return false;
-
-        mutex_lock(lock);
-        if (!refcount_dec_and_test(r)) {
-                mutex_unlock(lock);
-                return false;
-        }
-
-        return true;
-}
-
-/*
- * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
-{
-        if (refcount_dec_not_one(r))
-                return false;
-
-        spin_lock(lock);
-        if (!refcount_dec_and_test(r)) {
-                spin_unlock(lock);
-                return false;
-        }
-
-        return true;
-}
+extern __must_check bool refcount_dec_if_one(refcount_t *r);
+extern __must_check bool refcount_dec_not_one(refcount_t *r);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
 
 #endif /* _LINUX_REFCOUNT_H */
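
For readers following the conversion, here is a minimal usage sketch of the API this header now declares, in the classic get/put pattern. It is illustrative only and not part of this patch; struct foo, foo_get() and foo_put() are hypothetical names.

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t ref;
        /* ... payload ... */
};

/* The caller must already hold a reference; refcount_inc() WARNs on 0. */
static struct foo *foo_get(struct foo *f)
{
        refcount_inc(&f->ref);
        return f;
}

/* Only the final put sees the 1->0 transition and frees the object. */
static void foo_put(struct foo *f)
{
        if (refcount_dec_and_test(&f->ref))
                kfree(f);
}

The relaxed refcount_inc() relies on whatever published f (a lock, RCU) for ordering; refcount_dec_and_test() provides release ordering plus a control dependency, so all prior accesses to *f complete before kfree().
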
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index acedbe626d47..0dbce99d8433 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -716,19 +716,6 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
-config DEBUG_REFCOUNT
-        bool "Verbose refcount checks"
-        help
-          Say Y here if you want reference counters (refcount_t and kref) to
-          generate WARNs on dubious usage. Without this refcount_t will still
-          be a saturating counter and avoid Use-After-Free by turning it into
-          a resource leak Denial-Of-Service.
-
-          Use of this option will increase kernel text size but will alert the
-          admin of potential abuse.
-
-          If in doubt, say "N".
-
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
diff --git a/lib/Makefile b/lib/Makefile
index 19ea76149a37..192e4d03caf9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
         gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
         bsearch.o find_bit.o llist.o memweight.o kfifo.o \
         percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-        once.o
+        once.o refcount.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/refcount.c b/lib/refcount.c
new file mode 100644
index 000000000000..1d33366189d1
--- /dev/null
+++ b/lib/refcount.c
@@ -0,0 +1,267 @@
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; they also provide a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/refcount.h>
+#include <linux/bug.h>
+
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+        unsigned int old, new, val = atomic_read(&r->refs);
+
+        for (;;) {
+                if (!val)
+                        return false;
+
+                if (unlikely(val == UINT_MAX))
+                        return true;
+
+                new = val + i;
+                if (new < val)
+                        new = UINT_MAX;
+                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+                if (old == val)
+                        break;
+
+                val = old;
+        }
+
+        WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+
+void refcount_add(unsigned int i, refcount_t *r)
+{
+        WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_add);
+
+/*
+ * Similar to atomic_inc_not_zero(); will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+bool refcount_inc_not_zero(refcount_t *r)
+{
+        unsigned int old, new, val = atomic_read(&r->refs);
+
+        for (;;) {
+                new = val + 1;
+
+                if (!val)
+                        return false;
+
+                if (unlikely(!new))
+                        return true;
+
+                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+                if (old == val)
+                        break;
+
+                val = old;
+        }
+
+        WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
+
+/*
+ * Similar to atomic_inc(); will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering; it is assumed the caller already has a
+ * reference on the object, and will WARN when this is not so.
+ */
+void refcount_inc(refcount_t *r)
+{
+        WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_inc);
+
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+        unsigned int old, new, val = atomic_read(&r->refs);
+
+        for (;;) {
+                if (unlikely(val == UINT_MAX))
+                        return false;
+
+                new = val - i;
+                if (new > val) {
+                        WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                        return false;
+                }
+
+                old = atomic_cmpxchg_release(&r->refs, val, new);
+                if (old == val)
+                        break;
+
+                val = old;
+        }
+
+        return !new;
+}
+EXPORT_SYMBOL_GPL(refcount_sub_and_test);
+
+/*
+ * Similar to atomic_dec_and_test(); it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_test(refcount_t *r)
+{
+        return refcount_sub_and_test(1, r);
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_test);
+
+/*
+ * Similar to atomic_dec(); it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+
+void refcount_dec(refcount_t *r)
+{
+        WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_dec);
+
+/*
+ * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * rather than a generic cmpxchg, because that would allow implementing unsafe
+ * operations.
+ */
+bool refcount_dec_if_one(refcount_t *r)
+{
+        return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_if_one);
+
+/*
+ * No atomic_t counterpart; it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+bool refcount_dec_not_one(refcount_t *r)
+{
+        unsigned int old, new, val = atomic_read(&r->refs);
+
+        for (;;) {
+                if (unlikely(val == UINT_MAX))
+                        return true;
+
+                if (val == 1)
+                        return false;
+
+                new = val - 1;
+                if (new > val) {
+                        WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+                        return true;
+                }
+
+                old = atomic_cmpxchg_release(&r->refs, val, new);
+                if (old == val)
+                        break;
+
+                val = old;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_not_one);
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(); it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+        if (refcount_dec_not_one(r))
+                return false;
+
+        mutex_lock(lock);
+        if (!refcount_dec_and_test(r)) {
+                mutex_unlock(lock);
+                return false;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
+
+/*
+ * Similar to atomic_dec_and_lock(); it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+        if (refcount_dec_not_one(r))
+                return false;
+
+        spin_lock(lock);
+        if (!refcount_dec_and_test(r)) {
+                spin_unlock(lock);
+                return false;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
+
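
To make the ordering comments above concrete, here is a hedged sketch of the RCU-lookup/teardown pattern they describe. It is illustrative only and not part of this patch: struct foo, foo_lookup(), foo_unpublish_and_put(), foo_list and foo_lock are hypothetical, and the freeing detail assumes struct foo embeds a struct rcu_head named rcu.

#include <linux/refcount.h>
#include <linux/rculist.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
        refcount_t ref;
        struct list_head node;
        struct rcu_head rcu;
        unsigned long key;
};

static LIST_HEAD(foo_list);
static DEFINE_MUTEX(foo_lock);

/*
 * Lockless lookup: RCU only keeps the memory stable, so a plain
 * refcount_inc() could race with the final put. refcount_inc_not_zero()
 * fails once the count has dropped to 0, and its control dependency
 * orders our later stores after the acquisition of the reference.
 */
static struct foo *foo_lookup(unsigned long key)
{
        struct foo *f;

        rcu_read_lock();
        list_for_each_entry_rcu(f, &foo_list, node) {
                if (f->key == key && refcount_inc_not_zero(&f->ref)) {
                        rcu_read_unlock();
                        return f;
                }
        }
        rcu_read_unlock();

        return NULL;
}

/*
 * Teardown: refcount_dec_and_mutex_lock() returns true, with foo_lock
 * held, only for the final decrement, so the object is unpublished and
 * freed exactly once.
 */
static void foo_unpublish_and_put(struct foo *f)
{
        if (refcount_dec_and_mutex_lock(&f->ref, &foo_lock)) {
                list_del_rcu(&f->node);
                mutex_unlock(&foo_lock);
                kfree_rcu(f, rcu);
        }
}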