Diffstat (limited to 'include/linux/percpu-refcount.h')
 -rw-r--r--  include/linux/percpu-refcount.h | 121
 1 file changed, 80 insertions(+), 41 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 652fd64cab5e..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
  *
  * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
  * than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
  *
  * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
  * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,28 +49,60 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/gfp.h>
 
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
+	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+	__PERCPU_REF_FLAG_BITS	= 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+	/*
+	 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+	 * operation using percpu_ref_switch_to_percpu(). If initialized
+	 * with this flag, the ref will stay in atomic mode until
+	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 */
+	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
+
+	/*
+	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
+	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+	 */
+	PERCPU_REF_INIT_DEAD	= 1 << 1,
+};
+
 struct percpu_ref {
-	atomic_t		count;
+	atomic_long_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned long		pcpu_count_ptr;
+	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
+	bool			force_atomic:1;
 	struct rcu_head		rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
-				 percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+				 percpu_ref_func_t *release, unsigned int flags,
+				 gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
+void percpu_ref_reinit(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
@@ -87,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_DEAD		1
-
 /*
  * Internal helper. Don't use outside percpu-refcount proper. The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
-	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
 	return true;
 }
 
@@ -114,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_inc().
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_inc(*percpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
@@ -137,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * Increment a percpu refcount unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
-	int ret = false;
+	unsigned long __percpu *percpu_count;
+	int ret;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -165,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
+	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -195,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_dec(*percpu_count);
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
@@ -215,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  * @ref: percpu_ref to test
  *
  * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
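
Usage sketch (not part of the patch): the following illustrates how the API reads after this change, with percpu_ref_init() taking @flags and @gfp and the ref switchable between atomic and percpu mode. The enclosing struct foo and its helpers are hypothetical, shown only to exercise the declarations above.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref	ref;
	/* ... payload ... */
};

/* Called once the last reference is dropped after percpu_ref_kill(). */
static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	percpu_ref_exit(&foo->ref);	/* free the percpu counter */
	kfree(foo);
}

static int foo_create(struct foo *foo)
{
	int ret;

	/* Start with ref == 1 in atomic mode (PERCPU_REF_INIT_ATOMIC). */
	ret = percpu_ref_init(&foo->ref, foo_release,
			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
	if (ret)
		return ret;

	/* Once setup is done, switch to cheap percpu operation. */
	percpu_ref_switch_to_percpu(&foo->ref);
	return 0;
}

static void foo_use(struct foo *foo)
{
	/* tryget_live() fails once the ref has been killed. */
	if (percpu_ref_tryget_live(&foo->ref)) {
		/* ... foo is guaranteed to stay around here ... */
		percpu_ref_put(&foo->ref);
	}
}

static void foo_destroy(struct foo *foo)
{
	/* Drop the initial ref; foo_release() runs when the count hits 0. */
	percpu_ref_kill(&foo->ref);
}

Initializing with PERCPU_REF_INIT_DEAD instead would start the ref dead with count 0, requiring percpu_ref_reinit() before first use.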