Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--   include/linux/percpu-refcount.h   122
1 file changed, 80 insertions(+), 42 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 68a64f11ce02..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
  *
  * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
  * than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
  *
  * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
  * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,29 +49,60 @@
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/gfp.h>
 
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
+	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+	__PERCPU_REF_FLAG_BITS	= 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+	/*
+	 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+	 * operation using percpu_ref_switch_to_percpu(). If initialized
+	 * with this flag, the ref will stay in atomic mode until
+	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 */
+	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
+
+	/*
+	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
+	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+	 */
+	PERCPU_REF_INIT_DEAD	= 1 << 1,
+};
+
 struct percpu_ref {
-	atomic_t		count;
+	atomic_long_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned long		pcpu_count_ptr;
+	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
+	bool			force_atomic:1;
 	struct rcu_head		rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
-				 percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+				 percpu_ref_func_t *release, unsigned int flags,
+				 gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
+void percpu_ref_reinit(struct percpu_ref *ref);
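
The reworked percpu_ref_init() now takes an initialization-flags argument and a gfp mask for allocating the percpu counters. A minimal caller sketch under the new prototype; the my_obj names and helpers are hypothetical, not part of this patch:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref	ref;
	/* ... payload ... */
};

/* runs once the last reference is dropped after percpu_ref_kill() */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* start in atomic mode; percpu_ref_switch_to_percpu() may follow later */
	if (percpu_ref_init(&obj->ref, my_obj_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

Passing 0 for the flags argument keeps the old behaviour: the ref starts at 1 and operates in percpu mode.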
@@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_DEAD		1
-
 /*
  * Internal helper. Don't use outside percpu-refcount proper. The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
-	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
 	return true;
 }
 
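
The renamed helper keeps the old trick: the percpu counter allocation is aligned well past the two flag bits, so __PERCPU_REF_ATOMIC and __PERCPU_REF_DEAD can live in the low bits of percpu_count_ptr without disturbing the pointer value. A worked illustration (values made up, not from the patch) of what __ref_is_percpu() sees in each state:

/*
 * Illustration only, not part of the patch. For a hypothetical percpu
 * pointer value p (low bits clear thanks to allocation alignment):
 *
 *   percpu_count_ptr == p                              percpu mode, alive
 *   percpu_count_ptr == p | __PERCPU_REF_ATOMIC        atomic mode, alive
 *   percpu_count_ptr == p | __PERCPU_REF_ATOMIC_DEAD   atomic mode, killed
 *
 * Only the first form makes __ref_is_percpu() return true, so get/put
 * fall back to the shared atomic_long_t whenever either bit is set.
 */
static inline void example_flag_bits_fit(void)
{
	/* both flags must fit below the pointer's alignment guarantee */
	BUILD_BUG_ON(__PERCPU_REF_ATOMIC_DEAD >= (1UL << __PERCPU_REF_FLAG_BITS));
}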
@@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_inc().
- */
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_inc(*percpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
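
percpu_ref_get() never fails and does not check for zero, so the caller must already know the ref is alive, typically because it holds another reference. An illustrative sketch continuing the hypothetical my_obj example:

static void my_obj_start_async_work(struct my_obj *obj)
{
	/* caller already holds a reference to obj, so a plain get is safe */
	percpu_ref_get(&obj->ref);
	/* ... hand obj to async work that ends with percpu_ref_put(&obj->ref) ... */
}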
@@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * Increment a percpu refcount unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
-	int ret = false;
+	unsigned long __percpu *percpu_count;
+	int ret;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
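
percpu_ref_tryget() is for callers that cannot assume the count is still non-zero, for example when the object is reached through a cache or weak pointer rather than an existing reference. A hypothetical sketch:

static struct my_obj *my_obj_lookup(struct my_obj *candidate)
{
	/* candidate may already be draining; only return it if we pinned it */
	if (candidate && percpu_ref_tryget(&candidate->ref))
		return candidate;
	return NULL;
}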
@@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
+	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
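
The guarantee described above, that percpu_ref_tryget_live() hands out no new references once the confirm_kill callback has run, is usually consumed with a completion. A sketch of that pattern; it assumes my_obj also carries a struct completion kill_done initialized at creation time (hypothetical, not from this patch):

#include <linux/completion.h>

static void my_obj_confirm_kill(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	complete(&obj->kill_done);
}

static void my_obj_begin_shutdown(struct my_obj *obj)
{
	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
	wait_for_completion(&obj->kill_done);
	/* from here on, percpu_ref_tryget_live() on obj->ref always fails */
}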
@@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	if (__ref_is_percpu(ref, &percpu_count))
+		this_cpu_dec(*percpu_count);
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
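
Putting the pieces together for teardown: percpu_ref_kill() drops the initial reference and forces further gets and puts onto the shared atomic counter, and whichever percpu_ref_put() then reaches zero invokes the release callback. Continuing the hypothetical sketch:

static void my_obj_destroy(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);
	/*
	 * In-flight users still hold references; the last percpu_ref_put()
	 * calls my_obj_release(), which frees obj.
	 */
}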
@@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  * @ref: percpu_ref to test
  *
  * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
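
percpu_ref_is_zero() pairs naturally with percpu_ref_reinit() and the new PERCPU_REF_INIT_DEAD flag: a ref that has genuinely drained to zero, and whose release callback does not free the containing object, can be put back into service. A hypothetical sketch:

static void my_ref_revive(struct percpu_ref *ref)
{
	/* only legal once the ref has actually reached zero and before exit */
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
	percpu_ref_reinit(ref);	/* back to the post-init state, ref == 1 */
}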