author		Tejun Heo <tj@kernel.org>	2015-09-29 17:47:19 -0400
committer	Tejun Heo <tj@kernel.org>	2016-08-10 15:02:58 -0400
commit		3f49bdd95855a33eea749304d2e10530a869218b (patch)
tree		fe3b7bd2f627ce5fd66084e9e20beec98df1f471
parent		18808354b79622ed11857e41f9044ba17aec5b01 (diff)
percpu_ref: restructure operation mode switching
Restructure atomic/percpu mode switching.

* The users of __percpu_ref_switch_to_atomic/percpu() now call a new
  function __percpu_ref_switch_mode() which calls either of the
  original switching functions depending on the current state of
  ref->force_atomic and the __PERCPU_REF_DEAD flag.  The callers no
  longer check whether switching is necessary but always invoke
  __percpu_ref_switch_mode().

* !ref->confirm_switch waiting is collected into
  __percpu_ref_switch_mode().

This patch doesn't cause any behavior differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
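For orientation, here is a minimal, hypothetical sketch of how a user of this API drives the mode switches that now all funnel through __percpu_ref_switch_mode(). It is module-style illustration code, not part of this patch: the percpu_ref_* calls are the real API, but the my_* names are invented and error handling is elided.

	#include <linux/percpu-refcount.h>

	static struct percpu_ref my_ref;	/* hypothetical user of the API */

	static void my_release(struct percpu_ref *ref)
	{
		/* called once the count reaches zero after kill */
	}

	static int my_setup(void)
	{
		/* starts out in percpu (fast-path) mode */
		return percpu_ref_init(&my_ref, my_release, 0, GFP_KERNEL);
	}

	static void my_quiesce_and_resume(void)
	{
		/*
		 * Force atomic mode; this sets ref->force_atomic and, after
		 * this patch, goes through __percpu_ref_switch_mode().
		 * Confirmation callback omitted in this sketch.
		 */
		percpu_ref_switch_to_atomic(&my_ref, NULL);

		/* ... operate with an exact atomic count ... */

		/* back to percpu mode (deferred if @ref is dying or dead) */
		percpu_ref_switch_to_percpu(&my_ref);
	}

	static void my_teardown(void)
	{
		/* marks __PERCPU_REF_DEAD and drops the initial ref */
		percpu_ref_kill(&my_ref);
		/* ... wait for my_release() before freeing storage ... */
		percpu_ref_exit(&my_ref);
	}

The point of the restructuring is that none of these entry points decides between modes itself anymore: each records its intent (ref->force_atomic, __PERCPU_REF_DEAD) and lets __percpu_ref_switch_mode() pick the operating mode.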
-rw-r--r--	lib/percpu-refcount.c	64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index c3617a8525d7..f3ff793691ac 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -161,16 +161,6 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 					   percpu_ref_func_t *confirm_switch)
 {
-	/*
-	 * If the previous ATOMIC switching hasn't finished yet, wait for
-	 * its completion.  If the caller ensures that ATOMIC switching
-	 * isn't in progress, this function can be called from any context.
-	 * Do an extra confirm_switch test to circumvent the unconditional
-	 * might_sleep() in wait_event().
-	 */
-	if (ref->confirm_switch)
-		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
 	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
 		if (confirm_switch)
 			confirm_switch(ref);
@@ -195,16 +185,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	int cpu;
 
-	/*
-	 * If the previous ATOMIC switching hasn't finished yet, wait for
-	 * its completion.  If the caller ensures that ATOMIC switching
-	 * isn't in progress, this function can be called from any context.
-	 * Do an extra confirm_switch test to circumvent the unconditional
-	 * might_sleep() in wait_event().
-	 */
-	if (ref->confirm_switch)
-		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
 	BUG_ON(!percpu_count);
 
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
@@ -225,6 +205,25 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 		  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+				     percpu_ref_func_t *confirm_switch)
+{
+	/*
+	 * If the previous ATOMIC switching hasn't finished yet, wait for
+	 * its completion.  If the caller ensures that ATOMIC switching
+	 * isn't in progress, this function can be called from any context.
+	 * Do an extra confirm_switch test to circumvent the unconditional
+	 * might_sleep() in wait_event().
+	 */
+	if (ref->confirm_switch)
+		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+		__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	else
+		__percpu_ref_switch_to_percpu(ref);
+}
+
 /**
  * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
  * @ref: percpu_ref to switch to atomic mode
@@ -241,16 +240,15 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  * operations.  Note that @ref will stay in atomic mode across kill/reinit
  * cycles until percpu_ref_switch_to_percpu() is called.
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode.  In such cases, @confirm_switch
- * will be invoked after the switching is complete.
+ * This function may block if @ref is in the process of switching to atomic
+ * mode.  If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_switch)
 {
 	ref->force_atomic = true;
-	__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	__percpu_ref_switch_mode(ref, confirm_switch);
 }
 
 /**
@@ -267,17 +265,14 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
  * dying or dead, the actual switching takes place on the following
  * percpu_ref_reinit().
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode.  If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
 	ref->force_atomic = false;
-
-	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
-		__percpu_ref_switch_to_percpu(ref);
+	__percpu_ref_switch_mode(ref, NULL);
 }
 
 /**
@@ -302,7 +297,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
302 "%s called more than once on %pf!", __func__, ref->release); 297 "%s called more than once on %pf!", __func__, ref->release);
303 298
304 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 299 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
305 __percpu_ref_switch_to_atomic(ref, confirm_kill); 300 __percpu_ref_switch_mode(ref, confirm_kill);
306 percpu_ref_put(ref); 301 percpu_ref_put(ref);
307} 302}
308EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); 303EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
@@ -324,7 +319,6 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
-	if (!ref->force_atomic)
-		__percpu_ref_switch_to_percpu(ref);
+	__percpu_ref_switch_mode(ref, NULL);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);