author		Tejun Heo <tj@kernel.org>	2014-09-24 13:31:49 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-24 13:31:49 -0400
commit		f47ad45784611297b699f3dffb6c7222b76afe64 (patch)
tree		9497c0a4bad4c6b97cfd08bf22a8b2313606ce6a /lib
parent		490c79a65708873228cf114cf00e32c204e4e907 (diff)
percpu_ref: decouple switching to percpu mode and reinit
percpu_ref has treated the dropping of the base reference and switching
to atomic mode as an integral operation; however, there's nothing
inherent tying the two together.

The use cases for percpu_ref have been expanding continuously.  While
the current init/kill/reinit/exit model can cover a lot, the coupling
of kill/reinit with atomic/percpu mode switching is turning out to be
too restrictive for use cases where many percpu_refs are created and
destroyed back-to-back with only some of them reaching extended
operation.  The coupling also makes implementing always-atomic debug
mode difficult.

This patch separates out percpu mode switching into
percpu_ref_switch_to_percpu() and reimplements percpu_ref_reinit() on
top of it.

* DEAD still requires ATOMIC.  A dead ref can't be switched to percpu
  mode w/o going through reinit.

v2: __percpu_ref_switch_to_percpu() was missing static.  Fixed.
    Reported by Fengguang aka kbuild test robot.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: kbuild test robot <fengguang.wu@intel.com>
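[Editor's note] To make the decoupling concrete, below is a minimal sketch of the lifecycle this series enables.  It is illustrative only: my_release(), my_lifecycle_example() and the waiting logic are invented, and it assumes the percpu_ref API as of this point in the series (percpu_ref_switch_to_atomic() comes from the parent commit, percpu_ref_switch_to_percpu() from this one).

	#include <linux/percpu-refcount.h>

	/* hypothetical release callback, invoked once the count reaches zero */
	static void my_release(struct percpu_ref *ref)
	{
		/* e.g. complete() a waiter that frees the enclosing object */
	}

	static int my_lifecycle_example(struct percpu_ref *ref)
	{
		int ret = percpu_ref_init(ref, my_release, GFP_KERNEL);

		if (ret)
			return ret;

		/*
		 * Mode switching is now independent of kill/reinit: the ref
		 * can bounce between atomic and percpu operation while gets
		 * and puts continue concurrently on other CPUs.
		 */
		percpu_ref_switch_to_atomic(ref, NULL);	/* exact global count */
		percpu_ref_switch_to_percpu(ref);	/* back to the fast path */

		percpu_ref_kill(ref);		/* DEAD, stays in atomic mode */
		/* ... wait for my_release() to report the count hit zero ... */

		percpu_ref_reinit(ref);		/* clears DEAD, retakes the base
						 * ref, switches back to percpu */
		percpu_ref_kill(ref);
		/* ... wait for my_release() again, then tear down ... */
		percpu_ref_exit(ref);
		return 0;
	}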
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu-refcount.c	73
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 6e0d14366c5d..5a6d43baccc5 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -206,40 +206,54 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 	__percpu_ref_switch_to_atomic(ref, confirm_switch);
 }
 
-/**
- * percpu_ref_reinit - re-initialize a percpu refcount
- * @ref: perpcu_ref to re-initialize
- *
- * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init().  @ref must have been initialized successfully, killed
- * and reached 0 but not exited.
- *
- * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
- * this function is in progress.
- */
-void percpu_ref_reinit(struct percpu_ref *ref)
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	int cpu;
 
 	BUG_ON(!percpu_count);
-	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
-	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
+	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+		return;
+
+	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
 	 * Restore per-cpu operation.  smp_store_release() is paired with
 	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
 	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following __PERCPU_REF_ATOMIC_DEAD clearing.
+	 * the following __PERCPU_REF_ATOMIC clearing.
 	 */
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(percpu_count, cpu) = 0;
 
 	smp_store_release(&ref->percpu_count_ptr,
-			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
+			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+/**
+ * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
+ * @ref: percpu_ref to switch to percpu mode
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * To re-use an expired ref, use percpu_ref_reinit().
+ *
+ * Switch @ref to percpu mode.  This function may be invoked concurrently
+ * with all the get/put operations and can safely be mixed with kill and
+ * reinit operations.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @ref is in the process of switching to atomic mode
+ * by percpu_ref_switch_atomic().
+ */
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
+	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+		__percpu_ref_switch_to_percpu(ref);
 }
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
 
 /**
  * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
@@ -253,8 +267,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
  * percpu_ref_tryget_live() for details.
  *
  * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode by percpu_ref_switch_atomic().
+ * but it may block if @confirm_kill is specified and @ref is in the
+ * process of switching to atomic mode by percpu_ref_switch_atomic().
  *
  * Due to the way percpu_ref is implemented, @confirm_switch will be called
  * after at least one full sched RCU grace period has passed but this is an
@@ -271,3 +285,24 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 	percpu_ref_put(ref);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: perpcu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init().  @ref must have been initialized successfully and
+ * reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_reinit(struct percpu_ref *ref)
+{
+	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
+	percpu_ref_get(ref);
+	__percpu_ref_switch_to_percpu(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
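
[Editor's note] On the always-atomic debug mode mentioned in the commit message: with mode switching decoupled from kill/reinit, such a mode could be layered on from outside the core code.  A hedged sketch, with the config symbol and helper invented for illustration:

	/*
	 * Hypothetical helper: in a debug build, pin @ref in the slower but
	 * exactly-counted atomic mode right after init or reinit.  The
	 * switch may run concurrently with gets/puts, kill and reinit.
	 */
	static inline void my_ref_debug_pin(struct percpu_ref *ref)
	{
	#ifdef CONFIG_MY_PERCPU_REF_DEBUG	/* invented config symbol */
		percpu_ref_switch_to_atomic(ref, NULL);
	#endif
	}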