author		Tejun Heo <tj@kernel.org>	2014-09-24 13:31:49 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-24 13:31:49 -0400
commit		490c79a65708873228cf114cf00e32c204e4e907 (patch)
tree		b54e3d1617601a5da2273e4bfa4920beb7f9e779	/include/linux/percpu-refcount.h
parent		27344a9017cdaff82a167827da3001a0918afdc3 (diff)
percpu_ref: decouple switching to atomic mode and killing
percpu_ref has treated the dropping of the base reference and switching to
atomic mode as an integral operation; however, there's nothing inherent
tying the two together.

The use cases for percpu_ref have been expanding continuously.  While the
current init/kill/reinit/exit model can cover a lot, the coupling of
kill/reinit with atomic/percpu mode switching is turning out to be too
restrictive for use cases where many percpu_refs are created and destroyed
back-to-back with only some of them reaching extended operation.  The
coupling also makes implementing an always-atomic debug mode difficult.

This patch separates out atomic mode switching into
percpu_ref_switch_to_atomic() and reimplements
percpu_ref_kill_and_confirm() on top of it.

* The handling of __PERCPU_REF_ATOMIC and __PERCPU_REF_DEAD is now
  differentiated.  Among get/put operations, percpu_ref_tryget_live() is
  the only one which cares about DEAD.

* percpu_ref_switch_to_atomic() can be called multiple times on the same
  ref.  This means that multiple @confirm_switch callbacks may get queued
  up, which we can't handle reliably without an extra memory area.  This
  is handled by making the later invocation synchronously wait for the
  completion of the previous one.  This isn't particularly desirable, but
  such synchronous waits shouldn't happen in most cases.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
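For illustration, a minimal sketch of how a caller might use the decoupled
API.  The object type (my_obj), the release and confirm-switch callbacks,
and the quiesce/destroy flow are hypothetical; only the percpu_ref calls
themselves come from the declarations touched by this patch.

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/completion.h>
	#include <linux/percpu-refcount.h>

	/* Hypothetical object whose lifetime is guarded by a percpu_ref. */
	struct my_obj {
		struct percpu_ref	ref;
		struct completion	switch_done;
	};

	/* Called once all references are gone (after the base ref is killed). */
	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		/* A real user would also call percpu_ref_exit() from a safe context. */
		kfree(obj);
	}

	/* Invoked once all CPUs are guaranteed to use the atomic counter. */
	static void my_obj_confirm_switch(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		complete(&obj->switch_done);
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		init_completion(&obj->switch_done);
		if (percpu_ref_init(&obj->ref, my_obj_release, GFP_KERNEL)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	/*
	 * Switch to atomic counting without dropping the base reference: the
	 * object stays fully usable and percpu_ref_tryget_live() still
	 * succeeds, because only __PERCPU_REF_ATOMIC is set, not
	 * __PERCPU_REF_DEAD.
	 */
	static void my_obj_quiesce(struct my_obj *obj)
	{
		percpu_ref_switch_to_atomic(&obj->ref, my_obj_confirm_switch);
		wait_for_completion(&obj->switch_done);
	}

	/* Drop the base ref; my_obj_release() runs once the count hits zero. */
	static void my_obj_destroy(struct my_obj *obj)
	{
		percpu_ref_kill(&obj->ref);
	}

Since percpu_ref_kill_and_confirm() is now reimplemented on top of
percpu_ref_switch_to_atomic(), the quiesce/destroy split above mirrors what
the combined call does internally.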
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--	include/linux/percpu-refcount.h	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index bd9483d390b4..d1252e1335e8 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -78,9 +78,11 @@ struct percpu_ref {
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release, gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_reinit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
-void percpu_ref_reinit(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
@@ -111,7 +113,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
@@ -193,6 +195,8 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
+	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();