path: root/include/linux/percpu-refcount.h
author		Tejun Heo <tj@kernel.org>	2014-09-24 13:31:48 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-24 13:31:48 -0400
commit		9e804d1f58da1eca079f796347c1cf1d1df564e2
tree		5fc958822d1d44509a9003be88490ede36ae12f9	/include/linux/percpu-refcount.h
parent		eecc16ba9a49b05dd847a317af166a6728eb56ca
percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
percpu_ref will be restructured so that percpu/atomic mode switching and reference killing are decoupled. In preparation, do the following renames.

* percpu_ref->confirm_kill -> percpu_ref->confirm_switch
* __PERCPU_REF_DEAD -> __PERCPU_REF_ATOMIC
* __percpu_ref_alive() -> __ref_is_percpu()

This patch is pure rename and doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
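For context (not part of the patch), here is a minimal userspace-style sketch of the mechanism the renamed flag describes: the per-cpu counter pointer stored in percpu_count_ptr is word-aligned, so its lowest bit is free to carry the mode flag, and the renamed helper only hands back the pointer when that bit is clear. All names below (REF_ATOMIC, dummy_ref, in_percpu_mode) are hypothetical stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define REF_ATOMIC	(1UL << 0)	/* stand-in for __PERCPU_REF_ATOMIC */

struct dummy_ref {
	unsigned long count_ptr;	/* pointer value; low bit = atomic-mode flag */
};

/* Mirrors the shape of __ref_is_percpu(): hand the counter pointer back
 * through an out-parameter and return false once the mode bit is set. */
static bool in_percpu_mode(struct dummy_ref *ref, unsigned long **countp)
{
	unsigned long ptr = ref->count_ptr;

	if (ptr & REF_ATOMIC)
		return false;
	*countp = (unsigned long *)ptr;
	return true;
}

int main(void)
{
	unsigned long *counter = calloc(1, sizeof(*counter));
	struct dummy_ref ref = { .count_ptr = (unsigned long)counter };
	unsigned long *cnt;

	printf("percpu mode: %d\n", in_percpu_mode(&ref, &cnt));	/* prints 1 */
	ref.count_ptr |= REF_ATOMIC;		/* switch to atomic mode */
	printf("percpu mode: %d\n", in_percpu_mode(&ref, &cnt));	/* prints 0 */
	free(counter);
	return 0;
}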
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--	include/linux/percpu-refcount.h	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3d463a39e0f7..910e5f72055d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -54,6 +54,11 @@
 struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
+};
+
 struct percpu_ref {
 	atomic_long_t		count;
 	/*
@@ -62,7 +67,7 @@ struct percpu_ref {
 	 */
 	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_kill;
+	percpu_ref_func_t	*confirm_switch;
 	struct rcu_head		rcu;
 };
 
@@ -88,23 +93,21 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define __PERCPU_REF_DEAD	1
-
 /*
  * Internal helper. Don't use outside percpu-refcount proper. The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
  * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __percpu_ref_alive(struct percpu_ref *ref,
-				      unsigned long __percpu **percpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+				   unsigned long __percpu **percpu_countp)
 {
 	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
 
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
@@ -125,7 +128,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_inc(*percpu_count);
 	else
 		atomic_long_inc(&ref->count);
@@ -149,7 +152,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
@@ -183,7 +186,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count)) {
+	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	}
@@ -208,7 +211,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_dec(*percpu_count);
 	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
@@ -228,7 +231,7 @@ static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
 
-	if (__percpu_ref_alive(ref, &percpu_count))
+	if (__ref_is_percpu(ref, &percpu_count))
 		return false;
 	return !atomic_long_read(&ref->count);
 }
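The hunks in percpu_ref_get(), percpu_ref_tryget*() and percpu_ref_put() above all share one caller pattern: branch once on the renamed helper, then take either the fast per-cpu path or the shared atomic fallback. Below is a hedged, self-contained sketch of that pattern only; it is single-threaded, drops the RCU protection and real per-cpu counters of the kernel code, and uses hypothetical names (dummy_ref, dummy_ref_get, shared_count) rather than the kernel API.

#include <stdatomic.h>
#include <stdbool.h>

struct dummy_ref {
	unsigned long count_ptr;	/* low bit set => atomic mode, as in the patch */
	atomic_long shared_count;	/* stand-in for percpu_ref->count */
};

static bool in_percpu_mode(struct dummy_ref *ref, unsigned long **countp)
{
	if (ref->count_ptr & 1UL)
		return false;
	*countp = (unsigned long *)ref->count_ptr;
	return true;
}

/* Same shape as percpu_ref_get() after the rename: test the mode once,
 * bump the fast counter, or fall back to the shared atomic count. */
static void dummy_ref_get(struct dummy_ref *ref)
{
	unsigned long *count;

	if (in_percpu_mode(ref, &count))
		(*count)++;					/* fast path */
	else
		atomic_fetch_add(&ref->shared_count, 1);	/* slow path */
}

int main(void)
{
	unsigned long counter = 0;
	struct dummy_ref ref = {
		.count_ptr = (unsigned long)&counter,
		.shared_count = 0,
	};

	dummy_ref_get(&ref);		/* percpu mode: counter becomes 1 */
	ref.count_ptr |= 1UL;		/* switch to atomic mode */
	dummy_ref_get(&ref);		/* atomic mode: shared_count becomes 1 */
	return 0;
}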