Diffstat (limited to 'include/linux/percpu-refcount.h')
 -rw-r--r--  include/linux/percpu-refcount.h | 34 +++++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b4337646388b..12c9b485beb7 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
 					unsigned long __percpu **percpu_countp)
 {
-	/* paired with smp_store_release() in percpu_ref_reinit() */
-	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+	unsigned long percpu_ptr;
+
+	/*
+	 * The value of @ref->percpu_count_ptr is tested for
+	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+	 * used as a pointer.  If the compiler generates a separate fetch
+	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+	 * between contaminating the pointer value, meaning that
+	 * ACCESS_ONCE() is required when fetching it.
+	 *
+	 * Also, we need a data dependency barrier to be paired with
+	 * smp_store_release() in __percpu_ref_switch_to_percpu().
+	 *
+	 * Use lockless deref which contains both.
+	 */
+	percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
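For readers unfamiliar with the primitive: lockless_dereference() at the time combined a single volatile fetch with a data dependency barrier, which is exactly the pair of guarantees the new comment asks for. The following is only a rough sketch of its contemporaneous definition (the temporary variable name is simplified); see include/linux/compiler.h for the authoritative version:

	#define lockless_dereference(p) \
	({ \
		typeof(p) _p1 = ACCESS_ONCE(p);	/* single, volatile fetch of the pointer */ \
		smp_read_barrier_depends();	/* order later dereferences after the fetch */ \
		(_p1); \
	})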
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
-	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
 		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
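As context for the hunk above, percpu_ref_tryget_live() is the tryget variant that fails once a kill has started. A typical caller looks roughly like the sketch below (obj, obj->ref and do_work() are hypothetical names, not part of this patch):

	if (percpu_ref_tryget_live(&obj->ref)) {
		/* the object is guaranteed to stay alive until the put */
		do_work(obj);
		percpu_ref_put(&obj->ref);
	} else {
		/* ref is dying or dead; refuse to start new work */
	}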
@@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref is dying or dead.
+ *
+ * This function is safe to call as long as @ref is between init and exit
+ * and the caller is responsible for synchronizing against state changes.
+ */
+static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
+{
+	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
+}
+
+/**
  * percpu_ref_is_zero - test whether a percpu refcount reached zero
  * @ref: percpu_ref to test
  *
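The new percpu_ref_is_dying() helper only reads the __PERCPU_REF_DEAD flag, so, as its kerneldoc says, the caller must provide its own synchronization against percpu_ref_kill() and percpu_ref_reinit(). A minimal, hypothetical use (obj, obj->lock and obj->ref are illustrative, not from this patch) would be to refuse new work once teardown has begun:

	spin_lock_irq(&obj->lock);
	if (percpu_ref_is_dying(&obj->ref)) {
		/* teardown already started; do not queue new work */
		spin_unlock_irq(&obj->lock);
		return -ENODEV;
	}
	/* ... start the work while still holding the lock ... */
	spin_unlock_irq(&obj->lock);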