path: root/include/linux/percpu-refcount.h
author	Pranith Kumar <bobby.prani@gmail.com>	2014-11-21 10:05:59 -0500
committer	Tejun Heo <tj@kernel.org>	2014-11-22 09:33:17 -0500
commit	eadac03e898617521f327faf265932b73ecc3e0f (patch)
tree	955dd7f66024f1088e7672ca7a9bf9984a08f2e6	/include/linux/percpu-refcount.h
parent	cceb9bd63373061ad7b75c321808a2fb11c86545 (diff)
percpu: Replace smp_read_barrier_depends() with lockless_dereference()
Recently lockless_dereference() was added, which can be used in place of hard-coding smp_read_barrier_depends(). This patch makes that change.

Signed-off-by: Pranith Kumar <bobby.prani@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
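For context, a minimal sketch of what lockless_dereference() expands to in include/linux/compiler.h of this era (temporary variable name simplified here for illustration): it bundles the ACCESS_ONCE() load and the dependency barrier that callers previously had to open-code.

	#define lockless_dereference(p) \
	({ \
		typeof(p) _p1 = ACCESS_ONCE(p);			/* load the pointer once */ \
		smp_read_barrier_depends();			/* order later dereferences after the load */ \
		(_p1); \
	})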
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--	include/linux/percpu-refcount.h	4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..6b0c81872142 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
 				    unsigned long __percpu **percpu_countp)
 {
-	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
 	/* paired with smp_store_release() in percpu_ref_reinit() */
-	smp_read_barrier_depends();
+	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
 	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
 		return false;
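The comment kept by the hunk above notes that the read side is paired with an smp_store_release() on the write side. A rough sketch of that pairing, with illustrative names (init_percpu_counts() and new_percpu_count are hypothetical, not taken from the kernel source):

	/* Writer (illustrative): initialize the percpu data, then publish the
	 * pointer with a release store so readers that see the new value also
	 * see the initialization. */
	init_percpu_counts(new_percpu_count);		/* hypothetical helper */
	smp_store_release(&ref->percpu_count_ptr,
			  (unsigned long)new_percpu_count);

	/* Reader: lockless_dereference() performs the load plus the dependency
	 * ordering that smp_read_barrier_depends() used to supply by hand, so
	 * later dereferences through percpu_ptr are ordered after the load. */
	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);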