Diffstat (limited to 'include/linux/percpu-refcount.h'):

 include/linux/percpu-refcount.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 21 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 5d8920e23073..3dfbf237cd8f 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,11 +57,9 @@ struct percpu_ref {
 	atomic_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
-	 * mode; if set, then get/put will manipulate the atomic_t (this is a
-	 * hack because we need to keep the pointer around for
-	 * percpu_ref_kill_rcu())
+	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned __percpu	*pcpu_count;
+	unsigned long		pcpu_count_ptr;
 	percpu_ref_func_t	*release;
 	percpu_ref_func_t	*confirm_kill;
 	struct rcu_head		rcu;
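
The new field packs the percpu pointer and the DEAD flag into one unsigned long: percpu allocations are at least word aligned, so bit 0 is free to carry state. A minimal userspace sketch of the low-bit tagging idea (names are illustrative, not kernel API):

	/* Userspace sketch of low-bit pointer tagging; illustrative only. */
	#include <assert.h>

	#define REF_DEAD_BIT	1UL

	static unsigned long mark_dead(unsigned long ptr)
	{
		return ptr | REF_DEAD_BIT;	/* set bit 0: atomic mode */
	}

	static unsigned long pointer_bits(unsigned long ptr)
	{
		return ptr & ~REF_DEAD_BIT;	/* recover the pointer */
	}

	int main(void)
	{
		unsigned counters[4];		/* stand-in for percpu storage */
		unsigned long p = (unsigned long)counters;

		assert(!(p & REF_DEAD_BIT));	/* alignment keeps bit 0 clear */
		p = mark_dead(p);
		assert(p & REF_DEAD_BIT);
		assert(pointer_bits(p) == (unsigned long)counters);
		return 0;
	}
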
@@ -69,7 +67,8 @@ struct percpu_ref {
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release);
-void percpu_ref_cancel_init(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
+void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
 
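
percpu_ref_cancel_init() gives way to a symmetric pair: percpu_ref_exit() frees the percpu counter (typically from the release callback or an init-failure path), and percpu_ref_reinit() lets a killed, drained ref return to percpu mode. A hedged lifecycle sketch against the post-patch API; my_obj and its helpers are illustrative, not part of the patch:

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_obj {				/* hypothetical object */
		struct percpu_ref	ref;
		/* ... payload ... */
	};

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		percpu_ref_exit(ref);		/* free the percpu counter */
		kfree(obj);
	}

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		if (percpu_ref_init(&obj->ref, my_obj_release)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	static void my_obj_destroy(struct my_obj *obj)
	{
		/* Drop the initial ref; my_obj_release() runs at zero. */
		percpu_ref_kill(&obj->ref);
	}
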
@@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_STATUS_BITS	2
-#define PCPU_STATUS_MASK	((1 << PCPU_STATUS_BITS) - 1)
-#define PCPU_REF_PTR		0
 #define PCPU_REF_DEAD		1
 
-#define REF_STATUS(count)	(((unsigned long) count) & PCPU_STATUS_MASK)
+/*
+ * Internal helper.  Don't use outside percpu-refcount proper.  The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+				    unsigned __percpu **pcpu_countp)
+{
+	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+
+	/* paired with smp_store_release() in percpu_ref_reinit() */
+	smp_read_barrier_depends();
+
+	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+		return false;
+
+	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
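
The smp_read_barrier_depends() here pairs with an smp_store_release() on the revival side: reinit must zero the percpu counters before clearing PCPU_REF_DEAD, so a reader that sees the cleared flag also sees fresh counters. For orientation, a plausible sketch of the matching store side as added in percpu-refcount.c by this series; details are approximate and pcpu_count_ptr()/PCPU_COUNT_BIAS are internals of that file, not this header:

	void percpu_ref_reinit(struct percpu_ref *ref)
	{
		unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
		int cpu;

		BUG_ON(!pcpu_count);
		WARN_ON(!percpu_ref_is_zero(ref));

		atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

		/*
		 * Zero the percpu counters first; the release store below
		 * then guarantees __pcpu_ref_alive() cannot observe a
		 * cleared PCPU_REF_DEAD bit with stale counter values.
		 */
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(pcpu_count, cpu) = 0;

		smp_store_release(&ref->pcpu_count_ptr,
				  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
	}
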
@@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_inc(*pcpu_count);
 	else
		atomic_inc(&ref->count);
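
The bool-plus-out-param convention is what lets this fast path compile down to a single test of the DEAD bit. For contrast, a hypothetical pointer-returning variant, not in the patch, would leave the compiler with two conditionals after inlining, since it cannot prove the stored pointer is never NULL:

	/* Hypothetical variant, shown only to illustrate the comment
	 * above __pcpu_ref_alive(). */
	static inline unsigned __percpu *
	__pcpu_ref_alive_ptr(struct percpu_ref *ref)
	{
		unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);

		if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
			return NULL;		/* test #1: the DEAD bit */
		return (unsigned __percpu *)pcpu_ptr;
	}

		/* caller */
		pcpu_count = __pcpu_ref_alive_ptr(ref);
		if (pcpu_count)			/* test #2: can't be folded */
			this_cpu_inc(*pcpu_count);
		else
			atomic_inc(&ref->count);
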
@@ -133,9 +146,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
 	} else {
@@ -168,9 +179,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
 	}
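
percpu_ref_tryget_live() is the usual guard for refusing new users once shutdown has begun: unlike percpu_ref_tryget(), it fails as soon as the ref is dead, even if the count is still nonzero. A hedged usage sketch, reusing the hypothetical my_obj from the earlier sketch:

	static int my_obj_do_work(struct my_obj *obj)
	{
		if (!percpu_ref_tryget_live(&obj->ref))
			return -ENODEV;		/* ref already killed */

		/* ... obj is guaranteed to stay alive here ... */

		percpu_ref_put(&obj->ref);
		return 0;
	}
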
@@ -193,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
		ref->release(ref);
@@ -203,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 	rcu_read_unlock_sched();
 }
 
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ */
+static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+
+	if (__pcpu_ref_alive(ref, &pcpu_count))
+		return false;
+	return !atomic_read(&ref->count);
+}
+
 #endif
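
percpu_ref_is_zero() reads as "not alive, and the atomic count is zero": while the ref is still in percpu mode it returns false without summing the percpu counters, so the result is only meaningful once a kill has been confirmed. A hedged sketch of the reinit guard it enables, with a hypothetical helper on the my_obj object from above:

	static void my_obj_restart(struct my_obj *obj)
	{
		/* Only a drained, killed ref may be revived. */
		if (WARN_ON_ONCE(!percpu_ref_is_zero(&obj->ref)))
			return;
		percpu_ref_reinit(&obj->ref);	/* back to percpu mode */
	}
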