Diffstat (limited to 'include/linux/percpu-refcount.h')
 -rw-r--r--  include/linux/percpu-refcount.h | 40 ++++++++++++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 95961f0bf62d..5d8920e23073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
-		__this_cpu_inc(*pcpu_count);
+		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
 
@@ -121,6 +121,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock_sched();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		this_cpu_inc(*pcpu_count);
+		ret = true;
+	} else {
+		ret = atomic_inc_not_zero(&ref->count);
+	}
+
+	rcu_read_unlock_sched();
+
+	return ret;
+}
+
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
@@ -128,8 +158,10 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
  * used. After the confirm_kill callback is invoked, it's guaranteed that
  * no new reference will be given out by percpu_ref_tryget().
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 	int ret = false;
@@ -139,7 +171,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
-		__this_cpu_inc(*pcpu_count);
+		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
 
@@ -164,7 +196,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
-		__this_cpu_dec(*pcpu_count);
+		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
 
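As a rough usage sketch of the two try-get variants this patch leaves in place (not part of the patch itself; struct foo, foo_release() and the callers are hypothetical, and percpu_ref_init() is assumed to have the two-argument signature of this tree, which later kernels changed):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* called once the count reaches zero after percpu_ref_kill() */
static void foo_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct foo, ref));
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	/* two-arg percpu_ref_init() as of this tree; returns 0 or -ENOMEM */
	if (foo && percpu_ref_init(&foo->ref, foo_release)) {
		kfree(foo);
		foo = NULL;
	}
	return foo;
}

/*
 * Lookup-style path: refuse new users once the ref has been killed.
 * This is the behavior the old percpu_ref_tryget() had, now provided
 * by percpu_ref_tryget_live().
 */
static bool foo_get_live(struct foo *foo)
{
	return percpu_ref_tryget_live(&foo->ref);
}

/*
 * Internal path: the new percpu_ref_tryget() keeps succeeding after
 * percpu_ref_kill() as long as the count hasn't drained to zero,
 * e.g. to pin @foo while flushing in-flight work.
 */
static bool foo_get(struct foo *foo)
{
	return percpu_ref_tryget(&foo->ref);
}

static void foo_put(struct foo *foo)
{
	percpu_ref_put(&foo->ref);
}

Either try-get only makes sense while the caller can already guarantee that @foo itself remains accessible, e.g. under RCU or a lookup-table lock, which is what the "The caller is responsible for ensuring that @ref stays accessible" comments added by this patch spell out.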