path: root/include/linux/percpu-refcount.h
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--	include/linux/percpu-refcount.h	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ee8325122dbd..5df6784bd9d2 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -55,7 +55,7 @@ struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
 struct percpu_ref {
-	atomic_t count;
+	atomic_long_t count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
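
The count field widens from atomic_t (int-sized) to atomic_long_t so the total cannot overflow 32 bits on 64-bit machines once the per-CPU counts are folded in. The low-bit trick the comment describes is also why the per-CPU pointer is carried as an unsigned long. A minimal userspace sketch of that tagging idea (plain C; REF_DEAD and the variable names are invented for illustration, this is not kernel code):

#include <stdio.h>
#include <stdint.h>

#define REF_DEAD 1UL	/* mirrors PCPU_REF_DEAD: low bit marks atomic mode */

int main(void)
{
	static unsigned long counter;		/* stand-in for the percpu data */
	uintptr_t tagged = (uintptr_t)&counter;	/* percpu mode: bit 0 clear */

	printf("alive: %d\n", !(tagged & REF_DEAD));	/* 1 */
	tagged |= REF_DEAD;			/* kill: switch to atomic mode */
	printf("alive: %d\n", !(tagged & REF_DEAD));	/* 0 */
	return 0;
}
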
@@ -97,7 +97,7 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
  * branches as it can't assume that @ref->pcpu_count is not NULL.
  */
 static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+				    unsigned long __percpu **pcpu_countp)
 {
 	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
 
@@ -107,7 +107,7 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
 	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
 	return true;
 }
 
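
__pcpu_ref_alive() both tests the DEAD bit and hands back the untagged per-CPU pointer, now typed unsigned long to match the atomic_long_t total. A userspace sketch, illustrative only, of why unsigned wrapping slots still sum to the exact count even when gets and puts land on different CPUs:

#include <stdio.h>

/* Two fake "CPU" slots; modulo-2^BITS arithmetic keeps the sum exact. */
int main(void)
{
	unsigned long cpu[2] = { 0, 0 };

	cpu[0] += 3;	/* three gets on CPU 0 */
	cpu[1] -= 2;	/* two puts on CPU 1: the slot wraps below zero */

	printf("sum = %lu\n", cpu[0] + cpu[1]);	/* prints 1, the true count */
	return 0;
}
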
@@ -119,14 +119,14 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
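
While the ref is alive, percpu_ref_get() is a cheap this_cpu_inc(); only after a kill does it fall back to the shared atomic_long_t. A hypothetical caller-side sketch (my_obj and my_obj_use are invented names, not part of this header):

#include <linux/percpu-refcount.h>

struct my_obj {
	struct percpu_ref ref;
};

/* Pin obj across a short section of work; the release callback runs
 * on the final put once the ref has been killed. */
static void my_obj_use(struct my_obj *obj)
{
	percpu_ref_get(&obj->ref);
	/* ... work with obj ... */
	percpu_ref_put(&obj->ref);
}
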
@@ -142,7 +142,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
@@ -151,7 +151,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -175,7 +175,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
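
percpu_ref_tryget() succeeds whenever the count is still non-zero, even after percpu_ref_kill(); percpu_ref_tryget_live() additionally fails once the ref has been killed, which suits lookup paths. A sketch reusing the hypothetical my_obj above:

/* Returns obj with a reference held, or NULL if it is shutting down. */
static struct my_obj *my_obj_lookup(struct my_obj *obj)
{
	return percpu_ref_tryget_live(&obj->ref) ? obj : NULL;
}
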
@@ -199,13 +199,13 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
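
ref->release is the percpu_ref_func_t supplied when the ref is initialized, and it fires exactly once: on the put that drops the count to zero after a kill. A hypothetical release callback (sketch only; my_obj_release is an invented name):

#include <linux/slab.h>

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}
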
@@ -219,11 +219,11 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
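
percpu_ref_is_zero() is only meaningful once the ref has left percpu mode. A teardown sketch under that assumption; real users typically signal a completion from the release callback rather than polling:

static void my_obj_shutdown(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);	/* switch the ref to atomic mode */
	while (!percpu_ref_is_zero(&obj->ref))
		cpu_relax();	/* per-CPU counts fold into the atomic after an RCU grace period */
}
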