-rw-r--r--  include/linux/percpu-refcount.h  24
-rw-r--r--  lib/percpu-refcount.c            37
2 files changed, 31 insertions, 30 deletions
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ee8325122dbd..5df6784bd9d2 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -55,7 +55,7 @@ struct percpu_ref;
 typedef void (percpu_ref_func_t)(struct percpu_ref *);
 
 struct percpu_ref {
-	atomic_t count;
+	atomic_long_t count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
@@ -97,7 +97,7 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
  * branches as it can't assume that @ref->pcpu_count is not NULL.
  */
 static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned __percpu **pcpu_countp)
+				    unsigned long __percpu **pcpu_countp)
 {
 	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
 
@@ -107,7 +107,7 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
 	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
 		return false;
 
-	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	*pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
 	return true;
 }
 
@@ -119,14 +119,14 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
-		atomic_inc(&ref->count);
+		atomic_long_inc(&ref->count);
 
 	rcu_read_unlock_sched();
 }
@@ -142,7 +142,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
@@ -151,7 +151,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
-		ret = atomic_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
 	rcu_read_unlock_sched();
@@ -175,7 +175,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
@@ -199,13 +199,13 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
-	else if (unlikely(atomic_dec_and_test(&ref->count)))
+	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
@@ -219,11 +219,11 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count;
+	unsigned long __percpu *pcpu_count;
 
 	if (__pcpu_ref_alive(ref, &pcpu_count))
 		return false;
-	return !atomic_read(&ref->count);
+	return !atomic_long_read(&ref->count);
 }
 
 #endif
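
For readers unfamiliar with the interface being converted above, here is a minimal usage sketch. The percpu_ref calls and the init/release contract come from this header; "struct my_obj" and its helper functions are hypothetical:

/*
 * Hypothetical example only: "struct my_obj" and its functions are made up;
 * the percpu_ref calls follow the interface declared in the header above.
 */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* Runs once the refcount drops to zero after percpu_ref_kill(); must not
 * sleep, since it may be called from RCU callback context. */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* starts in percpu mode, holding one reference */
	if (percpu_ref_init(&obj->ref, my_obj_release, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

Each user brackets its access with percpu_ref_get()/percpu_ref_put(); percpu_ref_kill() is called exactly once at shutdown to switch the counter out of percpu mode so the count can actually reach zero and my_obj_release() can run.
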
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 70d28c91f35a..559ee0b20318 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -25,15 +25,15 @@
  * works.
  *
  * Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS (1U << 31)
+#define PCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
 
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 {
-	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
 }
 
 /**
@@ -43,7 +43,7 @@ static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 * @gfp: allocation mask to use
  *
  * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * analagous to atomic_long_set(ref, 1).
  *
  * Note that @release must not sleep - it may potentially be called from RCU
  * callback context by percpu_ref_kill().
@@ -51,9 +51,9 @@ static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    gfp_t gfp)
 {
-	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned, gfp);
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
 	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
@@ -75,13 +75,13 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
 	BUG_ON(!pcpu_count);
 	WARN_ON(!percpu_ref_is_zero(ref));
 
-	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired with
@@ -109,7 +109,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
 		free_percpu(pcpu_count);
@@ -121,14 +121,15 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
-	unsigned count = 0;
+	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+	pr_debug("global %ld pcpu %ld",
+		 atomic_long_read(&ref->count), (long)count);
 
 	/*
 	 * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -143,11 +144,11 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * time is equivalent and saves us atomic operations:
 	 */
 
-	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
 
-	WARN_ONCE(atomic_read(&ref->count) <= 0,
-		  "percpu ref (%pf) <= 0 (%i) after killed",
-		  ref->release, atomic_read(&ref->count));
+	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+		  "percpu ref (%pf) <= 0 (%ld) after killed",
+		  ref->release, atomic_long_read(&ref->count));
 
 	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
 	if (ref->confirm_kill)