author:    Tejun Heo <tj@kernel.org>    2014-09-24 13:31:48 -0400
committer: Tejun Heo <tj@kernel.org>    2014-09-24 13:31:48 -0400
commit:    eecc16ba9a49b05dd847a317af166a6728eb56ca (patch)
tree:      e61dbe61074cfe6e09593dc3f60d3fb7bdd454e7
parent:    6251f9976af7656b6970a8820153f356430f5de2 (diff)
percpu_ref: replace pcpu_ prefix with percpu_
percpu_ref uses the pcpu_ prefix for internal identifiers and percpu_ for
externally visible ones. This is the same convention used in the
percpu allocator implementation. It works fine there, but percpu_ref
doesn't have much internal-only code, and the scattered uses of the
pcpu_ prefix are more confusing than helpful.
This patch replaces all pcpu_ prefixes with percpu_. It is a pure
rename with no functional change. Note that PCPU_REF_DEAD is
renamed to __PERCPU_REF_DEAD to signify that the flag is internal.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
 -rw-r--r--  include/linux/percpu-refcount.h | 46
 -rw-r--r--  lib/percpu-refcount.c           | 56

 2 files changed, 52 insertions(+), 50 deletions(-)
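Because the rename is confined to percpu_ref internals, callers of the externally visible API are untouched. For context, here is a minimal sketch of typical caller-side usage against the interface shown in this patch; the `struct foo` object and the `foo_*` helpers are hypothetical and not part of the kernel tree:

```c
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* Hypothetical object whose lifetime is managed by a percpu_ref. */
struct foo {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* Runs once the last reference is dropped after percpu_ref_kill(). */
static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	percpu_ref_exit(&foo->ref);	/* free the percpu counter */
	kfree(foo);
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	/* Starts in percpu mode holding one initial reference. */
	if (percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}

static void foo_use(struct foo *foo)
{
	percpu_ref_get(&foo->ref);	/* this_cpu_inc() while in percpu mode */
	/* ... use foo ... */
	percpu_ref_put(&foo->ref);	/* this_cpu_dec(), or release on final put */
}

static void foo_destroy(struct foo *foo)
{
	/* Switch to atomic mode and drop the initial reference. */
	percpu_ref_kill(&foo->ref);
}
```

The get/put fast paths stay per-CPU until percpu_ref_kill() flips the ref into atomic mode, which is why a rename of the internal symbols has no effect on callers.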
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d44b027f74fd..3d463a39e0f7 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
  *
  * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
  * than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
  *
  * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
  * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -60,7 +60,7 @@ struct percpu_ref {
 	 * The low bit of the pointer indicates whether the ref is in percpu
 	 * mode; if set, then get/put will manipulate the atomic_t.
 	 */
-	unsigned long pcpu_count_ptr;
+	unsigned long percpu_count_ptr;
 	percpu_ref_func_t *release;
 	percpu_ref_func_t *confirm_kill;
 	struct rcu_head rcu;
@@ -88,26 +88,26 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_DEAD 1
+#define __PERCPU_REF_DEAD 1
 
 /*
  * Internal helper. Don't use outside percpu-refcount proper. The
  * function doesn't return the pointer and let the caller test it for NULL
  * because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
  */
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
-				    unsigned long __percpu **pcpu_countp)
+static inline bool __percpu_ref_alive(struct percpu_ref *ref,
+				      unsigned long __percpu **percpu_countp)
 {
-	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
 
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
 
-	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+	if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
 		return false;
 
-	*pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
+	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
 	return true;
 }
 
@@ -121,12 +121,12 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_inc(*pcpu_count);
+	if (__percpu_ref_alive(ref, &percpu_count))
+		this_cpu_inc(*percpu_count);
 	else
 		atomic_long_inc(&ref->count);
 
@@ -144,13 +144,13 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__percpu_ref_alive(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else {
 		ret = atomic_long_inc_not_zero(&ref->count);
@@ -178,13 +178,13 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 	int ret = false;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count)) {
-		this_cpu_inc(*pcpu_count);
+	if (__percpu_ref_alive(ref, &percpu_count)) {
+		this_cpu_inc(*percpu_count);
 		ret = true;
 	}
 
@@ -204,12 +204,12 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
-		this_cpu_dec(*pcpu_count);
+	if (__percpu_ref_alive(ref, &percpu_count))
+		this_cpu_dec(*percpu_count);
 	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
 		ref->release(ref);
 
@@ -226,9 +226,9 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count;
+	unsigned long __percpu *percpu_count;
 
-	if (__pcpu_ref_alive(ref, &pcpu_count))
+	if (__percpu_ref_alive(ref, &percpu_count))
 		return false;
 	return !atomic_long_read(&ref->count);
 }
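The header above leans on the fact that the percpu allocation is at least word-aligned, so bit 0 of percpu_count_ptr is always free to carry the __PERCPU_REF_DEAD flag. Below is a standalone sketch of that pointer-tagging idea in ordinary userspace C rather than kernel code; the names are illustrative only:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define REF_DEAD 1UL	/* plays the role of __PERCPU_REF_DEAD */

static bool is_dead(uintptr_t tagged)      { return tagged & REF_DEAD; }
static long *counter_of(uintptr_t tagged)  { return (long *)(tagged & ~REF_DEAD); }

int main(void)
{
	/* malloc() returns suitably aligned memory, so bit 0 starts out clear. */
	long *counter = malloc(sizeof(*counter));
	uintptr_t ref;

	if (!counter)
		return 1;
	ref = (uintptr_t)counter;
	assert(!(ref & REF_DEAD));

	*counter_of(ref) = 3;
	printf("dead=%d count=%ld\n", is_dead(ref), *counter_of(ref));

	ref |= REF_DEAD;	/* mark dead; the pointer is still recoverable */
	printf("dead=%d count=%ld\n", is_dead(ref), *counter_of(ref));

	free(counter_of(ref));
	return 0;
}
```

__percpu_ref_alive() makes the same test: a set bit means get/put fall back to the shared atomic_long_t, a clear bit means the stored value is a valid percpu pointer.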
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 8ef3f5c20df6..5aea6b7356c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -11,8 +11,8 @@
  * percpu counters will all sum to the correct value
  *
  * (More precisely: because moduler arithmatic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
  * overflow or underflow).
  *
  * The real trick to implementing percpu refcounts is shutdown. We can't detect
@@ -29,11 +29,12 @@
  * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
 
-static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
-	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	return (unsigned long __percpu *)
+		(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 
 /**
@@ -51,10 +52,11 @@ static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 		    gfp_t gfp)
 {
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
-	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
-	if (!ref->pcpu_count_ptr)
+	ref->percpu_count_ptr =
+		(unsigned long)alloc_percpu_gfp(unsigned long, gfp);
+	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -74,11 +76,11 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
-	if (pcpu_count) {
-		free_percpu(pcpu_count);
-		ref->pcpu_count_ptr = PCPU_REF_DEAD;
+	if (percpu_count) {
+		free_percpu(percpu_count);
+		ref->percpu_count_ptr = __PERCPU_REF_DEAD;
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -86,14 +88,14 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	unsigned long count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		count += *per_cpu_ptr(pcpu_count, cpu);
+		count += *per_cpu_ptr(percpu_count, cpu);
 
-	pr_debug("global %ld pcpu %ld",
+	pr_debug("global %ld percpu %ld",
 		 atomic_long_read(&ref->count), (long)count);
 
 	/*
@@ -108,7 +110,7 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
 
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
 		  "percpu ref (%pf) <= 0 (%ld) after killed",
@@ -143,10 +145,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
-	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
@@ -166,24 +168,24 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	int cpu;
 
-	BUG_ON(!pcpu_count);
+	BUG_ON(!percpu_count);
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following PCPU_REF_DEAD clearing.
+	 * smp_read_barrier_depends() in __percpu_ref_alive() and
+	 * guarantees that the zeroing is visible to all percpu accesses
+	 * which can see the following __PERCPU_REF_DEAD clearing.
	 */
 	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(pcpu_count, cpu) = 0;
+		*per_cpu_ptr(percpu_count, cpu) = 0;
 
-	smp_store_release(&ref->pcpu_count_ptr,
-			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	smp_store_release(&ref->percpu_count_ptr,
+			  ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
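The shutdown logic above depends on the two properties spelled out in the file comment: unsigned per-CPU counters may individually wrap without harm, and PERCPU_COUNT_BIAS keeps the shared atomic from reaching zero before the percpu counts are folded in. A standalone arithmetic sketch of just that folding step, in plain userspace C with 64-bit counters assumed:

```c
#include <inttypes.h>
#include <stdio.h>

/* Mirrors PERCPU_COUNT_BIAS for 64-bit counters: the top bit. */
#define BIAS (UINT64_C(1) << 63)

int main(void)
{
	uint64_t atomic = 1 + BIAS;	/* as set up by percpu_ref_init() */
	uint64_t pcpu[2] = { 0, 0 };	/* two simulated CPUs */

	pcpu[0] += 3;			/* three gets land on CPU 0 */
	pcpu[1] -= 2;			/* two puts land on CPU 1 and wrap */

	/* percpu_ref_kill_rcu(): fold the percpu counts in and drop the bias. */
	uint64_t sum = pcpu[0] + pcpu[1];	/* wraps back to 1 */
	atomic += sum - BIAS;

	/* 1 initial + 3 gets - 2 puts = 2 live references remain. */
	printf("refcount after fold: %" PRIu64 "\n", atomic);	/* prints 2 */
	return 0;
}
```

Puts that arrive after the ref is marked dead but before the fold decrement the atomic directly; the bias guarantees it cannot reach zero prematurely, which is exactly what the comment above atomic_long_add() describes.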