author		Tejun Heo <tj@kernel.org>	2014-09-24 13:31:48 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-24 13:31:48 -0400
commit		eecc16ba9a49b05dd847a317af166a6728eb56ca (patch)
tree		e61dbe61074cfe6e09593dc3f60d3fb7bdd454e7 /lib/percpu-refcount.c
parent		6251f9976af7656b6970a8820153f356430f5de2 (diff)
percpu_ref: replace pcpu_ prefix with percpu_
percpu_ref uses the pcpu_ prefix for internal identifiers and percpu_ for externally visible ones. This is the same convention used in the percpu allocator implementation. It works fine there, but percpu_ref doesn't have much internal-only machinery, and the scattered uses of the pcpu_ prefix are more confusing than helpful.

This patch replaces all pcpu_ prefixes with percpu_. It is a pure rename with no functional change. Note that PCPU_REF_DEAD is renamed to __PERCPU_REF_DEAD to signify that the flag is internal.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
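To make the convention concrete: a minimal userspace sketch (the struct and helper names here are made up, not code from this patch) of externally visible percpu_ names alongside the one double-underscore internal flag.

	/*
	 * Hypothetical userspace sketch of the naming convention this patch
	 * settles on: external names keep the plain percpu_ prefix, the one
	 * internal flag gets __ to signal that it is private.
	 */
	#include <stdio.h>

	#define __PERCPU_REF_DEAD	1UL	/* internal-only flag */

	struct percpu_ref_sketch {
		unsigned long percpu_count_ptr;	/* field name after the rename */
	};

	/* externally visible helper: plain percpu_ prefix */
	static int percpu_ref_is_dying(const struct percpu_ref_sketch *ref)
	{
		return !!(ref->percpu_count_ptr & __PERCPU_REF_DEAD);
	}

	int main(void)
	{
		struct percpu_ref_sketch ref = { 0 };

		printf("dying before kill: %d\n", percpu_ref_is_dying(&ref));
		ref.percpu_count_ptr |= __PERCPU_REF_DEAD;	/* kill */
		printf("dying after kill:  %d\n", percpu_ref_is_dying(&ref));
		return 0;
	}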
Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r--	lib/percpu-refcount.c	56
1 file changed, 29 insertions(+), 27 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 8ef3f5c20df6..5aea6b7356c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -11,8 +11,8 @@
  * percpu counters will all sum to the correct value
  *
  * (More precisely: because moduler arithmatic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
  * overflow or underflow).
  *
  * The real trick to implementing percpu refcounts is shutdown. We can't detect
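The parenthetical above is easy to verify outside the kernel. A standalone sketch (mine, not from the patch): two unsigned longs stand in for two CPUs' counter slots, one of them wraps under zero, and the sum still matches what a single shared counter would hold.

	/*
	 * Standalone demo: unsigned addition commutes mod 2^BITS_PER_LONG,
	 * so per-slot wraparound cancels out in the total.
	 */
	#include <assert.h>

	int main(void)
	{
		unsigned long cpu0 = 0, cpu1 = 0;

		cpu0 -= 1;	/* a put on cpu0 underflows to ULONG_MAX */
		cpu1 += 1;	/* the matching get happened on cpu1 */

		/* ULONG_MAX + 1 wraps back to 0, the single-counter answer */
		assert(cpu0 + cpu1 == 0);
		return 0;
	}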
@@ -29,11 +29,12 @@
  * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS		(1LU << (BITS_PER_LONG - 1))
+#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 
-static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
-	return (unsigned long __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	return (unsigned long __percpu *)
+		(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 
 /**
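The masking in percpu_count_ptr() works because the percpu allocation is at least word-aligned, leaving the low bit free to carry __PERCPU_REF_DEAD. A hypothetical userspace analogue (calloc standing in for the percpu allocator, and pointers assumed to fit in an unsigned long):

	#include <assert.h>
	#include <stdlib.h>

	#define __PERCPU_REF_DEAD	1UL

	int main(void)
	{
		unsigned long *counters = calloc(4, sizeof(*counters));
		unsigned long tagged = (unsigned long)counters;

		/* allocator alignment keeps bit 0 clear, so it can hold the flag */
		assert((tagged & __PERCPU_REF_DEAD) == 0);

		tagged |= __PERCPU_REF_DEAD;	/* mark the ref dead in place */

		/* masking the flag off recovers the pointer, as the function above does */
		assert((unsigned long *)(tagged & ~__PERCPU_REF_DEAD) == counters);

		free(counters);
		return 0;
	}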
@@ -51,10 +52,11 @@ static unsigned long __percpu *pcpu_count_ptr(struct percpu_ref *ref)
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		     gfp_t gfp)
 {
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
-	ref->pcpu_count_ptr = (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
-	if (!ref->pcpu_count_ptr)
+	ref->percpu_count_ptr =
+		(unsigned long)alloc_percpu_gfp(unsigned long, gfp);
+	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -74,11 +76,11 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
-	if (pcpu_count) {
-		free_percpu(pcpu_count);
-		ref->pcpu_count_ptr = PCPU_REF_DEAD;
+	if (percpu_count) {
+		free_percpu(percpu_count);
+		ref->percpu_count_ptr = __PERCPU_REF_DEAD;
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -86,14 +88,14 @@ EXPORT_SYMBOL_GPL(percpu_ref_exit);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	unsigned long count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		count += *per_cpu_ptr(pcpu_count, cpu);
+		count += *per_cpu_ptr(percpu_count, cpu);
 
-	pr_debug("global %ld pcpu %ld",
+	pr_debug("global %ld percpu %ld",
 		 atomic_long_read(&ref->count), (long)count);
 
 	/*
@@ -108,7 +110,7 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
 
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
 		  "percpu ref (%pf) <= 0 (%ld) after killed",
@@ -143,10 +145,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
-	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
@@ -166,24 +168,24 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *pcpu_count = pcpu_count_ptr(ref);
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	int cpu;
 
-	BUG_ON(!pcpu_count);
+	BUG_ON(!percpu_count);
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
-	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following PCPU_REF_DEAD clearing.
+	 * smp_read_barrier_depends() in __percpu_ref_alive() and
+	 * guarantees that the zeroing is visible to all percpu accesses
+	 * which can see the following __PERCPU_REF_DEAD clearing.
 	 */
 	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(pcpu_count, cpu) = 0;
+		*per_cpu_ptr(percpu_count, cpu) = 0;
 
-	smp_store_release(&ref->pcpu_count_ptr,
-			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+	smp_store_release(&ref->percpu_count_ptr,
+			  ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
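The pairing described in the rewritten comment is the usual publish-then-flip ordering pattern. A loose C11 userspace analogue (acquire stands in for the weaker smp_read_barrier_depends(); all names here are mine, and a single reinit writer is assumed):

	#include <stdatomic.h>

	#define DEAD	1UL

	static unsigned long counter;		/* stands in for one percpu slot */
	static atomic_ulong count_ptr_bits;	/* stands in for percpu_count_ptr */

	static void writer_reinit(void)
	{
		counter = 0;			/* zero the counters first */
		/* release store: the zeroing above is ordered before the flag clear */
		atomic_store_explicit(&count_ptr_bits,
				      atomic_load_explicit(&count_ptr_bits,
							   memory_order_relaxed) & ~DEAD,
				      memory_order_release);
	}

	static int reader_sees_zeroed(void)
	{
		/* acquire load pairs with the release store in writer_reinit() */
		unsigned long bits = atomic_load_explicit(&count_ptr_bits,
							  memory_order_acquire);
		if (!(bits & DEAD))
			return counter == 0;	/* guaranteed by the pairing */
		return 1;			/* still dead: nothing to check */
	}

	int main(void)
	{
		atomic_store_explicit(&count_ptr_bits, DEAD, memory_order_relaxed);
		counter = 7;	/* stale pre-kill state */
		writer_reinit();
		return !reader_sees_zeroed();	/* exits 0: reader saw zeroes */
	}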