author     Tejun Heo <tj@kernel.org>    2014-09-24 13:31:48 -0400
committer  Tejun Heo <tj@kernel.org>    2014-09-24 13:31:48 -0400
commit     9e804d1f58da1eca079f796347c1cf1d1df564e2
tree       5fc958822d1d44509a9003be88490ede36ae12f9 /lib
parent     eecc16ba9a49b05dd847a317af166a6728eb56ca
percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
percpu_ref will be restructured so that percpu/atomic mode switching
and reference killing are decoupled. In preparation, do the following
renames.
* percpu_ref->confirm_kill -> percpu_ref->confirm_switch
* __PERCPU_REF_DEAD -> __PERCPU_REF_ATOMIC
* __percpu_ref_alive() -> __ref_is_percpu()
This patch is pure rename and doesn't introduce any functional
changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
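[Editor's note] For orientation, below is a sketch of the read-side helper after this rename. It is reconstructed from the percpu-refcount code of this era and lightly simplified; it is not part of this patch's hunks, so treat it as illustrative rather than authoritative.

/*
 * Illustrative sketch (not part of this patch): the renamed helper
 * returns true and hands back the percpu counter pointer while @ref
 * is still in percpu mode, and false once __PERCPU_REF_ATOMIC is set.
 */
static bool __ref_is_percpu(struct percpu_ref *ref,
			    unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);

	/* paired with smp_store_release() in percpu_ref_reinit() */
	smp_read_barrier_depends();

	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}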
Diffstat (limited to 'lib')
-rw-r--r--  lib/percpu-refcount.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 5aea6b7356c7..7aef590c1ef8 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -34,7 +34,7 @@
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
 	return (unsigned long __percpu *)
-		(ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
 /**
@@ -80,7 +80,7 @@ void percpu_ref_exit(struct percpu_ref *ref)
 
 	if (percpu_count) {
 		free_percpu(percpu_count);
-		ref->percpu_count_ptr = __PERCPU_REF_DEAD;
+		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC;
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -117,8 +117,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 		  ref->release, atomic_long_read(&ref->count));
 
 	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
-	if (ref->confirm_kill)
-		ref->confirm_kill(ref);
+	if (ref->confirm_switch)
+		ref->confirm_switch(ref);
 
 	/*
 	 * Now we're in single atomic_long_t mode with a consistent
@@ -145,11 +145,11 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
-	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
-	ref->confirm_kill = confirm_kill;
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+	ref->confirm_switch = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
@@ -178,14 +178,14 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 
 	/*
 	 * Restore per-cpu operation.  smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __percpu_ref_alive() and
-	 * guarantees that the zeroing is visible to all percpu accesses
-	 * which can see the following __PERCPU_REF_DEAD clearing.
+	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following __PERCPU_REF_ATOMIC clearing.
 	 */
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(percpu_count, cpu) = 0;
 
 	smp_store_release(&ref->percpu_count_ptr,
-			  ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
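[Editor's note] To make the confirm_switch path concrete, here is a hypothetical caller of percpu_ref_kill_and_confirm(). The my_queue/my_confirm_switch names and the drain flow are invented for illustration; the percpu_ref and completion calls match the kernel API at the time of this commit.

/* Hedged usage sketch (hypothetical caller, not part of this patch):
 * kill a percpu_ref and wait until every CPU has observed the switch
 * to atomic mode. */
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct my_queue {
	struct percpu_ref	refs;
	struct completion	drain_done;
};

static void my_confirm_switch(struct percpu_ref *ref)
{
	/* Invoked from percpu_ref_kill_rcu() once @ref is in atomic mode
	 * and visibly dead on all CPUs -- this is ref->confirm_switch
	 * (formerly ->confirm_kill) firing. */
	struct my_queue *q = container_of(ref, struct my_queue, refs);

	complete(&q->drain_done);
}

static void my_queue_drain(struct my_queue *q)
{
	init_completion(&q->drain_done);
	percpu_ref_kill_and_confirm(&q->refs, my_confirm_switch);
	wait_for_completion(&q->drain_done);
	/* From here on, percpu_ref_tryget_live() on q->refs fails and
	 * the remaining count lives in the single atomic_long_t. */
}

The callback fires only after an RCU grace period guarantees every CPU sees __PERCPU_REF_ATOMIC, which is exactly the mode-switch hand-off the renamed field now describes.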