author     Tejun Heo <tj@kernel.org>    2014-06-28 08:10:13 -0400
committer  Tejun Heo <tj@kernel.org>    2014-06-28 08:10:13 -0400
commit     eae7975ddf031b3084f4a5f7d88f698aefad96fb
tree       5e5dd384eac28d12ac6ebcf197f92f543bd44c0d
parent     d630dc4c9adb41e5bd1e06df2dbeaf622469ddd5
percpu-refcount: add helpers for ->percpu_count accesses
* All four percpu_ref_*() operations implemented in the header file
  perform the same operation to determine whether the percpu_ref is
  alive and extract the percpu pointer.  Factor out the common logic
  into __pcpu_ref_alive().  This doesn't change the generated code.

* There are a couple of places in percpu-refcount.c which mask out
  PCPU_REF_DEAD to obtain the percpu pointer.  Factor this out into
  pcpu_count_ptr().

* The above changes make the WARN_ON_ONCE() conditional at the top of
  percpu_ref_kill_and_confirm() the only user of REF_STATUS().  Test
  PCPU_REF_DEAD directly and remove REF_STATUS().
This patch doesn't introduce any functional change.  (A standalone
sketch of the tagged-pointer scheme these helpers rely on follows the
sign-offs below.)
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
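
Editor's note: the helpers introduced by this patch wrap a low-bit
pointer-tagging trick. percpu_ref keeps a single pcpu_count word and
steals its lowest bit (PCPU_REF_DEAD) as a "switched to atomic mode"
flag, which works because the percpu allocation is at least 2-byte
aligned, leaving bit 0 free. The following is a minimal, userspace-only
sketch of that scheme and of the two helper shapes added by the patch;
the names fake_ref, ref_alive and ref_count_ptr are invented for the
example, and a plain unsigned * stands in for the __percpu pointer.

/*
 * Minimal sketch (not kernel code) of low-bit pointer tagging: the DEAD
 * flag lives in bit 0 of the counter pointer, so a single word encodes
 * both "which mode are we in" and "where is the counter".
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define REF_DEAD	1UL

struct fake_ref {
	unsigned long	pcpu_count;	/* pointer value, low bit = DEAD */
};

/* Counterpart of __pcpu_ref_alive(): one test, pointer via out-param. */
static bool ref_alive(struct fake_ref *ref, unsigned **countp)
{
	unsigned long p = ref->pcpu_count;

	if (p & REF_DEAD)
		return false;
	*countp = (unsigned *)p;
	return true;
}

/* Counterpart of pcpu_count_ptr(): strip the flag, keep the pointer. */
static unsigned *ref_count_ptr(struct fake_ref *ref)
{
	return (unsigned *)(ref->pcpu_count & ~REF_DEAD);
}

int main(void)
{
	unsigned *count = malloc(sizeof(*count));	/* stands in for alloc_percpu() */
	unsigned *c;
	struct fake_ref ref;

	if (!count)
		return 1;
	*count = 0;
	ref.pcpu_count = (unsigned long)count;		/* live: low bit clear */

	if (ref_alive(&ref, &c))
		(*c)++;					/* fast path */

	ref.pcpu_count |= REF_DEAD;			/* what percpu_ref_kill() does */
	if (!ref_alive(&ref, &c))
		printf("dead; count so far = %u\n", *ref_count_ptr(&ref));

	free(ref_count_ptr(&ref));
	return 0;
}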
-rw-r--r--   include/linux/percpu-refcount.h | 35
-rw-r--r--   lib/percpu-refcount.c           | 17
2 files changed, 30 insertions, 22 deletions
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index bfdeb0d48e21..b62a4ee6d6ad 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -88,10 +88,25 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_PTR		0
 #define PCPU_REF_DEAD		1
 
-#define REF_STATUS(count)	(((unsigned long) count) & PCPU_REF_DEAD)
+/*
+ * Internal helper.  Don't use outside percpu-refcount proper.  The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+				    unsigned __percpu **pcpu_countp)
+{
+	unsigned long pcpu_ptr = (unsigned long)ACCESS_ONCE(ref->pcpu_count);
+
+	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+		return false;
+
+	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
@@ -105,9 +120,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
@@ -131,9 +144,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
@@ -166,9 +177,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
@@ -191,9 +200,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
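
Editor's note: the comment added in the hunk above explains why
__pcpu_ref_alive() hands the pointer out through a parameter instead of
returning it: a returned pointer would have to be re-tested for NULL by
the caller, and the compiler cannot assume the stored pointer is
non-NULL, so it would keep two conditional branches. A rough,
hypothetical contrast of the two shapes in plain C (invented names, no
kernel types):

/*
 * Hypothetical contrast of the two helper shapes discussed above.  Per
 * the patch's comment, shape A leaves two tests in the generated code;
 * shape B needs only one test in the caller.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEAD 1UL

/* Shape A: return the (possibly NULL) pointer; caller branches again. */
static unsigned *alive_ptr(unsigned long tagged)
{
	return (tagged & DEAD) ? NULL : (unsigned *)tagged;
}

/* Shape B, mirroring __pcpu_ref_alive(): bool result + out-parameter. */
static bool alive(unsigned long tagged, unsigned **countp)
{
	if (tagged & DEAD)
		return false;
	*countp = (unsigned *)tagged;
	return true;
}

int main(void)
{
	unsigned fast = 0, slow = 0;
	unsigned long tagged = (unsigned long)&fast;	/* live: low bit clear */
	unsigned *c;

	c = alive_ptr(tagged);		/* test #1 inside the helper... */
	if (c)				/* ...test #2 in the caller */
		(*c)++;
	else
		slow++;

	if (alive(tagged, &c))		/* single test in the caller */
		(*c)++;
	else
		slow++;

	printf("fast=%u slow=%u\n", fast, slow);	/* prints fast=2 slow=0 */
	return 0;
}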
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 17bce2bccc14..087f1a04f9bc 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS		(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)((unsigned long)ref->pcpu_count & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -74,7 +79,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_cancel_init(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
 	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
@@ -82,7 +87,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
 	if (pcpu_count) {
 		for_each_possible_cpu(cpu)
 			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
@@ -90,14 +95,10 @@ EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_REF_DEAD);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
@@ -152,7 +153,7 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE((unsigned long)ref->pcpu_count & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
 	ref->pcpu_count = (unsigned __percpu *)