 include/linux/percpu-refcount.h | 25 ++++++++++++++++---------
 lib/percpu-refcount.c           | 14 ++++++--------
 2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index f015f139d491..d44b027f74fd 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -115,8 +115,10 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_inc().
- */
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
 static inline void percpu_ref_get(struct percpu_ref *ref)
 {
 	unsigned long __percpu *pcpu_count;
@@ -138,12 +140,12 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * Increment a percpu refcount unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
 	unsigned long __percpu *pcpu_count;
-	int ret = false;
+	int ret;
 
 	rcu_read_lock_sched();
 
@@ -166,12 +168,13 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
  *
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
@@ -196,6 +199,8 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline void percpu_ref_put(struct percpu_ref *ref)
 {
@@ -216,6 +221,8 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
  * @ref: percpu_ref to test
  *
  * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
  */
 static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 {
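The comment added throughout the header ("safe to call as long as @ref is between init and exit") is the usage contract all of these helpers share. As an illustration only, not part of the patch, here is a minimal sketch of that lifecycle: struct foo, foo_release(), foo_create() and foo_use() are hypothetical names, and percpu_ref_init()'s signature has varied across kernel versions (a @gfp argument was added around this time, a @flags argument later), so the init call is schematic.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* called once the count reaches zero, some time after a kill */
static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	percpu_ref_exit(&foo->ref);	/* "exit": frees the percpu counter */
	kfree(foo);
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	/* "init": the ref starts live, holding one implicit reference */
	if (percpu_ref_init(&foo->ref, foo_release, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}

/* reader path: take a temporary reference unless @foo has been killed */
static bool foo_use(struct foo *foo)
{
	if (!percpu_ref_tryget_live(&foo->ref))
		return false;		/* kill already started */
	/* ... safely use @foo ... */
	percpu_ref_put(&foo->ref);	/* may end up calling foo_release() */
	return true;
}

Note the distinction the reworded comments draw: percpu_ref_tryget() can still succeed after a kill as long as the count hasn't yet reached zero, whereas percpu_ref_tryget_live() fails as soon as the ref has been killed.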
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 070dab5e7d77..8ef3f5c20df6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -108,7 +108,6 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-
 	atomic_long_add((long)count - PCPU_COUNT_BIAS, &ref->count);
 
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
@@ -120,8 +119,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 		ref->confirm_kill(ref);
 
 	/*
-	 * Now we're in single atomic_t mode with a consistent refcount, so it's
-	 * safe to drop our initial ref:
+	 * Now we're in single atomic_long_t mode with a consistent
+	 * refcount, so it's safe to drop our initial ref:
 	 */
 	percpu_ref_put(ref);
 }
@@ -134,8 +133,8 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
  * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
  * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
  * called after @ref is seen as dead from all CPUs - all further
- * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
- * for more details.
+ * invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for more details.
  *
  * Due to the way percpu_ref is implemented, @confirm_kill will be called
  * after at least one full RCU grace period has passed but this is an
@@ -145,8 +144,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
 	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
-		  "percpu_ref_kill() called more than once on %pf!",
-		  ref->release);
+		  "%s called more than once on %pf!", __func__, ref->release);
 
 	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 	ref->confirm_kill = confirm_kill;
@@ -172,7 +170,7 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 	int cpu;
 
 	BUG_ON(!pcpu_count);
-	WARN_ON(!percpu_ref_is_zero(ref));
+	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
 	atomic_long_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
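The lib/ changes all sit on the kill/confirm/reinit path. Continuing the hypothetical sketch above (with a struct completion member added), this is roughly how percpu_ref_kill_and_confirm() pairs with a confirmation callback; again an illustration under assumed names, not code from the patch.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct foo {
	struct percpu_ref ref;
	struct completion confirm_done;
};

/* may not block; runs once all CPUs see @ref as dead */
static void foo_confirm_kill(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	complete(&foo->confirm_done);
}

static void foo_shut_down(struct foo *foo)
{
	init_completion(&foo->confirm_done);
	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
	/*
	 * Once foo_confirm_kill() has run, percpu_ref_tryget_live() is
	 * guaranteed to fail. The initial init-time reference is dropped
	 * inside percpu_ref_kill_rcu() (the percpu_ref_put() in the hunk
	 * above), so the release callback fires when the last user puts.
	 */
	wait_for_completion(&foo->confirm_done);
}

Once a killed ref has actually dropped to zero, percpu_ref_reinit() can switch it back to percpu mode for another live/dead cycle; presumably that repeatability is why the zero check there is demoted from WARN_ON() to WARN_ON_ONCE(), so a misuse doesn't warn on every cycle.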