| author | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:50 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:50 -0400 |
| commit | 1cae13e75b7a7848c03138636d4eb8d8a5054dd5 | |
| tree | 309eaf4b79e7a8e41e17175f3475d46bef57c2a6 /lib | |
| parent | 2aad2a86f6685c10360ec8a5a55eb9ab7059cb72 | |
percpu_ref: make INIT_ATOMIC and switch_to_atomic() sticky
Currently, a percpu_ref which is initialized with
PERCPU_REF_INIT_ATOMIC or switched to atomic mode via
percpu_ref_switch_to_atomic() automatically reverts to percpu mode on
the first percpu_ref_reinit(). This makes atomic mode difficult to use
for cases where a percpu_ref is used as a persistent on/off switch
which may be cycled multiple times.
This patch makes the atomic state sticky so that it survives
kill/reinit cycles. After this patch, the atomic state is cleared only
by an explicit percpu_ref_switch_to_percpu() call.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
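To make the behavior concrete, here is a minimal sketch of the persistent on/off-switch pattern the description refers to. The `my_switch` structure and its helpers are hypothetical, not part of the patch; the sketch only assumes the percpu_ref API as it exists at this point in the series (percpu_ref_init() taking init flags, percpu_ref_kill(), percpu_ref_reinit()).

```c
#include <linux/completion.h>
#include <linux/percpu-refcount.h>

/* Hypothetical user of a percpu_ref as a persistent on/off switch. */
struct my_switch {
	struct percpu_ref ref;
	struct completion released;
};

/* Called once the refcount reaches zero after percpu_ref_kill(). */
static void my_switch_release(struct percpu_ref *ref)
{
	struct my_switch *sw = container_of(ref, struct my_switch, ref);

	complete(&sw->released);
}

static int my_switch_init(struct my_switch *sw)
{
	init_completion(&sw->released);

	/* Start in atomic mode; with this patch the mode is sticky. */
	return percpu_ref_init(&sw->ref, my_switch_release,
			       PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

/* One off/on cycle of the switch. */
static void my_switch_cycle(struct my_switch *sw)
{
	percpu_ref_kill(&sw->ref);
	wait_for_completion(&sw->released);
	reinit_completion(&sw->released);

	/*
	 * Before this patch, reinit reverted the ref to percpu mode; now
	 * it stays atomic until percpu_ref_switch_to_percpu() is called.
	 */
	percpu_ref_reinit(&sw->ref);
}
```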
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/percpu-refcount.c | 20 |
1 file changed, 15 insertions(+), 5 deletions(-)
```diff
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index ed280fb1e5b5..6111bcb28376 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -67,6 +67,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
+	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+
 	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
 		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
 	else
@@ -202,7 +204,8 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
  * are guaraneed to be in atomic mode, @confirm_switch, which may not
  * block, is invoked. This function may be invoked concurrently with all
  * the get/put operations and can safely be mixed with kill and reinit
- * operations.
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
  *
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is already in
@@ -216,6 +219,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_switch)
 {
+	ref->force_atomic = true;
 	__percpu_ref_switch_to_atomic(ref, confirm_switch);
 }
 
@@ -255,7 +259,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  *
  * Switch @ref to percpu mode. This function may be invoked concurrently
  * with all the get/put operations and can safely be mixed with kill and
- * reinit operations.
+ * reinit operations. This function reverses the sticky atomic state set
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
+ * dying or dead, the actual switching takes place on the following
+ * percpu_ref_reinit().
  *
  * This function normally doesn't block and can be called from any context
  * but it may block if @ref is in the process of switching to atomic mode
@@ -263,6 +270,8 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  */
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
+	ref->force_atomic = false;
+
 	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
 		__percpu_ref_switch_to_percpu(ref);
@@ -304,8 +313,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  * @ref: perpcu_ref to re-initialize
  *
  * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init(). @ref must have been initialized successfully and
- * reached 0 but not exited.
+ * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
+ * initialized successfully and reached 0 but not exited.
  *
  * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
  * this function is in progress.
@@ -316,6 +325,7 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
-	__percpu_ref_switch_to_percpu(ref);
+	if (!ref->force_atomic)
+		__percpu_ref_switch_to_percpu(ref);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
```
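Correspondingly, leaving the sticky atomic state is always an explicit operation. Continuing the hypothetical sketch from above:

```c
/* Leave the sticky atomic mode and go back to percpu operation. */
static void my_switch_make_percpu(struct my_switch *sw)
{
	/*
	 * Clears the sticky state set by PERCPU_REF_INIT_ATOMIC or
	 * percpu_ref_switch_to_atomic(). If the ref is dying or dead,
	 * the actual mode switch is deferred to the next
	 * percpu_ref_reinit().
	 */
	percpu_ref_switch_to_percpu(&sw->ref);
}
```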
