diff options
author | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:50 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-09-24 13:31:50 -0400 |
commit | 2aad2a86f6685c10360ec8a5a55eb9ab7059cb72 (patch) | |
tree | 85da25f36ba0c8158becdc8ba940201652cf30ce /lib/percpu-refcount.c | |
parent | f47ad45784611297b699f3dffb6c7222b76afe64 (diff) |
percpu_ref: add PERCPU_REF_INIT_* flags
With the recent addition of percpu_ref_reinit(), percpu_ref now can be
used as a persistent switch which can be turned on and off repeatedly
where turning off maps to killing the ref and waiting for it to drain;
however, there currently isn't a way to initialize a percpu_ref in its
off (killed and drained) state, which can be inconvenient for certain
persistent switch use cases.
Similarly, percpu_ref_switch_to_atomic/percpu() allow dynamic
selection of operation mode; however, currently a newly initialized
percpu_ref is always in percpu mode making it impossible to avoid the
latency overhead of switching to atomic mode.
This patch adds @flags to percpu_ref_init() and implements the
following flags.
* PERCPU_REF_INIT_ATOMIC : start ref in atomic mode
* PERCPU_REF_INIT_DEAD : start ref killed and drained
These flags should be able to serve the above two use cases.
v2: target_core_tpg.c conversion was missing. Fixed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r-- | lib/percpu-refcount.c | 23 |
1 file changed, 18 insertions, 5 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 5a6d43baccc5..ed280fb1e5b5 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -45,27 +45,40 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref) | |||
45 | * percpu_ref_init - initialize a percpu refcount | 45 | * percpu_ref_init - initialize a percpu refcount |
46 | * @ref: percpu_ref to initialize | 46 | * @ref: percpu_ref to initialize |
47 | * @release: function which will be called when refcount hits 0 | 47 | * @release: function which will be called when refcount hits 0 |
48 | * @flags: PERCPU_REF_INIT_* flags | ||
48 | * @gfp: allocation mask to use | 49 | * @gfp: allocation mask to use |
49 | * | 50 | * |
50 | * Initializes the refcount in single atomic counter mode with a refcount of 1; | 51 | * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a |
51 | * analagous to atomic_long_set(ref, 1). | 52 | * refcount of 1; analagous to atomic_long_set(ref, 1). See the |
53 | * definitions of PERCPU_REF_INIT_* flags for flag behaviors. | ||
52 | * | 54 | * |
53 | * Note that @release must not sleep - it may potentially be called from RCU | 55 | * Note that @release must not sleep - it may potentially be called from RCU |
54 | * callback context by percpu_ref_kill(). | 56 | * callback context by percpu_ref_kill(). |
55 | */ | 57 | */ |
56 | int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, | 58 | int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, |
57 | gfp_t gfp) | 59 | unsigned int flags, gfp_t gfp) |
58 | { | 60 | { |
59 | size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS, | 61 | size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS, |
60 | __alignof__(unsigned long)); | 62 | __alignof__(unsigned long)); |
61 | 63 | unsigned long start_count = 0; | |
62 | atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS); | ||
63 | 64 | ||
64 | ref->percpu_count_ptr = (unsigned long) | 65 | ref->percpu_count_ptr = (unsigned long) |
65 | __alloc_percpu_gfp(sizeof(unsigned long), align, gfp); | 66 | __alloc_percpu_gfp(sizeof(unsigned long), align, gfp); |
66 | if (!ref->percpu_count_ptr) | 67 | if (!ref->percpu_count_ptr) |
67 | return -ENOMEM; | 68 | return -ENOMEM; |
68 | 69 | ||
70 | if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) | ||
71 | ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; | ||
72 | else | ||
73 | start_count += PERCPU_COUNT_BIAS; | ||
74 | |||
75 | if (flags & PERCPU_REF_INIT_DEAD) | ||
76 | ref->percpu_count_ptr |= __PERCPU_REF_DEAD; | ||
77 | else | ||
78 | start_count++; | ||
79 | |||
80 | atomic_long_set(&ref->count, start_count); | ||
81 | |||
69 | ref->release = release; | 82 | ref->release = release; |
70 | return 0; | 83 | return 0; |
71 | } | 84 | } |