diff options
author | Tejun Heo <tj@kernel.org> | 2014-06-28 08:10:14 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-06-28 08:10:14 -0400 |
commit | 2d7227828e1475c7b272e55bd70c4cec8eea219a (patch) | |
tree | 068171c424acc2390b1e6ecf514182c82cc5811d /lib | |
parent | 9a1049da9bd2cd83fe11d46433e603c193aa9c71 (diff) |
percpu-refcount: implement percpu_ref_reinit() and percpu_ref_is_zero()
Now that explicit invocation of percpu_ref_exit() is necessary to free
the percpu counter, we can implement percpu_ref_reinit() which
reinitializes a released percpu_ref. This can be used to implement a
scalable gating switch which can be drained and then re-opened without
worrying about memory allocation failures.
percpu_ref_is_zero() is added to be used in a sanity check in
percpu_ref_exit(). As this function will be useful for other purposes
too, make it a public interface.
v2: Use smp_read_barrier_depends() instead of smp_load_acquire(). We
only need data dep barrier and smp_load_acquire() is stronger and
heavier on some archs. Spotted by Lai Jiangshan.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/percpu-refcount.c | 35 |
1 files changed, 35 insertions, 0 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index ac4299120087..fe5a3342e960 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -61,6 +61,41 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release) | |||
61 | EXPORT_SYMBOL_GPL(percpu_ref_init); | 61 | EXPORT_SYMBOL_GPL(percpu_ref_init); |
62 | 62 | ||
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init().  @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	/* the percpu counter must not have been freed by percpu_ref_exit() */
	BUG_ON(!pcpu_count);
	/* caller contract: the ref was killed and drained to zero */
	WARN_ON(!percpu_ref_is_zero(ref));

	/* restore the initial count, matching what percpu_ref_init() set up */
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	/*
	 * Clearing PCPU_REF_DEAD switches get/put back to the fast percpu
	 * path; the release barrier orders it after the zeroing above.
	 */
	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
97 | |||
98 | /** | ||
64 | * percpu_ref_exit - undo percpu_ref_init() | 99 | * percpu_ref_exit - undo percpu_ref_init() |
65 | * @ref: percpu_ref to exit | 100 | * @ref: percpu_ref to exit |
66 | * | 101 | * |