about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2013-06-13 22:23:53 -0400
committer	Tejun Heo <tj@kernel.org>	2013-06-13 22:23:53 -0400
commit	dbece3a0f1ef0b19aff1cc6ed0942fec9ab98de1 (patch)
tree	3f594245c15b2f3ae8de6ec0c339e024988d22c4
parent	bc497bd33b2d6a6f07bc8574b4764edbd7fdffa8 (diff)
percpu-refcount: implement percpu_tryget() along with percpu_ref_kill_and_confirm()
Implement percpu_tryget() which stops giving out references once the percpu_ref is visible as killed. Because the refcnt is per-cpu, different CPUs will start to see a refcnt as killed at different points in time and tryget() may continue to succeed on subset of cpus for a while after percpu_ref_kill() returns. For use cases where it's necessary to know when all CPUs start to see the refcnt as dead, percpu_ref_kill_and_confirm() is added. The new function takes an extra argument @confirm_kill which is invoked when the refcnt is guaranteed to be viewed as killed on all CPUs. While this isn't the prettiest interface, it doesn't force synchronous wait and is much safer than requiring the caller to do its own call_rcu(). v2: Patch description rephrased to emphasize that tryget() may continue to succeed on some CPUs after kill() returns as suggested by Kent. v3: Function comment in percpu_ref_kill_and_confirm() updated warning people to not depend on the implied RCU grace period from the confirm callback as it's an implementation detail. Signed-off-by: Tejun Heo <tj@kernel.org> Slightly-Grumpily-Acked-by: Kent Overstreet <koverstreet@google.com>
-rw-r--r--	include/linux/percpu-refcount.h	50
-rw-r--r--	lib/percpu-refcount.c	23
2 files changed, 66 insertions(+), 7 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6d843d60690d..dd2a08600453 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -63,13 +63,30 @@ struct percpu_ref {
 	 */
 	unsigned __percpu		*pcpu_count;
 	percpu_ref_func_t		*release;
+	percpu_ref_func_t		*confirm_kill;
 	struct rcu_head			rcu;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release);
 void percpu_ref_cancel_init(struct percpu_ref *ref);
-void percpu_ref_kill(struct percpu_ref *ref);
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill);
+
+/**
+ * percpu_ref_kill - drop the initial ref
+ * @ref: percpu_ref to kill
+ *
+ * Must be used to drop the initial ref on a percpu refcount; must be called
+ * precisely once before shutdown.
+ *
+ * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
+ * percpu counters and dropping the initial ref.
+ */
+static inline void percpu_ref_kill(struct percpu_ref *ref)
+{
+	return percpu_ref_kill_and_confirm(ref, NULL);
+}
 
 #define PCPU_STATUS_BITS	2
 #define PCPU_STATUS_MASK	((1 << PCPU_STATUS_BITS) - 1)
@@ -101,6 +118,37 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless it has already been killed.  Returns
+ * %true on success; %false on failure.
+ *
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
+ * will fail.  For such guarantee, percpu_ref_kill_and_confirm() should be
+ * used.  After the confirm_kill callback is invoked, it's guaranteed that
+ * no new reference will be given out by percpu_ref_tryget().
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		__this_cpu_inc(*pcpu_count);
+		ret = true;
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
  * percpu_ref_put - decrement a percpu refcount
  * @ref: percpu_ref to put
  *
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index ebeaac274cb9..8bf9e719cca0 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -118,6 +118,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 
 	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
 
+	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
+	if (ref->confirm_kill)
+		ref->confirm_kill(ref);
+
 	/*
 	 * Now we're in single atomic_t mode with a consistent refcount, so it's
 	 * safe to drop our initial ref:
@@ -126,22 +130,29 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 }
 
 /**
- * percpu_ref_kill - safely drop initial ref
+ * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
  * @ref: percpu_ref to kill
+ * @confirm_kill: optional confirmation callback
  *
- * Must be used to drop the initial ref on a percpu refcount; must be called
- * precisely once before shutdown.
+ * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
+ * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
+ * called after @ref is seen as dead from all CPUs - all further
+ * invocations of percpu_ref_tryget() will fail.  See percpu_ref_tryget()
+ * for more details.
  *
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full RCU grace period has passed but this is an
+ * implementation detail and callers must not depend on it.
  */
-void percpu_ref_kill(struct percpu_ref *ref)
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill)
 {
 	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
 	ref->pcpu_count = (unsigned __percpu *)
 			  (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->confirm_kill = confirm_kill;
 
 	call_rcu(&ref->rcu, percpu_ref_kill_rcu);
 }