author		Tejun Heo <tj@kernel.org>	2015-09-29 17:47:16 -0400
committer	Tejun Heo <tj@kernel.org>	2016-08-10 15:02:58 -0400
commit		a2f5630cb737787c1bfd9aa894b1bf9f3f4554ea (patch)
tree		43e66c0656a0cf77954aaba6cb1d6d1f6bac3f8d /lib
parent		81abf2525827b29839a78fd55ab0699f033c41a5 (diff)
percpu_ref: remove unnecessary RCU grace period for staggered atomic switching confirmation
At the beginning, percpu_ref guaranteed an RCU grace period between a call to percpu_ref_kill_and_confirm() and the invocation of the confirmation callback. This guarantee exposed internal implementation details and got rescinded while switching over to sched RCU; however, __percpu_ref_switch_to_atomic() still inserts a full sched RCU grace period even when it can simply wait for the previous attempt.

Remove the unnecessary grace period and perform the confirmation synchronously for staggered atomic switching attempts. Update comments accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
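For illustration, a minimal caller-side sketch of what the new behaviour means for a user of percpu_ref_switch_to_atomic(): the names my_confirm_switch, my_switch_done and example_switch are hypothetical; only the percpu_ref API itself comes from the patch below.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(my_switch_done);	/* hypothetical example state */

/* percpu_ref_func_t confirmation callback (hypothetical name) */
static void my_confirm_switch(struct percpu_ref *ref)
{
	/* @ref is now seen as atomic-mode by all CPUs */
	complete(&my_switch_done);
}

static void example_switch(struct percpu_ref *ref)
{
	/*
	 * If another switch to atomic mode is already in flight, this call
	 * now waits for it and invokes my_confirm_switch() directly instead
	 * of scheduling it behind an additional sched RCU grace period.
	 */
	percpu_ref_switch_to_atomic(ref, my_confirm_switch);
	wait_for_completion(&my_switch_done);
}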
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu-refcount.c	22
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 27fe74948882..8ade009ca2c9 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -177,17 +177,11 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
 	} else if (confirm_switch) {
 		/*
-		 * Somebody already set ATOMIC.  Switching may still be in
-		 * progress.  @confirm_switch must be invoked after the
-		 * switching is complete and a full sched RCU grace period
-		 * has passed.  Wait synchronously for the previous
-		 * switching and schedule @confirm_switch invocation.
+		 * Somebody else already set ATOMIC.  Wait for its
+		 * completion and invoke @confirm_switch() directly.
 		 */
 		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-		ref->confirm_switch = confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+		confirm_switch(ref);
 	}
 }
 
@@ -211,10 +205,6 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
  * but it may block if @confirm_kill is specified and @ref is already in
  * the process of switching to atomic mode.  In such cases, @confirm_switch
  * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
  */
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_switch)
@@ -290,11 +280,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  *
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)