author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-11-02 16:33:57 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-01-16 17:59:54 -0500
commit		6563de9d6f1336157c9861bcd9864e0b47d65f9d (patch)
tree		a7bcbcf895f02a1a5b488c6a91ac5b5a2e95033b /kernel/rcu/tree.c
parent		52d7e48b86fc108e45a656d8e53e4237993c481d (diff)
rcu: Abstract the dynticks momentary-idle operation
This commit is the first step towards full abstraction of all accesses
to the ->dynticks counter, implementing the previously open-coded atomic
add of two in a new rcu_dynticks_momentary_idle() function.  This
abstraction will ease changes to the ->dynticks counter operation.

Note that this commit gets rid of the smp_mb__before_atomic() and the
smp_mb__after_atomic() calls that were previously present.  The reason
that this is OK from a memory-ordering perspective is that the atomic
operation is now atomic_add_return(), which, as a value-returning atomic,
guarantees full ordering.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
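The ordering argument above can be seen in miniature with userspace C11
atomics.  The sketch below is not part of the patch; stdatomic stands in
for the kernel's atomic_t API, and the momentary_idle_old() and
momentary_idle_new() names are invented for illustration.  It contrasts an
unordered add bracketed by explicit full fences with a value-returning
read-modify-write that is fully ordered on its own.

/* Userspace analogue of the pattern this patch touches (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dynticks = 1;		/* odd == CPU is currently non-idle */

/* Old pattern: an unordered add bracketed by explicit full barriers. */
static void momentary_idle_old(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__before_atomic() */
	atomic_fetch_add_explicit(&dynticks, 2, memory_order_relaxed); /* ~ atomic_add() */
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__after_atomic() */
}

/* New pattern: a value-returning RMW is fully ordered by itself. */
static void momentary_idle_new(void)
{
	/* atomic_fetch_add() is seq_cst and returns the old value; +2 gives the new one. */
	int special = atomic_fetch_add(&dynticks, 2) + 2;

	if (!(special & 0x1))	/* ~ WARN_ON_ONCE(): must not be called from idle */
		fprintf(stderr, "momentary_idle_new() called while idle\n");
}

int main(void)
{
	momentary_idle_old();
	momentary_idle_new();
	printf("dynticks is now %d\n", atomic_load(&dynticks));
	return 0;
}

In the kernel itself, atomic_add() provides no ordering and relies on the
smp_mb__before_atomic()/smp_mb__after_atomic() pair, while
atomic_add_return() is documented to be fully ordered, which is what lets
the patch drop the explicit barriers.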
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	19
1 file changed, 14 insertions, 5 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cb4e2056ccf3..14e283c351f6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -281,6 +281,19 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+/*
+ * Do a double-increment of the ->dynticks counter to emulate a
+ * momentary idle-CPU quiescent state.
+ */
+static void rcu_dynticks_momentary_idle(void)
+{
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	int special = atomic_add_return(2, &rdtp->dynticks);
+
+	/* It is illegal to call this from idle state. */
+	WARN_ON_ONCE(!(special & 0x1));
+}
+
 DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
 EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
 
@@ -300,7 +313,6 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
 static void rcu_momentary_dyntick_idle(void)
 {
 	struct rcu_data *rdp;
-	struct rcu_dynticks *rdtp;
 	int resched_mask;
 	struct rcu_state *rsp;
 
@@ -327,10 +339,7 @@ static void rcu_momentary_dyntick_idle(void)
 		 * quiescent state, with no need for this CPU to do anything
 		 * further.
 		 */
-		rdtp = this_cpu_ptr(&rcu_dynticks);
-		smp_mb__before_atomic(); /* Earlier stuff before QS. */
-		atomic_add(2, &rdtp->dynticks); /* QS. */
-		smp_mb__after_atomic(); /* Later stuff after QS. */
+		rcu_dynticks_momentary_idle();
 		break;
 	}
 }
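For context on why a double increment counts as a quiescent state: the
low-order bit of ->dynticks tracks whether the CPU is non-idle (odd) or
idle (even), and RCU's grace-period machinery compares the current counter
against an earlier snapshot, treating an even value or a counter that has
advanced by at least 2 as evidence of a quiescent state.  The sketch below
is an illustrative userspace model of that convention, not code from this
patch; saw_quiescent_state() and the snap/cur names are invented for the
example.

/* Illustrative model of the ->dynticks convention (not from the patch). */
#include <stdbool.h>
#include <stdio.h>

/* Even ->dynticks value: CPU is in dynticks-idle mode; odd: CPU is active. */
static bool counter_says_idle(unsigned int dynticks)
{
	return !(dynticks & 0x1);
}

/*
 * The grace-period machinery records a snapshot of the counter, waits,
 * and re-reads it.  Either an idle (even) value or a counter that has
 * advanced by at least 2 counts as a quiescent state, which is why the
 * double increment above reports one without the CPU ever going idle.
 */
static bool saw_quiescent_state(unsigned int snap, unsigned int cur)
{
	return counter_says_idle(cur) || (cur - snap) >= 2;
}

int main(void)
{
	unsigned int snap = 5;	/* odd: the CPU was busy when sampled */

	printf("unchanged:   %d\n", saw_quiescent_state(snap, snap));     /* 0 */
	printf("bumped by 2: %d\n", saw_quiescent_state(snap, snap + 2)); /* 1 */
	return 0;
}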