Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  77
1 file changed, 61 insertions(+), 16 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 50fee7689e71..0efad311ded4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -274,9 +274,19 @@ void rcu_bh_qs(void)
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
 
+/*
+ * Steal a bit from the bottom of ->dynticks for idle entry/exit
+ * control.  Initially this is for TLB flushing.
+ */
+#define RCU_DYNTICK_CTRL_MASK 0x1
+#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
+#ifndef rcu_eqs_special_exit
+#define rcu_eqs_special_exit() do { } while (0)
+#endif
+
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-	.dynticks = ATOMIC_INIT(1),
+	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
 	.dynticks_idle = ATOMIC_INIT(1),
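
The two macros added above carve the bottom bit of ->dynticks out as a deferred-action flag and make the quiescent-state counter advance in units of RCU_DYNTICK_CTRL_CTR instead of 1. Below is a minimal user-space C11 sketch of that layout, for illustration only; FLAG_MASK, FLAG_CTR, ticks and in_eqs() are made-up stand-ins rather than kernel identifiers, and the real code uses the kernel's atomic_t API rather than <stdatomic.h>.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_MASK 0x1               /* stand-in for RCU_DYNTICK_CTRL_MASK */
#define FLAG_CTR  (FLAG_MASK + 1)   /* stand-in for RCU_DYNTICK_CTRL_CTR */

static atomic_int ticks = FLAG_CTR; /* start "non-idle", no request pending */

/* Counter bit clear => the CPU is in an extended quiescent state. */
static bool in_eqs(int snap)
{
	return !(snap & FLAG_CTR);
}

int main(void)
{
	int snap;

	/* Entering idle advances the counter by FLAG_CTR (i.e. by 2). */
	snap = atomic_fetch_add(&ticks, FLAG_CTR) + FLAG_CTR;
	printf("after enter: 0x%x  in_eqs=%d\n", snap, in_eqs(snap));

	/* Exiting idle advances it again, flipping the counter bit back. */
	snap = atomic_fetch_add(&ticks, FLAG_CTR) + FLAG_CTR;
	printf("after exit:  0x%x  in_eqs=%d\n", snap, in_eqs(snap));
	return 0;
}

Because each idle entry or exit adds FLAG_CTR, the counter bit toggles every time, so a clear counter bit means the CPU is currently in an extended quiescent state, which is what the rewritten rcu_dynticks_in_eqs() below tests.
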
@@ -290,15 +300,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 static void rcu_dynticks_eqs_enter(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-	int special;
+	int seq;
 
 	/*
-	 * CPUs seeing atomic_inc_return() must see prior RCU read-side
+	 * CPUs seeing atomic_add_return() must see prior RCU read-side
 	 * critical sections, and we also must force ordering with the
 	 * next idle sojourn.
 	 */
-	special = atomic_inc_return(&rdtp->dynticks);
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1);
+	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	/* Better be in an extended quiescent state! */
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+		     (seq & RCU_DYNTICK_CTRL_CTR));
+	/* Better not have special action (TLB flush) pending! */
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+		     (seq & RCU_DYNTICK_CTRL_MASK));
 }
 
 /*
@@ -308,15 +323,22 @@ static void rcu_dynticks_eqs_enter(void)
 static void rcu_dynticks_eqs_exit(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-	int special;
+	int seq;
 
 	/*
-	 * CPUs seeing atomic_inc_return() must see prior idle sojourns,
+	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
 	 * and we also must force ordering with the next RCU read-side
 	 * critical section.
 	 */
-	special = atomic_inc_return(&rdtp->dynticks);
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1));
+	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+		     !(seq & RCU_DYNTICK_CTRL_CTR));
+	if (seq & RCU_DYNTICK_CTRL_MASK) {
+		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
+		smp_mb__after_atomic(); /* _exit after clearing mask. */
+		/* Prefer duplicate flushes to losing a flush. */
+		rcu_eqs_special_exit();
+	}
 }
 
 /*
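
The rewritten exit path checks the stolen bit, clears it with atomic_andnot() before acting, and only then runs rcu_eqs_special_exit(), so a request racing with the exit is at worst serviced twice rather than dropped. A self-contained user-space sketch of that clear-then-act pattern follows; deferred_action(), eqs_exit_sketch() and the FLAG_* names are illustrative placeholders rather than kernel code, and the ordering provided by smp_mb__after_atomic() is not modelled here.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_MASK 0x1               /* stand-in for RCU_DYNTICK_CTRL_MASK */
#define FLAG_CTR  (FLAG_MASK + 1)   /* stand-in for RCU_DYNTICK_CTRL_CTR */

/* Idle (counter bit clear) with a deferred request already pending. */
static atomic_int ticks = FLAG_MASK;

/* Placeholder for whatever rcu_eqs_special_exit() is wired up to do. */
static void deferred_action(void)
{
	puts("running deferred action (e.g. a TLB flush)");
}

static void eqs_exit_sketch(void)
{
	int seq = atomic_fetch_add(&ticks, FLAG_CTR) + FLAG_CTR;

	if (seq & FLAG_MASK) {
		/*
		 * Clear the request before acting on it: a new request
		 * racing with this exit is either seen now or left set
		 * for the next exit.  Duplicate actions are acceptable,
		 * lost ones are not.
		 */
		atomic_fetch_and(&ticks, ~FLAG_MASK);
		deferred_action();
	}
}

int main(void)
{
	eqs_exit_sketch();
	return 0;
}
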
@@ -333,9 +355,9 @@ static void rcu_dynticks_eqs_online(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	if (atomic_read(&rdtp->dynticks) & 0x1)
+	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
 		return;
-	atomic_add(0x1, &rdtp->dynticks);
+	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 }
 
 /*
@@ -347,7 +369,7 @@ bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	return !(atomic_read(&rdtp->dynticks) & 0x1);
+	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -358,7 +380,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 {
 	int snap = atomic_add_return(0, &rdtp->dynticks);
 
-	return snap;
+	return snap & ~RCU_DYNTICK_CTRL_MASK;
 }
 
 /*
@@ -367,7 +389,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-	return !(snap & 0x1);
+	return !(snap & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -387,10 +409,33 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
 static void rcu_dynticks_momentary_idle(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-	int special = atomic_add_return(2, &rdtp->dynticks);
+	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
+					&rdtp->dynticks);
 
 	/* It is illegal to call this from idle state. */
-	WARN_ON_ONCE(!(special & 0x1));
+	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+}
+
+/*
+ * Set the special (bottom) bit of the specified CPU so that it
+ * will take special action (such as flushing its TLB) on the
+ * next exit from an extended quiescent state.  Returns true if
+ * the bit was successfully set, or false if the CPU was not in
+ * an extended quiescent state.
+ */
+bool rcu_eqs_special_set(int cpu)
+{
+	int old;
+	int new;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	do {
+		old = atomic_read(&rdtp->dynticks);
+		if (old & RCU_DYNTICK_CTRL_CTR)
+			return false;
+		new = old | RCU_DYNTICK_CTRL_MASK;
+	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
+	return true;
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
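
The new rcu_eqs_special_set() sets the request bit only while the target CPU's counter bit stays clear, i.e. while that CPU remains in an extended quiescent state; if the loop sees the counter bit set, it returns false and the caller presumably falls back to another mechanism such as an IPI. A rough user-space analogue of that compare-and-swap loop, with request_if_idle() and the FLAG_* names as made-up stand-ins for the kernel identifiers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_MASK 0x1               /* stand-in for RCU_DYNTICK_CTRL_MASK */
#define FLAG_CTR  (FLAG_MASK + 1)   /* stand-in for RCU_DYNTICK_CTRL_CTR */

/*
 * Set the request bit only while the target stays in an extended
 * quiescent state.  If the counter bit is (or becomes) set, give up
 * and let the caller use some other mechanism, e.g. an IPI.
 */
static bool request_if_idle(atomic_int *ticks)
{
	int old = atomic_load(ticks);
	int new;

	do {
		if (old & FLAG_CTR)          /* target is not idle */
			return false;
		new = old | FLAG_MASK;       /* mark the request pending */
	} while (!atomic_compare_exchange_weak(ticks, &old, new));
	return true;
}

int main(void)
{
	atomic_int idle_cpu = 0;             /* counter bit clear: idle */
	atomic_int busy_cpu = FLAG_CTR;      /* counter bit set: not idle */

	printf("idle cpu: %d\n", request_if_idle(&idle_cpu));  /* prints 1 */
	printf("busy cpu: %d\n", request_if_idle(&busy_cpu));  /* prints 0 */
	return 0;
}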