author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-06-21 16:00:57 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-08-18 21:58:43 -0400
commit    eb348b898290da242e46df75ab0b9772003e08b8 (patch)
tree      f35ed4203ca3af3489e780430685a09bcb6a5f92 /kernel
parent    2333210b26cf7aaf48d71343029afb860103d9f9 (diff)
nohz_full: Add per-CPU idle-state tracking
This commit adds the code that updates the rcu_dynticks structure's new fields to track the per-CPU idle state based on interrupts and on transitions into and out of the idle loop. (NMIs are ignored because NMI handlers cannot cleanly read out the time anyway.) This code is similar to the code that maintains RCU's idea of per-CPU idleness, but differs in that RCU treats CPUs running in user mode as idle, whereas this new code does not.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
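The new rcu_sysidle_enter()/rcu_sysidle_exit() functions below rely on an even/odd counter convention: each full idle transition atomically increments the per-CPU dynticks_idle counter with full memory barriers on either side, leaving the counter even while the CPU is fully idle and odd otherwise, so a remote CPU can sample the idle state without locks. Here is a minimal stand-alone user-space sketch of that convention (nesting bookkeeping omitted); the names idle_count, cpu_enter_idle(), and cpu_exit_idle() are invented for illustration and appear nowhere in the patch.

#include <assert.h>
#include <stdatomic.h>

/*
 * Even = CPU fully idle, odd = CPU non-idle.  The counter starts odd
 * (non-idle), mirroring the kernel's per-CPU dynticks_idle counter.
 */
static atomic_int idle_count = 1;

static void cpu_enter_idle(void)   /* models rcu_sysidle_enter() */
{
        /* seq_cst fetch-add stands in for smp_mb() + atomic_inc() + smp_mb(). */
        int c = atomic_fetch_add(&idle_count, 1) + 1;
        assert(!(c & 1));          /* counter now even: fully idle */
}

static void cpu_exit_idle(void)    /* models rcu_sysidle_exit() */
{
        int c = atomic_fetch_add(&idle_count, 1) + 1;
        assert(c & 1);             /* counter now odd: non-idle */
}

int main(void)
{
        cpu_enter_idle();          /* CPU enters the idle loop */
        cpu_exit_idle();           /* an interrupt or a task wakes it */
        return 0;
}

C11 sequentially consistent atomics stand in here for the kernel's smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc() pairs.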
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c         4
-rw-r--r--  kernel/rcutree.h         2
-rw-r--r--  kernel/rcutree_plugin.h  79
3 files changed, 85 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 4f27b85d8c86..b0d2cc3ea15a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -431,6 +431,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
+	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -481,6 +482,7 @@ void rcu_irq_exit(void)
 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_enter_common(rdtp, oldval, true);
+	rcu_sysidle_enter(rdtp, 1);
 	local_irq_restore(flags);
 }
 
@@ -549,6 +551,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
+	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -600,6 +603,7 @@ void rcu_irq_enter(void)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_exit_common(rdtp, oldval, true);
+	rcu_sysidle_exit(rdtp, 1);
 	local_irq_restore(flags);
 }
 
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 52d1be108e75..9dd8b177f1ac 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -553,6 +553,8 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
 static void rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index e5baccbd8038..eab81da614b8 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2378,6 +2378,77 @@ static void rcu_kick_nohz_cpu(int cpu)
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 
 /*
+ * Invoked to note exit from irq or task transition to idle.  Note that
+ * usermode execution does -not- count as idle here!  After all, we want
+ * to detect full-system idle states, not RCU quiescent states and grace
+ * periods.  The caller must have disabled interrupts.
+ */
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+	unsigned long j;
+
+	/* Adjust nesting, check for fully idle. */
+	if (irq) {
+		rdtp->dynticks_idle_nesting--;
+		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+		if (rdtp->dynticks_idle_nesting != 0)
+			return;	/* Still not fully idle. */
+	} else {
+		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
+		    DYNTICK_TASK_NEST_VALUE) {
+			rdtp->dynticks_idle_nesting = 0;
+		} else {
+			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
+			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+			return;	/* Still not fully idle. */
+		}
+	}
+
+	/* Record start of fully idle period. */
+	j = jiffies;
+	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+	smp_mb__before_atomic_inc();
+	atomic_inc(&rdtp->dynticks_idle);
+	smp_mb__after_atomic_inc();
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
+}
+
+/*
+ * Invoked to note entry to irq or task transition from idle.  Note that
+ * usermode execution does -not- count as idle here!  The caller must
+ * have disabled interrupts.
+ */
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+	/* Adjust nesting, check for already non-idle. */
+	if (irq) {
+		rdtp->dynticks_idle_nesting++;
+		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+		if (rdtp->dynticks_idle_nesting != 1)
+			return; /* Already non-idle. */
+	} else {
+		/*
+		 * Allow for irq misnesting.  Yes, it really is possible
+		 * to enter an irq handler then never leave it, and maybe
+		 * also vice versa.  Handle both possibilities.
+		 */
+		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
+			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
+			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+			return; /* Already non-idle. */
+		} else {
+			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
+		}
+	}
+
+	/* Record end of idle period. */
+	smp_mb__before_atomic_inc();
+	atomic_inc(&rdtp->dynticks_idle);
+	smp_mb__after_atomic_inc();
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
+}
+
+/*
  * Initialize dynticks sysidle state for CPUs coming online.
  */
 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
@@ -2387,6 +2458,14 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 {
 }
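Later patches in this series scan these per-CPU counters to detect the full-system-idle state. The payoff of the even/odd encoding is that an observer needs only two lock-free reads to confirm a CPU stayed idle across an interval: snapshot the counter, and later check that the snapshot was even and that the counter has not moved. A hypothetical helper continuing the user-space sketch above (stayed_idle is an invented name, not a kernel function):

#include <stdatomic.h>

/*
 * True iff the CPU stayed fully idle over the whole interval: the
 * snapshot was even (idle when taken) and the counter is unchanged.
 */
static int stayed_idle(atomic_int *ctr, int snap)
{
        return !(snap & 1) && atomic_load(ctr) == snap;
}

Usage against the earlier sketch: take snap = atomic_load(&idle_count), do other work, then call stayed_idle(&idle_count, snap) to learn whether the modeled CPU was idle throughout.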