path: root/kernel/rcutree.c
author    Paul E. McKenney <paul.mckenney@linaro.org>    2011-11-28 15:28:34 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-12-11 13:32:07 -0500
commit    7cb92499000e3c86dae653077b1465458a039ef6
tree      ebc982a5cc562b4fe0cb8f20541f45a5506a0b5f /kernel/rcutree.c
parent    3842a0832a1d6eb0b31421f8810a813135967512
rcu: Permit dyntick-idle with callbacks pending
The current implementation of RCU_FAST_NO_HZ prevents CPUs from entering dyntick-idle state if they have RCU callbacks pending. Unfortunately, this often keeps them out of that state, especially if at least one other CPU is not in dyntick-idle state. The resulting per-tick wakeup is wasteful in many cases: if the CPU has already fully responded to the current RCU grace period, there is nothing for it to do until this grace period ends, which will frequently take several jiffies.

This commit therefore permits a CPU that has done everything that the current grace period has asked of it (rcu_pending() == 0) to enter dyntick-idle mode even if it still has RCU callbacks pending. However, such a CPU posts a timer to wake it up several jiffies later (6 jiffies, based on experience with grace-period lengths). This wakeup is required to handle situations that could otherwise leave all CPUs in dyntick-idle mode, thus failing to ever complete the current grace period. If a CPU wakes up before the timer goes off, it cancels that timer, thus avoiding spurious wakeups.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
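The timer arm/cancel scheme described in the commit message can be pictured with a short sketch. This is not the code added by this commit (the implementation lives elsewhere in the RCU sources; only the call sites appear in the diff below); it is a minimal illustration against the 3.2-era timer API, and the names RCU_IDLE_GP_DELAY, rcu_idle_gp_timer, rcu_idle_gp_timer_func, and rcu_arm_idle_gp_timer are illustrative assumptions. rcu_prepare_for_idle_init() and rcu_cleanup_after_idle() are the hooks visible in the diff.

/* Minimal sketch of the wakeup-timer idea; not the actual RCU implementation. */
#include <linux/timer.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>

/* Assumed wakeup delay; the commit message cites roughly 6 jiffies. */
#define RCU_IDLE_GP_DELAY 6

/* One wakeup timer per CPU (illustrative name). */
static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);

/* The timer interrupt itself provides the wakeup; nothing more to do here. */
static void rcu_idle_gp_timer_func(unsigned long unused)
{
}

/* Set up the per-CPU timer once, as done from rcu_prepare_for_idle_init(). */
static void rcu_prepare_for_idle_init(int cpu)
{
        setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
                    rcu_idle_gp_timer_func, 0);
}

/*
 * Hypothetical helper: arm the wakeup before entering dyntick-idle when
 * callbacks are pending but the current grace period needs nothing from
 * this CPU (rcu_pending() == 0).
 */
static void rcu_arm_idle_gp_timer(int cpu)
{
        mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
                  jiffies + RCU_IDLE_GP_DELAY);
}

/* On exit from dyntick-idle, cancel any pending wakeup to avoid spurious ones. */
static void rcu_cleanup_after_idle(int cpu)
{
        del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
}

The cancel on idle exit is what makes the early wakeup cheap: a CPU that leaves dyntick-idle before the delay expires never takes the extra timer tick, matching the "cancels that timer" behavior described above.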
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--    kernel/rcutree.c    3
1 file changed, 3 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 69bb37287cc8..bf085d7f6a3f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -448,6 +448,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
 	if (!is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
@@ -2057,6 +2058,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+	rcu_prepare_for_idle_init(cpu);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/*
@@ -2138,6 +2140,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 		rcu_send_cbs_to_online(&rcu_bh_state);
 		rcu_send_cbs_to_online(&rcu_sched_state);
 		rcu_preempt_send_cbs_to_online();
+		rcu_cleanup_after_idle(cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: